file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
align.rs | mod rustbio;
use std::{
ops::Range,
sync::{
atomic::{AtomicBool, AtomicU16, Ordering},
mpsc::{Sender, SyncSender},
Arc,
},
thread::available_parallelism,
};
use crate::{file::FileContent, view::AlignedMessage};
use bio::alignment::AlignmentOperation as Op;
use realfft::{num_complex::Complex64, RealFftPlanner, RealToComplex};
use serde::{Deserialize, Serialize};
use self::rustbio::{align_banded, RustBio};
/// Default number of bytes aligned per block in `AlignMode::Blockwise`.
pub const DEFAULT_BLOCKSIZE: usize = 8192;
/// Default k-mer length for the banded alignment variant.
pub const DEFAULT_KMER: usize = 8;
/// Default band window size for the banded alignment variant.
pub const DEFAULT_WINDOW: usize = 6;
/// An align mode, can be either Local for local alignment, global for global alignment,
/// or Blockwise with a given block size. The blockwise mode starts from a given position
/// and aligns only using `blocksize` bytes from each sequence in one direction, which
/// makes it works fast and local, but it doesn't see bigger gaps and everything after big gaps
/// tends to be unaligned.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum AlignMode {
    /// Local alignment over the whole sequences.
    Local,
    /// Global alignment over the whole sequences.
    Global,
    /// Blockwise alignment with the given block size in bytes.
    Blockwise(usize),
}
/// The alignment mode actually handed to the algorithm backends.
/// `Semiglobal` is only used internally, for aligning a selected range
/// of one file against the whole of the other file.
#[derive(Clone, Copy, Debug)]
pub enum InternalMode {
    Local,
    Global,
    Semiglobal,
}
impl From<AlignMode> for InternalMode {
    /// Maps the user-facing mode onto the backend mode;
    /// blockwise alignment runs each individual block globally.
    fn from(mode: AlignMode) -> Self {
        if let AlignMode::Local = mode {
            Self::Local
        } else {
            // Global and Blockwise(_) both align their input globally
            Self::Global
        }
    }
}
/// Common interface for the alignment backends (plain rust-bio and banded).
trait Align {
    /// Aligns `x` against `y` using the scores from `algo` in the given mode,
    /// returning the resulting sequence of alignment operations.
    fn align(&self, algo: &AlignAlgorithm, mode: InternalMode, x: &[u8], y: &[u8]) -> Vec<Op>;
}
/// Determines whether to use the banded variant of the algorithm with given k-mer length
/// and window size
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Banded {
    /// Full (non-banded) alignment.
    Normal,
    /// Banded alignment with the given k-mer length and band window size.
    Banded { kmer: usize, window: usize },
}
/// Contains parameters to run the alignment algorithm with
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct AlignAlgorithm {
    // scores are added up, so penalties are expressed as negative values
    pub gap_open: i32,
    pub gap_extend: i32,
    pub mismatch_score: i32,
    pub match_score: i32,
    // which overall strategy to use (local/global/blockwise)
    pub mode: AlignMode,
    // whether to use the banded variant of the aligner
    pub band: Banded,
}
impl Default for AlignAlgorithm {
    /// Defaults to non-banded blockwise alignment with the default block size
    /// and mild (negative) gap/mismatch penalties.
    fn default() -> Self {
        AlignAlgorithm {
            gap_open: -5,
            gap_extend: -1,
            mismatch_score: -1,
            match_score: 1,
            mode: AlignMode::Blockwise(DEFAULT_BLOCKSIZE),
            band: Banded::Normal,
        }
    }
}
impl AlignAlgorithm {
    /// This function starts the threads for the alignment, which send the data over the sender.
    /// It should then immediately return.
    pub fn start_align(
        &self,
        x: FileContent,
        y: FileContent,
        addr: (usize, usize),
        sender: Sender<AlignedMessage>,
    ) {
        // copy the parameters so the spawned threads can own them
        let algo = *self;
        match self.mode {
            AlignMode::Local => {
                // we only need one thread
                std::thread::spawn(move || algo.align_whole(x, y, InternalMode::Local, sender));
            }
            AlignMode::Global => {
                std::thread::spawn(move || algo.align_whole(x, y, InternalMode::Global, sender));
            }
            AlignMode::Blockwise(blocksize) => {
                // for Blockwise, we need one thread for each direction from the cursor
                // Clone the data for the second thread here
                let x_cp = x.clone();
                let y_cp = y.clone();
                let sender_cp = sender.clone();
                std::thread::spawn(move || algo.align_end(x, y, addr, blocksize, sender));
                std::thread::spawn(move || {
                    algo.align_front(x_cp, y_cp, addr, blocksize, sender_cp)
                });
            }
        }
    }
    /// Starts the alignment threads when selections may be present.
    /// With exactly one non-empty selection, the selected range is aligned
    /// semiglobally against the other file; otherwise this falls back to
    /// the regular `start_align` process. Returns immediately.
    pub fn start_align_with_selection(
        &self,
        files: [FileContent; 2],
        selection: [Option<Range<usize>>; 2],
        addr: [usize; 2],
        sender: Sender<AlignedMessage>,
    ) {
        let (selected, right, end) = match selection.clone() {
            [None, None] | [Some(_), Some(_)] => {
                let [file0, file1] = files;
                // if both or none are selected, just do the normal process
                return self.start_align(file0, file1, (addr[0], addr[1]), sender);
            }
            [Some(x), None] | [None, Some(x)] => {
                if x.is_empty() {
                    // selection is empty, does not really make sense to do glocal alignment
                    let [file0, file1] = files;
                    return self.start_align(file0, file1, (addr[0], addr[1]), sender);
                }
                // `right` is true when the selection is in the second file
                let right = selection[1].is_some();
                (
                    x.clone(),
                    selection[1].is_some(),
                    // whether the cursor sits away from the selection start,
                    // used later to decide which side is appended vs prepended
                    addr[right as usize] != x.start,
                )
            }
        };
        let algo = *self;
        std::thread::spawn(move || {
            algo.align_with_selection(files, (selected, right), end, sender)
        });
    }
    /// Dispatches to the configured backend; identical inputs short-circuit
    /// to an all-match alignment without running the aligner at all.
    fn align(&self, x: &[u8], y: &[u8], mode: InternalMode) -> Vec<Op> {
        if x[..] == y[..] {
            return vec![Op::Match; x.len()];
        }
        if self.band == Banded::Normal {
            RustBio.align(self, mode, x, y)
        } else {
            align_banded(self, mode, x, y)
        }
    }
    /// Aligns x to y as a whole
    fn align_whole(
        &self,
        x: FileContent,
        y: FileContent,
        mode: InternalMode,
        sender: Sender<AlignedMessage>,
    ) {
        let alignment = self.align(&x, &y, mode);
        // a send error means the receiver is gone, so there is nothing left to do
        let _ = sender.send(AlignedMessage::Append(
            AlignElement::from_array(&alignment, &x, &y, 0, 0).0,
        ));
    }
    /// Aligns the selected range (the "pattern") of one file semiglobally
    /// against the whole other file (the "text"), sends that result, and then
    /// continues blockwise alignment outward in both directions from it.
    fn align_with_selection(
        &self,
        files: [FileContent; 2],
        selection: (Range<usize>, bool),
        end: bool,
        sender: Sender<AlignedMessage>,
    ) {
        let (select, right) = selection;
        // `right` selects which file contains the pattern
        let full_pattern = &files[right as usize].clone();
        let pattern = &files[right as usize].clone()[select.clone()];
        let text = &files[(!right) as usize].clone()[..];
        let alignment = self.align(pattern, text, InternalMode::Semiglobal);
        // strip the clips/deletions at the edges and find where in the text the pattern starts
        let (alignment, textaddr) = ops_pattern_subrange(&alignment);
        let (mut array, pattern_end, text_end) =
            AlignElement::from_array(alignment, full_pattern, text, select.start, textaddr);
        let (start_addr, end_addr) = if right {
            // the pattern belongs to the second file, so swap the x/y roles back
            array.iter_mut().for_each(|x| *x = x.mirror());
            ((textaddr, select.start), (text_end, pattern_end))
        } else {
            ((select.start, textaddr), (pattern_end, text_end))
        };
        // `end` decides whether the bulk of the result is prepended or appended
        let (prepend, append) = if end {
            let ap = array.pop().into_iter().collect();
            (array, ap)
        } else {
            (Vec::new(), array)
        };
        if sender.send(AlignedMessage::Append(append)).is_err() {
            return;
        }
        if sender.send(AlignedMessage::Prepend(prepend)).is_err() {
            return;
        }
        // non-blockwise modes continue with effectively unbounded blocks
        let blocksize = if let AlignMode::Blockwise(s) = self.mode {
            s
        } else {
            usize::MAX
        };
        let files2 = files.clone();
        let sender2 = sender.clone();
        let algo = *self;
        // continue towards higher addresses on a new thread...
        std::thread::spawn(move || {
            algo.align_end(
                files2[0].clone(),
                files2[1].clone(),
                end_addr,
                blocksize,
                sender2,
            );
        });
        // ...and towards lower addresses on this one
        self.align_front(
            files[0].clone(),
            files[1].clone(),
            start_addr,
            blocksize,
            sender,
        );
    }
    /// Blockwise alignment in the ascending address direction
    pub fn align_end(
        &self,
        x: FileContent,
        y: FileContent,
        addr: (usize, usize),
        block_size: usize,
        sender: Sender<AlignedMessage>,
    ) {
        let (mut xaddr, mut yaddr) = addr;
        // we want to have the beginning of our two arrays aligned at the same place
        // since we start from a previous alignment or a cursor
        while xaddr < x.len() && yaddr < y.len() {
            // align at most block_size bytes from each sequence
            let end_aligned = self.align(
                &x[xaddr..(xaddr + block_size).min(x.len())],
                &y[yaddr..(yaddr + block_size).min(y.len())],
                self.mode.into(),
            );
            // we only actually append at most half of the block size since we make sure gaps crossing
            // block boundaries are better detected
            let ops = &end_aligned[0..end_aligned.len().min(block_size / 2)];
            // we will not progress like this, so might as well quit
            if ops.is_empty() {
                break;
            }
            let (end, new_xaddr, new_yaddr) = AlignElement::from_array(ops, &x, &y, xaddr, yaddr);
            if sender.send(AlignedMessage::Append(end)).is_err() {
                return;
            }
            xaddr = new_xaddr;
            yaddr = new_yaddr;
        }
        // if exactly one sequence is exhausted, emit the remainder of the other as a clip
        let clip = if x.len() == xaddr {
            Op::Yclip(y.len() - yaddr)
        } else if y.len() == yaddr {
            Op::Xclip(x.len() - xaddr)
        } else {
            return;
        };
        let leftover = AlignElement::from_array(&[clip], &x, &y, xaddr, yaddr).0;
        let _ = sender.send(AlignedMessage::Append(leftover));
    }
    /// Same as align_end, but in the other direction
    pub fn align_front(
        &self,
        x: FileContent,
        y: FileContent,
        addr: (usize, usize),
        block_size: usize,
        sender: Sender<AlignedMessage>,
    ) {
        let (mut xaddr, mut yaddr) = addr;
        while xaddr > 0 && yaddr > 0 {
            let lower_xaddr = xaddr.saturating_sub(block_size);
            let lower_yaddr = yaddr.saturating_sub(block_size);
            let aligned = self.align(
                &x[lower_xaddr..xaddr],
                &y[lower_yaddr..yaddr],
                self.mode.into(),
            );
            // unlike in align_end, we create the Alignelement from the whole array and then cut it
            // in half. This is because the addresses returned from from_array are at the end, which
            // we already know, so we instead take the start addresses from the array itself
            let (end, _, _) = AlignElement::from_array(&aligned, &x, &y, lower_xaddr, lower_yaddr);
            let real_end = Vec::from(&end[end.len().saturating_sub(block_size / 2)..end.len()]);
            // if this is empty, we will not progress, so send the leftover out and quit after that
            if real_end.is_empty() {
                break;
            }
            let first = real_end.first().unwrap();
            xaddr = first.xaddr;
            yaddr = first.yaddr;
            if sender.send(AlignedMessage::Prepend(real_end)).is_err() {
                return;
            }
        }
        // if exactly one side reached address 0, emit everything before the
        // other side's current address as a clip
        let clip = if xaddr == 0 {
            Op::Yclip(yaddr)
        } else if yaddr == 0 {
            Op::Xclip(xaddr)
        } else {
            return;
        };
        let leftover = AlignElement::from_array(&[clip], &x, &y, 0, 0).0;
        let _ = sender.send(AlignedMessage::Prepend(leftover));
    }
}
/// Representation of the alignment that saves the original addresses of the bytes.
/// This has some space overhead, but alignment is slow enough for that not to matter in most cases.
#[derive(Clone, Copy, Debug)]
pub struct AlignElement {
    // address of the byte in the first file
    pub xaddr: usize,
    // byte value in the first file; None represents a gap
    pub xbyte: Option<u8>,
    // address of the byte in the second file
    pub yaddr: usize,
    // byte value in the second file; None represents a gap
    pub ybyte: Option<u8>,
}
impl AlignElement {
/// mirrors the values
pub fn mirror(&self) -> AlignElement {
AlignElement {
xaddr: self.yaddr,
xbyte: self.ybyte,
yaddr: self.xaddr,
ybyte: self.xbyte,
}
}
/// Creates a vector out of `AlignElement`s from the operations outputted by rust-bio.
/// Also outputs the addresses at the end of the array.
fn from_array(
r: &[Op],
x: &[u8],
y: &[u8],
mut xaddr: usize,
mut yaddr: usize,
) -> (Vec<AlignElement>, usize, usize) {
let mut v = Vec::new();
for op in r {
match op {
Op::Match | Op::Subst => {
v.push(AlignElement {
xaddr,
xbyte: Some(x[xaddr]),
yaddr,
ybyte: Some(y[yaddr]),
});
xaddr += 1;
yaddr += 1;
}
Op::Ins => |
Op::Del => {
v.push(AlignElement {
xaddr,
xbyte: None,
yaddr,
ybyte: Some(y[yaddr]),
});
yaddr += 1;
}
Op::Xclip(size) => {
v.extend((xaddr..xaddr + size).map(|s| AlignElement {
xaddr: s,
xbyte: Some(x[s]),
yaddr,
ybyte: None,
}));
xaddr += size
}
Op::Yclip(size) => {
v.extend((yaddr..yaddr + size).map(|s| AlignElement {
xaddr,
xbyte: None,
yaddr: s,
ybyte: Some(y[s]),
}));
yaddr += size
}
}
}
(v, xaddr, yaddr)
}
}
/// Trims a semiglobal alignment down to the part that actually covers the pattern:
/// leading/trailing clips and deletions are removed, and the text address at which
/// the pattern starts is returned alongside the trimmed operation slice.
fn ops_pattern_subrange(mut ops: &[Op]) -> (&[Op], usize) {
    let mut text_start = 0;
    // a single leading Yclip marks how far into the text the alignment begins
    if let [Op::Yclip(skipped), tail @ ..] = ops {
        text_start += skipped;
        ops = tail;
    }
    // leading deletions also advance the position in the text
    while let [Op::Del, tail @ ..] = ops {
        text_start += 1;
        ops = tail;
    }
    // trailing deletions and clips lie past the pattern, so they are dropped
    while let [head @ .., Op::Del | Op::Yclip(_)] = ops {
        ops = head;
    }
    (ops, text_start)
}
/// Progress updates emitted by `FlatAlignmentContext::align_flat`.
pub enum FlatAlignProgressMessage {
    /// Number of byte values processed so far (out of 256).
    Incomplete(u16),
    /// Final relative offset between the two files with the best overlap.
    Complete(isize),
}
/// State for computing the single best constant offset between two files
/// (alignment without insertions/deletions, for the compvec view).
pub struct FlatAlignmentContext {
    // cleared by the caller to cancel a running computation
    is_running: Arc<AtomicBool>,
    // the two files being compared
    vecs: [FileContent; 2],
    // callback for reporting progress and the final result
    update_progress: Box<dyn FnMut(FlatAlignProgressMessage) + Send + 'static>,
}
impl FlatAlignmentContext {
    /// Creates a new context; clearing `is_running` cancels a running `align_flat`.
    pub fn new(
        is_running: Arc<AtomicBool>,
        vecs: [FileContent; 2],
        update_progress: Box<dyn FnMut(FlatAlignProgressMessage) + Send + 'static>,
    ) -> Self {
        Self {
            is_running,
            vecs,
            update_progress,
        }
    }
    // this finds the alignment between two arrays *without* removing elements such that
    // fewest bytes are different (for the compvec)
    pub fn align_flat(mut self) {
        // this algorithm works by, for each byte:
        // * making an indicator vector for both files indicating the addresses that have the given byte
        // * cross-correlating them, which results in the number of matches of that byte value for each relative offset
        // and then adding them all together to get the total number of matching bytes
        let mut progress = 0u16;
        // shared work queue: the next byte value for a worker thread to process
        let current_byte = Arc::new(AtomicU16::new(0));
        let mut fft_planner = RealFftPlanner::new();
        // twice the longer file's length; the indicator vectors are zero-padded to this size
        let total_len = self.vecs.iter().map(|x| x.len()).max().unwrap() * 2;
        // the cross correlation is done using the omnipresent fft algorithm
        let fft_forward = fft_planner.plan_fft_forward(total_len);
        let fft_inverse = fft_planner.plan_fft_inverse(total_len);
        let mut sum = fft_forward.make_output_vec();
        // this is easily parallelizable for up to 256 threads, for which we span a thread pool
        let thread_num = available_parallelism().map(usize::from).unwrap_or(1);
        let (send, recv) = std::sync::mpsc::sync_channel::<Vec<Complex64>>(4.max(thread_num));
        for _ in 0..thread_num {
            let vecs = [self.vecs[0].clone(), self.vecs[1].clone()];
            let inbyte = current_byte.clone();
            let outvecs = send.clone();
            let fft = fft_forward.clone();
            std::thread::spawn(move || correlation_thread(vecs, inbyte, outvecs, fft));
        }
        // exactly 256 vectors are sent in total (one per byte value); `send` is still
        // alive in this thread, so the iterator would block forever without take(256)
        for vec in recv.into_iter().take(256) {
            if !self.is_running.load(Ordering::Relaxed) {
                return;
            }
            // add the vectors together in the frequency domain
            for (a, b) in sum.iter_mut().zip(vec.into_iter()) {
                *a += b;
            }
            progress += 1;
            (self.update_progress)(FlatAlignProgressMessage::Incomplete(progress));
        }
        // get the actual result in the time domain
        let mut result = fft_inverse.make_output_vec();
        fft_inverse
            .process(&mut sum, &mut result)
            .expect("Wrong lengths");
        drop(sum);
        // positive offset of the array with the highest value of overlap
        let offset = result
            .iter()
            .enumerate()
            .max_by(|a, b| {
                // order NaN below everything so it is never picked as the maximum
                a.1.partial_cmp(b.1).unwrap_or_else(|| {
                    if a.1.is_nan() {
                        std::cmp::Ordering::Less
                    } else {
                        std::cmp::Ordering::Greater
                    }
                })
            })
            .unwrap_or((0, &0.0))
            .0;
        drop(result);
        // reverse direction of result array (one input was reversed by the workers)
        let offset = total_len - offset - 1;
        // get the relative offset between the two vectors with optimal overlap
        let relative_offset = if offset >= total_len / 2 {
            offset as isize - total_len as isize
        } else {
            offset as isize
        };
        (self.update_progress)(FlatAlignProgressMessage::Complete(relative_offset))
    }
}
/// Worker loop for `align_flat`: repeatedly claims the next unprocessed byte value
/// (0..=255) from `inbyte` and sends that byte's frequency-domain cross-correlation
/// contribution over `outvecs`. Exits when all byte values are taken or the
/// receiving side hangs up.
fn correlation_thread(
    vecs: [FileContent; 2],
    inbyte: Arc<AtomicU16>,
    outvecs: SyncSender<Vec<Complex64>>,
    fft: Arc<dyn RealToComplex<f64>>,
) {
    let len = fft.len();
    loop {
        // check if the next value in queue is still below 256
        let byte: u8 = match inbyte.fetch_add(1, Ordering::Relaxed).try_into() {
            Ok(f) => f,
            Err(_) => return,
        };
        // cross-correlation using ffts
        let mut first_out = fft.make_output_vec();
        let mut first = fft.make_input_vec();
        // one of the vectors is reversed because we want correlation, not convolution
        for (i, x) in vecs[0].iter().enumerate() {
            if *x == byte {
                first[len - i - 1] = 1.0;
            }
        }
        fft.process(&mut first, &mut first_out)
            .expect("Wrong fft vector lengths");
        // these vectors can be large, so drop them as soon as possible
        drop(first);
        let mut second = fft.make_input_vec();
        for (i, x) in vecs[1].iter().enumerate() {
            if *x == byte {
                second[i] = 1.0
            }
        }
        let mut second_out = fft.make_output_vec();
        fft.process(&mut second, &mut second_out)
            .expect("Wrong fft vector lengths");
        drop(second);
        // pointwise product in the frequency domain corresponds to the
        // (reversed) convolution in the time domain
        for (a, b) in first_out.iter_mut().zip(second_out.iter()) {
            *a *= b;
        }
        drop(second_out);
        // note: we do not correlate fully, since we can add all the samples together
        // in the frequency domain, saving nearly 1/3 of the processing time
        if outvecs.send(first_out).is_err() {
            return;
        }
    }
}
| {
v.push(AlignElement {
xaddr,
xbyte: Some(x[xaddr]),
yaddr,
ybyte: None,
});
xaddr += 1;
} | conditional_block |
efi.rs | use core::ffi::c_void;
use core::fmt;
/// The high bit of `usize`; set on all EFI_STATUS error codes (warnings leave it clear).
const ERROR_BIT: usize = 1 << (core::mem::size_of::<usize>() * 8 - 1);
/// Opaque handle to an object managed by the UEFI firmware.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct EFI_HANDLE(*mut c_void);
/// UEFI status codes (UEFI specification, Appendix D). Values with `ERROR_BIT`
/// set are errors; the remaining values are success or warning codes.
#[derive(PartialEq, Debug)]
#[repr(usize)]
pub enum EFI_STATUS {
    /// The operation completed successfully.
    SUCCESS = 0,
    /// The string contained characters that could not be rendered and were skipped.
    WARN_UNKNOWN_GLYPH = 1,
    /// The handle was closed, but the file was not deleted.
    WARN_DELETE_FAILURE = 2,
    /// The handle was closed, but the data to the file was not flushed properly.
    WARN_WRITE_FAILURE = 3,
    /// The resulting buffer was too small, and the data was truncated.
    WARN_BUFFER_TOO_SMALL = 4,
    /// The data has not been updated within the timeframe set by local policy.
    WARN_STALE_DATA = 5,
    /// The resulting buffer contains UEFI-compliant file system.
    WARN_FILE_SYSTEM = 6,
    /// The operation will be processed across a system reset.
    WARN_RESET_REQUIRED = 7,
    /// The image failed to load.
    LOAD_ERROR = ERROR_BIT | 1,
    /// A parameter was incorrect.
    INVALID_PARAMETER = ERROR_BIT | 2,
    /// The operation is not supported.
    UNSUPPORTED = ERROR_BIT | 3,
    /// The buffer was not the proper size for the request.
    BAD_BUFFER_SIZE = ERROR_BIT | 4,
    /// The buffer is not large enough to hold the requested data.
    /// The required buffer size is returned in the appropriate parameter.
    BUFFER_TOO_SMALL = ERROR_BIT | 5,
    /// There is no data pending upon return.
    NOT_READY = ERROR_BIT | 6,
    /// The physical device reported an error while attempting the operation.
    DEVICE_ERROR = ERROR_BIT | 7,
    /// The device cannot be written to.
    WRITE_PROTECTED = ERROR_BIT | 8,
    /// A resource has run out.
    OUT_OF_RESOURCES = ERROR_BIT | 9,
    /// An inconsistency was detected on the file system.
    VOLUME_CORRUPTED = ERROR_BIT | 10,
    /// There is no more space on the file system.
    VOLUME_FULL = ERROR_BIT | 11,
    /// The device does not contain any medium to perform the operation.
    NO_MEDIA = ERROR_BIT | 12,
    /// The medium in the device has changed since the last access.
    MEDIA_CHANGED = ERROR_BIT | 13,
    /// The item was not found.
    NOT_FOUND = ERROR_BIT | 14,
    /// Access was denied.
    ACCESS_DENIED = ERROR_BIT | 15,
    /// The server was not found or did not respond to the request.
    NO_RESPONSE = ERROR_BIT | 16,
    /// A mapping to a device does not exist.
    NO_MAPPING = ERROR_BIT | 17,
    /// The timeout time expired.
    TIMEOUT = ERROR_BIT | 18,
    /// The protocol has not been started.
    NOT_STARTED = ERROR_BIT | 19,
    /// The protocol has already been started.
    ALREADY_STARTED = ERROR_BIT | 20,
    /// The operation was aborted.
    ABORTED = ERROR_BIT | 21,
    /// An ICMP error occurred during the network operation.
    ICMP_ERROR = ERROR_BIT | 22,
    /// A TFTP error occurred during the network operation.
    TFTP_ERROR = ERROR_BIT | 23,
    /// A protocol error occurred during the network operation.
    PROTOCOL_ERROR = ERROR_BIT | 24,
    /// The function encountered an internal version that was
    /// incompatible with a version requested by the caller.
    INCOMPATIBLE_VERSION = ERROR_BIT | 25,
    /// The function was not performed due to a security violation.
    SECURITY_VIOLATION = ERROR_BIT | 26,
    /// A CRC error was detected.
    CRC_ERROR = ERROR_BIT | 27,
    /// Beginning or end of media was reached
    END_OF_MEDIA = ERROR_BIT | 28,
    // codes 29 and 30 are reserved by the specification, hence the gap
    /// The end of the file was reached.
    END_OF_FILE = ERROR_BIT | 31,
    /// The language specified was invalid.
    INVALID_LANGUAGE = ERROR_BIT | 32,
    /// The security status of the data is unknown or compromised and
    /// the data must be updated or replaced to restore a valid security status.
    COMPROMISED_DATA = ERROR_BIT | 33,
    /// There is an address conflict during address allocation.
    IP_ADDRESS_CONFLICT = ERROR_BIT | 34,
    /// A HTTP error occurred during the network operation.
    HTTP_ERROR = ERROR_BIT | 35,
}
/// The UEFI System Table passed to the image entry point; provides access to
/// the consoles, the boot/runtime services tables and the configuration tables.
#[repr(C)]
pub struct EFI_SYSTEM_TABLE {
    pub Hdr: EFI_TABLE_HEADER,
    // null-terminated UCS-2 firmware vendor string
    pub FirmwareVendor: *const u16,
    pub FirmwareRevision: u32,
    pub ConsoleInHandle: EFI_HANDLE,
    pub ConIn: *mut EFI_SIMPLE_TEXT_INPUT_PROTOCOL,
    pub ConsoleOutHandle: EFI_HANDLE,
    pub ConOut: *mut EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL,
    pub StandardErrorHandle: EFI_HANDLE,
    pub StdErr: *mut EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL,
    pub RuntimeServices: *mut EFI_RUNTIME_SERVICES,
    pub BootServices: *mut EFI_BOOT_SERVICES,
    // number of entries in the ConfigurationTable array
    pub NumberOfTableEntries: usize,
    pub ConfigurationTable: *mut EFI_CONFIGURATION_TABLE,
}
/// The UEFI Runtime Services table. Only `GetTime` is given a real type;
/// the unused services are kept as pointer-sized `dummy*` placeholders so the
/// structure layout still matches the spec-defined function-pointer table.
#[repr(C)]
pub struct EFI_RUNTIME_SERVICES {
    pub Hdr: EFI_TABLE_HEADER,
    pub GetTime: unsafe extern "C" fn(Time: *mut EFI_TIME, Capabilities: *mut EFI_TIME_CAPABILITIES) -> EFI_STATUS,
    dummy1: [usize;3], // Time Services
    dummy2: [usize;2], // Virtual Memory Services
    dummy3: [usize;3], // Variable Services
    dummy4: [usize;2], // Miscellaneous Services
    dummy5: [usize;2], // UEFI 2.0 Capsule Services
    dummy6: [usize;1], // Miscellaneous UEFI 2.0 Service
}
/// The UEFI Boot Services table. Unused services are kept as pointer-sized
/// `dummy*` placeholders so the typed entries land at their spec-defined
/// offsets in the function-pointer table.
#[repr(C)]
pub struct EFI_BOOT_SERVICES {
    pub Hdr: EFI_TABLE_HEADER,
    // Task Priority Services (RaiseTPL, RestoreTPL)
    dummy1: [usize;2],
    // Memory Services
    pub AllocatePages: unsafe extern "C" fn(Type: EFI_ALLOCATE_TYPE, MemoryType: EFI_MEMORY_TYPE,
        Pages:usize, Memory: &mut u64) -> EFI_STATUS,
    // FreePages (field was previously misspelled `dymmy2a`)
    dummy2a: [usize;1],
    pub GetMemoryMap: unsafe extern "C" fn(MemoryMapSize: &mut usize, MemoryMap: *mut EFI_MEMORY_DESCRIPTOR,
        MapKey: &mut usize, DescriptorSize: &mut usize, DescriptorVersion: &mut u32) -> EFI_STATUS,
    // AllocatePool, FreePool
    dummy2b: [usize;2],
    // Event & Timer Services
    dummy3: [usize;6],
    // Protocol Handler Services (InstallProtocolInterface, ReinstallProtocolInterface,
    // UninstallProtocolInterface)
    dummy4a: [usize;3],
    pub HandleProtocol: unsafe extern "C" fn(Handle: EFI_HANDLE, Protocol: &EFI_GUID, Interface: &mut *mut c_void) -> EFI_STATUS,
    // Reserved, RegisterProtocolNotify, LocateHandle, LocateDevicePath,
    // InstallConfigurationTable — five slots per the UEFI spec ordering.
    // BUGFIX: this was [usize;4], which shifted SetWatchdogTimer onto Stall and
    // LocateProtocol onto LocateHandleBuffer.
    dummy4b: [usize;5],
    // Image Services (LoadImage, StartImage, Exit, UnloadImage, ExitBootServices)
    dummy5: [usize;5],
    // Miscellaneous Services (GetNextMonotonicCount, Stall)
    dummy6: [usize;2],
    pub SetWatchdogTimer: unsafe extern "C" fn (Timeout: usize, WatchdogCode: u64, DataSize: usize,
        WatchdogData: *const u16) -> EFI_STATUS,
    // DriverSupport Services
    dummy7: [usize;2],
    // Open and Close Protocol Services
    dummy8: [usize;3],
    // Library Services (ProtocolsPerHandle, LocateHandleBuffer)
    dummy9a: [usize;2],
    pub LocateProtocol: unsafe extern "C" fn (Protocol: &EFI_GUID, Registration: *mut c_void, Interface: &mut *mut c_void) -> EFI_STATUS,
    dummy9b: [usize;2],
    // 32-bit CRC Services
    dummy10: [usize;1],
    // Miscellaneous Services
    dummy11: [usize;3],
}
/// A system configuration table entry.
/// NOTE(review): the UEFI spec defines this as { VendorGuid: EFI_GUID,
/// VendorTable: *mut c_void }, not a table header — confirm before use.
#[repr(C)]
pub struct EFI_CONFIGURATION_TABLE {
    pub Hdr: EFI_TABLE_HEADER,
    // TBD
}
/// Common header preceding the UEFI system, boot-services and runtime-services tables.
#[repr(C)]
pub struct EFI_TABLE_HEADER {
    pub Signature: u64,
    pub Revision: u32,
    pub HeaderSize: u32,
    pub CRC32: u32,
    pub Reserved: u32,
}
/// Console input protocol (partial definition).
#[repr(C)]
pub struct EFI_SIMPLE_TEXT_INPUT_PROTOCOL {
    pub Reset: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_INPUT_PROTOCOL, ExtendedVerification: bool) -> EFI_STATUS,
    // NOTE(review): `Key` is an out-parameter in the spec, so the firmware writes
    // through it; `&EFI_INPUT_KEY` should likely be `&mut EFI_INPUT_KEY` — confirm.
    pub ReadKeyStroke: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_INPUT_PROTOCOL, Key: &EFI_INPUT_KEY) -> EFI_STATUS,
    // TBD
}
/// Key press data returned by `ReadKeyStroke`.
#[repr(C)]
pub struct EFI_INPUT_KEY {
    pub ScanCode: u16,
    pub UnicodeChar: u16,
}
/// Console output protocol (partial definition).
#[repr(C)]
pub struct EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL {
    pub Reset: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL, ExtendedVerification: bool) -> EFI_STATUS,
    // String is a null-terminated UCS-2 string
    pub OutputString: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL, String: *const u16) -> EFI_STATUS,
    // TBD
}
/// Allocation strategy passed to `EFI_BOOT_SERVICES.AllocatePages`.
#[derive(PartialEq)]
#[repr(usize)]
pub enum EFI_ALLOCATE_TYPE {
    AllocateAnyPages,
    AllocateMaxAddress,
    AllocateAddress,
    MaxAllocateType,
}
/// Memory types used in memory descriptors and page allocations.
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum EFI_MEMORY_TYPE {
    EfiReservedMemoryType,
    EfiLoaderCode,
    EfiLoaderData,
    EfiBootServicesCode,
    EfiBootServicesData,
    EfiRuntimeServicesCode,
    EfiRuntimeServicesData,
    EfiConventionalMemory,
    EfiUnusableMemory,
    EfiACPIReclaimMemory,
    EfiACPIMemoryNVS,
    EfiMemoryMappedIO,
    EfiMemoryMappedIOPortSpace,
    EfiPalCode,
    EfiPersistentMemory,
    EfiMaxMemoryType,
}
impl fmt::Display for EFI_MEMORY_TYPE {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
/// Opaque key for a memory-map snapshot.
/// Presumably wraps the MapKey value produced by `GetMemoryMap` — unused in
/// this chunk, confirm against callers.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub struct MemoryMapKey(usize);
/// One entry of the memory map returned by `GetMemoryMap`.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct EFI_MEMORY_DESCRIPTOR {
    pub Type: EFI_MEMORY_TYPE,
    // explicit padding so PhysicalStart sits on an 8-byte boundary
    padding: u32,
    pub PhysicalStart: u64,
    pub VirtualStart: u64,
    // region length in 4 KiB pages (see the Display impl below)
    pub NumberOfPages: u64,
    pub Attribute: u64,
}
impl fmt::Display for EFI_MEMORY_DESCRIPTOR {
    /// Formats the descriptor as "<type> <start>-<end>" with hexadecimal
    /// addresses; the end address is derived from the 4 KiB page count.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let start = self.PhysicalStart;
        let end = start + self.NumberOfPages * 4096;
        write!(f, "{} {:x}-{:x}", self.Type, start, end)
    }
}
/// Current time as filled in by `EFI_RUNTIME_SERVICES.GetTime`.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct EFI_TIME {
    pub Year: u16,
    pub Month: u8,
    pub Day: u8,
    pub Hour: u8,
    pub Minute: u8,
    pub Second: u8,
    pub Pad1: u8,
    pub Nanosecond: u32,
    // NOTE(review): the spec declares TimeZone as a signed INT16 (offset in
    // minutes from UTC, 0x7FF = unspecified); u16 matches the size but loses
    // the sign — confirm before doing arithmetic with it.
    pub TimeZone: u16,
    pub Daylight: u8,
    pub Pad2: u8,
}
/// Real-time clock capabilities, optionally reported by `GetTime`.
#[repr(C)]
pub struct EFI_TIME_CAPABILITIES {
    pub Resolution: u32,
    pub Accuracy: u32,
    pub SetsToZero: bool,
}
/// 128-bit GUID used to identify UEFI protocols and configuration tables.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub struct EFI_GUID {
    pub a: u32,
    pub b: u16,
    pub c: u16,
    pub d: [u8;8],
}
/// File system protocol; `OpenVolume` yields the volume's root directory
/// as an `EFI_FILE_PROTOCOL`.
#[repr(C)]
pub struct EFI_SIMPLE_FILE_SYSTEM_PROTOCOL {
    pub Revision: u64,
    pub OpenVolume: unsafe extern "C" fn(This: &mut EFI_SIMPLE_FILE_SYSTEM_PROTOCOL, Root: &mut *mut EFI_FILE_PROTOCOL) -> EFI_STATUS,
}
/// File handle protocol (partial definition).
#[repr(C)]
pub struct EFI_FILE_PROTOCOL {
    pub Revision: u64,
    // FileName is a null-terminated UCS-2 path relative to this handle
    pub Open: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL, NewHandle: &mut *mut EFI_FILE_PROTOCOL, FileName: *const u16,
        OpenMode: EFI_FILE_MODE, Attributes: EFI_FILE_ATTRIBUTE) -> EFI_STATUS,
    pub Close: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL) -> EFI_STATUS,
    pub Delete: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL) -> EFI_STATUS,
    // BufferSize is in/out per the spec: buffer size on entry, bytes read on exit
    pub Read: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL, BufferSize: &mut usize, Buffer: *mut u8) -> EFI_STATUS,
    // TBD
}
/// Information about a loaded UEFI image (base address, size, load options, ...).
#[repr(C)]
pub struct EFI_LOADED_IMAGE_PROTOCOL {
    pub Revision: u32,
    pub ParentHandle: EFI_HANDLE,
    pub SystemTable: *const EFI_SYSTEM_TABLE,
    // handle of the device the image was loaded from
    pub DeviceHandle: EFI_HANDLE,
    pub FilePath: *const c_void,
    pub Reserved: *const c_void,
    pub LoadOptionsSize: u32,
    pub LoadOptions: *const c_void,
    pub ImageBase: usize,
    pub ImageSize: u64,
    pub ImageCodeType: EFI_MEMORY_TYPE,
    pub ImageDataType: EFI_MEMORY_TYPE,
    pub Unload: unsafe extern "C" fn(ImageHandle: EFI_HANDLE) -> EFI_STATUS,
}
/// Open modes for `EFI_FILE_PROTOCOL.Open`. Since a Rust enum cannot be OR'd
/// like the C flag defines, the variants encode the spec's valid combinations
/// directly: write implies read, create implies read+write.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(u64)]
pub enum EFI_FILE_MODE {
    EFI_FILE_MODE_READ = 1,
    EFI_FILE_MODE_WRITE = 2 | 1,
    EFI_FILE_MODE_CREATE = (1 << 63) | 2 | 1,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(u64)]
pub enum EFI_FILE_ATTRIBUTE {
EFI_FILE_READ_ONLY = 1,
EFI_FILE_HIDDEN = 1 << 1,
EFI_FILE_SYSTEM = 1 << 2,
EFI_FILE_RESERVED = 1 << 3,
EFI_FILE_DIRECTORY = 1 << 4,
EFI_FILE_ARCHIVE = 1 << 5,
EFI_FILE_VALID_ATTR = 0x37,
} | {
let string = match *self {
EFI_MEMORY_TYPE::EfiReservedMemoryType => "ReservedMemory",
EFI_MEMORY_TYPE::EfiLoaderCode => "LoaderCode",
EFI_MEMORY_TYPE::EfiLoaderData => "LoaderData",
EFI_MEMORY_TYPE::EfiBootServicesCode => "BootServicesCode",
EFI_MEMORY_TYPE::EfiBootServicesData => "BootServicesData",
EFI_MEMORY_TYPE::EfiRuntimeServicesCode => "RuntimeServicesCode",
EFI_MEMORY_TYPE::EfiRuntimeServicesData => "RuntimeServicesData",
EFI_MEMORY_TYPE::EfiConventionalMemory => "Conventional",
EFI_MEMORY_TYPE::EfiUnusableMemory => "Unusable",
EFI_MEMORY_TYPE::EfiACPIReclaimMemory => "ACPIReclaim",
EFI_MEMORY_TYPE::EfiACPIMemoryNVS => "ACPIMemoryNVS ",
EFI_MEMORY_TYPE::EfiMemoryMappedIO => "MemoryMappedIO",
EFI_MEMORY_TYPE::EfiMemoryMappedIOPortSpace => "MemoryMappedIOPortSpace",
EFI_MEMORY_TYPE::EfiPalCode => "PalCode",
EFI_MEMORY_TYPE::EfiPersistentMemory => "PersistentMemory",
_ => "",
};
write!(f, "{:>20}", string)
} | identifier_body |
efi.rs | use core::ffi::c_void;
use core::fmt;
const ERROR_BIT: usize = 1 << (core::mem::size_of::<usize>() * 8 - 1);
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct EFI_HANDLE(*mut c_void);
#[derive(PartialEq, Debug)]
#[repr(usize)]
pub enum EFI_STATUS {
/// The operation completed successfully.
SUCCESS = 0,
/// The string contained characters that could not be rendered and were skipped.
WARN_UNKNOWN_GLYPH = 1,
/// The handle was closed, but the file was not deleted.
WARN_DELETE_FAILURE = 2,
/// The handle was closed, but the data to the file was not flushed properly.
WARN_WRITE_FAILURE = 3,
/// The resulting buffer was too small, and the data was truncated.
WARN_BUFFER_TOO_SMALL = 4,
/// The data has not been updated within the timeframe set by local policy.
WARN_STALE_DATA = 5,
/// The resulting buffer contains UEFI-compliant file system.
WARN_FILE_SYSTEM = 6,
/// The operation will be processed across a system reset.
WARN_RESET_REQUIRED = 7,
/// The image failed to load.
LOAD_ERROR = ERROR_BIT | 1,
/// A parameter was incorrect.
INVALID_PARAMETER = ERROR_BIT | 2,
/// The operation is not supported.
UNSUPPORTED = ERROR_BIT | 3,
/// The buffer was not the proper size for the request.
BAD_BUFFER_SIZE = ERROR_BIT | 4,
/// The buffer is not large enough to hold the requested data.
/// The required buffer size is returned in the appropriate parameter.
BUFFER_TOO_SMALL = ERROR_BIT | 5,
/// There is no data pending upon return.
NOT_READY = ERROR_BIT | 6,
/// The physical device reported an error while attempting the operation.
DEVICE_ERROR = ERROR_BIT | 7,
/// The device cannot be written to.
WRITE_PROTECTED = ERROR_BIT | 8,
/// A resource has run out.
OUT_OF_RESOURCES = ERROR_BIT | 9,
/// An inconstency was detected on the file system.
VOLUME_CORRUPTED = ERROR_BIT | 10,
/// There is no more space on the file system.
VOLUME_FULL = ERROR_BIT | 11,
/// The device does not contain any medium to perform the operation.
NO_MEDIA = ERROR_BIT | 12,
/// The medium in the device has changed since the last access.
MEDIA_CHANGED = ERROR_BIT | 13,
/// The item was not found.
NOT_FOUND = ERROR_BIT | 14,
/// Access was denied.
ACCESS_DENIED = ERROR_BIT | 15,
/// The server was not found or did not respond to the request.
NO_RESPONSE = ERROR_BIT | 16,
/// A mapping to a device does not exist.
NO_MAPPING = ERROR_BIT | 17,
/// The timeout time expired.
TIMEOUT = ERROR_BIT | 18,
/// The protocol has not been started.
NOT_STARTED = ERROR_BIT | 19,
/// The protocol has already been started.
ALREADY_STARTED = ERROR_BIT | 20,
/// The operation was aborted.
ABORTED = ERROR_BIT | 21,
/// An ICMP error occurred during the network operation.
ICMP_ERROR = ERROR_BIT | 22,
/// A TFTP error occurred during the network operation.
TFTP_ERROR = ERROR_BIT | 23,
/// A protocol error occurred during the network operation.
PROTOCOL_ERROR = ERROR_BIT | 24,
/// The function encountered an internal version that was
/// incompatible with a version requested by the caller.
INCOMPATIBLE_VERSION = ERROR_BIT | 25,
/// The function was not performed due to a security violation.
SECURITY_VIOLATION = ERROR_BIT | 26,
/// A CRC error was detected.
CRC_ERROR = ERROR_BIT | 27,
/// Beginning or end of media was reached
END_OF_MEDIA = ERROR_BIT | 28,
/// The end of the file was reached.
END_OF_FILE = ERROR_BIT | 31,
/// The language specified was invalid.
INVALID_LANGUAGE = ERROR_BIT | 32,
/// The security status of the data is unknown or compromised and
/// the data must be updated or replaced to restore a valid security status.
COMPROMISED_DATA = ERROR_BIT | 33,
/// There is an address conflict address allocation
IP_ADDRESS_CONFLICT = ERROR_BIT | 34,
/// A HTTP error occurred during the network operation.
HTTP_ERROR = ERROR_BIT | 35,
}
#[repr(C)]
pub struct EFI_SYSTEM_TABLE {
pub Hdr: EFI_TABLE_HEADER,
pub FirmwareVendor: *const u16,
pub FirmwareRevision: u32,
pub ConsoleInHandle: EFI_HANDLE,
pub ConIn: *mut EFI_SIMPLE_TEXT_INPUT_PROTOCOL,
pub ConsoleOutHandle: EFI_HANDLE,
pub ConOut: *mut EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL,
pub StandardErrorHandle: EFI_HANDLE,
pub StdErr: *mut EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL,
pub RuntimeServices: *mut EFI_RUNTIME_SERVICES,
pub BootServices: *mut EFI_BOOT_SERVICES,
pub NumberOfTableEntries: usize,
pub ConfigurationTable: *mut EFI_CONFIGURATION_TABLE,
}
#[repr(C)]
pub struct EFI_RUNTIME_SERVICES {
pub Hdr: EFI_TABLE_HEADER,
pub GetTime: unsafe extern "C" fn(Time: *mut EFI_TIME, Capabilities: *mut EFI_TIME_CAPABILITIES) -> EFI_STATUS,
dummy1: [usize;3], // Time Services
dummy2: [usize;2], // Virtual Memory Services
dummy3: [usize;3], // Variable Services
dummy4: [usize;2], // Miscellaneous Services
dummy5: [usize;2], // UEFI 2.0 Capsule Services
dummy6: [usize;1], // Miscellaneous UEFI 2.0 Service
}
#[repr(C)]
pub struct | {
pub Hdr: EFI_TABLE_HEADER,
// Task Priority Services
dummy1: [usize;2],
// Memory Services
pub AllocatePages: unsafe extern "C" fn(Type: EFI_ALLOCATE_TYPE, MemoryType: EFI_MEMORY_TYPE,
Pages:usize, Memory: &mut u64) -> EFI_STATUS,
dymmy2a: [usize;1],
pub GetMemoryMap: unsafe extern "C" fn(MemoryMapSize: &mut usize, MemoryMap: *mut EFI_MEMORY_DESCRIPTOR,
MapKey: &mut usize, DescriptorSize: &mut usize, DescriptorVersion: &mut u32) -> EFI_STATUS,
dummy2b: [usize;2],
// Event & Timer Services
dummy3: [usize;6],
// Protocol Handler Services
dummy4a: [usize;3],
pub HandleProtocol: unsafe extern "C" fn(Handle: EFI_HANDLE, Protocol: &EFI_GUID, Interface: &mut *mut c_void) -> EFI_STATUS,
dummy4b: [usize;4],
// Image Services
dummy5: [usize;5],
// Miscellaneous Services
dummy6: [usize;2],
pub SetWatchdogTimer: unsafe extern "C" fn (Timeout: usize, WatchdogCode: u64, DataSize: usize,
WatchdogData: *const u16) -> EFI_STATUS,
// DriverSupport Services
dummy7: [usize;2],
// Open and Close Protocol Services
dummy8: [usize;3],
// Library Services
dummy9a: [usize;2],
pub LocateProtocol: unsafe extern "C" fn (Protocol: &EFI_GUID, Registration: *mut c_void, Interface: &mut *mut c_void) -> EFI_STATUS,
dummy9b: [usize;2],
// 32-bit CRC Services
dummy10: [usize;1],
// Miscellaneous Services
dummy11: [usize;3],
}
#[repr(C)]
pub struct EFI_CONFIGURATION_TABLE {
pub Hdr: EFI_TABLE_HEADER,
// TBD
}
#[repr(C)]
pub struct EFI_TABLE_HEADER {
pub Signature: u64,
pub Revision: u32,
pub HeaderSize: u32,
pub CRC32: u32,
pub Reserved: u32,
}
#[repr(C)]
pub struct EFI_SIMPLE_TEXT_INPUT_PROTOCOL {
pub Reset: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_INPUT_PROTOCOL, ExtendedVerification: bool) -> EFI_STATUS,
pub ReadKeyStroke: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_INPUT_PROTOCOL, Key: &EFI_INPUT_KEY) -> EFI_STATUS,
// TBD
}
#[repr(C)]
pub struct EFI_INPUT_KEY {
pub ScanCode: u16,
pub UnicodeChar: u16,
}
#[repr(C)]
pub struct EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL {
pub Reset: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL, ExtendedVerification: bool) -> EFI_STATUS,
pub OutputString: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL, String: *const u16) -> EFI_STATUS,
// TBD
}
#[derive(PartialEq)]
#[repr(usize)]
pub enum EFI_ALLOCATE_TYPE {
AllocateAnyPages,
AllocateMaxAddress,
AllocateAddress,
MaxAllocateType,
}
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum EFI_MEMORY_TYPE {
EfiReservedMemoryType,
EfiLoaderCode,
EfiLoaderData,
EfiBootServicesCode,
EfiBootServicesData,
EfiRuntimeServicesCode,
EfiRuntimeServicesData,
EfiConventionalMemory,
EfiUnusableMemory,
EfiACPIReclaimMemory,
EfiACPIMemoryNVS,
EfiMemoryMappedIO,
EfiMemoryMappedIOPortSpace,
EfiPalCode,
EfiPersistentMemory,
EfiMaxMemoryType,
}
impl fmt::Display for EFI_MEMORY_TYPE {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let string = match *self {
EFI_MEMORY_TYPE::EfiReservedMemoryType => "ReservedMemory",
EFI_MEMORY_TYPE::EfiLoaderCode => "LoaderCode",
EFI_MEMORY_TYPE::EfiLoaderData => "LoaderData",
EFI_MEMORY_TYPE::EfiBootServicesCode => "BootServicesCode",
EFI_MEMORY_TYPE::EfiBootServicesData => "BootServicesData",
EFI_MEMORY_TYPE::EfiRuntimeServicesCode => "RuntimeServicesCode",
EFI_MEMORY_TYPE::EfiRuntimeServicesData => "RuntimeServicesData",
EFI_MEMORY_TYPE::EfiConventionalMemory => "Conventional",
EFI_MEMORY_TYPE::EfiUnusableMemory => "Unusable",
EFI_MEMORY_TYPE::EfiACPIReclaimMemory => "ACPIReclaim",
EFI_MEMORY_TYPE::EfiACPIMemoryNVS => "ACPIMemoryNVS ",
EFI_MEMORY_TYPE::EfiMemoryMappedIO => "MemoryMappedIO",
EFI_MEMORY_TYPE::EfiMemoryMappedIOPortSpace => "MemoryMappedIOPortSpace",
EFI_MEMORY_TYPE::EfiPalCode => "PalCode",
EFI_MEMORY_TYPE::EfiPersistentMemory => "PersistentMemory",
_ => "",
};
write!(f, "{:>20}", string)
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub struct MemoryMapKey(usize);
#[repr(C)]
#[derive(Copy, Clone)]
pub struct EFI_MEMORY_DESCRIPTOR {
pub Type: EFI_MEMORY_TYPE,
padding: u32,
pub PhysicalStart: u64,
pub VirtualStart: u64,
pub NumberOfPages: u64,
pub Attribute: u64,
}
impl fmt::Display for EFI_MEMORY_DESCRIPTOR {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {:x}-{:x}", self.Type, self.PhysicalStart, self.PhysicalStart+self.NumberOfPages*4096)
}
}
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct EFI_TIME {
pub Year: u16,
pub Month: u8,
pub Day: u8,
pub Hour: u8,
pub Minute: u8,
pub Second: u8,
pub Pad1: u8,
pub Nanosecond: u32,
pub TimeZone: u16,
pub Daylight: u8,
pub Pad2: u8,
}
#[repr(C)]
pub struct EFI_TIME_CAPABILITIES {
pub Resolution: u32,
pub Accuracy: u32,
pub SetsToZero: bool,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub struct EFI_GUID {
pub a: u32,
pub b: u16,
pub c: u16,
pub d: [u8;8],
}
#[repr(C)]
pub struct EFI_SIMPLE_FILE_SYSTEM_PROTOCOL {
pub Revision: u64,
pub OpenVolume: unsafe extern "C" fn(This: &mut EFI_SIMPLE_FILE_SYSTEM_PROTOCOL, Root: &mut *mut EFI_FILE_PROTOCOL) -> EFI_STATUS,
}
#[repr(C)]
pub struct EFI_FILE_PROTOCOL {
pub Revision: u64,
pub Open: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL, NewHandle: &mut *mut EFI_FILE_PROTOCOL, FileName: *const u16,
OpenMode: EFI_FILE_MODE, Attributes: EFI_FILE_ATTRIBUTE) -> EFI_STATUS,
pub Close: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL) -> EFI_STATUS,
pub Delete: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL) -> EFI_STATUS,
pub Read: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL, BufferSize: &mut usize, Buffer: *mut u8) -> EFI_STATUS,
// TBD
}
#[repr(C)]
pub struct EFI_LOADED_IMAGE_PROTOCOL {
pub Revision: u32,
pub ParentHandle: EFI_HANDLE,
pub SystemTable: *const EFI_SYSTEM_TABLE,
pub DeviceHandle: EFI_HANDLE,
pub FilePath: *const c_void,
pub Reserved: *const c_void,
pub LoadOptionsSize: u32,
pub LoadOptions: *const c_void,
pub ImageBase: usize,
pub ImageSize: u64,
pub ImageCodeType: EFI_MEMORY_TYPE,
pub ImageDataType: EFI_MEMORY_TYPE,
pub Unload: unsafe extern "C" fn(ImageHandle: EFI_HANDLE) -> EFI_STATUS,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(u64)]
pub enum EFI_FILE_MODE {
EFI_FILE_MODE_READ = 1,
EFI_FILE_MODE_WRITE = 2 | 1,
EFI_FILE_MODE_CREATE = (1 << 63) | 2 | 1,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(u64)]
pub enum EFI_FILE_ATTRIBUTE {
EFI_FILE_READ_ONLY = 1,
EFI_FILE_HIDDEN = 1 << 1,
EFI_FILE_SYSTEM = 1 << 2,
EFI_FILE_RESERVED = 1 << 3,
EFI_FILE_DIRECTORY = 1 << 4,
EFI_FILE_ARCHIVE = 1 << 5,
EFI_FILE_VALID_ATTR = 0x37,
} | EFI_BOOT_SERVICES | identifier_name |
efi.rs | use core::ffi::c_void;
use core::fmt;
const ERROR_BIT: usize = 1 << (core::mem::size_of::<usize>() * 8 - 1);
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct EFI_HANDLE(*mut c_void);
#[derive(PartialEq, Debug)]
#[repr(usize)]
pub enum EFI_STATUS {
/// The operation completed successfully.
SUCCESS = 0,
/// The string contained characters that could not be rendered and were skipped.
WARN_UNKNOWN_GLYPH = 1,
/// The handle was closed, but the file was not deleted.
WARN_DELETE_FAILURE = 2,
/// The handle was closed, but the data to the file was not flushed properly.
WARN_WRITE_FAILURE = 3,
/// The resulting buffer was too small, and the data was truncated.
WARN_BUFFER_TOO_SMALL = 4,
/// The data has not been updated within the timeframe set by local policy.
WARN_STALE_DATA = 5,
/// The resulting buffer contains UEFI-compliant file system.
WARN_FILE_SYSTEM = 6,
/// The operation will be processed across a system reset.
WARN_RESET_REQUIRED = 7,
/// The image failed to load.
LOAD_ERROR = ERROR_BIT | 1,
/// A parameter was incorrect.
INVALID_PARAMETER = ERROR_BIT | 2,
/// The operation is not supported.
UNSUPPORTED = ERROR_BIT | 3,
/// The buffer was not the proper size for the request.
BAD_BUFFER_SIZE = ERROR_BIT | 4,
/// The buffer is not large enough to hold the requested data.
/// The required buffer size is returned in the appropriate parameter.
BUFFER_TOO_SMALL = ERROR_BIT | 5,
/// There is no data pending upon return.
NOT_READY = ERROR_BIT | 6,
/// The physical device reported an error while attempting the operation.
DEVICE_ERROR = ERROR_BIT | 7,
/// The device cannot be written to.
WRITE_PROTECTED = ERROR_BIT | 8,
/// A resource has run out.
OUT_OF_RESOURCES = ERROR_BIT | 9,
/// An inconstency was detected on the file system.
VOLUME_CORRUPTED = ERROR_BIT | 10,
/// There is no more space on the file system.
VOLUME_FULL = ERROR_BIT | 11,
/// The device does not contain any medium to perform the operation.
NO_MEDIA = ERROR_BIT | 12,
/// The medium in the device has changed since the last access.
MEDIA_CHANGED = ERROR_BIT | 13,
/// The item was not found.
NOT_FOUND = ERROR_BIT | 14,
/// Access was denied.
ACCESS_DENIED = ERROR_BIT | 15,
/// The server was not found or did not respond to the request.
NO_RESPONSE = ERROR_BIT | 16,
/// A mapping to a device does not exist.
NO_MAPPING = ERROR_BIT | 17,
/// The timeout time expired.
TIMEOUT = ERROR_BIT | 18,
/// The protocol has not been started.
NOT_STARTED = ERROR_BIT | 19,
/// The protocol has already been started.
ALREADY_STARTED = ERROR_BIT | 20,
/// The operation was aborted.
ABORTED = ERROR_BIT | 21,
/// An ICMP error occurred during the network operation.
ICMP_ERROR = ERROR_BIT | 22,
/// A TFTP error occurred during the network operation.
TFTP_ERROR = ERROR_BIT | 23,
/// A protocol error occurred during the network operation.
PROTOCOL_ERROR = ERROR_BIT | 24,
/// The function encountered an internal version that was
/// incompatible with a version requested by the caller.
INCOMPATIBLE_VERSION = ERROR_BIT | 25,
/// The function was not performed due to a security violation.
SECURITY_VIOLATION = ERROR_BIT | 26,
/// A CRC error was detected.
CRC_ERROR = ERROR_BIT | 27,
/// Beginning or end of media was reached
END_OF_MEDIA = ERROR_BIT | 28,
/// The end of the file was reached.
END_OF_FILE = ERROR_BIT | 31,
/// The language specified was invalid.
INVALID_LANGUAGE = ERROR_BIT | 32,
/// The security status of the data is unknown or compromised and
/// the data must be updated or replaced to restore a valid security status.
COMPROMISED_DATA = ERROR_BIT | 33,
/// There is an address conflict address allocation
IP_ADDRESS_CONFLICT = ERROR_BIT | 34,
/// A HTTP error occurred during the network operation.
HTTP_ERROR = ERROR_BIT | 35,
}
#[repr(C)]
pub struct EFI_SYSTEM_TABLE {
pub Hdr: EFI_TABLE_HEADER,
pub FirmwareVendor: *const u16,
pub FirmwareRevision: u32,
pub ConsoleInHandle: EFI_HANDLE,
pub ConIn: *mut EFI_SIMPLE_TEXT_INPUT_PROTOCOL,
pub ConsoleOutHandle: EFI_HANDLE,
pub ConOut: *mut EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL,
pub StandardErrorHandle: EFI_HANDLE,
pub StdErr: *mut EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL,
pub RuntimeServices: *mut EFI_RUNTIME_SERVICES,
pub BootServices: *mut EFI_BOOT_SERVICES,
pub NumberOfTableEntries: usize,
pub ConfigurationTable: *mut EFI_CONFIGURATION_TABLE,
}
#[repr(C)]
pub struct EFI_RUNTIME_SERVICES {
pub Hdr: EFI_TABLE_HEADER,
pub GetTime: unsafe extern "C" fn(Time: *mut EFI_TIME, Capabilities: *mut EFI_TIME_CAPABILITIES) -> EFI_STATUS,
dummy1: [usize;3], // Time Services
dummy2: [usize;2], // Virtual Memory Services
dummy3: [usize;3], // Variable Services
dummy4: [usize;2], // Miscellaneous Services
dummy5: [usize;2], // UEFI 2.0 Capsule Services
dummy6: [usize;1], // Miscellaneous UEFI 2.0 Service
}
#[repr(C)]
pub struct EFI_BOOT_SERVICES {
pub Hdr: EFI_TABLE_HEADER,
// Task Priority Services
dummy1: [usize;2],
// Memory Services
pub AllocatePages: unsafe extern "C" fn(Type: EFI_ALLOCATE_TYPE, MemoryType: EFI_MEMORY_TYPE,
Pages:usize, Memory: &mut u64) -> EFI_STATUS,
dymmy2a: [usize;1],
pub GetMemoryMap: unsafe extern "C" fn(MemoryMapSize: &mut usize, MemoryMap: *mut EFI_MEMORY_DESCRIPTOR,
MapKey: &mut usize, DescriptorSize: &mut usize, DescriptorVersion: &mut u32) -> EFI_STATUS,
dummy2b: [usize;2],
// Event & Timer Services
dummy3: [usize;6],
// Protocol Handler Services
dummy4a: [usize;3],
pub HandleProtocol: unsafe extern "C" fn(Handle: EFI_HANDLE, Protocol: &EFI_GUID, Interface: &mut *mut c_void) -> EFI_STATUS,
dummy4b: [usize;4],
// Image Services
dummy5: [usize;5],
// Miscellaneous Services
dummy6: [usize;2],
pub SetWatchdogTimer: unsafe extern "C" fn (Timeout: usize, WatchdogCode: u64, DataSize: usize,
WatchdogData: *const u16) -> EFI_STATUS,
// DriverSupport Services | // Library Services
dummy9a: [usize;2],
pub LocateProtocol: unsafe extern "C" fn (Protocol: &EFI_GUID, Registration: *mut c_void, Interface: &mut *mut c_void) -> EFI_STATUS,
dummy9b: [usize;2],
// 32-bit CRC Services
dummy10: [usize;1],
// Miscellaneous Services
dummy11: [usize;3],
}
#[repr(C)]
pub struct EFI_CONFIGURATION_TABLE {
pub Hdr: EFI_TABLE_HEADER,
// TBD
}
#[repr(C)]
pub struct EFI_TABLE_HEADER {
pub Signature: u64,
pub Revision: u32,
pub HeaderSize: u32,
pub CRC32: u32,
pub Reserved: u32,
}
#[repr(C)]
pub struct EFI_SIMPLE_TEXT_INPUT_PROTOCOL {
pub Reset: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_INPUT_PROTOCOL, ExtendedVerification: bool) -> EFI_STATUS,
pub ReadKeyStroke: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_INPUT_PROTOCOL, Key: &EFI_INPUT_KEY) -> EFI_STATUS,
// TBD
}
#[repr(C)]
pub struct EFI_INPUT_KEY {
pub ScanCode: u16,
pub UnicodeChar: u16,
}
#[repr(C)]
pub struct EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL {
pub Reset: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL, ExtendedVerification: bool) -> EFI_STATUS,
pub OutputString: unsafe extern "C" fn(This: &EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL, String: *const u16) -> EFI_STATUS,
// TBD
}
#[derive(PartialEq)]
#[repr(usize)]
pub enum EFI_ALLOCATE_TYPE {
AllocateAnyPages,
AllocateMaxAddress,
AllocateAddress,
MaxAllocateType,
}
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u32)]
pub enum EFI_MEMORY_TYPE {
EfiReservedMemoryType,
EfiLoaderCode,
EfiLoaderData,
EfiBootServicesCode,
EfiBootServicesData,
EfiRuntimeServicesCode,
EfiRuntimeServicesData,
EfiConventionalMemory,
EfiUnusableMemory,
EfiACPIReclaimMemory,
EfiACPIMemoryNVS,
EfiMemoryMappedIO,
EfiMemoryMappedIOPortSpace,
EfiPalCode,
EfiPersistentMemory,
EfiMaxMemoryType,
}
impl fmt::Display for EFI_MEMORY_TYPE {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let string = match *self {
EFI_MEMORY_TYPE::EfiReservedMemoryType => "ReservedMemory",
EFI_MEMORY_TYPE::EfiLoaderCode => "LoaderCode",
EFI_MEMORY_TYPE::EfiLoaderData => "LoaderData",
EFI_MEMORY_TYPE::EfiBootServicesCode => "BootServicesCode",
EFI_MEMORY_TYPE::EfiBootServicesData => "BootServicesData",
EFI_MEMORY_TYPE::EfiRuntimeServicesCode => "RuntimeServicesCode",
EFI_MEMORY_TYPE::EfiRuntimeServicesData => "RuntimeServicesData",
EFI_MEMORY_TYPE::EfiConventionalMemory => "Conventional",
EFI_MEMORY_TYPE::EfiUnusableMemory => "Unusable",
EFI_MEMORY_TYPE::EfiACPIReclaimMemory => "ACPIReclaim",
EFI_MEMORY_TYPE::EfiACPIMemoryNVS => "ACPIMemoryNVS ",
EFI_MEMORY_TYPE::EfiMemoryMappedIO => "MemoryMappedIO",
EFI_MEMORY_TYPE::EfiMemoryMappedIOPortSpace => "MemoryMappedIOPortSpace",
EFI_MEMORY_TYPE::EfiPalCode => "PalCode",
EFI_MEMORY_TYPE::EfiPersistentMemory => "PersistentMemory",
_ => "",
};
write!(f, "{:>20}", string)
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub struct MemoryMapKey(usize);
#[repr(C)]
#[derive(Copy, Clone)]
pub struct EFI_MEMORY_DESCRIPTOR {
pub Type: EFI_MEMORY_TYPE,
padding: u32,
pub PhysicalStart: u64,
pub VirtualStart: u64,
pub NumberOfPages: u64,
pub Attribute: u64,
}
impl fmt::Display for EFI_MEMORY_DESCRIPTOR {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{} {:x}-{:x}", self.Type, self.PhysicalStart, self.PhysicalStart+self.NumberOfPages*4096)
}
}
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct EFI_TIME {
pub Year: u16,
pub Month: u8,
pub Day: u8,
pub Hour: u8,
pub Minute: u8,
pub Second: u8,
pub Pad1: u8,
pub Nanosecond: u32,
pub TimeZone: u16,
pub Daylight: u8,
pub Pad2: u8,
}
#[repr(C)]
pub struct EFI_TIME_CAPABILITIES {
pub Resolution: u32,
pub Accuracy: u32,
pub SetsToZero: bool,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(C)]
pub struct EFI_GUID {
pub a: u32,
pub b: u16,
pub c: u16,
pub d: [u8;8],
}
#[repr(C)]
pub struct EFI_SIMPLE_FILE_SYSTEM_PROTOCOL {
pub Revision: u64,
pub OpenVolume: unsafe extern "C" fn(This: &mut EFI_SIMPLE_FILE_SYSTEM_PROTOCOL, Root: &mut *mut EFI_FILE_PROTOCOL) -> EFI_STATUS,
}
#[repr(C)]
pub struct EFI_FILE_PROTOCOL {
pub Revision: u64,
pub Open: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL, NewHandle: &mut *mut EFI_FILE_PROTOCOL, FileName: *const u16,
OpenMode: EFI_FILE_MODE, Attributes: EFI_FILE_ATTRIBUTE) -> EFI_STATUS,
pub Close: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL) -> EFI_STATUS,
pub Delete: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL) -> EFI_STATUS,
pub Read: unsafe extern "C" fn(This: &EFI_FILE_PROTOCOL, BufferSize: &mut usize, Buffer: *mut u8) -> EFI_STATUS,
// TBD
}
#[repr(C)]
pub struct EFI_LOADED_IMAGE_PROTOCOL {
pub Revision: u32,
pub ParentHandle: EFI_HANDLE,
pub SystemTable: *const EFI_SYSTEM_TABLE,
pub DeviceHandle: EFI_HANDLE,
pub FilePath: *const c_void,
pub Reserved: *const c_void,
pub LoadOptionsSize: u32,
pub LoadOptions: *const c_void,
pub ImageBase: usize,
pub ImageSize: u64,
pub ImageCodeType: EFI_MEMORY_TYPE,
pub ImageDataType: EFI_MEMORY_TYPE,
pub Unload: unsafe extern "C" fn(ImageHandle: EFI_HANDLE) -> EFI_STATUS,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(u64)]
pub enum EFI_FILE_MODE {
EFI_FILE_MODE_READ = 1,
EFI_FILE_MODE_WRITE = 2 | 1,
EFI_FILE_MODE_CREATE = (1 << 63) | 2 | 1,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[repr(u64)]
pub enum EFI_FILE_ATTRIBUTE {
EFI_FILE_READ_ONLY = 1,
EFI_FILE_HIDDEN = 1 << 1,
EFI_FILE_SYSTEM = 1 << 2,
EFI_FILE_RESERVED = 1 << 3,
EFI_FILE_DIRECTORY = 1 << 4,
EFI_FILE_ARCHIVE = 1 << 5,
EFI_FILE_VALID_ATTR = 0x37,
} | dummy7: [usize;2],
// Open and Close Protocol Services
dummy8: [usize;3],
| random_line_split |
schellyhook.go | package schellyhook
import (
"encoding/json"
"flag"
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
uuid "github.com/satori/go.uuid"
"github.com/sirupsen/logrus"
)
//Options command line options
type Options struct {
ListenPort int
ListenIP string
PrePostTimeout int
PreBackupCommand string
PostBackupCommand string
}
//SchellyResponse schelly webhook response
type SchellyResponse struct {
ID string `json:"id",omitempty`
DataID string `json:"data_id",omitempty`
Status string `json:"status",omitempty`
Message string `json:"message",omitempty`
SizeMB float64 `json:"size_mb",omitempty`
}
//Backuper interface for who is implementing specific backup operations on backend
type Backuper interface {
//Init register command line flags here etc
Init() error
//RegisterFlags register flags for command line options
RegisterFlags() error
//CreateNewBackup create a new backup synchronously (return only after complete backup creation). If you set shellContext.CmdRef when calling a Shell Script, the bridge will cancel the process automatically if a DELETE /backup/{id} for the running backup is received
CreateNewBackup(apiID string, timeout time.Duration, shellContext *ShellContext) error
//DeleteBackup remove backup data from storage. if backup is still running and set cmdRef on ShellContext of CreateBackup call, cancel it
DeleteBackup(apiID string) error
//GetAllBackups returns all tracked backups. this is optional for Schelly
GetAllBackups() ([]SchellyResponse, error)
//GetBackup returns a specific backup info. if requested apiID is running, this method is not even called, because schellyhook will do this for you
GetBackup(apiID string) (*SchellyResponse, error)
}
var options = new(Options)
var currentBackupContext = ShellContext{}
var currentBackuper Backuper
//RunningBackupAPIID current apiID of the currently running backup, if any
var RunningBackupAPIID = ""
//CurrentBackupStartTime start time of currently running backup, if any
var CurrentBackupStartTime time.Time
//Initialize must be invoked to start REST server along with all Backuper hooks
func Initialize(backuper Backuper) error {
if currentBackuper != nil {
logrus.Infof("Replacing previously existing 'backuper' instance in Schelly-Webhook")
}
currentBackuper = backuper
err := currentBackuper.RegisterFlags()
if err != nil {
return err
}
listenPort := flag.Int("listen-port", 7070, "REST API server listen port")
listenIP := flag.String("listen-ip", "0.0.0.0", "REST API server listen ip address")
logLevel := flag.String("log-level", "info", "debug, info, warning or error")
preBackupCommand := flag.String("pre-backup-command", "", "Command to be executed before running the backup")
postBackupCommand := flag.String("post-backup-command", "", "Command to be executed after running the backup")
prePostTimeout := flag.Int("pre-post-timeout", 7200, "Max time for pre or post command to be executing. After that time the process will be killed")
flag.Parse()
switch *logLevel {
case "debug":
logrus.SetLevel(logrus.DebugLevel)
break
case "warning":
logrus.SetLevel(logrus.WarnLevel)
break
case "error":
logrus.SetLevel(logrus.ErrorLevel)
break
default:
logrus.SetLevel(logrus.InfoLevel)
}
options.ListenPort = *listenPort
options.ListenIP = *listenIP
options.PrePostTimeout = *prePostTimeout
options.PreBackupCommand = *preBackupCommand
options.PostBackupCommand = *postBackupCommand
router := mux.NewRouter()
router.HandleFunc("/backups", getBackups).Methods("GET")
router.HandleFunc("/backups", createBackup).Methods("POST")
router.HandleFunc("/backups/{id}", getBackup).Methods("GET")
router.HandleFunc("/backups/{id}", deleteBackup).Methods("DELETE")
listen := fmt.Sprintf("%s:%d", options.ListenIP, options.ListenPort)
logrus.Infof("Listening at %s", listen)
err = currentBackuper.Init()
if err != nil {
return err
}
err = http.ListenAndServe(listen, router)
if err != nil {
return err
}
return nil
}
//GetBackups - get backups from Backuper
func getBackups(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("GetBackups r=%s", r)
w.Header().Set("Content-Type", "application/json")
gab, err := currentBackuper.GetAllBackups()
if err != nil {
logrus.Warnf("Error calling getAllBackups(). err=%s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = json.NewEncoder(w).Encode(gab)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
//GetBackup - get specific backup from Backuper
func | (w http.ResponseWriter, r *http.Request) {
logrus.Debugf("GetBackup r=%s", r)
params := mux.Vars(r)
apiID := params["id"]
if RunningBackupAPIID == apiID {
sendSchellyResponse(apiID, "", "running", "backup is running", -1, http.StatusOK, w)
return
}
resp, err := currentBackuper.GetBackup(apiID)
if err != nil {
logrus.Warnf("Error calling getBackup() for id %s. err=%s", apiID, err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if resp == nil {
logrus.Debugf("Backup %s not found", apiID)
http.Error(w, fmt.Sprintf("Backup %s not found", apiID), http.StatusNotFound)
return
}
sendSchellyResponse(apiID, resp.DataID, resp.Status, resp.Message, resp.SizeMB, http.StatusOK, w)
}
//CreateBackup - trigger new backup
func createBackup(w http.ResponseWriter, r *http.Request) {
logrus.Infof(">>>>CreateBackup r=%s", r)
if RunningBackupAPIID != "" {
logrus.Infof("Another backup id %s is already running. Aborting.", RunningBackupAPIID)
http.Error(w, fmt.Sprintf("Another backup id %s is already running. Aborting.", RunningBackupAPIID), http.StatusConflict)
return
}
RunningBackupAPIID = createAPIID()
CurrentBackupStartTime = time.Now()
//run backup assyncronouslly
go runBackup(RunningBackupAPIID)
sendSchellyResponse(RunningBackupAPIID, "", "running", "backup triggered", -1, http.StatusAccepted, w)
}
//DeleteBackup - delete backup from Backuper
func deleteBackup(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("DeleteBackup r=%s", r)
params := mux.Vars(r)
apiID := params["id"]
if RunningBackupAPIID == apiID {
if currentBackupContext.CmdRef != nil {
logrus.Debugf("Canceling currently running backup %s", RunningBackupAPIID)
err := (*currentBackupContext.CmdRef).Stop()
if err != nil {
sendSchellyResponse(apiID, "", "running", "Couldn't cancel current running backup task. err="+err.Error(), -1, http.StatusInternalServerError, w)
} else {
sendSchellyResponse(apiID, "", "deleted", "Running backup task was cancelled successfuly", -1, http.StatusOK, w)
}
}
return
}
bk, err := currentBackuper.GetBackup(apiID)
if err != nil {
logrus.Warnf("Error calling deleteBackup() with id %s", apiID)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if bk == nil {
logrus.Warnf("Backup %s not found", apiID)
http.Error(w, fmt.Sprintf("Backup %s not found", apiID), http.StatusNotFound)
return
}
err = currentBackuper.DeleteBackup(apiID)
if err != nil {
logrus.Warnf("Error calling deleteBackup() with id %s", apiID)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
logrus.Debugf("Backup %s deleted", apiID)
sendSchellyResponse(apiID, bk.DataID, "deleted", "backup deleted successfuly", -1, http.StatusOK, w)
}
func sendSchellyResponse(apiID string, dataID string, status string, message string, size float64, httpStatus int, w http.ResponseWriter) {
resp := SchellyResponse{
ID: apiID,
DataID: dataID,
Status: status,
Message: message,
SizeMB: size,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(httpStatus)
err := json.NewEncoder(w).Encode(resp)
if err != nil {
logrus.Errorf("Error encoding response. err=%s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
} else {
logrus.Debugf("SchellyResponse sent %s", resp)
}
}
func runBackup(apiID string) {
logrus.Debugf("Backup request arrived apiID=%s", RunningBackupAPIID)
RunningBackupAPIID = apiID
//process pre backup command before calling backup
if options.PreBackupCommand != "" {
logrus.Infof("Running pre-backup command '%s'", options.PreBackupCommand)
out, err := ExecShellTimeout(options.PreBackupCommand, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Pre-backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Pre-backup command error. out=%s; err=%s", out, err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debug("Pre-backup command success")
}
}
//run backup
logrus.Infof("Running backup")
err := currentBackuper.CreateNewBackup(RunningBackupAPIID, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Backup error. err=%s", err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debugf("Backup creation success on Backuper. backup id %s", RunningBackupAPIID)
}
//process post backup command after finished
if options.PostBackupCommand != "" {
logrus.Infof("Running post-backup command '%s'", options.PostBackupCommand)
out, err := ExecShellTimeout(options.PostBackupCommand, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Post-backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Post-backup command error. out=%s; err=%s", out, err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debug("Post-backup command success")
}
}
logrus.Infof("Backup finished")
//now we can accept another POST /backups call...
RunningBackupAPIID = ""
}
func createAPIID() string {
uuid := uuid.NewV4()
return uuid.String()
}
| getBackup | identifier_name |
schellyhook.go | package schellyhook
import (
"encoding/json"
"flag"
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
uuid "github.com/satori/go.uuid"
"github.com/sirupsen/logrus"
)
//Options command line options
type Options struct {
ListenPort int
ListenIP string
PrePostTimeout int
PreBackupCommand string
PostBackupCommand string
}
//SchellyResponse schelly webhook response
type SchellyResponse struct {
ID string `json:"id",omitempty`
DataID string `json:"data_id",omitempty`
Status string `json:"status",omitempty`
Message string `json:"message",omitempty`
SizeMB float64 `json:"size_mb",omitempty`
}
//Backuper interface for who is implementing specific backup operations on backend
type Backuper interface {
//Init register command line flags here etc
Init() error
//RegisterFlags register flags for command line options
RegisterFlags() error
//CreateNewBackup create a new backup synchronously (return only after complete backup creation). If you set shellContext.CmdRef when calling a Shell Script, the bridge will cancel the process automatically if a DELETE /backup/{id} for the running backup is received
CreateNewBackup(apiID string, timeout time.Duration, shellContext *ShellContext) error
//DeleteBackup remove backup data from storage. if backup is still running and set cmdRef on ShellContext of CreateBackup call, cancel it
DeleteBackup(apiID string) error
//GetAllBackups returns all tracked backups. this is optional for Schelly
GetAllBackups() ([]SchellyResponse, error)
//GetBackup returns a specific backup info. if requested apiID is running, this method is not even called, because schellyhook will do this for you
GetBackup(apiID string) (*SchellyResponse, error)
}
var options = new(Options)
var currentBackupContext = ShellContext{}
var currentBackuper Backuper
//RunningBackupAPIID current apiID of the currently running backup, if any
var RunningBackupAPIID = ""
//CurrentBackupStartTime start time of currently running backup, if any
var CurrentBackupStartTime time.Time
//Initialize must be invoked to start REST server along with all Backuper hooks
func Initialize(backuper Backuper) error {
if currentBackuper != nil {
logrus.Infof("Replacing previously existing 'backuper' instance in Schelly-Webhook")
}
currentBackuper = backuper
err := currentBackuper.RegisterFlags()
if err != nil {
return err
}
listenPort := flag.Int("listen-port", 7070, "REST API server listen port")
listenIP := flag.String("listen-ip", "0.0.0.0", "REST API server listen ip address")
logLevel := flag.String("log-level", "info", "debug, info, warning or error")
preBackupCommand := flag.String("pre-backup-command", "", "Command to be executed before running the backup")
postBackupCommand := flag.String("post-backup-command", "", "Command to be executed after running the backup")
prePostTimeout := flag.Int("pre-post-timeout", 7200, "Max time for pre or post command to be executing. After that time the process will be killed")
flag.Parse()
switch *logLevel {
case "debug":
logrus.SetLevel(logrus.DebugLevel)
break
case "warning":
logrus.SetLevel(logrus.WarnLevel)
break
case "error":
logrus.SetLevel(logrus.ErrorLevel)
break
default:
logrus.SetLevel(logrus.InfoLevel)
}
options.ListenPort = *listenPort
options.ListenIP = *listenIP
options.PrePostTimeout = *prePostTimeout
options.PreBackupCommand = *preBackupCommand
options.PostBackupCommand = *postBackupCommand
router := mux.NewRouter()
router.HandleFunc("/backups", getBackups).Methods("GET")
router.HandleFunc("/backups", createBackup).Methods("POST")
router.HandleFunc("/backups/{id}", getBackup).Methods("GET")
router.HandleFunc("/backups/{id}", deleteBackup).Methods("DELETE")
listen := fmt.Sprintf("%s:%d", options.ListenIP, options.ListenPort)
logrus.Infof("Listening at %s", listen)
err = currentBackuper.Init()
if err != nil {
return err
}
err = http.ListenAndServe(listen, router)
if err != nil {
return err
}
return nil
}
//GetBackups - get backups from Backuper
func getBackups(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("GetBackups r=%s", r)
w.Header().Set("Content-Type", "application/json")
gab, err := currentBackuper.GetAllBackups()
if err != nil {
logrus.Warnf("Error calling getAllBackups(). err=%s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = json.NewEncoder(w).Encode(gab)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
//GetBackup - get specific backup from Backuper
func getBackup(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("GetBackup r=%s", r)
params := mux.Vars(r)
apiID := params["id"]
if RunningBackupAPIID == apiID {
sendSchellyResponse(apiID, "", "running", "backup is running", -1, http.StatusOK, w)
return
}
resp, err := currentBackuper.GetBackup(apiID)
if err != nil {
logrus.Warnf("Error calling getBackup() for id %s. err=%s", apiID, err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if resp == nil {
logrus.Debugf("Backup %s not found", apiID)
http.Error(w, fmt.Sprintf("Backup %s not found", apiID), http.StatusNotFound)
return
}
sendSchellyResponse(apiID, resp.DataID, resp.Status, resp.Message, resp.SizeMB, http.StatusOK, w)
}
//CreateBackup - trigger new backup
func createBackup(w http.ResponseWriter, r *http.Request) {
logrus.Infof(">>>>CreateBackup r=%s", r)
if RunningBackupAPIID != "" {
logrus.Infof("Another backup id %s is already running. Aborting.", RunningBackupAPIID)
http.Error(w, fmt.Sprintf("Another backup id %s is already running. Aborting.", RunningBackupAPIID), http.StatusConflict)
return
}
RunningBackupAPIID = createAPIID()
CurrentBackupStartTime = time.Now()
//run backup assyncronouslly
go runBackup(RunningBackupAPIID)
sendSchellyResponse(RunningBackupAPIID, "", "running", "backup triggered", -1, http.StatusAccepted, w)
}
//DeleteBackup - delete backup from Backuper
func deleteBackup(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("DeleteBackup r=%s", r)
params := mux.Vars(r)
apiID := params["id"]
if RunningBackupAPIID == apiID {
if currentBackupContext.CmdRef != nil {
logrus.Debugf("Canceling currently running backup %s", RunningBackupAPIID)
err := (*currentBackupContext.CmdRef).Stop()
if err != nil {
sendSchellyResponse(apiID, "", "running", "Couldn't cancel current running backup task. err="+err.Error(), -1, http.StatusInternalServerError, w)
} else {
sendSchellyResponse(apiID, "", "deleted", "Running backup task was cancelled successfuly", -1, http.StatusOK, w)
}
}
return
}
bk, err := currentBackuper.GetBackup(apiID)
if err != nil {
logrus.Warnf("Error calling deleteBackup() with id %s", apiID)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if bk == nil {
logrus.Warnf("Backup %s not found", apiID)
http.Error(w, fmt.Sprintf("Backup %s not found", apiID), http.StatusNotFound)
return
}
err = currentBackuper.DeleteBackup(apiID)
if err != nil {
logrus.Warnf("Error calling deleteBackup() with id %s", apiID)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
logrus.Debugf("Backup %s deleted", apiID)
sendSchellyResponse(apiID, bk.DataID, "deleted", "backup deleted successfuly", -1, http.StatusOK, w)
}
func sendSchellyResponse(apiID string, dataID string, status string, message string, size float64, httpStatus int, w http.ResponseWriter) {
resp := SchellyResponse{
ID: apiID,
DataID: dataID,
Status: status,
Message: message,
SizeMB: size,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(httpStatus)
err := json.NewEncoder(w).Encode(resp)
if err != nil {
logrus.Errorf("Error encoding response. err=%s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
} else {
logrus.Debugf("SchellyResponse sent %s", resp)
}
}
func runBackup(apiID string) {
logrus.Debugf("Backup request arrived apiID=%s", RunningBackupAPIID)
RunningBackupAPIID = apiID
//process pre backup command before calling backup
if options.PreBackupCommand != "" {
logrus.Infof("Running pre-backup command '%s'", options.PreBackupCommand)
out, err := ExecShellTimeout(options.PreBackupCommand, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Pre-backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Pre-backup command error. out=%s; err=%s", out, err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debug("Pre-backup command success")
}
}
//run backup
logrus.Infof("Running backup")
err := currentBackuper.CreateNewBackup(RunningBackupAPIID, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Backup error. err=%s", err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debugf("Backup creation success on Backuper. backup id %s", RunningBackupAPIID)
}
//process post backup command after finished
if options.PostBackupCommand != "" {
logrus.Infof("Running post-backup command '%s'", options.PostBackupCommand)
out, err := ExecShellTimeout(options.PostBackupCommand, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Post-backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Post-backup command error. out=%s; err=%s", out, err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debug("Post-backup command success")
}
}
logrus.Infof("Backup finished")
//now we can accept another POST /backups call...
RunningBackupAPIID = ""
}
func createAPIID() string | {
uuid := uuid.NewV4()
return uuid.String()
} | identifier_body | |
schellyhook.go | package schellyhook
import (
"encoding/json"
"flag"
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
uuid "github.com/satori/go.uuid"
"github.com/sirupsen/logrus"
)
//Options command line options
type Options struct {
ListenPort int
ListenIP string
PrePostTimeout int
PreBackupCommand string
PostBackupCommand string
}
//SchellyResponse schelly webhook response
type SchellyResponse struct {
ID string `json:"id",omitempty`
DataID string `json:"data_id",omitempty`
Status string `json:"status",omitempty`
Message string `json:"message",omitempty`
SizeMB float64 `json:"size_mb",omitempty`
}
//Backuper interface for who is implementing specific backup operations on backend
type Backuper interface {
//Init register command line flags here etc
Init() error
//RegisterFlags register flags for command line options
RegisterFlags() error
//CreateNewBackup create a new backup synchronously (return only after complete backup creation). If you set shellContext.CmdRef when calling a Shell Script, the bridge will cancel the process automatically if a DELETE /backup/{id} for the running backup is received
CreateNewBackup(apiID string, timeout time.Duration, shellContext *ShellContext) error
//DeleteBackup remove backup data from storage. if backup is still running and set cmdRef on ShellContext of CreateBackup call, cancel it
DeleteBackup(apiID string) error
//GetAllBackups returns all tracked backups. this is optional for Schelly
GetAllBackups() ([]SchellyResponse, error)
//GetBackup returns a specific backup info. if requested apiID is running, this method is not even called, because schellyhook will do this for you
GetBackup(apiID string) (*SchellyResponse, error)
}
var options = new(Options)
var currentBackupContext = ShellContext{}
var currentBackuper Backuper
//RunningBackupAPIID current apiID of the currently running backup, if any
var RunningBackupAPIID = ""
//CurrentBackupStartTime start time of currently running backup, if any
var CurrentBackupStartTime time.Time
//Initialize must be invoked to start REST server along with all Backuper hooks
func Initialize(backuper Backuper) error {
if currentBackuper != nil {
logrus.Infof("Replacing previously existing 'backuper' instance in Schelly-Webhook")
}
currentBackuper = backuper
err := currentBackuper.RegisterFlags()
if err != nil {
return err
}
listenPort := flag.Int("listen-port", 7070, "REST API server listen port")
listenIP := flag.String("listen-ip", "0.0.0.0", "REST API server listen ip address")
logLevel := flag.String("log-level", "info", "debug, info, warning or error")
preBackupCommand := flag.String("pre-backup-command", "", "Command to be executed before running the backup")
postBackupCommand := flag.String("post-backup-command", "", "Command to be executed after running the backup")
prePostTimeout := flag.Int("pre-post-timeout", 7200, "Max time for pre or post command to be executing. After that time the process will be killed")
flag.Parse()
switch *logLevel {
case "debug":
logrus.SetLevel(logrus.DebugLevel)
break
case "warning":
logrus.SetLevel(logrus.WarnLevel)
break
case "error":
logrus.SetLevel(logrus.ErrorLevel)
break
default:
logrus.SetLevel(logrus.InfoLevel)
}
options.ListenPort = *listenPort
options.ListenIP = *listenIP
options.PrePostTimeout = *prePostTimeout
options.PreBackupCommand = *preBackupCommand
options.PostBackupCommand = *postBackupCommand
router := mux.NewRouter()
router.HandleFunc("/backups", getBackups).Methods("GET")
router.HandleFunc("/backups", createBackup).Methods("POST")
router.HandleFunc("/backups/{id}", getBackup).Methods("GET")
router.HandleFunc("/backups/{id}", deleteBackup).Methods("DELETE")
listen := fmt.Sprintf("%s:%d", options.ListenIP, options.ListenPort)
logrus.Infof("Listening at %s", listen)
err = currentBackuper.Init()
if err != nil {
return err
}
err = http.ListenAndServe(listen, router)
if err != nil {
return err
}
return nil
}
//GetBackups - get backups from Backuper
func getBackups(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("GetBackups r=%s", r)
w.Header().Set("Content-Type", "application/json")
gab, err := currentBackuper.GetAllBackups()
if err != nil {
logrus.Warnf("Error calling getAllBackups(). err=%s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = json.NewEncoder(w).Encode(gab)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
//GetBackup - get specific backup from Backuper
func getBackup(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("GetBackup r=%s", r)
params := mux.Vars(r)
apiID := params["id"]
if RunningBackupAPIID == apiID {
sendSchellyResponse(apiID, "", "running", "backup is running", -1, http.StatusOK, w)
return
}
resp, err := currentBackuper.GetBackup(apiID)
if err != nil {
logrus.Warnf("Error calling getBackup() for id %s. err=%s", apiID, err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if resp == nil {
logrus.Debugf("Backup %s not found", apiID)
http.Error(w, fmt.Sprintf("Backup %s not found", apiID), http.StatusNotFound)
return
}
sendSchellyResponse(apiID, resp.DataID, resp.Status, resp.Message, resp.SizeMB, http.StatusOK, w)
}
//CreateBackup - trigger new backup
func createBackup(w http.ResponseWriter, r *http.Request) {
logrus.Infof(">>>>CreateBackup r=%s", r)
if RunningBackupAPIID != "" {
logrus.Infof("Another backup id %s is already running. Aborting.", RunningBackupAPIID)
http.Error(w, fmt.Sprintf("Another backup id %s is already running. Aborting.", RunningBackupAPIID), http.StatusConflict)
return
}
RunningBackupAPIID = createAPIID()
CurrentBackupStartTime = time.Now()
//run backup assyncronouslly
go runBackup(RunningBackupAPIID)
sendSchellyResponse(RunningBackupAPIID, "", "running", "backup triggered", -1, http.StatusAccepted, w)
}
//DeleteBackup - delete backup from Backuper
func deleteBackup(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("DeleteBackup r=%s", r)
params := mux.Vars(r)
apiID := params["id"]
if RunningBackupAPIID == apiID {
if currentBackupContext.CmdRef != nil {
logrus.Debugf("Canceling currently running backup %s", RunningBackupAPIID)
err := (*currentBackupContext.CmdRef).Stop()
if err != nil {
sendSchellyResponse(apiID, "", "running", "Couldn't cancel current running backup task. err="+err.Error(), -1, http.StatusInternalServerError, w)
} else {
sendSchellyResponse(apiID, "", "deleted", "Running backup task was cancelled successfuly", -1, http.StatusOK, w)
} |
bk, err := currentBackuper.GetBackup(apiID)
if err != nil {
logrus.Warnf("Error calling deleteBackup() with id %s", apiID)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if bk == nil {
logrus.Warnf("Backup %s not found", apiID)
http.Error(w, fmt.Sprintf("Backup %s not found", apiID), http.StatusNotFound)
return
}
err = currentBackuper.DeleteBackup(apiID)
if err != nil {
logrus.Warnf("Error calling deleteBackup() with id %s", apiID)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
logrus.Debugf("Backup %s deleted", apiID)
sendSchellyResponse(apiID, bk.DataID, "deleted", "backup deleted successfuly", -1, http.StatusOK, w)
}
func sendSchellyResponse(apiID string, dataID string, status string, message string, size float64, httpStatus int, w http.ResponseWriter) {
resp := SchellyResponse{
ID: apiID,
DataID: dataID,
Status: status,
Message: message,
SizeMB: size,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(httpStatus)
err := json.NewEncoder(w).Encode(resp)
if err != nil {
logrus.Errorf("Error encoding response. err=%s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
} else {
logrus.Debugf("SchellyResponse sent %s", resp)
}
}
func runBackup(apiID string) {
logrus.Debugf("Backup request arrived apiID=%s", RunningBackupAPIID)
RunningBackupAPIID = apiID
//process pre backup command before calling backup
if options.PreBackupCommand != "" {
logrus.Infof("Running pre-backup command '%s'", options.PreBackupCommand)
out, err := ExecShellTimeout(options.PreBackupCommand, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Pre-backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Pre-backup command error. out=%s; err=%s", out, err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debug("Pre-backup command success")
}
}
//run backup
logrus.Infof("Running backup")
err := currentBackuper.CreateNewBackup(RunningBackupAPIID, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Backup error. err=%s", err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debugf("Backup creation success on Backuper. backup id %s", RunningBackupAPIID)
}
//process post backup command after finished
if options.PostBackupCommand != "" {
logrus.Infof("Running post-backup command '%s'", options.PostBackupCommand)
out, err := ExecShellTimeout(options.PostBackupCommand, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Post-backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Post-backup command error. out=%s; err=%s", out, err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debug("Post-backup command success")
}
}
logrus.Infof("Backup finished")
//now we can accept another POST /backups call...
RunningBackupAPIID = ""
}
func createAPIID() string {
uuid := uuid.NewV4()
return uuid.String()
} | }
return
} | random_line_split |
schellyhook.go | package schellyhook
import (
"encoding/json"
"flag"
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
uuid "github.com/satori/go.uuid"
"github.com/sirupsen/logrus"
)
//Options command line options
type Options struct {
ListenPort int
ListenIP string
PrePostTimeout int
PreBackupCommand string
PostBackupCommand string
}
//SchellyResponse schelly webhook response
type SchellyResponse struct {
ID string `json:"id",omitempty`
DataID string `json:"data_id",omitempty`
Status string `json:"status",omitempty`
Message string `json:"message",omitempty`
SizeMB float64 `json:"size_mb",omitempty`
}
//Backuper interface for who is implementing specific backup operations on backend
type Backuper interface {
//Init register command line flags here etc
Init() error
//RegisterFlags register flags for command line options
RegisterFlags() error
//CreateNewBackup create a new backup synchronously (return only after complete backup creation). If you set shellContext.CmdRef when calling a Shell Script, the bridge will cancel the process automatically if a DELETE /backup/{id} for the running backup is received
CreateNewBackup(apiID string, timeout time.Duration, shellContext *ShellContext) error
//DeleteBackup remove backup data from storage. if backup is still running and set cmdRef on ShellContext of CreateBackup call, cancel it
DeleteBackup(apiID string) error
//GetAllBackups returns all tracked backups. this is optional for Schelly
GetAllBackups() ([]SchellyResponse, error)
//GetBackup returns a specific backup info. if requested apiID is running, this method is not even called, because schellyhook will do this for you
GetBackup(apiID string) (*SchellyResponse, error)
}
var options = new(Options)
var currentBackupContext = ShellContext{}
var currentBackuper Backuper
//RunningBackupAPIID current apiID of the currently running backup, if any
var RunningBackupAPIID = ""
//CurrentBackupStartTime start time of currently running backup, if any
var CurrentBackupStartTime time.Time
//Initialize must be invoked to start REST server along with all Backuper hooks
func Initialize(backuper Backuper) error {
if currentBackuper != nil {
logrus.Infof("Replacing previously existing 'backuper' instance in Schelly-Webhook")
}
currentBackuper = backuper
err := currentBackuper.RegisterFlags()
if err != nil {
return err
}
listenPort := flag.Int("listen-port", 7070, "REST API server listen port")
listenIP := flag.String("listen-ip", "0.0.0.0", "REST API server listen ip address")
logLevel := flag.String("log-level", "info", "debug, info, warning or error")
preBackupCommand := flag.String("pre-backup-command", "", "Command to be executed before running the backup")
postBackupCommand := flag.String("post-backup-command", "", "Command to be executed after running the backup")
prePostTimeout := flag.Int("pre-post-timeout", 7200, "Max time for pre or post command to be executing. After that time the process will be killed")
flag.Parse()
switch *logLevel {
case "debug":
logrus.SetLevel(logrus.DebugLevel)
break
case "warning":
logrus.SetLevel(logrus.WarnLevel)
break
case "error":
logrus.SetLevel(logrus.ErrorLevel)
break
default:
logrus.SetLevel(logrus.InfoLevel)
}
options.ListenPort = *listenPort
options.ListenIP = *listenIP
options.PrePostTimeout = *prePostTimeout
options.PreBackupCommand = *preBackupCommand
options.PostBackupCommand = *postBackupCommand
router := mux.NewRouter()
router.HandleFunc("/backups", getBackups).Methods("GET")
router.HandleFunc("/backups", createBackup).Methods("POST")
router.HandleFunc("/backups/{id}", getBackup).Methods("GET")
router.HandleFunc("/backups/{id}", deleteBackup).Methods("DELETE")
listen := fmt.Sprintf("%s:%d", options.ListenIP, options.ListenPort)
logrus.Infof("Listening at %s", listen)
err = currentBackuper.Init()
if err != nil {
return err
}
err = http.ListenAndServe(listen, router)
if err != nil {
return err
}
return nil
}
//GetBackups - get backups from Backuper
func getBackups(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("GetBackups r=%s", r)
w.Header().Set("Content-Type", "application/json")
gab, err := currentBackuper.GetAllBackups()
if err != nil {
logrus.Warnf("Error calling getAllBackups(). err=%s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = json.NewEncoder(w).Encode(gab)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
//GetBackup - get specific backup from Backuper
func getBackup(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("GetBackup r=%s", r)
params := mux.Vars(r)
apiID := params["id"]
if RunningBackupAPIID == apiID {
sendSchellyResponse(apiID, "", "running", "backup is running", -1, http.StatusOK, w)
return
}
resp, err := currentBackuper.GetBackup(apiID)
if err != nil {
logrus.Warnf("Error calling getBackup() for id %s. err=%s", apiID, err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if resp == nil {
logrus.Debugf("Backup %s not found", apiID)
http.Error(w, fmt.Sprintf("Backup %s not found", apiID), http.StatusNotFound)
return
}
sendSchellyResponse(apiID, resp.DataID, resp.Status, resp.Message, resp.SizeMB, http.StatusOK, w)
}
//CreateBackup - trigger new backup
func createBackup(w http.ResponseWriter, r *http.Request) {
logrus.Infof(">>>>CreateBackup r=%s", r)
if RunningBackupAPIID != "" {
logrus.Infof("Another backup id %s is already running. Aborting.", RunningBackupAPIID)
http.Error(w, fmt.Sprintf("Another backup id %s is already running. Aborting.", RunningBackupAPIID), http.StatusConflict)
return
}
RunningBackupAPIID = createAPIID()
CurrentBackupStartTime = time.Now()
//run backup assyncronouslly
go runBackup(RunningBackupAPIID)
sendSchellyResponse(RunningBackupAPIID, "", "running", "backup triggered", -1, http.StatusAccepted, w)
}
//DeleteBackup - delete backup from Backuper
func deleteBackup(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("DeleteBackup r=%s", r)
params := mux.Vars(r)
apiID := params["id"]
if RunningBackupAPIID == apiID {
if currentBackupContext.CmdRef != nil {
logrus.Debugf("Canceling currently running backup %s", RunningBackupAPIID)
err := (*currentBackupContext.CmdRef).Stop()
if err != nil {
sendSchellyResponse(apiID, "", "running", "Couldn't cancel current running backup task. err="+err.Error(), -1, http.StatusInternalServerError, w)
} else {
sendSchellyResponse(apiID, "", "deleted", "Running backup task was cancelled successfuly", -1, http.StatusOK, w)
}
}
return
}
bk, err := currentBackuper.GetBackup(apiID)
if err != nil {
logrus.Warnf("Error calling deleteBackup() with id %s", apiID)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if bk == nil {
logrus.Warnf("Backup %s not found", apiID)
http.Error(w, fmt.Sprintf("Backup %s not found", apiID), http.StatusNotFound)
return
}
err = currentBackuper.DeleteBackup(apiID)
if err != nil {
logrus.Warnf("Error calling deleteBackup() with id %s", apiID)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
logrus.Debugf("Backup %s deleted", apiID)
sendSchellyResponse(apiID, bk.DataID, "deleted", "backup deleted successfuly", -1, http.StatusOK, w)
}
func sendSchellyResponse(apiID string, dataID string, status string, message string, size float64, httpStatus int, w http.ResponseWriter) {
resp := SchellyResponse{
ID: apiID,
DataID: dataID,
Status: status,
Message: message,
SizeMB: size,
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(httpStatus)
err := json.NewEncoder(w).Encode(resp)
if err != nil {
logrus.Errorf("Error encoding response. err=%s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
} else {
logrus.Debugf("SchellyResponse sent %s", resp)
}
}
func runBackup(apiID string) {
logrus.Debugf("Backup request arrived apiID=%s", RunningBackupAPIID)
RunningBackupAPIID = apiID
//process pre backup command before calling backup
if options.PreBackupCommand != "" {
logrus.Infof("Running pre-backup command '%s'", options.PreBackupCommand)
out, err := ExecShellTimeout(options.PreBackupCommand, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil | else {
logrus.Debug("Pre-backup command success")
}
}
//run backup
logrus.Infof("Running backup")
err := currentBackuper.CreateNewBackup(RunningBackupAPIID, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Backup error. err=%s", err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debugf("Backup creation success on Backuper. backup id %s", RunningBackupAPIID)
}
//process post backup command after finished
if options.PostBackupCommand != "" {
logrus.Infof("Running post-backup command '%s'", options.PostBackupCommand)
out, err := ExecShellTimeout(options.PostBackupCommand, time.Duration(options.PrePostTimeout)*time.Second, ¤tBackupContext)
if err != nil {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Post-backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Post-backup command error. out=%s; err=%s", out, err.Error())
RunningBackupAPIID = ""
return
} else {
logrus.Debug("Post-backup command success")
}
}
logrus.Infof("Backup finished")
//now we can accept another POST /backups call...
RunningBackupAPIID = ""
}
func createAPIID() string {
uuid := uuid.NewV4()
return uuid.String()
}
| {
status := currentBackupContext.CmdRef.Status()
if status.Exit == -1 {
logrus.Warnf("Pre-backup command timeout enforced (%d seconds)", (status.StopTs-status.StartTs)/1000000000)
}
logrus.Debugf("Pre-backup command error. out=%s; err=%s", out, err.Error())
RunningBackupAPIID = ""
return
} | conditional_block |
util.py | import unicodedata
from datetime import datetime
import functools
from http import cookies
import inspect
import importlib
import itertools
from pathlib import Path
import re
from urllib import parse
import tempfile
import requests
def string_combinations(
seed="",
pattern=None,
start=None,
end=None,
valid_chars=None,
variants=None,
min_length=1,
index=None
):
'''
Generates string combinations from `seed`, until the string is at least of `min_length`.
Only combinations that are lexicographically equal to or after `start` and equal to
or before `end` are returned, if `start` or `end` are given.
If `pattern` is given, only those combinations are returned that match the pattern.
`valid_chars` specifies which characters can be added to the `seed` string.
If any of the valid characters can have multiple variants (such as `c` being `c` or `ch`),
these can be specified by `variants`. `variants` must be either a list of tuples or dict.
Keys must match characters that have multiple variants. Values must be a list of these variants.
These variants can be of any length.
Index specifies which character of the `seed` string is being considered. If `index` is out of range
of `seed` string, the new character is appended to the `seed` string
EXAMPLE:
>>> string_combinations(
>>> seed="hi",
>>> start='ho',
>>> end='hq',
>>> variants={'o': ['oh', 'ok', 'obuh']},
>>> min_length=4,
>>> index=1
>>> )
# From string 'hi', generates all strings that start with 'ho' and 'hq' (inclusive) and everything in between,
>>>
# whereas the string combinations start at index 1 ("i"). Generated string are of length 4, possibly except when
>>>
# strings containing 'o' were generated variants with 'oh', 'ok', or 'obuh' instead of 'o'.
>>>
'''
if index is not None \
and len(seed) >= min_length \
and (not(start) or seed[:len(start)] >= start)\
and (not(end) or seed[:len(end)] <= end):
yield seed
return
seed = bytearray(seed, "ascii")
index = len(seed) if index is None else index
valid_chars = valid_chars or 'abcdefghijklmnopqrstuvwxzy0123456789'
# variants should be {char: [list, of, variants]} or [(char, [list, of, variants])]
variants = variants or []
variants = variants.items() if isinstance(variants, dict) else variants
start_reached = False
for s in valid_chars:
# Skip if start is given and has not been reached yet
# or if end is given and has been already reached
if (start and not(start_reached) and len(start) >= (index + 1) and s != start[index]):
continue
# Prevent going into depth if we already have minimum length
# and start or end conditions are shorter than that
elif index > min_length - 1 and (start and index > len(start) - 1) and (end and index > len(end) - 1):
continue
if not start_reached:
start_reached = True
# workaround for "ch" being considered a separate char.
# uses (temp_seed + variant) as a final name for all variants
curr_variants = [s]
for case, v in variants:
if s == case:
curr_variants.extend(v)
for v in curr_variants:
temp_seed = seed.copy()
# Modify seed with current variant
for i, c in enumerate(v):
if len(temp_seed) < index + 1 + i:
temp_seed.append(ord(c))
else:
temp_seed[index] = ord(c)
temp_seed = temp_seed.decode()
# End reached
if end and temp_seed[:len(end)] > end:
return
# Skip seed if it does not match the pattern
if pattern and not re.search(pattern, temp_seed):
continue
# Go one level deeper (1 char longer seed)
results = string_combinations(
seed=temp_seed,
valid_chars=valid_chars,
pattern=pattern,
start=start,
end=end,
variants=variants,
min_length=min_length,
index=index + 1
)
for res in results:
yield res
def map_dict_val(fn, d):
return {
k: fn(v)
for k, v in d.items()
}
def unpack_url(url):
'''
Get URL object and Query object from a url, as returned by
urllib.parse.urlparse and urllib.parse.parse_qs, respectively.
Reverse of pack_url
'''
url_obj = parse.urlparse(url)
q_obj = parse.parse_qs(url_obj.query)
q_obj = map_dict_val(lambda l: l[0], q_obj)
return url_obj, q_obj
def pack_url(url_obj, q_obj):
'''
Get url string from URL object and Query object.
Reverse of unpack_url
'''
url_obj = url_obj._replace(query=parse.urlencode(q_obj))
url_string = parse.urlunparse(url_obj)
return url_string
def xpath_class(classes, operator="or"):
''''Format an XPath class condition'''
return f" {operator} ".join(
f"contains(concat(' ', normalize-space(@class),' '),' {cls} ')"
for cls in classes
)
def xpath_startswith(attr, s):
return f"@{attr} and starts-with(@{attr}, '{s}')"
def get_dir(obj):
path = inspect.getfile(obj)
return str(Path(path).parent.absolute())
def module_from_abs_path(name, path):
'''
See https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
# importing-a-source-file-directly
and https://docs.python.org/3/library/importlib.html
'''
spec = importlib.util.spec_from_file_location(name, path)
mdl = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mdl)
return mdl
def local_module(cls, filename):
'''
Search the directory of a module where `cls` is defined and
look for file `filename`.
Raises `ValueError` if the file does not exist.
Raises `ModuleNotFoundError` if the import failed.
'''
cls_dir = get_dir(cls)
file_path = Path(cls_dir, filename)
file_abs_path = str(file_path.absolute())
if not file_path.exists():
raise ValueError('File not found: {}'.format(file_abs_path))
import_path = cls.__module__.rsplit('.', 1)[0]
module_path = "{}.{}".format(import_path, file_path.stem)
module = module_from_abs_path(module_path, file_abs_path)
return module
def pairwise(iterable):
'''See https://docs.python.org/3/library/itertools.html#itertools-recipes'''
a, b = itertools.tee(iterable)
next(b, None)
return itertools.zip_longest(a, b, fillvalue=object())
def remove_adjacent_dup(iterable):
'''
See https://stackoverflow.com/a/34986013/9788634
'''
return [x for x, y in pairwise(x) if x != y]
def soft_update(d1, *dicts, dict_mode='override', list_mode='override', copy=False):
'''
Update dictonary entries, overriding values if they are primitives
or if the type changes.
Returns the updated dictionary. If `copy` is `True`, the updates are made
to a copy.
If the values are dictionaries then one of the following modes apply:
- `update` - keep the nested dictionaries, and only update entries
- `override` - replace the nested dictonaries with new values
If the values are lists then one of the following modes apply:
- `append` - join elements from all occurences
- `set` - add new list member only if it is not present in the list already
- `override` - replace the list with new value
'''
if copy:
out = {}
the_dicts = [d1, *dicts]
else:
out = d1
the_dicts = dicts
for d in the_dicts:
for k, v in d.items():
if k not in out:
out[k] = v
continue
elif type(v) != type(out[k]):
out[k] = v
elif isinstance(v, dict):
if dict_mode == 'update':
out[k].update(v)
elif dict_mode == 'override':
out[k] = v
else:
raise ValueError(f'Unknown dict mode "{dict_mode}"')
elif isinstance(v, list):
if list_mode == 'append':
out[k].extend(v)
elif list_mode == 'set':
out[k].extend([i for i in v if i not in out[k]])
elif list_mode == 'override':
out[k] = v
else:
raise ValueError(f'Unknown list mode "{list_mode}"')
else:
out[k] = v
return out
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(fillvalue=fillvalue, *args)
def flatten_json(y):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
flatten(y)
return out
@functools.wraps(filter)
def lfilter(functionOrNone, iterable):
return list(filter(functionOrNone, iterable))
@functools.wraps(map)
def lmap(func, *iterables):
return list(map(func, *iterables))
@functools.wraps(map)
def map2str(*iterables):
return map(str, iterables)
@functools.wraps(map)
def map2int(*iterables):
return map(int, iterables)
def flatten(iterable):
t = type(iterable)
return t(i for grp in iterable for i in grp)
def lflatten(iterable):
return flatten(list(iterable))
def time_tag():
return datetime.now().strftime('%Y_%m_%d__%H_%M_%S')
def update_request_cookies(request, inplace=True, pattern=None):
c = cookies.SimpleCookie()
h = request.headers.copy() if not inplace else request.headers
for header in ['Cookie', 'Set-Cookie']:
for ck in h.getlist(header):
c.load(ck.decode('utf-8'))
h.pop('cookie', None)
h.pop('set-cookie', None)
for morsel in c.values():
if pattern is None or re.search(pattern, morsel.key):
h.appendlist('cookie', '{}={}'.format(morsel.key, morsel.value))
return h
def strip_accents(text):
'''https://stackoverflow.com/a/44433664/9788634'''
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)\
.encode('ascii', 'ignore')\
.decode("utf-8")
return str(text)
def dol2lot(dol):
'''Convert dict of lists to list of (key, value) tuples'''
lot = []
for k, val in dol.items():
try:
vals = iter(val)
except TypeError:
vals = [val]
lot.extend((k, v) for v in vals)
return lot
def lot2dol(lot):
|
def conditional_deco(deco, predicate):
'''
Decorator that takes another decorator and a predicate,
and applies the second decorator to a function only if the predicate
evaluates to True.a[@href and starts-with(@href, '/events/csv')]
'''
def deco_(function):
@functools.wraps(function)
def inner(*args, **kwargs):
if predicate(*args, **kwargs):
return deco(function)(*args, **kwargs)
return function(*args, **kwargs)
return inner
return deco_
def is_url(url):
try:
result = parse.urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
def _parse_user_agent_url(url):
return requests.get(url).strip().split('\n')
def get_user_agent_list(brws):
# Taken from https://github.com/tamimibrahim17/List-of-user-agents
url_template = 'https://raw.githubusercontent.com/tamimibrahim17/List-of-user-agents/master/{}.txt'
ual = []
for brw in brws:
url = url_template.format(parse.quote(brw))
uas = [
ua
for ua in _parse_user_agent_url(url)[:-2]
if "user agents string" not in ua
]
ual.extend(uas)
tempfile.NamedTemporaryFile()
return ual
def get_proxy_list(urls=None, files=None):
proxies = [p for p_list in map(_parse_user_agent_url, urls) for p in p_list]
proxies.extend([
Path(f).read_text(encoding='utf-8') for f in files
])
return proxies | '''Convert list of (key, value) tuples to dict of lists'''
dol = {}
for k, val in lot:
if k not in dol:
dol[k] = []
dol[k].append(val)
return dol | identifier_body |
util.py | import unicodedata
from datetime import datetime
import functools
from http import cookies
import inspect
import importlib
import itertools
from pathlib import Path
import re
from urllib import parse
import tempfile
import requests
def string_combinations(
seed="",
pattern=None,
start=None,
end=None,
valid_chars=None,
variants=None,
min_length=1,
index=None
):
'''
Generates string combinations from `seed`, until the string is at least of `min_length`.
Only combinations that are lexicographically equal to or after `start` and equal to
or before `end` are returned, if `start` or `end` are given.
If `pattern` is given, only those combinations are returned that match the pattern.
`valid_chars` specifies which characters can be added to the `seed` string.
If any of the valid characters can have multiple variants (such as `c` being `c` or `ch`),
these can be specified by `variants`. `variants` must be either a list of tuples or dict.
Keys must match characters that have multiple variants. Values must be a list of these variants.
These variants can be of any length.
Index specifies which character of the `seed` string is being considered. If `index` is out of range
of `seed` string, the new character is appended to the `seed` string
EXAMPLE:
>>> string_combinations(
>>> seed="hi",
>>> start='ho',
>>> end='hq',
>>> variants={'o': ['oh', 'ok', 'obuh']},
>>> min_length=4,
>>> index=1
>>> )
# From string 'hi', generates all strings that start with 'ho' and 'hq' (inclusive) and everything in between,
>>>
# whereas the string combinations start at index 1 ("i"). Generated string are of length 4, possibly except when
>>>
# strings containing 'o' were generated variants with 'oh', 'ok', or 'obuh' instead of 'o'.
>>>
'''
if index is not None \
and len(seed) >= min_length \
and (not(start) or seed[:len(start)] >= start)\
and (not(end) or seed[:len(end)] <= end):
yield seed
return
seed = bytearray(seed, "ascii")
index = len(seed) if index is None else index
valid_chars = valid_chars or 'abcdefghijklmnopqrstuvwxzy0123456789'
# variants should be {char: [list, of, variants]} or [(char, [list, of, variants])]
variants = variants or []
variants = variants.items() if isinstance(variants, dict) else variants
start_reached = False
for s in valid_chars:
# Skip if start is given and has not been reached yet
# or if end is given and has been already reached
if (start and not(start_reached) and len(start) >= (index + 1) and s != start[index]):
continue
# Prevent going into depth if we already have minimum length
# and start or end conditions are shorter than that
elif index > min_length - 1 and (start and index > len(start) - 1) and (end and index > len(end) - 1):
continue
if not start_reached:
start_reached = True
# workaround for "ch" being considered a separate char.
# uses (temp_seed + variant) as a final name for all variants
curr_variants = [s]
for case, v in variants:
if s == case:
curr_variants.extend(v)
for v in curr_variants:
temp_seed = seed.copy()
# Modify seed with current variant
for i, c in enumerate(v):
if len(temp_seed) < index + 1 + i:
temp_seed.append(ord(c))
else:
temp_seed[index] = ord(c)
temp_seed = temp_seed.decode()
# End reached
if end and temp_seed[:len(end)] > end:
return
# Skip seed if it does not match the pattern
if pattern and not re.search(pattern, temp_seed):
continue
# Go one level deeper (1 char longer seed)
results = string_combinations(
seed=temp_seed,
valid_chars=valid_chars,
pattern=pattern,
start=start,
end=end,
variants=variants,
min_length=min_length,
index=index + 1
)
for res in results:
yield res
def map_dict_val(fn, d):
return {
k: fn(v)
for k, v in d.items()
}
def unpack_url(url):
'''
Get URL object and Query object from a url, as returned by
urllib.parse.urlparse and urllib.parse.parse_qs, respectively. |
url_obj = parse.urlparse(url)
q_obj = parse.parse_qs(url_obj.query)
q_obj = map_dict_val(lambda l: l[0], q_obj)
return url_obj, q_obj
def pack_url(url_obj, q_obj):
'''
Get url string from URL object and Query object.
Reverse of unpack_url
'''
url_obj = url_obj._replace(query=parse.urlencode(q_obj))
url_string = parse.urlunparse(url_obj)
return url_string
def xpath_class(classes, operator="or"):
''''Format an XPath class condition'''
return f" {operator} ".join(
f"contains(concat(' ', normalize-space(@class),' '),' {cls} ')"
for cls in classes
)
def xpath_startswith(attr, s):
return f"@{attr} and starts-with(@{attr}, '{s}')"
def get_dir(obj):
path = inspect.getfile(obj)
return str(Path(path).parent.absolute())
def module_from_abs_path(name, path):
'''
See https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
# importing-a-source-file-directly
and https://docs.python.org/3/library/importlib.html
'''
spec = importlib.util.spec_from_file_location(name, path)
mdl = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mdl)
return mdl
def local_module(cls, filename):
'''
Search the directory of a module where `cls` is defined and
look for file `filename`.
Raises `ValueError` if the file does not exist.
Raises `ModuleNotFoundError` if the import failed.
'''
cls_dir = get_dir(cls)
file_path = Path(cls_dir, filename)
file_abs_path = str(file_path.absolute())
if not file_path.exists():
raise ValueError('File not found: {}'.format(file_abs_path))
import_path = cls.__module__.rsplit('.', 1)[0]
module_path = "{}.{}".format(import_path, file_path.stem)
module = module_from_abs_path(module_path, file_abs_path)
return module
def pairwise(iterable):
'''See https://docs.python.org/3/library/itertools.html#itertools-recipes'''
a, b = itertools.tee(iterable)
next(b, None)
return itertools.zip_longest(a, b, fillvalue=object())
def remove_adjacent_dup(iterable):
'''
See https://stackoverflow.com/a/34986013/9788634
'''
return [x for x, y in pairwise(x) if x != y]
def soft_update(d1, *dicts, dict_mode='override', list_mode='override', copy=False):
'''
Update dictonary entries, overriding values if they are primitives
or if the type changes.
Returns the updated dictionary. If `copy` is `True`, the updates are made
to a copy.
If the values are dictionaries then one of the following modes apply:
- `update` - keep the nested dictionaries, and only update entries
- `override` - replace the nested dictonaries with new values
If the values are lists then one of the following modes apply:
- `append` - join elements from all occurences
- `set` - add new list member only if it is not present in the list already
- `override` - replace the list with new value
'''
if copy:
out = {}
the_dicts = [d1, *dicts]
else:
out = d1
the_dicts = dicts
for d in the_dicts:
for k, v in d.items():
if k not in out:
out[k] = v
continue
elif type(v) != type(out[k]):
out[k] = v
elif isinstance(v, dict):
if dict_mode == 'update':
out[k].update(v)
elif dict_mode == 'override':
out[k] = v
else:
raise ValueError(f'Unknown dict mode "{dict_mode}"')
elif isinstance(v, list):
if list_mode == 'append':
out[k].extend(v)
elif list_mode == 'set':
out[k].extend([i for i in v if i not in out[k]])
elif list_mode == 'override':
out[k] = v
else:
raise ValueError(f'Unknown list mode "{list_mode}"')
else:
out[k] = v
return out
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(fillvalue=fillvalue, *args)
def flatten_json(y):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
flatten(y)
return out
@functools.wraps(filter)
def lfilter(functionOrNone, iterable):
return list(filter(functionOrNone, iterable))
@functools.wraps(map)
def lmap(func, *iterables):
return list(map(func, *iterables))
@functools.wraps(map)
def map2str(*iterables):
return map(str, iterables)
@functools.wraps(map)
def map2int(*iterables):
return map(int, iterables)
def flatten(iterable):
t = type(iterable)
return t(i for grp in iterable for i in grp)
def lflatten(iterable):
return flatten(list(iterable))
def time_tag():
return datetime.now().strftime('%Y_%m_%d__%H_%M_%S')
def update_request_cookies(request, inplace=True, pattern=None):
c = cookies.SimpleCookie()
h = request.headers.copy() if not inplace else request.headers
for header in ['Cookie', 'Set-Cookie']:
for ck in h.getlist(header):
c.load(ck.decode('utf-8'))
h.pop('cookie', None)
h.pop('set-cookie', None)
for morsel in c.values():
if pattern is None or re.search(pattern, morsel.key):
h.appendlist('cookie', '{}={}'.format(morsel.key, morsel.value))
return h
def strip_accents(text):
'''https://stackoverflow.com/a/44433664/9788634'''
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)\
.encode('ascii', 'ignore')\
.decode("utf-8")
return str(text)
def dol2lot(dol):
'''Convert dict of lists to list of (key, value) tuples'''
lot = []
for k, val in dol.items():
try:
vals = iter(val)
except TypeError:
vals = [val]
lot.extend((k, v) for v in vals)
return lot
def lot2dol(lot):
'''Convert list of (key, value) tuples to dict of lists'''
dol = {}
for k, val in lot:
if k not in dol:
dol[k] = []
dol[k].append(val)
return dol
def conditional_deco(deco, predicate):
'''
Decorator that takes another decorator and a predicate,
and applies the second decorator to a function only if the predicate
evaluates to True.a[@href and starts-with(@href, '/events/csv')]
'''
def deco_(function):
@functools.wraps(function)
def inner(*args, **kwargs):
if predicate(*args, **kwargs):
return deco(function)(*args, **kwargs)
return function(*args, **kwargs)
return inner
return deco_
def is_url(url):
try:
result = parse.urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
def _parse_user_agent_url(url):
return requests.get(url).strip().split('\n')
def get_user_agent_list(brws):
# Taken from https://github.com/tamimibrahim17/List-of-user-agents
url_template = 'https://raw.githubusercontent.com/tamimibrahim17/List-of-user-agents/master/{}.txt'
ual = []
for brw in brws:
url = url_template.format(parse.quote(brw))
uas = [
ua
for ua in _parse_user_agent_url(url)[:-2]
if "user agents string" not in ua
]
ual.extend(uas)
tempfile.NamedTemporaryFile()
return ual
def get_proxy_list(urls=None, files=None):
proxies = [p for p_list in map(_parse_user_agent_url, urls) for p in p_list]
proxies.extend([
Path(f).read_text(encoding='utf-8') for f in files
])
return proxies |
Reverse of pack_url
''' | random_line_split |
util.py | import unicodedata
from datetime import datetime
import functools
from http import cookies
import inspect
import importlib
import itertools
from pathlib import Path
import re
from urllib import parse
import tempfile
import requests
def string_combinations(
seed="",
pattern=None,
start=None,
end=None,
valid_chars=None,
variants=None,
min_length=1,
index=None
):
'''
Generates string combinations from `seed`, until the string is at least of `min_length`.
Only combinations that are lexicographically equal to or after `start` and equal to
or before `end` are returned, if `start` or `end` are given.
If `pattern` is given, only those combinations are returned that match the pattern.
`valid_chars` specifies which characters can be added to the `seed` string.
If any of the valid characters can have multiple variants (such as `c` being `c` or `ch`),
these can be specified by `variants`. `variants` must be either a list of tuples or dict.
Keys must match characters that have multiple variants. Values must be a list of these variants.
These variants can be of any length.
Index specifies which character of the `seed` string is being considered. If `index` is out of range
of `seed` string, the new character is appended to the `seed` string
EXAMPLE:
>>> string_combinations(
>>> seed="hi",
>>> start='ho',
>>> end='hq',
>>> variants={'o': ['oh', 'ok', 'obuh']},
>>> min_length=4,
>>> index=1
>>> )
# From string 'hi', generates all strings that start with 'ho' and 'hq' (inclusive) and everything in between,
>>>
# whereas the string combinations start at index 1 ("i"). Generated string are of length 4, possibly except when
>>>
# strings containing 'o' were generated variants with 'oh', 'ok', or 'obuh' instead of 'o'.
>>>
'''
if index is not None \
and len(seed) >= min_length \
and (not(start) or seed[:len(start)] >= start)\
and (not(end) or seed[:len(end)] <= end):
yield seed
return
seed = bytearray(seed, "ascii")
index = len(seed) if index is None else index
valid_chars = valid_chars or 'abcdefghijklmnopqrstuvwxzy0123456789'
# variants should be {char: [list, of, variants]} or [(char, [list, of, variants])]
variants = variants or []
variants = variants.items() if isinstance(variants, dict) else variants
start_reached = False
for s in valid_chars:
# Skip if start is given and has not been reached yet
# or if end is given and has been already reached
if (start and not(start_reached) and len(start) >= (index + 1) and s != start[index]):
continue
# Prevent going into depth if we already have minimum length
# and start or end conditions are shorter than that
elif index > min_length - 1 and (start and index > len(start) - 1) and (end and index > len(end) - 1):
continue
if not start_reached:
start_reached = True
# workaround for "ch" being considered a separate char.
# uses (temp_seed + variant) as a final name for all variants
curr_variants = [s]
for case, v in variants:
if s == case:
curr_variants.extend(v)
for v in curr_variants:
temp_seed = seed.copy()
# Modify seed with current variant
for i, c in enumerate(v):
if len(temp_seed) < index + 1 + i:
temp_seed.append(ord(c))
else:
temp_seed[index] = ord(c)
temp_seed = temp_seed.decode()
# End reached
if end and temp_seed[:len(end)] > end:
return
# Skip seed if it does not match the pattern
if pattern and not re.search(pattern, temp_seed):
continue
# Go one level deeper (1 char longer seed)
results = string_combinations(
seed=temp_seed,
valid_chars=valid_chars,
pattern=pattern,
start=start,
end=end,
variants=variants,
min_length=min_length,
index=index + 1
)
for res in results:
yield res
def map_dict_val(fn, d):
return {
k: fn(v)
for k, v in d.items()
}
def unpack_url(url):
'''
Get URL object and Query object from a url, as returned by
urllib.parse.urlparse and urllib.parse.parse_qs, respectively.
Reverse of pack_url
'''
url_obj = parse.urlparse(url)
q_obj = parse.parse_qs(url_obj.query)
q_obj = map_dict_val(lambda l: l[0], q_obj)
return url_obj, q_obj
def pack_url(url_obj, q_obj):
'''
Get url string from URL object and Query object.
Reverse of unpack_url
'''
url_obj = url_obj._replace(query=parse.urlencode(q_obj))
url_string = parse.urlunparse(url_obj)
return url_string
def xpath_class(classes, operator="or"):
''''Format an XPath class condition'''
return f" {operator} ".join(
f"contains(concat(' ', normalize-space(@class),' '),' {cls} ')"
for cls in classes
)
def xpath_startswith(attr, s):
return f"@{attr} and starts-with(@{attr}, '{s}')"
def get_dir(obj):
path = inspect.getfile(obj)
return str(Path(path).parent.absolute())
def module_from_abs_path(name, path):
'''
See https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
# importing-a-source-file-directly
and https://docs.python.org/3/library/importlib.html
'''
spec = importlib.util.spec_from_file_location(name, path)
mdl = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mdl)
return mdl
def local_module(cls, filename):
'''
Search the directory of a module where `cls` is defined and
look for file `filename`.
Raises `ValueError` if the file does not exist.
Raises `ModuleNotFoundError` if the import failed.
'''
cls_dir = get_dir(cls)
file_path = Path(cls_dir, filename)
file_abs_path = str(file_path.absolute())
if not file_path.exists():
raise ValueError('File not found: {}'.format(file_abs_path))
import_path = cls.__module__.rsplit('.', 1)[0]
module_path = "{}.{}".format(import_path, file_path.stem)
module = module_from_abs_path(module_path, file_abs_path)
return module
def pairwise(iterable):
'''See https://docs.python.org/3/library/itertools.html#itertools-recipes'''
a, b = itertools.tee(iterable)
next(b, None)
return itertools.zip_longest(a, b, fillvalue=object())
def remove_adjacent_dup(iterable):
'''
See https://stackoverflow.com/a/34986013/9788634
'''
return [x for x, y in pairwise(x) if x != y]
def soft_update(d1, *dicts, dict_mode='override', list_mode='override', copy=False):
'''
Update dictonary entries, overriding values if they are primitives
or if the type changes.
Returns the updated dictionary. If `copy` is `True`, the updates are made
to a copy.
If the values are dictionaries then one of the following modes apply:
- `update` - keep the nested dictionaries, and only update entries
- `override` - replace the nested dictonaries with new values
If the values are lists then one of the following modes apply:
- `append` - join elements from all occurences
- `set` - add new list member only if it is not present in the list already
- `override` - replace the list with new value
'''
if copy:
out = {}
the_dicts = [d1, *dicts]
else:
out = d1
the_dicts = dicts
for d in the_dicts:
for k, v in d.items():
if k not in out:
out[k] = v
continue
elif type(v) != type(out[k]):
out[k] = v
elif isinstance(v, dict):
if dict_mode == 'update':
out[k].update(v)
elif dict_mode == 'override':
out[k] = v
else:
raise ValueError(f'Unknown dict mode "{dict_mode}"')
elif isinstance(v, list):
if list_mode == 'append':
out[k].extend(v)
elif list_mode == 'set':
out[k].extend([i for i in v if i not in out[k]])
elif list_mode == 'override':
out[k] = v
else:
raise ValueError(f'Unknown list mode "{list_mode}"')
else:
out[k] = v
return out
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(fillvalue=fillvalue, *args)
def flatten_json(y):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
|
else:
out[name[:-1]] = x
flatten(y)
return out
@functools.wraps(filter)
def lfilter(functionOrNone, iterable):
return list(filter(functionOrNone, iterable))
@functools.wraps(map)
def lmap(func, *iterables):
return list(map(func, *iterables))
@functools.wraps(map)
def map2str(*iterables):
return map(str, iterables)
@functools.wraps(map)
def map2int(*iterables):
return map(int, iterables)
def flatten(iterable):
t = type(iterable)
return t(i for grp in iterable for i in grp)
def lflatten(iterable):
return flatten(list(iterable))
def time_tag():
return datetime.now().strftime('%Y_%m_%d__%H_%M_%S')
def update_request_cookies(request, inplace=True, pattern=None):
c = cookies.SimpleCookie()
h = request.headers.copy() if not inplace else request.headers
for header in ['Cookie', 'Set-Cookie']:
for ck in h.getlist(header):
c.load(ck.decode('utf-8'))
h.pop('cookie', None)
h.pop('set-cookie', None)
for morsel in c.values():
if pattern is None or re.search(pattern, morsel.key):
h.appendlist('cookie', '{}={}'.format(morsel.key, morsel.value))
return h
def strip_accents(text):
'''https://stackoverflow.com/a/44433664/9788634'''
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)\
.encode('ascii', 'ignore')\
.decode("utf-8")
return str(text)
def dol2lot(dol):
'''Convert dict of lists to list of (key, value) tuples'''
lot = []
for k, val in dol.items():
try:
vals = iter(val)
except TypeError:
vals = [val]
lot.extend((k, v) for v in vals)
return lot
def lot2dol(lot):
'''Convert list of (key, value) tuples to dict of lists'''
dol = {}
for k, val in lot:
if k not in dol:
dol[k] = []
dol[k].append(val)
return dol
def conditional_deco(deco, predicate):
'''
Decorator that takes another decorator and a predicate,
and applies the second decorator to a function only if the predicate
evaluates to True.a[@href and starts-with(@href, '/events/csv')]
'''
def deco_(function):
@functools.wraps(function)
def inner(*args, **kwargs):
if predicate(*args, **kwargs):
return deco(function)(*args, **kwargs)
return function(*args, **kwargs)
return inner
return deco_
def is_url(url):
try:
result = parse.urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
def _parse_user_agent_url(url):
return requests.get(url).strip().split('\n')
def get_user_agent_list(brws):
# Taken from https://github.com/tamimibrahim17/List-of-user-agents
url_template = 'https://raw.githubusercontent.com/tamimibrahim17/List-of-user-agents/master/{}.txt'
ual = []
for brw in brws:
url = url_template.format(parse.quote(brw))
uas = [
ua
for ua in _parse_user_agent_url(url)[:-2]
if "user agents string" not in ua
]
ual.extend(uas)
tempfile.NamedTemporaryFile()
return ual
def get_proxy_list(urls=None, files=None):
proxies = [p for p_list in map(_parse_user_agent_url, urls) for p in p_list]
proxies.extend([
Path(f).read_text(encoding='utf-8') for f in files
])
return proxies | i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1 | conditional_block |
util.py | import unicodedata
from datetime import datetime
import functools
from http import cookies
import inspect
import importlib
import itertools
from pathlib import Path
import re
from urllib import parse
import tempfile
import requests
def string_combinations(
seed="",
pattern=None,
start=None,
end=None,
valid_chars=None,
variants=None,
min_length=1,
index=None
):
'''
Generates string combinations from `seed`, until the string is at least of `min_length`.
Only combinations that are lexicographically equal to or after `start` and equal to
or before `end` are returned, if `start` or `end` are given.
If `pattern` is given, only those combinations are returned that match the pattern.
`valid_chars` specifies which characters can be added to the `seed` string.
If any of the valid characters can have multiple variants (such as `c` being `c` or `ch`),
these can be specified by `variants`. `variants` must be either a list of tuples or dict.
Keys must match characters that have multiple variants. Values must be a list of these variants.
These variants can be of any length.
Index specifies which character of the `seed` string is being considered. If `index` is out of range
of `seed` string, the new character is appended to the `seed` string
EXAMPLE:
>>> string_combinations(
>>> seed="hi",
>>> start='ho',
>>> end='hq',
>>> variants={'o': ['oh', 'ok', 'obuh']},
>>> min_length=4,
>>> index=1
>>> )
# From string 'hi', generates all strings that start with 'ho' and 'hq' (inclusive) and everything in between,
>>>
# whereas the string combinations start at index 1 ("i"). Generated string are of length 4, possibly except when
>>>
# strings containing 'o' were generated variants with 'oh', 'ok', or 'obuh' instead of 'o'.
>>>
'''
if index is not None \
and len(seed) >= min_length \
and (not(start) or seed[:len(start)] >= start)\
and (not(end) or seed[:len(end)] <= end):
yield seed
return
seed = bytearray(seed, "ascii")
index = len(seed) if index is None else index
valid_chars = valid_chars or 'abcdefghijklmnopqrstuvwxzy0123456789'
# variants should be {char: [list, of, variants]} or [(char, [list, of, variants])]
variants = variants or []
variants = variants.items() if isinstance(variants, dict) else variants
start_reached = False
for s in valid_chars:
# Skip if start is given and has not been reached yet
# or if end is given and has been already reached
if (start and not(start_reached) and len(start) >= (index + 1) and s != start[index]):
continue
# Prevent going into depth if we already have minimum length
# and start or end conditions are shorter than that
elif index > min_length - 1 and (start and index > len(start) - 1) and (end and index > len(end) - 1):
continue
if not start_reached:
start_reached = True
# workaround for "ch" being considered a separate char.
# uses (temp_seed + variant) as a final name for all variants
curr_variants = [s]
for case, v in variants:
if s == case:
curr_variants.extend(v)
for v in curr_variants:
temp_seed = seed.copy()
# Modify seed with current variant
for i, c in enumerate(v):
if len(temp_seed) < index + 1 + i:
temp_seed.append(ord(c))
else:
temp_seed[index] = ord(c)
temp_seed = temp_seed.decode()
# End reached
if end and temp_seed[:len(end)] > end:
return
# Skip seed if it does not match the pattern
if pattern and not re.search(pattern, temp_seed):
continue
# Go one level deeper (1 char longer seed)
results = string_combinations(
seed=temp_seed,
valid_chars=valid_chars,
pattern=pattern,
start=start,
end=end,
variants=variants,
min_length=min_length,
index=index + 1
)
for res in results:
yield res
def map_dict_val(fn, d):
return {
k: fn(v)
for k, v in d.items()
}
def unpack_url(url):
'''
Get URL object and Query object from a url, as returned by
urllib.parse.urlparse and urllib.parse.parse_qs, respectively.
Reverse of pack_url
'''
url_obj = parse.urlparse(url)
q_obj = parse.parse_qs(url_obj.query)
q_obj = map_dict_val(lambda l: l[0], q_obj)
return url_obj, q_obj
def pack_url(url_obj, q_obj):
'''
Get url string from URL object and Query object.
Reverse of unpack_url
'''
url_obj = url_obj._replace(query=parse.urlencode(q_obj))
url_string = parse.urlunparse(url_obj)
return url_string
def xpath_class(classes, operator="or"):
''''Format an XPath class condition'''
return f" {operator} ".join(
f"contains(concat(' ', normalize-space(@class),' '),' {cls} ')"
for cls in classes
)
def xpath_startswith(attr, s):
return f"@{attr} and starts-with(@{attr}, '{s}')"
def get_dir(obj):
path = inspect.getfile(obj)
return str(Path(path).parent.absolute())
def | (name, path):
'''
See https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
# importing-a-source-file-directly
and https://docs.python.org/3/library/importlib.html
'''
spec = importlib.util.spec_from_file_location(name, path)
mdl = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mdl)
return mdl
def local_module(cls, filename):
'''
Search the directory of a module where `cls` is defined and
look for file `filename`.
Raises `ValueError` if the file does not exist.
Raises `ModuleNotFoundError` if the import failed.
'''
cls_dir = get_dir(cls)
file_path = Path(cls_dir, filename)
file_abs_path = str(file_path.absolute())
if not file_path.exists():
raise ValueError('File not found: {}'.format(file_abs_path))
import_path = cls.__module__.rsplit('.', 1)[0]
module_path = "{}.{}".format(import_path, file_path.stem)
module = module_from_abs_path(module_path, file_abs_path)
return module
def pairwise(iterable):
'''See https://docs.python.org/3/library/itertools.html#itertools-recipes'''
a, b = itertools.tee(iterable)
next(b, None)
return itertools.zip_longest(a, b, fillvalue=object())
def remove_adjacent_dup(iterable):
'''
See https://stackoverflow.com/a/34986013/9788634
'''
return [x for x, y in pairwise(x) if x != y]
def soft_update(d1, *dicts, dict_mode='override', list_mode='override', copy=False):
'''
Update dictonary entries, overriding values if they are primitives
or if the type changes.
Returns the updated dictionary. If `copy` is `True`, the updates are made
to a copy.
If the values are dictionaries then one of the following modes apply:
- `update` - keep the nested dictionaries, and only update entries
- `override` - replace the nested dictonaries with new values
If the values are lists then one of the following modes apply:
- `append` - join elements from all occurences
- `set` - add new list member only if it is not present in the list already
- `override` - replace the list with new value
'''
if copy:
out = {}
the_dicts = [d1, *dicts]
else:
out = d1
the_dicts = dicts
for d in the_dicts:
for k, v in d.items():
if k not in out:
out[k] = v
continue
elif type(v) != type(out[k]):
out[k] = v
elif isinstance(v, dict):
if dict_mode == 'update':
out[k].update(v)
elif dict_mode == 'override':
out[k] = v
else:
raise ValueError(f'Unknown dict mode "{dict_mode}"')
elif isinstance(v, list):
if list_mode == 'append':
out[k].extend(v)
elif list_mode == 'set':
out[k].extend([i for i in v if i not in out[k]])
elif list_mode == 'override':
out[k] = v
else:
raise ValueError(f'Unknown list mode "{list_mode}"')
else:
out[k] = v
return out
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(fillvalue=fillvalue, *args)
def flatten_json(y):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
flatten(y)
return out
@functools.wraps(filter)
def lfilter(functionOrNone, iterable):
return list(filter(functionOrNone, iterable))
@functools.wraps(map)
def lmap(func, *iterables):
return list(map(func, *iterables))
@functools.wraps(map)
def map2str(*iterables):
return map(str, iterables)
@functools.wraps(map)
def map2int(*iterables):
return map(int, iterables)
def flatten(iterable):
t = type(iterable)
return t(i for grp in iterable for i in grp)
def lflatten(iterable):
return flatten(list(iterable))
def time_tag():
return datetime.now().strftime('%Y_%m_%d__%H_%M_%S')
def update_request_cookies(request, inplace=True, pattern=None):
c = cookies.SimpleCookie()
h = request.headers.copy() if not inplace else request.headers
for header in ['Cookie', 'Set-Cookie']:
for ck in h.getlist(header):
c.load(ck.decode('utf-8'))
h.pop('cookie', None)
h.pop('set-cookie', None)
for morsel in c.values():
if pattern is None or re.search(pattern, morsel.key):
h.appendlist('cookie', '{}={}'.format(morsel.key, morsel.value))
return h
def strip_accents(text):
'''https://stackoverflow.com/a/44433664/9788634'''
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)\
.encode('ascii', 'ignore')\
.decode("utf-8")
return str(text)
def dol2lot(dol):
'''Convert dict of lists to list of (key, value) tuples'''
lot = []
for k, val in dol.items():
try:
vals = iter(val)
except TypeError:
vals = [val]
lot.extend((k, v) for v in vals)
return lot
def lot2dol(lot):
'''Convert list of (key, value) tuples to dict of lists'''
dol = {}
for k, val in lot:
if k not in dol:
dol[k] = []
dol[k].append(val)
return dol
def conditional_deco(deco, predicate):
'''
Decorator that takes another decorator and a predicate,
and applies the second decorator to a function only if the predicate
evaluates to True.a[@href and starts-with(@href, '/events/csv')]
'''
def deco_(function):
@functools.wraps(function)
def inner(*args, **kwargs):
if predicate(*args, **kwargs):
return deco(function)(*args, **kwargs)
return function(*args, **kwargs)
return inner
return deco_
def is_url(url):
try:
result = parse.urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
def _parse_user_agent_url(url):
return requests.get(url).strip().split('\n')
def get_user_agent_list(brws):
# Taken from https://github.com/tamimibrahim17/List-of-user-agents
url_template = 'https://raw.githubusercontent.com/tamimibrahim17/List-of-user-agents/master/{}.txt'
ual = []
for brw in brws:
url = url_template.format(parse.quote(brw))
uas = [
ua
for ua in _parse_user_agent_url(url)[:-2]
if "user agents string" not in ua
]
ual.extend(uas)
tempfile.NamedTemporaryFile()
return ual
def get_proxy_list(urls=None, files=None):
proxies = [p for p_list in map(_parse_user_agent_url, urls) for p in p_list]
proxies.extend([
Path(f).read_text(encoding='utf-8') for f in files
])
return proxies | module_from_abs_path | identifier_name |
website.go | package website
import (
"encoding/json"
"errors"
"fmt"
"html"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
_ "net/http/pprof"
"os/exec"
"path"
"path/filepath"
"runtime/debug"
"strconv"
"strings"
"time"
"vger/download"
"vger/native"
"vger/player/shared"
"vger/task"
"vger/thunder"
"vger/util"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
)
var upgrader = websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true
},
} //use default buffer size
func checkIfSubtitle(input string) bool {
return !(strings.Contains(input, "://") || strings.HasSuffix(input, ".torrent") || strings.HasPrefix(input, "magnet:"))
}
func checkIfSpeed(input string) (int64, bool) {
num, err := strconv.ParseUint(input, 10, 64)
if err != nil {
return 0, false
}
if num > 10*1024*1024 {
num = 10 * 1024 * 1024
}
return int64(num), true
}
func viewHandler(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "index.html")
}
func openHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("open \"%s\".\n", name)
// cmd := exec.Command("./player", fmt.Sprintf("-task=%s", name))
t, err := task.GetTask(name)
p := util.ReadConfig("dir")
if err == nil && t != nil {
p = path.Join(p, t.Subscribe)
}
cmd := exec.Command("open", path.Join(p, name))
err = cmd.Start()
if err != nil {
writeError(w, err)
}
}
func trashHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
log.Printf("trash \"%s\".\n", name)
err := task.DeleteTask(name)
if err != nil {
writeError(w, err)
return
} else {
err = shared.DeleteSubtitle(name)
if err != nil {
log.Print(err)
}
err = shared.DeletePlaying(name)
if err != nil {
log.Print(err)
}
}
}
func resumeHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("resume download \"%s\".\n", name)
if err := task.ResumeTask(name); err != nil {
writeError(w, err)
}
}
func newTaskHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
log.Print(err)
w.Write([]byte(html.EscapeString(err.Error())))
}
}()
vars := mux.Vars(r)
name := vars["name"]
input, _ := ioutil.ReadAll(r.Body)
if url := string(input); url != "" {
_, name2, size, _, err := download.GetDownloadInfo(url, false)
if err != nil {
writeError(w, err)
return
}
if name == "" {
name = name2
}
fmt.Printf("add download \"%s\".\nname: %s\n", url, name)
if t, err := task.GetTask(name); err == nil {
if t.Status == "Finished" {
w.Write([]byte("File has already been downloaded."))
} else if t.Status != "Downloading" && t.Status != "Stopped" {
if t.Status == "Deleted" {
log.Print("deleted task")
t.DownloadedSize = 0
}
t.URL = url
t.Size = size
t.Status = "Stopped"
if err := task.SaveTask(t); err != nil {
writeError(w, err)
}
}
} else if err := task.NewTask(name, url, size, "Stopped"); err != nil {
writeError(w, err)
} else {
native.SendNotification("V'ger add task", name)
}
}
}
func thunderNewHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
log.Print(err)
log.Print(string(debug.Stack()))
w.Write([]byte(html.EscapeString(err.Error())))
}
}()
input, _ := ioutil.ReadAll(r.Body)
m := make(map[string]string)
err := json.Unmarshal(input, &m)
if err != nil {
writeError(w, err)
return
}
url := string(m["url"])
verifycode := string(m["verifycode"])
log.Print("thunderNewHandler:", url, verifycode)
files, err := thunder.NewTask(url, verifycode)
if err == nil {
writeJson(w, files)
} else {
writeError(w, err)
}
}
func thunderVerifyCodeHandler(w http.ResponseWriter, r *http.Request) {
h := w.Header()
h.Add("Content-Type", "image/jpeg")
thunder.WriteValidationCode(w)
}
func thunderTorrentHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
writeError(w, err)
}
}()
// res, _ := httputil.DumpRequest(r, true)
// fmt.Println(string(res))
fmt.Println("thunder torrent handler")
f, _, err := r.FormFile("torrent")
if err != nil {
writeError(w, err)
return
}
input, _ := ioutil.ReadAll(f)
// thunder.Login(config["thunder-user"], config["thunder-password"])
files, err := thunder.NewTaskWithTorrent(input)
if err == nil {
writeJson(w, files)
} else {
writeError(w, err)
}
}
func stopHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("stop download \"%s\".\n", name)
if err := task.StopTask(name); err != nil {
writeError(w, err)
}
fmt.Println("stop download finish")
}
func limitHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
input := vars["speed"]
speed, _ := strconv.Atoi(string(input))
fmt.Printf("limit speed %dKB/s.\n", speed)
util.SaveConfig("max-speed", input)
if err := download.LimitSpeed(speed); err != nil {
writeError(w, err)
}
}
func configHandler(w http.ResponseWriter, r *http.Request) {
configs := util.ReadAllConfigs()
writeJson(w, configs)
}
func configSimultaneousHandler(w http.ResponseWriter, r *http.Request) {
input, _ := ioutil.ReadAll(r.Body)
cnt, _ := strconv.Atoi(string(input))
if cnt > 0 {
// oldcnt := util.ReadIntConfig("simultaneous-downloads")
downloadingCnt := task.NumOfDownloadingTasks()
for i := cnt; i < downloadingCnt; i++ {
err := task.QueueDownloadingTask()
if err != nil {
log.Print(err)
}
}
for i := downloadingCnt; i < cnt; i++ {
err, _ := task.ResumeNextTask()
if err != nil {
log.Print(err)
}
}
util.SaveConfig("simultaneous-downloads", string(input))
} else {
writeError(w, fmt.Errorf("Simultaneous must greater than zero."))
}
}
func setAutoShutdownHandler(w http.ResponseWriter, r *http.Request) {
input, _ := ioutil.ReadAll(r.Body)
util.SaveConfig("shutdown-after-finish", string(input))
// fmt.Printf("Autoshutdown task \"%s\" %s.", name, autoshutdown)
// task.SetAutoshutdown(name, autoshutdown == "on")
}
func progressHandler(w http.ResponseWriter, r *http.Request) {
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
writeError(w, err)
return
}
tasks := task.GetTasks()
cnt := 50
tks := make([]*task.Task, 0)
for _, t := range tasks {
tks = append(tks, t)
if len(tks) == cnt {
err := ws.WriteJSON(tks) //writeJson(ws, tks)
if err != nil {
return
}
tks = tks[0:0]
}
}
if len(tks) > 0 {
err := ws.WriteJSON(tks) //writeJson(ws, tks)
if err != nil {
return
}
}
ch := make(chan *task.Task, 20)
// log.Println("website watch task change ", ch)
task.WatchChange(ch)
defer task.RemoveWatch(ch)
// ws.SetDeadline(time.Now().Add(21 * time.Second))
for {
select {
case t := <-ch:
err := ws.WriteJSON([]*task.Task{t}) //writeJson(ws, []*task.Task{t})
if err != nil {
return
}
break
case <-time.After(time.Second * 20):
//close connection every 20 seconds
//if client is alive, it should reconnect to server
//prevent socket connection & goroutine leak
ws.Close()
return
}
}
}
func playHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("play \"%s\".\n", name)
// playerPath := util.ReadConfig("video-player")
t, err := task.GetTask(name)
if err != nil {
writeError(w, err)
return
}
fmt.Printf("open %s", fmt.Sprintf("vgerplayer://%s", t.URL))
cmd := exec.Command("open", fmt.Sprintf("vgerplayer://%s", t.URL)) //playerPath, "--args", t.URL)
// config := util.ReadAllConfigs()
// playerPath := config["video-player"]
// util.KillProcess(playerPath)
// cmd := exec.Command("open", playerPath, "--args", "http://"+config["server"]+"/video/"+name)
err = cmd.Start()
if err != nil {
writeError(w, err)
}
}
func writeError(w http.ResponseWriter, err error) {
log.Print(err)
log.Print(string(debug.Stack()))
w.Write([]byte(err.Error()))
}
func writeJson(w io.Writer, obj interface{}) {
text, err := json.Marshal(obj)
if err != nil {
log.Print(err)
} else {
_, err := w.Write(text)
if err != nil {
log.Print(err)
}
}
}
func videoHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
t, err := task.GetTask(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if t.Status == "Downloading" {
err := task.StopTask(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
size := t.Size
code := http.StatusOK
// If Content-Type isn't set, use the file's extension to find it.
ctype := w.Header().Get("Content-Type")
if ctype == "" {
ctype = mime.TypeByExtension(filepath.Ext(name))
if ctype != "" {
w.Header().Set("Content-Type", ctype)
} else {
w.Header().Set("Content-Type", "application/octet-stream")
}
}
sendSize := size
ranges, err := parseRange(r.Header.Get("Range"), size)
if err != nil {
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
ra := ranges[0]
sendSize = ra.length
code = http.StatusPartialContent
w.Header().Set("Content-Range", ra.contentRange(size))
w.Header().Set("Accept-Ranges", "bytes")
if w.Header().Get("Content-Encoding") == "" {
w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
}
w.WriteHeader(code)
download.Play(t, w, ra.start, ra.start+sendSize)
}
type httpRange struct {
start, length int64
}
func (r httpRange) contentRange(size int64) string {
return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
}
// parseRange parses a Range header string as per RFC 2616.
func parseRange(s string, size int64) ([]httpRange, error) {
if s == "" {
return nil, nil // header not present
}
const b = "bytes="
if !strings.HasPrefix(s, b) {
return nil, errors.New("invalid range")
}
var ranges []httpRange
for _, ra := range strings.Split(s[len(b):], ",") {
ra = strings.TrimSpace(ra)
if ra == "" {
continue
}
i := strings.Index(ra, "-")
if i < 0 {
return nil, errors.New("invalid range")
}
start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
var r httpRange
if start == "" {
// If no start is specified, end specifies the
// range start relative to the end of the file.
i, err := strconv.ParseInt(end, 10, 64)
if err != nil {
return nil, errors.New("invalid range")
}
if i > size {
i = size
}
r.start = size - i
r.length = size - r.start
} else {
i, err := strconv.ParseInt(start, 10, 64)
if err != nil || i > size || i < 0 {
return nil, errors.New("invalid range")
}
r.start = i
if end == "" {
// If no end is specified, range extends to end of the file.
r.length = size - r.start
} else {
i, err := strconv.ParseInt(end, 10, 64)
if err != nil || r.start > i {
return nil, errors.New("invalid range")
}
if i >= size {
i = size - 1
}
r.length = i - r.start + 1
}
}
ranges = append(ranges, r)
}
return ranges, nil
}
type MyServer struct {
r *mux.Router
}
func (s MyServer) ServeHTTP(w http.ResponseWriter, req *http.Request) |
func Run(isDebug bool) {
if !isDebug {
go Monitor()
}
err, _ := util.MakeSurePathExists(path.Join(util.ReadConfig("dir"), "subs"))
if err != nil {
log.Fatal(err)
}
r := mux.NewRouter()
r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./index.html")
})
r.HandleFunc("/newclient", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./newindex.html")
})
r.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./assets/favicon.png")
})
r.HandleFunc("/open/{name}", openHandler)
r.HandleFunc("/play/{name}", playHandler)
r.HandleFunc("/video/{name}", videoHandler)
r.HandleFunc("/resume/{name}", resumeHandler)
r.HandleFunc("/stop/{name}", stopHandler)
r.HandleFunc("/progress", progressHandler)
r.HandleFunc("/new/{name}", newTaskHandler)
r.HandleFunc("/new", newTaskHandler)
r.HandleFunc("/limit/{speed:[0-9]+}", limitHandler)
r.HandleFunc("/config", configHandler)
r.HandleFunc("/config/simultaneous", configSimultaneousHandler)
r.HandleFunc("/trash/{name}", trashHandler)
r.HandleFunc("/autoshutdown", setAutoShutdownHandler)
// http.HandleFunc("/queue/", queueHandler)
r.HandleFunc("/subscribe/new", subscribeNewHandler)
r.HandleFunc("/subscribe", subscribeHandler)
r.HandleFunc("/subscribe/banner/{name}", subscribeBannerHandler)
r.HandleFunc("/unsubscribe/{name}", unsubscribeHandler)
r.HandleFunc("/thunder/new", thunderNewHandler)
r.HandleFunc("/thunder/torrent", thunderTorrentHandler)
r.HandleFunc("/thunder/verifycode", thunderVerifyCodeHandler)
r.HandleFunc("/thunder/verifycode/", thunderVerifyCodeHandler)
r.HandleFunc("/subtitles/search/{movie}", subtitlesSearchHandler)
r.HandleFunc("/subtitles/download/{movie}", subtitlesDownloadHandler)
r.HandleFunc("/app/status", appStatusHandler)
r.HandleFunc("/app/shutdown", appShutdownHandler)
r.HandleFunc("/app/gc", appGCHandler)
r.HandleFunc("/app/cookie/{domain}", appCookieHandler)
r.PathPrefix("/assets/").Handler(http.FileServer(http.Dir(".")))
http.Handle("/", MyServer{r})
server := util.ReadConfig("server")
log.Print("server ", server, " started.")
err = http.ListenAndServe(server, nil)
if err != nil {
log.Fatal(err)
}
}
| {
w.Header().Set("Access-Control-Allow-Origin", "*")
s.r.ServeHTTP(w, req)
} | identifier_body |
website.go | package website
import (
"encoding/json"
"errors"
"fmt"
"html"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
_ "net/http/pprof"
"os/exec"
"path"
"path/filepath"
"runtime/debug"
"strconv"
"strings"
"time"
"vger/download"
"vger/native"
"vger/player/shared"
"vger/task"
"vger/thunder"
"vger/util"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
)
var upgrader = websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true
},
} //use default buffer size
func checkIfSubtitle(input string) bool {
return !(strings.Contains(input, "://") || strings.HasSuffix(input, ".torrent") || strings.HasPrefix(input, "magnet:"))
}
func checkIfSpeed(input string) (int64, bool) {
num, err := strconv.ParseUint(input, 10, 64)
if err != nil {
return 0, false
}
if num > 10*1024*1024 {
num = 10 * 1024 * 1024
}
return int64(num), true
}
func viewHandler(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "index.html")
}
func openHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("open \"%s\".\n", name)
// cmd := exec.Command("./player", fmt.Sprintf("-task=%s", name))
t, err := task.GetTask(name)
p := util.ReadConfig("dir")
if err == nil && t != nil {
p = path.Join(p, t.Subscribe)
}
cmd := exec.Command("open", path.Join(p, name))
err = cmd.Start()
if err != nil {
writeError(w, err)
}
}
func trashHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
log.Printf("trash \"%s\".\n", name)
err := task.DeleteTask(name)
if err != nil {
writeError(w, err)
return
} else {
err = shared.DeleteSubtitle(name)
if err != nil {
log.Print(err)
}
err = shared.DeletePlaying(name)
if err != nil {
log.Print(err)
}
}
}
func resumeHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("resume download \"%s\".\n", name)
if err := task.ResumeTask(name); err != nil {
writeError(w, err)
}
}
func newTaskHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
log.Print(err)
w.Write([]byte(html.EscapeString(err.Error())))
}
}()
vars := mux.Vars(r)
name := vars["name"]
input, _ := ioutil.ReadAll(r.Body)
if url := string(input); url != "" {
_, name2, size, _, err := download.GetDownloadInfo(url, false)
if err != nil {
writeError(w, err)
return
}
if name == "" {
name = name2
}
fmt.Printf("add download \"%s\".\nname: %s\n", url, name)
if t, err := task.GetTask(name); err == nil {
if t.Status == "Finished" {
w.Write([]byte("File has already been downloaded."))
} else if t.Status != "Downloading" && t.Status != "Stopped" {
if t.Status == "Deleted" {
log.Print("deleted task")
t.DownloadedSize = 0
}
t.URL = url
t.Size = size
t.Status = "Stopped"
if err := task.SaveTask(t); err != nil {
writeError(w, err)
}
}
} else if err := task.NewTask(name, url, size, "Stopped"); err != nil {
writeError(w, err)
} else {
native.SendNotification("V'ger add task", name)
}
}
}
func | (w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
log.Print(err)
log.Print(string(debug.Stack()))
w.Write([]byte(html.EscapeString(err.Error())))
}
}()
input, _ := ioutil.ReadAll(r.Body)
m := make(map[string]string)
err := json.Unmarshal(input, &m)
if err != nil {
writeError(w, err)
return
}
url := string(m["url"])
verifycode := string(m["verifycode"])
log.Print("thunderNewHandler:", url, verifycode)
files, err := thunder.NewTask(url, verifycode)
if err == nil {
writeJson(w, files)
} else {
writeError(w, err)
}
}
func thunderVerifyCodeHandler(w http.ResponseWriter, r *http.Request) {
h := w.Header()
h.Add("Content-Type", "image/jpeg")
thunder.WriteValidationCode(w)
}
func thunderTorrentHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
writeError(w, err)
}
}()
// res, _ := httputil.DumpRequest(r, true)
// fmt.Println(string(res))
fmt.Println("thunder torrent handler")
f, _, err := r.FormFile("torrent")
if err != nil {
writeError(w, err)
return
}
input, _ := ioutil.ReadAll(f)
// thunder.Login(config["thunder-user"], config["thunder-password"])
files, err := thunder.NewTaskWithTorrent(input)
if err == nil {
writeJson(w, files)
} else {
writeError(w, err)
}
}
func stopHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("stop download \"%s\".\n", name)
if err := task.StopTask(name); err != nil {
writeError(w, err)
}
fmt.Println("stop download finish")
}
func limitHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
input := vars["speed"]
speed, _ := strconv.Atoi(string(input))
fmt.Printf("limit speed %dKB/s.\n", speed)
util.SaveConfig("max-speed", input)
if err := download.LimitSpeed(speed); err != nil {
writeError(w, err)
}
}
func configHandler(w http.ResponseWriter, r *http.Request) {
configs := util.ReadAllConfigs()
writeJson(w, configs)
}
func configSimultaneousHandler(w http.ResponseWriter, r *http.Request) {
input, _ := ioutil.ReadAll(r.Body)
cnt, _ := strconv.Atoi(string(input))
if cnt > 0 {
// oldcnt := util.ReadIntConfig("simultaneous-downloads")
downloadingCnt := task.NumOfDownloadingTasks()
for i := cnt; i < downloadingCnt; i++ {
err := task.QueueDownloadingTask()
if err != nil {
log.Print(err)
}
}
for i := downloadingCnt; i < cnt; i++ {
err, _ := task.ResumeNextTask()
if err != nil {
log.Print(err)
}
}
util.SaveConfig("simultaneous-downloads", string(input))
} else {
writeError(w, fmt.Errorf("Simultaneous must greater than zero."))
}
}
func setAutoShutdownHandler(w http.ResponseWriter, r *http.Request) {
input, _ := ioutil.ReadAll(r.Body)
util.SaveConfig("shutdown-after-finish", string(input))
// fmt.Printf("Autoshutdown task \"%s\" %s.", name, autoshutdown)
// task.SetAutoshutdown(name, autoshutdown == "on")
}
func progressHandler(w http.ResponseWriter, r *http.Request) {
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
writeError(w, err)
return
}
tasks := task.GetTasks()
cnt := 50
tks := make([]*task.Task, 0)
for _, t := range tasks {
tks = append(tks, t)
if len(tks) == cnt {
err := ws.WriteJSON(tks) //writeJson(ws, tks)
if err != nil {
return
}
tks = tks[0:0]
}
}
if len(tks) > 0 {
err := ws.WriteJSON(tks) //writeJson(ws, tks)
if err != nil {
return
}
}
ch := make(chan *task.Task, 20)
// log.Println("website watch task change ", ch)
task.WatchChange(ch)
defer task.RemoveWatch(ch)
// ws.SetDeadline(time.Now().Add(21 * time.Second))
for {
select {
case t := <-ch:
err := ws.WriteJSON([]*task.Task{t}) //writeJson(ws, []*task.Task{t})
if err != nil {
return
}
break
case <-time.After(time.Second * 20):
//close connection every 20 seconds
//if client is alive, it should reconnect to server
//prevent socket connection & goroutine leak
ws.Close()
return
}
}
}
func playHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("play \"%s\".\n", name)
// playerPath := util.ReadConfig("video-player")
t, err := task.GetTask(name)
if err != nil {
writeError(w, err)
return
}
fmt.Printf("open %s", fmt.Sprintf("vgerplayer://%s", t.URL))
cmd := exec.Command("open", fmt.Sprintf("vgerplayer://%s", t.URL)) //playerPath, "--args", t.URL)
// config := util.ReadAllConfigs()
// playerPath := config["video-player"]
// util.KillProcess(playerPath)
// cmd := exec.Command("open", playerPath, "--args", "http://"+config["server"]+"/video/"+name)
err = cmd.Start()
if err != nil {
writeError(w, err)
}
}
func writeError(w http.ResponseWriter, err error) {
log.Print(err)
log.Print(string(debug.Stack()))
w.Write([]byte(err.Error()))
}
func writeJson(w io.Writer, obj interface{}) {
text, err := json.Marshal(obj)
if err != nil {
log.Print(err)
} else {
_, err := w.Write(text)
if err != nil {
log.Print(err)
}
}
}
func videoHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
t, err := task.GetTask(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if t.Status == "Downloading" {
err := task.StopTask(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
size := t.Size
code := http.StatusOK
// If Content-Type isn't set, use the file's extension to find it.
ctype := w.Header().Get("Content-Type")
if ctype == "" {
ctype = mime.TypeByExtension(filepath.Ext(name))
if ctype != "" {
w.Header().Set("Content-Type", ctype)
} else {
w.Header().Set("Content-Type", "application/octet-stream")
}
}
sendSize := size
ranges, err := parseRange(r.Header.Get("Range"), size)
if err != nil {
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
ra := ranges[0]
sendSize = ra.length
code = http.StatusPartialContent
w.Header().Set("Content-Range", ra.contentRange(size))
w.Header().Set("Accept-Ranges", "bytes")
if w.Header().Get("Content-Encoding") == "" {
w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
}
w.WriteHeader(code)
download.Play(t, w, ra.start, ra.start+sendSize)
}
type httpRange struct {
start, length int64
}
func (r httpRange) contentRange(size int64) string {
return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
}
// parseRange parses a Range header string as per RFC 2616.
func parseRange(s string, size int64) ([]httpRange, error) {
if s == "" {
return nil, nil // header not present
}
const b = "bytes="
if !strings.HasPrefix(s, b) {
return nil, errors.New("invalid range")
}
var ranges []httpRange
for _, ra := range strings.Split(s[len(b):], ",") {
ra = strings.TrimSpace(ra)
if ra == "" {
continue
}
i := strings.Index(ra, "-")
if i < 0 {
return nil, errors.New("invalid range")
}
start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
var r httpRange
if start == "" {
// If no start is specified, end specifies the
// range start relative to the end of the file.
i, err := strconv.ParseInt(end, 10, 64)
if err != nil {
return nil, errors.New("invalid range")
}
if i > size {
i = size
}
r.start = size - i
r.length = size - r.start
} else {
i, err := strconv.ParseInt(start, 10, 64)
if err != nil || i > size || i < 0 {
return nil, errors.New("invalid range")
}
r.start = i
if end == "" {
// If no end is specified, range extends to end of the file.
r.length = size - r.start
} else {
i, err := strconv.ParseInt(end, 10, 64)
if err != nil || r.start > i {
return nil, errors.New("invalid range")
}
if i >= size {
i = size - 1
}
r.length = i - r.start + 1
}
}
ranges = append(ranges, r)
}
return ranges, nil
}
type MyServer struct {
r *mux.Router
}
func (s MyServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
s.r.ServeHTTP(w, req)
}
func Run(isDebug bool) {
if !isDebug {
go Monitor()
}
err, _ := util.MakeSurePathExists(path.Join(util.ReadConfig("dir"), "subs"))
if err != nil {
log.Fatal(err)
}
r := mux.NewRouter()
r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./index.html")
})
r.HandleFunc("/newclient", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./newindex.html")
})
r.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./assets/favicon.png")
})
r.HandleFunc("/open/{name}", openHandler)
r.HandleFunc("/play/{name}", playHandler)
r.HandleFunc("/video/{name}", videoHandler)
r.HandleFunc("/resume/{name}", resumeHandler)
r.HandleFunc("/stop/{name}", stopHandler)
r.HandleFunc("/progress", progressHandler)
r.HandleFunc("/new/{name}", newTaskHandler)
r.HandleFunc("/new", newTaskHandler)
r.HandleFunc("/limit/{speed:[0-9]+}", limitHandler)
r.HandleFunc("/config", configHandler)
r.HandleFunc("/config/simultaneous", configSimultaneousHandler)
r.HandleFunc("/trash/{name}", trashHandler)
r.HandleFunc("/autoshutdown", setAutoShutdownHandler)
// http.HandleFunc("/queue/", queueHandler)
r.HandleFunc("/subscribe/new", subscribeNewHandler)
r.HandleFunc("/subscribe", subscribeHandler)
r.HandleFunc("/subscribe/banner/{name}", subscribeBannerHandler)
r.HandleFunc("/unsubscribe/{name}", unsubscribeHandler)
r.HandleFunc("/thunder/new", thunderNewHandler)
r.HandleFunc("/thunder/torrent", thunderTorrentHandler)
r.HandleFunc("/thunder/verifycode", thunderVerifyCodeHandler)
r.HandleFunc("/thunder/verifycode/", thunderVerifyCodeHandler)
r.HandleFunc("/subtitles/search/{movie}", subtitlesSearchHandler)
r.HandleFunc("/subtitles/download/{movie}", subtitlesDownloadHandler)
r.HandleFunc("/app/status", appStatusHandler)
r.HandleFunc("/app/shutdown", appShutdownHandler)
r.HandleFunc("/app/gc", appGCHandler)
r.HandleFunc("/app/cookie/{domain}", appCookieHandler)
r.PathPrefix("/assets/").Handler(http.FileServer(http.Dir(".")))
http.Handle("/", MyServer{r})
server := util.ReadConfig("server")
log.Print("server ", server, " started.")
err = http.ListenAndServe(server, nil)
if err != nil {
log.Fatal(err)
}
}
| thunderNewHandler | identifier_name |
website.go | package website
import (
"encoding/json"
"errors"
"fmt"
"html"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
_ "net/http/pprof"
"os/exec"
"path"
"path/filepath"
"runtime/debug"
"strconv"
"strings"
"time"
"vger/download"
"vger/native"
"vger/player/shared"
"vger/task"
"vger/thunder"
"vger/util"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
)
var upgrader = websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true
},
} //use default buffer size |
func checkIfSubtitle(input string) bool {
return !(strings.Contains(input, "://") || strings.HasSuffix(input, ".torrent") || strings.HasPrefix(input, "magnet:"))
}
func checkIfSpeed(input string) (int64, bool) {
num, err := strconv.ParseUint(input, 10, 64)
if err != nil {
return 0, false
}
if num > 10*1024*1024 {
num = 10 * 1024 * 1024
}
return int64(num), true
}
func viewHandler(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "index.html")
}
func openHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("open \"%s\".\n", name)
// cmd := exec.Command("./player", fmt.Sprintf("-task=%s", name))
t, err := task.GetTask(name)
p := util.ReadConfig("dir")
if err == nil && t != nil {
p = path.Join(p, t.Subscribe)
}
cmd := exec.Command("open", path.Join(p, name))
err = cmd.Start()
if err != nil {
writeError(w, err)
}
}
func trashHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
log.Printf("trash \"%s\".\n", name)
err := task.DeleteTask(name)
if err != nil {
writeError(w, err)
return
} else {
err = shared.DeleteSubtitle(name)
if err != nil {
log.Print(err)
}
err = shared.DeletePlaying(name)
if err != nil {
log.Print(err)
}
}
}
func resumeHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("resume download \"%s\".\n", name)
if err := task.ResumeTask(name); err != nil {
writeError(w, err)
}
}
func newTaskHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
log.Print(err)
w.Write([]byte(html.EscapeString(err.Error())))
}
}()
vars := mux.Vars(r)
name := vars["name"]
input, _ := ioutil.ReadAll(r.Body)
if url := string(input); url != "" {
_, name2, size, _, err := download.GetDownloadInfo(url, false)
if err != nil {
writeError(w, err)
return
}
if name == "" {
name = name2
}
fmt.Printf("add download \"%s\".\nname: %s\n", url, name)
if t, err := task.GetTask(name); err == nil {
if t.Status == "Finished" {
w.Write([]byte("File has already been downloaded."))
} else if t.Status != "Downloading" && t.Status != "Stopped" {
if t.Status == "Deleted" {
log.Print("deleted task")
t.DownloadedSize = 0
}
t.URL = url
t.Size = size
t.Status = "Stopped"
if err := task.SaveTask(t); err != nil {
writeError(w, err)
}
}
} else if err := task.NewTask(name, url, size, "Stopped"); err != nil {
writeError(w, err)
} else {
native.SendNotification("V'ger add task", name)
}
}
}
func thunderNewHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
log.Print(err)
log.Print(string(debug.Stack()))
w.Write([]byte(html.EscapeString(err.Error())))
}
}()
input, _ := ioutil.ReadAll(r.Body)
m := make(map[string]string)
err := json.Unmarshal(input, &m)
if err != nil {
writeError(w, err)
return
}
url := string(m["url"])
verifycode := string(m["verifycode"])
log.Print("thunderNewHandler:", url, verifycode)
files, err := thunder.NewTask(url, verifycode)
if err == nil {
writeJson(w, files)
} else {
writeError(w, err)
}
}
func thunderVerifyCodeHandler(w http.ResponseWriter, r *http.Request) {
h := w.Header()
h.Add("Content-Type", "image/jpeg")
thunder.WriteValidationCode(w)
}
func thunderTorrentHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
writeError(w, err)
}
}()
// res, _ := httputil.DumpRequest(r, true)
// fmt.Println(string(res))
fmt.Println("thunder torrent handler")
f, _, err := r.FormFile("torrent")
if err != nil {
writeError(w, err)
return
}
input, _ := ioutil.ReadAll(f)
// thunder.Login(config["thunder-user"], config["thunder-password"])
files, err := thunder.NewTaskWithTorrent(input)
if err == nil {
writeJson(w, files)
} else {
writeError(w, err)
}
}
func stopHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("stop download \"%s\".\n", name)
if err := task.StopTask(name); err != nil {
writeError(w, err)
}
fmt.Println("stop download finish")
}
func limitHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
input := vars["speed"]
speed, _ := strconv.Atoi(string(input))
fmt.Printf("limit speed %dKB/s.\n", speed)
util.SaveConfig("max-speed", input)
if err := download.LimitSpeed(speed); err != nil {
writeError(w, err)
}
}
func configHandler(w http.ResponseWriter, r *http.Request) {
configs := util.ReadAllConfigs()
writeJson(w, configs)
}
func configSimultaneousHandler(w http.ResponseWriter, r *http.Request) {
input, _ := ioutil.ReadAll(r.Body)
cnt, _ := strconv.Atoi(string(input))
if cnt > 0 {
// oldcnt := util.ReadIntConfig("simultaneous-downloads")
downloadingCnt := task.NumOfDownloadingTasks()
for i := cnt; i < downloadingCnt; i++ {
err := task.QueueDownloadingTask()
if err != nil {
log.Print(err)
}
}
for i := downloadingCnt; i < cnt; i++ {
err, _ := task.ResumeNextTask()
if err != nil {
log.Print(err)
}
}
util.SaveConfig("simultaneous-downloads", string(input))
} else {
writeError(w, fmt.Errorf("Simultaneous must greater than zero."))
}
}
func setAutoShutdownHandler(w http.ResponseWriter, r *http.Request) {
input, _ := ioutil.ReadAll(r.Body)
util.SaveConfig("shutdown-after-finish", string(input))
// fmt.Printf("Autoshutdown task \"%s\" %s.", name, autoshutdown)
// task.SetAutoshutdown(name, autoshutdown == "on")
}
func progressHandler(w http.ResponseWriter, r *http.Request) {
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
writeError(w, err)
return
}
tasks := task.GetTasks()
cnt := 50
tks := make([]*task.Task, 0)
for _, t := range tasks {
tks = append(tks, t)
if len(tks) == cnt {
err := ws.WriteJSON(tks) //writeJson(ws, tks)
if err != nil {
return
}
tks = tks[0:0]
}
}
if len(tks) > 0 {
err := ws.WriteJSON(tks) //writeJson(ws, tks)
if err != nil {
return
}
}
ch := make(chan *task.Task, 20)
// log.Println("website watch task change ", ch)
task.WatchChange(ch)
defer task.RemoveWatch(ch)
// ws.SetDeadline(time.Now().Add(21 * time.Second))
for {
select {
case t := <-ch:
err := ws.WriteJSON([]*task.Task{t}) //writeJson(ws, []*task.Task{t})
if err != nil {
return
}
break
case <-time.After(time.Second * 20):
//close connection every 20 seconds
//if client is alive, it should reconnect to server
//prevent socket connection & goroutine leak
ws.Close()
return
}
}
}
func playHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("play \"%s\".\n", name)
// playerPath := util.ReadConfig("video-player")
t, err := task.GetTask(name)
if err != nil {
writeError(w, err)
return
}
fmt.Printf("open %s", fmt.Sprintf("vgerplayer://%s", t.URL))
cmd := exec.Command("open", fmt.Sprintf("vgerplayer://%s", t.URL)) //playerPath, "--args", t.URL)
// config := util.ReadAllConfigs()
// playerPath := config["video-player"]
// util.KillProcess(playerPath)
// cmd := exec.Command("open", playerPath, "--args", "http://"+config["server"]+"/video/"+name)
err = cmd.Start()
if err != nil {
writeError(w, err)
}
}
func writeError(w http.ResponseWriter, err error) {
log.Print(err)
log.Print(string(debug.Stack()))
w.Write([]byte(err.Error()))
}
func writeJson(w io.Writer, obj interface{}) {
text, err := json.Marshal(obj)
if err != nil {
log.Print(err)
} else {
_, err := w.Write(text)
if err != nil {
log.Print(err)
}
}
}
func videoHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
t, err := task.GetTask(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if t.Status == "Downloading" {
err := task.StopTask(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
size := t.Size
code := http.StatusOK
// If Content-Type isn't set, use the file's extension to find it.
ctype := w.Header().Get("Content-Type")
if ctype == "" {
ctype = mime.TypeByExtension(filepath.Ext(name))
if ctype != "" {
w.Header().Set("Content-Type", ctype)
} else {
w.Header().Set("Content-Type", "application/octet-stream")
}
}
sendSize := size
ranges, err := parseRange(r.Header.Get("Range"), size)
if err != nil {
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
ra := ranges[0]
sendSize = ra.length
code = http.StatusPartialContent
w.Header().Set("Content-Range", ra.contentRange(size))
w.Header().Set("Accept-Ranges", "bytes")
if w.Header().Get("Content-Encoding") == "" {
w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
}
w.WriteHeader(code)
download.Play(t, w, ra.start, ra.start+sendSize)
}
type httpRange struct {
start, length int64
}
func (r httpRange) contentRange(size int64) string {
return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
}
// parseRange parses a Range header string as per RFC 2616.
func parseRange(s string, size int64) ([]httpRange, error) {
if s == "" {
return nil, nil // header not present
}
const b = "bytes="
if !strings.HasPrefix(s, b) {
return nil, errors.New("invalid range")
}
var ranges []httpRange
for _, ra := range strings.Split(s[len(b):], ",") {
ra = strings.TrimSpace(ra)
if ra == "" {
continue
}
i := strings.Index(ra, "-")
if i < 0 {
return nil, errors.New("invalid range")
}
start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
var r httpRange
if start == "" {
// If no start is specified, end specifies the
// range start relative to the end of the file.
i, err := strconv.ParseInt(end, 10, 64)
if err != nil {
return nil, errors.New("invalid range")
}
if i > size {
i = size
}
r.start = size - i
r.length = size - r.start
} else {
i, err := strconv.ParseInt(start, 10, 64)
if err != nil || i > size || i < 0 {
return nil, errors.New("invalid range")
}
r.start = i
if end == "" {
// If no end is specified, range extends to end of the file.
r.length = size - r.start
} else {
i, err := strconv.ParseInt(end, 10, 64)
if err != nil || r.start > i {
return nil, errors.New("invalid range")
}
if i >= size {
i = size - 1
}
r.length = i - r.start + 1
}
}
ranges = append(ranges, r)
}
return ranges, nil
}
type MyServer struct {
r *mux.Router
}
func (s MyServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
s.r.ServeHTTP(w, req)
}
func Run(isDebug bool) {
if !isDebug {
go Monitor()
}
err, _ := util.MakeSurePathExists(path.Join(util.ReadConfig("dir"), "subs"))
if err != nil {
log.Fatal(err)
}
r := mux.NewRouter()
r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./index.html")
})
r.HandleFunc("/newclient", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./newindex.html")
})
r.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./assets/favicon.png")
})
r.HandleFunc("/open/{name}", openHandler)
r.HandleFunc("/play/{name}", playHandler)
r.HandleFunc("/video/{name}", videoHandler)
r.HandleFunc("/resume/{name}", resumeHandler)
r.HandleFunc("/stop/{name}", stopHandler)
r.HandleFunc("/progress", progressHandler)
r.HandleFunc("/new/{name}", newTaskHandler)
r.HandleFunc("/new", newTaskHandler)
r.HandleFunc("/limit/{speed:[0-9]+}", limitHandler)
r.HandleFunc("/config", configHandler)
r.HandleFunc("/config/simultaneous", configSimultaneousHandler)
r.HandleFunc("/trash/{name}", trashHandler)
r.HandleFunc("/autoshutdown", setAutoShutdownHandler)
// http.HandleFunc("/queue/", queueHandler)
r.HandleFunc("/subscribe/new", subscribeNewHandler)
r.HandleFunc("/subscribe", subscribeHandler)
r.HandleFunc("/subscribe/banner/{name}", subscribeBannerHandler)
r.HandleFunc("/unsubscribe/{name}", unsubscribeHandler)
r.HandleFunc("/thunder/new", thunderNewHandler)
r.HandleFunc("/thunder/torrent", thunderTorrentHandler)
r.HandleFunc("/thunder/verifycode", thunderVerifyCodeHandler)
r.HandleFunc("/thunder/verifycode/", thunderVerifyCodeHandler)
r.HandleFunc("/subtitles/search/{movie}", subtitlesSearchHandler)
r.HandleFunc("/subtitles/download/{movie}", subtitlesDownloadHandler)
r.HandleFunc("/app/status", appStatusHandler)
r.HandleFunc("/app/shutdown", appShutdownHandler)
r.HandleFunc("/app/gc", appGCHandler)
r.HandleFunc("/app/cookie/{domain}", appCookieHandler)
r.PathPrefix("/assets/").Handler(http.FileServer(http.Dir(".")))
http.Handle("/", MyServer{r})
server := util.ReadConfig("server")
log.Print("server ", server, " started.")
err = http.ListenAndServe(server, nil)
if err != nil {
log.Fatal(err)
}
} | random_line_split | |
website.go | package website
import (
"encoding/json"
"errors"
"fmt"
"html"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
_ "net/http/pprof"
"os/exec"
"path"
"path/filepath"
"runtime/debug"
"strconv"
"strings"
"time"
"vger/download"
"vger/native"
"vger/player/shared"
"vger/task"
"vger/thunder"
"vger/util"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
)
var upgrader = websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true
},
} //use default buffer size
func checkIfSubtitle(input string) bool {
return !(strings.Contains(input, "://") || strings.HasSuffix(input, ".torrent") || strings.HasPrefix(input, "magnet:"))
}
func checkIfSpeed(input string) (int64, bool) {
num, err := strconv.ParseUint(input, 10, 64)
if err != nil {
return 0, false
}
if num > 10*1024*1024 {
num = 10 * 1024 * 1024
}
return int64(num), true
}
func viewHandler(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "index.html")
}
func openHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("open \"%s\".\n", name)
// cmd := exec.Command("./player", fmt.Sprintf("-task=%s", name))
t, err := task.GetTask(name)
p := util.ReadConfig("dir")
if err == nil && t != nil {
p = path.Join(p, t.Subscribe)
}
cmd := exec.Command("open", path.Join(p, name))
err = cmd.Start()
if err != nil {
writeError(w, err)
}
}
func trashHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
log.Printf("trash \"%s\".\n", name)
err := task.DeleteTask(name)
if err != nil {
writeError(w, err)
return
} else {
err = shared.DeleteSubtitle(name)
if err != nil {
log.Print(err)
}
err = shared.DeletePlaying(name)
if err != nil {
log.Print(err)
}
}
}
func resumeHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("resume download \"%s\".\n", name)
if err := task.ResumeTask(name); err != nil {
writeError(w, err)
}
}
func newTaskHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
log.Print(err)
w.Write([]byte(html.EscapeString(err.Error())))
}
}()
vars := mux.Vars(r)
name := vars["name"]
input, _ := ioutil.ReadAll(r.Body)
if url := string(input); url != "" {
_, name2, size, _, err := download.GetDownloadInfo(url, false)
if err != nil {
writeError(w, err)
return
}
if name == "" {
name = name2
}
fmt.Printf("add download \"%s\".\nname: %s\n", url, name)
if t, err := task.GetTask(name); err == nil {
if t.Status == "Finished" {
w.Write([]byte("File has already been downloaded."))
} else if t.Status != "Downloading" && t.Status != "Stopped" {
if t.Status == "Deleted" {
log.Print("deleted task")
t.DownloadedSize = 0
}
t.URL = url
t.Size = size
t.Status = "Stopped"
if err := task.SaveTask(t); err != nil {
writeError(w, err)
}
}
} else if err := task.NewTask(name, url, size, "Stopped"); err != nil {
writeError(w, err)
} else {
native.SendNotification("V'ger add task", name)
}
}
}
func thunderNewHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
log.Print(err)
log.Print(string(debug.Stack()))
w.Write([]byte(html.EscapeString(err.Error())))
}
}()
input, _ := ioutil.ReadAll(r.Body)
m := make(map[string]string)
err := json.Unmarshal(input, &m)
if err != nil {
writeError(w, err)
return
}
url := string(m["url"])
verifycode := string(m["verifycode"])
log.Print("thunderNewHandler:", url, verifycode)
files, err := thunder.NewTask(url, verifycode)
if err == nil {
writeJson(w, files)
} else {
writeError(w, err)
}
}
func thunderVerifyCodeHandler(w http.ResponseWriter, r *http.Request) {
h := w.Header()
h.Add("Content-Type", "image/jpeg")
thunder.WriteValidationCode(w)
}
func thunderTorrentHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
if re := recover(); re != nil {
err := re.(error)
writeError(w, err)
}
}()
// res, _ := httputil.DumpRequest(r, true)
// fmt.Println(string(res))
fmt.Println("thunder torrent handler")
f, _, err := r.FormFile("torrent")
if err != nil {
writeError(w, err)
return
}
input, _ := ioutil.ReadAll(f)
// thunder.Login(config["thunder-user"], config["thunder-password"])
files, err := thunder.NewTaskWithTorrent(input)
if err == nil {
writeJson(w, files)
} else {
writeError(w, err)
}
}
func stopHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("stop download \"%s\".\n", name)
if err := task.StopTask(name); err != nil {
writeError(w, err)
}
fmt.Println("stop download finish")
}
func limitHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
input := vars["speed"]
speed, _ := strconv.Atoi(string(input))
fmt.Printf("limit speed %dKB/s.\n", speed)
util.SaveConfig("max-speed", input)
if err := download.LimitSpeed(speed); err != nil {
writeError(w, err)
}
}
func configHandler(w http.ResponseWriter, r *http.Request) {
configs := util.ReadAllConfigs()
writeJson(w, configs)
}
func configSimultaneousHandler(w http.ResponseWriter, r *http.Request) {
input, _ := ioutil.ReadAll(r.Body)
cnt, _ := strconv.Atoi(string(input))
if cnt > 0 {
// oldcnt := util.ReadIntConfig("simultaneous-downloads")
downloadingCnt := task.NumOfDownloadingTasks()
for i := cnt; i < downloadingCnt; i++ |
for i := downloadingCnt; i < cnt; i++ {
err, _ := task.ResumeNextTask()
if err != nil {
log.Print(err)
}
}
util.SaveConfig("simultaneous-downloads", string(input))
} else {
writeError(w, fmt.Errorf("Simultaneous must greater than zero."))
}
}
func setAutoShutdownHandler(w http.ResponseWriter, r *http.Request) {
input, _ := ioutil.ReadAll(r.Body)
util.SaveConfig("shutdown-after-finish", string(input))
// fmt.Printf("Autoshutdown task \"%s\" %s.", name, autoshutdown)
// task.SetAutoshutdown(name, autoshutdown == "on")
}
func progressHandler(w http.ResponseWriter, r *http.Request) {
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
writeError(w, err)
return
}
tasks := task.GetTasks()
cnt := 50
tks := make([]*task.Task, 0)
for _, t := range tasks {
tks = append(tks, t)
if len(tks) == cnt {
err := ws.WriteJSON(tks) //writeJson(ws, tks)
if err != nil {
return
}
tks = tks[0:0]
}
}
if len(tks) > 0 {
err := ws.WriteJSON(tks) //writeJson(ws, tks)
if err != nil {
return
}
}
ch := make(chan *task.Task, 20)
// log.Println("website watch task change ", ch)
task.WatchChange(ch)
defer task.RemoveWatch(ch)
// ws.SetDeadline(time.Now().Add(21 * time.Second))
for {
select {
case t := <-ch:
err := ws.WriteJSON([]*task.Task{t}) //writeJson(ws, []*task.Task{t})
if err != nil {
return
}
break
case <-time.After(time.Second * 20):
//close connection every 20 seconds
//if client is alive, it should reconnect to server
//prevent socket connection & goroutine leak
ws.Close()
return
}
}
}
func playHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
fmt.Printf("play \"%s\".\n", name)
// playerPath := util.ReadConfig("video-player")
t, err := task.GetTask(name)
if err != nil {
writeError(w, err)
return
}
fmt.Printf("open %s", fmt.Sprintf("vgerplayer://%s", t.URL))
cmd := exec.Command("open", fmt.Sprintf("vgerplayer://%s", t.URL)) //playerPath, "--args", t.URL)
// config := util.ReadAllConfigs()
// playerPath := config["video-player"]
// util.KillProcess(playerPath)
// cmd := exec.Command("open", playerPath, "--args", "http://"+config["server"]+"/video/"+name)
err = cmd.Start()
if err != nil {
writeError(w, err)
}
}
func writeError(w http.ResponseWriter, err error) {
log.Print(err)
log.Print(string(debug.Stack()))
w.Write([]byte(err.Error()))
}
func writeJson(w io.Writer, obj interface{}) {
text, err := json.Marshal(obj)
if err != nil {
log.Print(err)
} else {
_, err := w.Write(text)
if err != nil {
log.Print(err)
}
}
}
func videoHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
t, err := task.GetTask(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if t.Status == "Downloading" {
err := task.StopTask(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
size := t.Size
code := http.StatusOK
// If Content-Type isn't set, use the file's extension to find it.
ctype := w.Header().Get("Content-Type")
if ctype == "" {
ctype = mime.TypeByExtension(filepath.Ext(name))
if ctype != "" {
w.Header().Set("Content-Type", ctype)
} else {
w.Header().Set("Content-Type", "application/octet-stream")
}
}
sendSize := size
ranges, err := parseRange(r.Header.Get("Range"), size)
if err != nil {
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
ra := ranges[0]
sendSize = ra.length
code = http.StatusPartialContent
w.Header().Set("Content-Range", ra.contentRange(size))
w.Header().Set("Accept-Ranges", "bytes")
if w.Header().Get("Content-Encoding") == "" {
w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
}
w.WriteHeader(code)
download.Play(t, w, ra.start, ra.start+sendSize)
}
type httpRange struct {
start, length int64
}
func (r httpRange) contentRange(size int64) string {
return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
}
// parseRange parses a Range header string as per RFC 2616.
func parseRange(s string, size int64) ([]httpRange, error) {
if s == "" {
return nil, nil // header not present
}
const b = "bytes="
if !strings.HasPrefix(s, b) {
return nil, errors.New("invalid range")
}
var ranges []httpRange
for _, ra := range strings.Split(s[len(b):], ",") {
ra = strings.TrimSpace(ra)
if ra == "" {
continue
}
i := strings.Index(ra, "-")
if i < 0 {
return nil, errors.New("invalid range")
}
start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:])
var r httpRange
if start == "" {
// If no start is specified, end specifies the
// range start relative to the end of the file.
i, err := strconv.ParseInt(end, 10, 64)
if err != nil {
return nil, errors.New("invalid range")
}
if i > size {
i = size
}
r.start = size - i
r.length = size - r.start
} else {
i, err := strconv.ParseInt(start, 10, 64)
if err != nil || i > size || i < 0 {
return nil, errors.New("invalid range")
}
r.start = i
if end == "" {
// If no end is specified, range extends to end of the file.
r.length = size - r.start
} else {
i, err := strconv.ParseInt(end, 10, 64)
if err != nil || r.start > i {
return nil, errors.New("invalid range")
}
if i >= size {
i = size - 1
}
r.length = i - r.start + 1
}
}
ranges = append(ranges, r)
}
return ranges, nil
}
type MyServer struct {
r *mux.Router
}
func (s MyServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
s.r.ServeHTTP(w, req)
}
func Run(isDebug bool) {
if !isDebug {
go Monitor()
}
err, _ := util.MakeSurePathExists(path.Join(util.ReadConfig("dir"), "subs"))
if err != nil {
log.Fatal(err)
}
r := mux.NewRouter()
r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./index.html")
})
r.HandleFunc("/newclient", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./newindex.html")
})
r.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "./assets/favicon.png")
})
r.HandleFunc("/open/{name}", openHandler)
r.HandleFunc("/play/{name}", playHandler)
r.HandleFunc("/video/{name}", videoHandler)
r.HandleFunc("/resume/{name}", resumeHandler)
r.HandleFunc("/stop/{name}", stopHandler)
r.HandleFunc("/progress", progressHandler)
r.HandleFunc("/new/{name}", newTaskHandler)
r.HandleFunc("/new", newTaskHandler)
r.HandleFunc("/limit/{speed:[0-9]+}", limitHandler)
r.HandleFunc("/config", configHandler)
r.HandleFunc("/config/simultaneous", configSimultaneousHandler)
r.HandleFunc("/trash/{name}", trashHandler)
r.HandleFunc("/autoshutdown", setAutoShutdownHandler)
// http.HandleFunc("/queue/", queueHandler)
r.HandleFunc("/subscribe/new", subscribeNewHandler)
r.HandleFunc("/subscribe", subscribeHandler)
r.HandleFunc("/subscribe/banner/{name}", subscribeBannerHandler)
r.HandleFunc("/unsubscribe/{name}", unsubscribeHandler)
r.HandleFunc("/thunder/new", thunderNewHandler)
r.HandleFunc("/thunder/torrent", thunderTorrentHandler)
r.HandleFunc("/thunder/verifycode", thunderVerifyCodeHandler)
r.HandleFunc("/thunder/verifycode/", thunderVerifyCodeHandler)
r.HandleFunc("/subtitles/search/{movie}", subtitlesSearchHandler)
r.HandleFunc("/subtitles/download/{movie}", subtitlesDownloadHandler)
r.HandleFunc("/app/status", appStatusHandler)
r.HandleFunc("/app/shutdown", appShutdownHandler)
r.HandleFunc("/app/gc", appGCHandler)
r.HandleFunc("/app/cookie/{domain}", appCookieHandler)
r.PathPrefix("/assets/").Handler(http.FileServer(http.Dir(".")))
http.Handle("/", MyServer{r})
server := util.ReadConfig("server")
log.Print("server ", server, " started.")
err = http.ListenAndServe(server, nil)
if err != nil {
log.Fatal(err)
}
}
| {
err := task.QueueDownloadingTask()
if err != nil {
log.Print(err)
}
} | conditional_block |
createpost.component.ts | // General libraries
import { Component, OnInit, ViewChild, ElementRef } from '@angular/core';
import { Observable } from 'rxjs';
import { TextField } from 'tns-core-modules/ui/text-field';
import { RouterExtensions } from 'nativescript-angular/router';
import * as localStorage from 'tns-core-modules/application-settings';
import * as dialogs from 'tns-core-modules/ui/dialogs';
// Services
import { ConvertService } from '~/app/services/convert.service';
import { DrinkService } from '~/app/services/drink.service';
import { ImageService } from '~/app/services/image.service';
import { PostService } from '~/app/services/post.service';
// Dataclasses | import { Image } from 'tns-core-modules/ui/image';
@Component({
selector: 'ns-createpost',
templateUrl: './createpost.component.html',
styleUrls: ['./createpost.component.css'],
moduleId: module.id
})
export class CreatepostComponent implements OnInit {
// Get button-elements from the template to temporary variables.
@ViewChild('cameraBtn') public cameraButton: ElementRef;
@ViewChild('galleryBtn') public galleryButton: ElementRef;
@ViewChild('selectBtn') public selectButton: ElementRef;
@ViewChild('firstTab') public firstTab: ElementRef;
// Get's username from localStorage
public username: string;
// Will contain all the necessary information about the post
public post = new Post();
// Currently selected tab (starts at 0)
public selectedIndex: number;
// Boolean value telling if user is searching
public searching: boolean;
// Boolean value indicating if searching should be force-stopped
public forceStopSearch: boolean;
// Will contain Font Awesome's stars if user gives any
public rate: string;
// Observable array holding all searched drinks by user input
public drinks$: Observable<Drink[]>;
// Raw picture which'll be placed for user to view
public picture: Image;
// Raw picture converted to base64string-mode for HTTP-calls
public base64picture: string;
// Boolean value toggling if image-options are showing
public imageOptionsUp: boolean;
// Indicator showing if post if being made
public isBusy: boolean;
constructor (
private _routerExtensions: RouterExtensions,
private _convertService: ConvertService,
private _drinkService: DrinkService,
private _postService: PostService,
private _imageService: ImageService
) {}
ngOnInit() {
this.username = localStorage.getString('username');
}
// Converts rating
public ratingValue(event): void {
this.rate = this._convertService.convertRating(event.value);
}
/**
* SENDS THE POST
*
* After user pushes the SEND-button the function will check whether post's text were given
* (which is the only mandatory field). If one is given, function will show activity-indicator
* for the user and pushes post's information to service.
*
* After frontpage get's feedback from the post it'll determine whether post was successfully
* added to database. If success, it'll transform user to frontpage. If post has failed
* (usually because of 500 internet-error) an alert will be shown telling user that the post
* has failed.
*
*/
public sendPost(): void {
if (this.post.text) {
this.isBusy = true;
this._postService.createPost(this.post, this.base64picture, this.username)
.subscribe(() => {
this.isBusy = false;
this._routerExtensions.navigate(['/frontpage'], { animated: true, transition: {
name: 'slide', duration: 200, curve: 'easeInOut'
}});
}, err => {
console.error(err);
this.isBusy = false;
this._convertService.convertPrompt('Postausta ei voitu lähettää', null, null, 5);
});
} else {
this._convertService.convertPrompt('Postauksen teksti ei voi olla tyhjä', null, null, 2);
}
}
/**
* REMOVES IMAGE
*
* Function which simply removes the image from the post and from user's view.
*
*/
public removeImage(): void {
this.picture = new Image();
this.base64picture = '';
this.firstTab.nativeElement.backgroundImage = '~/assets/images/sketchs/createpost-picture-3.jpg';
}
/**
* TOGGLES BETWEEN IMAGE MODES
*
* Function which'll show options for pushing an image to a new post. After user
* presses the "paperclip"-button, various options, including camera and gallery, will
* show up with a smooth animation.
*
*/
public toggleImageOptions(): void {
if (!this.imageOptionsUp) {
this.cameraButton.nativeElement.animate({
opacity: 1,
duration: 80,
translate: { x: 0, y: -130 }
});
this.galleryButton.nativeElement.animate({
opacity: 1,
duration: 80,
translate: { x: 0, y: -65 }
});
this.selectButton.nativeElement.animate({
duration: 150,
rotate: 180
});
this.imageOptionsUp = true;
} else {
this.cameraButton.nativeElement.animate({
opacity: 0,
duration: 80,
translate: { x: 0, y: 0 }
});
this.galleryButton.nativeElement.animate({
opacity: 0,
duration: 80,
translate: { x: 0, y: 0 }
});
this.selectButton.nativeElement.animate({
duration: 150,
rotate: 0
});
this.imageOptionsUp = false;
}
}
/**
* CLOSES TEXTFIELDES TEXTBOARDS AND SEARCH SELECTION
*
* This function will be executed when user taps away from textfield(s) or from
* search selection. After it'll do a blank search with empty string to return an
* empty array which'll close search-window.
*
* @param {TextField} nameField Beer's name textfield input (optional)
* @param {TextField} typeField Beer's type textfield input (optional)
* @param {TextField} descField Description of the post (optional)
*
*/
closeElements(nameField?: TextField, typeField?: TextField, descField?: TextField): void {
if (nameField) nameField.dismissSoftInput();
if (typeField) typeField.dismissSoftInput();
if (descField) descField.dismissSoftInput();
if (this.imageOptionsUp) this.toggleImageOptions();
this.search('');
}
/**
* CLOSES AND/OR FORCE-CLOSES BEER'S NAME FIELD
*
* Function which closes and/or force-closes beer's name textfield-input.
* Normal closing will only close name-search for that moment. Forcing
* means that input will only search for beer names if user re-focuses
* that same textfield.
*
* @param {boolean} force Don't open name-search without re-focusing field
*
*/
dismissSearch(force: boolean): void {
this.searching = false;
if (force) this.forceStopSearch = true;
}
/**
* RETURNS FOCUS FOR NAME-SEARCH
*
* This function will fire if user goes to next input field and then back to
* beer's name input. This'll override force-stop of name search and start
* re-searching them with a keyword again.
*
*/
returnFocus(): void {
if (this.forceStopSearch) this.forceStopSearch = false;
}
/**
* PICKS A BEER FROM NAME-SUGGESTION LIST TO INPUT-FIELDS
*
* Function which'll be executed if user picks something from
* name-search suggestions. These values will be then inserted into
* two-way binding inputs (Beer name & Beer type) -- meaning they'll
* saved in form and displayed to user.
*
* @param {Drink} selectedDrink Drink which was selected from list
* @param {TextField} nameField Beer's name field (whose keyboard will be closed)
*
*/
public chooseDrink(selectedDrink: Drink, nameField: TextField): void {
nameField.dismissSoftInput();
this.dismissSearch(true);
this.post.drink_name = selectedDrink.name;
this.post.drink_type = selectedDrink.type;
}
/**
* SEARCHS FOR BEER'S NAMES BY USER INPUT
*
* Function which'll search for beer's names based on user input. If search
* is force-stopped it'll do nothing. Also if the search field is empty it'll
* stop the search.
*
* @param {string} drink User input which'll be used for search
*
*/
public search(drink: string): void {
if (!this.forceStopSearch) {
if (!drink) this.searching = false;
else {
this.searching = true;
this.drinks$ = this._drinkService.searchDrinks(drink);
}
}
}
/**
* EMPTIES DRINKS
*
* Function which'll empty all the additional information about the post.
*
*/
public empty(): void {
this.post.drink_name = '';
this.post.drink_type = '';
this.post.rating = 0;
}
/**
* OPEN CAMERA AND TAKE PICTURE TO THE POST
*
* Calls service's takePicture() -method to get picture and base64-string of it.
* Returned values will be placed inside global variables inside class.
*
*/
public takePicture(): void {
this.toggleImageOptions();
this._imageService.takePicture((res, err) => {
if (res) {
this.picture = res.picture;
this.base64picture = res.base64picture;
// Blurs the background-image
this.firstTab.nativeElement.backgroundImage = '~/assets/images/sketchs/createpost-picture-3-blur.jpg';
} else {
console.log(err);
}
});
}
/**
* CHOOSE SINGLE PICTURE FROM PHONES GALLERY
*
* Calls service's getPicture() -method to get picture and base64-string of it.
* Returned values will be placed inside global variables inside class.
*
*/
public getPicture(): void {
this.toggleImageOptions();
this._imageService.getPicture((res, err) => {
console.log(res.base64picture);
if (res) {
this.picture = res.picture;
this.base64picture = res.base64picture;
// Blurs the background-image
this.firstTab.nativeElement.backgroundImage = '~/assets/images/sketchs/createpost-picture-3-blur.jpg';
} else {
console.log(err);
}
})
}
} | import { Drink } from '~/app/dataclasses/drink';
import { Post } from '~/app/dataclasses/post'; | random_line_split |
createpost.component.ts | // General libraries
import { Component, OnInit, ViewChild, ElementRef } from '@angular/core';
import { Observable } from 'rxjs';
import { TextField } from 'tns-core-modules/ui/text-field';
import { RouterExtensions } from 'nativescript-angular/router';
import * as localStorage from 'tns-core-modules/application-settings';
import * as dialogs from 'tns-core-modules/ui/dialogs';
// Services
import { ConvertService } from '~/app/services/convert.service';
import { DrinkService } from '~/app/services/drink.service';
import { ImageService } from '~/app/services/image.service';
import { PostService } from '~/app/services/post.service';
// Dataclasses
import { Drink } from '~/app/dataclasses/drink';
import { Post } from '~/app/dataclasses/post';
import { Image } from 'tns-core-modules/ui/image';
@Component({
selector: 'ns-createpost',
templateUrl: './createpost.component.html',
styleUrls: ['./createpost.component.css'],
moduleId: module.id
})
export class CreatepostComponent implements OnInit {
// Get button-elements from the template to temporary variables.
@ViewChild('cameraBtn') public cameraButton: ElementRef;
@ViewChild('galleryBtn') public galleryButton: ElementRef;
@ViewChild('selectBtn') public selectButton: ElementRef;
@ViewChild('firstTab') public firstTab: ElementRef;
// Get's username from localStorage
public username: string;
// Will contain all the necessary information about the post
public post = new Post();
// Currently selected tab (starts at 0)
public selectedIndex: number;
// Boolean value telling if user is searching
public searching: boolean;
// Boolean value indicating if searching should be force-stopped
public forceStopSearch: boolean;
// Will contain Font Awesome's stars if user gives any
public rate: string;
// Observable array holding all searched drinks by user input
public drinks$: Observable<Drink[]>;
// Raw picture which'll be placed for user to view
public picture: Image;
// Raw picture converted to base64string-mode for HTTP-calls
public base64picture: string;
// Boolean value toggling if image-options are showing
public imageOptionsUp: boolean;
// Indicator showing if post if being made
public isBusy: boolean;
constructor (
private _routerExtensions: RouterExtensions,
private _convertService: ConvertService,
private _drinkService: DrinkService,
private _postService: PostService,
private _imageService: ImageService
) {}
ngOnInit() {
this.username = localStorage.getString('username');
}
// Converts rating
public ratingValue(event): void {
this.rate = this._convertService.convertRating(event.value);
}
/**
* SENDS THE POST
*
* After user pushes the SEND-button the function will check whether post's text were given
* (which is the only mandatory field). If one is given, function will show activity-indicator
* for the user and pushes post's information to service.
*
* After frontpage get's feedback from the post it'll determine whether post was successfully
* added to database. If success, it'll transform user to frontpage. If post has failed
* (usually because of 500 internet-error) an alert will be shown telling user that the post
* has failed.
*
*/
public sendPost(): void {
if (this.post.text) {
this.isBusy = true;
this._postService.createPost(this.post, this.base64picture, this.username)
.subscribe(() => {
this.isBusy = false;
this._routerExtensions.navigate(['/frontpage'], { animated: true, transition: {
name: 'slide', duration: 200, curve: 'easeInOut'
}});
}, err => {
console.error(err);
this.isBusy = false;
this._convertService.convertPrompt('Postausta ei voitu lähettää', null, null, 5);
});
} else {
this._convertService.convertPrompt('Postauksen teksti ei voi olla tyhjä', null, null, 2);
}
}
/**
* REMOVES IMAGE
*
* Function which simply removes the image from the post and from user's view.
*
*/
public removeImage(): void {
this.picture = new Image();
this.base64picture = '';
this.firstTab.nativeElement.backgroundImage = '~/assets/images/sketchs/createpost-picture-3.jpg';
}
/**
* TOGGLES BETWEEN IMAGE MODES
*
* Function which'll show options for pushing an image to a new post. After user
* presses the "paperclip"-button, various options, including camera and gallery, will
* show up with a smooth animation.
*
*/
public toggleImageOptions(): void {
if (!this.imageOptionsUp) {
this.cameraButton.nativeElement.animate({
opacity: 1,
duration: 80,
translate: { x: 0, y: -130 }
});
this.galleryButton.nativeElement.animate({
opacity: 1,
duration: 80,
translate: { x: 0, y: -65 }
});
this.selectButton.nativeElement.animate({
duration: 150,
rotate: 180
});
this.imageOptionsUp = true;
} else {
this.cameraButton.nativeElement.animate({
opacity: 0,
duration: 80,
translate: { x: 0, y: 0 }
});
this.galleryButton.nativeElement.animate({
opacity: 0,
duration: 80,
translate: { x: 0, y: 0 }
});
this.selectButton.nativeElement.animate({
duration: 150,
rotate: 0
});
this.imageOptionsUp = false;
}
}
/**
* CLOSES TEXTFIELDES TEXTBOARDS AND SEARCH SELECTION
*
* This function will be executed when user taps away from textfield(s) or from
* search selection. After it'll do a blank search with empty string to return an
* empty array which'll close search-window.
*
* @param {TextField} nameField Beer's name textfield input (optional)
* @param {TextField} typeField Beer's type textfield input (optional)
* @param {TextField} descField Description of the post (optional)
*
*/
closeElements(nameField?: TextField, typeField?: TextField, descField?: TextField): void {
if (nameField) nameField.dismissSoftInput();
if (typeField) typeField.dismissSoftInput();
if (descField) descField.dismissSoftInput();
if (this.imageOptionsUp) this.toggleImageOptions();
this.search('');
}
/**
* CLOSES AND/OR FORCE-CLOSES BEER'S NAME FIELD
*
* Function which closes and/or force-closes beer's name textfield-input.
* Normal closing will only close name-search for that moment. Forcing
* means that input will only search for beer names if user re-focuses
* that same textfield.
*
* @param {boolean} force Don't open name-search without re-focusing field
*
*/
dismissSearch(force: boolean): void {
this.searching = false;
if (force) this.forceStopSearch = true;
}
/**
* RETURNS FOCUS FOR NAME-SEARCH
*
* This function will fire if user goes to next input field and then back to
* beer's name input. This'll override force-stop of name search and start
* re-searching them with a keyword again.
*
*/
returnFocus(): void {
if (this.forceStopSearch) this.forceStopSearch = false;
}
/**
* PICKS A BEER FROM NAME-SUGGESTION LIST TO INPUT-FIELDS
*
* Function which'll be executed if user picks something from
* name-search suggestions. These values will be then inserted into
* two-way binding inputs (Beer name & Beer type) -- meaning they'll
* saved in form and displayed to user.
*
* @param {Drink} selectedDrink Drink which was selected from list
* @param {TextField} nameField Beer's name field (whose keyboard will be closed)
*
*/
public chooseDrink(selectedDrink: Drink, nameField: TextField): void {
nameField.dismissSoftInput();
this.dismissSearch(true);
this.post.drink_name = selectedDrink.name;
this.post.drink_type = selectedDrink.type;
}
/**
* SEARCHS FOR BEER'S NAMES BY USER INPUT
*
* Function which'll search for beer's names based on user input. If search
* is force-stopped it'll do nothing. Also if the search field is empty it'll
* stop the search.
*
* @param {string} drink User input which'll be used for search
*
*/
public search(drink: string): void {
if (!this.forceStopSearch) {
if (!drink) this.searching = false;
else {
this.searching = true;
this.drinks$ = this._drinkService.searchDrinks(drink);
}
}
}
/**
* EMPTIES DRINKS
*
* Function which'll empty all the additional information about the post.
*
*/
public empty(): void {
t | *
* OPEN CAMERA AND TAKE PICTURE TO THE POST
*
* Calls service's takePicture() -method to get picture and base64-string of it.
* Returned values will be placed inside global variables inside class.
*
*/
public takePicture(): void {
this.toggleImageOptions();
this._imageService.takePicture((res, err) => {
if (res) {
this.picture = res.picture;
this.base64picture = res.base64picture;
// Blurs the background-image
this.firstTab.nativeElement.backgroundImage = '~/assets/images/sketchs/createpost-picture-3-blur.jpg';
} else {
console.log(err);
}
});
}
/**
* CHOOSE SINGLE PICTURE FROM PHONES GALLERY
*
* Calls service's getPicture() -method to get picture and base64-string of it.
* Returned values will be placed inside global variables inside class.
*
*/
public getPicture(): void {
this.toggleImageOptions();
this._imageService.getPicture((res, err) => {
console.log(res.base64picture);
if (res) {
this.picture = res.picture;
this.base64picture = res.base64picture;
// Blurs the background-image
this.firstTab.nativeElement.backgroundImage = '~/assets/images/sketchs/createpost-picture-3-blur.jpg';
} else {
console.log(err);
}
})
}
} | his.post.drink_name = '';
this.post.drink_type = '';
this.post.rating = 0;
}
/* | identifier_body |
createpost.component.ts | // General libraries
import { Component, OnInit, ViewChild, ElementRef } from '@angular/core';
import { Observable } from 'rxjs';
import { TextField } from 'tns-core-modules/ui/text-field';
import { RouterExtensions } from 'nativescript-angular/router';
import * as localStorage from 'tns-core-modules/application-settings';
import * as dialogs from 'tns-core-modules/ui/dialogs';
// Services
import { ConvertService } from '~/app/services/convert.service';
import { DrinkService } from '~/app/services/drink.service';
import { ImageService } from '~/app/services/image.service';
import { PostService } from '~/app/services/post.service';
// Dataclasses
import { Drink } from '~/app/dataclasses/drink';
import { Post } from '~/app/dataclasses/post';
import { Image } from 'tns-core-modules/ui/image';
@Component({
selector: 'ns-createpost',
templateUrl: './createpost.component.html',
styleUrls: ['./createpost.component.css'],
moduleId: module.id
})
export class CreatepostComponent implements OnInit {
// Get button-elements from the template to temporary variables.
@ViewChild('cameraBtn') public cameraButton: ElementRef;
@ViewChild('galleryBtn') public galleryButton: ElementRef;
@ViewChild('selectBtn') public selectButton: ElementRef;
@ViewChild('firstTab') public firstTab: ElementRef;
// Get's username from localStorage
public username: string;
// Will contain all the necessary information about the post
public post = new Post();
// Currently selected tab (starts at 0)
public selectedIndex: number;
// Boolean value telling if user is searching
public searching: boolean;
// Boolean value indicating if searching should be force-stopped
public forceStopSearch: boolean;
// Will contain Font Awesome's stars if user gives any
public rate: string;
// Observable array holding all searched drinks by user input
public drinks$: Observable<Drink[]>;
// Raw picture which'll be placed for user to view
public picture: Image;
// Raw picture converted to base64string-mode for HTTP-calls
public base64picture: string;
// Boolean value toggling if image-options are showing
public imageOptionsUp: boolean;
// Indicator showing if post if being made
public isBusy: boolean;
constructor (
private _routerExtensions: RouterExtensions,
private _convertService: ConvertService,
private _drinkService: DrinkService,
private _postService: PostService,
private _imageService: ImageService
) {}
ngOnInit() {
this.username = localStorage.getString('username');
}
// Converts rating
public ratingValue(event): void {
this.rate = this._convertService.convertRating(event.value);
}
/**
* SENDS THE POST
*
* After user pushes the SEND-button the function will check whether post's text were given
* (which is the only mandatory field). If one is given, function will show activity-indicator
* for the user and pushes post's information to service.
*
* After frontpage get's feedback from the post it'll determine whether post was successfully
* added to database. If success, it'll transform user to frontpage. If post has failed
* (usually because of 500 internet-error) an alert will be shown telling user that the post
* has failed.
*
*/
public sendPost(): void {
if (this.post.text) {
this.isBusy = true;
this._postService.createPost(this.post, this.base64picture, this.username)
.subscribe(() => {
this.isBusy = false;
this._routerExtensions.navigate(['/frontpage'], { animated: true, transition: {
name: 'slide', duration: 200, curve: 'easeInOut'
}});
}, err => {
console.error(err);
this.isBusy = false;
this._convertService.convertPrompt('Postausta ei voitu lähettää', null, null, 5);
});
} else {
this._convertService.convertPrompt('Postauksen teksti ei voi olla tyhjä', null, null, 2);
}
}
/**
* REMOVES IMAGE
*
* Function which simply removes the image from the post and from user's view.
*
*/
public removeImage(): void {
this.picture = new Image();
this.base64picture = '';
this.firstTab.nativeElement.backgroundImage = '~/assets/images/sketchs/createpost-picture-3.jpg';
}
/**
* TOGGLES BETWEEN IMAGE MODES
*
* Function which'll show options for pushing an image to a new post. After user
* presses the "paperclip"-button, various options, including camera and gallery, will
* show up with a smooth animation.
*
*/
public toggleImageOptions(): void {
if (!this.imageOptionsUp) {
this.cameraButton.nativeElement.animate({
opacity: 1,
duration: 80,
translate: { x: 0, y: -130 }
});
this.galleryButton.nativeElement.animate({
opacity: 1,
duration: 80,
translate: { x: 0, y: -65 }
});
this.selectButton.nativeElement.animate({
duration: 150,
rotate: 180
});
this.imageOptionsUp = true;
} else {
this.cameraButton.nativeElement.animate({
opacity: 0,
duration: 80,
translate: { x: 0, y: 0 }
});
this.galleryButton.nativeElement.animate({
opacity: 0,
duration: 80,
translate: { x: 0, y: 0 }
});
this.selectButton.nativeElement.animate({
duration: 150,
rotate: 0
});
this.imageOptionsUp = false;
}
}
/**
* CLOSES TEXTFIELDES TEXTBOARDS AND SEARCH SELECTION
*
* This function will be executed when user taps away from textfield(s) or from
* search selection. After it'll do a blank search with empty string to return an
* empty array which'll close search-window.
*
* @param {TextField} nameField Beer's name textfield input (optional)
* @param {TextField} typeField Beer's type textfield input (optional)
* @param {TextField} descField Description of the post (optional)
*
*/
closeEl | eld?: TextField, typeField?: TextField, descField?: TextField): void {
if (nameField) nameField.dismissSoftInput();
if (typeField) typeField.dismissSoftInput();
if (descField) descField.dismissSoftInput();
if (this.imageOptionsUp) this.toggleImageOptions();
this.search('');
}
/**
* CLOSES AND/OR FORCE-CLOSES BEER'S NAME FIELD
*
* Function which closes and/or force-closes beer's name textfield-input.
* Normal closing will only close name-search for that moment. Forcing
* means that input will only search for beer names if user re-focuses
* that same textfield.
*
* @param {boolean} force Don't open name-search without re-focusing field
*
*/
dismissSearch(force: boolean): void {
this.searching = false;
if (force) this.forceStopSearch = true;
}
/**
* RETURNS FOCUS FOR NAME-SEARCH
*
* This function will fire if user goes to next input field and then back to
* beer's name input. This'll override force-stop of name search and start
* re-searching them with a keyword again.
*
*/
returnFocus(): void {
if (this.forceStopSearch) this.forceStopSearch = false;
}
/**
* PICKS A BEER FROM NAME-SUGGESTION LIST TO INPUT-FIELDS
*
* Function which'll be executed if user picks something from
* name-search suggestions. These values will be then inserted into
* two-way binding inputs (Beer name & Beer type) -- meaning they'll
* saved in form and displayed to user.
*
* @param {Drink} selectedDrink Drink which was selected from list
* @param {TextField} nameField Beer's name field (whose keyboard will be closed)
*
*/
public chooseDrink(selectedDrink: Drink, nameField: TextField): void {
nameField.dismissSoftInput();
this.dismissSearch(true);
this.post.drink_name = selectedDrink.name;
this.post.drink_type = selectedDrink.type;
}
/**
* SEARCHS FOR BEER'S NAMES BY USER INPUT
*
* Function which'll search for beer's names based on user input. If search
* is force-stopped it'll do nothing. Also if the search field is empty it'll
* stop the search.
*
* @param {string} drink User input which'll be used for search
*
*/
public search(drink: string): void {
if (!this.forceStopSearch) {
if (!drink) this.searching = false;
else {
this.searching = true;
this.drinks$ = this._drinkService.searchDrinks(drink);
}
}
}
/**
* EMPTIES DRINKS
*
* Function which'll empty all the additional information about the post.
*
*/
public empty(): void {
this.post.drink_name = '';
this.post.drink_type = '';
this.post.rating = 0;
}
/**
* OPEN CAMERA AND TAKE PICTURE TO THE POST
*
* Calls service's takePicture() -method to get picture and base64-string of it.
* Returned values will be placed inside global variables inside class.
*
*/
public takePicture(): void {
this.toggleImageOptions();
this._imageService.takePicture((res, err) => {
if (res) {
this.picture = res.picture;
this.base64picture = res.base64picture;
// Blurs the background-image
this.firstTab.nativeElement.backgroundImage = '~/assets/images/sketchs/createpost-picture-3-blur.jpg';
} else {
console.log(err);
}
});
}
/**
* CHOOSE SINGLE PICTURE FROM PHONES GALLERY
*
* Calls service's getPicture() -method to get picture and base64-string of it.
* Returned values will be placed inside global variables inside class.
*
*/
public getPicture(): void {
this.toggleImageOptions();
this._imageService.getPicture((res, err) => {
console.log(res.base64picture);
if (res) {
this.picture = res.picture;
this.base64picture = res.base64picture;
// Blurs the background-image
this.firstTab.nativeElement.backgroundImage = '~/assets/images/sketchs/createpost-picture-3-blur.jpg';
} else {
console.log(err);
}
})
}
} | ements(nameFi | identifier_name |
prio_bitmap.rs | //! Provides `FixedPrioBitmap`, a bit array structure supporting
//! logarithmic-time bit scan operations.
use core::{convert::TryFrom, fmt};
use super::{ctz::trailing_zeros, BinInteger, Init};
/// The maximum bit count supported by [`FixedPrioBitmap`].
pub const FIXED_PRIO_BITMAP_MAX_LEN: usize = WORD_LEN * WORD_LEN * WORD_LEN;
/// A bit array structure supporting logarithmic-time bit scan operations.
///
/// All valid instantiations implement [`PrioBitmap`].
pub type FixedPrioBitmap<const LEN: usize> = If! {
if (LEN <= WORD_LEN) {
OneLevelPrioBitmap<LEN>
} else if (LEN <= WORD_LEN * WORD_LEN) {
TwoLevelPrioBitmapImpl<
OneLevelPrioBitmap<{(LEN + WORD_LEN - 1) / WORD_LEN}>,
{(LEN + WORD_LEN - 1) / WORD_LEN}
>
} else if (LEN <= WORD_LEN * WORD_LEN * WORD_LEN) {
TwoLevelPrioBitmapImpl<
TwoLevelPrioBitmapImpl<
OneLevelPrioBitmap<{(LEN + WORD_LEN * WORD_LEN - 1) / (WORD_LEN * WORD_LEN)}>,
{(LEN + WORD_LEN * WORD_LEN - 1) / (WORD_LEN * WORD_LEN)}
>,
{(LEN + WORD_LEN - 1) / WORD_LEN}
>
} else {
TooManyLevels
}
};
/// Get an instantiation of `OneLevelPrioBitmapImpl` capable of storing `LEN`
/// entries.
#[doc(hidden)]
pub type OneLevelPrioBitmap<const LEN: usize> = If! {
if (LEN == 0) {
()
} else if (LEN <= 8 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u8, LEN>
} else if (LEN <= 16 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u16, LEN>
} else if (LEN <= 32 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u32, LEN>
} else if (LEN <= 64 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u64, LEN>
} else if (LEN <= 128 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u128, LEN>
} else {
TooManyLevels
}
};
/// Trait for [`FixedPrioBitmap`].
///
/// All methods panic when the given bit position is out of range.
pub trait PrioBitmap: Init + Send + Sync + Clone + Copy + fmt::Debug + 'static {
/// Get the bit at the specified position.
fn get(&self, i: usize) -> bool;
/// Clear the bit at the specified position.
fn clear(&mut self, i: usize);
/// Set the bit at the specified position.
fn set(&mut self, i: usize);
/// Get the position of the first set bit.
fn find_set(&self) -> Option<usize>;
}
impl PrioBitmap for () {
fn get(&self, _: usize) -> bool {
unreachable!()
}
fn clear(&mut self, _: usize) {
unreachable!()
}
fn set(&mut self, _: usize) {
unreachable!()
}
fn find_set(&self) -> Option<usize> {
None
}
}
/// Stores `LEN` (≤ `T::BITS`) entries.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct OneLevelPrioBitmapImpl<T, const LEN: usize> {
bits: T,
}
impl<T: BinInteger, const LEN: usize> Init for OneLevelPrioBitmapImpl<T, LEN> {
const INIT: Self = Self { bits: T::INIT };
}
impl<T: BinInteger, const LEN: usize> fmt::Debug for OneLevelPrioBitmapImpl<T, LEN> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list().entries(self.bits.one_digits()).finish()
}
}
impl<T: BinInteger, const LEN: usize> PrioBitmap for OneLevelPrioBitmapImpl<T, LEN> {
fn get(&self, i: usize) -> bool {
assert!(i < LEN && i < usize::try_from(T::BITS).unwrap());
self.bits.get_bit(i as u32)
}
fn clear(&mut self, i: usize) {
assert!(i < LEN && i < usize::try_from(T::BITS).unwrap());
self.bits.clear_bit(i as u32);
}
fn set(&mut self, i: usize) {
assert!(i < LEN && i < usize::try_from(T::BITS).unwrap());
self.bits.set_bit(i as u32);
}
fn find_set(&self) -> Option<usize> {
if LEN <= usize::BITS as usize {
// Use an optimized version of `trailing_zeros`
let bits = self.bits.to_usize().unwrap();
let i = trailing_zeros::<LEN>(bits);
if i == usize::BITS {
None
} else {
Some(i as usize)
}
} else {
let i = self.bits.trailing_zeros();
if i == T::BITS {
None
} else {
Some(i as usize)
}
}
}
}
/// Stores `WORD_LEN * LEN` entries. `T` must implement `PrioBitmap` and
/// be able to store `LEN` entries.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct TwoLevelPrioBitmapImpl<T, const LEN: usize> {
// Invariant: `first.get(i) == (second[i] != 0)`
first: T,
second: [Word; LEN],
}
type Word = usize;
const WORD_LEN: usize = core::mem::size_of::<Word>() * 8;
impl<T: PrioBitmap, const LEN: usize> Init for TwoLevelPrioBitmapImpl<T, LEN> {
const INIT: Self = Self {
first: T::INIT,
second: [0; LEN],
};
}
impl<T: PrioBitmap, const LEN: usize> fmt::Debug for TwoLevelPrioBitmapImpl<T, LEN> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list()
.entries(
self.second
.iter()
.enumerate()
.map(|(group_i, group)| {
group
.one_digits()
.map(move |subgroup_i| subgroup_i as usize + group_i * WORD_LEN)
})
.flatten(),
)
.finish()
}
}
impl<T: PrioBitmap, const LEN: usize> PrioBitmap for TwoLevelPrioBitmapImpl<T, LEN> {
fn get(&self, i: usize) -> bool {
self.second[i / WORD_LEN].get_bit(u32::try_from(i % WORD_LEN).unwrap())
}
fn clear(&mut self, i: usize) {
let group = &mut self.second[i / WORD_LEN];
group.clear_bit(u32::try_from(i % WORD_LEN).unwrap());
if *group == 0 {
self.first.clear(i / WORD_LEN);
}
}
fn set(&mut self, i: usize) {
let group = &mut self.second[i / WORD_LEN];
group.set_bit(u32::try_from(i % WORD_LEN).unwrap());
self.first.set(i / WORD_LEN);
}
fn find_set(&self) -> Option<usize> {
self.first.find_set().map(|group_i| {
let group = self.second[group_i];
let subgroup_i = group.trailing_zeros() as usize;
debug_assert_ne!(subgroup_i, WORD_LEN);
subgroup_i as usize + group_i * WORD_LEN
})
}
}
/// Indicates the requested size is not supported.
#[doc(hidden)]
#[non_exhaustive]
pub struct TooManyLevels {}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck_macros::quickcheck;
use std::collections::BTreeSet;
struct BTreePrioBitmap(BTreeSet<usize>);
impl BTreePrioBitmap {
fn new() -> Self {
Self(BTreeSet::new())
}
fn enum_set_bits(&self) -> Vec<usize> {
self.0.iter().cloned().collect()
}
fn clear(&mut self, i: usize) {
self.0.remove(&i);
}
fn set(&mut self, i: usize) {
self.0.insert(i);
}
fn find_set(&self) -> Option<usize> {
self.0.iter().next().cloned()
}
}
/// A modifying operation on `PrioBitmap`.
#[derive(Debug)]
enum Cmd {
Insert(usize),
Remove(usize),
}
/// Map random bytes to operations on `PrioBitmap`.
fn interpret(bytecode: &[u8], bitmap_len: usize) -> impl Iterator<Item = Cmd> + '_ {
let mut i = 0;
let mut known_set_bits = Vec::new();
std::iter::from_fn(move || {
if bitmap_len == 0 {
None
} else if let Some(instr) = bytecode.get(i..i + 5) {
i += 5;
let value = u32::from_le_bytes([instr[1], instr[2], instr[3], instr[4]]) as usize;
if instr[0] % 2 == 0 || known_set_bits.is_empty() {
let bit = value % bitmap_len;
known_set_bits.push(bit);
Some(Cmd::Insert(bit))
} else {
let i = value % known_set_bits.len();
let bit = known_set_bits.swap_remove(i);
Some(Cmd::Remove(bit))
}
} else {
None
}
})
}
fn enum_set_bits(bitmap: &impl PrioBitmap, bitmap_len: usize) -> Vec<usize> {
(0..bitmap_len).filter(|&i| bitmap.get(i)).collect()
}
fn test_inner<T: PrioBitmap>(bytecode: Vec<u8>, size: usize) {
let mut subject = T::INIT;
let mut reference = BTreePrioBitmap::new();
log::info!("size = {}", size);
for cmd in interpret(&bytecode, size) {
log::trace!(" {:?}", cmd);
match cmd {
Cmd::Insert(bit) => {
subject.set(bit);
reference.set(bit);
}
Cmd::Remove(bit) => {
subject.clear(bit);
reference.clear(bit);
}
}
assert_eq!(subject.find_set(), reference.find_set());
}
assert_eq!(subject.find_set(), reference.find_set());
assert_eq!(enum_set_bits(&subject, size), reference.enum_set_bits());
}
macro_rules! gen_test {
($(#[$m:meta])* mod $name:ident, $size:literal) => {
$(#[$m])*
mod $name {
use super::*;
#[quickcheck]
fn test(bytecode: Vec<u8>) {
test_inner::<FixedPrioBitmap<$size>>(bytecode, $size);
}
}
};
}
gen_test!(mod size_0, 0);
gen_test!(mod size_1, 1);
gen_test!(mod size_10, 10);
gen_test!(mod size_100, 100);
gen_test!(mod size_1000, 1000); | );
gen_test!(
#[cfg(any(target_pointer_width = "64", target_pointer_width = "128"))]
mod size_100000, 100000
);
} | gen_test!(
#[cfg(any(target_pointer_width = "32", target_pointer_width = "64", target_pointer_width = "128"))]
mod size_10000, 10000 | random_line_split |
prio_bitmap.rs | //! Provides `FixedPrioBitmap`, a bit array structure supporting
//! logarithmic-time bit scan operations.
use core::{convert::TryFrom, fmt};
use super::{ctz::trailing_zeros, BinInteger, Init};
/// The maximum bit count supported by [`FixedPrioBitmap`].
pub const FIXED_PRIO_BITMAP_MAX_LEN: usize = WORD_LEN * WORD_LEN * WORD_LEN;
/// A bit array structure supporting logarithmic-time bit scan operations.
///
/// All valid instantiations implement [`PrioBitmap`].
pub type FixedPrioBitmap<const LEN: usize> = If! {
if (LEN <= WORD_LEN) {
OneLevelPrioBitmap<LEN>
} else if (LEN <= WORD_LEN * WORD_LEN) {
TwoLevelPrioBitmapImpl<
OneLevelPrioBitmap<{(LEN + WORD_LEN - 1) / WORD_LEN}>,
{(LEN + WORD_LEN - 1) / WORD_LEN}
>
} else if (LEN <= WORD_LEN * WORD_LEN * WORD_LEN) {
TwoLevelPrioBitmapImpl<
TwoLevelPrioBitmapImpl<
OneLevelPrioBitmap<{(LEN + WORD_LEN * WORD_LEN - 1) / (WORD_LEN * WORD_LEN)}>,
{(LEN + WORD_LEN * WORD_LEN - 1) / (WORD_LEN * WORD_LEN)}
>,
{(LEN + WORD_LEN - 1) / WORD_LEN}
>
} else {
TooManyLevels
}
};
/// Get an instantiation of `OneLevelPrioBitmapImpl` capable of storing `LEN`
/// entries.
#[doc(hidden)]
pub type OneLevelPrioBitmap<const LEN: usize> = If! {
if (LEN == 0) {
()
} else if (LEN <= 8 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u8, LEN>
} else if (LEN <= 16 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u16, LEN>
} else if (LEN <= 32 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u32, LEN>
} else if (LEN <= 64 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u64, LEN>
} else if (LEN <= 128 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u128, LEN>
} else {
TooManyLevels
}
};
/// Trait for [`FixedPrioBitmap`].
///
/// All methods panic when the given bit position is out of range.
pub trait PrioBitmap: Init + Send + Sync + Clone + Copy + fmt::Debug + 'static {
/// Get the bit at the specified position.
fn get(&self, i: usize) -> bool;
/// Clear the bit at the specified position.
fn clear(&mut self, i: usize);
/// Set the bit at the specified position.
fn set(&mut self, i: usize);
/// Get the position of the first set bit.
fn find_set(&self) -> Option<usize>;
}
impl PrioBitmap for () {
fn get(&self, _: usize) -> bool {
unreachable!()
}
fn clear(&mut self, _: usize) {
unreachable!()
}
fn set(&mut self, _: usize) {
unreachable!()
}
fn find_set(&self) -> Option<usize> {
None
}
}
/// Stores `LEN` (≤ `T::BITS`) entries.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct OneLevelPrioBitmapImpl<T, const LEN: usize> {
bits: T,
}
impl<T: BinInteger, const LEN: usize> Init for OneLevelPrioBitmapImpl<T, LEN> {
const INIT: Self = Self { bits: T::INIT };
}
impl<T: BinInteger, const LEN: usize> fmt::Debug for OneLevelPrioBitmapImpl<T, LEN> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list().entries(self.bits.one_digits()).finish()
}
}
impl<T: BinInteger, const LEN: usize> PrioBitmap for OneLevelPrioBitmapImpl<T, LEN> {
fn get(&self, i: usize) -> bool {
assert!(i < LEN && i < usize::try_from(T::BITS).unwrap());
self.bits.get_bit(i as u32)
}
fn clear(&mut self, i: usize) {
assert!(i < LEN && i < usize::try_from(T::BITS).unwrap());
self.bits.clear_bit(i as u32);
}
fn set(&mut self, i: usize) {
assert!(i < LEN && i < usize::try_from(T::BITS).unwrap());
self.bits.set_bit(i as u32);
}
fn find_set(&self) -> Option<usize> {
if LEN <= usize::BITS as usize {
// Use an optimized version of `trailing_zeros`
let bits = self.bits.to_usize().unwrap();
let i = trailing_zeros::<LEN>(bits);
if i == usize::BITS {
None
} else {
Some(i as usize)
}
} else {
let i = self.bits.trailing_zeros();
if i == T::BITS {
None
} else {
Some(i as usize)
}
}
}
}
/// Stores `WORD_LEN * LEN` entries. `T` must implement `PrioBitmap` and
/// be able to store `LEN` entries.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct TwoLevelPrioBitmapImpl<T, const LEN: usize> {
// Invariant: `first.get(i) == (second[i] != 0)`
first: T,
second: [Word; LEN],
}
type Word = usize;
const WORD_LEN: usize = core::mem::size_of::<Word>() * 8;
impl<T: PrioBitmap, const LEN: usize> Init for TwoLevelPrioBitmapImpl<T, LEN> {
const INIT: Self = Self {
first: T::INIT,
second: [0; LEN],
};
}
impl<T: PrioBitmap, const LEN: usize> fmt::Debug for TwoLevelPrioBitmapImpl<T, LEN> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list()
.entries(
self.second
.iter()
.enumerate()
.map(|(group_i, group)| {
group
.one_digits()
.map(move |subgroup_i| subgroup_i as usize + group_i * WORD_LEN)
})
.flatten(),
)
.finish()
}
}
impl<T: PrioBitmap, const LEN: usize> PrioBitmap for TwoLevelPrioBitmapImpl<T, LEN> {
fn get(&self, i: usize) -> bool {
self.second[i / WORD_LEN].get_bit(u32::try_from(i % WORD_LEN).unwrap())
}
fn clear(&mut self, i: usize) {
let group = &mut self.second[i / WORD_LEN];
group.clear_bit(u32::try_from(i % WORD_LEN).unwrap());
if *group == 0 {
self.first.clear(i / WORD_LEN);
}
}
fn set(&mut self, i: usize) {
let group = &mut self.second[i / WORD_LEN];
group.set_bit(u32::try_from(i % WORD_LEN).unwrap());
self.first.set(i / WORD_LEN);
}
fn find_set(&self) -> Option<usize> {
self.first.find_set().map(|group_i| {
let group = self.second[group_i];
let subgroup_i = group.trailing_zeros() as usize;
debug_assert_ne!(subgroup_i, WORD_LEN);
subgroup_i as usize + group_i * WORD_LEN
})
}
}
/// Indicates the requested size is not supported.
#[doc(hidden)]
#[non_exhaustive]
pub struct TooManyLevels {}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck_macros::quickcheck;
use std::collections::BTreeSet;
struct BTreePrioBitmap(BTreeSet<usize>);
impl BTreePrioBitmap {
fn new() -> Self {
Self(BTreeSet::new())
}
fn enum_set_bits(&self) -> Vec<usize> {
self.0.iter().cloned().collect()
}
fn clear(&mut self, i: usize) {
self.0.remove(&i);
}
fn set(&mut self, i: usize) {
self.0.insert(i);
}
fn find_set(&self) -> Option<usize> {
self.0.iter().next().cloned()
}
}
/// A modifying operation on `PrioBitmap`.
#[derive(Debug)]
enum Cmd {
Insert(usize),
Remove(usize),
}
/// Map random bytes to operations on `PrioBitmap`.
fn interpret(bytecode: &[u8], bitmap_len: usize) -> impl Iterator<Item = Cmd> + '_ {
let mut i = 0;
let mut known_set_bits = Vec::new();
std::iter::from_fn(move || {
if bitmap_len == 0 {
None
} else if let Some(instr) = bytecode.get(i..i + 5) {
i += 5;
let value = u32::from_le_bytes([instr[1], instr[2], instr[3], instr[4]]) as usize;
if instr[0] % 2 == 0 || known_set_bits.is_empty() {
let bit = value % bitmap_len;
known_set_bits.push(bit);
Some(Cmd::Insert(bit))
} else {
let i = value % known_set_bits.len();
let bit = known_set_bits.swap_remove(i);
Some(Cmd::Remove(bit))
}
} else {
| })
}
fn enum_set_bits(bitmap: &impl PrioBitmap, bitmap_len: usize) -> Vec<usize> {
(0..bitmap_len).filter(|&i| bitmap.get(i)).collect()
}
fn test_inner<T: PrioBitmap>(bytecode: Vec<u8>, size: usize) {
let mut subject = T::INIT;
let mut reference = BTreePrioBitmap::new();
log::info!("size = {}", size);
for cmd in interpret(&bytecode, size) {
log::trace!(" {:?}", cmd);
match cmd {
Cmd::Insert(bit) => {
subject.set(bit);
reference.set(bit);
}
Cmd::Remove(bit) => {
subject.clear(bit);
reference.clear(bit);
}
}
assert_eq!(subject.find_set(), reference.find_set());
}
assert_eq!(subject.find_set(), reference.find_set());
assert_eq!(enum_set_bits(&subject, size), reference.enum_set_bits());
}
macro_rules! gen_test {
($(#[$m:meta])* mod $name:ident, $size:literal) => {
$(#[$m])*
mod $name {
use super::*;
#[quickcheck]
fn test(bytecode: Vec<u8>) {
test_inner::<FixedPrioBitmap<$size>>(bytecode, $size);
}
}
};
}
gen_test!(mod size_0, 0);
gen_test!(mod size_1, 1);
gen_test!(mod size_10, 10);
gen_test!(mod size_100, 100);
gen_test!(mod size_1000, 1000);
gen_test!(
#[cfg(any(target_pointer_width = "32", target_pointer_width = "64", target_pointer_width = "128"))]
mod size_10000, 10000
);
gen_test!(
#[cfg(any(target_pointer_width = "64", target_pointer_width = "128"))]
mod size_100000, 100000
);
}
| None
}
| conditional_block |
prio_bitmap.rs | //! Provides `FixedPrioBitmap`, a bit array structure supporting
//! logarithmic-time bit scan operations.
use core::{convert::TryFrom, fmt};
use super::{ctz::trailing_zeros, BinInteger, Init};
/// The maximum bit count supported by [`FixedPrioBitmap`].
pub const FIXED_PRIO_BITMAP_MAX_LEN: usize = WORD_LEN * WORD_LEN * WORD_LEN;
/// A bit array structure supporting logarithmic-time bit scan operations.
///
/// All valid instantiations implement [`PrioBitmap`].
pub type FixedPrioBitmap<const LEN: usize> = If! {
if (LEN <= WORD_LEN) {
OneLevelPrioBitmap<LEN>
} else if (LEN <= WORD_LEN * WORD_LEN) {
TwoLevelPrioBitmapImpl<
OneLevelPrioBitmap<{(LEN + WORD_LEN - 1) / WORD_LEN}>,
{(LEN + WORD_LEN - 1) / WORD_LEN}
>
} else if (LEN <= WORD_LEN * WORD_LEN * WORD_LEN) {
TwoLevelPrioBitmapImpl<
TwoLevelPrioBitmapImpl<
OneLevelPrioBitmap<{(LEN + WORD_LEN * WORD_LEN - 1) / (WORD_LEN * WORD_LEN)}>,
{(LEN + WORD_LEN * WORD_LEN - 1) / (WORD_LEN * WORD_LEN)}
>,
{(LEN + WORD_LEN - 1) / WORD_LEN}
>
} else {
TooManyLevels
}
};
/// Get an instantiation of `OneLevelPrioBitmapImpl` capable of storing `LEN`
/// entries.
#[doc(hidden)]
pub type OneLevelPrioBitmap<const LEN: usize> = If! {
if (LEN == 0) {
()
} else if (LEN <= 8 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u8, LEN>
} else if (LEN <= 16 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u16, LEN>
} else if (LEN <= 32 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u32, LEN>
} else if (LEN <= 64 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u64, LEN>
} else if (LEN <= 128 && LEN <= WORD_LEN) {
OneLevelPrioBitmapImpl<u128, LEN>
} else {
TooManyLevels
}
};
/// Trait for [`FixedPrioBitmap`].
///
/// All methods panic when the given bit position is out of range.
pub trait PrioBitmap: Init + Send + Sync + Clone + Copy + fmt::Debug + 'static {
/// Get the bit at the specified position.
fn get(&self, i: usize) -> bool;
/// Clear the bit at the specified position.
fn clear(&mut self, i: usize);
/// Set the bit at the specified position.
fn set(&mut self, i: usize);
/// Get the position of the first set bit.
fn find_set(&self) -> Option<usize>;
}
impl PrioBitmap for () {
fn get(&self, _: usize) -> bool {
unreachable!()
}
fn clear(&mut self, _: usize) {
unreachable!()
}
fn set(&mut self, _: usize) {
unreachable!()
}
fn find_set(&self) -> Option<usize> {
None
}
}
/// Stores `LEN` (≤ `T::BITS`) entries.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct OneLevelPrioBitmapImpl<T, const LEN: usize> {
bits: T,
}
impl<T: BinInteger, const LEN: usize> Init for OneLevelPrioBitmapImpl<T, LEN> {
const INIT: Self = Self { bits: T::INIT };
}
impl<T: BinInteger, const LEN: usize> fmt::Debug for OneLevelPrioBitmapImpl<T, LEN> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list().entries(self.bits.one_digits()).finish()
}
}
impl<T: BinInteger, const LEN: usize> PrioBitmap for OneLevelPrioBitmapImpl<T, LEN> {
fn get(&self, i: usize) -> bool {
assert!(i < LEN && i < usize::try_from(T::BITS).unwrap());
self.bits.get_bit(i as u32)
}
fn clear(&mut self, i: usize) {
assert!(i < LEN && i < usize::try_from(T::BITS).unwrap());
self.bits.clear_bit(i as u32);
}
fn set(&mut self, i: usize) {
assert!(i < LEN && i < usize::try_from(T::BITS).unwrap());
self.bits.set_bit(i as u32);
}
fn find_set(&self) -> Option<usize> {
if LEN <= usize::BITS as usize {
// Use an optimized version of `trailing_zeros`
let bits = self.bits.to_usize().unwrap();
let i = trailing_zeros::<LEN>(bits);
if i == usize::BITS {
None
} else {
Some(i as usize)
}
} else {
let i = self.bits.trailing_zeros();
if i == T::BITS {
None
} else {
Some(i as usize)
}
}
}
}
/// Stores `WORD_LEN * LEN` entries. `T` must implement `PrioBitmap` and
/// be able to store `LEN` entries.
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct TwoLevelPrioBitmapImpl<T, const LEN: usize> {
// Invariant: `first.get(i) == (second[i] != 0)`
first: T,
second: [Word; LEN],
}
type Word = usize;
const WORD_LEN: usize = core::mem::size_of::<Word>() * 8;
impl<T: PrioBitmap, const LEN: usize> Init for TwoLevelPrioBitmapImpl<T, LEN> {
const INIT: Self = Self {
first: T::INIT,
second: [0; LEN],
};
}
impl<T: PrioBitmap, const LEN: usize> fmt::Debug for TwoLevelPrioBitmapImpl<T, LEN> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_list()
.entries(
self.second
.iter()
.enumerate()
.map(|(group_i, group)| {
group
.one_digits()
.map(move |subgroup_i| subgroup_i as usize + group_i * WORD_LEN)
})
.flatten(),
)
.finish()
}
}
impl<T: PrioBitmap, const LEN: usize> PrioBitmap for TwoLevelPrioBitmapImpl<T, LEN> {
fn get(&self, i: usize) -> bool {
self.second[i / WORD_LEN].get_bit(u32::try_from(i % WORD_LEN).unwrap())
}
fn clear(&mut self, i: usize) {
let group = &mut self.second[i / WORD_LEN];
group.clear_bit(u32::try_from(i % WORD_LEN).unwrap());
if *group == 0 {
self.first.clear(i / WORD_LEN);
}
}
fn set(&mut self, i: usize) {
let group = &mut self.second[i / WORD_LEN];
group.set_bit(u32::try_from(i % WORD_LEN).unwrap());
self.first.set(i / WORD_LEN);
}
fn find_set(&self) -> Option<usize> {
self.first.find_set().map(|group_i| {
let group = self.second[group_i];
let subgroup_i = group.trailing_zeros() as usize;
debug_assert_ne!(subgroup_i, WORD_LEN);
subgroup_i as usize + group_i * WORD_LEN
})
}
}
/// Indicates the requested size is not supported.
#[doc(hidden)]
#[non_exhaustive]
pub struct TooManyLevels {}
#[cfg(test)]
mod tests {
use super::*;
use quickcheck_macros::quickcheck;
use std::collections::BTreeSet;
struct BTreePrioBitmap(BTreeSet<usize>);
impl BTreePrioBitmap {
fn ne | -> Self {
Self(BTreeSet::new())
}
fn enum_set_bits(&self) -> Vec<usize> {
self.0.iter().cloned().collect()
}
fn clear(&mut self, i: usize) {
self.0.remove(&i);
}
fn set(&mut self, i: usize) {
self.0.insert(i);
}
fn find_set(&self) -> Option<usize> {
self.0.iter().next().cloned()
}
}
/// A modifying operation on `PrioBitmap`.
#[derive(Debug)]
enum Cmd {
Insert(usize),
Remove(usize),
}
/// Map random bytes to operations on `PrioBitmap`.
fn interpret(bytecode: &[u8], bitmap_len: usize) -> impl Iterator<Item = Cmd> + '_ {
let mut i = 0;
let mut known_set_bits = Vec::new();
std::iter::from_fn(move || {
if bitmap_len == 0 {
None
} else if let Some(instr) = bytecode.get(i..i + 5) {
i += 5;
let value = u32::from_le_bytes([instr[1], instr[2], instr[3], instr[4]]) as usize;
if instr[0] % 2 == 0 || known_set_bits.is_empty() {
let bit = value % bitmap_len;
known_set_bits.push(bit);
Some(Cmd::Insert(bit))
} else {
let i = value % known_set_bits.len();
let bit = known_set_bits.swap_remove(i);
Some(Cmd::Remove(bit))
}
} else {
None
}
})
}
fn enum_set_bits(bitmap: &impl PrioBitmap, bitmap_len: usize) -> Vec<usize> {
(0..bitmap_len).filter(|&i| bitmap.get(i)).collect()
}
fn test_inner<T: PrioBitmap>(bytecode: Vec<u8>, size: usize) {
let mut subject = T::INIT;
let mut reference = BTreePrioBitmap::new();
log::info!("size = {}", size);
for cmd in interpret(&bytecode, size) {
log::trace!(" {:?}", cmd);
match cmd {
Cmd::Insert(bit) => {
subject.set(bit);
reference.set(bit);
}
Cmd::Remove(bit) => {
subject.clear(bit);
reference.clear(bit);
}
}
assert_eq!(subject.find_set(), reference.find_set());
}
assert_eq!(subject.find_set(), reference.find_set());
assert_eq!(enum_set_bits(&subject, size), reference.enum_set_bits());
}
macro_rules! gen_test {
($(#[$m:meta])* mod $name:ident, $size:literal) => {
$(#[$m])*
mod $name {
use super::*;
#[quickcheck]
fn test(bytecode: Vec<u8>) {
test_inner::<FixedPrioBitmap<$size>>(bytecode, $size);
}
}
};
}
gen_test!(mod size_0, 0);
gen_test!(mod size_1, 1);
gen_test!(mod size_10, 10);
gen_test!(mod size_100, 100);
gen_test!(mod size_1000, 1000);
gen_test!(
#[cfg(any(target_pointer_width = "32", target_pointer_width = "64", target_pointer_width = "128"))]
mod size_10000, 10000
);
gen_test!(
#[cfg(any(target_pointer_width = "64", target_pointer_width = "128"))]
mod size_100000, 100000
);
}
| w() | identifier_name |
table.go | /*
<DBGo - A flat-file relational database engine implementation in Go programming language>
Copyright (C) <2011> <Houzuo (Howard) Guo>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
DBGo table has:
tableName.data - table data, formatted like a spreadsheet, e.g.
yJOSHUA FB CGG
NIKKI MYB NH
BUZZ TWITTER BUZZ01
CHRISTINA FACEBOOK CG
CHRISTINA SKYPE JAMD
tableName.def - column definitions, e.g.
~del:1
NAME:20
SITE:20
USERNAME:40
Note that ~del is a special column, if ~del is set to "y", it means the row is deleted.
tableName.exclusive - when the table is exclusively locked by a transaction, the
file is created and the content of the file is the ID of the transaction.
tableName.shared (directory) - when the table is locked by a transaction in shared mode,
a file is created, the file name is the ID of the transaction.
This package handles basic, low-level table logics.
*/
package table
import (
"os"
"time"
"strings"
"strconv"
"column"
"constant"
"st"
"util"
"logg"
"tablefilemanager"
)
type Table struct {
// Path is the table's database's path, must end with /
Path, Name, DefFilePath, DataFilePath string
DefFile, DataFile *os.File
Columns map[string]*column.Column
RowLength int
// sequence of columns
ColumnsInOrder []*column.Column
}
// Opens a table.
func Open(path, name string) (*Table, int) {
var table *Table
table = new(Table)
table.Path = path
table.Name = name
status := table.Init()
if status != st.OK {
logg.Err("table", "Open", "Failed to open"+path+name+" Err: "+string(status))
return nil, status
}
return table, st.OK
}
// Load the table (column definitions, etc.).
func (table *Table) Init() int {
// This function may be called multiple times, thus clear previous state.
table.RowLength = 0
table.Columns = make(map[string]*column.Column)
table.ColumnsInOrder = make([]*column.Column, 0)
table.DefFilePath = table.Path + table.Name + ".def"
table.DataFilePath = table.Path + table.Name + ".data"
status := table.OpenFiles()
if status != st.OK {
return status
}
defFileInfo, err := table.DefFile.Stat()
if err != nil {
logg.Err("table", "Init", err.String())
return st.CannotStatTableDefFile
}
// Read definition file into memeory.
content := make([]byte, defFileInfo.Size)
table.DefFile.Read(content)
// Each line contains one column definition.
lines := strings.Split(string(content), "\n")
for _, line := range lines {
if line != "" {
var aColumn *column.Column
// Convert the definition into a Column.
aColumn, status = column.ColumnFromDef(table.RowLength, line)
if status != st.OK {
return status
}
table.Columns[aColumn.Name] = aColumn
table.ColumnsInOrder = append(table.ColumnsInOrder[:], aColumn)
table.RowLength += aColumn.Length
}
}
table.RowLength++
return st.OK
}
// Opens file handles.
func (table *Table) OpenFiles() int {
var err os.Error
table.DefFile, err = os.OpenFile(table.DefFilePath, os.O_RDWR, constant.DataFilePerm)
if err == nil {
table.DataFile, err = os.OpenFile(table.DataFilePath, os.O_RDWR, constant.DataFilePerm)
if err != nil {
logg.Err("table", "OpenFiles", err.String())
return st.CannotOpenTableDataFile
}
} else {
logg.Err("table", "OpenFiles", err.String())
return st.CannotOpenTableDefFile
}
return st.OK
}
// Flushes table's files
func (table *Table) Flush() int {
err := table.DefFile.Sync()
if err == nil {
err = table.DataFile.Sync()
if err != nil {
logg.Err("table", "Flush", err.String())
return st.CannotFlushTableDataFile
}
} else {
return st.CannotFlushTableDefFile
}
return st.OK
}
// Seeks to a row (e.g. row number 10).
func (table *Table) Seek(rowNumber int) int {
var numberOfRows int
numberOfRows, status := table.NumberOfRows()
if status == st.OK && rowNumber < numberOfRows {
_, err := table.DataFile.Seek(int64(rowNumber*table.RowLength), 0)
if err != nil {
logg.Err("table", "Seek", err.String())
return st.CannotSeekTableDataFile
}
}
return st.OK
}
// Seeks to a row and column (e.g. row number 10 column "NAME").
func (table *Table) SeekColumn(rowNumber int, columnName string) int {
status := table.Seek(rowNumber)
if status == st.OK {
column, exists := table.Columns[columnName]
if exists {
_, err := table.DataFile.Seek(int64(column.Offset), 1)
if err != nil {
logg.Err("table", "SeekColumn", err.String())
return st.CannotSeekTableDataFile
}
}
}
return st.OK
}
// Returns the number of rows in this table.
func (table *Table) NumberOfRows() (int, int) {
var numberOfRows int
var dataFileInfo *os.FileInfo
dataFileInfo, err := table.DataFile.Stat()
if err != nil {
logg.Err("table", "NumberOfRows", err.String())
return 0, st.CannotStatTableDataFile
}
numberOfRows = int(dataFileInfo.Size) / table.RowLength
return numberOfRows, st.OK
}
// Reads a row and return a map representation (name1:value1, name2:value2...)
func (table *Table) Read(rowNumber int) (map[string]string, int) {
row := make(map[string]string)
status := table.Seek(rowNumber)
if status == st.OK {
rowInBytes := make([]byte, table.RowLength)
_, err := table.DataFile.Read(rowInBytes)
if err == nil {
// For the columns in their order
for _, column := range table.ColumnsInOrder {
// column1:value2, column2:value2...
row[column.Name] = strings.TrimSpace(string(rowInBytes[column.Offset : column.Offset+column.Length]))
}
} else {
logg.Err("table", "Read", err.String())
return nil, st.CannotReadTableDataFile
}
}
return row, st.OK
}
// Writes a column value without seeking to a cursor position.
func (table *Table) Write(column *column.Column, value string) int {
_, err := table.DataFile.WriteString(util.TrimLength(value, column.Length))
if err != nil {
return st.CannotWriteTableDataFile
}
return st.OK
}
// Inserts a row to the bottom of the table.
func (table *Table) Insert(row map[string]string) int {
// Seek to EOF
_, err := table.DataFile.Seek(0, 2)
if err == nil {
// For the columns in their order
for _, column := range table.ColumnsInOrder {
value, exists := row[column.Name]
if !exists {
value = ""
}
// Keep writing the column value.
status := table.Write(column, value)
if status != st.OK {
return status
}
}
// Write a new-line character.
_, err = table.DataFile.WriteString("\n")
if err != nil {
logg.Err("table", "Insert", err.String())
return st.CannotWriteTableDataFile
}
} else {
logg.Err("table", "Insert", err.String())
return st.CannotSeekTableDataFile
}
return st.OK
}
// Deletes a row.
func (table *Table) | (rowNumber int) int {
status := table.Seek(rowNumber)
if status == st.OK {
del, exists := table.Columns["~del"]
if exists {
// Set ~del column value to "y" indicating the row is deleted
return table.Write(del, "y")
} else {
return st.TableDoesNotHaveDelColumn
}
}
return st.OK
}
// Updates a row.
func (table *Table) Update(rowNumber int, row map[string]string) int {
for columnName, value := range row {
column, exists := table.Columns[columnName]
if exists {
// Seek to the row and column, then write value in.
status := table.SeekColumn(rowNumber, column.Name)
if status != st.OK {
return status
}
status = table.Write(column, value)
if status != st.OK {
return status
}
}
}
return st.OK
}
// Puts a new column.
func (table *Table) pushNewColumn(name string, length int) *column.Column {
newColumn := &column.Column{Name: name, Offset: table.RowLength - 1, Length: length}
table.ColumnsInOrder = append(table.ColumnsInOrder[:], newColumn)
table.Columns[name] = newColumn
return newColumn
}
// Adds a new column.
func (table *Table) Add(name string, length int) int {
_, exists := table.Columns[name]
if exists {
return st.ColumnAlreadyExists
}
if len(name) > constant.MaxColumnNameLength {
return st.ColumnNameTooLong
}
if length <= 0 {
return st.InvalidColumnLength
}
var numberOfRows int
numberOfRows, status := table.NumberOfRows()
if status == st.OK && numberOfRows > 0 {
// Rebuild data file if there are already rows in the table.
// (To leave space for the new column)
status = table.RebuildDataFile(name, length)
table.pushNewColumn(name, length)
} else {
newColumn := table.pushNewColumn(name, length)
// Write definition of the new column into definition file.
_, err := table.DefFile.Seek(0, 2)
if err != nil {
logg.Err("table", "Add", err.String())
return st.CannotSeekTableDefFile
}
_, err = table.DefFile.WriteString(column.ColumnToDef(newColumn))
if err != nil {
logg.Err("table", "Add", err.String())
return st.CannotWriteTableDefFile
}
}
table.RowLength += length
return st.OK
}
// Removes a column.
func (table *Table) Remove(name string) int {
var theColumn *column.Column
// Find index of the column.
var columnIndex int
for i, column := range table.ColumnsInOrder {
if column.Name == name {
theColumn = table.ColumnsInOrder[i]
columnIndex = i
break
}
}
if theColumn == nil {
return st.ColumnNameNotFound
}
if strings.HasPrefix(name, "~") {
return st.CannotRemoveSpecialColumn
}
length := theColumn.Length
// Remove the column from columns array.
table.ColumnsInOrder = append(table.ColumnsInOrder[:columnIndex], table.ColumnsInOrder[columnIndex+1:]...)
// Remove the column from columns map.
table.Columns[name] = nil, true
numberOfRows, status := table.NumberOfRows()
if status != st.OK {
return status
}
if numberOfRows > 0 {
// Rebuild data file if there are already rows in the table.
// (To remove data in the deleted column)
status = table.RebuildDataFile("", 0)
} else {
status = util.RemoveLine(table.DefFilePath, column.ColumnToDef(theColumn))
}
table.RowLength -= length
if status != st.OK {
return status
}
return st.OK
}
// Rebuild data file, get rid off removed rows, optionally leaves space for a new column.
func (table *Table) RebuildDataFile(name string, length int) int {
// Create a temporary table named by an accurate timestamp.
tempName := strconv.Itoa64(time.Nanoseconds())
tablefilemanager.Create(table.Path, tempName)
var tempTable *Table
tempTable, status := Open(table.Path, tempName)
if status != st.OK {
return status
}
// Put all columns of this table to the temporary table.
for _, column := range table.ColumnsInOrder {
tempTable.Add(column.Name, column.Length)
}
// Add the new column into the table as well.
if name != "" {
tempTable.Add(name, length)
}
var numberOfRows int
numberOfRows, status = table.NumberOfRows()
if status != st.OK {
return status
}
var everFailed bool
if name == "" {
// If no new column, simply copy rows from this table to the temp table.
for i := 0; i < numberOfRows; i++ {
row, ret := table.Read(i)
if ret != st.OK {
everFailed = true
}
if row["~del"] != "y" {
tempTable.Insert(row)
}
}
} else {
// If adding new column, not only copy rows from this table to the temporary one.
// Also leave space for the new column's values.
for i := 0; i < numberOfRows; i++ {
row, ret := table.Read(i)
if ret != st.OK {
everFailed = true
}
if row["~del"] != "y" {
row[name] = ""
tempTable.Insert(row)
}
}
}
// Flush all the changes made to temporary table.
status = tempTable.Flush()
if everFailed || status != st.OK {
return st.FailedToCopyCertainRows
}
// Delete the old table (one that is rebuilt), and rename the temporary
// table to the name of the rebuilt table.
status = tablefilemanager.Delete(table.Path, table.Name)
if status == st.OK {
status = tablefilemanager.Rename(table.Path, tempName, table.Name)
if status == st.OK {
// Files have been changed, thus re-open file handles.
return table.OpenFiles()
}
}
return st.OK
}
// Returns an array of all rows, not including deleted rows.
func (table *Table) SelectAll() ([]map[string]string, int) {
numberOfRows, status := table.NumberOfRows()
if status != st.OK {
return nil, status
}
var everFailed bool
rows := make([]map[string]string, numberOfRows)
for i := 0; i < numberOfRows; i++ {
row, status := table.Read(i)
if status != st.OK {
everFailed = true
}
if row["~del"] != "y" {
rows[i] = row
}
}
if everFailed {
return rows, st.FailedToReadCertainRows
}
return rows, st.OK
}
| Delete | identifier_name |
table.go | /*
<DBGo - A flat-file relational database engine implementation in Go programming language>
Copyright (C) <2011> <Houzuo (Howard) Guo>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
DBGo table has:
tableName.data - table data, formatted like a spreadsheet, e.g.
yJOSHUA FB CGG
NIKKI MYB NH
BUZZ TWITTER BUZZ01
CHRISTINA FACEBOOK CG
CHRISTINA SKYPE JAMD
tableName.def - column definitions, e.g.
~del:1
NAME:20
SITE:20
USERNAME:40
Note that ~del is a special column, if ~del is set to "y", it means the row is deleted.
tableName.exclusive - when the table is exclusively locked by a transaction, the
file is created and the content of the file is the ID of the transaction.
tableName.shared (directory) - when the table is locked by a transaction in shared mode,
a file is created, the file name is the ID of the transaction.
This package handles basic, low-level table logics.
*/
package table
import (
"os"
"time"
"strings"
"strconv"
"column"
"constant"
"st"
"util"
"logg"
"tablefilemanager"
)
type Table struct {
// Path is the table's database's path, must end with /
Path, Name, DefFilePath, DataFilePath string
DefFile, DataFile *os.File
Columns map[string]*column.Column
RowLength int
// sequence of columns
ColumnsInOrder []*column.Column
}
// Opens a table.
func Open(path, name string) (*Table, int) {
var table *Table
table = new(Table)
table.Path = path
table.Name = name
status := table.Init()
if status != st.OK |
return table, st.OK
}
// Load the table (column definitions, etc.).
func (table *Table) Init() int {
// This function may be called multiple times, thus clear previous state.
table.RowLength = 0
table.Columns = make(map[string]*column.Column)
table.ColumnsInOrder = make([]*column.Column, 0)
table.DefFilePath = table.Path + table.Name + ".def"
table.DataFilePath = table.Path + table.Name + ".data"
status := table.OpenFiles()
if status != st.OK {
return status
}
defFileInfo, err := table.DefFile.Stat()
if err != nil {
logg.Err("table", "Init", err.String())
return st.CannotStatTableDefFile
}
// Read definition file into memeory.
content := make([]byte, defFileInfo.Size)
table.DefFile.Read(content)
// Each line contains one column definition.
lines := strings.Split(string(content), "\n")
for _, line := range lines {
if line != "" {
var aColumn *column.Column
// Convert the definition into a Column.
aColumn, status = column.ColumnFromDef(table.RowLength, line)
if status != st.OK {
return status
}
table.Columns[aColumn.Name] = aColumn
table.ColumnsInOrder = append(table.ColumnsInOrder[:], aColumn)
table.RowLength += aColumn.Length
}
}
table.RowLength++
return st.OK
}
// Opens file handles.
func (table *Table) OpenFiles() int {
var err os.Error
table.DefFile, err = os.OpenFile(table.DefFilePath, os.O_RDWR, constant.DataFilePerm)
if err == nil {
table.DataFile, err = os.OpenFile(table.DataFilePath, os.O_RDWR, constant.DataFilePerm)
if err != nil {
logg.Err("table", "OpenFiles", err.String())
return st.CannotOpenTableDataFile
}
} else {
logg.Err("table", "OpenFiles", err.String())
return st.CannotOpenTableDefFile
}
return st.OK
}
// Flushes table's files
func (table *Table) Flush() int {
err := table.DefFile.Sync()
if err == nil {
err = table.DataFile.Sync()
if err != nil {
logg.Err("table", "Flush", err.String())
return st.CannotFlushTableDataFile
}
} else {
return st.CannotFlushTableDefFile
}
return st.OK
}
// Seeks to a row (e.g. row number 10).
func (table *Table) Seek(rowNumber int) int {
var numberOfRows int
numberOfRows, status := table.NumberOfRows()
if status == st.OK && rowNumber < numberOfRows {
_, err := table.DataFile.Seek(int64(rowNumber*table.RowLength), 0)
if err != nil {
logg.Err("table", "Seek", err.String())
return st.CannotSeekTableDataFile
}
}
return st.OK
}
// Seeks to a row and column (e.g. row number 10 column "NAME").
func (table *Table) SeekColumn(rowNumber int, columnName string) int {
status := table.Seek(rowNumber)
if status == st.OK {
column, exists := table.Columns[columnName]
if exists {
_, err := table.DataFile.Seek(int64(column.Offset), 1)
if err != nil {
logg.Err("table", "SeekColumn", err.String())
return st.CannotSeekTableDataFile
}
}
}
return st.OK
}
// Returns the number of rows in this table.
func (table *Table) NumberOfRows() (int, int) {
var numberOfRows int
var dataFileInfo *os.FileInfo
dataFileInfo, err := table.DataFile.Stat()
if err != nil {
logg.Err("table", "NumberOfRows", err.String())
return 0, st.CannotStatTableDataFile
}
numberOfRows = int(dataFileInfo.Size) / table.RowLength
return numberOfRows, st.OK
}
// Reads a row and return a map representation (name1:value1, name2:value2...)
func (table *Table) Read(rowNumber int) (map[string]string, int) {
row := make(map[string]string)
status := table.Seek(rowNumber)
if status == st.OK {
rowInBytes := make([]byte, table.RowLength)
_, err := table.DataFile.Read(rowInBytes)
if err == nil {
// For the columns in their order
for _, column := range table.ColumnsInOrder {
// column1:value2, column2:value2...
row[column.Name] = strings.TrimSpace(string(rowInBytes[column.Offset : column.Offset+column.Length]))
}
} else {
logg.Err("table", "Read", err.String())
return nil, st.CannotReadTableDataFile
}
}
return row, st.OK
}
// Writes a column value without seeking to a cursor position.
func (table *Table) Write(column *column.Column, value string) int {
_, err := table.DataFile.WriteString(util.TrimLength(value, column.Length))
if err != nil {
return st.CannotWriteTableDataFile
}
return st.OK
}
// Inserts a row to the bottom of the table.
func (table *Table) Insert(row map[string]string) int {
// Seek to EOF
_, err := table.DataFile.Seek(0, 2)
if err == nil {
// For the columns in their order
for _, column := range table.ColumnsInOrder {
value, exists := row[column.Name]
if !exists {
value = ""
}
// Keep writing the column value.
status := table.Write(column, value)
if status != st.OK {
return status
}
}
// Write a new-line character.
_, err = table.DataFile.WriteString("\n")
if err != nil {
logg.Err("table", "Insert", err.String())
return st.CannotWriteTableDataFile
}
} else {
logg.Err("table", "Insert", err.String())
return st.CannotSeekTableDataFile
}
return st.OK
}
// Deletes a row.
func (table *Table) Delete(rowNumber int) int {
status := table.Seek(rowNumber)
if status == st.OK {
del, exists := table.Columns["~del"]
if exists {
// Set ~del column value to "y" indicating the row is deleted
return table.Write(del, "y")
} else {
return st.TableDoesNotHaveDelColumn
}
}
return st.OK
}
// Updates a row.
func (table *Table) Update(rowNumber int, row map[string]string) int {
for columnName, value := range row {
column, exists := table.Columns[columnName]
if exists {
// Seek to the row and column, then write value in.
status := table.SeekColumn(rowNumber, column.Name)
if status != st.OK {
return status
}
status = table.Write(column, value)
if status != st.OK {
return status
}
}
}
return st.OK
}
// Puts a new column.
func (table *Table) pushNewColumn(name string, length int) *column.Column {
newColumn := &column.Column{Name: name, Offset: table.RowLength - 1, Length: length}
table.ColumnsInOrder = append(table.ColumnsInOrder[:], newColumn)
table.Columns[name] = newColumn
return newColumn
}
// Adds a new column.
func (table *Table) Add(name string, length int) int {
_, exists := table.Columns[name]
if exists {
return st.ColumnAlreadyExists
}
if len(name) > constant.MaxColumnNameLength {
return st.ColumnNameTooLong
}
if length <= 0 {
return st.InvalidColumnLength
}
var numberOfRows int
numberOfRows, status := table.NumberOfRows()
if status == st.OK && numberOfRows > 0 {
// Rebuild data file if there are already rows in the table.
// (To leave space for the new column)
status = table.RebuildDataFile(name, length)
table.pushNewColumn(name, length)
} else {
newColumn := table.pushNewColumn(name, length)
// Write definition of the new column into definition file.
_, err := table.DefFile.Seek(0, 2)
if err != nil {
logg.Err("table", "Add", err.String())
return st.CannotSeekTableDefFile
}
_, err = table.DefFile.WriteString(column.ColumnToDef(newColumn))
if err != nil {
logg.Err("table", "Add", err.String())
return st.CannotWriteTableDefFile
}
}
table.RowLength += length
return st.OK
}
// Removes a column.
func (table *Table) Remove(name string) int {
var theColumn *column.Column
// Find index of the column.
var columnIndex int
for i, column := range table.ColumnsInOrder {
if column.Name == name {
theColumn = table.ColumnsInOrder[i]
columnIndex = i
break
}
}
if theColumn == nil {
return st.ColumnNameNotFound
}
if strings.HasPrefix(name, "~") {
return st.CannotRemoveSpecialColumn
}
length := theColumn.Length
// Remove the column from columns array.
table.ColumnsInOrder = append(table.ColumnsInOrder[:columnIndex], table.ColumnsInOrder[columnIndex+1:]...)
// Remove the column from columns map.
table.Columns[name] = nil, true
numberOfRows, status := table.NumberOfRows()
if status != st.OK {
return status
}
if numberOfRows > 0 {
// Rebuild data file if there are already rows in the table.
// (To remove data in the deleted column)
status = table.RebuildDataFile("", 0)
} else {
status = util.RemoveLine(table.DefFilePath, column.ColumnToDef(theColumn))
}
table.RowLength -= length
if status != st.OK {
return status
}
return st.OK
}
// Rebuild data file, get rid off removed rows, optionally leaves space for a new column.
func (table *Table) RebuildDataFile(name string, length int) int {
// Create a temporary table named by an accurate timestamp.
tempName := strconv.Itoa64(time.Nanoseconds())
tablefilemanager.Create(table.Path, tempName)
var tempTable *Table
tempTable, status := Open(table.Path, tempName)
if status != st.OK {
return status
}
// Put all columns of this table to the temporary table.
for _, column := range table.ColumnsInOrder {
tempTable.Add(column.Name, column.Length)
}
// Add the new column into the table as well.
if name != "" {
tempTable.Add(name, length)
}
var numberOfRows int
numberOfRows, status = table.NumberOfRows()
if status != st.OK {
return status
}
var everFailed bool
if name == "" {
// If no new column, simply copy rows from this table to the temp table.
for i := 0; i < numberOfRows; i++ {
row, ret := table.Read(i)
if ret != st.OK {
everFailed = true
}
if row["~del"] != "y" {
tempTable.Insert(row)
}
}
} else {
// If adding new column, not only copy rows from this table to the temporary one.
// Also leave space for the new column's values.
for i := 0; i < numberOfRows; i++ {
row, ret := table.Read(i)
if ret != st.OK {
everFailed = true
}
if row["~del"] != "y" {
row[name] = ""
tempTable.Insert(row)
}
}
}
// Flush all the changes made to temporary table.
status = tempTable.Flush()
if everFailed || status != st.OK {
return st.FailedToCopyCertainRows
}
// Delete the old table (one that is rebuilt), and rename the temporary
// table to the name of the rebuilt table.
status = tablefilemanager.Delete(table.Path, table.Name)
if status == st.OK {
status = tablefilemanager.Rename(table.Path, tempName, table.Name)
if status == st.OK {
// Files have been changed, thus re-open file handles.
return table.OpenFiles()
}
}
return st.OK
}
// Returns an array of all rows, not including deleted rows.
func (table *Table) SelectAll() ([]map[string]string, int) {
numberOfRows, status := table.NumberOfRows()
if status != st.OK {
return nil, status
}
var everFailed bool
rows := make([]map[string]string, numberOfRows)
for i := 0; i < numberOfRows; i++ {
row, status := table.Read(i)
if status != st.OK {
everFailed = true
}
if row["~del"] != "y" {
rows[i] = row
}
}
if everFailed {
return rows, st.FailedToReadCertainRows
}
return rows, st.OK
}
| {
logg.Err("table", "Open", "Failed to open"+path+name+" Err: "+string(status))
return nil, status
} | conditional_block |
table.go | /*
<DBGo - A flat-file relational database engine implementation in Go programming language>
Copyright (C) <2011> <Houzuo (Howard) Guo>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
DBGo table has:
tableName.data - table data, formatted like a spreadsheet, e.g.
yJOSHUA FB CGG
NIKKI MYB NH
BUZZ TWITTER BUZZ01
CHRISTINA FACEBOOK CG
CHRISTINA SKYPE JAMD
tableName.def - column definitions, e.g.
~del:1
NAME:20
SITE:20
USERNAME:40
Note that ~del is a special column, if ~del is set to "y", it means the row is deleted.
tableName.exclusive - when the table is exclusively locked by a transaction, the
file is created and the content of the file is the ID of the transaction.
tableName.shared (directory) - when the table is locked by a transaction in shared mode,
a file is created, the file name is the ID of the transaction.
This package handles basic, low-level table logics.
*/
package table
import (
"os"
"time"
"strings"
"strconv"
"column"
"constant"
"st"
"util"
"logg"
"tablefilemanager"
)
type Table struct {
// Path is the table's database's path, must end with /
Path, Name, DefFilePath, DataFilePath string
DefFile, DataFile *os.File
Columns map[string]*column.Column
RowLength int
// sequence of columns
ColumnsInOrder []*column.Column
}
// Opens a table.
func Open(path, name string) (*Table, int) {
var table *Table
table = new(Table)
table.Path = path
table.Name = name
status := table.Init()
if status != st.OK {
logg.Err("table", "Open", "Failed to open"+path+name+" Err: "+string(status))
return nil, status
}
return table, st.OK
}
// Load the table (column definitions, etc.).
func (table *Table) Init() int {
// This function may be called multiple times, thus clear previous state.
table.RowLength = 0
table.Columns = make(map[string]*column.Column)
table.ColumnsInOrder = make([]*column.Column, 0)
table.DefFilePath = table.Path + table.Name + ".def"
table.DataFilePath = table.Path + table.Name + ".data"
status := table.OpenFiles()
if status != st.OK {
return status
}
defFileInfo, err := table.DefFile.Stat()
if err != nil {
logg.Err("table", "Init", err.String())
return st.CannotStatTableDefFile
}
// Read definition file into memeory.
content := make([]byte, defFileInfo.Size)
table.DefFile.Read(content)
// Each line contains one column definition.
lines := strings.Split(string(content), "\n")
for _, line := range lines {
if line != "" {
var aColumn *column.Column
// Convert the definition into a Column.
aColumn, status = column.ColumnFromDef(table.RowLength, line)
if status != st.OK {
return status
}
table.Columns[aColumn.Name] = aColumn
table.ColumnsInOrder = append(table.ColumnsInOrder[:], aColumn)
table.RowLength += aColumn.Length
}
}
table.RowLength++
return st.OK
}
// Opens file handles.
func (table *Table) OpenFiles() int {
var err os.Error
table.DefFile, err = os.OpenFile(table.DefFilePath, os.O_RDWR, constant.DataFilePerm)
if err == nil {
table.DataFile, err = os.OpenFile(table.DataFilePath, os.O_RDWR, constant.DataFilePerm)
if err != nil {
logg.Err("table", "OpenFiles", err.String())
return st.CannotOpenTableDataFile
}
} else {
logg.Err("table", "OpenFiles", err.String())
return st.CannotOpenTableDefFile
}
return st.OK
}
// Flushes table's files
func (table *Table) Flush() int {
err := table.DefFile.Sync()
if err == nil {
err = table.DataFile.Sync()
if err != nil {
logg.Err("table", "Flush", err.String())
return st.CannotFlushTableDataFile
}
} else {
return st.CannotFlushTableDefFile
}
return st.OK
}
// Seeks to a row (e.g. row number 10).
func (table *Table) Seek(rowNumber int) int {
var numberOfRows int
numberOfRows, status := table.NumberOfRows()
if status == st.OK && rowNumber < numberOfRows {
_, err := table.DataFile.Seek(int64(rowNumber*table.RowLength), 0)
if err != nil {
logg.Err("table", "Seek", err.String())
return st.CannotSeekTableDataFile
}
}
return st.OK
}
// Seeks to a row and column (e.g. row number 10 column "NAME").
func (table *Table) SeekColumn(rowNumber int, columnName string) int {
status := table.Seek(rowNumber)
if status == st.OK {
column, exists := table.Columns[columnName]
if exists {
_, err := table.DataFile.Seek(int64(column.Offset), 1)
if err != nil {
logg.Err("table", "SeekColumn", err.String())
return st.CannotSeekTableDataFile
}
}
}
return st.OK
}
// Returns the number of rows in this table.
func (table *Table) NumberOfRows() (int, int) {
var numberOfRows int
var dataFileInfo *os.FileInfo
dataFileInfo, err := table.DataFile.Stat()
if err != nil {
logg.Err("table", "NumberOfRows", err.String())
return 0, st.CannotStatTableDataFile
}
numberOfRows = int(dataFileInfo.Size) / table.RowLength
return numberOfRows, st.OK
}
// Reads a row and return a map representation (name1:value1, name2:value2...)
func (table *Table) Read(rowNumber int) (map[string]string, int) |
// Writes a column value without seeking to a cursor position.
func (table *Table) Write(column *column.Column, value string) int {
_, err := table.DataFile.WriteString(util.TrimLength(value, column.Length))
if err != nil {
return st.CannotWriteTableDataFile
}
return st.OK
}
// Inserts a row to the bottom of the table.
func (table *Table) Insert(row map[string]string) int {
// Seek to EOF
_, err := table.DataFile.Seek(0, 2)
if err == nil {
// For the columns in their order
for _, column := range table.ColumnsInOrder {
value, exists := row[column.Name]
if !exists {
value = ""
}
// Keep writing the column value.
status := table.Write(column, value)
if status != st.OK {
return status
}
}
// Write a new-line character.
_, err = table.DataFile.WriteString("\n")
if err != nil {
logg.Err("table", "Insert", err.String())
return st.CannotWriteTableDataFile
}
} else {
logg.Err("table", "Insert", err.String())
return st.CannotSeekTableDataFile
}
return st.OK
}
// Deletes a row.
func (table *Table) Delete(rowNumber int) int {
status := table.Seek(rowNumber)
if status == st.OK {
del, exists := table.Columns["~del"]
if exists {
// Set ~del column value to "y" indicating the row is deleted
return table.Write(del, "y")
} else {
return st.TableDoesNotHaveDelColumn
}
}
return st.OK
}
// Updates a row.
func (table *Table) Update(rowNumber int, row map[string]string) int {
for columnName, value := range row {
column, exists := table.Columns[columnName]
if exists {
// Seek to the row and column, then write value in.
status := table.SeekColumn(rowNumber, column.Name)
if status != st.OK {
return status
}
status = table.Write(column, value)
if status != st.OK {
return status
}
}
}
return st.OK
}
// Puts a new column.
func (table *Table) pushNewColumn(name string, length int) *column.Column {
newColumn := &column.Column{Name: name, Offset: table.RowLength - 1, Length: length}
table.ColumnsInOrder = append(table.ColumnsInOrder[:], newColumn)
table.Columns[name] = newColumn
return newColumn
}
// Adds a new column.
func (table *Table) Add(name string, length int) int {
_, exists := table.Columns[name]
if exists {
return st.ColumnAlreadyExists
}
if len(name) > constant.MaxColumnNameLength {
return st.ColumnNameTooLong
}
if length <= 0 {
return st.InvalidColumnLength
}
var numberOfRows int
numberOfRows, status := table.NumberOfRows()
if status == st.OK && numberOfRows > 0 {
// Rebuild data file if there are already rows in the table.
// (To leave space for the new column)
status = table.RebuildDataFile(name, length)
table.pushNewColumn(name, length)
} else {
newColumn := table.pushNewColumn(name, length)
// Write definition of the new column into definition file.
_, err := table.DefFile.Seek(0, 2)
if err != nil {
logg.Err("table", "Add", err.String())
return st.CannotSeekTableDefFile
}
_, err = table.DefFile.WriteString(column.ColumnToDef(newColumn))
if err != nil {
logg.Err("table", "Add", err.String())
return st.CannotWriteTableDefFile
}
}
table.RowLength += length
return st.OK
}
// Removes a column.
func (table *Table) Remove(name string) int {
var theColumn *column.Column
// Find index of the column.
var columnIndex int
for i, column := range table.ColumnsInOrder {
if column.Name == name {
theColumn = table.ColumnsInOrder[i]
columnIndex = i
break
}
}
if theColumn == nil {
return st.ColumnNameNotFound
}
if strings.HasPrefix(name, "~") {
return st.CannotRemoveSpecialColumn
}
length := theColumn.Length
// Remove the column from columns array.
table.ColumnsInOrder = append(table.ColumnsInOrder[:columnIndex], table.ColumnsInOrder[columnIndex+1:]...)
// Remove the column from columns map.
table.Columns[name] = nil, true
numberOfRows, status := table.NumberOfRows()
if status != st.OK {
return status
}
if numberOfRows > 0 {
// Rebuild data file if there are already rows in the table.
// (To remove data in the deleted column)
status = table.RebuildDataFile("", 0)
} else {
status = util.RemoveLine(table.DefFilePath, column.ColumnToDef(theColumn))
}
table.RowLength -= length
if status != st.OK {
return status
}
return st.OK
}
// Rebuild data file, get rid off removed rows, optionally leaves space for a new column.
func (table *Table) RebuildDataFile(name string, length int) int {
// Create a temporary table named by an accurate timestamp.
tempName := strconv.Itoa64(time.Nanoseconds())
tablefilemanager.Create(table.Path, tempName)
var tempTable *Table
tempTable, status := Open(table.Path, tempName)
if status != st.OK {
return status
}
// Put all columns of this table to the temporary table.
for _, column := range table.ColumnsInOrder {
tempTable.Add(column.Name, column.Length)
}
// Add the new column into the table as well.
if name != "" {
tempTable.Add(name, length)
}
var numberOfRows int
numberOfRows, status = table.NumberOfRows()
if status != st.OK {
return status
}
var everFailed bool
if name == "" {
// If no new column, simply copy rows from this table to the temp table.
for i := 0; i < numberOfRows; i++ {
row, ret := table.Read(i)
if ret != st.OK {
everFailed = true
}
if row["~del"] != "y" {
tempTable.Insert(row)
}
}
} else {
// If adding new column, not only copy rows from this table to the temporary one.
// Also leave space for the new column's values.
for i := 0; i < numberOfRows; i++ {
row, ret := table.Read(i)
if ret != st.OK {
everFailed = true
}
if row["~del"] != "y" {
row[name] = ""
tempTable.Insert(row)
}
}
}
// Flush all the changes made to temporary table.
status = tempTable.Flush()
if everFailed || status != st.OK {
return st.FailedToCopyCertainRows
}
// Delete the old table (one that is rebuilt), and rename the temporary
// table to the name of the rebuilt table.
status = tablefilemanager.Delete(table.Path, table.Name)
if status == st.OK {
status = tablefilemanager.Rename(table.Path, tempName, table.Name)
if status == st.OK {
// Files have been changed, thus re-open file handles.
return table.OpenFiles()
}
}
return st.OK
}
// Returns an array of all rows, not including deleted rows.
func (table *Table) SelectAll() ([]map[string]string, int) {
numberOfRows, status := table.NumberOfRows()
if status != st.OK {
return nil, status
}
var everFailed bool
rows := make([]map[string]string, numberOfRows)
for i := 0; i < numberOfRows; i++ {
row, status := table.Read(i)
if status != st.OK {
everFailed = true
}
if row["~del"] != "y" {
rows[i] = row
}
}
if everFailed {
return rows, st.FailedToReadCertainRows
}
return rows, st.OK
}
| {
row := make(map[string]string)
status := table.Seek(rowNumber)
if status == st.OK {
rowInBytes := make([]byte, table.RowLength)
_, err := table.DataFile.Read(rowInBytes)
if err == nil {
// For the columns in their order
for _, column := range table.ColumnsInOrder {
// column1:value2, column2:value2...
row[column.Name] = strings.TrimSpace(string(rowInBytes[column.Offset : column.Offset+column.Length]))
}
} else {
logg.Err("table", "Read", err.String())
return nil, st.CannotReadTableDataFile
}
}
return row, st.OK
} | identifier_body |
table.go | /*
<DBGo - A flat-file relational database engine implementation in Go programming language>
Copyright (C) <2011> <Houzuo (Howard) Guo>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
DBGo table has:
tableName.data - table data, formatted like a spreadsheet, e.g.
yJOSHUA FB CGG
NIKKI MYB NH
BUZZ TWITTER BUZZ01
CHRISTINA FACEBOOK CG
CHRISTINA SKYPE JAMD
tableName.def - column definitions, e.g.
~del:1
NAME:20
SITE:20
USERNAME:40
Note that ~del is a special column, if ~del is set to "y", it means the row is deleted.
tableName.exclusive - when the table is exclusively locked by a transaction, the
file is created and the content of the file is the ID of the transaction.
tableName.shared (directory) - when the table is locked by a transaction in shared mode,
a file is created, the file name is the ID of the transaction.
This package handles basic, low-level table logics.
*/
package table
import (
"os"
"time"
"strings"
"strconv"
"column"
"constant"
"st"
"util"
"logg"
"tablefilemanager"
)
type Table struct {
// Path is the table's database's path, must end with /
Path, Name, DefFilePath, DataFilePath string
DefFile, DataFile *os.File
Columns map[string]*column.Column
RowLength int
// sequence of columns
ColumnsInOrder []*column.Column
}
// Opens a table.
func Open(path, name string) (*Table, int) {
var table *Table
table = new(Table)
table.Path = path
table.Name = name
status := table.Init()
if status != st.OK {
logg.Err("table", "Open", "Failed to open"+path+name+" Err: "+string(status))
return nil, status
}
return table, st.OK
}
// Load the table (column definitions, etc.).
func (table *Table) Init() int {
// This function may be called multiple times, thus clear previous state.
table.RowLength = 0
table.Columns = make(map[string]*column.Column)
table.ColumnsInOrder = make([]*column.Column, 0)
table.DefFilePath = table.Path + table.Name + ".def"
table.DataFilePath = table.Path + table.Name + ".data"
status := table.OpenFiles()
if status != st.OK {
return status
}
defFileInfo, err := table.DefFile.Stat()
if err != nil {
logg.Err("table", "Init", err.String())
return st.CannotStatTableDefFile
}
// Read definition file into memeory.
content := make([]byte, defFileInfo.Size)
table.DefFile.Read(content)
// Each line contains one column definition.
lines := strings.Split(string(content), "\n")
for _, line := range lines {
if line != "" {
var aColumn *column.Column
// Convert the definition into a Column.
aColumn, status = column.ColumnFromDef(table.RowLength, line)
if status != st.OK {
return status
}
table.Columns[aColumn.Name] = aColumn
table.ColumnsInOrder = append(table.ColumnsInOrder[:], aColumn)
table.RowLength += aColumn.Length
}
}
table.RowLength++
return st.OK
}
// Opens file handles.
func (table *Table) OpenFiles() int {
var err os.Error
table.DefFile, err = os.OpenFile(table.DefFilePath, os.O_RDWR, constant.DataFilePerm)
if err == nil {
table.DataFile, err = os.OpenFile(table.DataFilePath, os.O_RDWR, constant.DataFilePerm)
if err != nil {
logg.Err("table", "OpenFiles", err.String())
return st.CannotOpenTableDataFile
}
} else {
logg.Err("table", "OpenFiles", err.String())
return st.CannotOpenTableDefFile
}
return st.OK
}
// Flushes table's files
func (table *Table) Flush() int {
err := table.DefFile.Sync()
if err == nil {
err = table.DataFile.Sync()
if err != nil {
logg.Err("table", "Flush", err.String())
return st.CannotFlushTableDataFile
}
} else {
return st.CannotFlushTableDefFile
}
return st.OK
}
// Seeks to a row (e.g. row number 10).
func (table *Table) Seek(rowNumber int) int {
var numberOfRows int
numberOfRows, status := table.NumberOfRows()
if status == st.OK && rowNumber < numberOfRows {
_, err := table.DataFile.Seek(int64(rowNumber*table.RowLength), 0)
if err != nil {
logg.Err("table", "Seek", err.String())
return st.CannotSeekTableDataFile
}
}
return st.OK
}
// Seeks to a row and column (e.g. row number 10 column "NAME").
func (table *Table) SeekColumn(rowNumber int, columnName string) int {
status := table.Seek(rowNumber)
if status == st.OK {
column, exists := table.Columns[columnName]
if exists {
_, err := table.DataFile.Seek(int64(column.Offset), 1)
if err != nil {
logg.Err("table", "SeekColumn", err.String())
return st.CannotSeekTableDataFile
}
}
}
return st.OK
}
// Returns the number of rows in this table.
func (table *Table) NumberOfRows() (int, int) {
var numberOfRows int
var dataFileInfo *os.FileInfo
dataFileInfo, err := table.DataFile.Stat()
if err != nil {
logg.Err("table", "NumberOfRows", err.String())
return 0, st.CannotStatTableDataFile
}
numberOfRows = int(dataFileInfo.Size) / table.RowLength
return numberOfRows, st.OK
}
// Reads a row and return a map representation (name1:value1, name2:value2...)
func (table *Table) Read(rowNumber int) (map[string]string, int) {
row := make(map[string]string)
status := table.Seek(rowNumber)
if status == st.OK {
rowInBytes := make([]byte, table.RowLength)
_, err := table.DataFile.Read(rowInBytes)
if err == nil {
// For the columns in their order
for _, column := range table.ColumnsInOrder {
// column1:value2, column2:value2...
row[column.Name] = strings.TrimSpace(string(rowInBytes[column.Offset : column.Offset+column.Length]))
}
} else {
logg.Err("table", "Read", err.String())
return nil, st.CannotReadTableDataFile
}
}
return row, st.OK
}
// Writes a column value without seeking to a cursor position.
func (table *Table) Write(column *column.Column, value string) int {
_, err := table.DataFile.WriteString(util.TrimLength(value, column.Length))
if err != nil {
return st.CannotWriteTableDataFile
}
return st.OK
}
// Inserts a row to the bottom of the table.
func (table *Table) Insert(row map[string]string) int {
// Seek to EOF
_, err := table.DataFile.Seek(0, 2) | // For the columns in their order
for _, column := range table.ColumnsInOrder {
value, exists := row[column.Name]
if !exists {
value = ""
}
// Keep writing the column value.
status := table.Write(column, value)
if status != st.OK {
return status
}
}
// Write a new-line character.
_, err = table.DataFile.WriteString("\n")
if err != nil {
logg.Err("table", "Insert", err.String())
return st.CannotWriteTableDataFile
}
} else {
logg.Err("table", "Insert", err.String())
return st.CannotSeekTableDataFile
}
return st.OK
}
// Deletes a row.
func (table *Table) Delete(rowNumber int) int {
status := table.Seek(rowNumber)
if status == st.OK {
del, exists := table.Columns["~del"]
if exists {
// Set ~del column value to "y" indicating the row is deleted
return table.Write(del, "y")
} else {
return st.TableDoesNotHaveDelColumn
}
}
return st.OK
}
// Updates a row.
func (table *Table) Update(rowNumber int, row map[string]string) int {
for columnName, value := range row {
column, exists := table.Columns[columnName]
if exists {
// Seek to the row and column, then write value in.
status := table.SeekColumn(rowNumber, column.Name)
if status != st.OK {
return status
}
status = table.Write(column, value)
if status != st.OK {
return status
}
}
}
return st.OK
}
// Puts a new column.
func (table *Table) pushNewColumn(name string, length int) *column.Column {
newColumn := &column.Column{Name: name, Offset: table.RowLength - 1, Length: length}
table.ColumnsInOrder = append(table.ColumnsInOrder[:], newColumn)
table.Columns[name] = newColumn
return newColumn
}
// Adds a new column.
func (table *Table) Add(name string, length int) int {
_, exists := table.Columns[name]
if exists {
return st.ColumnAlreadyExists
}
if len(name) > constant.MaxColumnNameLength {
return st.ColumnNameTooLong
}
if length <= 0 {
return st.InvalidColumnLength
}
var numberOfRows int
numberOfRows, status := table.NumberOfRows()
if status == st.OK && numberOfRows > 0 {
// Rebuild data file if there are already rows in the table.
// (To leave space for the new column)
status = table.RebuildDataFile(name, length)
table.pushNewColumn(name, length)
} else {
newColumn := table.pushNewColumn(name, length)
// Write definition of the new column into definition file.
_, err := table.DefFile.Seek(0, 2)
if err != nil {
logg.Err("table", "Add", err.String())
return st.CannotSeekTableDefFile
}
_, err = table.DefFile.WriteString(column.ColumnToDef(newColumn))
if err != nil {
logg.Err("table", "Add", err.String())
return st.CannotWriteTableDefFile
}
}
table.RowLength += length
return st.OK
}
// Removes a column.
func (table *Table) Remove(name string) int {
var theColumn *column.Column
// Find index of the column.
var columnIndex int
for i, column := range table.ColumnsInOrder {
if column.Name == name {
theColumn = table.ColumnsInOrder[i]
columnIndex = i
break
}
}
if theColumn == nil {
return st.ColumnNameNotFound
}
if strings.HasPrefix(name, "~") {
return st.CannotRemoveSpecialColumn
}
length := theColumn.Length
// Remove the column from columns array.
table.ColumnsInOrder = append(table.ColumnsInOrder[:columnIndex], table.ColumnsInOrder[columnIndex+1:]...)
// Remove the column from columns map.
table.Columns[name] = nil, true
numberOfRows, status := table.NumberOfRows()
if status != st.OK {
return status
}
if numberOfRows > 0 {
// Rebuild data file if there are already rows in the table.
// (To remove data in the deleted column)
status = table.RebuildDataFile("", 0)
} else {
status = util.RemoveLine(table.DefFilePath, column.ColumnToDef(theColumn))
}
table.RowLength -= length
if status != st.OK {
return status
}
return st.OK
}
// Rebuild data file, get rid off removed rows, optionally leaves space for a new column.
func (table *Table) RebuildDataFile(name string, length int) int {
// Create a temporary table named by an accurate timestamp.
tempName := strconv.Itoa64(time.Nanoseconds())
tablefilemanager.Create(table.Path, tempName)
var tempTable *Table
tempTable, status := Open(table.Path, tempName)
if status != st.OK {
return status
}
// Put all columns of this table to the temporary table.
for _, column := range table.ColumnsInOrder {
tempTable.Add(column.Name, column.Length)
}
// Add the new column into the table as well.
if name != "" {
tempTable.Add(name, length)
}
var numberOfRows int
numberOfRows, status = table.NumberOfRows()
if status != st.OK {
return status
}
var everFailed bool
if name == "" {
// If no new column, simply copy rows from this table to the temp table.
for i := 0; i < numberOfRows; i++ {
row, ret := table.Read(i)
if ret != st.OK {
everFailed = true
}
if row["~del"] != "y" {
tempTable.Insert(row)
}
}
} else {
// If adding new column, not only copy rows from this table to the temporary one.
// Also leave space for the new column's values.
for i := 0; i < numberOfRows; i++ {
row, ret := table.Read(i)
if ret != st.OK {
everFailed = true
}
if row["~del"] != "y" {
row[name] = ""
tempTable.Insert(row)
}
}
}
// Flush all the changes made to temporary table.
status = tempTable.Flush()
if everFailed || status != st.OK {
return st.FailedToCopyCertainRows
}
// Delete the old table (one that is rebuilt), and rename the temporary
// table to the name of the rebuilt table.
status = tablefilemanager.Delete(table.Path, table.Name)
if status == st.OK {
status = tablefilemanager.Rename(table.Path, tempName, table.Name)
if status == st.OK {
// Files have been changed, thus re-open file handles.
return table.OpenFiles()
}
}
return st.OK
}
// Returns an array of all rows, not including deleted rows.
func (table *Table) SelectAll() ([]map[string]string, int) {
numberOfRows, status := table.NumberOfRows()
if status != st.OK {
return nil, status
}
var everFailed bool
rows := make([]map[string]string, numberOfRows)
for i := 0; i < numberOfRows; i++ {
row, status := table.Read(i)
if status != st.OK {
everFailed = true
}
if row["~del"] != "y" {
rows[i] = row
}
}
if everFailed {
return rows, st.FailedToReadCertainRows
}
return rows, st.OK
} | if err == nil { | random_line_split |
fsexp.go | /*
* Copyright 1999-2019 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pod
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/chaosblade-io/chaosblade-spec-go/util"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/chaosblade-io/chaosblade-spec-go/spec"
"github.com/chaosblade-io/chaosblade-operator/channel"
"github.com/chaosblade-io/chaosblade-operator/exec/model"
"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
chaosfs "github.com/chaosblade-io/chaosblade-operator/pkg/hookfs"
webhook "github.com/chaosblade-io/chaosblade-operator/pkg/webhook/pod"
)
type PodIOActionSpec struct {
spec.BaseExpActionCommandSpec
}
func NewPodIOActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
return &PodIOActionSpec{
spec.BaseExpActionCommandSpec{
ActionMatchers: []spec.ExpFlagSpec{
&spec.ExpFlag{
Name: "method",
Desc: "inject methods, only support read and write",
},
&spec.ExpFlag{
Name: "delay",
Desc: "file io delay time, ms",
},
},
ActionFlags: []spec.ExpFlagSpec{
&spec.ExpFlag{
Name: "path",
Desc: "I/O exception path or file",
},
&spec.ExpFlag{
Name: "random",
Desc: "random inject I/O code",
NoArgs: true,
},
&spec.ExpFlag{
Name: "percent",
Desc: "I/O error percent [0-100],",
},
&spec.ExpFlag{
Name: "errno",
Desc: "I/O error code",
},
},
ActionExecutor: &PodIOActionExecutor{client: client},
ActionExample: `# Two types of exceptions were injected for the READ operation, with an exception rate of 60 percent
blade create k8s pod-pod IO --method read --delay 1000 --path /home --percent 60 --errno 28 --labels "app=test" --namespace default`,
ActionCategories: []string{model.CategorySystemContainer},
},
}
}
func (*PodIOActionSpec) Name() string {
return "IO"
}
func (*PodIOActionSpec) Aliases() []string {
return []string{}
}
func (*PodIOActionSpec) ShortDesc() string {
return "Pod File System IO Exception"
}
func (*PodIOActionSpec) LongDesc() string {
return "Pod File System IO Exception"
}
type PodIOActionExecutor struct {
client *channel.Client
}
func (*PodIOActionExecutor) Name() string {
return "IO"
}
func (*PodIOActionExecutor) SetChannel(channel spec.Channel) {
}
func (d *PodIOActionExecutor) Exec(uid string, ctx context.Context, model *spec.ExpModel) *spec.Response {
if _, ok := spec.IsDestroy(ctx); ok {
return d.destroy(ctx, model)
} else {
return d.create(ctx, model)
}
}
func (d *PodIOActionExecutor) create(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
experimentId := model.GetExperimentIdFromContext(ctx)
if err != nil {
util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
return spec.ResponseFailWithFlags(spec.ContainerInContextNotFound,
v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
}
logrusField := logrus.WithField("experiment", experimentId)
statuses := make([]v1alpha1.ResourceStatus, 0)
success := false
for _, c := range containerMatchedList {
status := v1alpha1.ResourceStatus{
Kind: v1alpha1.PodKind,
Identifier: c.GetIdentifier(),
}
pod := &v1.Pod{}
err := d.client.Get(context.TODO(), client.ObjectKey{Namespace: c.Namespace, Name: c.PodName}, pod)
if err != nil {
logrusField.Errorf("get pod %s err, %v", c.PodName, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.K8sExecFailed.Sprintf("get", err), spec.K8sExecFailed.Code))
continue
}
if !isPodReady(pod) {
logrusField.Infof("pod %s is not ready", c.PodName)
statuses = append(statuses, status.CreateFailResourceStatus(spec.PodNotReady.Msg, spec.PodNotReady.Code))
continue
}
methods, ok := expModel.ActionFlags["method"]
if !ok && len(methods) != 0 {
logrusField.Error("method cannot be empty")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterLess.Sprintf("method"), spec.ParameterLess.Code))
continue
}
var delay, percent, errno int
delayStr, ok := expModel.ActionFlags["delay"]
if ok && len(delayStr) != 0 {
delay, err = strconv.Atoi(delayStr)
if err != nil {
logrusField.Error("delay must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("delay", delayStr, err), spec.ParameterIllegal.Code))
continue
}
}
percentStr, ok := expModel.ActionFlags["percent"]
if ok && len(percentStr) != 0 |
errnoStr, ok := expModel.ActionFlags["errno"]
if ok && len(errnoStr) != 0 {
if errno, err = strconv.Atoi(errnoStr); err != nil {
logrusField.Error("errno must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("errno", errnoStr, err), spec.ParameterIllegal.Code))
continue
}
}
random := false
randomStr, ok := expModel.ActionFlags["random"]
if ok && randomStr == "true" {
random = true
}
request := &chaosfs.InjectMessage{
Methods: strings.Split(methods, ","),
Path: expModel.ActionFlags["path"],
Delay: uint32(delay),
Percent: uint32(percent),
Random: random,
Errno: uint32(errno),
}
chaosfsClient, err := getChaosfsClient(pod)
if err != nil {
logrusField.WithField("pod", c.PodName).WithField("request", request).
Errorf("init chaosfs client failed: %v", err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsClientFailed.Sprintf(pod.Name, err), spec.ChaosfsClientFailed.Code))
continue
}
err = chaosfsClient.InjectFault(ctx, request)
if err != nil {
logrusField.Errorf("inject io exception in pod %s failed, request %v, err: %v", c.PodName, request, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsInjectFailed.Sprintf(pod.Name, request, err), spec.ChaosfsInjectFailed.Code))
continue
}
statuses = append(statuses, status.CreateSuccessResourceStatus())
success = true
}
var experimentStatus v1alpha1.ExperimentStatus
if success {
experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
} else {
experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
}
return spec.ReturnResultIgnoreCode(experimentStatus)
}
func (d *PodIOActionExecutor) destroy(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
experimentId := model.GetExperimentIdFromContext(ctx)
if err != nil {
util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
}
logrusField := logrus.WithField("experiment", experimentId)
experimentStatus := v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{})
statuses := experimentStatus.ResStatuses
for _, c := range containerMatchedList {
status := v1alpha1.ResourceStatus{
Kind: v1alpha1.PodKind,
Identifier: c.GetIdentifier(),
}
pod := &v1.Pod{}
err := d.client.Get(context.TODO(), client.ObjectKey{Namespace: c.Namespace, Name: c.PodName}, pod)
if err != nil {
logrusField.Errorf("get pod %s err, %v", c.PodName, err)
continue
}
if !isPodReady(pod) {
logrusField.Errorf("pod %s is not ready", c.PodName)
continue
}
chaosfsClient, err := getChaosfsClient(pod)
if err != nil {
logrusField.Errorf("init chaosfs client failed in pod %v, err: %v", pod.Name, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsClientFailed.Sprintf(pod.Name, err), spec.ChaosfsClientFailed.Code))
continue
}
err = chaosfsClient.Revoke(ctx)
if err != nil {
logrusField.Errorf("recover io exception failed in pod %v, err: %v", c.PodName, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsRecoverFailed.Sprintf(pod.Name, err), spec.ChaosfsRecoverFailed.Code))
continue
}
}
experimentStatus.ResStatuses = statuses
return spec.ReturnResultIgnoreCode(experimentStatus)
}
func isPodReady(pod *v1.Pod) bool {
if pod.ObjectMeta.DeletionTimestamp != nil {
return false
}
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodReady &&
condition.Status == v1.ConditionTrue {
return true
}
}
return false
}
func getChaosfsClient(pod *v1.Pod) (*chaosfs.ChaosBladeHookClient, error) {
port, err := getContainerPort(webhook.FuseServerPortName, pod)
if err != nil {
return nil, err
}
addr := fmt.Sprintf("%s:%d", pod.Status.PodIP, port)
return chaosfs.NewChabladeHookClient(addr), nil
}
func getContainerPort(portName string, pod *v1.Pod) (int32, error) {
for _, container := range pod.Spec.Containers {
for _, port := range container.Ports {
if port.Name == portName {
return port.ContainerPort, nil
}
}
}
return 0, fmt.Errorf("can not found fuse-server container port ")
}
| {
if percent, err = strconv.Atoi(percentStr); err != nil {
logrusField.Error("percent must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("percent", percentStr, err), spec.ParameterIllegal.Code))
continue
}
} | conditional_block |
fsexp.go | /*
* Copyright 1999-2019 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pod
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/chaosblade-io/chaosblade-spec-go/util"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/chaosblade-io/chaosblade-spec-go/spec"
"github.com/chaosblade-io/chaosblade-operator/channel"
"github.com/chaosblade-io/chaosblade-operator/exec/model"
"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
chaosfs "github.com/chaosblade-io/chaosblade-operator/pkg/hookfs"
webhook "github.com/chaosblade-io/chaosblade-operator/pkg/webhook/pod"
)
type PodIOActionSpec struct {
spec.BaseExpActionCommandSpec
}
func NewPodIOActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
return &PodIOActionSpec{
spec.BaseExpActionCommandSpec{
ActionMatchers: []spec.ExpFlagSpec{
&spec.ExpFlag{
Name: "method",
Desc: "inject methods, only support read and write",
},
&spec.ExpFlag{
Name: "delay",
Desc: "file io delay time, ms",
},
},
ActionFlags: []spec.ExpFlagSpec{
&spec.ExpFlag{
Name: "path",
Desc: "I/O exception path or file",
},
&spec.ExpFlag{
Name: "random",
Desc: "random inject I/O code",
NoArgs: true,
},
&spec.ExpFlag{
Name: "percent",
Desc: "I/O error percent [0-100],",
},
&spec.ExpFlag{
Name: "errno",
Desc: "I/O error code",
},
},
ActionExecutor: &PodIOActionExecutor{client: client},
ActionExample: `# Two types of exceptions were injected for the READ operation, with an exception rate of 60 percent
blade create k8s pod-pod IO --method read --delay 1000 --path /home --percent 60 --errno 28 --labels "app=test" --namespace default`,
ActionCategories: []string{model.CategorySystemContainer},
},
}
}
func (*PodIOActionSpec) Name() string {
return "IO"
}
func (*PodIOActionSpec) Aliases() []string {
return []string{}
}
func (*PodIOActionSpec) ShortDesc() string {
return "Pod File System IO Exception"
}
func (*PodIOActionSpec) LongDesc() string {
return "Pod File System IO Exception"
}
type PodIOActionExecutor struct {
client *channel.Client
}
func (*PodIOActionExecutor) Name() string {
return "IO"
}
func (*PodIOActionExecutor) SetChannel(channel spec.Channel) {
}
func (d *PodIOActionExecutor) Exec(uid string, ctx context.Context, model *spec.ExpModel) *spec.Response {
if _, ok := spec.IsDestroy(ctx); ok {
return d.destroy(ctx, model)
} else {
return d.create(ctx, model)
}
}
func (d *PodIOActionExecutor) create(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
experimentId := model.GetExperimentIdFromContext(ctx)
if err != nil {
util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
return spec.ResponseFailWithFlags(spec.ContainerInContextNotFound,
v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
}
logrusField := logrus.WithField("experiment", experimentId)
statuses := make([]v1alpha1.ResourceStatus, 0)
success := false
for _, c := range containerMatchedList {
status := v1alpha1.ResourceStatus{
Kind: v1alpha1.PodKind,
Identifier: c.GetIdentifier(),
}
pod := &v1.Pod{}
err := d.client.Get(context.TODO(), client.ObjectKey{Namespace: c.Namespace, Name: c.PodName}, pod)
if err != nil {
logrusField.Errorf("get pod %s err, %v", c.PodName, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.K8sExecFailed.Sprintf("get", err), spec.K8sExecFailed.Code))
continue
}
if !isPodReady(pod) {
logrusField.Infof("pod %s is not ready", c.PodName)
statuses = append(statuses, status.CreateFailResourceStatus(spec.PodNotReady.Msg, spec.PodNotReady.Code))
continue
}
methods, ok := expModel.ActionFlags["method"]
if !ok && len(methods) != 0 {
logrusField.Error("method cannot be empty")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterLess.Sprintf("method"), spec.ParameterLess.Code))
continue
}
var delay, percent, errno int
delayStr, ok := expModel.ActionFlags["delay"]
if ok && len(delayStr) != 0 {
delay, err = strconv.Atoi(delayStr)
if err != nil {
logrusField.Error("delay must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("delay", delayStr, err), spec.ParameterIllegal.Code))
continue
}
}
percentStr, ok := expModel.ActionFlags["percent"]
if ok && len(percentStr) != 0 {
if percent, err = strconv.Atoi(percentStr); err != nil {
logrusField.Error("percent must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("percent", percentStr, err), spec.ParameterIllegal.Code))
continue
}
}
errnoStr, ok := expModel.ActionFlags["errno"]
if ok && len(errnoStr) != 0 {
if errno, err = strconv.Atoi(errnoStr); err != nil {
logrusField.Error("errno must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("errno", errnoStr, err), spec.ParameterIllegal.Code))
continue
}
}
random := false
randomStr, ok := expModel.ActionFlags["random"]
if ok && randomStr == "true" {
random = true
}
request := &chaosfs.InjectMessage{
Methods: strings.Split(methods, ","),
Path: expModel.ActionFlags["path"],
Delay: uint32(delay),
Percent: uint32(percent),
Random: random,
Errno: uint32(errno),
}
chaosfsClient, err := getChaosfsClient(pod)
if err != nil {
logrusField.WithField("pod", c.PodName).WithField("request", request).
Errorf("init chaosfs client failed: %v", err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsClientFailed.Sprintf(pod.Name, err), spec.ChaosfsClientFailed.Code))
continue
}
err = chaosfsClient.InjectFault(ctx, request)
if err != nil {
logrusField.Errorf("inject io exception in pod %s failed, request %v, err: %v", c.PodName, request, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsInjectFailed.Sprintf(pod.Name, request, err), spec.ChaosfsInjectFailed.Code))
continue
}
statuses = append(statuses, status.CreateSuccessResourceStatus())
success = true
}
var experimentStatus v1alpha1.ExperimentStatus
if success {
experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
} else {
experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
}
return spec.ReturnResultIgnoreCode(experimentStatus)
}
func (d *PodIOActionExecutor) destroy(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
experimentId := model.GetExperimentIdFromContext(ctx)
if err != nil {
util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
}
logrusField := logrus.WithField("experiment", experimentId)
experimentStatus := v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{})
statuses := experimentStatus.ResStatuses
for _, c := range containerMatchedList {
status := v1alpha1.ResourceStatus{
Kind: v1alpha1.PodKind,
Identifier: c.GetIdentifier(),
}
pod := &v1.Pod{}
err := d.client.Get(context.TODO(), client.ObjectKey{Namespace: c.Namespace, Name: c.PodName}, pod)
if err != nil {
logrusField.Errorf("get pod %s err, %v", c.PodName, err)
continue
}
if !isPodReady(pod) {
logrusField.Errorf("pod %s is not ready", c.PodName)
continue
}
chaosfsClient, err := getChaosfsClient(pod)
if err != nil {
logrusField.Errorf("init chaosfs client failed in pod %v, err: %v", pod.Name, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsClientFailed.Sprintf(pod.Name, err), spec.ChaosfsClientFailed.Code))
continue
}
err = chaosfsClient.Revoke(ctx)
if err != nil {
logrusField.Errorf("recover io exception failed in pod %v, err: %v", c.PodName, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsRecoverFailed.Sprintf(pod.Name, err), spec.ChaosfsRecoverFailed.Code))
continue
}
}
experimentStatus.ResStatuses = statuses
return spec.ReturnResultIgnoreCode(experimentStatus)
}
func isPodReady(pod *v1.Pod) bool { | if pod.ObjectMeta.DeletionTimestamp != nil {
return false
}
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodReady &&
condition.Status == v1.ConditionTrue {
return true
}
}
return false
}
func getChaosfsClient(pod *v1.Pod) (*chaosfs.ChaosBladeHookClient, error) {
port, err := getContainerPort(webhook.FuseServerPortName, pod)
if err != nil {
return nil, err
}
addr := fmt.Sprintf("%s:%d", pod.Status.PodIP, port)
return chaosfs.NewChabladeHookClient(addr), nil
}
func getContainerPort(portName string, pod *v1.Pod) (int32, error) {
for _, container := range pod.Spec.Containers {
for _, port := range container.Ports {
if port.Name == portName {
return port.ContainerPort, nil
}
}
}
return 0, fmt.Errorf("can not found fuse-server container port ")
} | random_line_split | |
fsexp.go | /*
* Copyright 1999-2019 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pod
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/chaosblade-io/chaosblade-spec-go/util"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/chaosblade-io/chaosblade-spec-go/spec"
"github.com/chaosblade-io/chaosblade-operator/channel"
"github.com/chaosblade-io/chaosblade-operator/exec/model"
"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
chaosfs "github.com/chaosblade-io/chaosblade-operator/pkg/hookfs"
webhook "github.com/chaosblade-io/chaosblade-operator/pkg/webhook/pod"
)
type PodIOActionSpec struct {
spec.BaseExpActionCommandSpec
}
func NewPodIOActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
return &PodIOActionSpec{
spec.BaseExpActionCommandSpec{
ActionMatchers: []spec.ExpFlagSpec{
&spec.ExpFlag{
Name: "method",
Desc: "inject methods, only support read and write",
},
&spec.ExpFlag{
Name: "delay",
Desc: "file io delay time, ms",
},
},
ActionFlags: []spec.ExpFlagSpec{
&spec.ExpFlag{
Name: "path",
Desc: "I/O exception path or file",
},
&spec.ExpFlag{
Name: "random",
Desc: "random inject I/O code",
NoArgs: true,
},
&spec.ExpFlag{
Name: "percent",
Desc: "I/O error percent [0-100],",
},
&spec.ExpFlag{
Name: "errno",
Desc: "I/O error code",
},
},
ActionExecutor: &PodIOActionExecutor{client: client},
ActionExample: `# Two types of exceptions were injected for the READ operation, with an exception rate of 60 percent
blade create k8s pod-pod IO --method read --delay 1000 --path /home --percent 60 --errno 28 --labels "app=test" --namespace default`,
ActionCategories: []string{model.CategorySystemContainer},
},
}
}
func (*PodIOActionSpec) Name() string {
return "IO"
}
func (*PodIOActionSpec) | () []string {
return []string{}
}
func (*PodIOActionSpec) ShortDesc() string {
return "Pod File System IO Exception"
}
func (*PodIOActionSpec) LongDesc() string {
return "Pod File System IO Exception"
}
type PodIOActionExecutor struct {
client *channel.Client
}
func (*PodIOActionExecutor) Name() string {
return "IO"
}
func (*PodIOActionExecutor) SetChannel(channel spec.Channel) {
}
func (d *PodIOActionExecutor) Exec(uid string, ctx context.Context, model *spec.ExpModel) *spec.Response {
if _, ok := spec.IsDestroy(ctx); ok {
return d.destroy(ctx, model)
} else {
return d.create(ctx, model)
}
}
func (d *PodIOActionExecutor) create(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
experimentId := model.GetExperimentIdFromContext(ctx)
if err != nil {
util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
return spec.ResponseFailWithFlags(spec.ContainerInContextNotFound,
v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
}
logrusField := logrus.WithField("experiment", experimentId)
statuses := make([]v1alpha1.ResourceStatus, 0)
success := false
for _, c := range containerMatchedList {
status := v1alpha1.ResourceStatus{
Kind: v1alpha1.PodKind,
Identifier: c.GetIdentifier(),
}
pod := &v1.Pod{}
err := d.client.Get(context.TODO(), client.ObjectKey{Namespace: c.Namespace, Name: c.PodName}, pod)
if err != nil {
logrusField.Errorf("get pod %s err, %v", c.PodName, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.K8sExecFailed.Sprintf("get", err), spec.K8sExecFailed.Code))
continue
}
if !isPodReady(pod) {
logrusField.Infof("pod %s is not ready", c.PodName)
statuses = append(statuses, status.CreateFailResourceStatus(spec.PodNotReady.Msg, spec.PodNotReady.Code))
continue
}
methods, ok := expModel.ActionFlags["method"]
if !ok && len(methods) != 0 {
logrusField.Error("method cannot be empty")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterLess.Sprintf("method"), spec.ParameterLess.Code))
continue
}
var delay, percent, errno int
delayStr, ok := expModel.ActionFlags["delay"]
if ok && len(delayStr) != 0 {
delay, err = strconv.Atoi(delayStr)
if err != nil {
logrusField.Error("delay must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("delay", delayStr, err), spec.ParameterIllegal.Code))
continue
}
}
percentStr, ok := expModel.ActionFlags["percent"]
if ok && len(percentStr) != 0 {
if percent, err = strconv.Atoi(percentStr); err != nil {
logrusField.Error("percent must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("percent", percentStr, err), spec.ParameterIllegal.Code))
continue
}
}
errnoStr, ok := expModel.ActionFlags["errno"]
if ok && len(errnoStr) != 0 {
if errno, err = strconv.Atoi(errnoStr); err != nil {
logrusField.Error("errno must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("errno", errnoStr, err), spec.ParameterIllegal.Code))
continue
}
}
random := false
randomStr, ok := expModel.ActionFlags["random"]
if ok && randomStr == "true" {
random = true
}
request := &chaosfs.InjectMessage{
Methods: strings.Split(methods, ","),
Path: expModel.ActionFlags["path"],
Delay: uint32(delay),
Percent: uint32(percent),
Random: random,
Errno: uint32(errno),
}
chaosfsClient, err := getChaosfsClient(pod)
if err != nil {
logrusField.WithField("pod", c.PodName).WithField("request", request).
Errorf("init chaosfs client failed: %v", err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsClientFailed.Sprintf(pod.Name, err), spec.ChaosfsClientFailed.Code))
continue
}
err = chaosfsClient.InjectFault(ctx, request)
if err != nil {
logrusField.Errorf("inject io exception in pod %s failed, request %v, err: %v", c.PodName, request, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsInjectFailed.Sprintf(pod.Name, request, err), spec.ChaosfsInjectFailed.Code))
continue
}
statuses = append(statuses, status.CreateSuccessResourceStatus())
success = true
}
var experimentStatus v1alpha1.ExperimentStatus
if success {
experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
} else {
experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
}
return spec.ReturnResultIgnoreCode(experimentStatus)
}
func (d *PodIOActionExecutor) destroy(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
experimentId := model.GetExperimentIdFromContext(ctx)
if err != nil {
util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
}
logrusField := logrus.WithField("experiment", experimentId)
experimentStatus := v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{})
statuses := experimentStatus.ResStatuses
for _, c := range containerMatchedList {
status := v1alpha1.ResourceStatus{
Kind: v1alpha1.PodKind,
Identifier: c.GetIdentifier(),
}
pod := &v1.Pod{}
err := d.client.Get(context.TODO(), client.ObjectKey{Namespace: c.Namespace, Name: c.PodName}, pod)
if err != nil {
logrusField.Errorf("get pod %s err, %v", c.PodName, err)
continue
}
if !isPodReady(pod) {
logrusField.Errorf("pod %s is not ready", c.PodName)
continue
}
chaosfsClient, err := getChaosfsClient(pod)
if err != nil {
logrusField.Errorf("init chaosfs client failed in pod %v, err: %v", pod.Name, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsClientFailed.Sprintf(pod.Name, err), spec.ChaosfsClientFailed.Code))
continue
}
err = chaosfsClient.Revoke(ctx)
if err != nil {
logrusField.Errorf("recover io exception failed in pod %v, err: %v", c.PodName, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsRecoverFailed.Sprintf(pod.Name, err), spec.ChaosfsRecoverFailed.Code))
continue
}
}
experimentStatus.ResStatuses = statuses
return spec.ReturnResultIgnoreCode(experimentStatus)
}
func isPodReady(pod *v1.Pod) bool {
if pod.ObjectMeta.DeletionTimestamp != nil {
return false
}
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodReady &&
condition.Status == v1.ConditionTrue {
return true
}
}
return false
}
func getChaosfsClient(pod *v1.Pod) (*chaosfs.ChaosBladeHookClient, error) {
port, err := getContainerPort(webhook.FuseServerPortName, pod)
if err != nil {
return nil, err
}
addr := fmt.Sprintf("%s:%d", pod.Status.PodIP, port)
return chaosfs.NewChabladeHookClient(addr), nil
}
func getContainerPort(portName string, pod *v1.Pod) (int32, error) {
for _, container := range pod.Spec.Containers {
for _, port := range container.Ports {
if port.Name == portName {
return port.ContainerPort, nil
}
}
}
return 0, fmt.Errorf("can not found fuse-server container port ")
}
| Aliases | identifier_name |
fsexp.go | /*
* Copyright 1999-2019 Alibaba Group Holding Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pod
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/chaosblade-io/chaosblade-spec-go/util"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/chaosblade-io/chaosblade-spec-go/spec"
"github.com/chaosblade-io/chaosblade-operator/channel"
"github.com/chaosblade-io/chaosblade-operator/exec/model"
"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
chaosfs "github.com/chaosblade-io/chaosblade-operator/pkg/hookfs"
webhook "github.com/chaosblade-io/chaosblade-operator/pkg/webhook/pod"
)
type PodIOActionSpec struct {
spec.BaseExpActionCommandSpec
}
func NewPodIOActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
return &PodIOActionSpec{
spec.BaseExpActionCommandSpec{
ActionMatchers: []spec.ExpFlagSpec{
&spec.ExpFlag{
Name: "method",
Desc: "inject methods, only support read and write",
},
&spec.ExpFlag{
Name: "delay",
Desc: "file io delay time, ms",
},
},
ActionFlags: []spec.ExpFlagSpec{
&spec.ExpFlag{
Name: "path",
Desc: "I/O exception path or file",
},
&spec.ExpFlag{
Name: "random",
Desc: "random inject I/O code",
NoArgs: true,
},
&spec.ExpFlag{
Name: "percent",
Desc: "I/O error percent [0-100],",
},
&spec.ExpFlag{
Name: "errno",
Desc: "I/O error code",
},
},
ActionExecutor: &PodIOActionExecutor{client: client},
ActionExample: `# Two types of exceptions were injected for the READ operation, with an exception rate of 60 percent
blade create k8s pod-pod IO --method read --delay 1000 --path /home --percent 60 --errno 28 --labels "app=test" --namespace default`,
ActionCategories: []string{model.CategorySystemContainer},
},
}
}
func (*PodIOActionSpec) Name() string {
return "IO"
}
func (*PodIOActionSpec) Aliases() []string {
return []string{}
}
func (*PodIOActionSpec) ShortDesc() string {
return "Pod File System IO Exception"
}
func (*PodIOActionSpec) LongDesc() string {
return "Pod File System IO Exception"
}
type PodIOActionExecutor struct {
client *channel.Client
}
func (*PodIOActionExecutor) Name() string {
return "IO"
}
func (*PodIOActionExecutor) SetChannel(channel spec.Channel) {
}
func (d *PodIOActionExecutor) Exec(uid string, ctx context.Context, model *spec.ExpModel) *spec.Response {
if _, ok := spec.IsDestroy(ctx); ok {
return d.destroy(ctx, model)
} else {
return d.create(ctx, model)
}
}
func (d *PodIOActionExecutor) create(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
experimentId := model.GetExperimentIdFromContext(ctx)
if err != nil {
util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
return spec.ResponseFailWithFlags(spec.ContainerInContextNotFound,
v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
}
logrusField := logrus.WithField("experiment", experimentId)
statuses := make([]v1alpha1.ResourceStatus, 0)
success := false
for _, c := range containerMatchedList {
status := v1alpha1.ResourceStatus{
Kind: v1alpha1.PodKind,
Identifier: c.GetIdentifier(),
}
pod := &v1.Pod{}
err := d.client.Get(context.TODO(), client.ObjectKey{Namespace: c.Namespace, Name: c.PodName}, pod)
if err != nil {
logrusField.Errorf("get pod %s err, %v", c.PodName, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.K8sExecFailed.Sprintf("get", err), spec.K8sExecFailed.Code))
continue
}
if !isPodReady(pod) {
logrusField.Infof("pod %s is not ready", c.PodName)
statuses = append(statuses, status.CreateFailResourceStatus(spec.PodNotReady.Msg, spec.PodNotReady.Code))
continue
}
methods, ok := expModel.ActionFlags["method"]
if !ok && len(methods) != 0 {
logrusField.Error("method cannot be empty")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterLess.Sprintf("method"), spec.ParameterLess.Code))
continue
}
var delay, percent, errno int
delayStr, ok := expModel.ActionFlags["delay"]
if ok && len(delayStr) != 0 {
delay, err = strconv.Atoi(delayStr)
if err != nil {
logrusField.Error("delay must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("delay", delayStr, err), spec.ParameterIllegal.Code))
continue
}
}
percentStr, ok := expModel.ActionFlags["percent"]
if ok && len(percentStr) != 0 {
if percent, err = strconv.Atoi(percentStr); err != nil {
logrusField.Error("percent must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("percent", percentStr, err), spec.ParameterIllegal.Code))
continue
}
}
errnoStr, ok := expModel.ActionFlags["errno"]
if ok && len(errnoStr) != 0 {
if errno, err = strconv.Atoi(errnoStr); err != nil {
logrusField.Error("errno must be integer")
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ParameterIllegal.Sprintf("errno", errnoStr, err), spec.ParameterIllegal.Code))
continue
}
}
random := false
randomStr, ok := expModel.ActionFlags["random"]
if ok && randomStr == "true" {
random = true
}
request := &chaosfs.InjectMessage{
Methods: strings.Split(methods, ","),
Path: expModel.ActionFlags["path"],
Delay: uint32(delay),
Percent: uint32(percent),
Random: random,
Errno: uint32(errno),
}
chaosfsClient, err := getChaosfsClient(pod)
if err != nil {
logrusField.WithField("pod", c.PodName).WithField("request", request).
Errorf("init chaosfs client failed: %v", err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsClientFailed.Sprintf(pod.Name, err), spec.ChaosfsClientFailed.Code))
continue
}
err = chaosfsClient.InjectFault(ctx, request)
if err != nil {
logrusField.Errorf("inject io exception in pod %s failed, request %v, err: %v", c.PodName, request, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsInjectFailed.Sprintf(pod.Name, request, err), spec.ChaosfsInjectFailed.Code))
continue
}
statuses = append(statuses, status.CreateSuccessResourceStatus())
success = true
}
var experimentStatus v1alpha1.ExperimentStatus
if success {
experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
} else {
experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
}
return spec.ReturnResultIgnoreCode(experimentStatus)
}
func (d *PodIOActionExecutor) destroy(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
experimentId := model.GetExperimentIdFromContext(ctx)
if err != nil {
util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
}
logrusField := logrus.WithField("experiment", experimentId)
experimentStatus := v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{})
statuses := experimentStatus.ResStatuses
for _, c := range containerMatchedList {
status := v1alpha1.ResourceStatus{
Kind: v1alpha1.PodKind,
Identifier: c.GetIdentifier(),
}
pod := &v1.Pod{}
err := d.client.Get(context.TODO(), client.ObjectKey{Namespace: c.Namespace, Name: c.PodName}, pod)
if err != nil {
logrusField.Errorf("get pod %s err, %v", c.PodName, err)
continue
}
if !isPodReady(pod) {
logrusField.Errorf("pod %s is not ready", c.PodName)
continue
}
chaosfsClient, err := getChaosfsClient(pod)
if err != nil {
logrusField.Errorf("init chaosfs client failed in pod %v, err: %v", pod.Name, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsClientFailed.Sprintf(pod.Name, err), spec.ChaosfsClientFailed.Code))
continue
}
err = chaosfsClient.Revoke(ctx)
if err != nil {
logrusField.Errorf("recover io exception failed in pod %v, err: %v", c.PodName, err)
statuses = append(statuses, status.CreateFailResourceStatus(
spec.ChaosfsRecoverFailed.Sprintf(pod.Name, err), spec.ChaosfsRecoverFailed.Code))
continue
}
}
experimentStatus.ResStatuses = statuses
return spec.ReturnResultIgnoreCode(experimentStatus)
}
func isPodReady(pod *v1.Pod) bool {
if pod.ObjectMeta.DeletionTimestamp != nil {
return false
}
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodReady &&
condition.Status == v1.ConditionTrue {
return true
}
}
return false
}
func getChaosfsClient(pod *v1.Pod) (*chaosfs.ChaosBladeHookClient, error) |
func getContainerPort(portName string, pod *v1.Pod) (int32, error) {
for _, container := range pod.Spec.Containers {
for _, port := range container.Ports {
if port.Name == portName {
return port.ContainerPort, nil
}
}
}
return 0, fmt.Errorf("can not found fuse-server container port ")
}
| {
port, err := getContainerPort(webhook.FuseServerPortName, pod)
if err != nil {
return nil, err
}
addr := fmt.Sprintf("%s:%d", pod.Status.PodIP, port)
return chaosfs.NewChabladeHookClient(addr), nil
} | identifier_body |
index.js | //challenge 1: Your Age in Days
function ageInDays() {
let birthYear = prompt(" What year you were born in?");
let aegInDayss = (2020 - birthYear) * 365;
let h1 = document.createElement("h1");
let textAnswer = document.createTextNode(
"you are " + aegInDayss + " days old."
);
h1.setAttribute("id", "ageInDays");
h1.appendChild(textAnswer);
document.getElementById("flex-box-result").appendChild(h1);
}
function reset() {
document.getElementById("ageInDays").remove(this.editor);
}
//challenge 2: Generate cat
function generateCat() {
let image = document.createElement("img");
let div = document.getElementById("flex-cat-gen");
image.src =
"https://thecatapi.com/api/images/get?format=src&type=gif&size=small";
div.appendChild(image);
}
// challenge 3: Rock, Paper, Scissor---->
function rpsGame(yourChoice) {
//console.log(yourChoice);
let humanChoice, botChoice;
humanChoice = yourChoice.id;
botChoice = numberToChoice(randToRpsInt());
//console.log("computerChoice:", botChoice);
result = decideWinner(humanChoice, botChoice);
//console.log(result);
message = finalMessage(result); // {'message' : 'You Won', 'color': 'green'}
//console.log(message);
rpsFrontEnd(yourChoice.id, botChoice, message);
}
//bot's random number between 1-3
function randToRpsInt() {
return Math.floor(Math.random() * 3);
}
function numberToChoice(number) {
return ["rock", "paper", "scissors"][number];
}
function decideWinner(yourChoice, computerChoice) {
var rpsDatabase = {
rock: { scissors: 1, rock: 0.5, paper: 0 },
paper: { rock: 1, paper: 0.5, scissors: 0 },
scissors: { paper: 1, scissors: 0.5, rock: 0 },
};
let yourScore = rpsDatabase[yourChoice][computerChoice];
let computerScore = rpsDatabase[computerChoice][yourChoice];
return [yourScore, computerScore];
}
function finalMessage([yourScore, computerScore]) {
if (yourScore === 0) {
return { message: "You Lost", color: "red" };
} else if (yourScore === 0.5) {
return { message: "You Tied", color: "yellow" };
} else {
return { message: "You Youn", color: "green" };
}
}
function rpsFrontEnd(humanImageChoice, botImageChoice, finalMessage) {
let ImagesDatabase = {
rock: document.getElementById("rock").src,
paper: document.getElementById("paper").src,
scissors: document.getElementById("scissors").src,
};
//remove all the images
document.getElementById("rock").remove();
document.getElementById("paper").remove();
document.getElementById("scissors").remove();
let humanDiv = document.createElement("div");
let botDiv = document.createElement("div");
let messageDiv = document.createElement("div");
humanDiv.innerHTML =
"<img src='" +
ImagesDatabase[humanImageChoice] +
" ' height=150 width=150 style=' box-shadow: 0px 10px 50px rgba(37, 50, 233, 1);'>";
botDiv.innerHTML =
"<img src='" +
ImagesDatabase[botImageChoice] +
" ' height=150 width=150 style=' box-shadow: 0px 10px 50px rgba(243, 38, 24, 1);'>";
messageDiv.innerHTML =
"<h1 style='color: " +
finalMessage["color"] +
"; font-size: 60px; padding: 30px;'>" +
finalMessage["message"] +
"</h1>";
document.getElementById("flex-box-rps-div").appendChild(humanDiv);
document.getElementById("flex-box-rps-div").appendChild(messageDiv);
document.getElementById("flex-box-rps-div").appendChild(botDiv);
}
// challenge 4: Change the Color the of All Buttons---->
let all_buttons = document.getElementsByTagName("button");
//console.log(all_buttons);
let copyAllButtons = [];
for (let i = 0; i < all_buttons.length; i++) {
copyAllButtons.push(all_buttons[i].classList[1]);
}
//console.log(copyAllButtons);
function buttonColorChange(buttonThingy) {
if (buttonThingy.value === "red") {
buttonsRed();
} else if (buttonThingy.value === "green") {
buttonsGreen();
} else if (buttonThingy.value === "reset") {
buttonsReset();
} else if (buttonThingy.value === "random") {
buttonsRandom();
}
}
// all the buttons will be red
function buttonsRed() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add("btn-danger");
}
}
// all the buttons will be Green
function buttonsGreen() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add("btn-success");
}
}
// all the buttons will be reseted
function buttonsReset() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add(copyAllButtons[i]);
}
}
// all the buttons will be randomly changed
function buttonsRandom() {
let choices = ["btn-primary", "btn-danger", "btn-success", "btn-warning"];
for (let i = 0; i < all_buttons.length; i++) {
let randomNumber = Math.floor(Math.random() * 4);
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add(choices[randomNumber]);
}
}
// challenge 5: BlackJack
let blcakjackGame = {
you: { scoreSpan: "#your-blackjack-result", div: "#your-box", score: 0 },
dealer: {
scoreSpan: "#dealer-blackjack-result",
div: "#dealer-box",
score: 0,
},
cards: ["2", "3", "4", "5", "6", "7", "8", "9", "10", "K", "J", "Q", "A"],
cardsMap: {
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
K: 10,
J: 10,
Q: 10,
A: [1, 11],
},
wins: 0,
losses: 0,
draws: 0,
isStand: false,
turnsOver: false,
};
const hitSound = new Audio("resources/sounds/swish.m4a");
const winSound = new Audio("resources/sounds/cash.mp3");
const lossSound = new Audio("resources/sounds/aww.mp3");
const YOU = blcakjackGame["you"];
const DEALER = blcakjackGame["dealer"];
//hit button event
document
.querySelector("#blackjack-hit-button")
.addEventListener("click", blackjackHit);
//stand button event
document
.querySelector("#blackjack-stand-button")
.addEventListener("click", blackjackStand);
//deal button event
document
.querySelector("#blackjack-deal-button")
.addEventListener("click", blackjackDeal);
//hit button function
function blackjackHit() {
if (blcakjackGame["isStand"] === false) {
let card = randomCard();
//console.log(card);
showCard(card, YOU);
updateScore(card, YOU);
showScore(YOU);
//console.log(YOU["score"]);
}
}
function sleep(ms) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
//stand button Dealer logic
async function blackjackStand() {
blcakjackGame["isStand"] = true;
while (DEALER["score"] < 16 && blcakjackGame["isStand"] === true) {
let card = randomCard();
showCard(card, DEALER);
updateScore(card, DEALER);
showScore(DEALER);
await sleep(800);
}
blcakjackGame["turnsOver"] = true;
let winner = computeWinner();
//console.log(winner);
showResult(winner);
}
//random card function
function randomCard() {
let randomIndex = Math.floor(Math.random() * 13);
return blcakjackGame["cards"][randomIndex];
}
// showing card images
function showCard(card, activePlayer) {
if (activePlayer["score"] <= 21) {
let CardImage = document.createElement("img");
CardImage.src = `resources/images/card/${card}.png`;
document.querySelector(activePlayer["div"]).appendChild(CardImage);
hitSound.play();
}
}
//removes all Deal button
function blackjackDeal() {
if (blcakjackGame["turnsOver"] === true) {
blcakjackGame["isStand"] = false;
let yourImages = document
.querySelector("#your-box")
.querySelectorAll("img");
let dealerImages = document
.querySelector("#dealer-box")
.querySelectorAll("img");
for (let i = 0; i < yourImages.length; i++) {
yourImages[i].remove();
}
for (let i = 0; i < dealerImages.length; i++) {
dealerImages[i].remove();
}
YOU["score"] = 0;
DEALER["score"] = 0;
document.querySelector("#your-blackjack-result").textContent = 0;
document.querySelector("#dealer-blackjack-result").textContent = 0;
document.querySelector("#your-blackjack-result").style.color = "white";
document.querySelector("#dealer-blackjack-result").style.color = "white";
| }
}
function updateScore(card, activePlayer) {
if (card === "A") {
//if adding 11 keeps me below 21 add 11 other wise adds 1
if (activePlayer["score"] + blcakjackGame["cardsMap"][card][1] <= 21) {
activePlayer["score"] += blcakjackGame["cardsMap"][card][1];
} else {
activePlayer["score"] += blcakjackGame["cardsMap"][card][0];
}
} else {
activePlayer["score"] += blcakjackGame["cardsMap"][card];
}
}
function showScore(activePlayer) {
if (activePlayer["score"] > 21) {
document.querySelector(activePlayer["scoreSpan"]).textContent = "BUST!!";
document.querySelector(activePlayer["scoreSpan"]).style.color = "red";
} else {
document.querySelector(activePlayer["scoreSpan"]).textContent =
activePlayer["score"];
}
}
//compute winner and return result
//updating wins,losses,draws
function computeWinner() {
let winner;
if (YOU["score"] <= 21) {
// conditions: higher score than dealer or dealer busts but your 21 or less
if (YOU["score"] > DEALER["score"] || DEALER["score"] > 21) {
blcakjackGame["wins"]++;
winner = YOU;
} else if (YOU["score"] < DEALER["score"]) {
blcakjackGame["losses"]++;
winner = DEALER;
} else if (YOU["score"] === DEALER["score"]) {
blcakjackGame["draws"]++;
}
//condition when user bust but dealer dosent
} else if (YOU["score"] > 21 && DEALER["score"] <= 21) {
blcakjackGame["losses"]++;
winner = DEALER;
}
//condition when you and dealer both bust
else if (YOU["score"] > 21 && DEALER["score"] > 21) {
blcakjackGame["draws"]++;
}
console.log(blcakjackGame);
return winner;
}
function showResult(winner) {
let message, messageColor;
if (blcakjackGame["turnsOver"] === true) {
if (winner === YOU) {
document.querySelector("#wins").textContent = blcakjackGame["wins"];
message = "You Won!";
messageColor = "green";
winSound.play();
} else if (winner === DEALER) {
document.querySelector("#losses").textContent = blcakjackGame["losses"];
message = "You Lost!";
messageColor = "red";
lossSound.play();
} else {
document.querySelector("#draws").textContent = blcakjackGame["draws"];
message = "You Drew!";
messageColor = "black";
}
document.querySelector("#blackjack-result").textContent = message;
document.querySelector("#blackjack-result").style.color = messageColor;
}
} | document.querySelector("#blackjack-result").textContent = "Let's play";
document.querySelector("#blackjack-result").style.color = "black";
blcakjackGame["turnsOver"] = true; | random_line_split |
index.js | //challenge 1: Your Age in Days
function ageInDays() {
let birthYear = prompt(" What year you were born in?");
let aegInDayss = (2020 - birthYear) * 365;
let h1 = document.createElement("h1");
let textAnswer = document.createTextNode(
"you are " + aegInDayss + " days old."
);
h1.setAttribute("id", "ageInDays");
h1.appendChild(textAnswer);
document.getElementById("flex-box-result").appendChild(h1);
}
function reset() {
document.getElementById("ageInDays").remove(this.editor);
}
//challenge 2: Generate cat
function generateCat() {
let image = document.createElement("img");
let div = document.getElementById("flex-cat-gen");
image.src =
"https://thecatapi.com/api/images/get?format=src&type=gif&size=small";
div.appendChild(image);
}
// challenge 3: Rock, Paper, Scissor---->
function rpsGame(yourChoice) {
//console.log(yourChoice);
let humanChoice, botChoice;
humanChoice = yourChoice.id;
botChoice = numberToChoice(randToRpsInt());
//console.log("computerChoice:", botChoice);
result = decideWinner(humanChoice, botChoice);
//console.log(result);
message = finalMessage(result); // {'message' : 'You Won', 'color': 'green'}
//console.log(message);
rpsFrontEnd(yourChoice.id, botChoice, message);
}
//bot's random number between 1-3
function randToRpsInt() {
return Math.floor(Math.random() * 3);
}
function numberToChoice(number) {
return ["rock", "paper", "scissors"][number];
}
function decideWinner(yourChoice, computerChoice) {
var rpsDatabase = {
rock: { scissors: 1, rock: 0.5, paper: 0 },
paper: { rock: 1, paper: 0.5, scissors: 0 },
scissors: { paper: 1, scissors: 0.5, rock: 0 },
};
let yourScore = rpsDatabase[yourChoice][computerChoice];
let computerScore = rpsDatabase[computerChoice][yourChoice];
return [yourScore, computerScore];
}
function finalMessage([yourScore, computerScore]) {
if (yourScore === 0) {
return { message: "You Lost", color: "red" };
} else if (yourScore === 0.5) {
return { message: "You Tied", color: "yellow" };
} else {
return { message: "You Youn", color: "green" };
}
}
function rpsFrontEnd(humanImageChoice, botImageChoice, finalMessage) {
let ImagesDatabase = {
rock: document.getElementById("rock").src,
paper: document.getElementById("paper").src,
scissors: document.getElementById("scissors").src,
};
//remove all the images
document.getElementById("rock").remove();
document.getElementById("paper").remove();
document.getElementById("scissors").remove();
let humanDiv = document.createElement("div");
let botDiv = document.createElement("div");
let messageDiv = document.createElement("div");
humanDiv.innerHTML =
"<img src='" +
ImagesDatabase[humanImageChoice] +
" ' height=150 width=150 style=' box-shadow: 0px 10px 50px rgba(37, 50, 233, 1);'>";
botDiv.innerHTML =
"<img src='" +
ImagesDatabase[botImageChoice] +
" ' height=150 width=150 style=' box-shadow: 0px 10px 50px rgba(243, 38, 24, 1);'>";
messageDiv.innerHTML =
"<h1 style='color: " +
finalMessage["color"] +
"; font-size: 60px; padding: 30px;'>" +
finalMessage["message"] +
"</h1>";
document.getElementById("flex-box-rps-div").appendChild(humanDiv);
document.getElementById("flex-box-rps-div").appendChild(messageDiv);
document.getElementById("flex-box-rps-div").appendChild(botDiv);
}
// challenge 4: Change the Color the of All Buttons---->
let all_buttons = document.getElementsByTagName("button");
//console.log(all_buttons);
let copyAllButtons = [];
for (let i = 0; i < all_buttons.length; i++) {
copyAllButtons.push(all_buttons[i].classList[1]);
}
//console.log(copyAllButtons);
function buttonColorChange(buttonThingy) {
if (buttonThingy.value === "red") {
buttonsRed();
} else if (buttonThingy.value === "green") {
buttonsGreen();
} else if (buttonThingy.value === "reset") {
buttonsReset();
} else if (buttonThingy.value === "random") {
buttonsRandom();
}
}
// all the buttons will be red
function buttonsRed() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add("btn-danger");
}
}
// all the buttons will be Green
function buttonsGreen() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add("btn-success");
}
}
// all the buttons will be reseted
function buttonsReset() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add(copyAllButtons[i]);
}
}
// all the buttons will be randomly changed
function buttonsRandom() {
let choices = ["btn-primary", "btn-danger", "btn-success", "btn-warning"];
for (let i = 0; i < all_buttons.length; i++) {
let randomNumber = Math.floor(Math.random() * 4);
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add(choices[randomNumber]);
}
}
// challenge 5: BlackJack
let blcakjackGame = {
you: { scoreSpan: "#your-blackjack-result", div: "#your-box", score: 0 },
dealer: {
scoreSpan: "#dealer-blackjack-result",
div: "#dealer-box",
score: 0,
},
cards: ["2", "3", "4", "5", "6", "7", "8", "9", "10", "K", "J", "Q", "A"],
cardsMap: {
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
K: 10,
J: 10,
Q: 10,
A: [1, 11],
},
wins: 0,
losses: 0,
draws: 0,
isStand: false,
turnsOver: false,
};
const hitSound = new Audio("resources/sounds/swish.m4a");
const winSound = new Audio("resources/sounds/cash.mp3");
const lossSound = new Audio("resources/sounds/aww.mp3");
const YOU = blcakjackGame["you"];
const DEALER = blcakjackGame["dealer"];
//hit button event
document
.querySelector("#blackjack-hit-button")
.addEventListener("click", blackjackHit);
//stand button event
document
.querySelector("#blackjack-stand-button")
.addEventListener("click", blackjackStand);
//deal button event
document
.querySelector("#blackjack-deal-button")
.addEventListener("click", blackjackDeal);
//hit button function
function blackjackHit() {
if (blcakjackGame["isStand"] === false) {
let card = randomCard();
//console.log(card);
showCard(card, YOU);
updateScore(card, YOU);
showScore(YOU);
//console.log(YOU["score"]);
}
}
function sleep(ms) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
//stand button Dealer logic
async function blackjackStand() {
blcakjackGame["isStand"] = true;
while (DEALER["score"] < 16 && blcakjackGame["isStand"] === true) {
let card = randomCard();
showCard(card, DEALER);
updateScore(card, DEALER);
showScore(DEALER);
await sleep(800);
}
blcakjackGame["turnsOver"] = true;
let winner = computeWinner();
//console.log(winner);
showResult(winner);
}
//random card function
function randomCard() {
let randomIndex = Math.floor(Math.random() * 13);
return blcakjackGame["cards"][randomIndex];
}
// showing card images
function showCard(card, activePlayer) {
if (activePlayer["score"] <= 21) {
let CardImage = document.createElement("img");
CardImage.src = `resources/images/card/${card}.png`;
document.querySelector(activePlayer["div"]).appendChild(CardImage);
hitSound.play();
}
}
//removes all Deal button
function blackjackDeal() {
if (blcakjackGame["turnsOver"] === true) {
blcakjackGame["isStand"] = false;
let yourImages = document
.querySelector("#your-box")
.querySelectorAll("img");
let dealerImages = document
.querySelector("#dealer-box")
.querySelectorAll("img");
for (let i = 0; i < yourImages.length; i++) {
yourImages[i].remove();
}
for (let i = 0; i < dealerImages.length; i++) {
dealerImages[i].remove();
}
YOU["score"] = 0;
DEALER["score"] = 0;
document.querySelector("#your-blackjack-result").textContent = 0;
document.querySelector("#dealer-blackjack-result").textContent = 0;
document.querySelector("#your-blackjack-result").style.color = "white";
document.querySelector("#dealer-blackjack-result").style.color = "white";
document.querySelector("#blackjack-result").textContent = "Let's play";
document.querySelector("#blackjack-result").style.color = "black";
blcakjackGame["turnsOver"] = true;
}
}
function updateScore(card, activePlayer) {
if (card === "A") {
//if adding 11 keeps me below 21 add 11 other wise adds 1
if (activePlayer["score"] + blcakjackGame["cardsMap"][card][1] <= 21) {
activePlayer["score"] += blcakjackGame["cardsMap"][card][1];
} else {
activePlayer["score"] += blcakjackGame["cardsMap"][card][0];
}
} else {
activePlayer["score"] += blcakjackGame["cardsMap"][card];
}
}
function showScore(activePlayer) {
if (activePlayer["score"] > 21) {
document.querySelector(activePlayer["scoreSpan"]).textContent = "BUST!!";
document.querySelector(activePlayer["scoreSpan"]).style.color = "red";
} else {
document.querySelector(activePlayer["scoreSpan"]).textContent =
activePlayer["score"];
}
}
//compute winner and return result
//updating wins,losses,draws
function computeWinner() {
let winner;
if (YOU["score"] <= 21) | else if (YOU["score"] > 21 && DEALER["score"] <= 21) {
blcakjackGame["losses"]++;
winner = DEALER;
}
//condition when you and dealer both bust
else if (YOU["score"] > 21 && DEALER["score"] > 21) {
blcakjackGame["draws"]++;
}
console.log(blcakjackGame);
return winner;
}
function showResult(winner) {
let message, messageColor;
if (blcakjackGame["turnsOver"] === true) {
if (winner === YOU) {
document.querySelector("#wins").textContent = blcakjackGame["wins"];
message = "You Won!";
messageColor = "green";
winSound.play();
} else if (winner === DEALER) {
document.querySelector("#losses").textContent = blcakjackGame["losses"];
message = "You Lost!";
messageColor = "red";
lossSound.play();
} else {
document.querySelector("#draws").textContent = blcakjackGame["draws"];
message = "You Drew!";
messageColor = "black";
}
document.querySelector("#blackjack-result").textContent = message;
document.querySelector("#blackjack-result").style.color = messageColor;
}
}
| {
// conditions: higher score than dealer or dealer busts but your 21 or less
if (YOU["score"] > DEALER["score"] || DEALER["score"] > 21) {
blcakjackGame["wins"]++;
winner = YOU;
} else if (YOU["score"] < DEALER["score"]) {
blcakjackGame["losses"]++;
winner = DEALER;
} else if (YOU["score"] === DEALER["score"]) {
blcakjackGame["draws"]++;
}
//condition when user bust but dealer dosent
} | conditional_block |
index.js | //challenge 1: Your Age in Days
function ageInDays() {
let birthYear = prompt(" What year you were born in?");
let aegInDayss = (2020 - birthYear) * 365;
let h1 = document.createElement("h1");
let textAnswer = document.createTextNode(
"you are " + aegInDayss + " days old."
);
h1.setAttribute("id", "ageInDays");
h1.appendChild(textAnswer);
document.getElementById("flex-box-result").appendChild(h1);
}
function reset() |
//challenge 2: Generate cat
function generateCat() {
let image = document.createElement("img");
let div = document.getElementById("flex-cat-gen");
image.src =
"https://thecatapi.com/api/images/get?format=src&type=gif&size=small";
div.appendChild(image);
}
// challenge 3: Rock, Paper, Scissor---->
function rpsGame(yourChoice) {
//console.log(yourChoice);
let humanChoice, botChoice;
humanChoice = yourChoice.id;
botChoice = numberToChoice(randToRpsInt());
//console.log("computerChoice:", botChoice);
result = decideWinner(humanChoice, botChoice);
//console.log(result);
message = finalMessage(result); // {'message' : 'You Won', 'color': 'green'}
//console.log(message);
rpsFrontEnd(yourChoice.id, botChoice, message);
}
//bot's random number between 1-3
function randToRpsInt() {
return Math.floor(Math.random() * 3);
}
function numberToChoice(number) {
return ["rock", "paper", "scissors"][number];
}
function decideWinner(yourChoice, computerChoice) {
var rpsDatabase = {
rock: { scissors: 1, rock: 0.5, paper: 0 },
paper: { rock: 1, paper: 0.5, scissors: 0 },
scissors: { paper: 1, scissors: 0.5, rock: 0 },
};
let yourScore = rpsDatabase[yourChoice][computerChoice];
let computerScore = rpsDatabase[computerChoice][yourChoice];
return [yourScore, computerScore];
}
function finalMessage([yourScore, computerScore]) {
if (yourScore === 0) {
return { message: "You Lost", color: "red" };
} else if (yourScore === 0.5) {
return { message: "You Tied", color: "yellow" };
} else {
return { message: "You Youn", color: "green" };
}
}
function rpsFrontEnd(humanImageChoice, botImageChoice, finalMessage) {
let ImagesDatabase = {
rock: document.getElementById("rock").src,
paper: document.getElementById("paper").src,
scissors: document.getElementById("scissors").src,
};
//remove all the images
document.getElementById("rock").remove();
document.getElementById("paper").remove();
document.getElementById("scissors").remove();
let humanDiv = document.createElement("div");
let botDiv = document.createElement("div");
let messageDiv = document.createElement("div");
humanDiv.innerHTML =
"<img src='" +
ImagesDatabase[humanImageChoice] +
" ' height=150 width=150 style=' box-shadow: 0px 10px 50px rgba(37, 50, 233, 1);'>";
botDiv.innerHTML =
"<img src='" +
ImagesDatabase[botImageChoice] +
" ' height=150 width=150 style=' box-shadow: 0px 10px 50px rgba(243, 38, 24, 1);'>";
messageDiv.innerHTML =
"<h1 style='color: " +
finalMessage["color"] +
"; font-size: 60px; padding: 30px;'>" +
finalMessage["message"] +
"</h1>";
document.getElementById("flex-box-rps-div").appendChild(humanDiv);
document.getElementById("flex-box-rps-div").appendChild(messageDiv);
document.getElementById("flex-box-rps-div").appendChild(botDiv);
}
// challenge 4: Change the Color the of All Buttons---->
let all_buttons = document.getElementsByTagName("button");
//console.log(all_buttons);
let copyAllButtons = [];
for (let i = 0; i < all_buttons.length; i++) {
copyAllButtons.push(all_buttons[i].classList[1]);
}
//console.log(copyAllButtons);
function buttonColorChange(buttonThingy) {
if (buttonThingy.value === "red") {
buttonsRed();
} else if (buttonThingy.value === "green") {
buttonsGreen();
} else if (buttonThingy.value === "reset") {
buttonsReset();
} else if (buttonThingy.value === "random") {
buttonsRandom();
}
}
// all the buttons will be red
function buttonsRed() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add("btn-danger");
}
}
// all the buttons will be Green
function buttonsGreen() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add("btn-success");
}
}
// all the buttons will be reseted
function buttonsReset() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add(copyAllButtons[i]);
}
}
// all the buttons will be randomly changed
function buttonsRandom() {
let choices = ["btn-primary", "btn-danger", "btn-success", "btn-warning"];
for (let i = 0; i < all_buttons.length; i++) {
let randomNumber = Math.floor(Math.random() * 4);
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add(choices[randomNumber]);
}
}
// challenge 5: BlackJack
let blcakjackGame = {
you: { scoreSpan: "#your-blackjack-result", div: "#your-box", score: 0 },
dealer: {
scoreSpan: "#dealer-blackjack-result",
div: "#dealer-box",
score: 0,
},
cards: ["2", "3", "4", "5", "6", "7", "8", "9", "10", "K", "J", "Q", "A"],
cardsMap: {
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
K: 10,
J: 10,
Q: 10,
A: [1, 11],
},
wins: 0,
losses: 0,
draws: 0,
isStand: false,
turnsOver: false,
};
const hitSound = new Audio("resources/sounds/swish.m4a");
const winSound = new Audio("resources/sounds/cash.mp3");
const lossSound = new Audio("resources/sounds/aww.mp3");
const YOU = blcakjackGame["you"];
const DEALER = blcakjackGame["dealer"];
//hit button event
document
.querySelector("#blackjack-hit-button")
.addEventListener("click", blackjackHit);
//stand button event
document
.querySelector("#blackjack-stand-button")
.addEventListener("click", blackjackStand);
//deal button event
document
.querySelector("#blackjack-deal-button")
.addEventListener("click", blackjackDeal);
//hit button function
function blackjackHit() {
if (blcakjackGame["isStand"] === false) {
let card = randomCard();
//console.log(card);
showCard(card, YOU);
updateScore(card, YOU);
showScore(YOU);
//console.log(YOU["score"]);
}
}
function sleep(ms) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
//stand button Dealer logic
async function blackjackStand() {
blcakjackGame["isStand"] = true;
while (DEALER["score"] < 16 && blcakjackGame["isStand"] === true) {
let card = randomCard();
showCard(card, DEALER);
updateScore(card, DEALER);
showScore(DEALER);
await sleep(800);
}
blcakjackGame["turnsOver"] = true;
let winner = computeWinner();
//console.log(winner);
showResult(winner);
}
//random card function
function randomCard() {
let randomIndex = Math.floor(Math.random() * 13);
return blcakjackGame["cards"][randomIndex];
}
// showing card images
function showCard(card, activePlayer) {
if (activePlayer["score"] <= 21) {
let CardImage = document.createElement("img");
CardImage.src = `resources/images/card/${card}.png`;
document.querySelector(activePlayer["div"]).appendChild(CardImage);
hitSound.play();
}
}
//removes all Deal button
function blackjackDeal() {
if (blcakjackGame["turnsOver"] === true) {
blcakjackGame["isStand"] = false;
let yourImages = document
.querySelector("#your-box")
.querySelectorAll("img");
let dealerImages = document
.querySelector("#dealer-box")
.querySelectorAll("img");
for (let i = 0; i < yourImages.length; i++) {
yourImages[i].remove();
}
for (let i = 0; i < dealerImages.length; i++) {
dealerImages[i].remove();
}
YOU["score"] = 0;
DEALER["score"] = 0;
document.querySelector("#your-blackjack-result").textContent = 0;
document.querySelector("#dealer-blackjack-result").textContent = 0;
document.querySelector("#your-blackjack-result").style.color = "white";
document.querySelector("#dealer-blackjack-result").style.color = "white";
document.querySelector("#blackjack-result").textContent = "Let's play";
document.querySelector("#blackjack-result").style.color = "black";
blcakjackGame["turnsOver"] = true;
}
}
function updateScore(card, activePlayer) {
if (card === "A") {
//if adding 11 keeps me below 21 add 11 other wise adds 1
if (activePlayer["score"] + blcakjackGame["cardsMap"][card][1] <= 21) {
activePlayer["score"] += blcakjackGame["cardsMap"][card][1];
} else {
activePlayer["score"] += blcakjackGame["cardsMap"][card][0];
}
} else {
activePlayer["score"] += blcakjackGame["cardsMap"][card];
}
}
function showScore(activePlayer) {
if (activePlayer["score"] > 21) {
document.querySelector(activePlayer["scoreSpan"]).textContent = "BUST!!";
document.querySelector(activePlayer["scoreSpan"]).style.color = "red";
} else {
document.querySelector(activePlayer["scoreSpan"]).textContent =
activePlayer["score"];
}
}
//compute winner and return result
//updating wins,losses,draws
function computeWinner() {
let winner;
if (YOU["score"] <= 21) {
// conditions: higher score than dealer or dealer busts but your 21 or less
if (YOU["score"] > DEALER["score"] || DEALER["score"] > 21) {
blcakjackGame["wins"]++;
winner = YOU;
} else if (YOU["score"] < DEALER["score"]) {
blcakjackGame["losses"]++;
winner = DEALER;
} else if (YOU["score"] === DEALER["score"]) {
blcakjackGame["draws"]++;
}
//condition when user bust but dealer dosent
} else if (YOU["score"] > 21 && DEALER["score"] <= 21) {
blcakjackGame["losses"]++;
winner = DEALER;
}
//condition when you and dealer both bust
else if (YOU["score"] > 21 && DEALER["score"] > 21) {
blcakjackGame["draws"]++;
}
console.log(blcakjackGame);
return winner;
}
function showResult(winner) {
let message, messageColor;
if (blcakjackGame["turnsOver"] === true) {
if (winner === YOU) {
document.querySelector("#wins").textContent = blcakjackGame["wins"];
message = "You Won!";
messageColor = "green";
winSound.play();
} else if (winner === DEALER) {
document.querySelector("#losses").textContent = blcakjackGame["losses"];
message = "You Lost!";
messageColor = "red";
lossSound.play();
} else {
document.querySelector("#draws").textContent = blcakjackGame["draws"];
message = "You Drew!";
messageColor = "black";
}
document.querySelector("#blackjack-result").textContent = message;
document.querySelector("#blackjack-result").style.color = messageColor;
}
}
| {
document.getElementById("ageInDays").remove(this.editor);
} | identifier_body |
index.js | //challenge 1: Your Age in Days
function ageInDays() {
let birthYear = prompt(" What year you were born in?");
let aegInDayss = (2020 - birthYear) * 365;
let h1 = document.createElement("h1");
let textAnswer = document.createTextNode(
"you are " + aegInDayss + " days old."
);
h1.setAttribute("id", "ageInDays");
h1.appendChild(textAnswer);
document.getElementById("flex-box-result").appendChild(h1);
}
function reset() {
document.getElementById("ageInDays").remove(this.editor);
}
//challenge 2: Generate cat
function generateCat() {
let image = document.createElement("img");
let div = document.getElementById("flex-cat-gen");
image.src =
"https://thecatapi.com/api/images/get?format=src&type=gif&size=small";
div.appendChild(image);
}
// challenge 3: Rock, Paper, Scissor---->
function rpsGame(yourChoice) {
//console.log(yourChoice);
let humanChoice, botChoice;
humanChoice = yourChoice.id;
botChoice = numberToChoice(randToRpsInt());
//console.log("computerChoice:", botChoice);
result = decideWinner(humanChoice, botChoice);
//console.log(result);
message = finalMessage(result); // {'message' : 'You Won', 'color': 'green'}
//console.log(message);
rpsFrontEnd(yourChoice.id, botChoice, message);
}
//bot's random number between 1-3
function randToRpsInt() {
return Math.floor(Math.random() * 3);
}
function | (number) {
return ["rock", "paper", "scissors"][number];
}
function decideWinner(yourChoice, computerChoice) {
var rpsDatabase = {
rock: { scissors: 1, rock: 0.5, paper: 0 },
paper: { rock: 1, paper: 0.5, scissors: 0 },
scissors: { paper: 1, scissors: 0.5, rock: 0 },
};
let yourScore = rpsDatabase[yourChoice][computerChoice];
let computerScore = rpsDatabase[computerChoice][yourChoice];
return [yourScore, computerScore];
}
function finalMessage([yourScore, computerScore]) {
if (yourScore === 0) {
return { message: "You Lost", color: "red" };
} else if (yourScore === 0.5) {
return { message: "You Tied", color: "yellow" };
} else {
return { message: "You Youn", color: "green" };
}
}
function rpsFrontEnd(humanImageChoice, botImageChoice, finalMessage) {
let ImagesDatabase = {
rock: document.getElementById("rock").src,
paper: document.getElementById("paper").src,
scissors: document.getElementById("scissors").src,
};
//remove all the images
document.getElementById("rock").remove();
document.getElementById("paper").remove();
document.getElementById("scissors").remove();
let humanDiv = document.createElement("div");
let botDiv = document.createElement("div");
let messageDiv = document.createElement("div");
humanDiv.innerHTML =
"<img src='" +
ImagesDatabase[humanImageChoice] +
" ' height=150 width=150 style=' box-shadow: 0px 10px 50px rgba(37, 50, 233, 1);'>";
botDiv.innerHTML =
"<img src='" +
ImagesDatabase[botImageChoice] +
" ' height=150 width=150 style=' box-shadow: 0px 10px 50px rgba(243, 38, 24, 1);'>";
messageDiv.innerHTML =
"<h1 style='color: " +
finalMessage["color"] +
"; font-size: 60px; padding: 30px;'>" +
finalMessage["message"] +
"</h1>";
document.getElementById("flex-box-rps-div").appendChild(humanDiv);
document.getElementById("flex-box-rps-div").appendChild(messageDiv);
document.getElementById("flex-box-rps-div").appendChild(botDiv);
}
// challenge 4: Change the Color the of All Buttons---->
let all_buttons = document.getElementsByTagName("button");
//console.log(all_buttons);
let copyAllButtons = [];
for (let i = 0; i < all_buttons.length; i++) {
copyAllButtons.push(all_buttons[i].classList[1]);
}
//console.log(copyAllButtons);
function buttonColorChange(buttonThingy) {
if (buttonThingy.value === "red") {
buttonsRed();
} else if (buttonThingy.value === "green") {
buttonsGreen();
} else if (buttonThingy.value === "reset") {
buttonsReset();
} else if (buttonThingy.value === "random") {
buttonsRandom();
}
}
// all the buttons will be red
function buttonsRed() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add("btn-danger");
}
}
// all the buttons will be Green
function buttonsGreen() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add("btn-success");
}
}
// all the buttons will be reseted
function buttonsReset() {
for (let i = 0; i < all_buttons.length; i++) {
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add(copyAllButtons[i]);
}
}
// all the buttons will be randomly changed
function buttonsRandom() {
let choices = ["btn-primary", "btn-danger", "btn-success", "btn-warning"];
for (let i = 0; i < all_buttons.length; i++) {
let randomNumber = Math.floor(Math.random() * 4);
all_buttons[i].classList.remove(all_buttons[i].classList[1]);
all_buttons[i].classList.add(choices[randomNumber]);
}
}
// challenge 5: BlackJack
let blcakjackGame = {
you: { scoreSpan: "#your-blackjack-result", div: "#your-box", score: 0 },
dealer: {
scoreSpan: "#dealer-blackjack-result",
div: "#dealer-box",
score: 0,
},
cards: ["2", "3", "4", "5", "6", "7", "8", "9", "10", "K", "J", "Q", "A"],
cardsMap: {
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
K: 10,
J: 10,
Q: 10,
A: [1, 11],
},
wins: 0,
losses: 0,
draws: 0,
isStand: false,
turnsOver: false,
};
const hitSound = new Audio("resources/sounds/swish.m4a");
const winSound = new Audio("resources/sounds/cash.mp3");
const lossSound = new Audio("resources/sounds/aww.mp3");
const YOU = blcakjackGame["you"];
const DEALER = blcakjackGame["dealer"];
//hit button event
document
.querySelector("#blackjack-hit-button")
.addEventListener("click", blackjackHit);
//stand button event
document
.querySelector("#blackjack-stand-button")
.addEventListener("click", blackjackStand);
//deal button event
document
.querySelector("#blackjack-deal-button")
.addEventListener("click", blackjackDeal);
//hit button function
function blackjackHit() {
if (blcakjackGame["isStand"] === false) {
let card = randomCard();
//console.log(card);
showCard(card, YOU);
updateScore(card, YOU);
showScore(YOU);
//console.log(YOU["score"]);
}
}
function sleep(ms) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
//stand button Dealer logic
async function blackjackStand() {
blcakjackGame["isStand"] = true;
while (DEALER["score"] < 16 && blcakjackGame["isStand"] === true) {
let card = randomCard();
showCard(card, DEALER);
updateScore(card, DEALER);
showScore(DEALER);
await sleep(800);
}
blcakjackGame["turnsOver"] = true;
let winner = computeWinner();
//console.log(winner);
showResult(winner);
}
//random card function
function randomCard() {
let randomIndex = Math.floor(Math.random() * 13);
return blcakjackGame["cards"][randomIndex];
}
// showing card images
function showCard(card, activePlayer) {
if (activePlayer["score"] <= 21) {
let CardImage = document.createElement("img");
CardImage.src = `resources/images/card/${card}.png`;
document.querySelector(activePlayer["div"]).appendChild(CardImage);
hitSound.play();
}
}
//removes all Deal button
function blackjackDeal() {
if (blcakjackGame["turnsOver"] === true) {
blcakjackGame["isStand"] = false;
let yourImages = document
.querySelector("#your-box")
.querySelectorAll("img");
let dealerImages = document
.querySelector("#dealer-box")
.querySelectorAll("img");
for (let i = 0; i < yourImages.length; i++) {
yourImages[i].remove();
}
for (let i = 0; i < dealerImages.length; i++) {
dealerImages[i].remove();
}
YOU["score"] = 0;
DEALER["score"] = 0;
document.querySelector("#your-blackjack-result").textContent = 0;
document.querySelector("#dealer-blackjack-result").textContent = 0;
document.querySelector("#your-blackjack-result").style.color = "white";
document.querySelector("#dealer-blackjack-result").style.color = "white";
document.querySelector("#blackjack-result").textContent = "Let's play";
document.querySelector("#blackjack-result").style.color = "black";
blcakjackGame["turnsOver"] = true;
}
}
function updateScore(card, activePlayer) {
if (card === "A") {
//if adding 11 keeps me below 21 add 11 other wise adds 1
if (activePlayer["score"] + blcakjackGame["cardsMap"][card][1] <= 21) {
activePlayer["score"] += blcakjackGame["cardsMap"][card][1];
} else {
activePlayer["score"] += blcakjackGame["cardsMap"][card][0];
}
} else {
activePlayer["score"] += blcakjackGame["cardsMap"][card];
}
}
function showScore(activePlayer) {
if (activePlayer["score"] > 21) {
document.querySelector(activePlayer["scoreSpan"]).textContent = "BUST!!";
document.querySelector(activePlayer["scoreSpan"]).style.color = "red";
} else {
document.querySelector(activePlayer["scoreSpan"]).textContent =
activePlayer["score"];
}
}
//compute winner and return result
//updating wins,losses,draws
function computeWinner() {
let winner;
if (YOU["score"] <= 21) {
// conditions: higher score than dealer or dealer busts but your 21 or less
if (YOU["score"] > DEALER["score"] || DEALER["score"] > 21) {
blcakjackGame["wins"]++;
winner = YOU;
} else if (YOU["score"] < DEALER["score"]) {
blcakjackGame["losses"]++;
winner = DEALER;
} else if (YOU["score"] === DEALER["score"]) {
blcakjackGame["draws"]++;
}
//condition when user bust but dealer dosent
} else if (YOU["score"] > 21 && DEALER["score"] <= 21) {
blcakjackGame["losses"]++;
winner = DEALER;
}
//condition when you and dealer both bust
else if (YOU["score"] > 21 && DEALER["score"] > 21) {
blcakjackGame["draws"]++;
}
console.log(blcakjackGame);
return winner;
}
function showResult(winner) {
let message, messageColor;
if (blcakjackGame["turnsOver"] === true) {
if (winner === YOU) {
document.querySelector("#wins").textContent = blcakjackGame["wins"];
message = "You Won!";
messageColor = "green";
winSound.play();
} else if (winner === DEALER) {
document.querySelector("#losses").textContent = blcakjackGame["losses"];
message = "You Lost!";
messageColor = "red";
lossSound.play();
} else {
document.querySelector("#draws").textContent = blcakjackGame["draws"];
message = "You Drew!";
messageColor = "black";
}
document.querySelector("#blackjack-result").textContent = message;
document.querySelector("#blackjack-result").style.color = messageColor;
}
}
| numberToChoice | identifier_name |
stat.go | package plot
import (
"fmt"
"math"
"os"
"sort"
)
var _ = os.Open
// Stat is the interface of statistical transform.
//
// Statistical transform take a data frame and produce an other data frame.
// This is typically done by "summarizing", "modeling" or "transforming"
// the data in a statistically significant way.
//
// TODO: Location-/scale-invariance? f(x+a) = f(x)+a and f(x*a)=f(x*a) ??
type Stat interface {
// Name returns the name of this statistic.
Name() string
// Apply this statistic to data. The panel can be used to
// access the current scales, e.g. if the x-range is needed.
Apply(data *DataFrame, panel *Panel) *DataFrame
// Info returns the StatInfo which describes how this
// statistic can be used.
Info() StatInfo
}
// StatInfo contains information about how a stat can be used.
type StatInfo struct {
// NeededAes are the aestetics which must be present in the
// data frame. If not all needed aestetics are mapped this
// statistics cannot be applied.
NeededAes []string
// OptionalAes are the aestetocs which are used by this
// statistics if present, but it is no error if they are
// not mapped.
OptionalAes []string
ExtraFieldHandling ExtraFieldHandling
// TODO: Add information about resulting data frame?
}
type ExtraFieldHandling int
const (
IgnoreExtraFields ExtraFieldHandling = iota
FailOnExtraFields
GroupOnExtraFields
)
// -------------------------------------------------------------------------
// StatBin
type StatBin struct {
BinWidth float64
Drop bool
Origin *float64 // TODO: both optional fields as *float64?
}
var _ Stat = StatBin{}
func (StatBin) Name() string { return "StatBin" }
func (StatBin) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s StatBin) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil || data.N == 0 {
return nil
}
// println("StatBin Data:")
// data.Print(os.Stdout)
min, max, mini, maxi := MinMax(data, "x")
if mini == -1 && maxi == -1 {
return nil
}
// println("min/max", min, max)
if min == max {
// TODO. Also NaN and Inf
min -= 1
max += 1
}
var binWidth float64 = s.BinWidth
var numBins int
var origin float64
if binWidth == 0 {
binWidth = (max - min) / 30
numBins = 30
} else {
numBins = int((max-min)/binWidth + 0.5)
}
if s.Origin != nil {
origin = *s.Origin
} else {
origin = math.Floor(min/binWidth) * binWidth // round origin TODO: might overflow
}
x2bin := func(x float64) int { return int((x - origin) / binWidth) }
bin2x := func(b int) float64 { return float64(b)*binWidth + binWidth/2 + origin }
counts := make([]int64, numBins+1) // TODO: Buggy here?
// println("StatBin, made counts", len(counts), min, max, origin, binWidth)
column := data.Columns["x"].Data
maxcount := int64(0)
for i := 0; i < data.N; i++ {
bin := x2bin(column[i])
// println(" StatBin ", i, column[i], bin)
counts[bin]++
if counts[bin] > maxcount {
maxcount = counts[bin]
}
}
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("%s binned by x", data.Name), pool)
nr := 0
for _, count := range counts {
if count == 0 && s.Drop {
continue
}
nr++
}
result.N = nr
X := NewField(nr, data.Columns["x"].Type, pool)
Count := NewField(nr, Float, pool) // TODO: Int?
NCount := NewField(nr, Float, pool) | if count == 0 && s.Drop {
continue
}
X.Data[i] = bin2x(bin)
Count.Data[i] = float64(count)
NCount.Data[i] = float64(count) / float64(maxcount)
density := float64(count) / binWidth / float64(data.N)
Density.Data[i] = density
if density > maxDensity {
maxDensity = density
}
// println("bin =", bin, " x =", bin2x(bin), " count =", count)
i++
}
i = 0
// TODO: all in one loop?
for _, count := range counts {
if count == 0 && s.Drop {
continue
}
NDensity.Data[i] = Density.Data[i] / maxDensity
i++
}
result.Columns["x"] = X
result.Columns["count"] = Count
result.Columns["ncount"] = NCount
result.Columns["density"] = Density
result.Columns["ndensity"] = NDensity
return result
}
// -------------------------------------------------------------------------
// StatLinReg
type StatLinReq struct {
A, B float64
}
var _ Stat = &StatLinReq{}
func (StatLinReq) Name() string { return "StatLinReq" }
func (StatLinReq) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s *StatLinReq) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil {
return nil
}
xc, yc := data.Columns["x"].Data, data.Columns["y"].Data
xm, ym := float64(0), float64(0)
for i := 0; i < data.N; i++ {
xm += xc[i]
ym += yc[i]
}
xm /= float64(data.N)
ym /= float64(data.N)
sy, sx := float64(0), float64(0)
for i := 0; i < data.N; i++ {
x := xc[i]
y := yc[i]
dx := x - xm
sx += dx * dx
sy += dx * (y - ym)
}
s.B = sy / sx
s.A = ym - s.B*xm
aErr, bErr := s.A*0.2, s.B*0.1 // BUG
// See http://en.wikipedia.org/wiki/Simple_linear_regression#Normality_assumption
// for convidance intervalls of A and B.
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("linear regression of %s", data.Name), pool)
result.N = 1
intercept, slope := NewField(1, Float, pool), NewField(1, Float, pool)
intercept.Data[0], slope.Data[0] = s.A, s.B
interceptErr, slopeErr := NewField(1, Float, pool), NewField(1, Float, pool)
interceptErr.Data[0], slopeErr.Data[0] = aErr, bErr
result.Columns["intercept"] = intercept
result.Columns["slope"] = slope
result.Columns["interceptErr"] = interceptErr
result.Columns["slopeErr"] = slopeErr
return result
}
// -------------------------------------------------------------------------
// Stat Smooth
// Major TODO
type StatSmooth struct {
A, B float64
}
var _ Stat = &StatSmooth{}
func (StatSmooth) Name() string { return "StatSmooth" }
func (StatSmooth) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s *StatSmooth) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil {
return nil
}
xc, yc := data.Columns["x"].Data, data.Columns["y"].Data
xm, ym := float64(0), float64(0)
for i := 0; i < data.N; i++ {
xm += xc[i]
ym += yc[i]
}
xm /= float64(data.N)
ym /= float64(data.N)
sy, sx := float64(0), float64(0)
for i := 0; i < data.N; i++ {
x := xc[i]
y := xc[i]
dx := x - xm
sx += dx * dx
sy += dx * (y - ym)
}
s.B = sy / sx
s.A = ym - s.B*xm
aErr, bErr := s.A*0.2, s.B*0.1 // BUG
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("linear regression of %s", data.Name), pool)
result.N = 100 // TODO
xf := NewField(result.N, Float, pool)
yf := NewField(result.N, Float, pool)
yminf := NewField(result.N, Float, pool)
ymaxf := NewField(result.N, Float, pool)
minx, maxx, _, _ := MinMax(data, "x")
// TODO: maybe rescale to full range
xrange := maxx - minx
for i := 0; i < result.N; i++ {
x := minx + float64(i)*xrange/float64(result.N-1)
xf.Data[i] = x
yf.Data[i] = s.A*x + s.B
yminf.Data[i] = (s.A-aErr)*x + (s.B - bErr) // BUG
ymaxf.Data[i] = (s.A+aErr)*x + (s.B + bErr) // BUG
}
return result
}
// -------------------------------------------------------------------------
// StatLabel
type StatLabel struct {
Format string
}
var _ Stat = StatLabel{}
func (StatLabel) Name() string { return "StatLabel" }
func (StatLabel) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y", "value"},
OptionalAes: []string{"color"},
ExtraFieldHandling: IgnoreExtraFields,
}
}
func (s StatLabel) Apply(data *DataFrame, _ *Panel) *DataFrame {
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("labeling %s", data.Name), pool)
result.N = data.N
textf := NewField(result.N, String, pool)
value := data.Columns["value"].Data
for i := 0; i < result.N; i++ {
// BUG: what if value is time or string?
t := fmt.Sprintf(s.Format, value[i])
textf.Data[i] = float64(pool.Add(t))
}
result.Columns["x"] = data.Columns["x"].Copy()
result.Columns["y"] = data.Columns["y"].Copy()
result.Columns["text"] = textf
return result
}
// -------------------------------------------------------------------------
// StatFunction
// StatFunction draws the functions F interpolating it by N points.
type StatFunction struct {
F func(x float64) float64
N int
}
var _ Stat = StatFunction{}
func (StatFunction) Name() string { return "StatFunction" }
func (StatFunction) Info() StatInfo {
return StatInfo{
NeededAes: []string{},
OptionalAes: []string{},
ExtraFieldHandling: IgnoreExtraFields,
}
}
func (s StatFunction) Apply(data *DataFrame, panel *Panel) *DataFrame {
sx := panel.Scales["x"]
n := s.N
if n == 0 {
n = 101
}
xmin, xmax := sx.DomainMin, sx.DomainMax // TODO
fmt.Printf("StatFunction %.2f -- %.2f\n", xmin, xmax)
delta := (xmax - xmin) / float64(n-1)
result := NewDataFrame("function", data.Pool)
result.N = n
xf := NewField(n, Float, data.Pool)
yf := NewField(n, Float, data.Pool)
for i := 0; i < n; i++ {
x := xmin + float64(i)*delta
xf.Data[i] = x
yf.Data[i] = s.F(x)
if i%10 == 0 {
fmt.Printf("sin: x=%.2f y=%.2f\n", x, yf.Data[i])
}
}
result.Columns["x"] = xf
result.Columns["y"] = yf
return result
}
// -------------------------------------------------------------------------
// StatBoxplot
type StatBoxplot struct {
}
var _ Stat = StatBoxplot{}
func (StatBoxplot) Name() string { return "StatBoxplot" }
func (StatBoxplot) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{},
ExtraFieldHandling: GroupOnExtraFields,
}
}
type boxplot struct {
n int
min, max float64
low, q1, med, q3, high float64
outliers []float64
}
// TODO: handle corner cases
func computeBoxplot(d []float64) (b boxplot) {
n := len(d)
b.n = n
sort.Float64s(d)
// Compute the five boxplot values.
b.min, b.max = d[0], d[n-1]
if n%2 == 1 {
b.med = d[(n-1)/2]
} else {
b.med = (d[n/2] + d[n/2-1]) / 2
}
b.q1, b.q3 = d[n/4], d[3*n/4]
iqr := b.q3 - b.q1
lo, hi := b.q1-1.5*iqr, b.q3+1.5*iqr
b.low, b.high = b.max, b.min
// Compute low, high and outliers.
for _, y := range d {
if y >= lo && y < b.low {
b.low = y
}
if y <= hi && y > b.high {
b.high = y
}
if y < lo || y > hi {
b.outliers = append(b.outliers, y)
}
}
return b
}
func (s StatBoxplot) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil || data.N == 0 {
return nil
}
xd, yd := data.Columns["x"].Data, data.Columns["y"].Data
xs := Levels(data, "x").Elements()
sort.Float64s(xs)
n := len(xs)
ys := make(map[float64][]float64)
pool := data.Pool
xf := NewField(n, data.Columns["x"].Type, pool)
numf, medf := NewField(n, Int, pool), NewField(n, Float, pool)
minf, maxf := NewField(n, Float, pool), NewField(n, Float, pool)
lowf, highf := NewField(n, Float, pool), NewField(n, Float, pool)
q1f, q3f := NewField(n, Float, pool), NewField(n, Float, pool)
outf := NewField(n, Vector, pool)
for i := 0; i < data.N; i++ {
x, y := xd[i], yd[i]
ys[x] = append(ys[x], y)
}
i := 0
for x, y := range ys {
b := computeBoxplot(y)
xf.Data[i] = x
numf.Data[i] = float64(b.n)
minf.Data[i] = b.min
lowf.Data[i] = b.low
q1f.Data[i] = b.q1
medf.Data[i] = b.med
q3f.Data[i] = b.q3
highf.Data[i] = b.high
maxf.Data[i] = b.max
outf.SetVec(i, b.outliers)
i++
}
result := NewDataFrame(fmt.Sprintf("boxplot of %s", data.Name), pool)
result.N = n
result.Columns["x"] = xf
result.Columns["count"] = numf
result.Columns["min"] = minf
result.Columns["low"] = lowf
result.Columns["q1"] = q1f
result.Columns["mid"] = medf
result.Columns["q3"] = q3f
result.Columns["high"] = highf
result.Columns["max"] = maxf
result.Columns["outliers"] = outf
return result
} | Density := NewField(nr, Float, pool)
NDensity := NewField(nr, Float, pool)
i := 0
maxDensity := float64(0)
for bin, count := range counts { | random_line_split |
stat.go | package plot
import (
"fmt"
"math"
"os"
"sort"
)
var _ = os.Open
// Stat is the interface of statistical transform.
//
// Statistical transform take a data frame and produce an other data frame.
// This is typically done by "summarizing", "modeling" or "transforming"
// the data in a statistically significant way.
//
// TODO: Location-/scale-invariance? f(x+a) = f(x)+a and f(x*a)=f(x*a) ??
type Stat interface {
// Name returns the name of this statistic.
Name() string
// Apply this statistic to data. The panel can be used to
// access the current scales, e.g. if the x-range is needed.
Apply(data *DataFrame, panel *Panel) *DataFrame
// Info returns the StatInfo which describes how this
// statistic can be used.
Info() StatInfo
}
// StatInfo contains information about how a stat can be used.
type StatInfo struct {
// NeededAes are the aestetics which must be present in the
// data frame. If not all needed aestetics are mapped this
// statistics cannot be applied.
NeededAes []string
// OptionalAes are the aestetocs which are used by this
// statistics if present, but it is no error if they are
// not mapped.
OptionalAes []string
ExtraFieldHandling ExtraFieldHandling
// TODO: Add information about resulting data frame?
}
type ExtraFieldHandling int
const (
IgnoreExtraFields ExtraFieldHandling = iota
FailOnExtraFields
GroupOnExtraFields
)
// -------------------------------------------------------------------------
// StatBin
type StatBin struct {
BinWidth float64
Drop bool
Origin *float64 // TODO: both optional fields as *float64?
}
var _ Stat = StatBin{}
func (StatBin) Name() string { return "StatBin" }
func (StatBin) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s StatBin) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil || data.N == 0 {
return nil
}
// println("StatBin Data:")
// data.Print(os.Stdout)
min, max, mini, maxi := MinMax(data, "x")
if mini == -1 && maxi == -1 {
return nil
}
// println("min/max", min, max)
if min == max {
// TODO. Also NaN and Inf
min -= 1
max += 1
}
var binWidth float64 = s.BinWidth
var numBins int
var origin float64
if binWidth == 0 {
binWidth = (max - min) / 30
numBins = 30
} else {
numBins = int((max-min)/binWidth + 0.5)
}
if s.Origin != nil {
origin = *s.Origin
} else {
origin = math.Floor(min/binWidth) * binWidth // round origin TODO: might overflow
}
x2bin := func(x float64) int { return int((x - origin) / binWidth) }
bin2x := func(b int) float64 { return float64(b)*binWidth + binWidth/2 + origin }
counts := make([]int64, numBins+1) // TODO: Buggy here?
// println("StatBin, made counts", len(counts), min, max, origin, binWidth)
column := data.Columns["x"].Data
maxcount := int64(0)
for i := 0; i < data.N; i++ {
bin := x2bin(column[i])
// println(" StatBin ", i, column[i], bin)
counts[bin]++
if counts[bin] > maxcount {
maxcount = counts[bin]
}
}
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("%s binned by x", data.Name), pool)
nr := 0
for _, count := range counts {
if count == 0 && s.Drop {
continue
}
nr++
}
result.N = nr
X := NewField(nr, data.Columns["x"].Type, pool)
Count := NewField(nr, Float, pool) // TODO: Int?
NCount := NewField(nr, Float, pool)
Density := NewField(nr, Float, pool)
NDensity := NewField(nr, Float, pool)
i := 0
maxDensity := float64(0)
for bin, count := range counts {
if count == 0 && s.Drop {
continue
}
X.Data[i] = bin2x(bin)
Count.Data[i] = float64(count)
NCount.Data[i] = float64(count) / float64(maxcount)
density := float64(count) / binWidth / float64(data.N)
Density.Data[i] = density
if density > maxDensity {
maxDensity = density
}
// println("bin =", bin, " x =", bin2x(bin), " count =", count)
i++
}
i = 0
// TODO: all in one loop?
for _, count := range counts {
if count == 0 && s.Drop {
continue
}
NDensity.Data[i] = Density.Data[i] / maxDensity
i++
}
result.Columns["x"] = X
result.Columns["count"] = Count
result.Columns["ncount"] = NCount
result.Columns["density"] = Density
result.Columns["ndensity"] = NDensity
return result
}
// -------------------------------------------------------------------------
// StatLinReg
type StatLinReq struct {
A, B float64
}
var _ Stat = &StatLinReq{}
func (StatLinReq) Name() string { return "StatLinReq" }
func (StatLinReq) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s *StatLinReq) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil {
return nil
}
xc, yc := data.Columns["x"].Data, data.Columns["y"].Data
xm, ym := float64(0), float64(0)
for i := 0; i < data.N; i++ {
xm += xc[i]
ym += yc[i]
}
xm /= float64(data.N)
ym /= float64(data.N)
sy, sx := float64(0), float64(0)
for i := 0; i < data.N; i++ |
s.B = sy / sx
s.A = ym - s.B*xm
aErr, bErr := s.A*0.2, s.B*0.1 // BUG
// See http://en.wikipedia.org/wiki/Simple_linear_regression#Normality_assumption
// for convidance intervalls of A and B.
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("linear regression of %s", data.Name), pool)
result.N = 1
intercept, slope := NewField(1, Float, pool), NewField(1, Float, pool)
intercept.Data[0], slope.Data[0] = s.A, s.B
interceptErr, slopeErr := NewField(1, Float, pool), NewField(1, Float, pool)
interceptErr.Data[0], slopeErr.Data[0] = aErr, bErr
result.Columns["intercept"] = intercept
result.Columns["slope"] = slope
result.Columns["interceptErr"] = interceptErr
result.Columns["slopeErr"] = slopeErr
return result
}
// -------------------------------------------------------------------------
// Stat Smooth
// Major TODO
type StatSmooth struct {
A, B float64
}
var _ Stat = &StatSmooth{}
func (StatSmooth) Name() string { return "StatSmooth" }
func (StatSmooth) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s *StatSmooth) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil {
return nil
}
xc, yc := data.Columns["x"].Data, data.Columns["y"].Data
xm, ym := float64(0), float64(0)
for i := 0; i < data.N; i++ {
xm += xc[i]
ym += yc[i]
}
xm /= float64(data.N)
ym /= float64(data.N)
sy, sx := float64(0), float64(0)
for i := 0; i < data.N; i++ {
x := xc[i]
y := xc[i]
dx := x - xm
sx += dx * dx
sy += dx * (y - ym)
}
s.B = sy / sx
s.A = ym - s.B*xm
aErr, bErr := s.A*0.2, s.B*0.1 // BUG
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("linear regression of %s", data.Name), pool)
result.N = 100 // TODO
xf := NewField(result.N, Float, pool)
yf := NewField(result.N, Float, pool)
yminf := NewField(result.N, Float, pool)
ymaxf := NewField(result.N, Float, pool)
minx, maxx, _, _ := MinMax(data, "x")
// TODO: maybe rescale to full range
xrange := maxx - minx
for i := 0; i < result.N; i++ {
x := minx + float64(i)*xrange/float64(result.N-1)
xf.Data[i] = x
yf.Data[i] = s.A*x + s.B
yminf.Data[i] = (s.A-aErr)*x + (s.B - bErr) // BUG
ymaxf.Data[i] = (s.A+aErr)*x + (s.B + bErr) // BUG
}
return result
}
// -------------------------------------------------------------------------
// StatLabel
type StatLabel struct {
Format string
}
var _ Stat = StatLabel{}
func (StatLabel) Name() string { return "StatLabel" }
func (StatLabel) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y", "value"},
OptionalAes: []string{"color"},
ExtraFieldHandling: IgnoreExtraFields,
}
}
func (s StatLabel) Apply(data *DataFrame, _ *Panel) *DataFrame {
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("labeling %s", data.Name), pool)
result.N = data.N
textf := NewField(result.N, String, pool)
value := data.Columns["value"].Data
for i := 0; i < result.N; i++ {
// BUG: what if value is time or string?
t := fmt.Sprintf(s.Format, value[i])
textf.Data[i] = float64(pool.Add(t))
}
result.Columns["x"] = data.Columns["x"].Copy()
result.Columns["y"] = data.Columns["y"].Copy()
result.Columns["text"] = textf
return result
}
// -------------------------------------------------------------------------
// StatFunction
// StatFunction draws the functions F interpolating it by N points.
type StatFunction struct {
F func(x float64) float64
N int
}
var _ Stat = StatFunction{}
func (StatFunction) Name() string { return "StatFunction" }
func (StatFunction) Info() StatInfo {
return StatInfo{
NeededAes: []string{},
OptionalAes: []string{},
ExtraFieldHandling: IgnoreExtraFields,
}
}
func (s StatFunction) Apply(data *DataFrame, panel *Panel) *DataFrame {
sx := panel.Scales["x"]
n := s.N
if n == 0 {
n = 101
}
xmin, xmax := sx.DomainMin, sx.DomainMax // TODO
fmt.Printf("StatFunction %.2f -- %.2f\n", xmin, xmax)
delta := (xmax - xmin) / float64(n-1)
result := NewDataFrame("function", data.Pool)
result.N = n
xf := NewField(n, Float, data.Pool)
yf := NewField(n, Float, data.Pool)
for i := 0; i < n; i++ {
x := xmin + float64(i)*delta
xf.Data[i] = x
yf.Data[i] = s.F(x)
if i%10 == 0 {
fmt.Printf("sin: x=%.2f y=%.2f\n", x, yf.Data[i])
}
}
result.Columns["x"] = xf
result.Columns["y"] = yf
return result
}
// -------------------------------------------------------------------------
// StatBoxplot
type StatBoxplot struct {
}
var _ Stat = StatBoxplot{}
func (StatBoxplot) Name() string { return "StatBoxplot" }
func (StatBoxplot) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{},
ExtraFieldHandling: GroupOnExtraFields,
}
}
type boxplot struct {
n int
min, max float64
low, q1, med, q3, high float64
outliers []float64
}
// TODO: handle corner cases
func computeBoxplot(d []float64) (b boxplot) {
n := len(d)
b.n = n
sort.Float64s(d)
// Compute the five boxplot values.
b.min, b.max = d[0], d[n-1]
if n%2 == 1 {
b.med = d[(n-1)/2]
} else {
b.med = (d[n/2] + d[n/2-1]) / 2
}
b.q1, b.q3 = d[n/4], d[3*n/4]
iqr := b.q3 - b.q1
lo, hi := b.q1-1.5*iqr, b.q3+1.5*iqr
b.low, b.high = b.max, b.min
// Compute low, high and outliers.
for _, y := range d {
if y >= lo && y < b.low {
b.low = y
}
if y <= hi && y > b.high {
b.high = y
}
if y < lo || y > hi {
b.outliers = append(b.outliers, y)
}
}
return b
}
func (s StatBoxplot) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil || data.N == 0 {
return nil
}
xd, yd := data.Columns["x"].Data, data.Columns["y"].Data
xs := Levels(data, "x").Elements()
sort.Float64s(xs)
n := len(xs)
ys := make(map[float64][]float64)
pool := data.Pool
xf := NewField(n, data.Columns["x"].Type, pool)
numf, medf := NewField(n, Int, pool), NewField(n, Float, pool)
minf, maxf := NewField(n, Float, pool), NewField(n, Float, pool)
lowf, highf := NewField(n, Float, pool), NewField(n, Float, pool)
q1f, q3f := NewField(n, Float, pool), NewField(n, Float, pool)
outf := NewField(n, Vector, pool)
for i := 0; i < data.N; i++ {
x, y := xd[i], yd[i]
ys[x] = append(ys[x], y)
}
i := 0
for x, y := range ys {
b := computeBoxplot(y)
xf.Data[i] = x
numf.Data[i] = float64(b.n)
minf.Data[i] = b.min
lowf.Data[i] = b.low
q1f.Data[i] = b.q1
medf.Data[i] = b.med
q3f.Data[i] = b.q3
highf.Data[i] = b.high
maxf.Data[i] = b.max
outf.SetVec(i, b.outliers)
i++
}
result := NewDataFrame(fmt.Sprintf("boxplot of %s", data.Name), pool)
result.N = n
result.Columns["x"] = xf
result.Columns["count"] = numf
result.Columns["min"] = minf
result.Columns["low"] = lowf
result.Columns["q1"] = q1f
result.Columns["mid"] = medf
result.Columns["q3"] = q3f
result.Columns["high"] = highf
result.Columns["max"] = maxf
result.Columns["outliers"] = outf
return result
}
| {
x := xc[i]
y := yc[i]
dx := x - xm
sx += dx * dx
sy += dx * (y - ym)
} | conditional_block |
stat.go | package plot
import (
"fmt"
"math"
"os"
"sort"
)
var _ = os.Open
// Stat is the interface of statistical transform.
//
// Statistical transform take a data frame and produce an other data frame.
// This is typically done by "summarizing", "modeling" or "transforming"
// the data in a statistically significant way.
//
// TODO: Location-/scale-invariance? f(x+a) = f(x)+a and f(x*a)=f(x*a) ??
type Stat interface {
// Name returns the name of this statistic.
Name() string
// Apply this statistic to data. The panel can be used to
// access the current scales, e.g. if the x-range is needed.
Apply(data *DataFrame, panel *Panel) *DataFrame
// Info returns the StatInfo which describes how this
// statistic can be used.
Info() StatInfo
}
// StatInfo contains information about how a stat can be used.
type StatInfo struct {
// NeededAes are the aestetics which must be present in the
// data frame. If not all needed aestetics are mapped this
// statistics cannot be applied.
NeededAes []string
// OptionalAes are the aestetocs which are used by this
// statistics if present, but it is no error if they are
// not mapped.
OptionalAes []string
ExtraFieldHandling ExtraFieldHandling
// TODO: Add information about resulting data frame?
}
type ExtraFieldHandling int
const (
IgnoreExtraFields ExtraFieldHandling = iota
FailOnExtraFields
GroupOnExtraFields
)
// -------------------------------------------------------------------------
// StatBin
type StatBin struct {
BinWidth float64
Drop bool
Origin *float64 // TODO: both optional fields as *float64?
}
var _ Stat = StatBin{}
func (StatBin) Name() string { return "StatBin" }
func (StatBin) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s StatBin) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil || data.N == 0 {
return nil
}
// println("StatBin Data:")
// data.Print(os.Stdout)
min, max, mini, maxi := MinMax(data, "x")
if mini == -1 && maxi == -1 {
return nil
}
// println("min/max", min, max)
if min == max {
// TODO. Also NaN and Inf
min -= 1
max += 1
}
var binWidth float64 = s.BinWidth
var numBins int
var origin float64
if binWidth == 0 {
binWidth = (max - min) / 30
numBins = 30
} else {
numBins = int((max-min)/binWidth + 0.5)
}
if s.Origin != nil {
origin = *s.Origin
} else {
origin = math.Floor(min/binWidth) * binWidth // round origin TODO: might overflow
}
x2bin := func(x float64) int { return int((x - origin) / binWidth) }
bin2x := func(b int) float64 { return float64(b)*binWidth + binWidth/2 + origin }
counts := make([]int64, numBins+1) // TODO: Buggy here?
// println("StatBin, made counts", len(counts), min, max, origin, binWidth)
column := data.Columns["x"].Data
maxcount := int64(0)
for i := 0; i < data.N; i++ {
bin := x2bin(column[i])
// println(" StatBin ", i, column[i], bin)
counts[bin]++
if counts[bin] > maxcount {
maxcount = counts[bin]
}
}
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("%s binned by x", data.Name), pool)
nr := 0
for _, count := range counts {
if count == 0 && s.Drop {
continue
}
nr++
}
result.N = nr
X := NewField(nr, data.Columns["x"].Type, pool)
Count := NewField(nr, Float, pool) // TODO: Int?
NCount := NewField(nr, Float, pool)
Density := NewField(nr, Float, pool)
NDensity := NewField(nr, Float, pool)
i := 0
maxDensity := float64(0)
for bin, count := range counts {
if count == 0 && s.Drop {
continue
}
X.Data[i] = bin2x(bin)
Count.Data[i] = float64(count)
NCount.Data[i] = float64(count) / float64(maxcount)
density := float64(count) / binWidth / float64(data.N)
Density.Data[i] = density
if density > maxDensity {
maxDensity = density
}
// println("bin =", bin, " x =", bin2x(bin), " count =", count)
i++
}
i = 0
// TODO: all in one loop?
for _, count := range counts {
if count == 0 && s.Drop {
continue
}
NDensity.Data[i] = Density.Data[i] / maxDensity
i++
}
result.Columns["x"] = X
result.Columns["count"] = Count
result.Columns["ncount"] = NCount
result.Columns["density"] = Density
result.Columns["ndensity"] = NDensity
return result
}
// -------------------------------------------------------------------------
// StatLinReg
type StatLinReq struct {
A, B float64
}
var _ Stat = &StatLinReq{}
func (StatLinReq) Name() string { return "StatLinReq" }
func (StatLinReq) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s *StatLinReq) Apply(data *DataFrame, _ *Panel) *DataFrame |
// -------------------------------------------------------------------------
// Stat Smooth
// Major TODO
type StatSmooth struct {
A, B float64
}
var _ Stat = &StatSmooth{}
func (StatSmooth) Name() string { return "StatSmooth" }
func (StatSmooth) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s *StatSmooth) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil {
return nil
}
xc, yc := data.Columns["x"].Data, data.Columns["y"].Data
xm, ym := float64(0), float64(0)
for i := 0; i < data.N; i++ {
xm += xc[i]
ym += yc[i]
}
xm /= float64(data.N)
ym /= float64(data.N)
sy, sx := float64(0), float64(0)
for i := 0; i < data.N; i++ {
x := xc[i]
y := xc[i]
dx := x - xm
sx += dx * dx
sy += dx * (y - ym)
}
s.B = sy / sx
s.A = ym - s.B*xm
aErr, bErr := s.A*0.2, s.B*0.1 // BUG
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("linear regression of %s", data.Name), pool)
result.N = 100 // TODO
xf := NewField(result.N, Float, pool)
yf := NewField(result.N, Float, pool)
yminf := NewField(result.N, Float, pool)
ymaxf := NewField(result.N, Float, pool)
minx, maxx, _, _ := MinMax(data, "x")
// TODO: maybe rescale to full range
xrange := maxx - minx
for i := 0; i < result.N; i++ {
x := minx + float64(i)*xrange/float64(result.N-1)
xf.Data[i] = x
yf.Data[i] = s.A*x + s.B
yminf.Data[i] = (s.A-aErr)*x + (s.B - bErr) // BUG
ymaxf.Data[i] = (s.A+aErr)*x + (s.B + bErr) // BUG
}
return result
}
// -------------------------------------------------------------------------
// StatLabel
type StatLabel struct {
Format string
}
var _ Stat = StatLabel{}
func (StatLabel) Name() string { return "StatLabel" }
func (StatLabel) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y", "value"},
OptionalAes: []string{"color"},
ExtraFieldHandling: IgnoreExtraFields,
}
}
func (s StatLabel) Apply(data *DataFrame, _ *Panel) *DataFrame {
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("labeling %s", data.Name), pool)
result.N = data.N
textf := NewField(result.N, String, pool)
value := data.Columns["value"].Data
for i := 0; i < result.N; i++ {
// BUG: what if value is time or string?
t := fmt.Sprintf(s.Format, value[i])
textf.Data[i] = float64(pool.Add(t))
}
result.Columns["x"] = data.Columns["x"].Copy()
result.Columns["y"] = data.Columns["y"].Copy()
result.Columns["text"] = textf
return result
}
// -------------------------------------------------------------------------
// StatFunction
// StatFunction draws the functions F interpolating it by N points.
type StatFunction struct {
F func(x float64) float64
N int
}
var _ Stat = StatFunction{}
func (StatFunction) Name() string { return "StatFunction" }
func (StatFunction) Info() StatInfo {
return StatInfo{
NeededAes: []string{},
OptionalAes: []string{},
ExtraFieldHandling: IgnoreExtraFields,
}
}
func (s StatFunction) Apply(data *DataFrame, panel *Panel) *DataFrame {
sx := panel.Scales["x"]
n := s.N
if n == 0 {
n = 101
}
xmin, xmax := sx.DomainMin, sx.DomainMax // TODO
fmt.Printf("StatFunction %.2f -- %.2f\n", xmin, xmax)
delta := (xmax - xmin) / float64(n-1)
result := NewDataFrame("function", data.Pool)
result.N = n
xf := NewField(n, Float, data.Pool)
yf := NewField(n, Float, data.Pool)
for i := 0; i < n; i++ {
x := xmin + float64(i)*delta
xf.Data[i] = x
yf.Data[i] = s.F(x)
if i%10 == 0 {
fmt.Printf("sin: x=%.2f y=%.2f\n", x, yf.Data[i])
}
}
result.Columns["x"] = xf
result.Columns["y"] = yf
return result
}
// -------------------------------------------------------------------------
// StatBoxplot
type StatBoxplot struct {
}
var _ Stat = StatBoxplot{}
func (StatBoxplot) Name() string { return "StatBoxplot" }
func (StatBoxplot) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{},
ExtraFieldHandling: GroupOnExtraFields,
}
}
type boxplot struct {
n int
min, max float64
low, q1, med, q3, high float64
outliers []float64
}
// TODO: handle corner cases
func computeBoxplot(d []float64) (b boxplot) {
n := len(d)
b.n = n
sort.Float64s(d)
// Compute the five boxplot values.
b.min, b.max = d[0], d[n-1]
if n%2 == 1 {
b.med = d[(n-1)/2]
} else {
b.med = (d[n/2] + d[n/2-1]) / 2
}
b.q1, b.q3 = d[n/4], d[3*n/4]
iqr := b.q3 - b.q1
lo, hi := b.q1-1.5*iqr, b.q3+1.5*iqr
b.low, b.high = b.max, b.min
// Compute low, high and outliers.
for _, y := range d {
if y >= lo && y < b.low {
b.low = y
}
if y <= hi && y > b.high {
b.high = y
}
if y < lo || y > hi {
b.outliers = append(b.outliers, y)
}
}
return b
}
func (s StatBoxplot) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil || data.N == 0 {
return nil
}
xd, yd := data.Columns["x"].Data, data.Columns["y"].Data
xs := Levels(data, "x").Elements()
sort.Float64s(xs)
n := len(xs)
ys := make(map[float64][]float64)
pool := data.Pool
xf := NewField(n, data.Columns["x"].Type, pool)
numf, medf := NewField(n, Int, pool), NewField(n, Float, pool)
minf, maxf := NewField(n, Float, pool), NewField(n, Float, pool)
lowf, highf := NewField(n, Float, pool), NewField(n, Float, pool)
q1f, q3f := NewField(n, Float, pool), NewField(n, Float, pool)
outf := NewField(n, Vector, pool)
for i := 0; i < data.N; i++ {
x, y := xd[i], yd[i]
ys[x] = append(ys[x], y)
}
i := 0
for x, y := range ys {
b := computeBoxplot(y)
xf.Data[i] = x
numf.Data[i] = float64(b.n)
minf.Data[i] = b.min
lowf.Data[i] = b.low
q1f.Data[i] = b.q1
medf.Data[i] = b.med
q3f.Data[i] = b.q3
highf.Data[i] = b.high
maxf.Data[i] = b.max
outf.SetVec(i, b.outliers)
i++
}
result := NewDataFrame(fmt.Sprintf("boxplot of %s", data.Name), pool)
result.N = n
result.Columns["x"] = xf
result.Columns["count"] = numf
result.Columns["min"] = minf
result.Columns["low"] = lowf
result.Columns["q1"] = q1f
result.Columns["mid"] = medf
result.Columns["q3"] = q3f
result.Columns["high"] = highf
result.Columns["max"] = maxf
result.Columns["outliers"] = outf
return result
}
| {
if data == nil {
return nil
}
xc, yc := data.Columns["x"].Data, data.Columns["y"].Data
xm, ym := float64(0), float64(0)
for i := 0; i < data.N; i++ {
xm += xc[i]
ym += yc[i]
}
xm /= float64(data.N)
ym /= float64(data.N)
sy, sx := float64(0), float64(0)
for i := 0; i < data.N; i++ {
x := xc[i]
y := yc[i]
dx := x - xm
sx += dx * dx
sy += dx * (y - ym)
}
s.B = sy / sx
s.A = ym - s.B*xm
aErr, bErr := s.A*0.2, s.B*0.1 // BUG
// See http://en.wikipedia.org/wiki/Simple_linear_regression#Normality_assumption
// for convidance intervalls of A and B.
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("linear regression of %s", data.Name), pool)
result.N = 1
intercept, slope := NewField(1, Float, pool), NewField(1, Float, pool)
intercept.Data[0], slope.Data[0] = s.A, s.B
interceptErr, slopeErr := NewField(1, Float, pool), NewField(1, Float, pool)
interceptErr.Data[0], slopeErr.Data[0] = aErr, bErr
result.Columns["intercept"] = intercept
result.Columns["slope"] = slope
result.Columns["interceptErr"] = interceptErr
result.Columns["slopeErr"] = slopeErr
return result
} | identifier_body |
stat.go | package plot
import (
"fmt"
"math"
"os"
"sort"
)
var _ = os.Open
// Stat is the interface of statistical transform.
//
// Statistical transform take a data frame and produce an other data frame.
// This is typically done by "summarizing", "modeling" or "transforming"
// the data in a statistically significant way.
//
// TODO: Location-/scale-invariance? f(x+a) = f(x)+a and f(x*a)=f(x*a) ??
type Stat interface {
// Name returns the name of this statistic.
Name() string
// Apply this statistic to data. The panel can be used to
// access the current scales, e.g. if the x-range is needed.
Apply(data *DataFrame, panel *Panel) *DataFrame
// Info returns the StatInfo which describes how this
// statistic can be used.
Info() StatInfo
}
// StatInfo contains information about how a stat can be used.
type StatInfo struct {
// NeededAes are the aestetics which must be present in the
// data frame. If not all needed aestetics are mapped this
// statistics cannot be applied.
NeededAes []string
// OptionalAes are the aestetocs which are used by this
// statistics if present, but it is no error if they are
// not mapped.
OptionalAes []string
ExtraFieldHandling ExtraFieldHandling
// TODO: Add information about resulting data frame?
}
type ExtraFieldHandling int
const (
IgnoreExtraFields ExtraFieldHandling = iota
FailOnExtraFields
GroupOnExtraFields
)
// -------------------------------------------------------------------------
// StatBin
type StatBin struct {
BinWidth float64
Drop bool
Origin *float64 // TODO: both optional fields as *float64?
}
var _ Stat = StatBin{}
func (StatBin) Name() string { return "StatBin" }
func (StatBin) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s StatBin) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil || data.N == 0 {
return nil
}
// println("StatBin Data:")
// data.Print(os.Stdout)
min, max, mini, maxi := MinMax(data, "x")
if mini == -1 && maxi == -1 {
return nil
}
// println("min/max", min, max)
if min == max {
// TODO. Also NaN and Inf
min -= 1
max += 1
}
var binWidth float64 = s.BinWidth
var numBins int
var origin float64
if binWidth == 0 {
binWidth = (max - min) / 30
numBins = 30
} else {
numBins = int((max-min)/binWidth + 0.5)
}
if s.Origin != nil {
origin = *s.Origin
} else {
origin = math.Floor(min/binWidth) * binWidth // round origin TODO: might overflow
}
x2bin := func(x float64) int { return int((x - origin) / binWidth) }
bin2x := func(b int) float64 { return float64(b)*binWidth + binWidth/2 + origin }
counts := make([]int64, numBins+1) // TODO: Buggy here?
// println("StatBin, made counts", len(counts), min, max, origin, binWidth)
column := data.Columns["x"].Data
maxcount := int64(0)
for i := 0; i < data.N; i++ {
bin := x2bin(column[i])
// println(" StatBin ", i, column[i], bin)
counts[bin]++
if counts[bin] > maxcount {
maxcount = counts[bin]
}
}
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("%s binned by x", data.Name), pool)
nr := 0
for _, count := range counts {
if count == 0 && s.Drop {
continue
}
nr++
}
result.N = nr
X := NewField(nr, data.Columns["x"].Type, pool)
Count := NewField(nr, Float, pool) // TODO: Int?
NCount := NewField(nr, Float, pool)
Density := NewField(nr, Float, pool)
NDensity := NewField(nr, Float, pool)
i := 0
maxDensity := float64(0)
for bin, count := range counts {
if count == 0 && s.Drop {
continue
}
X.Data[i] = bin2x(bin)
Count.Data[i] = float64(count)
NCount.Data[i] = float64(count) / float64(maxcount)
density := float64(count) / binWidth / float64(data.N)
Density.Data[i] = density
if density > maxDensity {
maxDensity = density
}
// println("bin =", bin, " x =", bin2x(bin), " count =", count)
i++
}
i = 0
// TODO: all in one loop?
for _, count := range counts {
if count == 0 && s.Drop {
continue
}
NDensity.Data[i] = Density.Data[i] / maxDensity
i++
}
result.Columns["x"] = X
result.Columns["count"] = Count
result.Columns["ncount"] = NCount
result.Columns["density"] = Density
result.Columns["ndensity"] = NDensity
return result
}
// -------------------------------------------------------------------------
// StatLinReg
type StatLinReq struct {
A, B float64
}
var _ Stat = &StatLinReq{}
func (StatLinReq) Name() string { return "StatLinReq" }
func (StatLinReq) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s *StatLinReq) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil {
return nil
}
xc, yc := data.Columns["x"].Data, data.Columns["y"].Data
xm, ym := float64(0), float64(0)
for i := 0; i < data.N; i++ {
xm += xc[i]
ym += yc[i]
}
xm /= float64(data.N)
ym /= float64(data.N)
sy, sx := float64(0), float64(0)
for i := 0; i < data.N; i++ {
x := xc[i]
y := yc[i]
dx := x - xm
sx += dx * dx
sy += dx * (y - ym)
}
s.B = sy / sx
s.A = ym - s.B*xm
aErr, bErr := s.A*0.2, s.B*0.1 // BUG
// See http://en.wikipedia.org/wiki/Simple_linear_regression#Normality_assumption
// for convidance intervalls of A and B.
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("linear regression of %s", data.Name), pool)
result.N = 1
intercept, slope := NewField(1, Float, pool), NewField(1, Float, pool)
intercept.Data[0], slope.Data[0] = s.A, s.B
interceptErr, slopeErr := NewField(1, Float, pool), NewField(1, Float, pool)
interceptErr.Data[0], slopeErr.Data[0] = aErr, bErr
result.Columns["intercept"] = intercept
result.Columns["slope"] = slope
result.Columns["interceptErr"] = interceptErr
result.Columns["slopeErr"] = slopeErr
return result
}
// -------------------------------------------------------------------------
// Stat Smooth
// Major TODO
type StatSmooth struct {
A, B float64
}
var _ Stat = &StatSmooth{}
func (StatSmooth) Name() string { return "StatSmooth" }
func (StatSmooth) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{"weight"},
ExtraFieldHandling: GroupOnExtraFields,
}
}
func (s *StatSmooth) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil {
return nil
}
xc, yc := data.Columns["x"].Data, data.Columns["y"].Data
xm, ym := float64(0), float64(0)
for i := 0; i < data.N; i++ {
xm += xc[i]
ym += yc[i]
}
xm /= float64(data.N)
ym /= float64(data.N)
sy, sx := float64(0), float64(0)
for i := 0; i < data.N; i++ {
x := xc[i]
y := xc[i]
dx := x - xm
sx += dx * dx
sy += dx * (y - ym)
}
s.B = sy / sx
s.A = ym - s.B*xm
aErr, bErr := s.A*0.2, s.B*0.1 // BUG
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("linear regression of %s", data.Name), pool)
result.N = 100 // TODO
xf := NewField(result.N, Float, pool)
yf := NewField(result.N, Float, pool)
yminf := NewField(result.N, Float, pool)
ymaxf := NewField(result.N, Float, pool)
minx, maxx, _, _ := MinMax(data, "x")
// TODO: maybe rescale to full range
xrange := maxx - minx
for i := 0; i < result.N; i++ {
x := minx + float64(i)*xrange/float64(result.N-1)
xf.Data[i] = x
yf.Data[i] = s.A*x + s.B
yminf.Data[i] = (s.A-aErr)*x + (s.B - bErr) // BUG
ymaxf.Data[i] = (s.A+aErr)*x + (s.B + bErr) // BUG
}
return result
}
// -------------------------------------------------------------------------
// StatLabel
type StatLabel struct {
Format string
}
var _ Stat = StatLabel{}
func (StatLabel) Name() string { return "StatLabel" }
func (StatLabel) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y", "value"},
OptionalAes: []string{"color"},
ExtraFieldHandling: IgnoreExtraFields,
}
}
func (s StatLabel) Apply(data *DataFrame, _ *Panel) *DataFrame {
pool := data.Pool
result := NewDataFrame(fmt.Sprintf("labeling %s", data.Name), pool)
result.N = data.N
textf := NewField(result.N, String, pool)
value := data.Columns["value"].Data
for i := 0; i < result.N; i++ {
// BUG: what if value is time or string?
t := fmt.Sprintf(s.Format, value[i])
textf.Data[i] = float64(pool.Add(t))
}
result.Columns["x"] = data.Columns["x"].Copy()
result.Columns["y"] = data.Columns["y"].Copy()
result.Columns["text"] = textf
return result
}
// -------------------------------------------------------------------------
// StatFunction
// StatFunction draws the functions F interpolating it by N points.
type StatFunction struct {
F func(x float64) float64
N int
}
var _ Stat = StatFunction{}
func (StatFunction) | () string { return "StatFunction" }
func (StatFunction) Info() StatInfo {
return StatInfo{
NeededAes: []string{},
OptionalAes: []string{},
ExtraFieldHandling: IgnoreExtraFields,
}
}
func (s StatFunction) Apply(data *DataFrame, panel *Panel) *DataFrame {
sx := panel.Scales["x"]
n := s.N
if n == 0 {
n = 101
}
xmin, xmax := sx.DomainMin, sx.DomainMax // TODO
fmt.Printf("StatFunction %.2f -- %.2f\n", xmin, xmax)
delta := (xmax - xmin) / float64(n-1)
result := NewDataFrame("function", data.Pool)
result.N = n
xf := NewField(n, Float, data.Pool)
yf := NewField(n, Float, data.Pool)
for i := 0; i < n; i++ {
x := xmin + float64(i)*delta
xf.Data[i] = x
yf.Data[i] = s.F(x)
if i%10 == 0 {
fmt.Printf("sin: x=%.2f y=%.2f\n", x, yf.Data[i])
}
}
result.Columns["x"] = xf
result.Columns["y"] = yf
return result
}
// -------------------------------------------------------------------------
// StatBoxplot
type StatBoxplot struct {
}
var _ Stat = StatBoxplot{}
func (StatBoxplot) Name() string { return "StatBoxplot" }
func (StatBoxplot) Info() StatInfo {
return StatInfo{
NeededAes: []string{"x", "y"},
OptionalAes: []string{},
ExtraFieldHandling: GroupOnExtraFields,
}
}
type boxplot struct {
n int
min, max float64
low, q1, med, q3, high float64
outliers []float64
}
// TODO: handle corner cases
func computeBoxplot(d []float64) (b boxplot) {
n := len(d)
b.n = n
sort.Float64s(d)
// Compute the five boxplot values.
b.min, b.max = d[0], d[n-1]
if n%2 == 1 {
b.med = d[(n-1)/2]
} else {
b.med = (d[n/2] + d[n/2-1]) / 2
}
b.q1, b.q3 = d[n/4], d[3*n/4]
iqr := b.q3 - b.q1
lo, hi := b.q1-1.5*iqr, b.q3+1.5*iqr
b.low, b.high = b.max, b.min
// Compute low, high and outliers.
for _, y := range d {
if y >= lo && y < b.low {
b.low = y
}
if y <= hi && y > b.high {
b.high = y
}
if y < lo || y > hi {
b.outliers = append(b.outliers, y)
}
}
return b
}
func (s StatBoxplot) Apply(data *DataFrame, _ *Panel) *DataFrame {
if data == nil || data.N == 0 {
return nil
}
xd, yd := data.Columns["x"].Data, data.Columns["y"].Data
xs := Levels(data, "x").Elements()
sort.Float64s(xs)
n := len(xs)
ys := make(map[float64][]float64)
pool := data.Pool
xf := NewField(n, data.Columns["x"].Type, pool)
numf, medf := NewField(n, Int, pool), NewField(n, Float, pool)
minf, maxf := NewField(n, Float, pool), NewField(n, Float, pool)
lowf, highf := NewField(n, Float, pool), NewField(n, Float, pool)
q1f, q3f := NewField(n, Float, pool), NewField(n, Float, pool)
outf := NewField(n, Vector, pool)
for i := 0; i < data.N; i++ {
x, y := xd[i], yd[i]
ys[x] = append(ys[x], y)
}
i := 0
for x, y := range ys {
b := computeBoxplot(y)
xf.Data[i] = x
numf.Data[i] = float64(b.n)
minf.Data[i] = b.min
lowf.Data[i] = b.low
q1f.Data[i] = b.q1
medf.Data[i] = b.med
q3f.Data[i] = b.q3
highf.Data[i] = b.high
maxf.Data[i] = b.max
outf.SetVec(i, b.outliers)
i++
}
result := NewDataFrame(fmt.Sprintf("boxplot of %s", data.Name), pool)
result.N = n
result.Columns["x"] = xf
result.Columns["count"] = numf
result.Columns["min"] = minf
result.Columns["low"] = lowf
result.Columns["q1"] = q1f
result.Columns["mid"] = medf
result.Columns["q3"] = q3f
result.Columns["high"] = highf
result.Columns["max"] = maxf
result.Columns["outliers"] = outf
return result
}
| Name | identifier_name |
mkPlots2.py | #!/usr/bin/env python
import sys,os,json,re
from optparse import OptionParser,OptionGroup
from array import array
basepath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(basepath+'/../../common/')
tempargv = sys.argv[:]
sys.argv = []
import ROOT
from ROOT import *
sys.argv = tempargv
from write_cuts import *
from toolkit import *
from main import main
# OPTION PARSER ##################################################################################
##
def parser(mp=None):
if mp==None: mp = OptionParser()
mpExtra = OptionGroup(mp,cyan+"Extra options"+plain)
mpExtra.add_option('--mvaBins',help='mva bins: var#3#1;3;6;9,...',type='str',action='callback',callback=optsplitdict)#,default={'mvaNOM':['4',['-0.6','0.0','0.7','0.84','1.0']],'mvaVBF':['3',['-0.1','0.4','0.8','1.0']]})
mpExtra.add_option('--complexWghts',help='Wght info.',type='str',action='callback',callback=optsplitmore,default=[])
mpExtra.add_option('--read',help='Read h\'s from file.',action='store_true',default=False)
mp.add_option_group(mpExtra)
return mp
####################################################################################################
def INTERNALprepare(opts):
makeDirs(os.path.split(opts.fout)[0])
makeDirs('plots')
jsonsamp = json.loads(filecontent(opts.jsonsamp))
jsonvars = json.loads(filecontent(opts.jsonvars))
jsoninfo = json.loads(filecontent(opts.jsoninfo))
jsoncuts = json.loads(filecontent(opts.jsoncuts))
jsons = {'samp':jsonsamp,'vars':jsonvars,'info':jsoninfo,'cuts':jsoncuts}
# fix binning
if opts.binning:
for v in opts.binning:
#print v
jsons['vars']['variables'][v[0]]['nbins_x'] = v[1]
jsons['vars']['variables'][v[0]]['xmin'] = v[2]
jsons['vars']['variables'][v[0]]['xmax'] = v[3]
if opts.mvaBins:
for v in opts.mvaBins:
jsons['vars']['variables'][v]['nbins_x'] = opts.mvaBins[v][0]
jsons['vars']['variables'][v]['xs'] = array('f',[round(float(x),4) for x in opts.mvaBins[v][1]])
jsons['vars']['variables'][v]['xmin'] = opts.mvaBins[v][1][0]
jsons['vars']['variables'][v]['xmax'] = opts.mvaBins[v][1][-1]
return jsons
####################################################################################################
def INTERNALblind(h,min,max):
for ix in range(1,h.GetNbinsX()+1):
x = h.GetBinCenter(ix)
if x>=min and x<=max:
h.SetBinContent(ix,0)
h.SetBinError(ix,0)
return h
####################################################################################################
def INTERNALstyle():
gROOT.ProcessLine("TH1::SetDefaultSumw2(1);")
gROOT.ProcessLine(".x %s/styleCMSTDR.C++"%basepath)
gROOT.ProcessLine('gROOT->ForceStyle();')
gStyle.SetPadLeftMargin(0.18)
gStyle.SetPadRightMargin(0.04)
gStyle.SetStripDecimals(0)
markers = [20, 21, 20 , 23]
colours = [kBlack,kBlue,kRed,kGreen+2]
return markers, colours
####################################################################################################
#def INTERNALstyleHist(h,i,markers,colours):
# h.Sumw2()
# h.SetMarkerStyle(markers[i])
# h.SetMarkerColor(colours[i])
# h.SetMarkerSize(1.2)
# h.SetLineColor(colours[i])
# h.GetXaxis().SetTitle("M_{bb} (GeV)")
# h.GetYaxis().SetTitle("PDF")
# h.GetYaxis().SetNdivisions(505)
# h.SetMaximum(0.25)
# return h
####################################################################################################
def INTERNALgraph(h):
g = TGraphErrors(h.GetNbinsX())
for ibin in range(1,h.GetNbinsX()+1):
x = h.GetBinCenter(ibin)
y = h.GetBinContent(ibin)
ex = h.GetBinWidth(ibin)/2.
ey = h.GetBinError(ibin)
g.SetPoint(ibin-1,x,y)
g.SetPointError(ibin-1,ex,ey)
return g
####################################################################################################
def INTERNALhistograms(opts,sels,jsons):
hProf = {}
vars = jsons['vars']['variables']
for s in sels:
for v in opts.variable:
vroot = vars[v]['root']
if s=='NOM' and '[2]' in vroot: continue
if s=='VBF' and '[1]' in vroot: continue
if 'mva' in v and not s in v: continue
for tag in ['QCD','DAT']:
vy = 'BDT'
vy2 = 'mva%s'%s
if tag=='QCD': hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'BDT output',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(int(vars[vy2]['nbins_x'])/(2 if not ('mbb' in v or 'cos' in v) else 4)),float(vars[vy2]['xmin']),float(vars[vy2]['xmax']))
else: hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'BDT output',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(vars[vy2]['nbins_x']),float(vars[vy2]['xmin']),float(vars[vy2]['xmax']))
hProf[(s,v,vy,tag)].Sumw2()
if 'mbbReg' in v: continue
vy = 'mbbReg'
vy2 = 'mbbReg[%d]'%(1 if s=='NOM' else 2)
if tag=='QCD': hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'regressed M_{bb} (GeV)',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(48/(2 if not ('cos' in v) else 4)),80.,200.)
else: hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'regressed M_{bb} (GeV)',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),48,80.,200.)
hProf[(s,v,vy,tag)].Sumw2()
return hProf
####################################################################################################
#def INTERNALcanvases(opts):
# return None
####################################################################################################
#def INTERNALlegend(Tag):
# leg = TLegend(0.6,0.62,0.95,0.92)
# leg.SetHeader("%s selection"%Tag)
# leg.SetFillColor(0)
# leg.SetBorderSize(0)
# leg.SetTextFont(42)
# leg.SetTextSize(0.05)
# return leg
####################################################################################################
#def INTERNALline(fun, min, max):
# ln = TF1("line",fun,min,max)
# ln.SetLineColor(kBlack)
# ln.SetLineWidth(1)
# ln.SetLineStyle(2)
# ln.SetMinimum(0.7)
# ln.SetMaximum(1.3)
# ln.GetXaxis().SetTitle("M_{bb} (GeV)")
# ln.GetYaxis().SetTitle("Signal / Control")
# return ln
####################################################################################################
def INTERNALpicksamples(opts,jsons):
l1("Global path: %s"%opts.globalpath)
l1("Using input samples:")
allsamples = jsons['samp']['files']
selsamples = []
for s in allsamples.itervalues():
# require regex in opts.sample
if not opts.sample==[] and not any([(x in s['tag']) for x in opts.sample]): continue
# veto regex in opts.nosample
if not opts.nosample==[] and any([(x in s['tag']) for x in opts.nosample]): continue
selsamples += [s]
for s in sorted(selsamples,key=lambda x:(x['tag'][:3],int(re.findall('[0-9]+',x['tag'])[0]) if len(re.findall('[0-9]+',x['tag']))>0 else 1.,not 'NOM' in x['fname'])):
s['tfile'] = TFile.Open(opts.globalpath + s['fname'])
s['tree'] = s['tfile'].Get("Hbb/events")
s['incarnation'] = 'NOM' if 'NOM' in s['tag'] else 'VBF'
l2('%-15s: %-50s(%s)'%(s['tag'],s['fname'],s['tfile']))
return selsamples
####################################################################################################
def mkBDTcorrelations():
mp = parser()
opts,fout,samples = main(mp,False,False,True)
sels = ['NOM','VBF']
jsons = INTERNALprepare(opts)
markers, colours = INTERNALstyle()
selsamples = INTERNALpicksamples(opts,jsons)
can = TCanvas("main","main",650,600)
hProf = INTERNALhistograms(opts,sels,jsons)
#INTERNALcanvases(opts)
l1("Creating profiles")
if not opts.read:
for isel,sel in enumerate(sels):
ssel = [s for s in selsamples if sel in s['fname']]
for s in ssel:
l2("Running for %s"%s['tag'])
cut,cutlabel = write_cuts(["None"],["None"],reftrig=["None"],sample=s['tag'],jsonsamp=opts.jsonsamp,jsoncuts=opts.jsoncuts,weight=opts.complexWghts[(sel,'old')],trigequal=trigTruth(opts.usebool))
#cut = re.findall('\*.*',cut)[0][2:-1].replace('pu','ev.pu').replace('trig','ev.trig')
if opts.debug: l3("Cut %s: \n\t\t\t%s%s%s: \n\t\t\t%s"%(s['tag'],blue,cutlabel,plain,cut))
cutextra = "mbbReg[%d]>=80. && mbbReg[%d]<=200."%(1 if sel=='NOM' else 2, 1 if sel=='NOM' else 2)
for v in [jsons['vars']['variables'][vv] for vv in opts.variable]:
if sel=='NOM' and '[2]' in v['root']: continue
if sel=='VBF' and '[1]' in v['root']: continue
if 'mva' in v and not sel in v: |
s['tree'].Draw("%s:%s>>+%s"%(v['root'],"mva%s"%sel,hProf[(sel,v['var'],'BDT','QCD' if 'QCD' in s['fname'] else 'DAT')].GetName()),TCut(cutextra)*TCut(cut),"prof")
if 'mbbReg' in v['var']: continue
s['tree'].Draw("%s:%s>>+%s"%(v['root'],"mbbReg[%d]"%(1 if sel=='NOM' else 2),hProf[(sel,v['var'],'mbbReg','QCD' if 'QCD' in s['fname'] else 'DAT')].GetName()),TCut(cutextra)*TCut(cut),"prof")
else:
for k in hProf.iterkeys():
hProf[k] = fout.Get("h%s_sel%s_%s_%s"%(k[3],k[0],k[1],k[2]))
k = list(set([x[0:3] for x in hProf.iterkeys()]))
for ik in k:
low = min(hProf[(ik[0],ik[1],ik[2],'QCD')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'QCD')].GetMinimumBin()),hProf[(ik[0],ik[1],ik[2],'DAT')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'DAT')].GetMinimumBin()))
hig = max(hProf[(ik[0],ik[1],ik[2],'QCD')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'QCD')].GetMaximumBin()),hProf[(ik[0],ik[1],ik[2],'DAT')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'DAT')].GetMaximumBin()))
med = hProf[(ik[0],ik[1],ik[2],'DAT')].GetMean(2)
dif = hig-low
setlow = med - 1.6*dif
sethig = med + 1.6*dif
if not 'cos' in ik[1]:
setlow = max(setlow,0)
limits = {'dEtaqq1':[2,7,2,7],'dPhiqq1':[0,3,0,3],'jetBtag00':[0.5,1.2,0.8,1.0],'jetBtag01':[0.0,1.2,0.55,0.75],'jetQGLNOM0':[0,1,0.5,0.65],'jetQGLNOM1':[0,1,0.35,0.55],'jetQGLNOM2':[0,1,0.5,0.7],'jetQGLNOM3':[0,1,0.5,0.7],'mqq1':[0,2500,500,800],'mbbReg1':[115,145,115,145],'cosTheta1':[-0.35,0.35,-0.2,0.2],'jetQGLVBF0':[0,1,0.4,0.55],'jetQGLVBF1':[0,1,0.3,0.5],'jetQGLVBF2':[0,1,0.55,0.75],'jetQGLVBF3':[0,1,0.5,0.7],'mqq2':[0,2500,1000,1300],'mbbReg2':[115,145,115,145],'cosTheta2':[-0.35,0.35,-0.2,0.2],'dEtaqq2':[2,7,2,7],'dPhiqq2':[0,3,0,3],'softHt':[0,100,25,50],'softN2':[0,10,4,7]} #[3,7,4,6], [0,3,1,2] [0,3,1.5,2.5]
if ik[1] in limits:
if ik[2]=='BDT':
setlow = limits[ik[1]][0]
sethig = limits[ik[1]][1]
else:
setlow = limits[ik[1]][0]#2]
sethig = limits[ik[1]][1]#3]
# if 'jetBtag01' in ik[1] and ik[0]=='VBF' and ik[2]=='mbbReg':
# setlow = 0.4
# sethig = 0.55
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetRangeUser(setlow,sethig)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetTitleOffset(1.4)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetXaxis().SetTitleOffset(1.0)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetNdivisions(507)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetXaxis().SetNdivisions(507)
hProf[(ik[0],ik[1],ik[2],'DAT')].Draw("axis")
h = INTERNALgraph(hProf[(ik[0],ik[1],ik[2],'DAT')])
h.SetMarkerStyle(20)
h.SetMarkerSize(0.8)
h.SetMarkerColor(kBlack)
h.SetLineColor(kBlack)
h.SetFillStyle(0)
h.Draw("same pz")
g = INTERNALgraph(hProf[(ik[0],ik[1],ik[2],'QCD')])
# hProf[(ik[0],ik[1],ik[2],'QCD')].SetMarkerStyle(25)
# hProf[(ik[0],ik[1],ik[2],'QCD')].SetMarkerColor(kBlue)
# hProf[(ik[0],ik[1],ik[2],'QCD')].Draw("same e")
g.SetMarkerStyle(25)
g.SetMarkerColor(kBlue)
g.SetLineColor(kBlue)
g.SetFillStyle(0)
g.Draw("same pz")
leg = TLegend(0.62,0.75,0.9,0.92,"%s selection"%(ik[0].strip('sel')))
leg.SetFillStyle(-1)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.045)
#leg.AddEntry(hProf[(ik[0],ik[1],ik[2],'DAT')],'Data')
#leg.AddEntry(hProf[(ik[0],ik[1],ik[2],'QCD')],'QCD')
leg.AddEntry(h,'Data','LP')
leg.AddEntry(g,'QCD','LP')
leg.SetY1(leg.GetY2()-(leg.GetNRows()+1)*0.05)
leg.Draw()
can.SaveAs("plots/%s.png"%hProf[(ik[0],ik[1],ik[2],'DAT')].GetName().replace('hDAT','h'))
can.SaveAs("plots/%s.pdf"%hProf[(ik[0],ik[1],ik[2],'DAT')].GetName().replace('hDAT','h'))
fout.cd()
for h in hProf.itervalues():
h.Write(h.GetName(),TH1.kOverwrite)
# clean
fout.Close()
####################################################################################################
if __name__=='__main__':
mkBDTcorrelations()
| continue | conditional_block |
mkPlots2.py | #!/usr/bin/env python
import sys,os,json,re
from optparse import OptionParser,OptionGroup
from array import array
basepath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(basepath+'/../../common/')
tempargv = sys.argv[:]
sys.argv = []
import ROOT
from ROOT import *
sys.argv = tempargv
from write_cuts import *
from toolkit import *
from main import main
# OPTION PARSER ##################################################################################
##
def parser(mp=None):
if mp==None: mp = OptionParser()
mpExtra = OptionGroup(mp,cyan+"Extra options"+plain)
mpExtra.add_option('--mvaBins',help='mva bins: var#3#1;3;6;9,...',type='str',action='callback',callback=optsplitdict)#,default={'mvaNOM':['4',['-0.6','0.0','0.7','0.84','1.0']],'mvaVBF':['3',['-0.1','0.4','0.8','1.0']]})
mpExtra.add_option('--complexWghts',help='Wght info.',type='str',action='callback',callback=optsplitmore,default=[])
mpExtra.add_option('--read',help='Read h\'s from file.',action='store_true',default=False)
mp.add_option_group(mpExtra)
return mp
####################################################################################################
def INTERNALprepare(opts):
makeDirs(os.path.split(opts.fout)[0])
makeDirs('plots')
jsonsamp = json.loads(filecontent(opts.jsonsamp))
jsonvars = json.loads(filecontent(opts.jsonvars))
jsoninfo = json.loads(filecontent(opts.jsoninfo))
jsoncuts = json.loads(filecontent(opts.jsoncuts))
jsons = {'samp':jsonsamp,'vars':jsonvars,'info':jsoninfo,'cuts':jsoncuts}
# fix binning
if opts.binning:
for v in opts.binning:
#print v
jsons['vars']['variables'][v[0]]['nbins_x'] = v[1]
jsons['vars']['variables'][v[0]]['xmin'] = v[2]
jsons['vars']['variables'][v[0]]['xmax'] = v[3]
if opts.mvaBins:
for v in opts.mvaBins:
jsons['vars']['variables'][v]['nbins_x'] = opts.mvaBins[v][0]
jsons['vars']['variables'][v]['xs'] = array('f',[round(float(x),4) for x in opts.mvaBins[v][1]])
jsons['vars']['variables'][v]['xmin'] = opts.mvaBins[v][1][0]
jsons['vars']['variables'][v]['xmax'] = opts.mvaBins[v][1][-1]
return jsons
####################################################################################################
def INTERNALblind(h,min,max):
for ix in range(1,h.GetNbinsX()+1):
x = h.GetBinCenter(ix)
if x>=min and x<=max:
h.SetBinContent(ix,0) |
####################################################################################################
def INTERNALstyle():
gROOT.ProcessLine("TH1::SetDefaultSumw2(1);")
gROOT.ProcessLine(".x %s/styleCMSTDR.C++"%basepath)
gROOT.ProcessLine('gROOT->ForceStyle();')
gStyle.SetPadLeftMargin(0.18)
gStyle.SetPadRightMargin(0.04)
gStyle.SetStripDecimals(0)
markers = [20, 21, 20 , 23]
colours = [kBlack,kBlue,kRed,kGreen+2]
return markers, colours
####################################################################################################
#def INTERNALstyleHist(h,i,markers,colours):
# h.Sumw2()
# h.SetMarkerStyle(markers[i])
# h.SetMarkerColor(colours[i])
# h.SetMarkerSize(1.2)
# h.SetLineColor(colours[i])
# h.GetXaxis().SetTitle("M_{bb} (GeV)")
# h.GetYaxis().SetTitle("PDF")
# h.GetYaxis().SetNdivisions(505)
# h.SetMaximum(0.25)
# return h
####################################################################################################
def INTERNALgraph(h):
g = TGraphErrors(h.GetNbinsX())
for ibin in range(1,h.GetNbinsX()+1):
x = h.GetBinCenter(ibin)
y = h.GetBinContent(ibin)
ex = h.GetBinWidth(ibin)/2.
ey = h.GetBinError(ibin)
g.SetPoint(ibin-1,x,y)
g.SetPointError(ibin-1,ex,ey)
return g
####################################################################################################
def INTERNALhistograms(opts,sels,jsons):
hProf = {}
vars = jsons['vars']['variables']
for s in sels:
for v in opts.variable:
vroot = vars[v]['root']
if s=='NOM' and '[2]' in vroot: continue
if s=='VBF' and '[1]' in vroot: continue
if 'mva' in v and not s in v: continue
for tag in ['QCD','DAT']:
vy = 'BDT'
vy2 = 'mva%s'%s
if tag=='QCD': hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'BDT output',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(int(vars[vy2]['nbins_x'])/(2 if not ('mbb' in v or 'cos' in v) else 4)),float(vars[vy2]['xmin']),float(vars[vy2]['xmax']))
else: hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'BDT output',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(vars[vy2]['nbins_x']),float(vars[vy2]['xmin']),float(vars[vy2]['xmax']))
hProf[(s,v,vy,tag)].Sumw2()
if 'mbbReg' in v: continue
vy = 'mbbReg'
vy2 = 'mbbReg[%d]'%(1 if s=='NOM' else 2)
if tag=='QCD': hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'regressed M_{bb} (GeV)',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(48/(2 if not ('cos' in v) else 4)),80.,200.)
else: hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'regressed M_{bb} (GeV)',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),48,80.,200.)
hProf[(s,v,vy,tag)].Sumw2()
return hProf
####################################################################################################
#def INTERNALcanvases(opts):
# return None
####################################################################################################
#def INTERNALlegend(Tag):
# leg = TLegend(0.6,0.62,0.95,0.92)
# leg.SetHeader("%s selection"%Tag)
# leg.SetFillColor(0)
# leg.SetBorderSize(0)
# leg.SetTextFont(42)
# leg.SetTextSize(0.05)
# return leg
####################################################################################################
#def INTERNALline(fun, min, max):
# ln = TF1("line",fun,min,max)
# ln.SetLineColor(kBlack)
# ln.SetLineWidth(1)
# ln.SetLineStyle(2)
# ln.SetMinimum(0.7)
# ln.SetMaximum(1.3)
# ln.GetXaxis().SetTitle("M_{bb} (GeV)")
# ln.GetYaxis().SetTitle("Signal / Control")
# return ln
####################################################################################################
def INTERNALpicksamples(opts,jsons):
l1("Global path: %s"%opts.globalpath)
l1("Using input samples:")
allsamples = jsons['samp']['files']
selsamples = []
for s in allsamples.itervalues():
# require regex in opts.sample
if not opts.sample==[] and not any([(x in s['tag']) for x in opts.sample]): continue
# veto regex in opts.nosample
if not opts.nosample==[] and any([(x in s['tag']) for x in opts.nosample]): continue
selsamples += [s]
for s in sorted(selsamples,key=lambda x:(x['tag'][:3],int(re.findall('[0-9]+',x['tag'])[0]) if len(re.findall('[0-9]+',x['tag']))>0 else 1.,not 'NOM' in x['fname'])):
s['tfile'] = TFile.Open(opts.globalpath + s['fname'])
s['tree'] = s['tfile'].Get("Hbb/events")
s['incarnation'] = 'NOM' if 'NOM' in s['tag'] else 'VBF'
l2('%-15s: %-50s(%s)'%(s['tag'],s['fname'],s['tfile']))
return selsamples
####################################################################################################
def mkBDTcorrelations():
mp = parser()
opts,fout,samples = main(mp,False,False,True)
sels = ['NOM','VBF']
jsons = INTERNALprepare(opts)
markers, colours = INTERNALstyle()
selsamples = INTERNALpicksamples(opts,jsons)
can = TCanvas("main","main",650,600)
hProf = INTERNALhistograms(opts,sels,jsons)
#INTERNALcanvases(opts)
l1("Creating profiles")
if not opts.read:
for isel,sel in enumerate(sels):
ssel = [s for s in selsamples if sel in s['fname']]
for s in ssel:
l2("Running for %s"%s['tag'])
cut,cutlabel = write_cuts(["None"],["None"],reftrig=["None"],sample=s['tag'],jsonsamp=opts.jsonsamp,jsoncuts=opts.jsoncuts,weight=opts.complexWghts[(sel,'old')],trigequal=trigTruth(opts.usebool))
#cut = re.findall('\*.*',cut)[0][2:-1].replace('pu','ev.pu').replace('trig','ev.trig')
if opts.debug: l3("Cut %s: \n\t\t\t%s%s%s: \n\t\t\t%s"%(s['tag'],blue,cutlabel,plain,cut))
cutextra = "mbbReg[%d]>=80. && mbbReg[%d]<=200."%(1 if sel=='NOM' else 2, 1 if sel=='NOM' else 2)
for v in [jsons['vars']['variables'][vv] for vv in opts.variable]:
if sel=='NOM' and '[2]' in v['root']: continue
if sel=='VBF' and '[1]' in v['root']: continue
if 'mva' in v and not sel in v: continue
s['tree'].Draw("%s:%s>>+%s"%(v['root'],"mva%s"%sel,hProf[(sel,v['var'],'BDT','QCD' if 'QCD' in s['fname'] else 'DAT')].GetName()),TCut(cutextra)*TCut(cut),"prof")
if 'mbbReg' in v['var']: continue
s['tree'].Draw("%s:%s>>+%s"%(v['root'],"mbbReg[%d]"%(1 if sel=='NOM' else 2),hProf[(sel,v['var'],'mbbReg','QCD' if 'QCD' in s['fname'] else 'DAT')].GetName()),TCut(cutextra)*TCut(cut),"prof")
else:
for k in hProf.iterkeys():
hProf[k] = fout.Get("h%s_sel%s_%s_%s"%(k[3],k[0],k[1],k[2]))
k = list(set([x[0:3] for x in hProf.iterkeys()]))
for ik in k:
low = min(hProf[(ik[0],ik[1],ik[2],'QCD')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'QCD')].GetMinimumBin()),hProf[(ik[0],ik[1],ik[2],'DAT')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'DAT')].GetMinimumBin()))
hig = max(hProf[(ik[0],ik[1],ik[2],'QCD')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'QCD')].GetMaximumBin()),hProf[(ik[0],ik[1],ik[2],'DAT')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'DAT')].GetMaximumBin()))
med = hProf[(ik[0],ik[1],ik[2],'DAT')].GetMean(2)
dif = hig-low
setlow = med - 1.6*dif
sethig = med + 1.6*dif
if not 'cos' in ik[1]:
setlow = max(setlow,0)
limits = {'dEtaqq1':[2,7,2,7],'dPhiqq1':[0,3,0,3],'jetBtag00':[0.5,1.2,0.8,1.0],'jetBtag01':[0.0,1.2,0.55,0.75],'jetQGLNOM0':[0,1,0.5,0.65],'jetQGLNOM1':[0,1,0.35,0.55],'jetQGLNOM2':[0,1,0.5,0.7],'jetQGLNOM3':[0,1,0.5,0.7],'mqq1':[0,2500,500,800],'mbbReg1':[115,145,115,145],'cosTheta1':[-0.35,0.35,-0.2,0.2],'jetQGLVBF0':[0,1,0.4,0.55],'jetQGLVBF1':[0,1,0.3,0.5],'jetQGLVBF2':[0,1,0.55,0.75],'jetQGLVBF3':[0,1,0.5,0.7],'mqq2':[0,2500,1000,1300],'mbbReg2':[115,145,115,145],'cosTheta2':[-0.35,0.35,-0.2,0.2],'dEtaqq2':[2,7,2,7],'dPhiqq2':[0,3,0,3],'softHt':[0,100,25,50],'softN2':[0,10,4,7]} #[3,7,4,6], [0,3,1,2] [0,3,1.5,2.5]
if ik[1] in limits:
if ik[2]=='BDT':
setlow = limits[ik[1]][0]
sethig = limits[ik[1]][1]
else:
setlow = limits[ik[1]][0]#2]
sethig = limits[ik[1]][1]#3]
# if 'jetBtag01' in ik[1] and ik[0]=='VBF' and ik[2]=='mbbReg':
# setlow = 0.4
# sethig = 0.55
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetRangeUser(setlow,sethig)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetTitleOffset(1.4)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetXaxis().SetTitleOffset(1.0)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetNdivisions(507)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetXaxis().SetNdivisions(507)
hProf[(ik[0],ik[1],ik[2],'DAT')].Draw("axis")
h = INTERNALgraph(hProf[(ik[0],ik[1],ik[2],'DAT')])
h.SetMarkerStyle(20)
h.SetMarkerSize(0.8)
h.SetMarkerColor(kBlack)
h.SetLineColor(kBlack)
h.SetFillStyle(0)
h.Draw("same pz")
g = INTERNALgraph(hProf[(ik[0],ik[1],ik[2],'QCD')])
# hProf[(ik[0],ik[1],ik[2],'QCD')].SetMarkerStyle(25)
# hProf[(ik[0],ik[1],ik[2],'QCD')].SetMarkerColor(kBlue)
# hProf[(ik[0],ik[1],ik[2],'QCD')].Draw("same e")
g.SetMarkerStyle(25)
g.SetMarkerColor(kBlue)
g.SetLineColor(kBlue)
g.SetFillStyle(0)
g.Draw("same pz")
leg = TLegend(0.62,0.75,0.9,0.92,"%s selection"%(ik[0].strip('sel')))
leg.SetFillStyle(-1)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.045)
#leg.AddEntry(hProf[(ik[0],ik[1],ik[2],'DAT')],'Data')
#leg.AddEntry(hProf[(ik[0],ik[1],ik[2],'QCD')],'QCD')
leg.AddEntry(h,'Data','LP')
leg.AddEntry(g,'QCD','LP')
leg.SetY1(leg.GetY2()-(leg.GetNRows()+1)*0.05)
leg.Draw()
can.SaveAs("plots/%s.png"%hProf[(ik[0],ik[1],ik[2],'DAT')].GetName().replace('hDAT','h'))
can.SaveAs("plots/%s.pdf"%hProf[(ik[0],ik[1],ik[2],'DAT')].GetName().replace('hDAT','h'))
fout.cd()
for h in hProf.itervalues():
h.Write(h.GetName(),TH1.kOverwrite)
# clean
fout.Close()
####################################################################################################
if __name__=='__main__':
mkBDTcorrelations() | h.SetBinError(ix,0)
return h | random_line_split |
mkPlots2.py | #!/usr/bin/env python
import sys,os,json,re
from optparse import OptionParser,OptionGroup
from array import array
basepath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(basepath+'/../../common/')
tempargv = sys.argv[:]
sys.argv = []
import ROOT
from ROOT import *
sys.argv = tempargv
from write_cuts import *
from toolkit import *
from main import main
# OPTION PARSER ##################################################################################
##
def parser(mp=None):
if mp==None: mp = OptionParser()
mpExtra = OptionGroup(mp,cyan+"Extra options"+plain)
mpExtra.add_option('--mvaBins',help='mva bins: var#3#1;3;6;9,...',type='str',action='callback',callback=optsplitdict)#,default={'mvaNOM':['4',['-0.6','0.0','0.7','0.84','1.0']],'mvaVBF':['3',['-0.1','0.4','0.8','1.0']]})
mpExtra.add_option('--complexWghts',help='Wght info.',type='str',action='callback',callback=optsplitmore,default=[])
mpExtra.add_option('--read',help='Read h\'s from file.',action='store_true',default=False)
mp.add_option_group(mpExtra)
return mp
####################################################################################################
def INTERNALprepare(opts):
|
####################################################################################################
def INTERNALblind(h,min,max):
for ix in range(1,h.GetNbinsX()+1):
x = h.GetBinCenter(ix)
if x>=min and x<=max:
h.SetBinContent(ix,0)
h.SetBinError(ix,0)
return h
####################################################################################################
def INTERNALstyle():
gROOT.ProcessLine("TH1::SetDefaultSumw2(1);")
gROOT.ProcessLine(".x %s/styleCMSTDR.C++"%basepath)
gROOT.ProcessLine('gROOT->ForceStyle();')
gStyle.SetPadLeftMargin(0.18)
gStyle.SetPadRightMargin(0.04)
gStyle.SetStripDecimals(0)
markers = [20, 21, 20 , 23]
colours = [kBlack,kBlue,kRed,kGreen+2]
return markers, colours
####################################################################################################
#def INTERNALstyleHist(h,i,markers,colours):
# h.Sumw2()
# h.SetMarkerStyle(markers[i])
# h.SetMarkerColor(colours[i])
# h.SetMarkerSize(1.2)
# h.SetLineColor(colours[i])
# h.GetXaxis().SetTitle("M_{bb} (GeV)")
# h.GetYaxis().SetTitle("PDF")
# h.GetYaxis().SetNdivisions(505)
# h.SetMaximum(0.25)
# return h
####################################################################################################
def INTERNALgraph(h):
g = TGraphErrors(h.GetNbinsX())
for ibin in range(1,h.GetNbinsX()+1):
x = h.GetBinCenter(ibin)
y = h.GetBinContent(ibin)
ex = h.GetBinWidth(ibin)/2.
ey = h.GetBinError(ibin)
g.SetPoint(ibin-1,x,y)
g.SetPointError(ibin-1,ex,ey)
return g
####################################################################################################
def INTERNALhistograms(opts,sels,jsons):
hProf = {}
vars = jsons['vars']['variables']
for s in sels:
for v in opts.variable:
vroot = vars[v]['root']
if s=='NOM' and '[2]' in vroot: continue
if s=='VBF' and '[1]' in vroot: continue
if 'mva' in v and not s in v: continue
for tag in ['QCD','DAT']:
vy = 'BDT'
vy2 = 'mva%s'%s
if tag=='QCD': hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'BDT output',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(int(vars[vy2]['nbins_x'])/(2 if not ('mbb' in v or 'cos' in v) else 4)),float(vars[vy2]['xmin']),float(vars[vy2]['xmax']))
else: hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'BDT output',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(vars[vy2]['nbins_x']),float(vars[vy2]['xmin']),float(vars[vy2]['xmax']))
hProf[(s,v,vy,tag)].Sumw2()
if 'mbbReg' in v: continue
vy = 'mbbReg'
vy2 = 'mbbReg[%d]'%(1 if s=='NOM' else 2)
if tag=='QCD': hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'regressed M_{bb} (GeV)',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(48/(2 if not ('cos' in v) else 4)),80.,200.)
else: hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'regressed M_{bb} (GeV)',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),48,80.,200.)
hProf[(s,v,vy,tag)].Sumw2()
return hProf
####################################################################################################
#def INTERNALcanvases(opts):
# return None
####################################################################################################
#def INTERNALlegend(Tag):
# leg = TLegend(0.6,0.62,0.95,0.92)
# leg.SetHeader("%s selection"%Tag)
# leg.SetFillColor(0)
# leg.SetBorderSize(0)
# leg.SetTextFont(42)
# leg.SetTextSize(0.05)
# return leg
####################################################################################################
#def INTERNALline(fun, min, max):
# ln = TF1("line",fun,min,max)
# ln.SetLineColor(kBlack)
# ln.SetLineWidth(1)
# ln.SetLineStyle(2)
# ln.SetMinimum(0.7)
# ln.SetMaximum(1.3)
# ln.GetXaxis().SetTitle("M_{bb} (GeV)")
# ln.GetYaxis().SetTitle("Signal / Control")
# return ln
####################################################################################################
def INTERNALpicksamples(opts,jsons):
l1("Global path: %s"%opts.globalpath)
l1("Using input samples:")
allsamples = jsons['samp']['files']
selsamples = []
for s in allsamples.itervalues():
# require regex in opts.sample
if not opts.sample==[] and not any([(x in s['tag']) for x in opts.sample]): continue
# veto regex in opts.nosample
if not opts.nosample==[] and any([(x in s['tag']) for x in opts.nosample]): continue
selsamples += [s]
for s in sorted(selsamples,key=lambda x:(x['tag'][:3],int(re.findall('[0-9]+',x['tag'])[0]) if len(re.findall('[0-9]+',x['tag']))>0 else 1.,not 'NOM' in x['fname'])):
s['tfile'] = TFile.Open(opts.globalpath + s['fname'])
s['tree'] = s['tfile'].Get("Hbb/events")
s['incarnation'] = 'NOM' if 'NOM' in s['tag'] else 'VBF'
l2('%-15s: %-50s(%s)'%(s['tag'],s['fname'],s['tfile']))
return selsamples
####################################################################################################
def mkBDTcorrelations():
mp = parser()
opts,fout,samples = main(mp,False,False,True)
sels = ['NOM','VBF']
jsons = INTERNALprepare(opts)
markers, colours = INTERNALstyle()
selsamples = INTERNALpicksamples(opts,jsons)
can = TCanvas("main","main",650,600)
hProf = INTERNALhistograms(opts,sels,jsons)
#INTERNALcanvases(opts)
l1("Creating profiles")
if not opts.read:
for isel,sel in enumerate(sels):
ssel = [s for s in selsamples if sel in s['fname']]
for s in ssel:
l2("Running for %s"%s['tag'])
cut,cutlabel = write_cuts(["None"],["None"],reftrig=["None"],sample=s['tag'],jsonsamp=opts.jsonsamp,jsoncuts=opts.jsoncuts,weight=opts.complexWghts[(sel,'old')],trigequal=trigTruth(opts.usebool))
#cut = re.findall('\*.*',cut)[0][2:-1].replace('pu','ev.pu').replace('trig','ev.trig')
if opts.debug: l3("Cut %s: \n\t\t\t%s%s%s: \n\t\t\t%s"%(s['tag'],blue,cutlabel,plain,cut))
cutextra = "mbbReg[%d]>=80. && mbbReg[%d]<=200."%(1 if sel=='NOM' else 2, 1 if sel=='NOM' else 2)
for v in [jsons['vars']['variables'][vv] for vv in opts.variable]:
if sel=='NOM' and '[2]' in v['root']: continue
if sel=='VBF' and '[1]' in v['root']: continue
if 'mva' in v and not sel in v: continue
s['tree'].Draw("%s:%s>>+%s"%(v['root'],"mva%s"%sel,hProf[(sel,v['var'],'BDT','QCD' if 'QCD' in s['fname'] else 'DAT')].GetName()),TCut(cutextra)*TCut(cut),"prof")
if 'mbbReg' in v['var']: continue
s['tree'].Draw("%s:%s>>+%s"%(v['root'],"mbbReg[%d]"%(1 if sel=='NOM' else 2),hProf[(sel,v['var'],'mbbReg','QCD' if 'QCD' in s['fname'] else 'DAT')].GetName()),TCut(cutextra)*TCut(cut),"prof")
else:
for k in hProf.iterkeys():
hProf[k] = fout.Get("h%s_sel%s_%s_%s"%(k[3],k[0],k[1],k[2]))
k = list(set([x[0:3] for x in hProf.iterkeys()]))
for ik in k:
low = min(hProf[(ik[0],ik[1],ik[2],'QCD')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'QCD')].GetMinimumBin()),hProf[(ik[0],ik[1],ik[2],'DAT')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'DAT')].GetMinimumBin()))
hig = max(hProf[(ik[0],ik[1],ik[2],'QCD')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'QCD')].GetMaximumBin()),hProf[(ik[0],ik[1],ik[2],'DAT')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'DAT')].GetMaximumBin()))
med = hProf[(ik[0],ik[1],ik[2],'DAT')].GetMean(2)
dif = hig-low
setlow = med - 1.6*dif
sethig = med + 1.6*dif
if not 'cos' in ik[1]:
setlow = max(setlow,0)
limits = {'dEtaqq1':[2,7,2,7],'dPhiqq1':[0,3,0,3],'jetBtag00':[0.5,1.2,0.8,1.0],'jetBtag01':[0.0,1.2,0.55,0.75],'jetQGLNOM0':[0,1,0.5,0.65],'jetQGLNOM1':[0,1,0.35,0.55],'jetQGLNOM2':[0,1,0.5,0.7],'jetQGLNOM3':[0,1,0.5,0.7],'mqq1':[0,2500,500,800],'mbbReg1':[115,145,115,145],'cosTheta1':[-0.35,0.35,-0.2,0.2],'jetQGLVBF0':[0,1,0.4,0.55],'jetQGLVBF1':[0,1,0.3,0.5],'jetQGLVBF2':[0,1,0.55,0.75],'jetQGLVBF3':[0,1,0.5,0.7],'mqq2':[0,2500,1000,1300],'mbbReg2':[115,145,115,145],'cosTheta2':[-0.35,0.35,-0.2,0.2],'dEtaqq2':[2,7,2,7],'dPhiqq2':[0,3,0,3],'softHt':[0,100,25,50],'softN2':[0,10,4,7]} #[3,7,4,6], [0,3,1,2] [0,3,1.5,2.5]
if ik[1] in limits:
if ik[2]=='BDT':
setlow = limits[ik[1]][0]
sethig = limits[ik[1]][1]
else:
setlow = limits[ik[1]][0]#2]
sethig = limits[ik[1]][1]#3]
# if 'jetBtag01' in ik[1] and ik[0]=='VBF' and ik[2]=='mbbReg':
# setlow = 0.4
# sethig = 0.55
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetRangeUser(setlow,sethig)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetTitleOffset(1.4)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetXaxis().SetTitleOffset(1.0)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetNdivisions(507)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetXaxis().SetNdivisions(507)
hProf[(ik[0],ik[1],ik[2],'DAT')].Draw("axis")
h = INTERNALgraph(hProf[(ik[0],ik[1],ik[2],'DAT')])
h.SetMarkerStyle(20)
h.SetMarkerSize(0.8)
h.SetMarkerColor(kBlack)
h.SetLineColor(kBlack)
h.SetFillStyle(0)
h.Draw("same pz")
g = INTERNALgraph(hProf[(ik[0],ik[1],ik[2],'QCD')])
# hProf[(ik[0],ik[1],ik[2],'QCD')].SetMarkerStyle(25)
# hProf[(ik[0],ik[1],ik[2],'QCD')].SetMarkerColor(kBlue)
# hProf[(ik[0],ik[1],ik[2],'QCD')].Draw("same e")
g.SetMarkerStyle(25)
g.SetMarkerColor(kBlue)
g.SetLineColor(kBlue)
g.SetFillStyle(0)
g.Draw("same pz")
leg = TLegend(0.62,0.75,0.9,0.92,"%s selection"%(ik[0].strip('sel')))
leg.SetFillStyle(-1)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.045)
#leg.AddEntry(hProf[(ik[0],ik[1],ik[2],'DAT')],'Data')
#leg.AddEntry(hProf[(ik[0],ik[1],ik[2],'QCD')],'QCD')
leg.AddEntry(h,'Data','LP')
leg.AddEntry(g,'QCD','LP')
leg.SetY1(leg.GetY2()-(leg.GetNRows()+1)*0.05)
leg.Draw()
can.SaveAs("plots/%s.png"%hProf[(ik[0],ik[1],ik[2],'DAT')].GetName().replace('hDAT','h'))
can.SaveAs("plots/%s.pdf"%hProf[(ik[0],ik[1],ik[2],'DAT')].GetName().replace('hDAT','h'))
fout.cd()
for h in hProf.itervalues():
h.Write(h.GetName(),TH1.kOverwrite)
# clean
fout.Close()
####################################################################################################
if __name__=='__main__':
mkBDTcorrelations()
| makeDirs(os.path.split(opts.fout)[0])
makeDirs('plots')
jsonsamp = json.loads(filecontent(opts.jsonsamp))
jsonvars = json.loads(filecontent(opts.jsonvars))
jsoninfo = json.loads(filecontent(opts.jsoninfo))
jsoncuts = json.loads(filecontent(opts.jsoncuts))
jsons = {'samp':jsonsamp,'vars':jsonvars,'info':jsoninfo,'cuts':jsoncuts}
# fix binning
if opts.binning:
for v in opts.binning:
#print v
jsons['vars']['variables'][v[0]]['nbins_x'] = v[1]
jsons['vars']['variables'][v[0]]['xmin'] = v[2]
jsons['vars']['variables'][v[0]]['xmax'] = v[3]
if opts.mvaBins:
for v in opts.mvaBins:
jsons['vars']['variables'][v]['nbins_x'] = opts.mvaBins[v][0]
jsons['vars']['variables'][v]['xs'] = array('f',[round(float(x),4) for x in opts.mvaBins[v][1]])
jsons['vars']['variables'][v]['xmin'] = opts.mvaBins[v][1][0]
jsons['vars']['variables'][v]['xmax'] = opts.mvaBins[v][1][-1]
return jsons | identifier_body |
mkPlots2.py | #!/usr/bin/env python
import sys,os,json,re
from optparse import OptionParser,OptionGroup
from array import array
basepath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(basepath+'/../../common/')
tempargv = sys.argv[:]
sys.argv = []
import ROOT
from ROOT import *
sys.argv = tempargv
from write_cuts import *
from toolkit import *
from main import main
# OPTION PARSER ##################################################################################
##
def parser(mp=None):
if mp==None: mp = OptionParser()
mpExtra = OptionGroup(mp,cyan+"Extra options"+plain)
mpExtra.add_option('--mvaBins',help='mva bins: var#3#1;3;6;9,...',type='str',action='callback',callback=optsplitdict)#,default={'mvaNOM':['4',['-0.6','0.0','0.7','0.84','1.0']],'mvaVBF':['3',['-0.1','0.4','0.8','1.0']]})
mpExtra.add_option('--complexWghts',help='Wght info.',type='str',action='callback',callback=optsplitmore,default=[])
mpExtra.add_option('--read',help='Read h\'s from file.',action='store_true',default=False)
mp.add_option_group(mpExtra)
return mp
####################################################################################################
def INTERNALprepare(opts):
makeDirs(os.path.split(opts.fout)[0])
makeDirs('plots')
jsonsamp = json.loads(filecontent(opts.jsonsamp))
jsonvars = json.loads(filecontent(opts.jsonvars))
jsoninfo = json.loads(filecontent(opts.jsoninfo))
jsoncuts = json.loads(filecontent(opts.jsoncuts))
jsons = {'samp':jsonsamp,'vars':jsonvars,'info':jsoninfo,'cuts':jsoncuts}
# fix binning
if opts.binning:
for v in opts.binning:
#print v
jsons['vars']['variables'][v[0]]['nbins_x'] = v[1]
jsons['vars']['variables'][v[0]]['xmin'] = v[2]
jsons['vars']['variables'][v[0]]['xmax'] = v[3]
if opts.mvaBins:
for v in opts.mvaBins:
jsons['vars']['variables'][v]['nbins_x'] = opts.mvaBins[v][0]
jsons['vars']['variables'][v]['xs'] = array('f',[round(float(x),4) for x in opts.mvaBins[v][1]])
jsons['vars']['variables'][v]['xmin'] = opts.mvaBins[v][1][0]
jsons['vars']['variables'][v]['xmax'] = opts.mvaBins[v][1][-1]
return jsons
####################################################################################################
def INTERNALblind(h,min,max):
for ix in range(1,h.GetNbinsX()+1):
x = h.GetBinCenter(ix)
if x>=min and x<=max:
h.SetBinContent(ix,0)
h.SetBinError(ix,0)
return h
####################################################################################################
def INTERNALstyle():
gROOT.ProcessLine("TH1::SetDefaultSumw2(1);")
gROOT.ProcessLine(".x %s/styleCMSTDR.C++"%basepath)
gROOT.ProcessLine('gROOT->ForceStyle();')
gStyle.SetPadLeftMargin(0.18)
gStyle.SetPadRightMargin(0.04)
gStyle.SetStripDecimals(0)
markers = [20, 21, 20 , 23]
colours = [kBlack,kBlue,kRed,kGreen+2]
return markers, colours
####################################################################################################
#def INTERNALstyleHist(h,i,markers,colours):
# h.Sumw2()
# h.SetMarkerStyle(markers[i])
# h.SetMarkerColor(colours[i])
# h.SetMarkerSize(1.2)
# h.SetLineColor(colours[i])
# h.GetXaxis().SetTitle("M_{bb} (GeV)")
# h.GetYaxis().SetTitle("PDF")
# h.GetYaxis().SetNdivisions(505)
# h.SetMaximum(0.25)
# return h
####################################################################################################
def INTERNALgraph(h):
g = TGraphErrors(h.GetNbinsX())
for ibin in range(1,h.GetNbinsX()+1):
x = h.GetBinCenter(ibin)
y = h.GetBinContent(ibin)
ex = h.GetBinWidth(ibin)/2.
ey = h.GetBinError(ibin)
g.SetPoint(ibin-1,x,y)
g.SetPointError(ibin-1,ex,ey)
return g
####################################################################################################
def | (opts,sels,jsons):
hProf = {}
vars = jsons['vars']['variables']
for s in sels:
for v in opts.variable:
vroot = vars[v]['root']
if s=='NOM' and '[2]' in vroot: continue
if s=='VBF' and '[1]' in vroot: continue
if 'mva' in v and not s in v: continue
for tag in ['QCD','DAT']:
vy = 'BDT'
vy2 = 'mva%s'%s
if tag=='QCD': hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'BDT output',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(int(vars[vy2]['nbins_x'])/(2 if not ('mbb' in v or 'cos' in v) else 4)),float(vars[vy2]['xmin']),float(vars[vy2]['xmax']))
else: hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'BDT output',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(vars[vy2]['nbins_x']),float(vars[vy2]['xmin']),float(vars[vy2]['xmax']))
hProf[(s,v,vy,tag)].Sumw2()
if 'mbbReg' in v: continue
vy = 'mbbReg'
vy2 = 'mbbReg[%d]'%(1 if s=='NOM' else 2)
if tag=='QCD': hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'regressed M_{bb} (GeV)',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),int(48/(2 if not ('cos' in v) else 4)),80.,200.)
else: hProf[(s,v,vy,tag)] = TProfile("h%s_sel%s_%s_%s"%(tag,s,v,vy),"h%s_sel%s_%s_%s;%s;<%s>%s"%(tag,s,v,vy,'regressed M_{bb} (GeV)',vars[v]['title_x'].replace(' (GeV)','')," (GeV)" if 'GeV' in vars[v]['title_x'] else ""),48,80.,200.)
hProf[(s,v,vy,tag)].Sumw2()
return hProf
####################################################################################################
#def INTERNALcanvases(opts):
# return None
####################################################################################################
#def INTERNALlegend(Tag):
# leg = TLegend(0.6,0.62,0.95,0.92)
# leg.SetHeader("%s selection"%Tag)
# leg.SetFillColor(0)
# leg.SetBorderSize(0)
# leg.SetTextFont(42)
# leg.SetTextSize(0.05)
# return leg
####################################################################################################
#def INTERNALline(fun, min, max):
# ln = TF1("line",fun,min,max)
# ln.SetLineColor(kBlack)
# ln.SetLineWidth(1)
# ln.SetLineStyle(2)
# ln.SetMinimum(0.7)
# ln.SetMaximum(1.3)
# ln.GetXaxis().SetTitle("M_{bb} (GeV)")
# ln.GetYaxis().SetTitle("Signal / Control")
# return ln
####################################################################################################
def INTERNALpicksamples(opts,jsons):
l1("Global path: %s"%opts.globalpath)
l1("Using input samples:")
allsamples = jsons['samp']['files']
selsamples = []
for s in allsamples.itervalues():
# require regex in opts.sample
if not opts.sample==[] and not any([(x in s['tag']) for x in opts.sample]): continue
# veto regex in opts.nosample
if not opts.nosample==[] and any([(x in s['tag']) for x in opts.nosample]): continue
selsamples += [s]
for s in sorted(selsamples,key=lambda x:(x['tag'][:3],int(re.findall('[0-9]+',x['tag'])[0]) if len(re.findall('[0-9]+',x['tag']))>0 else 1.,not 'NOM' in x['fname'])):
s['tfile'] = TFile.Open(opts.globalpath + s['fname'])
s['tree'] = s['tfile'].Get("Hbb/events")
s['incarnation'] = 'NOM' if 'NOM' in s['tag'] else 'VBF'
l2('%-15s: %-50s(%s)'%(s['tag'],s['fname'],s['tfile']))
return selsamples
####################################################################################################
def mkBDTcorrelations():
mp = parser()
opts,fout,samples = main(mp,False,False,True)
sels = ['NOM','VBF']
jsons = INTERNALprepare(opts)
markers, colours = INTERNALstyle()
selsamples = INTERNALpicksamples(opts,jsons)
can = TCanvas("main","main",650,600)
hProf = INTERNALhistograms(opts,sels,jsons)
#INTERNALcanvases(opts)
l1("Creating profiles")
if not opts.read:
for isel,sel in enumerate(sels):
ssel = [s for s in selsamples if sel in s['fname']]
for s in ssel:
l2("Running for %s"%s['tag'])
cut,cutlabel = write_cuts(["None"],["None"],reftrig=["None"],sample=s['tag'],jsonsamp=opts.jsonsamp,jsoncuts=opts.jsoncuts,weight=opts.complexWghts[(sel,'old')],trigequal=trigTruth(opts.usebool))
#cut = re.findall('\*.*',cut)[0][2:-1].replace('pu','ev.pu').replace('trig','ev.trig')
if opts.debug: l3("Cut %s: \n\t\t\t%s%s%s: \n\t\t\t%s"%(s['tag'],blue,cutlabel,plain,cut))
cutextra = "mbbReg[%d]>=80. && mbbReg[%d]<=200."%(1 if sel=='NOM' else 2, 1 if sel=='NOM' else 2)
for v in [jsons['vars']['variables'][vv] for vv in opts.variable]:
if sel=='NOM' and '[2]' in v['root']: continue
if sel=='VBF' and '[1]' in v['root']: continue
if 'mva' in v and not sel in v: continue
s['tree'].Draw("%s:%s>>+%s"%(v['root'],"mva%s"%sel,hProf[(sel,v['var'],'BDT','QCD' if 'QCD' in s['fname'] else 'DAT')].GetName()),TCut(cutextra)*TCut(cut),"prof")
if 'mbbReg' in v['var']: continue
s['tree'].Draw("%s:%s>>+%s"%(v['root'],"mbbReg[%d]"%(1 if sel=='NOM' else 2),hProf[(sel,v['var'],'mbbReg','QCD' if 'QCD' in s['fname'] else 'DAT')].GetName()),TCut(cutextra)*TCut(cut),"prof")
else:
for k in hProf.iterkeys():
hProf[k] = fout.Get("h%s_sel%s_%s_%s"%(k[3],k[0],k[1],k[2]))
k = list(set([x[0:3] for x in hProf.iterkeys()]))
for ik in k:
low = min(hProf[(ik[0],ik[1],ik[2],'QCD')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'QCD')].GetMinimumBin()),hProf[(ik[0],ik[1],ik[2],'DAT')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'DAT')].GetMinimumBin()))
hig = max(hProf[(ik[0],ik[1],ik[2],'QCD')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'QCD')].GetMaximumBin()),hProf[(ik[0],ik[1],ik[2],'DAT')].GetBinContent(hProf[(ik[0],ik[1],ik[2],'DAT')].GetMaximumBin()))
med = hProf[(ik[0],ik[1],ik[2],'DAT')].GetMean(2)
dif = hig-low
setlow = med - 1.6*dif
sethig = med + 1.6*dif
if not 'cos' in ik[1]:
setlow = max(setlow,0)
limits = {'dEtaqq1':[2,7,2,7],'dPhiqq1':[0,3,0,3],'jetBtag00':[0.5,1.2,0.8,1.0],'jetBtag01':[0.0,1.2,0.55,0.75],'jetQGLNOM0':[0,1,0.5,0.65],'jetQGLNOM1':[0,1,0.35,0.55],'jetQGLNOM2':[0,1,0.5,0.7],'jetQGLNOM3':[0,1,0.5,0.7],'mqq1':[0,2500,500,800],'mbbReg1':[115,145,115,145],'cosTheta1':[-0.35,0.35,-0.2,0.2],'jetQGLVBF0':[0,1,0.4,0.55],'jetQGLVBF1':[0,1,0.3,0.5],'jetQGLVBF2':[0,1,0.55,0.75],'jetQGLVBF3':[0,1,0.5,0.7],'mqq2':[0,2500,1000,1300],'mbbReg2':[115,145,115,145],'cosTheta2':[-0.35,0.35,-0.2,0.2],'dEtaqq2':[2,7,2,7],'dPhiqq2':[0,3,0,3],'softHt':[0,100,25,50],'softN2':[0,10,4,7]} #[3,7,4,6], [0,3,1,2] [0,3,1.5,2.5]
if ik[1] in limits:
if ik[2]=='BDT':
setlow = limits[ik[1]][0]
sethig = limits[ik[1]][1]
else:
setlow = limits[ik[1]][0]#2]
sethig = limits[ik[1]][1]#3]
# if 'jetBtag01' in ik[1] and ik[0]=='VBF' and ik[2]=='mbbReg':
# setlow = 0.4
# sethig = 0.55
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetRangeUser(setlow,sethig)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetTitleOffset(1.4)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetXaxis().SetTitleOffset(1.0)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetYaxis().SetNdivisions(507)
hProf[(ik[0],ik[1],ik[2],'DAT')].GetXaxis().SetNdivisions(507)
hProf[(ik[0],ik[1],ik[2],'DAT')].Draw("axis")
h = INTERNALgraph(hProf[(ik[0],ik[1],ik[2],'DAT')])
h.SetMarkerStyle(20)
h.SetMarkerSize(0.8)
h.SetMarkerColor(kBlack)
h.SetLineColor(kBlack)
h.SetFillStyle(0)
h.Draw("same pz")
g = INTERNALgraph(hProf[(ik[0],ik[1],ik[2],'QCD')])
# hProf[(ik[0],ik[1],ik[2],'QCD')].SetMarkerStyle(25)
# hProf[(ik[0],ik[1],ik[2],'QCD')].SetMarkerColor(kBlue)
# hProf[(ik[0],ik[1],ik[2],'QCD')].Draw("same e")
g.SetMarkerStyle(25)
g.SetMarkerColor(kBlue)
g.SetLineColor(kBlue)
g.SetFillStyle(0)
g.Draw("same pz")
leg = TLegend(0.62,0.75,0.9,0.92,"%s selection"%(ik[0].strip('sel')))
leg.SetFillStyle(-1)
leg.SetBorderSize(0)
leg.SetTextFont(42)
leg.SetTextSize(0.045)
#leg.AddEntry(hProf[(ik[0],ik[1],ik[2],'DAT')],'Data')
#leg.AddEntry(hProf[(ik[0],ik[1],ik[2],'QCD')],'QCD')
leg.AddEntry(h,'Data','LP')
leg.AddEntry(g,'QCD','LP')
leg.SetY1(leg.GetY2()-(leg.GetNRows()+1)*0.05)
leg.Draw()
can.SaveAs("plots/%s.png"%hProf[(ik[0],ik[1],ik[2],'DAT')].GetName().replace('hDAT','h'))
can.SaveAs("plots/%s.pdf"%hProf[(ik[0],ik[1],ik[2],'DAT')].GetName().replace('hDAT','h'))
fout.cd()
for h in hProf.itervalues():
h.Write(h.GetName(),TH1.kOverwrite)
# clean
fout.Close()
####################################################################################################
if __name__=='__main__':
mkBDTcorrelations()
| INTERNALhistograms | identifier_name |
criterion.rs | use std::cmp;
use std::fmt::Show;
use std::io::Command;
use std::num;
use bencher::Bencher;
use fs;
use outliers::Outliers;
use plot;
use statistics::{Estimate,Estimates,Mean,Median,MedianAbsDev,Sample,StdDev};
use stream::Stream;
use target::{Function,FunctionFamily,Program,Target};
use time::prefix::{Mili,Nano};
use time::traits::{Milisecond,Nanosecond,Second};
use time::types::Ns;
use time;
/// The "criterion" for the benchmark, which is also the benchmark "manager"
#[experimental]
pub struct Criterion {
confidence_level: f64,
measurement_time: Ns<u64>,
noise_threshold: f64,
nresamples: uint,
sample_size: uint,
significance_level: f64,
warm_up_time: Ns<u64>,
}
#[experimental]
impl Criterion {
/// This is the default criterion:
///
/// * Confidence level: 0.95
/// * Measurement time: 10 ms
/// * Noise threshold: 0.01 (1%)
/// * Bootstrap with 100 000 resamples
/// * Sample size: 100 measurements
/// * Significance level: 0.05
/// * Warm-up time: 1 s
#[experimental]
pub fn default() -> Criterion {
Criterion {
confidence_level: 0.95,
measurement_time: 10.ms().to::<Nano>(),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
warm_up_time: 1.s().to::<Nano>(),
}
}
/// Changes the confidence level
///
/// The confidence level is used to calculate the confidence intervals of the estimates
#[experimental]
pub fn confidence_level(&mut self, cl: f64) -> &mut Criterion {
assert!(cl > 0.0 && cl < 1.0);
self.confidence_level = cl;
self
}
/// Change the measurement time
///
/// The program/function under test is iterated for `measurement_time` ms. And the average run
/// time is reported as a measurement
#[experimental]
pub fn measurement_time(&mut self, ms: u64) -> &mut Criterion {
self.measurement_time = ms.ms().to::<Nano>();
self
}
/// Changes the noise threshold
///
/// When comparing benchmark results, only relative changes of the execution time above this
/// threshold are considered significant
#[experimental]
pub fn noise_threshold(&mut self, nt: f64) -> &mut Criterion {
assert!(nt >= 0.0);
self.noise_threshold = nt;
self
}
/// Changes the number of resamples
///
/// Number of resamples to use for bootstraping via case resampling
#[experimental]
pub fn nresamples(&mut self, n: uint) -> &mut Criterion |
/// Changes the size of a sample
///
/// A sample consists of severals measurements
#[experimental]
pub fn sample_size(&mut self, n: uint) -> &mut Criterion {
self.sample_size = n;
self
}
/// Changes the significance level
///
/// Significance level to use for hypothesis testing
#[experimental]
pub fn significance_level(&mut self, sl: f64) -> &mut Criterion {
assert!(sl > 0.0 && sl < 1.0);
self.significance_level = sl;
self
}
/// Changes the warm up time
///
/// The program/function under test is executed during `warm_up_time` ms before the real
/// measurement starts
#[experimental]
pub fn warm_up_time(&mut self, ms: u64) -> &mut Criterion {
self.warm_up_time = ms.ms().to::<Nano>();
self
}
/// Benchmark a function. See `Bench::iter()` for an example of how `fun` should look
#[experimental]
pub fn bench(&mut self, id: &str, fun: fn (&mut Bencher)) -> &mut Criterion {
local_data_key!(clock: Ns<f64>);
if clock.get().is_none() {
clock.replace(Some(clock_cost(self)));
}
// TODO Use clock cost to set a minimum `measurement_time`
bench(id, Function::<()>(fun), self);
println!("");
self
}
/// Benchmark a family of functions
///
/// `fun` will be benchmarked under each input
///
/// For example, if you want to benchmark `Vec::from_elem` with different size, use these
/// arguments:
///
/// let fun = |b, n| Vec::from_elem(n, 0u);
/// let inputs = [100, 10_000, 1_000_000];
///
/// This is equivalent to calling `bench` on each of the following functions:
///
/// let fun1 = |b| Vec::from_elem(100, 0u);
/// let fun2 = |b| Vec::from_elem(10_000, 0u);
/// let fun3 = |b| Vec::from_elem(1_000_000, 0u);
#[experimental]
pub fn bench_family<I: Show>(
&mut self,
id: &str,
fun: fn (&mut Bencher, &I),
inputs: &[I])
-> &mut Criterion {
for input in inputs.iter() {
let id = format!("{}/{}", id, input);
bench(id.as_slice(), FunctionFamily(fun, input), self);
}
print!("Summarizing results of {}... ", id);
plot::summarize(&Path::new(".criterion").join(id), id);
println!("DONE\n");
self
}
/// Benchmark an external program
///
/// The program must conform to the following specification:
///
/// extern crate time;
///
/// fn main() {
/// // Optional: Get the program arguments
/// let args = std::os::args();
///
/// for line in std::io::stdio::stdin().lines() {
/// // Get number of iterations to do
/// let iters: u64 = from_str(line.unwrap().as_slice().trim()).unwrap();
///
/// // Setup
///
/// // (For best results, use a monotonic timer)
/// let start = time::precise_time_ns();
/// for _ in range(0, iters) {
/// // Routine to benchmark goes here
/// }
/// let end = time::precise_time_ns();
///
/// // Teardown
///
/// // Report back the time (in nanoseconds) required to execute the routine
/// // `iters` times
/// println!("{}", end - start);
/// }
/// }
///
/// For example, to benchmark a python script use the following command
///
/// let cmd = Command::new("python3").args(["-O", "clock.py"]);
#[experimental]
pub fn bench_prog(&mut self,
id: &str,
prog: &Command)
-> &mut Criterion {
bench(id, Program::<()>(Stream::spawn(prog)), self);
println!("");
self
}
/// Benchmark an external program under various inputs
///
/// For example, to benchmark a python script under various inputs, use this combination:
///
/// let cmd = Command::new("python3").args(["-O", "fib.py"]);
/// let inputs = [5u, 10, 15];
///
/// This is equivalent to calling `bench_prog` on each of the following commands:
///
/// let cmd1 = Command::new("python3").args(["-O", "fib.py", "5"]);
/// let cmd2 = Command::new("python3").args(["-O", "fib.py", "10"]);
/// let cmd2 = Command::new("python3").args(["-O", "fib.py", "15"]);
#[experimental]
pub fn bench_prog_family<I: Show>(
&mut self,
id: &str,
prog: &Command,
inputs: &[I])
-> &mut Criterion {
for input in inputs.iter() {
let id = format!("{}/{}", id, input);
self.bench_prog(id.as_slice(), prog.clone().arg(format!("{}", input)));
}
print!("Summarizing results of {}... ", id);
plot::summarize(&Path::new(".criterion").join(id), id);
println!("DONE\n");
self
}
/// Summarize the results stored under the `.criterion/${id}` folder
///
/// Note that `bench_family` and `bench_prog_family` internally call the `summarize` method
#[experimental]
pub fn summarize(&mut self, id: &str) -> &mut Criterion {
print!("Summarizing results of {}... ", id);
plot::summarize(&Path::new(".criterion").join(id), id);
println!("DONE\n");
self
}
}
// FIXME Sorry! Everything below this point is a mess :/
fn bench<I>(id: &str, mut target: Target<I>, criterion: &Criterion) {
println!("Benchmarking {}", id);
rename_new_dir_to_base(id);
build_directory_skeleton(id);
let root = Path::new(".criterion").join(id);
let base_dir = root.join("base");
let change_dir = root.join("change");
let new_dir = root.join("new");
match target {
Program(_) => {
let _clock_cost =
external_clock_cost(&mut target, criterion, &new_dir.join("clock"), id);
// TODO use clock_cost to set minimal measurement_time
},
_ => {},
}
let sample = take_sample(&mut target, criterion).unwrap();
sample.save(&new_dir.join("sample.json"));
plot::sample(&sample, new_dir.join("points.svg"), id);
plot::pdf(&sample, new_dir.join("pdf.svg"), id);
let outliers = Outliers::classify(sample.as_slice());
outliers.report();
outliers.save(&new_dir.join("outliers/classification.json"));
plot::outliers(&outliers, new_dir.join("outliers/boxplot.svg"), id);
println!("> Estimating the statistics of the sample");
let nresamples = criterion.nresamples;
let cl = criterion.confidence_level;
println!(" > Bootstrapping the sample with {} resamples", nresamples);
let (estimates, distributions) =
sample.bootstrap([Mean, Median, StdDev, MedianAbsDev], nresamples, cl);
estimates.save(&new_dir.join("bootstrap/estimates.json"));
report_time(&estimates);
plot::time_distributions(&distributions,
&estimates,
&new_dir.join("bootstrap/distribution"),
id);
if !base_dir.exists() {
return;
}
println!("{}: Comparing with previous sample", id);
let base_sample = Sample::<Vec<f64>>::load(&base_dir.join("sample.json"));
let both_dir = root.join("both");
plot::both::pdfs(&base_sample, &sample, both_dir.join("pdfs.svg"), id);
plot::both::points(&base_sample, &sample, both_dir.join("points.svg"), id);
println!("> H0: Both samples belong to the same population");
println!(" > Bootstrapping with {} resamples", nresamples);
let t_statistic = sample.t_test(&base_sample);
let t_distribution = sample.bootstrap_t_test(&base_sample, nresamples, cl);
let t = t_statistic.abs();
let hits = t_distribution.as_slice().iter().filter(|&&x| x > t || x < -t).count();
let p_value = hits as f64 / nresamples as f64;
let sl = criterion.significance_level;
let different_population = p_value < sl;
println!(" > p = {}", p_value);
println!(" > {} reject the null hypothesis",
if different_population { "Strong evidence to" } else { "Can't" })
plot::t_test(t_statistic, &t_distribution, change_dir.join("bootstrap/t_test.svg"), id);
let nresamples_sqrt = (nresamples as f64).sqrt().ceil() as uint;
let nresamples = nresamples_sqrt * nresamples_sqrt;
println!("> Estimating relative change of statistics");
println!(" > Bootstrapping with {} resamples", nresamples);
let (estimates, distributions) =
sample.bootstrap_compare(&base_sample, [Mean, Median], nresamples_sqrt, cl);
estimates.save(&change_dir.join("bootstrap/estimates.json"));
report_change(&estimates);
plot::ratio_distributions(&distributions,
&estimates,
&change_dir.join("bootstrap/distribution"),
id);
let threshold = criterion.noise_threshold;
let mut regressed = vec!();
for &statistic in [Mean, Median].iter() {
let estimate = estimates.get(statistic);
let result = compare_to_threshold(estimate, threshold);
let p = estimate.point_estimate();
match result {
Improved => {
println!(" > {} has improved by {:.2}%", statistic, -100.0 * p);
regressed.push(false);
},
Regressed => {
println!(" > {} has regressed by {:.2}%", statistic, 100.0 * p);
regressed.push(true);
},
NonSignificant => {
regressed.push(false);
},
}
}
if different_population && regressed.iter().all(|&x| x) {
fail!("{} has regressed", id);
}
}
fn external_clock_cost<I>(
target: &mut Target<I>,
criterion: &Criterion,
dir: &Path,
id: &str,
) -> Ns<f64> {
println!("> Estimating the cost of a clock call");
let wu_time = criterion.warm_up_time;
println!(" > Warming up for {}", wu_time.to::<Mili>());
let init = time::now();
while time::now() - init < wu_time {
target.run(0);
}
println!(" > Collecting {} measurements", criterion.sample_size);
let sample = Sample::new(
range(0, criterion.sample_size).
map(|_| target.run(0).unwrap() as f64).
collect::<Vec<f64>>());
let clock_cost = sample.compute(Median);
println!(" > {}: {}", Median, format_time(clock_cost));
fs::mkdirp(dir);
plot::sample(&sample, dir.join("points.svg"), format!("{}/clock_cost", id));
plot::pdf(&sample, dir.join("pdf.svg"), format!("{}/clock_cost", id));
clock_cost.ns()
}
fn extrapolate_iters(iters: u64, took: Ns<u64>, want: Ns<u64>) -> (Ns<f64>, u64) {
let e_iters = cmp::max(want * iters / took, 1);
let e_time = (took * e_iters).cast::<f64>() / iters as f64;
(e_time, e_iters)
}
fn time_now(b: &mut Bencher) {
b.iter(|| time::now());
}
fn clock_cost(criterion: &Criterion) -> Ns<f64> {
println!("Estimating the cost of `precise_time_ns`");
let sample = take_sample(&mut Function::<()>(time_now), criterion);
let median = sample.unwrap().compute(Mean).ns();
println!("> Median: {}\n", median);
median
}
fn take_sample<I>(t: &mut Target<I>, criterion: &Criterion) -> Ns<Sample<Vec<f64>>> {
let wu_time = criterion.warm_up_time;
println!("> Warming up for {}", wu_time.to::<Mili>())
let (took, iters) = t.warm_up(wu_time);
let m_time = criterion.measurement_time;
let (m_time, m_iters) = extrapolate_iters(iters, took, m_time);
let sample_size = criterion.sample_size;
println!("> Collecting {} measurements, {} iters each in estimated {}",
sample_size,
m_iters,
format_time((m_time * sample_size as f64).unwrap()));
let sample = t.bench(sample_size, m_iters).unwrap();
sample.ns()
}
fn rename_new_dir_to_base(id: &str) {
let root_dir = Path::new(".criterion").join(id);
let base_dir = root_dir.join("base");
let new_dir = root_dir.join("new");
if base_dir.exists() { fs::rmrf(&base_dir) }
if new_dir.exists() { fs::mv(&new_dir, &base_dir) };
}
fn build_directory_skeleton(id: &str) {
let root = Path::new(".criterion").join(id);
fs::mkdirp(&root.join("both"));
fs::mkdirp(&root.join("change/bootstrap/distribution"));
fs::mkdirp(&root.join("new/bootstrap/distribution"));
fs::mkdirp(&root.join("new/outliers"));
}
fn format_short(n: f64) -> String {
if n < 10.0 { format!("{:.4}", n) }
else if n < 100.0 { format!("{:.3}", n) }
else if n < 1000.0 { format!("{:.2}", n) }
else { format!("{}", n) }
}
fn format_signed_short(n: f64) -> String {
let n_abs = n.abs();
if n_abs < 10.0 { format!("{:+.4}", n) }
else if n_abs < 100.0 { format!("{:+.3}", n) }
else if n_abs < 1000.0 { format!("{:+.2}", n) }
else { format!("{:+}", n) }
}
fn report_time(estimates: &Estimates) {
for &statistic in [Mean, Median, StdDev, MedianAbsDev].iter() {
let estimate = estimates.get(statistic);
let p = format_time(estimate.point_estimate());
let ci = estimate.confidence_interval();
let lb = format_time(ci.lower_bound());
let ub = format_time(ci.upper_bound());
let se = format_time(estimate.standard_error());
let cl = ci.confidence_level();
println!(" > {:<7} {} ± {} [{} {}] {}% CI", statistic, p, se, lb, ub, cl * 100.0);
}
}
fn format_time(ns: f64) -> String {
if ns < 1.0 {
format!("{:>6} ps", format_short(ns * 1e3))
} else if ns < num::pow(10.0, 3) {
format!("{:>6} ns", format_short(ns))
} else if ns < num::pow(10.0, 6) {
format!("{:>6} us", format_short(ns / 1e3))
} else if ns < num::pow(10.0, 9) {
format!("{:>6} ms", format_short(ns / 1e6))
} else {
format!("{:>6} s", format_short(ns / 1e9))
}
}
fn report_change(estimates: &Estimates) {
for &statistic in [Mean, Median].iter() {
let estimate = estimates.get(statistic);
let p = format_change(estimate.point_estimate(), true);
let ci = estimate.confidence_interval();
let lb = format_change(ci.lower_bound(), true);
let ub = format_change(ci.upper_bound(), true);
let se = format_change(estimate.standard_error(), false);
let cl = ci.confidence_level();
println!(" > {:<7} {} ± {} [{} {}] {}% CI", statistic, p, se, lb, ub, cl * 100.0);
}
}
fn format_change(pct: f64, signed: bool) -> String {
if signed {
format!("{:>+6}%", format_signed_short(pct * 1e2))
} else {
format!("{:>6}%", format_short(pct * 1e2))
}
}
enum ComparisonResult {
Improved,
Regressed,
NonSignificant,
}
fn compare_to_threshold(estimate: &Estimate, noise: f64) -> ComparisonResult {
let ci = estimate.confidence_interval();
let lb = ci.lower_bound();
let ub = ci.upper_bound();
if lb < -noise && ub < -noise {
Improved
} else if lb > noise && ub > noise {
Regressed
} else {
NonSignificant
}
}
| {
self.nresamples = n;
self
} | identifier_body |
criterion.rs | use std::cmp;
use std::fmt::Show;
use std::io::Command;
use std::num;
use bencher::Bencher;
use fs;
use outliers::Outliers;
use plot;
use statistics::{Estimate,Estimates,Mean,Median,MedianAbsDev,Sample,StdDev};
use stream::Stream;
use target::{Function,FunctionFamily,Program,Target};
use time::prefix::{Mili,Nano};
use time::traits::{Milisecond,Nanosecond,Second};
use time::types::Ns;
use time;
/// The "criterion" for the benchmark, which is also the benchmark "manager"
#[experimental]
pub struct Criterion {
confidence_level: f64,
measurement_time: Ns<u64>,
noise_threshold: f64,
nresamples: uint,
sample_size: uint,
significance_level: f64,
warm_up_time: Ns<u64>,
}
#[experimental]
impl Criterion {
/// This is the default criterion:
///
/// * Confidence level: 0.95
/// * Measurement time: 10 ms
/// * Noise threshold: 0.01 (1%)
/// * Bootstrap with 100 000 resamples
/// * Sample size: 100 measurements
/// * Significance level: 0.05
/// * Warm-up time: 1 s
#[experimental]
pub fn default() -> Criterion {
Criterion {
confidence_level: 0.95,
measurement_time: 10.ms().to::<Nano>(),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
warm_up_time: 1.s().to::<Nano>(),
}
}
/// Changes the confidence level
///
/// The confidence level is used to calculate the confidence intervals of the estimates
#[experimental]
pub fn confidence_level(&mut self, cl: f64) -> &mut Criterion {
assert!(cl > 0.0 && cl < 1.0);
self.confidence_level = cl;
self
}
/// Change the measurement time
///
/// The program/function under test is iterated for `measurement_time` ms. And the average run
/// time is reported as a measurement
#[experimental]
pub fn measurement_time(&mut self, ms: u64) -> &mut Criterion {
self.measurement_time = ms.ms().to::<Nano>();
self
}
/// Changes the noise threshold
///
/// When comparing benchmark results, only relative changes of the execution time above this
/// threshold are considered significant
#[experimental]
pub fn noise_threshold(&mut self, nt: f64) -> &mut Criterion {
assert!(nt >= 0.0);
self.noise_threshold = nt;
self
}
/// Changes the number of resamples
///
/// Number of resamples to use for bootstraping via case resampling
#[experimental]
pub fn nresamples(&mut self, n: uint) -> &mut Criterion {
self.nresamples = n;
self
}
/// Changes the size of a sample
///
/// A sample consists of severals measurements
#[experimental]
pub fn sample_size(&mut self, n: uint) -> &mut Criterion {
self.sample_size = n;
self
}
/// Changes the significance level
///
/// Significance level to use for hypothesis testing
#[experimental]
pub fn | (&mut self, sl: f64) -> &mut Criterion {
assert!(sl > 0.0 && sl < 1.0);
self.significance_level = sl;
self
}
/// Changes the warm up time
///
/// The program/function under test is executed during `warm_up_time` ms before the real
/// measurement starts
#[experimental]
pub fn warm_up_time(&mut self, ms: u64) -> &mut Criterion {
self.warm_up_time = ms.ms().to::<Nano>();
self
}
/// Benchmark a function. See `Bench::iter()` for an example of how `fun` should look
#[experimental]
pub fn bench(&mut self, id: &str, fun: fn (&mut Bencher)) -> &mut Criterion {
local_data_key!(clock: Ns<f64>);
if clock.get().is_none() {
clock.replace(Some(clock_cost(self)));
}
// TODO Use clock cost to set a minimum `measurement_time`
bench(id, Function::<()>(fun), self);
println!("");
self
}
/// Benchmark a family of functions
///
/// `fun` will be benchmarked under each input
///
/// For example, if you want to benchmark `Vec::from_elem` with different size, use these
/// arguments:
///
/// let fun = |b, n| Vec::from_elem(n, 0u);
/// let inputs = [100, 10_000, 1_000_000];
///
/// This is equivalent to calling `bench` on each of the following functions:
///
/// let fun1 = |b| Vec::from_elem(100, 0u);
/// let fun2 = |b| Vec::from_elem(10_000, 0u);
/// let fun3 = |b| Vec::from_elem(1_000_000, 0u);
#[experimental]
pub fn bench_family<I: Show>(
&mut self,
id: &str,
fun: fn (&mut Bencher, &I),
inputs: &[I])
-> &mut Criterion {
for input in inputs.iter() {
let id = format!("{}/{}", id, input);
bench(id.as_slice(), FunctionFamily(fun, input), self);
}
print!("Summarizing results of {}... ", id);
plot::summarize(&Path::new(".criterion").join(id), id);
println!("DONE\n");
self
}
/// Benchmark an external program
///
/// The program must conform to the following specification:
///
/// extern crate time;
///
/// fn main() {
/// // Optional: Get the program arguments
/// let args = std::os::args();
///
/// for line in std::io::stdio::stdin().lines() {
/// // Get number of iterations to do
/// let iters: u64 = from_str(line.unwrap().as_slice().trim()).unwrap();
///
/// // Setup
///
/// // (For best results, use a monotonic timer)
/// let start = time::precise_time_ns();
/// for _ in range(0, iters) {
/// // Routine to benchmark goes here
/// }
/// let end = time::precise_time_ns();
///
/// // Teardown
///
/// // Report back the time (in nanoseconds) required to execute the routine
/// // `iters` times
/// println!("{}", end - start);
/// }
/// }
///
/// For example, to benchmark a python script use the following command
///
/// let cmd = Command::new("python3").args(["-O", "clock.py"]);
#[experimental]
pub fn bench_prog(&mut self,
id: &str,
prog: &Command)
-> &mut Criterion {
bench(id, Program::<()>(Stream::spawn(prog)), self);
println!("");
self
}
/// Benchmark an external program under various inputs
///
/// For example, to benchmark a python script under various inputs, use this combination:
///
/// let cmd = Command::new("python3").args(["-O", "fib.py"]);
/// let inputs = [5u, 10, 15];
///
/// This is equivalent to calling `bench_prog` on each of the following commands:
///
/// let cmd1 = Command::new("python3").args(["-O", "fib.py", "5"]);
/// let cmd2 = Command::new("python3").args(["-O", "fib.py", "10"]);
/// let cmd2 = Command::new("python3").args(["-O", "fib.py", "15"]);
#[experimental]
pub fn bench_prog_family<I: Show>(
&mut self,
id: &str,
prog: &Command,
inputs: &[I])
-> &mut Criterion {
for input in inputs.iter() {
let id = format!("{}/{}", id, input);
self.bench_prog(id.as_slice(), prog.clone().arg(format!("{}", input)));
}
print!("Summarizing results of {}... ", id);
plot::summarize(&Path::new(".criterion").join(id), id);
println!("DONE\n");
self
}
/// Summarize the results stored under the `.criterion/${id}` folder
///
/// Note that `bench_family` and `bench_prog_family` internally call the `summarize` method
#[experimental]
pub fn summarize(&mut self, id: &str) -> &mut Criterion {
print!("Summarizing results of {}... ", id);
plot::summarize(&Path::new(".criterion").join(id), id);
println!("DONE\n");
self
}
}
// FIXME Sorry! Everything below this point is a mess :/
fn bench<I>(id: &str, mut target: Target<I>, criterion: &Criterion) {
println!("Benchmarking {}", id);
rename_new_dir_to_base(id);
build_directory_skeleton(id);
let root = Path::new(".criterion").join(id);
let base_dir = root.join("base");
let change_dir = root.join("change");
let new_dir = root.join("new");
match target {
Program(_) => {
let _clock_cost =
external_clock_cost(&mut target, criterion, &new_dir.join("clock"), id);
// TODO use clock_cost to set minimal measurement_time
},
_ => {},
}
let sample = take_sample(&mut target, criterion).unwrap();
sample.save(&new_dir.join("sample.json"));
plot::sample(&sample, new_dir.join("points.svg"), id);
plot::pdf(&sample, new_dir.join("pdf.svg"), id);
let outliers = Outliers::classify(sample.as_slice());
outliers.report();
outliers.save(&new_dir.join("outliers/classification.json"));
plot::outliers(&outliers, new_dir.join("outliers/boxplot.svg"), id);
println!("> Estimating the statistics of the sample");
let nresamples = criterion.nresamples;
let cl = criterion.confidence_level;
println!(" > Bootstrapping the sample with {} resamples", nresamples);
let (estimates, distributions) =
sample.bootstrap([Mean, Median, StdDev, MedianAbsDev], nresamples, cl);
estimates.save(&new_dir.join("bootstrap/estimates.json"));
report_time(&estimates);
plot::time_distributions(&distributions,
&estimates,
&new_dir.join("bootstrap/distribution"),
id);
if !base_dir.exists() {
return;
}
println!("{}: Comparing with previous sample", id);
let base_sample = Sample::<Vec<f64>>::load(&base_dir.join("sample.json"));
let both_dir = root.join("both");
plot::both::pdfs(&base_sample, &sample, both_dir.join("pdfs.svg"), id);
plot::both::points(&base_sample, &sample, both_dir.join("points.svg"), id);
println!("> H0: Both samples belong to the same population");
println!(" > Bootstrapping with {} resamples", nresamples);
let t_statistic = sample.t_test(&base_sample);
let t_distribution = sample.bootstrap_t_test(&base_sample, nresamples, cl);
let t = t_statistic.abs();
let hits = t_distribution.as_slice().iter().filter(|&&x| x > t || x < -t).count();
let p_value = hits as f64 / nresamples as f64;
let sl = criterion.significance_level;
let different_population = p_value < sl;
println!(" > p = {}", p_value);
println!(" > {} reject the null hypothesis",
if different_population { "Strong evidence to" } else { "Can't" })
plot::t_test(t_statistic, &t_distribution, change_dir.join("bootstrap/t_test.svg"), id);
let nresamples_sqrt = (nresamples as f64).sqrt().ceil() as uint;
let nresamples = nresamples_sqrt * nresamples_sqrt;
println!("> Estimating relative change of statistics");
println!(" > Bootstrapping with {} resamples", nresamples);
let (estimates, distributions) =
sample.bootstrap_compare(&base_sample, [Mean, Median], nresamples_sqrt, cl);
estimates.save(&change_dir.join("bootstrap/estimates.json"));
report_change(&estimates);
plot::ratio_distributions(&distributions,
&estimates,
&change_dir.join("bootstrap/distribution"),
id);
let threshold = criterion.noise_threshold;
let mut regressed = vec!();
for &statistic in [Mean, Median].iter() {
let estimate = estimates.get(statistic);
let result = compare_to_threshold(estimate, threshold);
let p = estimate.point_estimate();
match result {
Improved => {
println!(" > {} has improved by {:.2}%", statistic, -100.0 * p);
regressed.push(false);
},
Regressed => {
println!(" > {} has regressed by {:.2}%", statistic, 100.0 * p);
regressed.push(true);
},
NonSignificant => {
regressed.push(false);
},
}
}
if different_population && regressed.iter().all(|&x| x) {
fail!("{} has regressed", id);
}
}
fn external_clock_cost<I>(
target: &mut Target<I>,
criterion: &Criterion,
dir: &Path,
id: &str,
) -> Ns<f64> {
println!("> Estimating the cost of a clock call");
let wu_time = criterion.warm_up_time;
println!(" > Warming up for {}", wu_time.to::<Mili>());
let init = time::now();
while time::now() - init < wu_time {
target.run(0);
}
println!(" > Collecting {} measurements", criterion.sample_size);
let sample = Sample::new(
range(0, criterion.sample_size).
map(|_| target.run(0).unwrap() as f64).
collect::<Vec<f64>>());
let clock_cost = sample.compute(Median);
println!(" > {}: {}", Median, format_time(clock_cost));
fs::mkdirp(dir);
plot::sample(&sample, dir.join("points.svg"), format!("{}/clock_cost", id));
plot::pdf(&sample, dir.join("pdf.svg"), format!("{}/clock_cost", id));
clock_cost.ns()
}
fn extrapolate_iters(iters: u64, took: Ns<u64>, want: Ns<u64>) -> (Ns<f64>, u64) {
let e_iters = cmp::max(want * iters / took, 1);
let e_time = (took * e_iters).cast::<f64>() / iters as f64;
(e_time, e_iters)
}
fn time_now(b: &mut Bencher) {
b.iter(|| time::now());
}
fn clock_cost(criterion: &Criterion) -> Ns<f64> {
println!("Estimating the cost of `precise_time_ns`");
let sample = take_sample(&mut Function::<()>(time_now), criterion);
let median = sample.unwrap().compute(Mean).ns();
println!("> Median: {}\n", median);
median
}
fn take_sample<I>(t: &mut Target<I>, criterion: &Criterion) -> Ns<Sample<Vec<f64>>> {
let wu_time = criterion.warm_up_time;
println!("> Warming up for {}", wu_time.to::<Mili>())
let (took, iters) = t.warm_up(wu_time);
let m_time = criterion.measurement_time;
let (m_time, m_iters) = extrapolate_iters(iters, took, m_time);
let sample_size = criterion.sample_size;
println!("> Collecting {} measurements, {} iters each in estimated {}",
sample_size,
m_iters,
format_time((m_time * sample_size as f64).unwrap()));
let sample = t.bench(sample_size, m_iters).unwrap();
sample.ns()
}
fn rename_new_dir_to_base(id: &str) {
let root_dir = Path::new(".criterion").join(id);
let base_dir = root_dir.join("base");
let new_dir = root_dir.join("new");
if base_dir.exists() { fs::rmrf(&base_dir) }
if new_dir.exists() { fs::mv(&new_dir, &base_dir) };
}
fn build_directory_skeleton(id: &str) {
let root = Path::new(".criterion").join(id);
fs::mkdirp(&root.join("both"));
fs::mkdirp(&root.join("change/bootstrap/distribution"));
fs::mkdirp(&root.join("new/bootstrap/distribution"));
fs::mkdirp(&root.join("new/outliers"));
}
fn format_short(n: f64) -> String {
if n < 10.0 { format!("{:.4}", n) }
else if n < 100.0 { format!("{:.3}", n) }
else if n < 1000.0 { format!("{:.2}", n) }
else { format!("{}", n) }
}
fn format_signed_short(n: f64) -> String {
let n_abs = n.abs();
if n_abs < 10.0 { format!("{:+.4}", n) }
else if n_abs < 100.0 { format!("{:+.3}", n) }
else if n_abs < 1000.0 { format!("{:+.2}", n) }
else { format!("{:+}", n) }
}
fn report_time(estimates: &Estimates) {
for &statistic in [Mean, Median, StdDev, MedianAbsDev].iter() {
let estimate = estimates.get(statistic);
let p = format_time(estimate.point_estimate());
let ci = estimate.confidence_interval();
let lb = format_time(ci.lower_bound());
let ub = format_time(ci.upper_bound());
let se = format_time(estimate.standard_error());
let cl = ci.confidence_level();
println!(" > {:<7} {} ± {} [{} {}] {}% CI", statistic, p, se, lb, ub, cl * 100.0);
}
}
fn format_time(ns: f64) -> String {
if ns < 1.0 {
format!("{:>6} ps", format_short(ns * 1e3))
} else if ns < num::pow(10.0, 3) {
format!("{:>6} ns", format_short(ns))
} else if ns < num::pow(10.0, 6) {
format!("{:>6} us", format_short(ns / 1e3))
} else if ns < num::pow(10.0, 9) {
format!("{:>6} ms", format_short(ns / 1e6))
} else {
format!("{:>6} s", format_short(ns / 1e9))
}
}
fn report_change(estimates: &Estimates) {
for &statistic in [Mean, Median].iter() {
let estimate = estimates.get(statistic);
let p = format_change(estimate.point_estimate(), true);
let ci = estimate.confidence_interval();
let lb = format_change(ci.lower_bound(), true);
let ub = format_change(ci.upper_bound(), true);
let se = format_change(estimate.standard_error(), false);
let cl = ci.confidence_level();
println!(" > {:<7} {} ± {} [{} {}] {}% CI", statistic, p, se, lb, ub, cl * 100.0);
}
}
fn format_change(pct: f64, signed: bool) -> String {
if signed {
format!("{:>+6}%", format_signed_short(pct * 1e2))
} else {
format!("{:>6}%", format_short(pct * 1e2))
}
}
enum ComparisonResult {
Improved,
Regressed,
NonSignificant,
}
fn compare_to_threshold(estimate: &Estimate, noise: f64) -> ComparisonResult {
let ci = estimate.confidence_interval();
let lb = ci.lower_bound();
let ub = ci.upper_bound();
if lb < -noise && ub < -noise {
Improved
} else if lb > noise && ub > noise {
Regressed
} else {
NonSignificant
}
}
| significance_level | identifier_name |
criterion.rs | use std::cmp;
use std::fmt::Show;
use std::io::Command;
use std::num;
use bencher::Bencher;
use fs;
use outliers::Outliers;
use plot;
use statistics::{Estimate,Estimates,Mean,Median,MedianAbsDev,Sample,StdDev};
use stream::Stream;
use target::{Function,FunctionFamily,Program,Target};
use time::prefix::{Mili,Nano};
use time::traits::{Milisecond,Nanosecond,Second};
use time::types::Ns;
use time;
/// The "criterion" for the benchmark, which is also the benchmark "manager"
#[experimental]
pub struct Criterion {
confidence_level: f64,
measurement_time: Ns<u64>,
noise_threshold: f64,
nresamples: uint,
sample_size: uint,
significance_level: f64,
warm_up_time: Ns<u64>,
}
#[experimental]
impl Criterion {
/// This is the default criterion:
///
/// * Confidence level: 0.95
/// * Measurement time: 10 ms
/// * Noise threshold: 0.01 (1%)
/// * Bootstrap with 100 000 resamples
/// * Sample size: 100 measurements
/// * Significance level: 0.05
/// * Warm-up time: 1 s
#[experimental]
pub fn default() -> Criterion {
Criterion {
confidence_level: 0.95,
measurement_time: 10.ms().to::<Nano>(),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
warm_up_time: 1.s().to::<Nano>(),
}
}
/// Changes the confidence level
///
/// The confidence level is used to calculate the confidence intervals of the estimates
#[experimental]
pub fn confidence_level(&mut self, cl: f64) -> &mut Criterion {
assert!(cl > 0.0 && cl < 1.0);
self.confidence_level = cl;
self
}
/// Change the measurement time
///
/// The program/function under test is iterated for `measurement_time` ms. And the average run
/// time is reported as a measurement
#[experimental]
pub fn measurement_time(&mut self, ms: u64) -> &mut Criterion {
self.measurement_time = ms.ms().to::<Nano>();
self
}
/// Changes the noise threshold
///
/// When comparing benchmark results, only relative changes of the execution time above this
/// threshold are considered significant
#[experimental]
pub fn noise_threshold(&mut self, nt: f64) -> &mut Criterion {
assert!(nt >= 0.0);
self.noise_threshold = nt;
self
}
/// Changes the number of resamples
///
/// Number of resamples to use for bootstraping via case resampling
#[experimental]
pub fn nresamples(&mut self, n: uint) -> &mut Criterion {
self.nresamples = n;
self
}
/// Changes the size of a sample
///
/// A sample consists of severals measurements
#[experimental]
pub fn sample_size(&mut self, n: uint) -> &mut Criterion {
self.sample_size = n;
self
}
/// Changes the significance level
///
/// Significance level to use for hypothesis testing
#[experimental]
pub fn significance_level(&mut self, sl: f64) -> &mut Criterion {
assert!(sl > 0.0 && sl < 1.0);
self.significance_level = sl;
self
}
/// Changes the warm up time
///
/// The program/function under test is executed during `warm_up_time` ms before the real
/// measurement starts
#[experimental]
pub fn warm_up_time(&mut self, ms: u64) -> &mut Criterion {
self.warm_up_time = ms.ms().to::<Nano>();
self
}
/// Benchmark a function. See `Bench::iter()` for an example of how `fun` should look
#[experimental]
pub fn bench(&mut self, id: &str, fun: fn (&mut Bencher)) -> &mut Criterion {
local_data_key!(clock: Ns<f64>);
if clock.get().is_none() {
clock.replace(Some(clock_cost(self)));
}
// TODO Use clock cost to set a minimum `measurement_time`
bench(id, Function::<()>(fun), self);
println!("");
self
}
/// Benchmark a family of functions
///
/// `fun` will be benchmarked under each input
///
/// For example, if you want to benchmark `Vec::from_elem` with different size, use these
/// arguments:
///
/// let fun = |b, n| Vec::from_elem(n, 0u);
/// let inputs = [100, 10_000, 1_000_000];
///
/// This is equivalent to calling `bench` on each of the following functions:
///
/// let fun1 = |b| Vec::from_elem(100, 0u);
/// let fun2 = |b| Vec::from_elem(10_000, 0u);
/// let fun3 = |b| Vec::from_elem(1_000_000, 0u);
#[experimental]
pub fn bench_family<I: Show>(
&mut self,
id: &str,
fun: fn (&mut Bencher, &I),
inputs: &[I])
-> &mut Criterion {
for input in inputs.iter() {
let id = format!("{}/{}", id, input);
bench(id.as_slice(), FunctionFamily(fun, input), self);
}
print!("Summarizing results of {}... ", id);
plot::summarize(&Path::new(".criterion").join(id), id);
println!("DONE\n");
self
}
/// Benchmark an external program
///
/// The program must conform to the following specification:
///
/// extern crate time;
///
/// fn main() {
/// // Optional: Get the program arguments
/// let args = std::os::args();
///
/// for line in std::io::stdio::stdin().lines() {
/// // Get number of iterations to do
/// let iters: u64 = from_str(line.unwrap().as_slice().trim()).unwrap();
///
/// // Setup
///
/// // (For best results, use a monotonic timer)
/// let start = time::precise_time_ns();
/// for _ in range(0, iters) {
/// // Routine to benchmark goes here
/// }
/// let end = time::precise_time_ns();
///
/// // Teardown
///
/// // Report back the time (in nanoseconds) required to execute the routine
/// // `iters` times
/// println!("{}", end - start);
/// }
/// }
///
/// For example, to benchmark a python script use the following command
///
/// let cmd = Command::new("python3").args(["-O", "clock.py"]);
#[experimental]
pub fn bench_prog(&mut self,
id: &str,
prog: &Command)
-> &mut Criterion {
bench(id, Program::<()>(Stream::spawn(prog)), self);
println!("");
self
}
/// Benchmark an external program under various inputs
///
/// For example, to benchmark a python script under various inputs, use this combination:
///
/// let cmd = Command::new("python3").args(["-O", "fib.py"]);
/// let inputs = [5u, 10, 15];
///
/// This is equivalent to calling `bench_prog` on each of the following commands:
///
/// let cmd1 = Command::new("python3").args(["-O", "fib.py", "5"]);
/// let cmd2 = Command::new("python3").args(["-O", "fib.py", "10"]);
/// let cmd2 = Command::new("python3").args(["-O", "fib.py", "15"]);
#[experimental]
pub fn bench_prog_family<I: Show>(
&mut self,
id: &str,
prog: &Command,
inputs: &[I])
-> &mut Criterion {
for input in inputs.iter() {
let id = format!("{}/{}", id, input);
self.bench_prog(id.as_slice(), prog.clone().arg(format!("{}", input)));
}
print!("Summarizing results of {}... ", id);
plot::summarize(&Path::new(".criterion").join(id), id);
println!("DONE\n");
self
}
/// Summarize the results stored under the `.criterion/${id}` folder
///
/// Note that `bench_family` and `bench_prog_family` internally call the `summarize` method
#[experimental]
pub fn summarize(&mut self, id: &str) -> &mut Criterion {
print!("Summarizing results of {}... ", id);
plot::summarize(&Path::new(".criterion").join(id), id);
println!("DONE\n");
self
}
}
// FIXME Sorry! Everything below this point is a mess :/
fn bench<I>(id: &str, mut target: Target<I>, criterion: &Criterion) {
println!("Benchmarking {}", id);
rename_new_dir_to_base(id);
build_directory_skeleton(id);
let root = Path::new(".criterion").join(id);
let base_dir = root.join("base");
let change_dir = root.join("change");
let new_dir = root.join("new");
match target {
Program(_) => {
let _clock_cost =
external_clock_cost(&mut target, criterion, &new_dir.join("clock"), id);
// TODO use clock_cost to set minimal measurement_time
},
_ => {},
}
let sample = take_sample(&mut target, criterion).unwrap();
sample.save(&new_dir.join("sample.json"));
plot::sample(&sample, new_dir.join("points.svg"), id);
plot::pdf(&sample, new_dir.join("pdf.svg"), id);
let outliers = Outliers::classify(sample.as_slice());
outliers.report();
outliers.save(&new_dir.join("outliers/classification.json"));
plot::outliers(&outliers, new_dir.join("outliers/boxplot.svg"), id);
println!("> Estimating the statistics of the sample");
let nresamples = criterion.nresamples;
let cl = criterion.confidence_level;
println!(" > Bootstrapping the sample with {} resamples", nresamples);
let (estimates, distributions) =
sample.bootstrap([Mean, Median, StdDev, MedianAbsDev], nresamples, cl);
estimates.save(&new_dir.join("bootstrap/estimates.json"));
report_time(&estimates);
plot::time_distributions(&distributions,
&estimates,
&new_dir.join("bootstrap/distribution"),
id);
if !base_dir.exists() {
return;
}
println!("{}: Comparing with previous sample", id);
let base_sample = Sample::<Vec<f64>>::load(&base_dir.join("sample.json"));
let both_dir = root.join("both");
plot::both::pdfs(&base_sample, &sample, both_dir.join("pdfs.svg"), id);
plot::both::points(&base_sample, &sample, both_dir.join("points.svg"), id);
println!("> H0: Both samples belong to the same population");
println!(" > Bootstrapping with {} resamples", nresamples);
let t_statistic = sample.t_test(&base_sample);
let t_distribution = sample.bootstrap_t_test(&base_sample, nresamples, cl);
let t = t_statistic.abs();
let hits = t_distribution.as_slice().iter().filter(|&&x| x > t || x < -t).count();
let p_value = hits as f64 / nresamples as f64;
let sl = criterion.significance_level;
let different_population = p_value < sl;
println!(" > p = {}", p_value);
println!(" > {} reject the null hypothesis",
if different_population { "Strong evidence to" } else { "Can't" })
plot::t_test(t_statistic, &t_distribution, change_dir.join("bootstrap/t_test.svg"), id);
let nresamples_sqrt = (nresamples as f64).sqrt().ceil() as uint;
let nresamples = nresamples_sqrt * nresamples_sqrt;
println!("> Estimating relative change of statistics");
println!(" > Bootstrapping with {} resamples", nresamples);
let (estimates, distributions) =
sample.bootstrap_compare(&base_sample, [Mean, Median], nresamples_sqrt, cl);
estimates.save(&change_dir.join("bootstrap/estimates.json"));
report_change(&estimates);
plot::ratio_distributions(&distributions,
&estimates,
&change_dir.join("bootstrap/distribution"),
id);
let threshold = criterion.noise_threshold;
let mut regressed = vec!();
for &statistic in [Mean, Median].iter() {
let estimate = estimates.get(statistic);
let result = compare_to_threshold(estimate, threshold);
let p = estimate.point_estimate();
match result {
Improved => {
println!(" > {} has improved by {:.2}%", statistic, -100.0 * p);
regressed.push(false);
},
Regressed => {
println!(" > {} has regressed by {:.2}%", statistic, 100.0 * p);
regressed.push(true);
},
NonSignificant => {
regressed.push(false);
},
}
}
if different_population && regressed.iter().all(|&x| x) {
fail!("{} has regressed", id);
}
}
fn external_clock_cost<I>(
target: &mut Target<I>,
criterion: &Criterion,
dir: &Path,
id: &str,
) -> Ns<f64> {
println!("> Estimating the cost of a clock call");
let wu_time = criterion.warm_up_time;
println!(" > Warming up for {}", wu_time.to::<Mili>());
let init = time::now();
while time::now() - init < wu_time {
target.run(0);
}
println!(" > Collecting {} measurements", criterion.sample_size);
let sample = Sample::new(
range(0, criterion.sample_size).
map(|_| target.run(0).unwrap() as f64).
collect::<Vec<f64>>());
let clock_cost = sample.compute(Median);
println!(" > {}: {}", Median, format_time(clock_cost));
fs::mkdirp(dir);
plot::sample(&sample, dir.join("points.svg"), format!("{}/clock_cost", id));
plot::pdf(&sample, dir.join("pdf.svg"), format!("{}/clock_cost", id));
clock_cost.ns()
}
fn extrapolate_iters(iters: u64, took: Ns<u64>, want: Ns<u64>) -> (Ns<f64>, u64) {
let e_iters = cmp::max(want * iters / took, 1);
let e_time = (took * e_iters).cast::<f64>() / iters as f64;
(e_time, e_iters)
}
fn time_now(b: &mut Bencher) {
b.iter(|| time::now());
}
fn clock_cost(criterion: &Criterion) -> Ns<f64> {
println!("Estimating the cost of `precise_time_ns`");
let sample = take_sample(&mut Function::<()>(time_now), criterion);
let median = sample.unwrap().compute(Mean).ns();
println!("> Median: {}\n", median);
median
}
fn take_sample<I>(t: &mut Target<I>, criterion: &Criterion) -> Ns<Sample<Vec<f64>>> {
let wu_time = criterion.warm_up_time;
println!("> Warming up for {}", wu_time.to::<Mili>())
let (took, iters) = t.warm_up(wu_time);
let m_time = criterion.measurement_time;
let (m_time, m_iters) = extrapolate_iters(iters, took, m_time);
let sample_size = criterion.sample_size;
println!("> Collecting {} measurements, {} iters each in estimated {}",
sample_size,
m_iters,
format_time((m_time * sample_size as f64).unwrap()));
let sample = t.bench(sample_size, m_iters).unwrap();
sample.ns()
}
fn rename_new_dir_to_base(id: &str) {
let root_dir = Path::new(".criterion").join(id);
let base_dir = root_dir.join("base");
let new_dir = root_dir.join("new");
if base_dir.exists() { fs::rmrf(&base_dir) }
if new_dir.exists() { fs::mv(&new_dir, &base_dir) };
}
fn build_directory_skeleton(id: &str) {
let root = Path::new(".criterion").join(id);
fs::mkdirp(&root.join("both"));
fs::mkdirp(&root.join("change/bootstrap/distribution"));
fs::mkdirp(&root.join("new/bootstrap/distribution"));
fs::mkdirp(&root.join("new/outliers"));
}
fn format_short(n: f64) -> String {
if n < 10.0 { format!("{:.4}", n) }
else if n < 100.0 { format!("{:.3}", n) }
else if n < 1000.0 { format!("{:.2}", n) }
else { format!("{}", n) }
}
fn format_signed_short(n: f64) -> String {
let n_abs = n.abs();
if n_abs < 10.0 { format!("{:+.4}", n) }
else if n_abs < 100.0 { format!("{:+.3}", n) }
else if n_abs < 1000.0 { format!("{:+.2}", n) }
else { format!("{:+}", n) }
}
fn report_time(estimates: &Estimates) {
for &statistic in [Mean, Median, StdDev, MedianAbsDev].iter() {
let estimate = estimates.get(statistic);
let p = format_time(estimate.point_estimate());
let ci = estimate.confidence_interval();
let lb = format_time(ci.lower_bound());
let ub = format_time(ci.upper_bound());
let se = format_time(estimate.standard_error());
let cl = ci.confidence_level();
println!(" > {:<7} {} ± {} [{} {}] {}% CI", statistic, p, se, lb, ub, cl * 100.0); | }
}
fn format_time(ns: f64) -> String {
if ns < 1.0 {
format!("{:>6} ps", format_short(ns * 1e3))
} else if ns < num::pow(10.0, 3) {
format!("{:>6} ns", format_short(ns))
} else if ns < num::pow(10.0, 6) {
format!("{:>6} us", format_short(ns / 1e3))
} else if ns < num::pow(10.0, 9) {
format!("{:>6} ms", format_short(ns / 1e6))
} else {
format!("{:>6} s", format_short(ns / 1e9))
}
}
fn report_change(estimates: &Estimates) {
for &statistic in [Mean, Median].iter() {
let estimate = estimates.get(statistic);
let p = format_change(estimate.point_estimate(), true);
let ci = estimate.confidence_interval();
let lb = format_change(ci.lower_bound(), true);
let ub = format_change(ci.upper_bound(), true);
let se = format_change(estimate.standard_error(), false);
let cl = ci.confidence_level();
println!(" > {:<7} {} ± {} [{} {}] {}% CI", statistic, p, se, lb, ub, cl * 100.0);
}
}
fn format_change(pct: f64, signed: bool) -> String {
if signed {
format!("{:>+6}%", format_signed_short(pct * 1e2))
} else {
format!("{:>6}%", format_short(pct * 1e2))
}
}
enum ComparisonResult {
Improved,
Regressed,
NonSignificant,
}
fn compare_to_threshold(estimate: &Estimate, noise: f64) -> ComparisonResult {
let ci = estimate.confidence_interval();
let lb = ci.lower_bound();
let ub = ci.upper_bound();
if lb < -noise && ub < -noise {
Improved
} else if lb > noise && ub > noise {
Regressed
} else {
NonSignificant
}
} | random_line_split | |
send.go | // Package transport provides streaming object-based transport over http for intra-cluster continuous
// intra-cluster communications (see README for details and usage example).
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*/
package transport
import (
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"net/url"
"os"
"path"
"runtime"
"strconv"
"sync"
"time"
"unsafe"
"github.com/NVIDIA/aistore/3rdparty/atomic"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/memsys"
"github.com/NVIDIA/aistore/xoshiro256"
lz4 "github.com/pierrec/lz4/v3"
)
// transport defaults
const (
maxHeaderSize = 1024
lastMarker = math.MaxInt64
tickMarker = math.MaxInt64 ^ 0xa5a5a5a5
tickUnit = time.Second
defaultIdleOut = time.Second * 2
burstNum = 32 // default max num objects that can be posted for sending without any back-pressure
)
// stream TCP/HTTP session: inactive <=> active transitions
const (
inactive = iota
active
)
// termination: reasons
const (
reasonUnknown = "unknown"
reasonError = "error"
endOfStream = "end-of-stream"
reasonStopped = "stopped"
)
// API types
type (
Stream struct {
client Client // http client this send-stream will use
// user-defined & queryable
toURL, trname string // http endpoint
sessID int64 // stream session ID
sessST atomic.Int64 // state of the TCP/HTTP session: active (connected) | inactive (disconnected)
stats Stats // stream stats
Numcur, Sizecur int64 // gets reset to zero upon each timeout
// internals
lid string // log prefix
workCh chan Obj // aka SQ: next object to stream
cmplCh chan cmpl // aka SCQ; note that SQ and SCQ together form a FIFO
lastCh *cmn.StopCh // end of stream
stopCh *cmn.StopCh // stop/abort stream
postCh chan struct{} // to indicate that workCh has work
callback SendCallback // to free SGLs, close files, etc.
time struct {
start atomic.Int64 // to support idle(%)
idleOut time.Duration // idle timeout
inSend atomic.Bool // true upon Send() or Read() - info for Collector to delay cleanup
ticks int // num 1s ticks until idle timeout
index int // heap stuff
}
wg sync.WaitGroup
sendoff sendoff
maxheader []byte // max header buffer
header []byte // object header - slice of the maxheader with bucket/objName, etc. fields
term struct {
mu sync.Mutex
terminated bool
err error
reason *string
}
lz4s lz4Stream
}
// advanced usage: additional stream control
Extra struct {
IdleTimeout time.Duration // stream idle timeout: causes PUT to terminate (and renew on the next obj send)
Callback SendCallback // typical usage: to free SGLs, close files, etc.
Compression string // see CompressAlways, etc. enum
MMSA *memsys.MMSA // compression-related buffering
Config *cmn.Config
}
// stream stats
Stats struct {
Num atomic.Int64 // number of transferred objects including zero size (header-only) objects
Size atomic.Int64 // transferred object size (does not include transport headers)
Offset atomic.Int64 // stream offset, in bytes
CompressedSize atomic.Int64 // compressed size (NOTE: converges to the actual compressed size over time)
}
EndpointStats map[uint64]*Stats // all stats for a given http endpoint defined by a tuple(network, trname) by session ID
// object attrs
ObjectAttrs struct {
Atime int64 // access time - nanoseconds since UNIX epoch
Size int64 // size of objects in bytes
CksumType string // checksum type
CksumValue string // checksum of the object produced by given checksum type
Version string // version of the object
}
// object header
Header struct {
Bck cmn.Bck
ObjName string
ObjAttrs ObjectAttrs // attributes/metadata of the sent object
Opaque []byte // custom control (optional)
}
// object to transmit
Obj struct {
Hdr Header // object header
Reader io.ReadCloser // reader, to read the object, and close when done
Callback SendCallback // callback fired when sending is done OR when the stream terminates (see term.reason)
CmplPtr unsafe.Pointer // local pointer that gets returned to the caller via Send completion callback
// private
prc *atomic.Int64 // if present, ref-counts num sent objects to call SendCallback only once
}
// object-sent callback that has the following signature can optionally be defined on a:
// a) per-stream basis (via NewStream constructor - see Extra struct above)
// b) for a given object that is being sent (for instance, to support a call-per-batch semantics)
// Naturally, object callback "overrides" the per-stream one: when object callback is defined
// (i.e., non-nil), the stream callback is ignored/skipped.
// NOTE: if defined, the callback executes asynchronously as far as the sending part is concerned
SendCallback func(Header, io.ReadCloser, unsafe.Pointer, error)
StreamCollector struct {
cmn.Named
}
)
// internal types
type (
lz4Stream struct {
s *Stream
zw *lz4.Writer // orig reader => zw
sgl *memsys.SGL // zw => bb => network
blockMaxSize int // *uncompressed* block max size
frameChecksum bool // true: checksum lz4 frames
}
sendoff struct {
obj Obj
// in progress
off int64
dod int64
}
cmpl struct { // send completions => SCQ
obj Obj
err error
}
nopReadCloser struct{}
collector struct {
streams map[string]*Stream
heap []*Stream
ticker *time.Ticker
stopCh *cmn.StopCh
ctrlCh chan ctrl
}
ctrl struct { // add/del channel to/from collector
s *Stream
add bool
}
)
var (
nopRC = &nopReadCloser{} // read and close stubs
nextSID = *atomic.NewInt64(100) // unique session IDs starting from 101
sc = &StreamCollector{} // idle timer and house-keeping (slow path)
gc *collector // real stream collector
)
func (extra *Extra) compressed() bool {
return extra.Compression != "" && extra.Compression != cmn.CompressNever
}
//
// API methods
//
func NewStream(client Client, toURL string, extra *Extra) (s *Stream) {
u, err := url.Parse(toURL)
if err != nil {
glog.Errorf("Failed to parse %s: %v", toURL, err)
return
}
s = &Stream{client: client, toURL: toURL}
s.time.idleOut = defaultIdleOut
if extra != nil {
s.callback = extra.Callback
if extra.IdleTimeout > 0 {
s.time.idleOut = extra.IdleTimeout
}
if extra.compressed() {
config := extra.Config
if config == nil {
config = cmn.GCO.Get()
}
s.lz4s.s = s
s.lz4s.blockMaxSize = config.Compression.BlockMaxSize
s.lz4s.frameChecksum = config.Compression.Checksum
mem := extra.MMSA
if mem == nil {
mem = memsys.DefaultPageMM()
glog.Warningln("Using global memory manager for streaming inline compression")
}
if s.lz4s.blockMaxSize >= memsys.MaxPageSlabSize {
s.lz4s.sgl = mem.NewSGL(memsys.MaxPageSlabSize, memsys.MaxPageSlabSize)
} else {
s.lz4s.sgl = mem.NewSGL(cmn.KiB*64, cmn.KiB*64)
}
}
}
if s.time.idleOut < tickUnit {
s.time.idleOut = tickUnit
}
s.time.ticks = int(s.time.idleOut / tickUnit)
s.sessID = nextSID.Inc()
s.trname = path.Base(u.Path)
if !s.compressed() {
s.lid = fmt.Sprintf("%s[%d]", s.trname, s.sessID)
} else {
s.lid = fmt.Sprintf("%s[%d[%s]]", s.trname, s.sessID, cmn.B2S(int64(s.lz4s.blockMaxSize), 0))
}
// burst size: the number of objects the caller is permitted to post for sending
// without experiencing any sort of back-pressure
burst := burstNum
if a := os.Getenv("AIS_STREAM_BURST_NUM"); a != "" {
if burst64, err := strconv.ParseInt(a, 10, 0); err != nil {
glog.Errorf("%s: error parsing env AIS_STREAM_BURST_NUM=%s: %v", s, a, err)
burst = burstNum
} else {
burst = int(burst64)
}
}
s.workCh = make(chan Obj, burst) // Send Qeueue or SQ
s.cmplCh = make(chan cmpl, burst) // Send Completion Queue or SCQ
s.lastCh = cmn.NewStopCh()
s.stopCh = cmn.NewStopCh()
s.postCh = make(chan struct{}, 1)
s.maxheader = make([]byte, maxHeaderSize) // NOTE: must be large enough to accommodate all max-size Header
s.sessST.Store(inactive) // NOTE: initiate HTTP session upon arrival of the first object
s.time.start.Store(time.Now().UnixNano())
s.term.reason = new(string)
s.wg.Add(2)
var dryrun bool
if a := os.Getenv("AIS_STREAM_DRY_RUN"); a != "" {
if dryrun, err = strconv.ParseBool(a); err != nil {
glog.Errorf("%s: error parsing env AIS_STREAM_DRY_RUN=%s: %v", s, a, err)
}
cmn.Assert(dryrun || client != nil)
}
go s.sendLoop(dryrun) // handle SQ
go s.cmplLoop() // handle SCQ
gc.ctrlCh <- ctrl{s, true /* collect */}
return
}
func (s *Stream) compressed() bool { return s.lz4s.s == s }
// Asynchronously send an object defined by its header and its reader.
// ---------------------------------------------------------------------------------------
//
// The sending pipeline is implemented as a pair (SQ, SCQ) where the former is a send queue
// realized as workCh, and the latter is a send completion queue (cmplCh).
// Together, SQ and SCQ form a FIFO as far as ordering of transmitted objects.
//
// NOTE: header-only objects are supported; when there's no data to send (that is,
// when the header's Dsize field is set to zero), the reader is not required and the
// corresponding argument in Send() can be set to nil.
//
// NOTE: object reader is always closed by the code that handles send completions.
// In the case when SendCallback is provided (i.e., non-nil), the closing is done
// right after calling this callback - see objDone below for details.
//
// NOTE: Optional reference counting is also done by (and in) the objDone, so that the
// SendCallback gets called if and only when the refcount (if provided i.e., non-nil)
// reaches zero.
//
// NOTE: For every transmission of every object there's always an objDone() completion
// (with its refcounting and reader-closing). This holds true in all cases including
// network errors that may cause sudden and instant termination of the underlying
// stream(s).
//
// ---------------------------------------------------------------------------------------
func (s *Stream) Send(obj Obj) (err error) {
s.time.inSend.Store(true) // an indication for Collector to postpone cleanup
hdr := &obj.Hdr
if s.Terminated() {
err = fmt.Errorf("%s terminated(%s, %v), cannot send [%s/%s(%d)]",
s, *s.term.reason, s.term.err, hdr.Bck, hdr.ObjName, hdr.ObjAttrs.Size)
glog.Errorln(err)
return
}
if s.sessST.CAS(inactive, active) {
s.postCh <- struct{}{}
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: inactive => active", s)
}
}
// next object => SQ
if obj.Reader == nil {
cmn.Assert(hdr.IsHeaderOnly())
obj.Reader = nopRC
}
s.workCh <- obj
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: send %s/%s(%d)[sq=%d]", s, hdr.Bck, hdr.ObjName, hdr.ObjAttrs.Size, len(s.workCh))
}
return
}
func (s *Stream) Fin() {
_ = s.Send(Obj{Hdr: Header{ObjAttrs: ObjectAttrs{Size: lastMarker}}})
s.wg.Wait()
}
func (s *Stream) Stop() { s.stopCh.Close() }
func (s *Stream) URL() string { return s.toURL }
func (s *Stream) ID() (string, int64) { return s.trname, s.sessID }
func (s *Stream) String() string { return s.lid }
func (s *Stream) Terminated() (terminated bool) {
s.term.mu.Lock()
terminated = s.term.terminated
s.term.mu.Unlock()
return
}
func (s *Stream) terminate() {
s.term.mu.Lock()
cmn.Assert(!s.term.terminated)
s.term.terminated = true
s.Stop()
hdr := Header{ObjAttrs: ObjectAttrs{Size: lastMarker}}
obj := Obj{Hdr: hdr}
s.cmplCh <- cmpl{obj, s.term.err}
s.term.mu.Unlock()
// Remove stream after lock because we could deadlock between `do()`
// (which checks for `Terminated` status) and this function which
// would be under lock.
gc.remove(s)
if s.compressed() {
s.lz4s.sgl.Free()
if s.lz4s.zw != nil {
s.lz4s.zw.Reset(nil)
}
}
}
func (s *Stream) TermInfo() (string, error) {
if s.Terminated() && *s.term.reason == "" {
if s.term.err == nil {
s.term.err = fmt.Errorf(reasonUnknown)
}
*s.term.reason = reasonUnknown
}
return *s.term.reason, s.term.err
}
func (s *Stream) GetStats() (stats Stats) {
// byte-num transfer stats
stats.Num.Store(s.stats.Num.Load())
stats.Offset.Store(s.stats.Offset.Load())
stats.Size.Store(s.stats.Size.Load())
stats.CompressedSize.Store(s.stats.CompressedSize.Load())
return
}
func (hdr *Header) IsLast() bool { return hdr.ObjAttrs.Size == lastMarker }
func (hdr *Header) IsIdleTick() bool { return hdr.ObjAttrs.Size == tickMarker }
func (hdr *Header) IsHeaderOnly() bool { return hdr.ObjAttrs.Size == 0 || hdr.IsLast() }
//
// internal methods including the sending and completing loops below, each running in its own goroutine
//
func (s *Stream) sendLoop(dryrun bool) {
for {
if s.sessST.Load() == active {
if dryrun {
s.dryrun()
} else if err := s.doRequest(); err != nil {
*s.term.reason = reasonError
s.term.err = err
break
}
}
if !s.isNextReq() {
break
}
}
s.terminate()
s.wg.Done()
// handle termination that is caused by anything other than Fin()
if *s.term.reason != endOfStream {
if *s.term.reason == reasonStopped {
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: stopped", s)
}
} else {
glog.Errorf("%s: terminating (%s, %v)", s, *s.term.reason, s.term.err)
}
// first, wait for the SCQ/cmplCh to empty
s.wg.Wait()
// second, handle the last send that was interrupted
if s.sendoff.obj.Reader != nil {
obj := &s.sendoff.obj
s.objDone(obj, s.term.err)
}
// finally, handle pending SQ
for obj := range s.workCh {
s.objDone(&obj, s.term.err)
}
}
}
func (s *Stream) cmplLoop() {
for {
cmpl, ok := <-s.cmplCh
obj := &cmpl.obj
if !ok || obj.Hdr.IsLast() {
break
}
s.objDone(&cmpl.obj, cmpl.err)
}
s.wg.Done()
}
// refcount, invoke Sendcallback, and *always* close the reader
func (s *Stream) objDone(obj *Obj, err error) {
var rc int64
if obj.prc != nil {
rc = obj.prc.Dec()
cmn.Assert(rc >= 0) // remove
}
// SCQ completion callback
if rc == 0 {
if obj.Callback != nil {
obj.Callback(obj.Hdr, obj.Reader, obj.CmplPtr, err)
} else if s.callback != nil {
s.callback(obj.Hdr, obj.Reader, obj.CmplPtr, err)
}
}
if obj.Reader != nil {
obj.Reader.Close() // NOTE: always closing
}
}
func (s *Stream) isNextReq() (next bool) {
for {
select {
case <-s.lastCh.Listen():
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: end-of-stream", s)
}
*s.term.reason = endOfStream
return
case <-s.stopCh.Listen():
glog.Infof("%s: stopped", s)
*s.term.reason = reasonStopped
return
case <-s.postCh:
s.sessST.Store(active)
next = true // initiate new HTTP/TCP session
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: active <- posted", s)
}
return
}
}
}
func (s *Stream) doRequest() (err error) {
var (
body io.Reader = s
)
s.Numcur, s.Sizecur = 0, 0
if s.compressed() {
s.lz4s.sgl.Reset()
if s.lz4s.zw == nil {
s.lz4s.zw = lz4.NewWriter(s.lz4s.sgl)
} else {
s.lz4s.zw.Reset(s.lz4s.sgl)
}
// lz4 framing spec at http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html
s.lz4s.zw.Header.BlockChecksum = false
s.lz4s.zw.Header.NoChecksum = !s.lz4s.frameChecksum
s.lz4s.zw.Header.BlockMaxSize = s.lz4s.blockMaxSize
body = &s.lz4s
}
return s.do(body)
}
// as io.Reader
func (s *Stream) Read(b []byte) (n int, err error) {
s.time.inSend.Store(true) // indication for Collector to delay cleanup
obj := &s.sendoff.obj
if obj.Reader != nil { // have object - fast path
if s.sendoff.dod != 0 {
if !obj.Hdr.IsHeaderOnly() {
return s.sendData(b)
}
if !obj.Hdr.IsLast() {
s.eoObj(nil)
} else {
err = io.EOF
return
}
} else {
return s.sendHdr(b)
}
}
repeat:
select {
case s.sendoff.obj = <-s.workCh: // next object OR idle tick
if s.sendoff.obj.Hdr.IsIdleTick() {
if len(s.workCh) > 0 {
goto repeat
}
return s.deactivate()
}
l := s.insHeader(s.sendoff.obj.Hdr)
s.header = s.maxheader[:l]
return s.sendHdr(b)
case <-s.stopCh.Listen():
num := s.stats.Num.Load()
glog.Infof("%s: stopped (%d/%d)", s, s.Numcur, num)
err = io.EOF
return
}
}
func (s *Stream) deactivate() (n int, err error) {
err = io.EOF
if glog.FastV(4, glog.SmoduleTransport) {
num := s.stats.Num.Load()
glog.Infof("%s: connection teardown (%d/%d)", s, s.Numcur, num)
}
return
}
func (s *Stream) sendHdr(b []byte) (n int, err error) {
n = copy(b, s.header[s.sendoff.off:])
s.sendoff.off += int64(n)
if s.sendoff.off >= int64(len(s.header)) {
cmn.Assert(s.sendoff.off == int64(len(s.header)))
s.stats.Offset.Add(s.sendoff.off)
if glog.FastV(4, glog.SmoduleTransport) {
num := s.stats.Num.Load()
glog.Infof("%s: hlen=%d (%d/%d)", s, s.sendoff.off, s.Numcur, num)
}
s.sendoff.dod = s.sendoff.off
s.sendoff.off = 0
if s.sendoff.obj.Hdr.IsLast() {
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: sent last", s)
}
err = io.EOF
s.lastCh.Close()
}
} else if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: split header: copied %d < %d hlen", s, s.sendoff.off, len(s.header))
}
return
}
func (s *Stream) sendData(b []byte) (n int, err error) {
var (
obj = &s.sendoff.obj
objSize = obj.Hdr.ObjAttrs.Size
)
n, err = obj.Reader.Read(b)
s.sendoff.off += int64(n)
if err != nil {
if err == io.EOF {
if s.sendoff.off < objSize {
return n, fmt.Errorf("%s: read (%d) shorter than expected (%d)", s, s.sendoff.off, objSize)
}
err = nil
}
s.eoObj(err)
} else if s.sendoff.off >= objSize {
s.eoObj(err)
}
return
}
//
// end-of-object: updates stats, reset idle timeout, and post completion
// NOTE: reader.Close() is done by the completion handling code objDone
//
func (s *Stream) eoObj(err error) {
var obj = &s.sendoff.obj
s.Sizecur += s.sendoff.off
s.stats.Offset.Add(s.sendoff.off)
if err != nil {
goto exit
}
if s.sendoff.off != obj.Hdr.ObjAttrs.Size {
err = fmt.Errorf("%s: obj %s/%s offset %d != %d size",
s, s.sendoff.obj.Hdr.Bck, s.sendoff.obj.Hdr.ObjName, s.sendoff.off, obj.Hdr.ObjAttrs.Size)
goto exit
}
s.stats.Size.Add(obj.Hdr.ObjAttrs.Size)
s.Numcur++
s.stats.Num.Inc()
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: sent size=%d (%d/%d): %s", s, obj.Hdr.ObjAttrs.Size, s.Numcur, s.stats.Num.Load(), obj.Hdr.ObjName)
}
exit:
if err != nil {
glog.Errorln(err)
}
// next completion => SCQ
s.cmplCh <- cmpl{s.sendoff.obj, err}
s.sendoff = sendoff{}
}
//
// stream helpers
//
func (s *Stream) insHeader(hdr Header) (l int) {
l = cmn.SizeofI64 * 2
l = insString(l, s.maxheader, hdr.Bck.Name)
l = insString(l, s.maxheader, hdr.ObjName)
l = insString(l, s.maxheader, hdr.Bck.Provider)
l = insString(l, s.maxheader, hdr.Bck.Ns.Name)
l = insString(l, s.maxheader, hdr.Bck.Ns.UUID)
l = insByte(l, s.maxheader, hdr.Opaque)
l = insAttrs(l, s.maxheader, hdr.ObjAttrs)
hlen := l - cmn.SizeofI64*2
insInt64(0, s.maxheader, int64(hlen))
checksum := xoshiro256.Hash(uint64(hlen))
insUint64(cmn.SizeofI64, s.maxheader, checksum)
return
}
func insString(off int, to []byte, str string) int {
return insByte(off, to, []byte(str))
}
func insByte(off int, to, b []byte) int {
var l = len(b)
binary.BigEndian.PutUint64(to[off:], uint64(l))
off += cmn.SizeofI64
n := copy(to[off:], b)
cmn.Assert(n == l)
return off + l
}
func insInt64(off int, to []byte, i int64) int {
return insUint64(off, to, uint64(i))
}
func insUint64(off int, to []byte, i uint64) int {
binary.BigEndian.PutUint64(to[off:], i)
return off + cmn.SizeofI64
}
func insAttrs(off int, to []byte, attr ObjectAttrs) int {
off = insInt64(off, to, attr.Size)
off = insInt64(off, to, attr.Atime)
off = insString(off, to, attr.CksumType)
off = insString(off, to, attr.CksumValue)
off = insString(off, to, attr.Version)
return off
}
//
// dry-run ---------------------------
//
func (s *Stream) | () {
buf := make([]byte, cmn.KiB*32)
scloser := ioutil.NopCloser(s)
it := iterator{trname: s.trname, body: scloser, headerBuf: make([]byte, maxHeaderSize)}
for {
objReader, _, err := it.next()
if objReader != nil {
written, _ := io.CopyBuffer(ioutil.Discard, objReader, buf)
cmn.Assert(written == objReader.hdr.ObjAttrs.Size)
continue
}
if err != nil {
break
}
}
}
//
// Stats ---------------------------
//
func (stats *Stats) CompressionRatio() float64 {
bytesRead := stats.Offset.Load()
bytesSent := stats.CompressedSize.Load()
return float64(bytesRead) / float64(bytesSent)
}
//
// nopReadCloser ---------------------------
//
func (r *nopReadCloser) Read([]byte) (n int, err error) { return }
func (r *nopReadCloser) Close() error { return nil }
//
// lz4Stream ---------------------------
//
func (lz4s *lz4Stream) Read(b []byte) (n int, err error) {
var (
sendoff = &lz4s.s.sendoff
last = sendoff.obj.Hdr.IsLast()
retry = 64 // insist on returning n > 0 (note that lz4 compresses /blocks/)
)
if lz4s.sgl.Len() > 0 {
lz4s.zw.Flush()
n, err = lz4s.sgl.Read(b)
if err == io.EOF { // reusing/rewinding this buf multiple times
err = nil
}
goto ex
}
re:
n, err = lz4s.s.Read(b)
_, _ = lz4s.zw.Write(b[:n])
if last {
lz4s.zw.Flush()
retry = 0
} else if lz4s.s.sendoff.obj.Reader == nil /*eoObj*/ || err != nil {
lz4s.zw.Flush()
retry = 0
}
n, _ = lz4s.sgl.Read(b)
if n == 0 {
if retry > 0 {
retry--
runtime.Gosched()
goto re
}
lz4s.zw.Flush()
n, _ = lz4s.sgl.Read(b)
}
ex:
lz4s.s.stats.CompressedSize.Add(int64(n))
if lz4s.sgl.Len() == 0 {
lz4s.sgl.Reset()
}
if last && err == nil {
err = io.EOF
}
return
}
| dryrun | identifier_name |
send.go | // Package transport provides streaming object-based transport over http for intra-cluster continuous
// intra-cluster communications (see README for details and usage example).
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*/
package transport
import (
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"net/url"
"os"
"path"
"runtime"
"strconv"
"sync"
"time"
"unsafe"
"github.com/NVIDIA/aistore/3rdparty/atomic"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/memsys"
"github.com/NVIDIA/aistore/xoshiro256"
lz4 "github.com/pierrec/lz4/v3"
)
// transport defaults
const (
maxHeaderSize = 1024
lastMarker = math.MaxInt64
tickMarker = math.MaxInt64 ^ 0xa5a5a5a5
tickUnit = time.Second
defaultIdleOut = time.Second * 2
burstNum = 32 // default max num objects that can be posted for sending without any back-pressure
)
// stream TCP/HTTP session: inactive <=> active transitions
const (
inactive = iota
active
)
// termination: reasons
const (
reasonUnknown = "unknown"
reasonError = "error"
endOfStream = "end-of-stream"
reasonStopped = "stopped"
)
// API types
type (
Stream struct {
client Client // http client this send-stream will use
// user-defined & queryable
toURL, trname string // http endpoint
sessID int64 // stream session ID
sessST atomic.Int64 // state of the TCP/HTTP session: active (connected) | inactive (disconnected)
stats Stats // stream stats
Numcur, Sizecur int64 // gets reset to zero upon each timeout
// internals
lid string // log prefix
workCh chan Obj // aka SQ: next object to stream
cmplCh chan cmpl // aka SCQ; note that SQ and SCQ together form a FIFO
lastCh *cmn.StopCh // end of stream
stopCh *cmn.StopCh // stop/abort stream
postCh chan struct{} // to indicate that workCh has work
callback SendCallback // to free SGLs, close files, etc.
time struct {
start atomic.Int64 // to support idle(%)
idleOut time.Duration // idle timeout
inSend atomic.Bool // true upon Send() or Read() - info for Collector to delay cleanup
ticks int // num 1s ticks until idle timeout
index int // heap stuff
}
wg sync.WaitGroup
sendoff sendoff
maxheader []byte // max header buffer
header []byte // object header - slice of the maxheader with bucket/objName, etc. fields
term struct {
mu sync.Mutex
terminated bool
err error
reason *string
}
lz4s lz4Stream
}
// advanced usage: additional stream control
Extra struct {
IdleTimeout time.Duration // stream idle timeout: causes PUT to terminate (and renew on the next obj send)
Callback SendCallback // typical usage: to free SGLs, close files, etc.
Compression string // see CompressAlways, etc. enum
MMSA *memsys.MMSA // compression-related buffering
Config *cmn.Config
}
// stream stats
Stats struct {
Num atomic.Int64 // number of transferred objects including zero size (header-only) objects
Size atomic.Int64 // transferred object size (does not include transport headers)
Offset atomic.Int64 // stream offset, in bytes
CompressedSize atomic.Int64 // compressed size (NOTE: converges to the actual compressed size over time)
}
EndpointStats map[uint64]*Stats // all stats for a given http endpoint defined by a tuple(network, trname) by session ID
// object attrs
ObjectAttrs struct {
Atime int64 // access time - nanoseconds since UNIX epoch
Size int64 // size of objects in bytes
CksumType string // checksum type
CksumValue string // checksum of the object produced by given checksum type
Version string // version of the object
}
// object header
Header struct {
Bck cmn.Bck
ObjName string
ObjAttrs ObjectAttrs // attributes/metadata of the sent object
Opaque []byte // custom control (optional)
}
// object to transmit
Obj struct {
Hdr Header // object header
Reader io.ReadCloser // reader, to read the object, and close when done
Callback SendCallback // callback fired when sending is done OR when the stream terminates (see term.reason)
CmplPtr unsafe.Pointer // local pointer that gets returned to the caller via Send completion callback
// private
prc *atomic.Int64 // if present, ref-counts num sent objects to call SendCallback only once
}
// object-sent callback that has the following signature can optionally be defined on a:
// a) per-stream basis (via NewStream constructor - see Extra struct above)
// b) for a given object that is being sent (for instance, to support a call-per-batch semantics)
// Naturally, object callback "overrides" the per-stream one: when object callback is defined
// (i.e., non-nil), the stream callback is ignored/skipped.
// NOTE: if defined, the callback executes asynchronously as far as the sending part is concerned
SendCallback func(Header, io.ReadCloser, unsafe.Pointer, error)
StreamCollector struct {
cmn.Named
}
)
// internal types
type (
lz4Stream struct {
s *Stream
zw *lz4.Writer // orig reader => zw
sgl *memsys.SGL // zw => bb => network
blockMaxSize int // *uncompressed* block max size
frameChecksum bool // true: checksum lz4 frames
}
sendoff struct {
obj Obj
// in progress
off int64
dod int64
}
cmpl struct { // send completions => SCQ
obj Obj
err error
}
nopReadCloser struct{}
collector struct {
streams map[string]*Stream
heap []*Stream
ticker *time.Ticker
stopCh *cmn.StopCh
ctrlCh chan ctrl
}
ctrl struct { // add/del channel to/from collector
s *Stream
add bool
}
)
var (
nopRC = &nopReadCloser{} // read and close stubs
nextSID = *atomic.NewInt64(100) // unique session IDs starting from 101
sc = &StreamCollector{} // idle timer and house-keeping (slow path)
gc *collector // real stream collector
)
func (extra *Extra) compressed() bool {
return extra.Compression != "" && extra.Compression != cmn.CompressNever
}
//
// API methods
//
func NewStream(client Client, toURL string, extra *Extra) (s *Stream) {
u, err := url.Parse(toURL)
if err != nil {
glog.Errorf("Failed to parse %s: %v", toURL, err)
return
}
s = &Stream{client: client, toURL: toURL}
s.time.idleOut = defaultIdleOut
if extra != nil {
s.callback = extra.Callback
if extra.IdleTimeout > 0 {
s.time.idleOut = extra.IdleTimeout
}
if extra.compressed() {
config := extra.Config
if config == nil {
config = cmn.GCO.Get()
}
s.lz4s.s = s
s.lz4s.blockMaxSize = config.Compression.BlockMaxSize
s.lz4s.frameChecksum = config.Compression.Checksum
mem := extra.MMSA
if mem == nil {
mem = memsys.DefaultPageMM()
glog.Warningln("Using global memory manager for streaming inline compression")
}
if s.lz4s.blockMaxSize >= memsys.MaxPageSlabSize {
s.lz4s.sgl = mem.NewSGL(memsys.MaxPageSlabSize, memsys.MaxPageSlabSize)
} else {
s.lz4s.sgl = mem.NewSGL(cmn.KiB*64, cmn.KiB*64)
}
}
}
if s.time.idleOut < tickUnit {
s.time.idleOut = tickUnit
}
s.time.ticks = int(s.time.idleOut / tickUnit)
s.sessID = nextSID.Inc()
s.trname = path.Base(u.Path)
if !s.compressed() {
s.lid = fmt.Sprintf("%s[%d]", s.trname, s.sessID)
} else {
s.lid = fmt.Sprintf("%s[%d[%s]]", s.trname, s.sessID, cmn.B2S(int64(s.lz4s.blockMaxSize), 0))
}
// burst size: the number of objects the caller is permitted to post for sending
// without experiencing any sort of back-pressure
burst := burstNum
if a := os.Getenv("AIS_STREAM_BURST_NUM"); a != "" {
if burst64, err := strconv.ParseInt(a, 10, 0); err != nil {
glog.Errorf("%s: error parsing env AIS_STREAM_BURST_NUM=%s: %v", s, a, err)
burst = burstNum
} else {
burst = int(burst64)
}
}
s.workCh = make(chan Obj, burst) // Send Qeueue or SQ
s.cmplCh = make(chan cmpl, burst) // Send Completion Queue or SCQ
s.lastCh = cmn.NewStopCh()
s.stopCh = cmn.NewStopCh()
s.postCh = make(chan struct{}, 1)
s.maxheader = make([]byte, maxHeaderSize) // NOTE: must be large enough to accommodate all max-size Header
s.sessST.Store(inactive) // NOTE: initiate HTTP session upon arrival of the first object
s.time.start.Store(time.Now().UnixNano())
s.term.reason = new(string)
s.wg.Add(2)
var dryrun bool
if a := os.Getenv("AIS_STREAM_DRY_RUN"); a != "" {
if dryrun, err = strconv.ParseBool(a); err != nil {
glog.Errorf("%s: error parsing env AIS_STREAM_DRY_RUN=%s: %v", s, a, err)
}
cmn.Assert(dryrun || client != nil)
}
go s.sendLoop(dryrun) // handle SQ
go s.cmplLoop() // handle SCQ
gc.ctrlCh <- ctrl{s, true /* collect */}
return
}
func (s *Stream) compressed() bool { return s.lz4s.s == s }
// Asynchronously send an object defined by its header and its reader.
// ---------------------------------------------------------------------------------------
//
// The sending pipeline is implemented as a pair (SQ, SCQ) where the former is a send queue
// realized as workCh, and the latter is a send completion queue (cmplCh).
// Together, SQ and SCQ form a FIFO as far as ordering of transmitted objects.
//
// NOTE: header-only objects are supported; when there's no data to send (that is,
// when the header's Dsize field is set to zero), the reader is not required and the
// corresponding argument in Send() can be set to nil.
//
// NOTE: object reader is always closed by the code that handles send completions.
// In the case when SendCallback is provided (i.e., non-nil), the closing is done
// right after calling this callback - see objDone below for details.
//
// NOTE: Optional reference counting is also done by (and in) the objDone, so that the
// SendCallback gets called if and only when the refcount (if provided i.e., non-nil)
// reaches zero.
//
// NOTE: For every transmission of every object there's always an objDone() completion
// (with its refcounting and reader-closing). This holds true in all cases including
// network errors that may cause sudden and instant termination of the underlying
// stream(s).
//
// ---------------------------------------------------------------------------------------
func (s *Stream) Send(obj Obj) (err error) {
s.time.inSend.Store(true) // an indication for Collector to postpone cleanup
hdr := &obj.Hdr
if s.Terminated() {
err = fmt.Errorf("%s terminated(%s, %v), cannot send [%s/%s(%d)]",
s, *s.term.reason, s.term.err, hdr.Bck, hdr.ObjName, hdr.ObjAttrs.Size)
glog.Errorln(err)
return
}
if s.sessST.CAS(inactive, active) {
s.postCh <- struct{}{}
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: inactive => active", s)
}
}
// next object => SQ
if obj.Reader == nil {
cmn.Assert(hdr.IsHeaderOnly())
obj.Reader = nopRC
}
s.workCh <- obj
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: send %s/%s(%d)[sq=%d]", s, hdr.Bck, hdr.ObjName, hdr.ObjAttrs.Size, len(s.workCh))
}
return
}
func (s *Stream) Fin() {
_ = s.Send(Obj{Hdr: Header{ObjAttrs: ObjectAttrs{Size: lastMarker}}})
s.wg.Wait()
}
func (s *Stream) Stop() { s.stopCh.Close() }
func (s *Stream) URL() string { return s.toURL }
func (s *Stream) ID() (string, int64) { return s.trname, s.sessID }
func (s *Stream) String() string { return s.lid }
func (s *Stream) Terminated() (terminated bool) {
s.term.mu.Lock()
terminated = s.term.terminated
s.term.mu.Unlock()
return
}
func (s *Stream) terminate() {
s.term.mu.Lock()
cmn.Assert(!s.term.terminated)
s.term.terminated = true
s.Stop()
hdr := Header{ObjAttrs: ObjectAttrs{Size: lastMarker}}
obj := Obj{Hdr: hdr}
s.cmplCh <- cmpl{obj, s.term.err}
s.term.mu.Unlock()
// Remove stream after lock because we could deadlock between `do()`
// (which checks for `Terminated` status) and this function which
// would be under lock.
gc.remove(s)
if s.compressed() {
s.lz4s.sgl.Free()
if s.lz4s.zw != nil {
s.lz4s.zw.Reset(nil)
}
}
}
func (s *Stream) TermInfo() (string, error) {
if s.Terminated() && *s.term.reason == "" {
if s.term.err == nil {
s.term.err = fmt.Errorf(reasonUnknown)
}
*s.term.reason = reasonUnknown
}
return *s.term.reason, s.term.err
}
func (s *Stream) GetStats() (stats Stats) {
// byte-num transfer stats
stats.Num.Store(s.stats.Num.Load())
stats.Offset.Store(s.stats.Offset.Load())
stats.Size.Store(s.stats.Size.Load())
stats.CompressedSize.Store(s.stats.CompressedSize.Load())
return
}
func (hdr *Header) IsLast() bool { return hdr.ObjAttrs.Size == lastMarker }
func (hdr *Header) IsIdleTick() bool { return hdr.ObjAttrs.Size == tickMarker }
func (hdr *Header) IsHeaderOnly() bool { return hdr.ObjAttrs.Size == 0 || hdr.IsLast() }
//
// internal methods including the sending and completing loops below, each running in its own goroutine
//
func (s *Stream) sendLoop(dryrun bool) {
for {
if s.sessST.Load() == active {
if dryrun {
s.dryrun()
} else if err := s.doRequest(); err != nil {
*s.term.reason = reasonError
s.term.err = err
break
}
}
if !s.isNextReq() {
break
}
}
s.terminate()
s.wg.Done()
// handle termination that is caused by anything other than Fin()
if *s.term.reason != endOfStream {
if *s.term.reason == reasonStopped {
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: stopped", s)
}
} else {
glog.Errorf("%s: terminating (%s, %v)", s, *s.term.reason, s.term.err)
}
// first, wait for the SCQ/cmplCh to empty
s.wg.Wait()
// second, handle the last send that was interrupted
if s.sendoff.obj.Reader != nil {
obj := &s.sendoff.obj
s.objDone(obj, s.term.err)
}
// finally, handle pending SQ
for obj := range s.workCh {
s.objDone(&obj, s.term.err)
}
}
}
func (s *Stream) cmplLoop() {
for {
cmpl, ok := <-s.cmplCh
obj := &cmpl.obj
if !ok || obj.Hdr.IsLast() {
break
}
s.objDone(&cmpl.obj, cmpl.err)
}
s.wg.Done()
}
// refcount, invoke Sendcallback, and *always* close the reader
func (s *Stream) objDone(obj *Obj, err error) {
var rc int64
if obj.prc != nil {
rc = obj.prc.Dec()
cmn.Assert(rc >= 0) // remove
}
// SCQ completion callback
if rc == 0 {
if obj.Callback != nil {
obj.Callback(obj.Hdr, obj.Reader, obj.CmplPtr, err)
} else if s.callback != nil {
s.callback(obj.Hdr, obj.Reader, obj.CmplPtr, err)
}
}
if obj.Reader != nil {
obj.Reader.Close() // NOTE: always closing
}
}
func (s *Stream) isNextReq() (next bool) {
for {
select {
case <-s.lastCh.Listen():
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: end-of-stream", s)
}
*s.term.reason = endOfStream
return
case <-s.stopCh.Listen():
glog.Infof("%s: stopped", s)
*s.term.reason = reasonStopped
return
case <-s.postCh:
s.sessST.Store(active)
next = true // initiate new HTTP/TCP session
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: active <- posted", s)
}
return
}
}
}
func (s *Stream) doRequest() (err error) {
var (
body io.Reader = s
)
s.Numcur, s.Sizecur = 0, 0
if s.compressed() {
s.lz4s.sgl.Reset()
if s.lz4s.zw == nil {
s.lz4s.zw = lz4.NewWriter(s.lz4s.sgl)
} else {
s.lz4s.zw.Reset(s.lz4s.sgl)
}
// lz4 framing spec at http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html
s.lz4s.zw.Header.BlockChecksum = false
s.lz4s.zw.Header.NoChecksum = !s.lz4s.frameChecksum
s.lz4s.zw.Header.BlockMaxSize = s.lz4s.blockMaxSize
body = &s.lz4s
}
return s.do(body)
}
// as io.Reader
func (s *Stream) Read(b []byte) (n int, err error) {
s.time.inSend.Store(true) // indication for Collector to delay cleanup
obj := &s.sendoff.obj
if obj.Reader != nil { // have object - fast path
if s.sendoff.dod != 0 {
if !obj.Hdr.IsHeaderOnly() {
return s.sendData(b)
}
if !obj.Hdr.IsLast() | else {
err = io.EOF
return
}
} else {
return s.sendHdr(b)
}
}
repeat:
select {
case s.sendoff.obj = <-s.workCh: // next object OR idle tick
if s.sendoff.obj.Hdr.IsIdleTick() {
if len(s.workCh) > 0 {
goto repeat
}
return s.deactivate()
}
l := s.insHeader(s.sendoff.obj.Hdr)
s.header = s.maxheader[:l]
return s.sendHdr(b)
case <-s.stopCh.Listen():
num := s.stats.Num.Load()
glog.Infof("%s: stopped (%d/%d)", s, s.Numcur, num)
err = io.EOF
return
}
}
func (s *Stream) deactivate() (n int, err error) {
err = io.EOF
if glog.FastV(4, glog.SmoduleTransport) {
num := s.stats.Num.Load()
glog.Infof("%s: connection teardown (%d/%d)", s, s.Numcur, num)
}
return
}
func (s *Stream) sendHdr(b []byte) (n int, err error) {
n = copy(b, s.header[s.sendoff.off:])
s.sendoff.off += int64(n)
if s.sendoff.off >= int64(len(s.header)) {
cmn.Assert(s.sendoff.off == int64(len(s.header)))
s.stats.Offset.Add(s.sendoff.off)
if glog.FastV(4, glog.SmoduleTransport) {
num := s.stats.Num.Load()
glog.Infof("%s: hlen=%d (%d/%d)", s, s.sendoff.off, s.Numcur, num)
}
s.sendoff.dod = s.sendoff.off
s.sendoff.off = 0
if s.sendoff.obj.Hdr.IsLast() {
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: sent last", s)
}
err = io.EOF
s.lastCh.Close()
}
} else if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: split header: copied %d < %d hlen", s, s.sendoff.off, len(s.header))
}
return
}
func (s *Stream) sendData(b []byte) (n int, err error) {
var (
obj = &s.sendoff.obj
objSize = obj.Hdr.ObjAttrs.Size
)
n, err = obj.Reader.Read(b)
s.sendoff.off += int64(n)
if err != nil {
if err == io.EOF {
if s.sendoff.off < objSize {
return n, fmt.Errorf("%s: read (%d) shorter than expected (%d)", s, s.sendoff.off, objSize)
}
err = nil
}
s.eoObj(err)
} else if s.sendoff.off >= objSize {
s.eoObj(err)
}
return
}
//
// end-of-object: updates stats, reset idle timeout, and post completion
// NOTE: reader.Close() is done by the completion handling code objDone
//
func (s *Stream) eoObj(err error) {
var obj = &s.sendoff.obj
s.Sizecur += s.sendoff.off
s.stats.Offset.Add(s.sendoff.off)
if err != nil {
goto exit
}
if s.sendoff.off != obj.Hdr.ObjAttrs.Size {
err = fmt.Errorf("%s: obj %s/%s offset %d != %d size",
s, s.sendoff.obj.Hdr.Bck, s.sendoff.obj.Hdr.ObjName, s.sendoff.off, obj.Hdr.ObjAttrs.Size)
goto exit
}
s.stats.Size.Add(obj.Hdr.ObjAttrs.Size)
s.Numcur++
s.stats.Num.Inc()
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: sent size=%d (%d/%d): %s", s, obj.Hdr.ObjAttrs.Size, s.Numcur, s.stats.Num.Load(), obj.Hdr.ObjName)
}
exit:
if err != nil {
glog.Errorln(err)
}
// next completion => SCQ
s.cmplCh <- cmpl{s.sendoff.obj, err}
s.sendoff = sendoff{}
}
//
// stream helpers
//
func (s *Stream) insHeader(hdr Header) (l int) {
l = cmn.SizeofI64 * 2
l = insString(l, s.maxheader, hdr.Bck.Name)
l = insString(l, s.maxheader, hdr.ObjName)
l = insString(l, s.maxheader, hdr.Bck.Provider)
l = insString(l, s.maxheader, hdr.Bck.Ns.Name)
l = insString(l, s.maxheader, hdr.Bck.Ns.UUID)
l = insByte(l, s.maxheader, hdr.Opaque)
l = insAttrs(l, s.maxheader, hdr.ObjAttrs)
hlen := l - cmn.SizeofI64*2
insInt64(0, s.maxheader, int64(hlen))
checksum := xoshiro256.Hash(uint64(hlen))
insUint64(cmn.SizeofI64, s.maxheader, checksum)
return
}
func insString(off int, to []byte, str string) int {
return insByte(off, to, []byte(str))
}
func insByte(off int, to, b []byte) int {
var l = len(b)
binary.BigEndian.PutUint64(to[off:], uint64(l))
off += cmn.SizeofI64
n := copy(to[off:], b)
cmn.Assert(n == l)
return off + l
}
func insInt64(off int, to []byte, i int64) int {
return insUint64(off, to, uint64(i))
}
func insUint64(off int, to []byte, i uint64) int {
binary.BigEndian.PutUint64(to[off:], i)
return off + cmn.SizeofI64
}
func insAttrs(off int, to []byte, attr ObjectAttrs) int {
off = insInt64(off, to, attr.Size)
off = insInt64(off, to, attr.Atime)
off = insString(off, to, attr.CksumType)
off = insString(off, to, attr.CksumValue)
off = insString(off, to, attr.Version)
return off
}
//
// dry-run ---------------------------
//
func (s *Stream) dryrun() {
buf := make([]byte, cmn.KiB*32)
scloser := ioutil.NopCloser(s)
it := iterator{trname: s.trname, body: scloser, headerBuf: make([]byte, maxHeaderSize)}
for {
objReader, _, err := it.next()
if objReader != nil {
written, _ := io.CopyBuffer(ioutil.Discard, objReader, buf)
cmn.Assert(written == objReader.hdr.ObjAttrs.Size)
continue
}
if err != nil {
break
}
}
}
//
// Stats ---------------------------
//
func (stats *Stats) CompressionRatio() float64 {
bytesRead := stats.Offset.Load()
bytesSent := stats.CompressedSize.Load()
return float64(bytesRead) / float64(bytesSent)
}
//
// nopReadCloser ---------------------------
//
func (r *nopReadCloser) Read([]byte) (n int, err error) { return }
func (r *nopReadCloser) Close() error { return nil }
//
// lz4Stream ---------------------------
//
func (lz4s *lz4Stream) Read(b []byte) (n int, err error) {
var (
sendoff = &lz4s.s.sendoff
last = sendoff.obj.Hdr.IsLast()
retry = 64 // insist on returning n > 0 (note that lz4 compresses /blocks/)
)
if lz4s.sgl.Len() > 0 {
lz4s.zw.Flush()
n, err = lz4s.sgl.Read(b)
if err == io.EOF { // reusing/rewinding this buf multiple times
err = nil
}
goto ex
}
re:
n, err = lz4s.s.Read(b)
_, _ = lz4s.zw.Write(b[:n])
if last {
lz4s.zw.Flush()
retry = 0
} else if lz4s.s.sendoff.obj.Reader == nil /*eoObj*/ || err != nil {
lz4s.zw.Flush()
retry = 0
}
n, _ = lz4s.sgl.Read(b)
if n == 0 {
if retry > 0 {
retry--
runtime.Gosched()
goto re
}
lz4s.zw.Flush()
n, _ = lz4s.sgl.Read(b)
}
ex:
lz4s.s.stats.CompressedSize.Add(int64(n))
if lz4s.sgl.Len() == 0 {
lz4s.sgl.Reset()
}
if last && err == nil {
err = io.EOF
}
return
}
| {
s.eoObj(nil)
} | conditional_block |
send.go | // Package transport provides streaming object-based transport over http for intra-cluster continuous
// intra-cluster communications (see README for details and usage example).
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*/
package transport
import (
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"net/url"
"os"
"path"
"runtime"
"strconv"
"sync"
"time"
"unsafe"
"github.com/NVIDIA/aistore/3rdparty/atomic"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/memsys"
"github.com/NVIDIA/aistore/xoshiro256"
lz4 "github.com/pierrec/lz4/v3"
)
// transport defaults
const (
maxHeaderSize = 1024
lastMarker = math.MaxInt64
tickMarker = math.MaxInt64 ^ 0xa5a5a5a5
tickUnit = time.Second
defaultIdleOut = time.Second * 2
burstNum = 32 // default max num objects that can be posted for sending without any back-pressure
)
// stream TCP/HTTP session: inactive <=> active transitions
const (
inactive = iota
active
)
// termination: reasons
const (
reasonUnknown = "unknown"
reasonError = "error"
endOfStream = "end-of-stream"
reasonStopped = "stopped"
)
// API types
type (
Stream struct {
client Client // http client this send-stream will use
// user-defined & queryable
toURL, trname string // http endpoint
sessID int64 // stream session ID
sessST atomic.Int64 // state of the TCP/HTTP session: active (connected) | inactive (disconnected)
stats Stats // stream stats
Numcur, Sizecur int64 // gets reset to zero upon each timeout
// internals
lid string // log prefix
workCh chan Obj // aka SQ: next object to stream
cmplCh chan cmpl // aka SCQ; note that SQ and SCQ together form a FIFO
lastCh *cmn.StopCh // end of stream
stopCh *cmn.StopCh // stop/abort stream
postCh chan struct{} // to indicate that workCh has work
callback SendCallback // to free SGLs, close files, etc.
time struct {
start atomic.Int64 // to support idle(%)
idleOut time.Duration // idle timeout
inSend atomic.Bool // true upon Send() or Read() - info for Collector to delay cleanup
ticks int // num 1s ticks until idle timeout
index int // heap stuff
}
wg sync.WaitGroup
sendoff sendoff
maxheader []byte // max header buffer
header []byte // object header - slice of the maxheader with bucket/objName, etc. fields
term struct {
mu sync.Mutex
terminated bool
err error
reason *string
}
lz4s lz4Stream
}
// advanced usage: additional stream control
Extra struct {
IdleTimeout time.Duration // stream idle timeout: causes PUT to terminate (and renew on the next obj send)
Callback SendCallback // typical usage: to free SGLs, close files, etc.
Compression string // see CompressAlways, etc. enum
MMSA *memsys.MMSA // compression-related buffering
Config *cmn.Config
}
// stream stats
Stats struct {
Num atomic.Int64 // number of transferred objects including zero size (header-only) objects
Size atomic.Int64 // transferred object size (does not include transport headers)
Offset atomic.Int64 // stream offset, in bytes
CompressedSize atomic.Int64 // compressed size (NOTE: converges to the actual compressed size over time)
}
EndpointStats map[uint64]*Stats // all stats for a given http endpoint defined by a tuple(network, trname) by session ID
// object attrs
ObjectAttrs struct {
Atime int64 // access time - nanoseconds since UNIX epoch
Size int64 // size of objects in bytes
CksumType string // checksum type
CksumValue string // checksum of the object produced by given checksum type
Version string // version of the object
}
// object header
Header struct {
Bck cmn.Bck
ObjName string
ObjAttrs ObjectAttrs // attributes/metadata of the sent object
Opaque []byte // custom control (optional)
}
// object to transmit
Obj struct {
Hdr Header // object header
Reader io.ReadCloser // reader, to read the object, and close when done
Callback SendCallback // callback fired when sending is done OR when the stream terminates (see term.reason)
CmplPtr unsafe.Pointer // local pointer that gets returned to the caller via Send completion callback
// private
prc *atomic.Int64 // if present, ref-counts num sent objects to call SendCallback only once
}
// object-sent callback that has the following signature can optionally be defined on a:
// a) per-stream basis (via NewStream constructor - see Extra struct above)
// b) for a given object that is being sent (for instance, to support a call-per-batch semantics)
// Naturally, object callback "overrides" the per-stream one: when object callback is defined
// (i.e., non-nil), the stream callback is ignored/skipped.
// NOTE: if defined, the callback executes asynchronously as far as the sending part is concerned
SendCallback func(Header, io.ReadCloser, unsafe.Pointer, error)
StreamCollector struct {
cmn.Named
}
)
// internal types
type (
lz4Stream struct {
s *Stream
zw *lz4.Writer // orig reader => zw
sgl *memsys.SGL // zw => bb => network
blockMaxSize int // *uncompressed* block max size
frameChecksum bool // true: checksum lz4 frames
}
sendoff struct {
obj Obj
// in progress
off int64
dod int64
}
cmpl struct { // send completions => SCQ
obj Obj
err error
}
nopReadCloser struct{}
collector struct {
streams map[string]*Stream
heap []*Stream
ticker *time.Ticker
stopCh *cmn.StopCh
ctrlCh chan ctrl
}
ctrl struct { // add/del channel to/from collector
s *Stream
add bool
}
)
var (
nopRC = &nopReadCloser{} // read and close stubs
nextSID = *atomic.NewInt64(100) // unique session IDs starting from 101
sc = &StreamCollector{} // idle timer and house-keeping (slow path)
gc *collector // real stream collector
)
// compressed reports whether this Extra configuration enables inline LZ4
// compression: a compression mode must be set and must not be "never".
func (extra *Extra) compressed() bool {
	switch extra.Compression {
	case "", cmn.CompressNever:
		return false
	default:
		return true
	}
}
//
// API methods
//
// NewStream creates a send-stream to the given HTTP endpoint and starts its
// two goroutines: sendLoop (drains SQ/workCh) and cmplLoop (drains SCQ/cmplCh).
// The optional `extra` overrides the idle timeout, sets a per-stream completion
// callback, and enables inline LZ4 compression.
// Environment overrides: AIS_STREAM_BURST_NUM (queue depth) and
// AIS_STREAM_DRY_RUN (discard data instead of transmitting).
func NewStream(client Client, toURL string, extra *Extra) (s *Stream) {
	u, err := url.Parse(toURL)
	if err != nil {
		glog.Errorf("Failed to parse %s: %v", toURL, err)
		return
	}
	s = &Stream{client: client, toURL: toURL}
	s.time.idleOut = defaultIdleOut
	if extra != nil {
		s.callback = extra.Callback
		if extra.IdleTimeout > 0 {
			s.time.idleOut = extra.IdleTimeout
		}
		if extra.compressed() {
			config := extra.Config
			if config == nil {
				config = cmn.GCO.Get()
			}
			// NOTE: lz4s.s == s doubles as the "compression enabled" marker, see compressed()
			s.lz4s.s = s
			s.lz4s.blockMaxSize = config.Compression.BlockMaxSize
			s.lz4s.frameChecksum = config.Compression.Checksum
			mem := extra.MMSA
			if mem == nil {
				mem = memsys.DefaultPageMM()
				glog.Warningln("Using global memory manager for streaming inline compression")
			}
			if s.lz4s.blockMaxSize >= memsys.MaxPageSlabSize {
				s.lz4s.sgl = mem.NewSGL(memsys.MaxPageSlabSize, memsys.MaxPageSlabSize)
			} else {
				s.lz4s.sgl = mem.NewSGL(cmn.KiB*64, cmn.KiB*64)
			}
		}
	}
	// clamp the idle timeout to at least one collector tick
	if s.time.idleOut < tickUnit {
		s.time.idleOut = tickUnit
	}
	s.time.ticks = int(s.time.idleOut / tickUnit)
	s.sessID = nextSID.Inc()
	s.trname = path.Base(u.Path)
	if !s.compressed() {
		s.lid = fmt.Sprintf("%s[%d]", s.trname, s.sessID)
	} else {
		s.lid = fmt.Sprintf("%s[%d[%s]]", s.trname, s.sessID, cmn.B2S(int64(s.lz4s.blockMaxSize), 0))
	}
	// burst size: the number of objects the caller is permitted to post for sending
	// without experiencing any sort of back-pressure
	burst := burstNum
	if a := os.Getenv("AIS_STREAM_BURST_NUM"); a != "" {
		if burst64, err := strconv.ParseInt(a, 10, 0); err != nil {
			glog.Errorf("%s: error parsing env AIS_STREAM_BURST_NUM=%s: %v", s, a, err)
			burst = burstNum
		} else {
			burst = int(burst64)
		}
	}
	s.workCh = make(chan Obj, burst)  // Send Queue or SQ
	s.cmplCh = make(chan cmpl, burst) // Send Completion Queue or SCQ
	s.lastCh = cmn.NewStopCh()
	s.stopCh = cmn.NewStopCh()
	s.postCh = make(chan struct{}, 1)
	s.maxheader = make([]byte, maxHeaderSize) // NOTE: must be large enough to accommodate all max-size Header
	s.sessST.Store(inactive)                  // NOTE: initiate HTTP session upon arrival of the first object
	s.time.start.Store(time.Now().UnixNano())
	s.term.reason = new(string)
	s.wg.Add(2) // matched by wg.Done in sendLoop and cmplLoop
	var dryrun bool
	if a := os.Getenv("AIS_STREAM_DRY_RUN"); a != "" {
		if dryrun, err = strconv.ParseBool(a); err != nil {
			glog.Errorf("%s: error parsing env AIS_STREAM_DRY_RUN=%s: %v", s, a, err)
		}
		cmn.Assert(dryrun || client != nil)
	}
	go s.sendLoop(dryrun) // handle SQ
	go s.cmplLoop()       // handle SCQ
	// register with the house-keeping collector (idle ticks, cleanup)
	gc.ctrlCh <- ctrl{s, true /* collect */}
	return
}
// compressed reports whether inline LZ4 compression is enabled for this stream
// (NewStream points lz4s.s back at the stream itself as the "enabled" marker).
func (s *Stream) compressed() bool { return s.lz4s.s == s }
// Asynchronously send an object defined by its header and its reader.
// ---------------------------------------------------------------------------------------
//
// The sending pipeline is implemented as a pair (SQ, SCQ) where the former is a send queue
// realized as workCh, and the latter is a send completion queue (cmplCh).
// Together, SQ and SCQ form a FIFO as far as ordering of transmitted objects.
//
// NOTE: header-only objects are supported; when there's no data to send (that is,
// when the header's Dsize field is set to zero), the reader is not required and the
// corresponding argument in Send() can be set to nil.
//
// NOTE: object reader is always closed by the code that handles send completions.
// In the case when SendCallback is provided (i.e., non-nil), the closing is done
// right after calling this callback - see objDone below for details.
//
// NOTE: Optional reference counting is also done by (and in) the objDone, so that the
// SendCallback gets called if and only when the refcount (if provided i.e., non-nil)
// reaches zero.
//
// NOTE: For every transmission of every object there's always an objDone() completion
// (with its refcounting and reader-closing). This holds true in all cases including
// network errors that may cause sudden and instant termination of the underlying
// stream(s).
//
// ---------------------------------------------------------------------------------------
// Send asynchronously enqueues the object (header + reader) onto SQ/workCh.
// It fails fast if the stream has already terminated; otherwise it (re)activates
// the HTTP/TCP session if currently inactive, and posts the object. A nil
// Reader is permitted only for header-only objects (see the NOTE block above).
func (s *Stream) Send(obj Obj) (err error) {
	s.time.inSend.Store(true) // an indication for Collector to postpone cleanup
	hdr := &obj.Hdr
	if s.Terminated() {
		err = fmt.Errorf("%s terminated(%s, %v), cannot send [%s/%s(%d)]",
			s, *s.term.reason, s.term.err, hdr.Bck, hdr.ObjName, hdr.ObjAttrs.Size)
		glog.Errorln(err)
		return
	}
	// inactive => active transition kicks sendLoop via postCh
	if s.sessST.CAS(inactive, active) {
		s.postCh <- struct{}{}
		if glog.FastV(4, glog.SmoduleTransport) {
			glog.Infof("%s: inactive => active", s)
		}
	}
	// next object => SQ
	if obj.Reader == nil {
		cmn.Assert(hdr.IsHeaderOnly())
		obj.Reader = nopRC // substitute the no-op stub so completion can always Close()
	}
	s.workCh <- obj
	if glog.FastV(4, glog.SmoduleTransport) {
		glog.Infof("%s: send %s/%s(%d)[sq=%d]", s, hdr.Bck, hdr.ObjName, hdr.ObjAttrs.Size, len(s.workCh))
	}
	return
}
// Fin gracefully terminates the stream: it enqueues the end-of-stream marker
// (Size == lastMarker) and blocks until both the send loop and the completion
// loop have exited (the two wg.Add(2) counts from NewStream).
func (s *Stream) Fin() {
	_ = s.Send(Obj{Hdr: Header{ObjAttrs: ObjectAttrs{Size: lastMarker}}})
	s.wg.Wait()
}
func (s *Stream) Stop() { s.stopCh.Close() }
func (s *Stream) URL() string |
func (s *Stream) ID() (string, int64) { return s.trname, s.sessID }
func (s *Stream) String() string { return s.lid }
// Terminated reports, under the term lock, whether the stream has terminated.
func (s *Stream) Terminated() bool {
	s.term.mu.Lock()
	defer s.term.mu.Unlock()
	return s.term.terminated
}
// terminate transitions the stream into its final state: marks it terminated
// (asserting this happens exactly once), closes stopCh, unblocks cmplLoop with
// a last-marker completion, deregisters from the collector, and frees
// compression resources. Called only from sendLoop.
func (s *Stream) terminate() {
	s.term.mu.Lock()
	cmn.Assert(!s.term.terminated)
	s.term.terminated = true
	s.Stop()
	// push a last-marker completion so cmplLoop observes IsLast() and exits
	hdr := Header{ObjAttrs: ObjectAttrs{Size: lastMarker}}
	obj := Obj{Hdr: hdr}
	s.cmplCh <- cmpl{obj, s.term.err}
	s.term.mu.Unlock()
	// Remove stream after lock because we could deadlock between `do()`
	// (which checks for `Terminated` status) and this function which
	// would be under lock.
	gc.remove(s)
	if s.compressed() {
		s.lz4s.sgl.Free()
		if s.lz4s.zw != nil {
			s.lz4s.zw.Reset(nil)
		}
	}
}
// TermInfo returns the termination reason and error. If the stream has
// terminated without a recorded reason, both are backfilled with
// reasonUnknown so callers never see an empty reason for a dead stream.
func (s *Stream) TermInfo() (string, error) {
	if s.Terminated() && *s.term.reason == "" {
		if s.term.err == nil {
			s.term.err = fmt.Errorf(reasonUnknown)
		}
		*s.term.reason = reasonUnknown
	}
	return *s.term.reason, s.term.err
}
// GetStats returns a point-in-time snapshot of the stream's counters.
// Each field is loaded atomically but the four loads together are not a
// single atomic snapshot.
func (s *Stream) GetStats() (stats Stats) {
	// byte-num transfer stats
	stats.Num.Store(s.stats.Num.Load())
	stats.Offset.Store(s.stats.Offset.Load())
	stats.Size.Store(s.stats.Size.Load())
	stats.CompressedSize.Store(s.stats.CompressedSize.Load())
	return
}
// IsLast reports whether this header is the end-of-stream marker (enqueued by Fin/terminate).
func (hdr *Header) IsLast() bool { return hdr.ObjAttrs.Size == lastMarker }

// IsIdleTick reports whether this header is an idle-timeout tick (tickMarker-sized).
func (hdr *Header) IsIdleTick() bool { return hdr.ObjAttrs.Size == tickMarker }

// IsHeaderOnly reports whether there is no payload to transmit (zero size, or the last marker).
func (hdr *Header) IsHeaderOnly() bool { return hdr.ObjAttrs.Size == 0 || hdr.IsLast() }
//
// internal methods including the sending and completing loops below, each running in its own goroutine
//
// sendLoop is the stream's transmit goroutine: while the session is active it
// runs one HTTP request (or a dry run), then waits in isNextReq for either a
// new posting, end-of-stream, or stop. On exit it terminates the stream and,
// for any non-graceful reason, drains and completes all in-flight and pending
// objects so that every Send() eventually gets its objDone().
func (s *Stream) sendLoop(dryrun bool) {
	for {
		if s.sessST.Load() == active {
			if dryrun {
				s.dryrun()
			} else if err := s.doRequest(); err != nil {
				*s.term.reason = reasonError
				s.term.err = err
				break
			}
		}
		if !s.isNextReq() {
			break
		}
	}
	s.terminate()
	s.wg.Done()
	// handle termination that is caused by anything other than Fin()
	if *s.term.reason != endOfStream {
		if *s.term.reason == reasonStopped {
			if glog.FastV(4, glog.SmoduleTransport) {
				glog.Infof("%s: stopped", s)
			}
		} else {
			glog.Errorf("%s: terminating (%s, %v)", s, *s.term.reason, s.term.err)
		}
		// first, wait for the SCQ/cmplCh to empty
		s.wg.Wait()
		// second, handle the last send that was interrupted
		if s.sendoff.obj.Reader != nil {
			obj := &s.sendoff.obj
			s.objDone(obj, s.term.err)
		}
		// finally, handle pending SQ
		for obj := range s.workCh {
			s.objDone(&obj, s.term.err)
		}
	}
}
// cmplLoop is the completion goroutine: it drains SCQ/cmplCh, running objDone
// for each completed object, and exits when the channel is closed or the
// end-of-stream marker arrives.
func (s *Stream) cmplLoop() {
	for cmpl := range s.cmplCh {
		if cmpl.obj.Hdr.IsLast() {
			break
		}
		s.objDone(&cmpl.obj, cmpl.err)
	}
	s.wg.Done()
}
// refcount, invoke Sendcallback, and *always* close the reader
func (s *Stream) objDone(obj *Obj, err error) {
var rc int64
if obj.prc != nil {
rc = obj.prc.Dec()
cmn.Assert(rc >= 0) // remove
}
// SCQ completion callback
if rc == 0 {
if obj.Callback != nil {
obj.Callback(obj.Hdr, obj.Reader, obj.CmplPtr, err)
} else if s.callback != nil {
s.callback(obj.Hdr, obj.Reader, obj.CmplPtr, err)
}
}
if obj.Reader != nil {
obj.Reader.Close() // NOTE: always closing
}
}
// isNextReq blocks between HTTP requests, deciding whether sendLoop should
// start another session. It returns false (recording the reason) on
// end-of-stream or stop, and true when new work was posted via postCh.
func (s *Stream) isNextReq() (next bool) {
	for {
		select {
		case <-s.lastCh.Listen():
			if glog.FastV(4, glog.SmoduleTransport) {
				glog.Infof("%s: end-of-stream", s)
			}
			*s.term.reason = endOfStream
			return
		case <-s.stopCh.Listen():
			glog.Infof("%s: stopped", s)
			*s.term.reason = reasonStopped
			return
		case <-s.postCh:
			s.sessST.Store(active)
			next = true // initiate new HTTP/TCP session
			if glog.FastV(4, glog.SmoduleTransport) {
				glog.Infof("%s: active <- posted", s)
			}
			return
		}
	}
}
// doRequest runs a single HTTP PUT session. The request body is the stream
// itself (it implements io.Reader); with compression enabled the body is the
// lz4Stream wrapper instead, with the lz4 writer (re)initialized per session.
// Per-session counters Numcur/Sizecur are reset here.
func (s *Stream) doRequest() (err error) {
	var (
		body io.Reader = s
	)
	s.Numcur, s.Sizecur = 0, 0
	if s.compressed() {
		s.lz4s.sgl.Reset()
		if s.lz4s.zw == nil {
			s.lz4s.zw = lz4.NewWriter(s.lz4s.sgl)
		} else {
			s.lz4s.zw.Reset(s.lz4s.sgl)
		}
		// lz4 framing spec at http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html
		s.lz4s.zw.Header.BlockChecksum = false
		s.lz4s.zw.Header.NoChecksum = !s.lz4s.frameChecksum
		s.lz4s.zw.Header.BlockMaxSize = s.lz4s.blockMaxSize
		body = &s.lz4s
	}
	return s.do(body)
}
// as io.Reader
// Read implements io.Reader: the HTTP transport pulls the wire format from
// here. State machine per object: send header (sendoff.dod == 0), then data
// (sendData) unless header-only, then end-of-object; when no object is in
// flight, pick the next one from SQ/workCh. Consecutive idle ticks collapse
// via the repeat label; a lone idle tick tears the connection down.
func (s *Stream) Read(b []byte) (n int, err error) {
	s.time.inSend.Store(true) // indication for Collector to delay cleanup
	obj := &s.sendoff.obj
	if obj.Reader != nil { // have object - fast path
		if s.sendoff.dod != 0 { // header already fully sent
			if !obj.Hdr.IsHeaderOnly() {
				return s.sendData(b)
			}
			if !obj.Hdr.IsLast() {
				s.eoObj(nil)
			} else {
				err = io.EOF
				return
			}
		} else {
			return s.sendHdr(b)
		}
	}
repeat:
	select {
	case s.sendoff.obj = <-s.workCh: // next object OR idle tick
		if s.sendoff.obj.Hdr.IsIdleTick() {
			if len(s.workCh) > 0 {
				goto repeat // real work queued behind the tick - skip it
			}
			return s.deactivate()
		}
		l := s.insHeader(s.sendoff.obj.Hdr)
		s.header = s.maxheader[:l]
		return s.sendHdr(b)
	case <-s.stopCh.Listen():
		num := s.stats.Num.Load()
		glog.Infof("%s: stopped (%d/%d)", s, s.Numcur, num)
		err = io.EOF
		return
	}
}
// deactivate ends the current HTTP request body with io.EOF (idle timeout
// fired with nothing queued), tearing down the connection until the next post.
func (s *Stream) deactivate() (n int, err error) {
	if glog.FastV(4, glog.SmoduleTransport) {
		glog.Infof("%s: connection teardown (%d/%d)", s, s.Numcur, s.stats.Num.Load())
	}
	return 0, io.EOF
}
// sendHdr copies the serialized object header into b, possibly across
// multiple Read calls (split-header case). Once the header is fully copied,
// sendoff.dod is set (switching Read to the data phase) and the offset is
// reset for the payload; the last marker additionally closes lastCh and
// returns io.EOF to end the request body.
func (s *Stream) sendHdr(b []byte) (n int, err error) {
	n = copy(b, s.header[s.sendoff.off:])
	s.sendoff.off += int64(n)
	if s.sendoff.off >= int64(len(s.header)) {
		cmn.Assert(s.sendoff.off == int64(len(s.header)))
		s.stats.Offset.Add(s.sendoff.off)
		if glog.FastV(4, glog.SmoduleTransport) {
			num := s.stats.Num.Load()
			glog.Infof("%s: hlen=%d (%d/%d)", s, s.sendoff.off, s.Numcur, num)
		}
		s.sendoff.dod = s.sendoff.off // done-of-delivery for the header
		s.sendoff.off = 0
		if s.sendoff.obj.Hdr.IsLast() {
			if glog.FastV(4, glog.SmoduleTransport) {
				glog.Infof("%s: sent last", s)
			}
			err = io.EOF
			s.lastCh.Close()
		}
	} else if glog.FastV(4, glog.SmoduleTransport) {
		glog.Infof("%s: split header: copied %d < %d hlen", s, s.sendoff.off, len(s.header))
	}
	return
}
// sendData streams the object payload from its reader into b. End-of-object
// is reached either on reader error/EOF or when the declared size has been
// sent; a premature EOF (fewer bytes than Hdr.ObjAttrs.Size) is an error.
func (s *Stream) sendData(b []byte) (n int, err error) {
	var (
		obj     = &s.sendoff.obj
		objSize = obj.Hdr.ObjAttrs.Size
	)
	n, err = obj.Reader.Read(b)
	s.sendoff.off += int64(n)
	if err != nil {
		if err == io.EOF {
			if s.sendoff.off < objSize {
				return n, fmt.Errorf("%s: read (%d) shorter than expected (%d)", s, s.sendoff.off, objSize)
			}
			err = nil // EOF exactly at the declared size is success
		}
		s.eoObj(err)
	} else if s.sendoff.off >= objSize {
		s.eoObj(err)
	}
	return
}
//
// end-of-object: updates stats, reset idle timeout, and post completion
// NOTE: reader.Close() is done by the completion handling code objDone
//
// eoObj handles end-of-object: updates per-session and cumulative stats,
// validates that exactly Hdr.ObjAttrs.Size bytes were sent, posts the
// completion to SCQ, and resets the in-flight sendoff state.
// NOTE: reader.Close() is done by the completion handling code objDone.
func (s *Stream) eoObj(err error) {
	var obj = &s.sendoff.obj
	s.Sizecur += s.sendoff.off
	s.stats.Offset.Add(s.sendoff.off)
	if err != nil {
		goto exit
	}
	if s.sendoff.off != obj.Hdr.ObjAttrs.Size {
		err = fmt.Errorf("%s: obj %s/%s offset %d != %d size",
			s, s.sendoff.obj.Hdr.Bck, s.sendoff.obj.Hdr.ObjName, s.sendoff.off, obj.Hdr.ObjAttrs.Size)
		goto exit
	}
	s.stats.Size.Add(obj.Hdr.ObjAttrs.Size)
	s.Numcur++
	s.stats.Num.Inc()
	if glog.FastV(4, glog.SmoduleTransport) {
		glog.Infof("%s: sent size=%d (%d/%d): %s", s, obj.Hdr.ObjAttrs.Size, s.Numcur, s.stats.Num.Load(), obj.Hdr.ObjName)
	}
exit:
	if err != nil {
		glog.Errorln(err)
	}
	// next completion => SCQ
	s.cmplCh <- cmpl{s.sendoff.obj, err}
	s.sendoff = sendoff{}
}
//
// stream helpers
//
// insHeader serializes hdr into s.maxheader and returns the total length.
// Wire layout: [hlen int64][checksum uint64][bucket fields][opaque][attrs],
// where hlen covers everything after the two leading 8-byte words and the
// checksum is computed over hlen itself (cheap integrity check on receive).
func (s *Stream) insHeader(hdr Header) (l int) {
	l = cmn.SizeofI64 * 2 // reserve room for hlen + checksum
	l = insString(l, s.maxheader, hdr.Bck.Name)
	l = insString(l, s.maxheader, hdr.ObjName)
	l = insString(l, s.maxheader, hdr.Bck.Provider)
	l = insString(l, s.maxheader, hdr.Bck.Ns.Name)
	l = insString(l, s.maxheader, hdr.Bck.Ns.UUID)
	l = insByte(l, s.maxheader, hdr.Opaque)
	l = insAttrs(l, s.maxheader, hdr.ObjAttrs)
	hlen := l - cmn.SizeofI64*2
	insInt64(0, s.maxheader, int64(hlen))
	checksum := xoshiro256.Hash(uint64(hlen))
	insUint64(cmn.SizeofI64, s.maxheader, checksum)
	return
}
// insString serializes str (as length-prefixed bytes) at offset off and
// returns the new offset.
func insString(off int, to []byte, str string) int {
	raw := []byte(str)
	return insByte(off, to, raw)
}
// insByte writes b at offset off as a big-endian int64 length prefix followed
// by the raw bytes, returning the offset just past the copied data.
func insByte(off int, to, b []byte) int {
	binary.BigEndian.PutUint64(to[off:], uint64(len(b)))
	off += cmn.SizeofI64
	copied := copy(to[off:], b)
	cmn.Assert(copied == len(b)) // destination must be large enough
	return off + copied
}
// insInt64 writes i big-endian at offset off; returns the advanced offset.
func insInt64(off int, to []byte, i int64) int {
	return insUint64(off, to, uint64(i))
}

// insUint64 writes i big-endian at offset off; returns the advanced offset.
func insUint64(off int, to []byte, i uint64) int {
	binary.BigEndian.PutUint64(to[off:], i)
	return off + cmn.SizeofI64
}
// insAttrs serializes the object attributes in fixed field order
// (size, atime, checksum type/value, version); the receiver must decode
// in the same order.
func insAttrs(off int, to []byte, attr ObjectAttrs) int {
	off = insInt64(off, to, attr.Size)
	off = insInt64(off, to, attr.Atime)
	off = insString(off, to, attr.CksumType)
	off = insString(off, to, attr.CksumValue)
	off = insString(off, to, attr.Version)
	return off
}
//
// dry-run ---------------------------
//
// dryrun consumes the stream's own wire output (no network): it parses
// headers/objects back via the receive-side iterator and discards payloads,
// asserting that each object's byte count matches its declared size.
// Enabled via AIS_STREAM_DRY_RUN.
func (s *Stream) dryrun() {
	buf := make([]byte, cmn.KiB*32)
	scloser := ioutil.NopCloser(s)
	it := iterator{trname: s.trname, body: scloser, headerBuf: make([]byte, maxHeaderSize)}
	for {
		objReader, _, err := it.next()
		if objReader != nil {
			written, _ := io.CopyBuffer(ioutil.Discard, objReader, buf)
			cmn.Assert(written == objReader.hdr.ObjAttrs.Size)
			continue
		}
		if err != nil {
			break
		}
	}
}
//
// Stats ---------------------------
//
// CompressionRatio returns the ratio of raw bytes read (stream offset) to
// compressed bytes actually sent.
// Fix: guard the zero-denominator case — before any compressed bytes are
// transmitted the plain division yields NaN (0/0) or +Inf, which poisons
// downstream stats formatting; report 0 instead.
func (stats *Stats) CompressionRatio() float64 {
	bytesRead := stats.Offset.Load()
	bytesSent := stats.CompressedSize.Load()
	if bytesSent == 0 {
		return 0
	}
	return float64(bytesRead) / float64(bytesSent)
}
//
// nopReadCloser ---------------------------
//
// Read is a stub returning (0, nil); substituted by Send for header-only
// objects, whose payload path is skipped in Stream.Read (IsHeaderOnly).
func (r *nopReadCloser) Read([]byte) (n int, err error) { return }

// Close is a no-op so objDone can unconditionally close every reader.
func (r *nopReadCloser) Close() error { return nil }
//
// lz4Stream ---------------------------
//
// Read implements io.Reader for the compressed request body: it pulls raw
// stream bytes from Stream.Read, pushes them through the lz4 writer into the
// SGL buffer, and serves the SGL's compressed output to the HTTP transport.
// Because lz4 compresses whole blocks, a raw read may not immediately produce
// compressed output; the retry loop (with Gosched) insists on returning n > 0
// except at object/stream boundaries where an explicit Flush drains the block.
func (lz4s *lz4Stream) Read(b []byte) (n int, err error) {
	var (
		sendoff = &lz4s.s.sendoff
		last    = sendoff.obj.Hdr.IsLast()
		retry   = 64 // insist on returning n > 0 (note that lz4 compresses /blocks/)
	)
	// leftover compressed bytes from a previous call - serve them first
	if lz4s.sgl.Len() > 0 {
		lz4s.zw.Flush()
		n, err = lz4s.sgl.Read(b)
		if err == io.EOF { // reusing/rewinding this buf multiple times
			err = nil
		}
		goto ex
	}
re:
	n, err = lz4s.s.Read(b)
	_, _ = lz4s.zw.Write(b[:n])
	if last {
		lz4s.zw.Flush()
		retry = 0
	} else if lz4s.s.sendoff.obj.Reader == nil /*eoObj*/ || err != nil {
		lz4s.zw.Flush()
		retry = 0
	}
	n, _ = lz4s.sgl.Read(b)
	if n == 0 {
		if retry > 0 {
			retry--
			runtime.Gosched()
			goto re
		}
		// out of retries: force the partial block out
		lz4s.zw.Flush()
		n, _ = lz4s.sgl.Read(b)
	}
ex:
	lz4s.s.stats.CompressedSize.Add(int64(n))
	if lz4s.sgl.Len() == 0 {
		lz4s.sgl.Reset() // rewind the buffer for reuse
	}
	if last && err == nil {
		err = io.EOF
	}
	return
}
| { return s.toURL } | identifier_body |
send.go | // Package transport provides streaming object-based transport over http for intra-cluster continuous
// intra-cluster communications (see README for details and usage example).
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*/
package transport
import (
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"net/url"
"os"
"path"
"runtime"
"strconv"
"sync"
"time"
"unsafe"
"github.com/NVIDIA/aistore/3rdparty/atomic"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/memsys"
"github.com/NVIDIA/aistore/xoshiro256"
lz4 "github.com/pierrec/lz4/v3"
)
// transport defaults
const (
maxHeaderSize = 1024
lastMarker = math.MaxInt64
tickMarker = math.MaxInt64 ^ 0xa5a5a5a5
tickUnit = time.Second
defaultIdleOut = time.Second * 2 | )
// stream TCP/HTTP session: inactive <=> active transitions
const (
	inactive = iota // no TCP/HTTP session currently established; awaiting the first/next object
	active          // session established; sendLoop is transmitting
)
// termination: reasons
const (
	reasonUnknown = "unknown"       // terminated without a recorded reason (backfilled by TermInfo)
	reasonError   = "error"         // doRequest returned an error
	endOfStream   = "end-of-stream" // graceful termination via Fin()
	reasonStopped = "stopped"       // explicit Stop()
)
// API types
type (
Stream struct {
client Client // http client this send-stream will use
// user-defined & queryable
toURL, trname string // http endpoint
sessID int64 // stream session ID
sessST atomic.Int64 // state of the TCP/HTTP session: active (connected) | inactive (disconnected)
stats Stats // stream stats
Numcur, Sizecur int64 // gets reset to zero upon each timeout
// internals
lid string // log prefix
workCh chan Obj // aka SQ: next object to stream
cmplCh chan cmpl // aka SCQ; note that SQ and SCQ together form a FIFO
lastCh *cmn.StopCh // end of stream
stopCh *cmn.StopCh // stop/abort stream
postCh chan struct{} // to indicate that workCh has work
callback SendCallback // to free SGLs, close files, etc.
time struct {
start atomic.Int64 // to support idle(%)
idleOut time.Duration // idle timeout
inSend atomic.Bool // true upon Send() or Read() - info for Collector to delay cleanup
ticks int // num 1s ticks until idle timeout
index int // heap stuff
}
wg sync.WaitGroup
sendoff sendoff
maxheader []byte // max header buffer
header []byte // object header - slice of the maxheader with bucket/objName, etc. fields
term struct {
mu sync.Mutex
terminated bool
err error
reason *string
}
lz4s lz4Stream
}
// advanced usage: additional stream control
Extra struct {
IdleTimeout time.Duration // stream idle timeout: causes PUT to terminate (and renew on the next obj send)
Callback SendCallback // typical usage: to free SGLs, close files, etc.
Compression string // see CompressAlways, etc. enum
MMSA *memsys.MMSA // compression-related buffering
Config *cmn.Config
}
// stream stats
Stats struct {
Num atomic.Int64 // number of transferred objects including zero size (header-only) objects
Size atomic.Int64 // transferred object size (does not include transport headers)
Offset atomic.Int64 // stream offset, in bytes
CompressedSize atomic.Int64 // compressed size (NOTE: converges to the actual compressed size over time)
}
EndpointStats map[uint64]*Stats // all stats for a given http endpoint defined by a tuple(network, trname) by session ID
// object attrs
ObjectAttrs struct {
Atime int64 // access time - nanoseconds since UNIX epoch
Size int64 // size of objects in bytes
CksumType string // checksum type
CksumValue string // checksum of the object produced by given checksum type
Version string // version of the object
}
// object header
Header struct {
Bck cmn.Bck
ObjName string
ObjAttrs ObjectAttrs // attributes/metadata of the sent object
Opaque []byte // custom control (optional)
}
// object to transmit
Obj struct {
Hdr Header // object header
Reader io.ReadCloser // reader, to read the object, and close when done
Callback SendCallback // callback fired when sending is done OR when the stream terminates (see term.reason)
CmplPtr unsafe.Pointer // local pointer that gets returned to the caller via Send completion callback
// private
prc *atomic.Int64 // if present, ref-counts num sent objects to call SendCallback only once
}
// object-sent callback that has the following signature can optionally be defined on a:
// a) per-stream basis (via NewStream constructor - see Extra struct above)
// b) for a given object that is being sent (for instance, to support a call-per-batch semantics)
// Naturally, object callback "overrides" the per-stream one: when object callback is defined
// (i.e., non-nil), the stream callback is ignored/skipped.
// NOTE: if defined, the callback executes asynchronously as far as the sending part is concerned
SendCallback func(Header, io.ReadCloser, unsafe.Pointer, error)
StreamCollector struct {
cmn.Named
}
)
// internal types
type (
lz4Stream struct {
s *Stream
zw *lz4.Writer // orig reader => zw
sgl *memsys.SGL // zw => bb => network
blockMaxSize int // *uncompressed* block max size
frameChecksum bool // true: checksum lz4 frames
}
sendoff struct {
obj Obj
// in progress
off int64
dod int64
}
cmpl struct { // send completions => SCQ
obj Obj
err error
}
nopReadCloser struct{}
collector struct {
streams map[string]*Stream
heap []*Stream
ticker *time.Ticker
stopCh *cmn.StopCh
ctrlCh chan ctrl
}
ctrl struct { // add/del channel to/from collector
s *Stream
add bool
}
)
// package-level singletons shared by all streams
var (
	nopRC   = &nopReadCloser{}      // read and close stubs
	nextSID = *atomic.NewInt64(100) // unique session IDs starting from 101
	sc      = &StreamCollector{}    // idle timer and house-keeping (slow path)
	gc      *collector              // real stream collector
)
func (extra *Extra) compressed() bool {
return extra.Compression != "" && extra.Compression != cmn.CompressNever
}
//
// API methods
//
func NewStream(client Client, toURL string, extra *Extra) (s *Stream) {
u, err := url.Parse(toURL)
if err != nil {
glog.Errorf("Failed to parse %s: %v", toURL, err)
return
}
s = &Stream{client: client, toURL: toURL}
s.time.idleOut = defaultIdleOut
if extra != nil {
s.callback = extra.Callback
if extra.IdleTimeout > 0 {
s.time.idleOut = extra.IdleTimeout
}
if extra.compressed() {
config := extra.Config
if config == nil {
config = cmn.GCO.Get()
}
s.lz4s.s = s
s.lz4s.blockMaxSize = config.Compression.BlockMaxSize
s.lz4s.frameChecksum = config.Compression.Checksum
mem := extra.MMSA
if mem == nil {
mem = memsys.DefaultPageMM()
glog.Warningln("Using global memory manager for streaming inline compression")
}
if s.lz4s.blockMaxSize >= memsys.MaxPageSlabSize {
s.lz4s.sgl = mem.NewSGL(memsys.MaxPageSlabSize, memsys.MaxPageSlabSize)
} else {
s.lz4s.sgl = mem.NewSGL(cmn.KiB*64, cmn.KiB*64)
}
}
}
if s.time.idleOut < tickUnit {
s.time.idleOut = tickUnit
}
s.time.ticks = int(s.time.idleOut / tickUnit)
s.sessID = nextSID.Inc()
s.trname = path.Base(u.Path)
if !s.compressed() {
s.lid = fmt.Sprintf("%s[%d]", s.trname, s.sessID)
} else {
s.lid = fmt.Sprintf("%s[%d[%s]]", s.trname, s.sessID, cmn.B2S(int64(s.lz4s.blockMaxSize), 0))
}
// burst size: the number of objects the caller is permitted to post for sending
// without experiencing any sort of back-pressure
burst := burstNum
if a := os.Getenv("AIS_STREAM_BURST_NUM"); a != "" {
if burst64, err := strconv.ParseInt(a, 10, 0); err != nil {
glog.Errorf("%s: error parsing env AIS_STREAM_BURST_NUM=%s: %v", s, a, err)
burst = burstNum
} else {
burst = int(burst64)
}
}
s.workCh = make(chan Obj, burst) // Send Qeueue or SQ
s.cmplCh = make(chan cmpl, burst) // Send Completion Queue or SCQ
s.lastCh = cmn.NewStopCh()
s.stopCh = cmn.NewStopCh()
s.postCh = make(chan struct{}, 1)
s.maxheader = make([]byte, maxHeaderSize) // NOTE: must be large enough to accommodate all max-size Header
s.sessST.Store(inactive) // NOTE: initiate HTTP session upon arrival of the first object
s.time.start.Store(time.Now().UnixNano())
s.term.reason = new(string)
s.wg.Add(2)
var dryrun bool
if a := os.Getenv("AIS_STREAM_DRY_RUN"); a != "" {
if dryrun, err = strconv.ParseBool(a); err != nil {
glog.Errorf("%s: error parsing env AIS_STREAM_DRY_RUN=%s: %v", s, a, err)
}
cmn.Assert(dryrun || client != nil)
}
go s.sendLoop(dryrun) // handle SQ
go s.cmplLoop() // handle SCQ
gc.ctrlCh <- ctrl{s, true /* collect */}
return
}
func (s *Stream) compressed() bool { return s.lz4s.s == s }
// Asynchronously send an object defined by its header and its reader.
// ---------------------------------------------------------------------------------------
//
// The sending pipeline is implemented as a pair (SQ, SCQ) where the former is a send queue
// realized as workCh, and the latter is a send completion queue (cmplCh).
// Together, SQ and SCQ form a FIFO as far as ordering of transmitted objects.
//
// NOTE: header-only objects are supported; when there's no data to send (that is,
// when the header's Dsize field is set to zero), the reader is not required and the
// corresponding argument in Send() can be set to nil.
//
// NOTE: object reader is always closed by the code that handles send completions.
// In the case when SendCallback is provided (i.e., non-nil), the closing is done
// right after calling this callback - see objDone below for details.
//
// NOTE: Optional reference counting is also done by (and in) the objDone, so that the
// SendCallback gets called if and only when the refcount (if provided i.e., non-nil)
// reaches zero.
//
// NOTE: For every transmission of every object there's always an objDone() completion
// (with its refcounting and reader-closing). This holds true in all cases including
// network errors that may cause sudden and instant termination of the underlying
// stream(s).
//
// ---------------------------------------------------------------------------------------
func (s *Stream) Send(obj Obj) (err error) {
s.time.inSend.Store(true) // an indication for Collector to postpone cleanup
hdr := &obj.Hdr
if s.Terminated() {
err = fmt.Errorf("%s terminated(%s, %v), cannot send [%s/%s(%d)]",
s, *s.term.reason, s.term.err, hdr.Bck, hdr.ObjName, hdr.ObjAttrs.Size)
glog.Errorln(err)
return
}
if s.sessST.CAS(inactive, active) {
s.postCh <- struct{}{}
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: inactive => active", s)
}
}
// next object => SQ
if obj.Reader == nil {
cmn.Assert(hdr.IsHeaderOnly())
obj.Reader = nopRC
}
s.workCh <- obj
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: send %s/%s(%d)[sq=%d]", s, hdr.Bck, hdr.ObjName, hdr.ObjAttrs.Size, len(s.workCh))
}
return
}
func (s *Stream) Fin() {
_ = s.Send(Obj{Hdr: Header{ObjAttrs: ObjectAttrs{Size: lastMarker}}})
s.wg.Wait()
}
func (s *Stream) Stop() { s.stopCh.Close() }
func (s *Stream) URL() string { return s.toURL }
func (s *Stream) ID() (string, int64) { return s.trname, s.sessID }
func (s *Stream) String() string { return s.lid }
func (s *Stream) Terminated() (terminated bool) {
s.term.mu.Lock()
terminated = s.term.terminated
s.term.mu.Unlock()
return
}
func (s *Stream) terminate() {
s.term.mu.Lock()
cmn.Assert(!s.term.terminated)
s.term.terminated = true
s.Stop()
hdr := Header{ObjAttrs: ObjectAttrs{Size: lastMarker}}
obj := Obj{Hdr: hdr}
s.cmplCh <- cmpl{obj, s.term.err}
s.term.mu.Unlock()
// Remove stream after lock because we could deadlock between `do()`
// (which checks for `Terminated` status) and this function which
// would be under lock.
gc.remove(s)
if s.compressed() {
s.lz4s.sgl.Free()
if s.lz4s.zw != nil {
s.lz4s.zw.Reset(nil)
}
}
}
func (s *Stream) TermInfo() (string, error) {
if s.Terminated() && *s.term.reason == "" {
if s.term.err == nil {
s.term.err = fmt.Errorf(reasonUnknown)
}
*s.term.reason = reasonUnknown
}
return *s.term.reason, s.term.err
}
func (s *Stream) GetStats() (stats Stats) {
// byte-num transfer stats
stats.Num.Store(s.stats.Num.Load())
stats.Offset.Store(s.stats.Offset.Load())
stats.Size.Store(s.stats.Size.Load())
stats.CompressedSize.Store(s.stats.CompressedSize.Load())
return
}
func (hdr *Header) IsLast() bool { return hdr.ObjAttrs.Size == lastMarker }
func (hdr *Header) IsIdleTick() bool { return hdr.ObjAttrs.Size == tickMarker }
func (hdr *Header) IsHeaderOnly() bool { return hdr.ObjAttrs.Size == 0 || hdr.IsLast() }
//
// internal methods including the sending and completing loops below, each running in its own goroutine
//
func (s *Stream) sendLoop(dryrun bool) {
for {
if s.sessST.Load() == active {
if dryrun {
s.dryrun()
} else if err := s.doRequest(); err != nil {
*s.term.reason = reasonError
s.term.err = err
break
}
}
if !s.isNextReq() {
break
}
}
s.terminate()
s.wg.Done()
// handle termination that is caused by anything other than Fin()
if *s.term.reason != endOfStream {
if *s.term.reason == reasonStopped {
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: stopped", s)
}
} else {
glog.Errorf("%s: terminating (%s, %v)", s, *s.term.reason, s.term.err)
}
// first, wait for the SCQ/cmplCh to empty
s.wg.Wait()
// second, handle the last send that was interrupted
if s.sendoff.obj.Reader != nil {
obj := &s.sendoff.obj
s.objDone(obj, s.term.err)
}
// finally, handle pending SQ
for obj := range s.workCh {
s.objDone(&obj, s.term.err)
}
}
}
func (s *Stream) cmplLoop() {
for {
cmpl, ok := <-s.cmplCh
obj := &cmpl.obj
if !ok || obj.Hdr.IsLast() {
break
}
s.objDone(&cmpl.obj, cmpl.err)
}
s.wg.Done()
}
// refcount, invoke Sendcallback, and *always* close the reader
func (s *Stream) objDone(obj *Obj, err error) {
var rc int64
if obj.prc != nil {
rc = obj.prc.Dec()
cmn.Assert(rc >= 0) // remove
}
// SCQ completion callback
if rc == 0 {
if obj.Callback != nil {
obj.Callback(obj.Hdr, obj.Reader, obj.CmplPtr, err)
} else if s.callback != nil {
s.callback(obj.Hdr, obj.Reader, obj.CmplPtr, err)
}
}
if obj.Reader != nil {
obj.Reader.Close() // NOTE: always closing
}
}
func (s *Stream) isNextReq() (next bool) {
for {
select {
case <-s.lastCh.Listen():
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: end-of-stream", s)
}
*s.term.reason = endOfStream
return
case <-s.stopCh.Listen():
glog.Infof("%s: stopped", s)
*s.term.reason = reasonStopped
return
case <-s.postCh:
s.sessST.Store(active)
next = true // initiate new HTTP/TCP session
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: active <- posted", s)
}
return
}
}
}
func (s *Stream) doRequest() (err error) {
var (
body io.Reader = s
)
s.Numcur, s.Sizecur = 0, 0
if s.compressed() {
s.lz4s.sgl.Reset()
if s.lz4s.zw == nil {
s.lz4s.zw = lz4.NewWriter(s.lz4s.sgl)
} else {
s.lz4s.zw.Reset(s.lz4s.sgl)
}
// lz4 framing spec at http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html
s.lz4s.zw.Header.BlockChecksum = false
s.lz4s.zw.Header.NoChecksum = !s.lz4s.frameChecksum
s.lz4s.zw.Header.BlockMaxSize = s.lz4s.blockMaxSize
body = &s.lz4s
}
return s.do(body)
}
// as io.Reader
func (s *Stream) Read(b []byte) (n int, err error) {
s.time.inSend.Store(true) // indication for Collector to delay cleanup
obj := &s.sendoff.obj
if obj.Reader != nil { // have object - fast path
if s.sendoff.dod != 0 {
if !obj.Hdr.IsHeaderOnly() {
return s.sendData(b)
}
if !obj.Hdr.IsLast() {
s.eoObj(nil)
} else {
err = io.EOF
return
}
} else {
return s.sendHdr(b)
}
}
repeat:
select {
case s.sendoff.obj = <-s.workCh: // next object OR idle tick
if s.sendoff.obj.Hdr.IsIdleTick() {
if len(s.workCh) > 0 {
goto repeat
}
return s.deactivate()
}
l := s.insHeader(s.sendoff.obj.Hdr)
s.header = s.maxheader[:l]
return s.sendHdr(b)
case <-s.stopCh.Listen():
num := s.stats.Num.Load()
glog.Infof("%s: stopped (%d/%d)", s, s.Numcur, num)
err = io.EOF
return
}
}
func (s *Stream) deactivate() (n int, err error) {
err = io.EOF
if glog.FastV(4, glog.SmoduleTransport) {
num := s.stats.Num.Load()
glog.Infof("%s: connection teardown (%d/%d)", s, s.Numcur, num)
}
return
}
func (s *Stream) sendHdr(b []byte) (n int, err error) {
n = copy(b, s.header[s.sendoff.off:])
s.sendoff.off += int64(n)
if s.sendoff.off >= int64(len(s.header)) {
cmn.Assert(s.sendoff.off == int64(len(s.header)))
s.stats.Offset.Add(s.sendoff.off)
if glog.FastV(4, glog.SmoduleTransport) {
num := s.stats.Num.Load()
glog.Infof("%s: hlen=%d (%d/%d)", s, s.sendoff.off, s.Numcur, num)
}
s.sendoff.dod = s.sendoff.off
s.sendoff.off = 0
if s.sendoff.obj.Hdr.IsLast() {
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: sent last", s)
}
err = io.EOF
s.lastCh.Close()
}
} else if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: split header: copied %d < %d hlen", s, s.sendoff.off, len(s.header))
}
return
}
func (s *Stream) sendData(b []byte) (n int, err error) {
var (
obj = &s.sendoff.obj
objSize = obj.Hdr.ObjAttrs.Size
)
n, err = obj.Reader.Read(b)
s.sendoff.off += int64(n)
if err != nil {
if err == io.EOF {
if s.sendoff.off < objSize {
return n, fmt.Errorf("%s: read (%d) shorter than expected (%d)", s, s.sendoff.off, objSize)
}
err = nil
}
s.eoObj(err)
} else if s.sendoff.off >= objSize {
s.eoObj(err)
}
return
}
//
// end-of-object: updates stats, reset idle timeout, and post completion
// NOTE: reader.Close() is done by the completion handling code objDone
//
func (s *Stream) eoObj(err error) {
var obj = &s.sendoff.obj
s.Sizecur += s.sendoff.off
s.stats.Offset.Add(s.sendoff.off)
if err != nil {
goto exit
}
if s.sendoff.off != obj.Hdr.ObjAttrs.Size {
err = fmt.Errorf("%s: obj %s/%s offset %d != %d size",
s, s.sendoff.obj.Hdr.Bck, s.sendoff.obj.Hdr.ObjName, s.sendoff.off, obj.Hdr.ObjAttrs.Size)
goto exit
}
s.stats.Size.Add(obj.Hdr.ObjAttrs.Size)
s.Numcur++
s.stats.Num.Inc()
if glog.FastV(4, glog.SmoduleTransport) {
glog.Infof("%s: sent size=%d (%d/%d): %s", s, obj.Hdr.ObjAttrs.Size, s.Numcur, s.stats.Num.Load(), obj.Hdr.ObjName)
}
exit:
if err != nil {
glog.Errorln(err)
}
// next completion => SCQ
s.cmplCh <- cmpl{s.sendoff.obj, err}
s.sendoff = sendoff{}
}
//
// stream helpers
//
func (s *Stream) insHeader(hdr Header) (l int) {
l = cmn.SizeofI64 * 2
l = insString(l, s.maxheader, hdr.Bck.Name)
l = insString(l, s.maxheader, hdr.ObjName)
l = insString(l, s.maxheader, hdr.Bck.Provider)
l = insString(l, s.maxheader, hdr.Bck.Ns.Name)
l = insString(l, s.maxheader, hdr.Bck.Ns.UUID)
l = insByte(l, s.maxheader, hdr.Opaque)
l = insAttrs(l, s.maxheader, hdr.ObjAttrs)
hlen := l - cmn.SizeofI64*2
insInt64(0, s.maxheader, int64(hlen))
checksum := xoshiro256.Hash(uint64(hlen))
insUint64(cmn.SizeofI64, s.maxheader, checksum)
return
}
func insString(off int, to []byte, str string) int {
return insByte(off, to, []byte(str))
}
func insByte(off int, to, b []byte) int {
var l = len(b)
binary.BigEndian.PutUint64(to[off:], uint64(l))
off += cmn.SizeofI64
n := copy(to[off:], b)
cmn.Assert(n == l)
return off + l
}
func insInt64(off int, to []byte, i int64) int {
return insUint64(off, to, uint64(i))
}
func insUint64(off int, to []byte, i uint64) int {
binary.BigEndian.PutUint64(to[off:], i)
return off + cmn.SizeofI64
}
func insAttrs(off int, to []byte, attr ObjectAttrs) int {
off = insInt64(off, to, attr.Size)
off = insInt64(off, to, attr.Atime)
off = insString(off, to, attr.CksumType)
off = insString(off, to, attr.CksumValue)
off = insString(off, to, attr.Version)
return off
}
//
// dry-run ---------------------------
//
func (s *Stream) dryrun() {
buf := make([]byte, cmn.KiB*32)
scloser := ioutil.NopCloser(s)
it := iterator{trname: s.trname, body: scloser, headerBuf: make([]byte, maxHeaderSize)}
for {
objReader, _, err := it.next()
if objReader != nil {
written, _ := io.CopyBuffer(ioutil.Discard, objReader, buf)
cmn.Assert(written == objReader.hdr.ObjAttrs.Size)
continue
}
if err != nil {
break
}
}
}
//
// Stats ---------------------------
//
func (stats *Stats) CompressionRatio() float64 {
bytesRead := stats.Offset.Load()
bytesSent := stats.CompressedSize.Load()
return float64(bytesRead) / float64(bytesSent)
}
//
// nopReadCloser ---------------------------
//
func (r *nopReadCloser) Read([]byte) (n int, err error) { return }
func (r *nopReadCloser) Close() error { return nil }
//
// lz4Stream ---------------------------
//
func (lz4s *lz4Stream) Read(b []byte) (n int, err error) {
var (
sendoff = &lz4s.s.sendoff
last = sendoff.obj.Hdr.IsLast()
retry = 64 // insist on returning n > 0 (note that lz4 compresses /blocks/)
)
if lz4s.sgl.Len() > 0 {
lz4s.zw.Flush()
n, err = lz4s.sgl.Read(b)
if err == io.EOF { // reusing/rewinding this buf multiple times
err = nil
}
goto ex
}
re:
n, err = lz4s.s.Read(b)
_, _ = lz4s.zw.Write(b[:n])
if last {
lz4s.zw.Flush()
retry = 0
} else if lz4s.s.sendoff.obj.Reader == nil /*eoObj*/ || err != nil {
lz4s.zw.Flush()
retry = 0
}
n, _ = lz4s.sgl.Read(b)
if n == 0 {
if retry > 0 {
retry--
runtime.Gosched()
goto re
}
lz4s.zw.Flush()
n, _ = lz4s.sgl.Read(b)
}
ex:
lz4s.s.stats.CompressedSize.Add(int64(n))
if lz4s.sgl.Len() == 0 {
lz4s.sgl.Reset()
}
if last && err == nil {
err = io.EOF
}
return
} | burstNum = 32 // default max num objects that can be posted for sending without any back-pressure | random_line_split |
base_wizard.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import keystore
from wallet import Wallet, Imported_Wallet, Standard_Wallet, Multisig_Wallet, WalletStorage, wallet_types
from i18n import _
from plugins import run_hook
class BaseWizard(object):
def __init__(self, config, network, path):
super(BaseWizard, self).__init__()
self.config = config
self.network = network
self.storage = WalletStorage(path)
self.wallet = None
self.stack = []
self.plugin = None
def run(self, *args):
action = args[0]
args = args[1:]
self.stack.append((action, args))
if not action:
return
if type(action) is tuple:
self.plugin, action = action
if self.plugin and hasattr(self.plugin, action):
f = getattr(self.plugin, action)
apply(f, (self,) + args)
elif hasattr(self, action):
f = getattr(self, action)
apply(f, args)
else:
raise BaseException("unknown action", action)
def can_go_back(self):
return len(self.stack)>1
def go_back(self):
if not self.can_go_back():
return
self.stack.pop()
action, args = self.stack.pop()
self.run(action, *args)
def new(self):
name = os.path.basename(self.storage.path)
title = _("Welcome to the Electrum installation wizard.")
message = '\n'.join([
_("The wallet '%s' does not exist.") % name,
_("What kind of wallet do you want to create?")
])
wallet_kinds = [
('standard', _("Standard wallet")),
('2fa', _("Wallet with two-factor authentication")),
('multisig', _("Multi-signature wallet")),
('imported', _("Watch Bitcoin addresses")),
]
choices = [pair for pair in wallet_kinds if pair[0] in wallet_types]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.on_wallet_type)
def on_wallet_type(self, choice):
self.wallet_type = choice
if choice == 'standard':
action = 'choose_keystore'
elif choice == 'multisig':
action = 'choose_multisig'
elif choice == '2fa':
self.storage.put('wallet_type', '2fa')
self.storage.put('use_trustedcoin', True)
self.plugin = self.plugins.load_plugin('trustedcoin')
action = self.storage.get_action()
elif choice == 'imported':
action = 'import_addresses'
self.run(action)
def choose_multisig(self):
def on_multisig(m, n):
self.multisig_type = "%dof%d"%(m, n)
self.storage.put('wallet_type', self.multisig_type)
self.n = n
self.keystores = []
self.run('choose_keystore')
self.multisig_dialog(run_next=on_multisig)
def choose_keystore(self):
assert self.wallet_type in ['standard', 'multisig']
c = self.wallet_type == 'multisig' and len(self.keystores)>0
title = _('Add cosigner') + ' %d'%len(self.keystores) if c else _('Keystore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
if not c:
choices = [
('create_seed', _('Create a new seed')),
('restore_seed', _('I already have a seed')),
('restore_from_key', _('Import keys')),
('choose_device', _('Use hardware device')),
]
else:
choices = [
('restore_from_key', _('Import cosigner key')),
('choose_device', _('Cosign with hardware device')),
]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)
def restore_seed(self):
# TODO: return derivation password too
self.restore_seed_dialog(run_next=self.add_password, is_valid=keystore.is_seed)
def on_restore(self, text):
if keystore.is_address_list(text):
|
elif keystore.is_private(text):
self.add_password(text)
else:
self.create_keystore(text, None)
def import_addresses(self):
v = keystore.is_address_list
title = _("Import Bitcoin Addresses")
message = _("Enter a list of Bitcoin addresses. This will create a watching-only wallet.")
self.restore_keys_dialog(title=title, message=message, run_next=self.on_restore, is_valid=v)
def restore_from_key(self):
if self.wallet_type == 'standard':
v = keystore.is_any_key
title = _("Import keys")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub)."),
_("To create a spending wallet, please enter a master private key (xprv), or a list of Bitcoin private keys.")
])
else:
v = keystore.is_bip32_key
title = _("Master public or private key")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub)."),
_("To create a spending wallet, please enter a master private key (xprv).")
])
self.restore_keys_dialog(title=title, message=message, run_next=self.on_restore, is_valid=v)
def choose_device(self):
title = _('Hardware Keystore')
# check available plugins
support = self.plugins.get_hardware_support()
if not support:
msg = '\n'.join([
_('No hardware wallet support found on your system.'),
_('Please install the relevant libraries (eg python-trezor for Trezor).'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_device())
return
# scan devices
devices = []
for name, description, plugin in support:
devmgr = plugin.device_manager()
try:
u = devmgr.unpaired_device_infos(self, plugin)
except:
print "error", name
continue
devices += map(lambda x: (name, x), u)
if not devices:
msg = '\n'.join([
_('No hardware device detected.'),
_('To trigger a rescan, press \'next\'.'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_device())
return
# select device
self.devices = devices
choices = []
for name, device_info in devices:
choices.append( ((name, device_info), device_info.description) )
msg = _('Select a device') + ':'
self.choice_dialog(title=title, message=msg, choices=choices, run_next=self.on_device)
def on_device(self, name, device_info):
f = lambda x: self.run('on_hardware_account_id', name, device_info, x)
self.account_id_dialog(run_next=f)
def on_hardware_account_id(self, hw_type, device_info, account_id):
from keystore import hardware_keystore, bip44_derivation
derivation = bip44_derivation(int(account_id))
plugin = self.plugins.get_plugin(hw_type)
xpub = plugin.setup_device(device_info, derivation, self)
# create keystore
d = {
'type': 'hardware',
'hw_type': hw_type,
'derivation': derivation,
'xpub': xpub,
}
k = hardware_keystore(hw_type, d)
self.on_keystore(k, None)
def on_hardware_seed(self):
self.storage.put('key_type', 'hw_seed')
is_valid = lambda x: True #fixme: bip39
f = lambda seed: self.run('on_bip39_seed', seed)
self.restore_seed_dialog(run_next=f, is_valid=is_valid)
def on_bip39_seed(self, seed):
f = lambda passphrase: self.run('on_bip39_passphrase', seed, passphrase)
self.request_passphrase(self.storage.get('hw_type'), run_next=f)
def on_bip39_passphrase(self, seed, passphrase):
f = lambda account_id: self.run('on_bip44_account_id', seed, passphrase, account_id)
self.account_id_dialog(run_next=f)
def on_bip44_account_id(self, seed, passphrase, account_id):
f = lambda pw: self.run('on_bip44', seed, passphrase, account_id, pw)
self.request_password(run_next=f)
def on_bip44(self, seed, passphrase, account_id, password):
import keystore
k = keystore.BIP32_KeyStore()
k.add_seed(seed, password)
bip32_seed = keystore.bip39_to_seed(seed, passphrase)
derivation = "m/44'/0'/%d'"%account_id
self.storage.put('account_id', account_id)
k.add_xprv_from_seed(bip32_seed, derivation, password)
self.on_keystore(k, password)
def on_keystore(self, k, password):
if self.wallet_type == 'standard':
self.storage.put('keystore', k.dump())
self.wallet = Standard_Wallet(self.storage)
self.run('create_addresses')
elif self.wallet_type == 'multisig':
if k.xpub in map(lambda x: x.xpub, self.keystores):
raise BaseException('duplicate key')
self.keystores.append(k)
if len(self.keystores) == 1:
xpub = k.get_master_public_key()
self.stack = []
self.run('show_xpub_and_add_cosigners', xpub)
elif len(self.keystores) < self.n:
self.run('choose_keystore')
else:
for i, k in enumerate(self.keystores):
self.storage.put('x%d/'%(i+1), k.dump())
self.storage.write()
self.wallet = Multisig_Wallet(self.storage)
self.run('create_addresses')
def show_xpub_and_add_cosigners(self, xpub):
self.show_xpub_dialog(xpub=xpub, run_next=lambda x: self.run('choose_keystore'))
def add_cosigners(self, password, i):
self.add_cosigner_dialog(run_next=lambda x: self.on_cosigner(x, password, i), index=i, is_valid=keystore.is_xpub)
def on_cosigner(self, text, password, i):
k = keystore.from_text(text, password)
self.on_keystore(k)
def create_seed(self):
from electrum.mnemonic import Mnemonic
seed = Mnemonic('en').make_seed()
self.show_seed_dialog(run_next=self.confirm_seed, seed_text=seed)
def confirm_seed(self, seed):
self.confirm_seed_dialog(run_next=self.add_password, is_valid=lambda x: x==seed)
def add_password(self, text):
f = lambda pw: self.run('create_keystore', text, pw)
self.request_password(run_next=f)
def create_keystore(self, text, password):
k = keystore.from_text(text, password)
self.on_keystore(k, password)
def create_addresses(self):
def task():
self.wallet.synchronize()
self.wallet.storage.write()
self.terminate()
msg = _("Electrum is generating your addresses, please wait.")
self.waiting_dialog(task, msg)
| self.wallet = Imported_Wallet(self.storage)
for x in text.split():
self.wallet.import_address(x)
self.terminate() | conditional_block |
base_wizard.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import keystore
from wallet import Wallet, Imported_Wallet, Standard_Wallet, Multisig_Wallet, WalletStorage, wallet_types
from i18n import _
from plugins import run_hook
class BaseWizard(object):
def __init__(self, config, network, path):
super(BaseWizard, self).__init__()
self.config = config
self.network = network
self.storage = WalletStorage(path)
self.wallet = None
self.stack = []
self.plugin = None
def run(self, *args):
action = args[0]
args = args[1:]
self.stack.append((action, args))
if not action:
return
if type(action) is tuple:
self.plugin, action = action
if self.plugin and hasattr(self.plugin, action):
f = getattr(self.plugin, action)
apply(f, (self,) + args)
elif hasattr(self, action):
f = getattr(self, action)
apply(f, args)
else:
raise BaseException("unknown action", action)
def can_go_back(self):
return len(self.stack)>1
def go_back(self):
if not self.can_go_back():
return
self.stack.pop()
action, args = self.stack.pop()
self.run(action, *args)
def new(self):
name = os.path.basename(self.storage.path)
title = _("Welcome to the Electrum installation wizard.")
message = '\n'.join([
_("The wallet '%s' does not exist.") % name,
_("What kind of wallet do you want to create?")
])
wallet_kinds = [
('standard', _("Standard wallet")),
('2fa', _("Wallet with two-factor authentication")),
('multisig', _("Multi-signature wallet")),
('imported', _("Watch Bitcoin addresses")),
]
choices = [pair for pair in wallet_kinds if pair[0] in wallet_types]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.on_wallet_type)
def on_wallet_type(self, choice):
self.wallet_type = choice
if choice == 'standard':
action = 'choose_keystore'
elif choice == 'multisig':
action = 'choose_multisig'
elif choice == '2fa':
self.storage.put('wallet_type', '2fa')
self.storage.put('use_trustedcoin', True)
self.plugin = self.plugins.load_plugin('trustedcoin')
action = self.storage.get_action()
elif choice == 'imported':
action = 'import_addresses'
self.run(action)
def choose_multisig(self):
def on_multisig(m, n):
self.multisig_type = "%dof%d"%(m, n)
self.storage.put('wallet_type', self.multisig_type)
self.n = n
self.keystores = []
self.run('choose_keystore')
self.multisig_dialog(run_next=on_multisig)
def choose_keystore(self):
assert self.wallet_type in ['standard', 'multisig']
c = self.wallet_type == 'multisig' and len(self.keystores)>0
title = _('Add cosigner') + ' %d'%len(self.keystores) if c else _('Keystore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
if not c:
choices = [
('create_seed', _('Create a new seed')),
('restore_seed', _('I already have a seed')),
('restore_from_key', _('Import keys')),
('choose_device', _('Use hardware device')),
]
else:
choices = [
('restore_from_key', _('Import cosigner key')),
('choose_device', _('Cosign with hardware device')),
]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)
def restore_seed(self):
# TODO: return derivation password too
self.restore_seed_dialog(run_next=self.add_password, is_valid=keystore.is_seed)
def on_restore(self, text):
if keystore.is_address_list(text):
self.wallet = Imported_Wallet(self.storage)
for x in text.split():
self.wallet.import_address(x)
self.terminate()
elif keystore.is_private(text):
self.add_password(text)
else:
self.create_keystore(text, None)
def import_addresses(self):
v = keystore.is_address_list
title = _("Import Bitcoin Addresses")
message = _("Enter a list of Bitcoin addresses. This will create a watching-only wallet.")
self.restore_keys_dialog(title=title, message=message, run_next=self.on_restore, is_valid=v)
def restore_from_key(self):
if self.wallet_type == 'standard':
v = keystore.is_any_key
title = _("Import keys")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub)."),
_("To create a spending wallet, please enter a master private key (xprv), or a list of Bitcoin private keys.")
])
else:
v = keystore.is_bip32_key
title = _("Master public or private key")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub)."),
_("To create a spending wallet, please enter a master private key (xprv).")
])
self.restore_keys_dialog(title=title, message=message, run_next=self.on_restore, is_valid=v)
def choose_device(self):
title = _('Hardware Keystore')
# check available plugins
support = self.plugins.get_hardware_support()
if not support:
msg = '\n'.join([
_('No hardware wallet support found on your system.'),
_('Please install the relevant libraries (eg python-trezor for Trezor).'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_device())
return
# scan devices
devices = []
for name, description, plugin in support:
devmgr = plugin.device_manager()
try:
u = devmgr.unpaired_device_infos(self, plugin)
except:
print "error", name
continue
devices += map(lambda x: (name, x), u)
if not devices:
msg = '\n'.join([
_('No hardware device detected.'),
_('To trigger a rescan, press \'next\'.'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_device())
return | self.devices = devices
choices = []
for name, device_info in devices:
choices.append( ((name, device_info), device_info.description) )
msg = _('Select a device') + ':'
self.choice_dialog(title=title, message=msg, choices=choices, run_next=self.on_device)
def on_device(self, name, device_info):
f = lambda x: self.run('on_hardware_account_id', name, device_info, x)
self.account_id_dialog(run_next=f)
def on_hardware_account_id(self, hw_type, device_info, account_id):
from keystore import hardware_keystore, bip44_derivation
derivation = bip44_derivation(int(account_id))
plugin = self.plugins.get_plugin(hw_type)
xpub = plugin.setup_device(device_info, derivation, self)
# create keystore
d = {
'type': 'hardware',
'hw_type': hw_type,
'derivation': derivation,
'xpub': xpub,
}
k = hardware_keystore(hw_type, d)
self.on_keystore(k, None)
def on_hardware_seed(self):
self.storage.put('key_type', 'hw_seed')
is_valid = lambda x: True #fixme: bip39
f = lambda seed: self.run('on_bip39_seed', seed)
self.restore_seed_dialog(run_next=f, is_valid=is_valid)
def on_bip39_seed(self, seed):
f = lambda passphrase: self.run('on_bip39_passphrase', seed, passphrase)
self.request_passphrase(self.storage.get('hw_type'), run_next=f)
def on_bip39_passphrase(self, seed, passphrase):
f = lambda account_id: self.run('on_bip44_account_id', seed, passphrase, account_id)
self.account_id_dialog(run_next=f)
def on_bip44_account_id(self, seed, passphrase, account_id):
f = lambda pw: self.run('on_bip44', seed, passphrase, account_id, pw)
self.request_password(run_next=f)
def on_bip44(self, seed, passphrase, account_id, password):
import keystore
k = keystore.BIP32_KeyStore()
k.add_seed(seed, password)
bip32_seed = keystore.bip39_to_seed(seed, passphrase)
derivation = "m/44'/0'/%d'"%account_id
self.storage.put('account_id', account_id)
k.add_xprv_from_seed(bip32_seed, derivation, password)
self.on_keystore(k, password)
def on_keystore(self, k, password):
if self.wallet_type == 'standard':
self.storage.put('keystore', k.dump())
self.wallet = Standard_Wallet(self.storage)
self.run('create_addresses')
elif self.wallet_type == 'multisig':
if k.xpub in map(lambda x: x.xpub, self.keystores):
raise BaseException('duplicate key')
self.keystores.append(k)
if len(self.keystores) == 1:
xpub = k.get_master_public_key()
self.stack = []
self.run('show_xpub_and_add_cosigners', xpub)
elif len(self.keystores) < self.n:
self.run('choose_keystore')
else:
for i, k in enumerate(self.keystores):
self.storage.put('x%d/'%(i+1), k.dump())
self.storage.write()
self.wallet = Multisig_Wallet(self.storage)
self.run('create_addresses')
def show_xpub_and_add_cosigners(self, xpub):
self.show_xpub_dialog(xpub=xpub, run_next=lambda x: self.run('choose_keystore'))
def add_cosigners(self, password, i):
self.add_cosigner_dialog(run_next=lambda x: self.on_cosigner(x, password, i), index=i, is_valid=keystore.is_xpub)
def on_cosigner(self, text, password, i):
k = keystore.from_text(text, password)
self.on_keystore(k)
def create_seed(self):
from electrum.mnemonic import Mnemonic
seed = Mnemonic('en').make_seed()
self.show_seed_dialog(run_next=self.confirm_seed, seed_text=seed)
def confirm_seed(self, seed):
self.confirm_seed_dialog(run_next=self.add_password, is_valid=lambda x: x==seed)
def add_password(self, text):
f = lambda pw: self.run('create_keystore', text, pw)
self.request_password(run_next=f)
def create_keystore(self, text, password):
k = keystore.from_text(text, password)
self.on_keystore(k, password)
def create_addresses(self):
def task():
self.wallet.synchronize()
self.wallet.storage.write()
self.terminate()
msg = _("Electrum is generating your addresses, please wait.")
self.waiting_dialog(task, msg) | # select device | random_line_split |
base_wizard.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import keystore
from wallet import Wallet, Imported_Wallet, Standard_Wallet, Multisig_Wallet, WalletStorage, wallet_types
from i18n import _
from plugins import run_hook
class BaseWizard(object):
def __init__(self, config, network, path):
super(BaseWizard, self).__init__()
self.config = config
self.network = network
self.storage = WalletStorage(path)
self.wallet = None
self.stack = []
self.plugin = None
def run(self, *args):
action = args[0]
args = args[1:]
self.stack.append((action, args))
if not action:
return
if type(action) is tuple:
self.plugin, action = action
if self.plugin and hasattr(self.plugin, action):
f = getattr(self.plugin, action)
apply(f, (self,) + args)
elif hasattr(self, action):
f = getattr(self, action)
apply(f, args)
else:
raise BaseException("unknown action", action)
def can_go_back(self):
return len(self.stack)>1
def go_back(self):
if not self.can_go_back():
return
self.stack.pop()
action, args = self.stack.pop()
self.run(action, *args)
def new(self):
name = os.path.basename(self.storage.path)
title = _("Welcome to the Electrum installation wizard.")
message = '\n'.join([
_("The wallet '%s' does not exist.") % name,
_("What kind of wallet do you want to create?")
])
wallet_kinds = [
('standard', _("Standard wallet")),
('2fa', _("Wallet with two-factor authentication")),
('multisig', _("Multi-signature wallet")),
('imported', _("Watch Bitcoin addresses")),
]
choices = [pair for pair in wallet_kinds if pair[0] in wallet_types]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.on_wallet_type)
def on_wallet_type(self, choice):
self.wallet_type = choice
if choice == 'standard':
action = 'choose_keystore'
elif choice == 'multisig':
action = 'choose_multisig'
elif choice == '2fa':
self.storage.put('wallet_type', '2fa')
self.storage.put('use_trustedcoin', True)
self.plugin = self.plugins.load_plugin('trustedcoin')
action = self.storage.get_action()
elif choice == 'imported':
action = 'import_addresses'
self.run(action)
def choose_multisig(self):
def on_multisig(m, n):
self.multisig_type = "%dof%d"%(m, n)
self.storage.put('wallet_type', self.multisig_type)
self.n = n
self.keystores = []
self.run('choose_keystore')
self.multisig_dialog(run_next=on_multisig)
def | (self):
assert self.wallet_type in ['standard', 'multisig']
c = self.wallet_type == 'multisig' and len(self.keystores)>0
title = _('Add cosigner') + ' %d'%len(self.keystores) if c else _('Keystore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
if not c:
choices = [
('create_seed', _('Create a new seed')),
('restore_seed', _('I already have a seed')),
('restore_from_key', _('Import keys')),
('choose_device', _('Use hardware device')),
]
else:
choices = [
('restore_from_key', _('Import cosigner key')),
('choose_device', _('Cosign with hardware device')),
]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)
def restore_seed(self):
# TODO: return derivation password too
self.restore_seed_dialog(run_next=self.add_password, is_valid=keystore.is_seed)
def on_restore(self, text):
if keystore.is_address_list(text):
self.wallet = Imported_Wallet(self.storage)
for x in text.split():
self.wallet.import_address(x)
self.terminate()
elif keystore.is_private(text):
self.add_password(text)
else:
self.create_keystore(text, None)
def import_addresses(self):
v = keystore.is_address_list
title = _("Import Bitcoin Addresses")
message = _("Enter a list of Bitcoin addresses. This will create a watching-only wallet.")
self.restore_keys_dialog(title=title, message=message, run_next=self.on_restore, is_valid=v)
def restore_from_key(self):
if self.wallet_type == 'standard':
v = keystore.is_any_key
title = _("Import keys")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub)."),
_("To create a spending wallet, please enter a master private key (xprv), or a list of Bitcoin private keys.")
])
else:
v = keystore.is_bip32_key
title = _("Master public or private key")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub)."),
_("To create a spending wallet, please enter a master private key (xprv).")
])
self.restore_keys_dialog(title=title, message=message, run_next=self.on_restore, is_valid=v)
def choose_device(self):
title = _('Hardware Keystore')
# check available plugins
support = self.plugins.get_hardware_support()
if not support:
msg = '\n'.join([
_('No hardware wallet support found on your system.'),
_('Please install the relevant libraries (eg python-trezor for Trezor).'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_device())
return
# scan devices
devices = []
for name, description, plugin in support:
devmgr = plugin.device_manager()
try:
u = devmgr.unpaired_device_infos(self, plugin)
except:
print "error", name
continue
devices += map(lambda x: (name, x), u)
if not devices:
msg = '\n'.join([
_('No hardware device detected.'),
_('To trigger a rescan, press \'next\'.'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_device())
return
# select device
self.devices = devices
choices = []
for name, device_info in devices:
choices.append( ((name, device_info), device_info.description) )
msg = _('Select a device') + ':'
self.choice_dialog(title=title, message=msg, choices=choices, run_next=self.on_device)
def on_device(self, name, device_info):
f = lambda x: self.run('on_hardware_account_id', name, device_info, x)
self.account_id_dialog(run_next=f)
def on_hardware_account_id(self, hw_type, device_info, account_id):
from keystore import hardware_keystore, bip44_derivation
derivation = bip44_derivation(int(account_id))
plugin = self.plugins.get_plugin(hw_type)
xpub = plugin.setup_device(device_info, derivation, self)
# create keystore
d = {
'type': 'hardware',
'hw_type': hw_type,
'derivation': derivation,
'xpub': xpub,
}
k = hardware_keystore(hw_type, d)
self.on_keystore(k, None)
def on_hardware_seed(self):
self.storage.put('key_type', 'hw_seed')
is_valid = lambda x: True #fixme: bip39
f = lambda seed: self.run('on_bip39_seed', seed)
self.restore_seed_dialog(run_next=f, is_valid=is_valid)
def on_bip39_seed(self, seed):
f = lambda passphrase: self.run('on_bip39_passphrase', seed, passphrase)
self.request_passphrase(self.storage.get('hw_type'), run_next=f)
def on_bip39_passphrase(self, seed, passphrase):
f = lambda account_id: self.run('on_bip44_account_id', seed, passphrase, account_id)
self.account_id_dialog(run_next=f)
def on_bip44_account_id(self, seed, passphrase, account_id):
f = lambda pw: self.run('on_bip44', seed, passphrase, account_id, pw)
self.request_password(run_next=f)
def on_bip44(self, seed, passphrase, account_id, password):
import keystore
k = keystore.BIP32_KeyStore()
k.add_seed(seed, password)
bip32_seed = keystore.bip39_to_seed(seed, passphrase)
derivation = "m/44'/0'/%d'"%account_id
self.storage.put('account_id', account_id)
k.add_xprv_from_seed(bip32_seed, derivation, password)
self.on_keystore(k, password)
def on_keystore(self, k, password):
if self.wallet_type == 'standard':
self.storage.put('keystore', k.dump())
self.wallet = Standard_Wallet(self.storage)
self.run('create_addresses')
elif self.wallet_type == 'multisig':
if k.xpub in map(lambda x: x.xpub, self.keystores):
raise BaseException('duplicate key')
self.keystores.append(k)
if len(self.keystores) == 1:
xpub = k.get_master_public_key()
self.stack = []
self.run('show_xpub_and_add_cosigners', xpub)
elif len(self.keystores) < self.n:
self.run('choose_keystore')
else:
for i, k in enumerate(self.keystores):
self.storage.put('x%d/'%(i+1), k.dump())
self.storage.write()
self.wallet = Multisig_Wallet(self.storage)
self.run('create_addresses')
def show_xpub_and_add_cosigners(self, xpub):
self.show_xpub_dialog(xpub=xpub, run_next=lambda x: self.run('choose_keystore'))
def add_cosigners(self, password, i):
self.add_cosigner_dialog(run_next=lambda x: self.on_cosigner(x, password, i), index=i, is_valid=keystore.is_xpub)
def on_cosigner(self, text, password, i):
k = keystore.from_text(text, password)
self.on_keystore(k)
def create_seed(self):
from electrum.mnemonic import Mnemonic
seed = Mnemonic('en').make_seed()
self.show_seed_dialog(run_next=self.confirm_seed, seed_text=seed)
def confirm_seed(self, seed):
self.confirm_seed_dialog(run_next=self.add_password, is_valid=lambda x: x==seed)
def add_password(self, text):
f = lambda pw: self.run('create_keystore', text, pw)
self.request_password(run_next=f)
def create_keystore(self, text, password):
k = keystore.from_text(text, password)
self.on_keystore(k, password)
def create_addresses(self):
def task():
self.wallet.synchronize()
self.wallet.storage.write()
self.terminate()
msg = _("Electrum is generating your addresses, please wait.")
self.waiting_dialog(task, msg)
| choose_keystore | identifier_name |
base_wizard.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import keystore
from wallet import Wallet, Imported_Wallet, Standard_Wallet, Multisig_Wallet, WalletStorage, wallet_types
from i18n import _
from plugins import run_hook
class BaseWizard(object):
| def __init__(self, config, network, path):
super(BaseWizard, self).__init__()
self.config = config
self.network = network
self.storage = WalletStorage(path)
self.wallet = None
self.stack = []
self.plugin = None
def run(self, *args):
action = args[0]
args = args[1:]
self.stack.append((action, args))
if not action:
return
if type(action) is tuple:
self.plugin, action = action
if self.plugin and hasattr(self.plugin, action):
f = getattr(self.plugin, action)
apply(f, (self,) + args)
elif hasattr(self, action):
f = getattr(self, action)
apply(f, args)
else:
raise BaseException("unknown action", action)
def can_go_back(self):
return len(self.stack)>1
def go_back(self):
if not self.can_go_back():
return
self.stack.pop()
action, args = self.stack.pop()
self.run(action, *args)
def new(self):
name = os.path.basename(self.storage.path)
title = _("Welcome to the Electrum installation wizard.")
message = '\n'.join([
_("The wallet '%s' does not exist.") % name,
_("What kind of wallet do you want to create?")
])
wallet_kinds = [
('standard', _("Standard wallet")),
('2fa', _("Wallet with two-factor authentication")),
('multisig', _("Multi-signature wallet")),
('imported', _("Watch Bitcoin addresses")),
]
choices = [pair for pair in wallet_kinds if pair[0] in wallet_types]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.on_wallet_type)
def on_wallet_type(self, choice):
self.wallet_type = choice
if choice == 'standard':
action = 'choose_keystore'
elif choice == 'multisig':
action = 'choose_multisig'
elif choice == '2fa':
self.storage.put('wallet_type', '2fa')
self.storage.put('use_trustedcoin', True)
self.plugin = self.plugins.load_plugin('trustedcoin')
action = self.storage.get_action()
elif choice == 'imported':
action = 'import_addresses'
self.run(action)
def choose_multisig(self):
def on_multisig(m, n):
self.multisig_type = "%dof%d"%(m, n)
self.storage.put('wallet_type', self.multisig_type)
self.n = n
self.keystores = []
self.run('choose_keystore')
self.multisig_dialog(run_next=on_multisig)
def choose_keystore(self):
assert self.wallet_type in ['standard', 'multisig']
c = self.wallet_type == 'multisig' and len(self.keystores)>0
title = _('Add cosigner') + ' %d'%len(self.keystores) if c else _('Keystore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
if not c:
choices = [
('create_seed', _('Create a new seed')),
('restore_seed', _('I already have a seed')),
('restore_from_key', _('Import keys')),
('choose_device', _('Use hardware device')),
]
else:
choices = [
('restore_from_key', _('Import cosigner key')),
('choose_device', _('Cosign with hardware device')),
]
self.choice_dialog(title=title, message=message, choices=choices, run_next=self.run)
def restore_seed(self):
# TODO: return derivation password too
self.restore_seed_dialog(run_next=self.add_password, is_valid=keystore.is_seed)
def on_restore(self, text):
if keystore.is_address_list(text):
self.wallet = Imported_Wallet(self.storage)
for x in text.split():
self.wallet.import_address(x)
self.terminate()
elif keystore.is_private(text):
self.add_password(text)
else:
self.create_keystore(text, None)
def import_addresses(self):
v = keystore.is_address_list
title = _("Import Bitcoin Addresses")
message = _("Enter a list of Bitcoin addresses. This will create a watching-only wallet.")
self.restore_keys_dialog(title=title, message=message, run_next=self.on_restore, is_valid=v)
def restore_from_key(self):
if self.wallet_type == 'standard':
v = keystore.is_any_key
title = _("Import keys")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub)."),
_("To create a spending wallet, please enter a master private key (xprv), or a list of Bitcoin private keys.")
])
else:
v = keystore.is_bip32_key
title = _("Master public or private key")
message = ' '.join([
_("To create a watching-only wallet, please enter your master public key (xpub)."),
_("To create a spending wallet, please enter a master private key (xprv).")
])
self.restore_keys_dialog(title=title, message=message, run_next=self.on_restore, is_valid=v)
def choose_device(self):
title = _('Hardware Keystore')
# check available plugins
support = self.plugins.get_hardware_support()
if not support:
msg = '\n'.join([
_('No hardware wallet support found on your system.'),
_('Please install the relevant libraries (eg python-trezor for Trezor).'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_device())
return
# scan devices
devices = []
for name, description, plugin in support:
devmgr = plugin.device_manager()
try:
u = devmgr.unpaired_device_infos(self, plugin)
except:
print "error", name
continue
devices += map(lambda x: (name, x), u)
if not devices:
msg = '\n'.join([
_('No hardware device detected.'),
_('To trigger a rescan, press \'next\'.'),
])
self.confirm_dialog(title=title, message=msg, run_next= lambda x: self.choose_device())
return
# select device
self.devices = devices
choices = []
for name, device_info in devices:
choices.append( ((name, device_info), device_info.description) )
msg = _('Select a device') + ':'
self.choice_dialog(title=title, message=msg, choices=choices, run_next=self.on_device)
def on_device(self, name, device_info):
f = lambda x: self.run('on_hardware_account_id', name, device_info, x)
self.account_id_dialog(run_next=f)
def on_hardware_account_id(self, hw_type, device_info, account_id):
from keystore import hardware_keystore, bip44_derivation
derivation = bip44_derivation(int(account_id))
plugin = self.plugins.get_plugin(hw_type)
xpub = plugin.setup_device(device_info, derivation, self)
# create keystore
d = {
'type': 'hardware',
'hw_type': hw_type,
'derivation': derivation,
'xpub': xpub,
}
k = hardware_keystore(hw_type, d)
self.on_keystore(k, None)
def on_hardware_seed(self):
self.storage.put('key_type', 'hw_seed')
is_valid = lambda x: True #fixme: bip39
f = lambda seed: self.run('on_bip39_seed', seed)
self.restore_seed_dialog(run_next=f, is_valid=is_valid)
def on_bip39_seed(self, seed):
f = lambda passphrase: self.run('on_bip39_passphrase', seed, passphrase)
self.request_passphrase(self.storage.get('hw_type'), run_next=f)
def on_bip39_passphrase(self, seed, passphrase):
f = lambda account_id: self.run('on_bip44_account_id', seed, passphrase, account_id)
self.account_id_dialog(run_next=f)
def on_bip44_account_id(self, seed, passphrase, account_id):
f = lambda pw: self.run('on_bip44', seed, passphrase, account_id, pw)
self.request_password(run_next=f)
def on_bip44(self, seed, passphrase, account_id, password):
import keystore
k = keystore.BIP32_KeyStore()
k.add_seed(seed, password)
bip32_seed = keystore.bip39_to_seed(seed, passphrase)
derivation = "m/44'/0'/%d'"%account_id
self.storage.put('account_id', account_id)
k.add_xprv_from_seed(bip32_seed, derivation, password)
self.on_keystore(k, password)
def on_keystore(self, k, password):
if self.wallet_type == 'standard':
self.storage.put('keystore', k.dump())
self.wallet = Standard_Wallet(self.storage)
self.run('create_addresses')
elif self.wallet_type == 'multisig':
if k.xpub in map(lambda x: x.xpub, self.keystores):
raise BaseException('duplicate key')
self.keystores.append(k)
if len(self.keystores) == 1:
xpub = k.get_master_public_key()
self.stack = []
self.run('show_xpub_and_add_cosigners', xpub)
elif len(self.keystores) < self.n:
self.run('choose_keystore')
else:
for i, k in enumerate(self.keystores):
self.storage.put('x%d/'%(i+1), k.dump())
self.storage.write()
self.wallet = Multisig_Wallet(self.storage)
self.run('create_addresses')
def show_xpub_and_add_cosigners(self, xpub):
self.show_xpub_dialog(xpub=xpub, run_next=lambda x: self.run('choose_keystore'))
def add_cosigners(self, password, i):
self.add_cosigner_dialog(run_next=lambda x: self.on_cosigner(x, password, i), index=i, is_valid=keystore.is_xpub)
def on_cosigner(self, text, password, i):
k = keystore.from_text(text, password)
self.on_keystore(k)
def create_seed(self):
from electrum.mnemonic import Mnemonic
seed = Mnemonic('en').make_seed()
self.show_seed_dialog(run_next=self.confirm_seed, seed_text=seed)
def confirm_seed(self, seed):
self.confirm_seed_dialog(run_next=self.add_password, is_valid=lambda x: x==seed)
def add_password(self, text):
f = lambda pw: self.run('create_keystore', text, pw)
self.request_password(run_next=f)
def create_keystore(self, text, password):
k = keystore.from_text(text, password)
self.on_keystore(k, password)
def create_addresses(self):
def task():
self.wallet.synchronize()
self.wallet.storage.write()
self.terminate()
msg = _("Electrum is generating your addresses, please wait.")
self.waiting_dialog(task, msg) | identifier_body | |
plugin.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package verify
import (
"context"
"errors"
"fmt"
"time"
"github.com/google/go-github/v32/github"
"sigs.k8s.io/kubebuilder-release-tools/verify/pkg/log"
)
const (
actionOpen = "opened"
actionReopen = "reopened"
actionEdit = "edited"
actionSync = "synchronize"
)
// ErrorWithHelp allows PRPlugin.ProcessPR to provide extended descriptions
type ErrorWithHelp interface {
error
Help() string
}
// PRPlugin handles pull request events
type PRPlugin struct {
ProcessPR func(pr *github.PullRequest) (string, error)
Name string
Title string
log.Logger
}
// init initializes the PRPlugin
func (p *PRPlugin) init() {
p.Logger = log.NewFor(p.Name)
p.Debug("plugin initialized")
}
// processPR executes the provided ProcessPR and parses the result
func (p PRPlugin) processPR(pr *github.PullRequest) (conclusion, summary, text string, err error) {
p.Debug("execute the plugin checks")
text, err = p.ProcessPR(pr)
if err != nil {
conclusion = "failure"
summary = err.Error()
var helpErr ErrorWithHelp
if errors.As(err, &helpErr) {
text = helpErr.Help()
}
} else {
conclusion = "success"
summary = "Success"
}
// Log in case we can't submit the result for some reason
p.Debugf("plugin conclusion: %q", conclusion)
p.Debugf("plugin result summary: %q", summary)
p.Debugf("plugin result details: %q", text)
return conclusion, summary, text, err
}
// processAndSubmit performs the checks and submits the result
func (p PRPlugin) processAndSubmit(env *ActionsEnv, checkRun *github.CheckRun) (*github.CheckRun, error) {
// Process the PR
conclusion, summary, text, procErr := p.processPR(env.Event.PullRequest)
// Update the check run
checkRun, err := p.finishCheckRun(env.Client, env.Owner, env.Repo, checkRun.GetID(), conclusion, summary, text)
if err != nil {
return checkRun, err
}
// Return failure here too so that the whole suite fails (since the actions
// suite seems to ignore failing check runs when calculating general failure)
if procErr != nil {
return checkRun, fmt.Errorf("failed: %v", procErr)
}
return checkRun, nil
}
////////////////////////////////////////////////////////////////////////////////
// Check API calls //
////////////////////////////////////////////////////////////////////////////////
// createCheckRun creates a new Check-Run.
// It returns an error in case it couldn't be created.
func (p PRPlugin) createCheckRun(client *github.Client, owner, repo, headSHA string) (*github.CheckRun, error) {
p.Debugf("creating check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
checkRun, res, err := client.Checks.CreateCheckRun(
context.TODO(),
owner,
repo,
github.CreateCheckRunOptions{
Name: p.Name,
HeadSHA: headSHA,
Status: Started.StringP(),
},
)
p.Debugf("create check API response: %+v", res)
p.Debugf("created run: %+v", checkRun)
if err != nil {
return nil, fmt.Errorf("unable to create check run: %w", err)
}
return checkRun, nil
}
// getCheckRun returns the Check-Run, creating it if it doesn't exist.
// It returns an error in case it didn't exist and couldn't be created, or if there are multiple matches.
func (p PRPlugin) getCheckRun(client *github.Client, owner, repo, headSHA string) (*github.CheckRun, error) {
p.Debugf("getting check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
checkRunList, res, err := client.Checks.ListCheckRunsForRef(
context.TODO(),
owner,
repo,
headSHA,
&github.ListCheckRunsOptions{
CheckName: github.String(p.Name),
},
)
p.Debugf("list check API response: %+v", res)
p.Debugf("listed runs: %+v", checkRunList)
if err != nil {
return nil, fmt.Errorf("unable to get check run: %w", err)
}
switch n := *checkRunList.Total; {
case n == 0:
return p.createCheckRun(client, owner, repo, headSHA)
case n == 1:
return checkRunList.CheckRuns[0], nil
case n > 1:
return nil, fmt.Errorf("multiple instances of `%s` check run found on %s/%s @ %s",
p.Name, owner, repo, headSHA)
default: // Should never happen
return nil, fmt.Errorf("negative number of instances (%d) of `%s` check run found on %s/%s @ %s",
n, p.Name, owner, repo, headSHA)
}
}
// resetCheckRun returns the Check-Run with executing status, creating it if it doesn't exist.
// It returns an error in case it didn't exist and couldn't be created, if there are multiple matches,
// or if it exists but couldn't be updated.
func (p PRPlugin) resetCheckRun(client *github.Client, owner, repo string, headSHA string) (*github.CheckRun, error) {
checkRun, err := p.getCheckRun(client, owner, repo, headSHA)
// If it errored, or it was created but not finished, we don't need to update it
if err != nil || Started.Equal(checkRun.GetStatus()) {
return checkRun, err
}
p.Debugf("resetting check run %q on %s/%s...", p.Name, owner, repo)
checkRun, updateResp, err := client.Checks.UpdateCheckRun(
context.TODO(),
owner,
repo,
checkRun.GetID(),
github.UpdateCheckRunOptions{
Name: p.Name,
Status: Started.StringP(),
},
)
p.Debugf("update check API response: %+v", updateResp)
p.Debugf("updated run: %+v", checkRun)
if err != nil {
return checkRun, fmt.Errorf("unable to reset check run: %w", err)
}
return checkRun, nil
}
// finishCheckRun updates the Check-Run with id checkRunID setting its output.
// It returns an error in case it couldn't be updated.
func (p PRPlugin) finishCheckRun(client *github.Client, owner, repo string, checkRunID int64, conclusion, summary, text string) (*github.CheckRun, error) {
p.Debugf("adding results to check run %q on %s/%s...", p.Name, owner, repo)
checkRun, updateResp, err := client.Checks.UpdateCheckRun(context.TODO(), owner, repo, checkRunID, github.UpdateCheckRunOptions{
Name: p.Name,
Conclusion: github.String(conclusion),
CompletedAt: &github.Timestamp{Time: time.Now()},
Output: &github.CheckRunOutput{
Title: github.String(p.Title),
Summary: github.String(summary),
Text: github.String(text),
},
})
p.Debugf("update check API response: %+v", updateResp)
p.Debugf("updated run: %+v", checkRun)
if err != nil {
return checkRun, fmt.Errorf("unable to update check run with results: %w", err)
}
return checkRun, nil
}
// duplicateCheckRun creates a new Check-Run with the same info as the provided one but for a new headSHA
func (p PRPlugin) duplicateCheckRun(client *github.Client, owner, repo, headSHA string, checkRun *github.CheckRun) (*github.CheckRun, error) {
p.Debugf("duplicating check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
checkRun, res, err := client.Checks.CreateCheckRun(
context.TODO(),
owner,
repo,
github.CreateCheckRunOptions{
Name: p.Name,
HeadSHA: headSHA,
DetailsURL: checkRun.DetailsURL,
ExternalID: checkRun.ExternalID,
Status: checkRun.Status,
Conclusion: checkRun.Conclusion,
StartedAt: checkRun.StartedAt,
CompletedAt: checkRun.CompletedAt,
Output: checkRun.Output,
},
)
p.Debugf("create check API response: %+v", res)
p.Debugf("created run: %+v", checkRun)
if err != nil {
return checkRun, fmt.Errorf("unable to duplicate check run: %w", err)
}
return checkRun, nil
}
////////////////////////////////////////////////////////////////////////////////
// Entrypoint //
////////////////////////////////////////////////////////////////////////////////
// entrypoint will call the corresponding handler
func (p PRPlugin) entrypoint(env *ActionsEnv) (err error) {
switch env.Event.GetAction() {
case actionOpen:
err = p.onOpen(env)
case actionReopen:
err = p.onReopen(env)
case actionEdit:
err = p.onEdit(env)
case actionSync:
err = p.onSync(env)
default:
p.Warningf("action %q received with no defined procedure, skipping", env.Event.GetAction())
}
return
}
// onOpen handles "open" actions
func (p PRPlugin) onOpen(env *ActionsEnv) error {
p.Debugf("%q handler", actionOpen)
// Create the check run
checkRun, err := p.createCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetPullRequest().GetHead().GetSHA())
if err != nil {
return err
}
// Process the PR and submit the results
_, err = p.processAndSubmit(env, checkRun)
return err
}
// onReopen handles "reopen" actions
func (p PRPlugin) onReopen(env *ActionsEnv) error {
p.Debugf("%q handler", actionReopen)
// Get the check run
checkRun, err := p.getCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetPullRequest().GetHead().GetSHA())
if err != nil {
return err
}
// Rerun the tests if they weren't finished
if !Finished.Equal(checkRun.GetStatus()) {
// Process the PR and submit the results
_, err = p.processAndSubmit(env, checkRun)
return err
}
// Return failure here too so that the whole suite fails (since the actions
// suite seems to ignore failing check runs when calculating general failure)
if *checkRun.Conclusion == "failure" {
return fmt.Errorf("failed: %v", *checkRun.Output.Summary)
}
return nil
}
// onEdit handles "edit" actions
func (p PRPlugin) onEdit(env *ActionsEnv) error {
p.Debugf("%q handler", actionEdit)
// Reset the check run
checkRun, err := p.resetCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetPullRequest().GetHead().GetSHA())
if err != nil {
return err
}
// Process the PR and submit the results
_, err = p.processAndSubmit(env, checkRun)
return err
}
// onSync handles "synchronize" actions
func (p PRPlugin) onSync(env *ActionsEnv) error | {
p.Debugf("%q handler", actionSync)
// Get the check run
checkRun, err := p.getCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetBefore())
if err != nil {
return err
}
// Rerun the tests if they weren't finished
if !Finished.Equal(checkRun.GetStatus()) {
// Process the PR and submit the results
checkRun, err = p.processAndSubmit(env, checkRun)
if err != nil {
return err
}
}
// Create a duplicate for the new commit
checkRun, err = p.duplicateCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetAfter(), checkRun)
if err != nil {
return err
}
// Return failure here too so that the whole suite fails (since the actions
// suite seems to ignore failing check runs when calculating general failure)
if *checkRun.Conclusion == "failure" {
return fmt.Errorf("failed: %v", *checkRun.Output.Summary)
}
return nil
} | identifier_body | |
plugin.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package verify
import (
"context"
"errors"
"fmt"
"time"
"github.com/google/go-github/v32/github"
"sigs.k8s.io/kubebuilder-release-tools/verify/pkg/log"
)
const (
actionOpen = "opened"
actionReopen = "reopened"
actionEdit = "edited"
actionSync = "synchronize"
)
// ErrorWithHelp allows PRPlugin.ProcessPR to provide extended descriptions
type ErrorWithHelp interface {
error
Help() string
}
// PRPlugin handles pull request events
type PRPlugin struct {
ProcessPR func(pr *github.PullRequest) (string, error)
Name string
Title string
log.Logger
}
// init initializes the PRPlugin
func (p *PRPlugin) init() {
p.Logger = log.NewFor(p.Name)
p.Debug("plugin initialized")
}
// processPR executes the provided ProcessPR and parses the result
func (p PRPlugin) processPR(pr *github.PullRequest) (conclusion, summary, text string, err error) {
p.Debug("execute the plugin checks")
text, err = p.ProcessPR(pr)
if err != nil {
conclusion = "failure"
summary = err.Error()
var helpErr ErrorWithHelp
if errors.As(err, &helpErr) {
text = helpErr.Help()
}
} else {
conclusion = "success"
summary = "Success"
}
// Log in case we can't submit the result for some reason
p.Debugf("plugin conclusion: %q", conclusion)
p.Debugf("plugin result summary: %q", summary)
p.Debugf("plugin result details: %q", text)
return conclusion, summary, text, err
}
// processAndSubmit performs the checks and submits the result
func (p PRPlugin) processAndSubmit(env *ActionsEnv, checkRun *github.CheckRun) (*github.CheckRun, error) {
// Process the PR
conclusion, summary, text, procErr := p.processPR(env.Event.PullRequest)
// Update the check run
checkRun, err := p.finishCheckRun(env.Client, env.Owner, env.Repo, checkRun.GetID(), conclusion, summary, text)
if err != nil {
return checkRun, err
}
// Return failure here too so that the whole suite fails (since the actions
// suite seems to ignore failing check runs when calculating general failure)
if procErr != nil {
return checkRun, fmt.Errorf("failed: %v", procErr)
}
return checkRun, nil
}
////////////////////////////////////////////////////////////////////////////////
// Check API calls //
////////////////////////////////////////////////////////////////////////////////
// createCheckRun creates a new Check-Run.
// It returns an error in case it couldn't be created.
func (p PRPlugin) createCheckRun(client *github.Client, owner, repo, headSHA string) (*github.CheckRun, error) {
p.Debugf("creating check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
checkRun, res, err := client.Checks.CreateCheckRun(
context.TODO(),
owner,
repo,
github.CreateCheckRunOptions{
Name: p.Name,
HeadSHA: headSHA,
Status: Started.StringP(),
},
)
p.Debugf("create check API response: %+v", res)
p.Debugf("created run: %+v", checkRun)
if err != nil {
return nil, fmt.Errorf("unable to create check run: %w", err)
}
return checkRun, nil
}
// getCheckRun returns the Check-Run, creating it if it doesn't exist.
// It returns an error in case it didn't exist and couldn't be created, or if there are multiple matches.
func (p PRPlugin) getCheckRun(client *github.Client, owner, repo, headSHA string) (*github.CheckRun, error) {
p.Debugf("getting check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
checkRunList, res, err := client.Checks.ListCheckRunsForRef(
context.TODO(),
owner,
repo,
headSHA,
&github.ListCheckRunsOptions{
CheckName: github.String(p.Name),
},
)
p.Debugf("list check API response: %+v", res)
p.Debugf("listed runs: %+v", checkRunList)
if err != nil {
return nil, fmt.Errorf("unable to get check run: %w", err)
}
switch n := *checkRunList.Total; {
case n == 0:
return p.createCheckRun(client, owner, repo, headSHA)
case n == 1:
return checkRunList.CheckRuns[0], nil
case n > 1:
return nil, fmt.Errorf("multiple instances of `%s` check run found on %s/%s @ %s",
p.Name, owner, repo, headSHA)
default: // Should never happen
return nil, fmt.Errorf("negative number of instances (%d) of `%s` check run found on %s/%s @ %s",
n, p.Name, owner, repo, headSHA)
}
}
// resetCheckRun returns the Check-Run with executing status, creating it if it doesn't exist.
// It returns an error in case it didn't exist and couldn't be created, if there are multiple matches,
// or if it exists but couldn't be updated.
func (p PRPlugin) resetCheckRun(client *github.Client, owner, repo string, headSHA string) (*github.CheckRun, error) {
checkRun, err := p.getCheckRun(client, owner, repo, headSHA)
// If it errored, or it was created but not finished, we don't need to update it
if err != nil || Started.Equal(checkRun.GetStatus()) {
return checkRun, err
}
p.Debugf("resetting check run %q on %s/%s...", p.Name, owner, repo)
checkRun, updateResp, err := client.Checks.UpdateCheckRun(
context.TODO(),
owner,
repo,
checkRun.GetID(),
github.UpdateCheckRunOptions{
Name: p.Name,
Status: Started.StringP(),
},
)
p.Debugf("update check API response: %+v", updateResp)
p.Debugf("updated run: %+v", checkRun)
if err != nil {
return checkRun, fmt.Errorf("unable to reset check run: %w", err)
}
return checkRun, nil
}
// finishCheckRun updates the Check-Run with id checkRunID setting its output.
// It returns an error in case it couldn't be updated.
func (p PRPlugin) finishCheckRun(client *github.Client, owner, repo string, checkRunID int64, conclusion, summary, text string) (*github.CheckRun, error) {
p.Debugf("adding results to check run %q on %s/%s...", p.Name, owner, repo)
checkRun, updateResp, err := client.Checks.UpdateCheckRun(context.TODO(), owner, repo, checkRunID, github.UpdateCheckRunOptions{
Name: p.Name,
Conclusion: github.String(conclusion),
CompletedAt: &github.Timestamp{Time: time.Now()},
Output: &github.CheckRunOutput{
Title: github.String(p.Title),
Summary: github.String(summary),
Text: github.String(text),
},
})
p.Debugf("update check API response: %+v", updateResp)
p.Debugf("updated run: %+v", checkRun)
if err != nil {
return checkRun, fmt.Errorf("unable to update check run with results: %w", err)
}
return checkRun, nil
}
| p.Debugf("duplicating check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
checkRun, res, err := client.Checks.CreateCheckRun(
context.TODO(),
owner,
repo,
github.CreateCheckRunOptions{
Name: p.Name,
HeadSHA: headSHA,
DetailsURL: checkRun.DetailsURL,
ExternalID: checkRun.ExternalID,
Status: checkRun.Status,
Conclusion: checkRun.Conclusion,
StartedAt: checkRun.StartedAt,
CompletedAt: checkRun.CompletedAt,
Output: checkRun.Output,
},
)
p.Debugf("create check API response: %+v", res)
p.Debugf("created run: %+v", checkRun)
if err != nil {
return checkRun, fmt.Errorf("unable to duplicate check run: %w", err)
}
return checkRun, nil
}
////////////////////////////////////////////////////////////////////////////////
// Entrypoint //
////////////////////////////////////////////////////////////////////////////////
// entrypoint will call the corresponding handler
func (p PRPlugin) entrypoint(env *ActionsEnv) (err error) {
switch env.Event.GetAction() {
case actionOpen:
err = p.onOpen(env)
case actionReopen:
err = p.onReopen(env)
case actionEdit:
err = p.onEdit(env)
case actionSync:
err = p.onSync(env)
default:
p.Warningf("action %q received with no defined procedure, skipping", env.Event.GetAction())
}
return
}
// onOpen handles "open" actions
func (p PRPlugin) onOpen(env *ActionsEnv) error {
p.Debugf("%q handler", actionOpen)
// Create the check run
checkRun, err := p.createCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetPullRequest().GetHead().GetSHA())
if err != nil {
return err
}
// Process the PR and submit the results
_, err = p.processAndSubmit(env, checkRun)
return err
}
// onReopen handles "reopen" actions
func (p PRPlugin) onReopen(env *ActionsEnv) error {
p.Debugf("%q handler", actionReopen)
// Get the check run
checkRun, err := p.getCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetPullRequest().GetHead().GetSHA())
if err != nil {
return err
}
// Rerun the tests if they weren't finished
if !Finished.Equal(checkRun.GetStatus()) {
// Process the PR and submit the results
_, err = p.processAndSubmit(env, checkRun)
return err
}
// Return failure here too so that the whole suite fails (since the actions
// suite seems to ignore failing check runs when calculating general failure)
if *checkRun.Conclusion == "failure" {
return fmt.Errorf("failed: %v", *checkRun.Output.Summary)
}
return nil
}
// onEdit handles "edit" actions
func (p PRPlugin) onEdit(env *ActionsEnv) error {
p.Debugf("%q handler", actionEdit)
// Reset the check run
checkRun, err := p.resetCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetPullRequest().GetHead().GetSHA())
if err != nil {
return err
}
// Process the PR and submit the results
_, err = p.processAndSubmit(env, checkRun)
return err
}
// onSync handles "synchronize" actions
func (p PRPlugin) onSync(env *ActionsEnv) error {
p.Debugf("%q handler", actionSync)
// Get the check run
checkRun, err := p.getCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetBefore())
if err != nil {
return err
}
// Rerun the tests if they weren't finished
if !Finished.Equal(checkRun.GetStatus()) {
// Process the PR and submit the results
checkRun, err = p.processAndSubmit(env, checkRun)
if err != nil {
return err
}
}
// Create a duplicate for the new commit
checkRun, err = p.duplicateCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetAfter(), checkRun)
if err != nil {
return err
}
// Return failure here too so that the whole suite fails (since the actions
// suite seems to ignore failing check runs when calculating general failure)
if *checkRun.Conclusion == "failure" {
return fmt.Errorf("failed: %v", *checkRun.Output.Summary)
}
return nil
} | // duplicateCheckRun creates a new Check-Run with the same info as the provided one but for a new headSHA
func (p PRPlugin) duplicateCheckRun(client *github.Client, owner, repo, headSHA string, checkRun *github.CheckRun) (*github.CheckRun, error) { | random_line_split |
plugin.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package verify
import (
"context"
"errors"
"fmt"
"time"
"github.com/google/go-github/v32/github"
"sigs.k8s.io/kubebuilder-release-tools/verify/pkg/log"
)
const (
actionOpen = "opened"
actionReopen = "reopened"
actionEdit = "edited"
actionSync = "synchronize"
)
// ErrorWithHelp allows PRPlugin.ProcessPR to provide extended descriptions
type ErrorWithHelp interface {
error
Help() string
}
// PRPlugin handles pull request events
type PRPlugin struct {
ProcessPR func(pr *github.PullRequest) (string, error)
Name string
Title string
log.Logger
}
// init initializes the PRPlugin
func (p *PRPlugin) init() {
	// Give the plugin its own named logger so every log line is attributable to it
	p.Logger = log.NewFor(p.Name)
	p.Debug("plugin initialized")
}
// processPR executes the provided ProcessPR and parses the result.
// It maps a nil error to a "success" conclusion and a non-nil error to
// "failure", using the error's Help() text as details when available.
// The original error is returned alongside the parsed fields.
func (p PRPlugin) processPR(pr *github.PullRequest) (conclusion, summary, text string, err error) {
	p.Debug("execute the plugin checks")
	text, err = p.ProcessPR(pr)
	switch {
	case err == nil:
		conclusion, summary = "success", "Success"
	default:
		conclusion, summary = "failure", err.Error()
		var helpErr ErrorWithHelp
		if errors.As(err, &helpErr) {
			// Errors carrying extended help replace the plain details text
			text = helpErr.Help()
		}
	}
	// Log in case we can't submit the result for some reason
	p.Debugf("plugin conclusion: %q", conclusion)
	p.Debugf("plugin result summary: %q", summary)
	p.Debugf("plugin result details: %q", text)
	return conclusion, summary, text, err
}
// processAndSubmit performs the checks and submits the result.
// Submission errors take precedence; otherwise a processing failure is
// surfaced as an error so the whole suite fails.
func (p PRPlugin) processAndSubmit(env *ActionsEnv, checkRun *github.CheckRun) (*github.CheckRun, error) {
	// Run the plugin's checks against the pull request
	conclusion, summary, text, procErr := p.processPR(env.Event.PullRequest)

	// Publish the outcome on the existing check run
	updated, err := p.finishCheckRun(env.Client, env.Owner, env.Repo, checkRun.GetID(), conclusion, summary, text)
	if err != nil {
		return updated, err
	}

	// Return failure here too so that the whole suite fails (since the actions
	// suite seems to ignore failing check runs when calculating general failure)
	if procErr != nil {
		return updated, fmt.Errorf("failed: %v", procErr)
	}
	return updated, nil
}
////////////////////////////////////////////////////////////////////////////////
// Check API calls //
////////////////////////////////////////////////////////////////////////////////
// createCheckRun creates a new Check-Run.
// It returns an error in case it couldn't be created.
func (p PRPlugin) createCheckRun(client *github.Client, owner, repo, headSHA string) (*github.CheckRun, error) {
	p.Debugf("creating check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
	opts := github.CreateCheckRunOptions{
		Name:    p.Name,
		HeadSHA: headSHA,
		Status:  Started.StringP(),
	}
	checkRun, res, err := client.Checks.CreateCheckRun(context.TODO(), owner, repo, opts)
	// Log the raw API response before the error check so failed calls stay debuggable
	p.Debugf("create check API response: %+v", res)
	p.Debugf("created run: %+v", checkRun)
	if err != nil {
		return nil, fmt.Errorf("unable to create check run: %w", err)
	}
	return checkRun, nil
}
// getCheckRun returns the Check-Run, creating it if it doesn't exist.
// It returns an error in case it didn't exist and couldn't be created, or if there are multiple matches.
func (p PRPlugin) getCheckRun(client *github.Client, owner, repo, headSHA string) (*github.CheckRun, error) {
	p.Debugf("getting check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
	checkRunList, res, err := client.Checks.ListCheckRunsForRef(
		context.TODO(),
		owner,
		repo,
		headSHA,
		&github.ListCheckRunsOptions{
			CheckName: github.String(p.Name),
		},
	)
	p.Debugf("list check API response: %+v", res)
	p.Debugf("listed runs: %+v", checkRunList)
	if err != nil {
		return nil, fmt.Errorf("unable to get check run: %w", err)
	}
	// Use the generated nil-safe getter instead of dereferencing Total directly:
	// go-github leaves unset response fields as nil pointers, and a bare
	// *checkRunList.Total would panic on such a response.
	switch n := checkRunList.GetTotal(); {
	case n == 0:
		// Not found: lazily create it so callers always get a run to work with
		return p.createCheckRun(client, owner, repo, headSHA)
	case n == 1:
		return checkRunList.CheckRuns[0], nil
	case n > 1:
		return nil, fmt.Errorf("multiple instances of `%s` check run found on %s/%s @ %s",
			p.Name, owner, repo, headSHA)
	default: // Should never happen
		return nil, fmt.Errorf("negative number of instances (%d) of `%s` check run found on %s/%s @ %s",
			n, p.Name, owner, repo, headSHA)
	}
}
// resetCheckRun returns the Check-Run with executing status, creating it if it doesn't exist.
// It returns an error in case it didn't exist and couldn't be created, if there are multiple matches,
// or if it exists but couldn't be updated.
func (p PRPlugin) resetCheckRun(client *github.Client, owner, repo string, headSHA string) (*github.CheckRun, error) {
	// Fetch (or lazily create) the run for this head SHA
	checkRun, err := p.getCheckRun(client, owner, repo, headSHA)
	// If it errored, or it was created but not finished, we don't need to update it
	if err != nil || Started.Equal(checkRun.GetStatus()) {
		return checkRun, err
	}
	p.Debugf("resetting check run %q on %s/%s...", p.Name, owner, repo)
	// Flip the run back to the "started" status so a fresh processing pass
	// can publish new results over the old conclusion
	checkRun, updateResp, err := client.Checks.UpdateCheckRun(
		context.TODO(),
		owner,
		repo,
		checkRun.GetID(),
		github.UpdateCheckRunOptions{
			Name:   p.Name,
			Status: Started.StringP(),
		},
	)
	// Responses are logged before the error check so failed calls remain debuggable
	p.Debugf("update check API response: %+v", updateResp)
	p.Debugf("updated run: %+v", checkRun)
	if err != nil {
		return checkRun, fmt.Errorf("unable to reset check run: %w", err)
	}
	return checkRun, nil
}
// finishCheckRun updates the Check-Run with id checkRunID setting its output.
// It returns an error in case it couldn't be updated.
func (p PRPlugin) finishCheckRun(client *github.Client, owner, repo string, checkRunID int64, conclusion, summary, text string) (*github.CheckRun, error) {
	p.Debugf("adding results to check run %q on %s/%s...", p.Name, owner, repo)
	// Build the completed-run payload: conclusion plus a rendered output block
	opts := github.UpdateCheckRunOptions{
		Name:        p.Name,
		Conclusion:  github.String(conclusion),
		CompletedAt: &github.Timestamp{Time: time.Now()},
		Output: &github.CheckRunOutput{
			Title:   github.String(p.Title),
			Summary: github.String(summary),
			Text:    github.String(text),
		},
	}
	checkRun, updateResp, err := client.Checks.UpdateCheckRun(context.TODO(), owner, repo, checkRunID, opts)
	// Log the raw API response before the error check so failed calls stay debuggable
	p.Debugf("update check API response: %+v", updateResp)
	p.Debugf("updated run: %+v", checkRun)
	if err != nil {
		return checkRun, fmt.Errorf("unable to update check run with results: %w", err)
	}
	return checkRun, nil
}
// duplicateCheckRun creates a new Check-Run with the same info as the provided one but for a new headSHA
func (p PRPlugin) duplicateCheckRun(client *github.Client, owner, repo, headSHA string, checkRun *github.CheckRun) (*github.CheckRun, error) {
	p.Debugf("duplicating check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
	// NOTE: the checkRun parameter is reassigned with the newly created run;
	// its fields are read (to copy them into the options) before the
	// assignment takes effect, so this is safe.
	checkRun, res, err := client.Checks.CreateCheckRun(
		context.TODO(),
		owner,
		repo,
		github.CreateCheckRunOptions{
			Name:        p.Name,
			HeadSHA:     headSHA,
			DetailsURL:  checkRun.DetailsURL,
			ExternalID:  checkRun.ExternalID,
			Status:      checkRun.Status,
			Conclusion:  checkRun.Conclusion,
			StartedAt:   checkRun.StartedAt,
			CompletedAt: checkRun.CompletedAt,
			Output:      checkRun.Output,
		},
	)
	// Responses are logged before the error check so failed calls remain debuggable
	p.Debugf("create check API response: %+v", res)
	p.Debugf("created run: %+v", checkRun)
	if err != nil {
		return checkRun, fmt.Errorf("unable to duplicate check run: %w", err)
	}
	return checkRun, nil
}
////////////////////////////////////////////////////////////////////////////////
// Entrypoint //
////////////////////////////////////////////////////////////////////////////////
// entrypoint will call the corresponding handler
func (p PRPlugin) entrypoint(env *ActionsEnv) (err error) {
	// Dispatch table mapping GitHub event actions to their handlers
	handlers := map[string]func(*ActionsEnv) error{
		actionOpen:   p.onOpen,
		actionReopen: p.onReopen,
		actionEdit:   p.onEdit,
		actionSync:   p.onSync,
	}
	action := env.Event.GetAction()
	if handler, known := handlers[action]; known {
		err = handler(env)
	} else {
		// Unknown actions are not an error: log and skip
		p.Warningf("action %q received with no defined procedure, skipping", action)
	}
	return
}
// onOpen handles "open" actions
func (p PRPlugin) onOpen(env *ActionsEnv) error {
	p.Debugf("%q handler", actionOpen)
	// A freshly opened PR gets a brand new check run on its head commit
	headSHA := env.Event.GetPullRequest().GetHead().GetSHA()
	checkRun, err := p.createCheckRun(env.Client, env.Owner, env.Repo, headSHA)
	if err != nil {
		return err
	}
	// Run the checks and publish the results on that run
	_, err = p.processAndSubmit(env, checkRun)
	return err
}
// onReopen handles "reopen" actions
func (p PRPlugin) onReopen(env *ActionsEnv) error {
	p.Debugf("%q handler", actionReopen)
	// Get the check run (created on demand if it doesn't exist yet)
	checkRun, err := p.getCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetPullRequest().GetHead().GetSHA())
	if err != nil {
		return err
	}
	// Rerun the tests if they weren't finished
	if !Finished.Equal(checkRun.GetStatus()) {
		// Process the PR and submit the results
		_, err = p.processAndSubmit(env, checkRun)
		return err
	}
	// Return failure here too so that the whole suite fails (since the actions
	// suite seems to ignore failing check runs when calculating general failure).
	// Use the generated nil-safe getters: Conclusion and Output are pointer
	// fields that go-github leaves nil when unset, so a bare dereference
	// (*checkRun.Conclusion) could panic.
	if checkRun.GetConclusion() == "failure" {
		return fmt.Errorf("failed: %v", checkRun.GetOutput().GetSummary())
	}
	return nil
}
// onEdit handles "edit" actions
func (p PRPlugin) onEdit(env *ActionsEnv) error {
	p.Debugf("%q handler", actionEdit)
	// Editing a PR invalidates previous results: reset the run to "started"
	headSHA := env.Event.GetPullRequest().GetHead().GetSHA()
	checkRun, err := p.resetCheckRun(env.Client, env.Owner, env.Repo, headSHA)
	if err != nil {
		return err
	}
	// Re-run the checks and publish the fresh results
	_, err = p.processAndSubmit(env, checkRun)
	return err
}
// onSync handles "synchronize" actions
func (p PRPlugin) | (env *ActionsEnv) error {
p.Debugf("%q handler", actionSync)
// Get the check run
checkRun, err := p.getCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetBefore())
if err != nil {
return err
}
// Rerun the tests if they weren't finished
if !Finished.Equal(checkRun.GetStatus()) {
// Process the PR and submit the results
checkRun, err = p.processAndSubmit(env, checkRun)
if err != nil {
return err
}
}
// Create a duplicate for the new commit
checkRun, err = p.duplicateCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetAfter(), checkRun)
if err != nil {
return err
}
// Return failure here too so that the whole suite fails (since the actions
// suite seems to ignore failing check runs when calculating general failure)
if *checkRun.Conclusion == "failure" {
return fmt.Errorf("failed: %v", *checkRun.Output.Summary)
}
return nil
}
| onSync | identifier_name |
plugin.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package verify
import (
"context"
"errors"
"fmt"
"time"
"github.com/google/go-github/v32/github"
"sigs.k8s.io/kubebuilder-release-tools/verify/pkg/log"
)
const (
actionOpen = "opened"
actionReopen = "reopened"
actionEdit = "edited"
actionSync = "synchronize"
)
// ErrorWithHelp allows PRPlugin.ProcessPR to provide extended descriptions
type ErrorWithHelp interface {
error
Help() string
}
// PRPlugin handles pull request events
type PRPlugin struct {
ProcessPR func(pr *github.PullRequest) (string, error)
Name string
Title string
log.Logger
}
// init initializes the PRPlugin
func (p *PRPlugin) init() {
p.Logger = log.NewFor(p.Name)
p.Debug("plugin initialized")
}
// processPR executes the provided ProcessPR and parses the result
func (p PRPlugin) processPR(pr *github.PullRequest) (conclusion, summary, text string, err error) {
p.Debug("execute the plugin checks")
text, err = p.ProcessPR(pr)
if err != nil {
conclusion = "failure"
summary = err.Error()
var helpErr ErrorWithHelp
if errors.As(err, &helpErr) {
text = helpErr.Help()
}
} else {
conclusion = "success"
summary = "Success"
}
// Log in case we can't submit the result for some reason
p.Debugf("plugin conclusion: %q", conclusion)
p.Debugf("plugin result summary: %q", summary)
p.Debugf("plugin result details: %q", text)
return conclusion, summary, text, err
}
// processAndSubmit performs the checks and submits the result
func (p PRPlugin) processAndSubmit(env *ActionsEnv, checkRun *github.CheckRun) (*github.CheckRun, error) {
// Process the PR
conclusion, summary, text, procErr := p.processPR(env.Event.PullRequest)
// Update the check run
checkRun, err := p.finishCheckRun(env.Client, env.Owner, env.Repo, checkRun.GetID(), conclusion, summary, text)
if err != nil {
return checkRun, err
}
// Return failure here too so that the whole suite fails (since the actions
// suite seems to ignore failing check runs when calculating general failure)
if procErr != nil {
return checkRun, fmt.Errorf("failed: %v", procErr)
}
return checkRun, nil
}
////////////////////////////////////////////////////////////////////////////////
// Check API calls //
////////////////////////////////////////////////////////////////////////////////
// createCheckRun creates a new Check-Run.
// It returns an error in case it couldn't be created.
func (p PRPlugin) createCheckRun(client *github.Client, owner, repo, headSHA string) (*github.CheckRun, error) {
p.Debugf("creating check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
checkRun, res, err := client.Checks.CreateCheckRun(
context.TODO(),
owner,
repo,
github.CreateCheckRunOptions{
Name: p.Name,
HeadSHA: headSHA,
Status: Started.StringP(),
},
)
p.Debugf("create check API response: %+v", res)
p.Debugf("created run: %+v", checkRun)
if err != nil {
return nil, fmt.Errorf("unable to create check run: %w", err)
}
return checkRun, nil
}
// getCheckRun returns the Check-Run, creating it if it doesn't exist.
// It returns an error in case it didn't exist and couldn't be created, or if there are multiple matches.
func (p PRPlugin) getCheckRun(client *github.Client, owner, repo, headSHA string) (*github.CheckRun, error) {
p.Debugf("getting check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
checkRunList, res, err := client.Checks.ListCheckRunsForRef(
context.TODO(),
owner,
repo,
headSHA,
&github.ListCheckRunsOptions{
CheckName: github.String(p.Name),
},
)
p.Debugf("list check API response: %+v", res)
p.Debugf("listed runs: %+v", checkRunList)
if err != nil {
return nil, fmt.Errorf("unable to get check run: %w", err)
}
switch n := *checkRunList.Total; {
case n == 0:
return p.createCheckRun(client, owner, repo, headSHA)
case n == 1:
return checkRunList.CheckRuns[0], nil
case n > 1:
return nil, fmt.Errorf("multiple instances of `%s` check run found on %s/%s @ %s",
p.Name, owner, repo, headSHA)
default: // Should never happen
return nil, fmt.Errorf("negative number of instances (%d) of `%s` check run found on %s/%s @ %s",
n, p.Name, owner, repo, headSHA)
}
}
// resetCheckRun returns the Check-Run with executing status, creating it if it doesn't exist.
// It returns an error in case it didn't exist and couldn't be created, if there are multiple matches,
// or if it exists but couldn't be updated.
func (p PRPlugin) resetCheckRun(client *github.Client, owner, repo string, headSHA string) (*github.CheckRun, error) {
checkRun, err := p.getCheckRun(client, owner, repo, headSHA)
// If it errored, or it was created but not finished, we don't need to update it
if err != nil || Started.Equal(checkRun.GetStatus()) {
return checkRun, err
}
p.Debugf("resetting check run %q on %s/%s...", p.Name, owner, repo)
checkRun, updateResp, err := client.Checks.UpdateCheckRun(
context.TODO(),
owner,
repo,
checkRun.GetID(),
github.UpdateCheckRunOptions{
Name: p.Name,
Status: Started.StringP(),
},
)
p.Debugf("update check API response: %+v", updateResp)
p.Debugf("updated run: %+v", checkRun)
if err != nil {
return checkRun, fmt.Errorf("unable to reset check run: %w", err)
}
return checkRun, nil
}
// finishCheckRun updates the Check-Run with id checkRunID setting its output.
// It returns an error in case it couldn't be updated.
func (p PRPlugin) finishCheckRun(client *github.Client, owner, repo string, checkRunID int64, conclusion, summary, text string) (*github.CheckRun, error) {
p.Debugf("adding results to check run %q on %s/%s...", p.Name, owner, repo)
checkRun, updateResp, err := client.Checks.UpdateCheckRun(context.TODO(), owner, repo, checkRunID, github.UpdateCheckRunOptions{
Name: p.Name,
Conclusion: github.String(conclusion),
CompletedAt: &github.Timestamp{Time: time.Now()},
Output: &github.CheckRunOutput{
Title: github.String(p.Title),
Summary: github.String(summary),
Text: github.String(text),
},
})
p.Debugf("update check API response: %+v", updateResp)
p.Debugf("updated run: %+v", checkRun)
if err != nil {
return checkRun, fmt.Errorf("unable to update check run with results: %w", err)
}
return checkRun, nil
}
// duplicateCheckRun creates a new Check-Run with the same info as the provided one but for a new headSHA
func (p PRPlugin) duplicateCheckRun(client *github.Client, owner, repo, headSHA string, checkRun *github.CheckRun) (*github.CheckRun, error) {
p.Debugf("duplicating check run %q on %s/%s @ %s...", p.Name, owner, repo, headSHA)
checkRun, res, err := client.Checks.CreateCheckRun(
context.TODO(),
owner,
repo,
github.CreateCheckRunOptions{
Name: p.Name,
HeadSHA: headSHA,
DetailsURL: checkRun.DetailsURL,
ExternalID: checkRun.ExternalID,
Status: checkRun.Status,
Conclusion: checkRun.Conclusion,
StartedAt: checkRun.StartedAt,
CompletedAt: checkRun.CompletedAt,
Output: checkRun.Output,
},
)
p.Debugf("create check API response: %+v", res)
p.Debugf("created run: %+v", checkRun)
if err != nil {
return checkRun, fmt.Errorf("unable to duplicate check run: %w", err)
}
return checkRun, nil
}
////////////////////////////////////////////////////////////////////////////////
// Entrypoint //
////////////////////////////////////////////////////////////////////////////////
// entrypoint will call the corresponding handler
func (p PRPlugin) entrypoint(env *ActionsEnv) (err error) {
switch env.Event.GetAction() {
case actionOpen:
err = p.onOpen(env)
case actionReopen:
err = p.onReopen(env)
case actionEdit:
err = p.onEdit(env)
case actionSync:
err = p.onSync(env)
default:
p.Warningf("action %q received with no defined procedure, skipping", env.Event.GetAction())
}
return
}
// onOpen handles "open" actions
func (p PRPlugin) onOpen(env *ActionsEnv) error {
p.Debugf("%q handler", actionOpen)
// Create the check run
checkRun, err := p.createCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetPullRequest().GetHead().GetSHA())
if err != nil |
// Process the PR and submit the results
_, err = p.processAndSubmit(env, checkRun)
return err
}
// onReopen handles "reopen" actions
func (p PRPlugin) onReopen(env *ActionsEnv) error {
p.Debugf("%q handler", actionReopen)
// Get the check run
checkRun, err := p.getCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetPullRequest().GetHead().GetSHA())
if err != nil {
return err
}
// Rerun the tests if they weren't finished
if !Finished.Equal(checkRun.GetStatus()) {
// Process the PR and submit the results
_, err = p.processAndSubmit(env, checkRun)
return err
}
// Return failure here too so that the whole suite fails (since the actions
// suite seems to ignore failing check runs when calculating general failure)
if *checkRun.Conclusion == "failure" {
return fmt.Errorf("failed: %v", *checkRun.Output.Summary)
}
return nil
}
// onEdit handles "edit" actions
func (p PRPlugin) onEdit(env *ActionsEnv) error {
p.Debugf("%q handler", actionEdit)
// Reset the check run
checkRun, err := p.resetCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetPullRequest().GetHead().GetSHA())
if err != nil {
return err
}
// Process the PR and submit the results
_, err = p.processAndSubmit(env, checkRun)
return err
}
// onSync handles "synchronize" actions
func (p PRPlugin) onSync(env *ActionsEnv) error {
	p.Debugf("%q handler", actionSync)
	// Get the check run recorded against the commit we're syncing *from*
	checkRun, err := p.getCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetBefore())
	if err != nil {
		return err
	}
	// Rerun the tests if they weren't finished
	if !Finished.Equal(checkRun.GetStatus()) {
		// Process the PR and submit the results
		checkRun, err = p.processAndSubmit(env, checkRun)
		if err != nil {
			return err
		}
	}
	// Create a duplicate for the new commit so the results follow the new head SHA
	checkRun, err = p.duplicateCheckRun(env.Client, env.Owner, env.Repo, env.Event.GetAfter(), checkRun)
	if err != nil {
		return err
	}
	// Return failure here too so that the whole suite fails (since the actions
	// suite seems to ignore failing check runs when calculating general failure).
	// Use the generated nil-safe getters: Conclusion and Output are pointer
	// fields that go-github leaves nil when unset, so a bare dereference
	// (*checkRun.Conclusion) could panic.
	if checkRun.GetConclusion() == "failure" {
		return fmt.Errorf("failed: %v", checkRun.GetOutput().GetSummary())
	}
	return nil
}
| {
return err
} | conditional_block |
bosh.ts | import { Agent, Transport, TransportConfig } from '../';
import { StreamErrorCondition } from '../Constants';
import StreamManagement from '../helpers/StreamManagement';
import { fetch, Duplex } from '../platform';
import { Stream } from '../protocol';
import { JSONData, ParsedData, Registry, StreamParser } from '../jxt';
import { sleep, timeoutPromise } from '../Utils';
// One long-poll HTTP channel of the BOSH transport. Tracks the request id
// it is currently carrying and whether a request is in flight, and retries
// failed requests with capped quadratic backoff.
class RequestChannel {
    public rid!: number;
    public maxTimeout: number;
    public active = false;

    private stream: BOSH;
    private maxRetries = 5;

    constructor(stream: BOSH) {
        this.stream = stream;
        // Allow a 10% grace period on top of the stream's advertised wait time
        this.maxTimeout = 1000 * 1.1 * this.stream.maxWaitTime;
    }

    public async send(rid: number, body: string): Promise<string> {
        this.rid = rid;
        this.active = true;
        for (let attempt = 1; attempt <= this.maxRetries; attempt += 1) {
            try {
                const res = await timeoutPromise(
                    fetch(this.stream.url, {
                        body,
                        headers: {
                            'Content-Type': this.stream.contentType
                        },
                        method: 'POST'
                    }),
                    this.maxTimeout,
                    () => new Error('Request timed out')
                );
                if (!res.ok) {
                    throw new Error('HTTP Status Error: ' + res.status);
                }
                const result = await res.text();
                this.active = false;
                return result;
            } catch (err) {
                if (attempt === 1) {
                    // First failure is retried immediately, without backoff
                    continue;
                }
                if (attempt < this.maxRetries) {
                    // Quadratic backoff capped at maxTimeout, plus up to 1s of jitter
                    const backoff = Math.min(this.maxTimeout, Math.pow(attempt, 2) * 1000);
                    await sleep(backoff + Math.random() * 1000);
                    continue;
                }
                // Out of retries: give up and propagate the last error
                this.active = false;
                throw err;
            }
        }
        throw new Error('Request failed');
    }
}
export default class BOSH extends Duplex implements Transport {
public hasStream?: boolean;
public stream?: Stream;
public authenticated?: boolean;
public url!: string;
public rid?: number = Math.floor(Math.random() * 0xffffffff);
public sid?: string = '';
public maxHoldOpen = 2;
public maxWaitTime = 30;
public contentType = 'text/xml; charset=utf-8';
private channels: RequestChannel[] = [new RequestChannel(this), new RequestChannel(this)];
private activeChannelID = 0;
private client: Agent;
private config!: TransportConfig;
private sm: StreamManagement;
private stanzas: Registry;
private idleTimeout: any;
private queue: Array<[any, (err?: Error) => void]> = [];
private isEnded = false;
constructor(client: Agent, sm: StreamManagement, stanzas: Registry) {
super({
objectMode: true
});
this.client = client;
this.sm = sm;
this.stanzas = stanzas;
this.on('data', e => {
this.client.emit('stream:data', e.stanza, e.kind);
});
this.on('end', () => {
this.isEnded = true;
clearTimeout(this.idleTimeout);
if (this.client.transport === this) {
this.client.emit('--transport-disconnected');
}
});
}
public _write(chunk: string, encoding: string, done: (err?: Error) => void): void {
this.queue.push([chunk, done]);
this.scheduleRequests();
}
public _writev(
chunks: Array<{ chunk: string; encoding: string }>,
done: (err?: Error) => void
): void {
this.queue.push([chunks.map(c => c.chunk).join(''), done]);
this.scheduleRequests();
}
public _read(): void {
return;
}
    // process parses one HTTP response body and feeds the resulting stanzas
    // into the Duplex read side. It also drives session lifecycle events:
    // stream start, server-initiated termination, and scheduling of the next
    // long-poll request.
    public process(result: string): void {
        // Each BOSH response is a self-contained wrapped <body/> document, so a
        // fresh parser is constructed per response rather than reused.
        const parser = new StreamParser({
            acceptLanguages: this.config.acceptLanguages,
            allowComments: false,
            lang: this.config.lang,
            registry: this.stanzas,
            rootKey: 'bosh',
            wrappedStream: true
        });
        parser.on('error', (err: any) => {
            // Unparseable XML is fatal: report it, notify the server with a
            // stream error, and tear the session down.
            const streamError = {
                condition: StreamErrorCondition.InvalidXML
            };
            this.client.emit('stream:error', streamError, err);
            this.send('error', streamError);
            return this.disconnect();
        });
        parser.on('data', (e: ParsedData) => {
            if (e.event === 'stream-start') {
                this.stream = e.stanza;
                if (e.stanza.type === 'terminate') {
                    // Server ended the session: clear the session ids and
                    // signal end-of-stream exactly once.
                    this.hasStream = false;
                    this.rid = undefined;
                    this.sid = undefined;
                    if (!this.isEnded) {
                        this.isEnded = true;
                        this.client.emit('bosh:terminate', e.stanza);
                        this.client.emit('stream:end');
                        this.push(null);
                    }
                } else if (!this.hasStream) {
                    // First wrapper of a new session: adopt the server-supplied
                    // sid/maxWaitTime when present and announce the stream.
                    this.hasStream = true;
                    this.stream = e.stanza;
                    this.sid = e.stanza.sid || this.sid;
                    this.maxWaitTime = e.stanza.maxWaitTime || this.maxWaitTime;
                    this.client.emit('stream:start', e.stanza);
                }
                return;
            }
            if (!e.event) {
                // Regular stanza: hand it to consumers via the readable side
                this.push({ kind: e.kind, stanza: e.stanza });
            }
        });
        this.client.emit('raw', 'incoming', result);
        parser.write(result);
        // Keep the long-poll cycle going after every processed response
        this.scheduleRequests();
    }
    // connect starts a new BOSH session, or attaches to a prebound one when a
    // sid is already available from the config.
    public connect(opts: TransportConfig): void {
        this.config = opts;
        this.url = opts.url!;
        // Config values override the defaults chosen at construction time
        if (opts.rid) {
            this.rid = opts.rid;
        }
        if (opts.sid) {
            this.sid = opts.sid;
        }
        if (opts.wait) {
            this.maxWaitTime = opts.wait;
        }
        if (opts.maxHoldOpen) {
            this.maxHoldOpen = opts.maxHoldOpen;
        }
        if (this.sid) {
            // A pre-existing sid means we're attaching to a prebound session:
            // skip session creation and report the session as already started.
            this.hasStream = true;
            this.stream = {};
            this.client.emit('connected');
            this.client.emit('session:prebind', this.config.jid);
            this.client.emit('session:started');
            return;
        }
        // No sid yet: issue the BOSH session-creation request
        this._send({
            lang: opts.lang,
            maxHoldOpen: this.maxHoldOpen,
            maxWaitTime: this.maxWaitTime,
            to: opts.server,
            version: '1.6',
            xmppVersion: '1.0'
        });
    }
public restart(): void {
this.hasStream = false;
this._send({
to: this.config.server,
xmppRestart: true
});
}
public disconnect(clean = true): void {
if (this.hasStream && clean) {
this._send({
type: 'terminate'
}); | }
}
public async send(dataOrName: string, data?: JSONData): Promise<void> {
let output: string | undefined;
if (data) {
output = this.stanzas.export(dataOrName, data)?.toString();
}
if (!output) {
return;
}
return new Promise<void>((resolve, reject) => {
this.write(output, 'utf8', err => (err ? reject(err) : resolve()));
});
}
private get sendingChannel() {
return this.channels[this.activeChannelID];
}
private get pollingChannel() {
return this.channels[this.activeChannelID === 0 ? 1 : 0];
}
private toggleChannel() {
this.activeChannelID = this.activeChannelID === 0 ? 1 : 0;
}
private async _send(boshData: any, payload = ''): Promise<void> {
if (this.isEnded) {
return;
}
const rid = this.rid!++;
const header = this.stanzas.export('bosh', {
...boshData,
rid,
sid: this.sid
})!;
let body: string;
if (payload) {
body = [header.openTag(), payload, header.closeTag()].join('');
} else {
body = header.toString();
}
this.client.emit('raw', 'outgoing', body);
this.sendingChannel
.send(rid, body)
.then(result => {
this.process(result);
})
.catch(err => {
this.end(err);
});
this.toggleChannel();
}
private async _poll(): Promise<void> {
if (this.isEnded) {
return;
}
const rid = this.rid!++;
const body = this.stanzas
.export('bosh', {
rid,
sid: this.sid
})!
.toString();
this.client.emit('raw', 'outgoing', body);
this.pollingChannel
.send(rid, body)
.then(result => {
this.process(result);
})
.catch(err => {
this.end(err);
});
}
private scheduleRequests() {
clearTimeout(this.idleTimeout);
this.idleTimeout = setTimeout(() => {
this.fireRequests();
}, 10);
}
private fireRequests() {
if (this.isEnded) {
return;
}
if (this.queue.length) {
if (!this.sendingChannel.active) {
const [data, done] = this.queue.shift()!;
this._send({}, data);
done();
} else {
this.scheduleRequests();
}
return;
}
if (this.authenticated && !(this.channels[0].active || this.channels[1].active)) {
this._poll();
}
}
} | } else {
this.stream = undefined;
this.sid = undefined;
this.rid = undefined;
this.client.emit('--transport-disconnected'); | random_line_split |
bosh.ts | import { Agent, Transport, TransportConfig } from '../';
import { StreamErrorCondition } from '../Constants';
import StreamManagement from '../helpers/StreamManagement';
import { fetch, Duplex } from '../platform';
import { Stream } from '../protocol';
import { JSONData, ParsedData, Registry, StreamParser } from '../jxt';
import { sleep, timeoutPromise } from '../Utils';
class RequestChannel {
public rid!: number;
public maxTimeout: number;
public active = false;
private stream: BOSH;
private maxRetries = 5;
constructor(stream: BOSH) {
this.stream = stream;
this.maxTimeout = 1000 * 1.1 * this.stream.maxWaitTime;
}
public async send(rid: number, body: string): Promise<string> {
this.rid = rid;
this.active = true;
let attempts = 0;
while (attempts <= this.maxRetries) {
attempts += 1;
try {
const res = await timeoutPromise(
fetch(this.stream.url, {
body,
headers: {
'Content-Type': this.stream.contentType
},
method: 'POST'
}),
this.maxTimeout,
() => new Error('Request timed out')
);
if (!res.ok) {
throw new Error('HTTP Status Error: ' + res.status);
}
const result = await res.text();
this.active = false;
return result;
} catch (err) {
if (attempts === 1) {
continue;
} else if (attempts < this.maxRetries) {
const backoff = Math.min(this.maxTimeout, Math.pow(attempts, 2) * 1000);
await sleep(backoff + Math.random() * 1000);
continue;
} else {
this.active = false;
throw err;
}
}
}
throw new Error('Request failed');
}
}
export default class BOSH extends Duplex implements Transport {
public hasStream?: boolean;
public stream?: Stream;
public authenticated?: boolean;
public url!: string;
public rid?: number = Math.floor(Math.random() * 0xffffffff);
public sid?: string = '';
public maxHoldOpen = 2;
public maxWaitTime = 30;
public contentType = 'text/xml; charset=utf-8';
private channels: RequestChannel[] = [new RequestChannel(this), new RequestChannel(this)];
private activeChannelID = 0;
private client: Agent;
private config!: TransportConfig;
private sm: StreamManagement;
private stanzas: Registry;
private idleTimeout: any;
private queue: Array<[any, (err?: Error) => void]> = [];
private isEnded = false;
constructor(client: Agent, sm: StreamManagement, stanzas: Registry) {
super({
objectMode: true
});
this.client = client;
this.sm = sm;
this.stanzas = stanzas;
this.on('data', e => {
this.client.emit('stream:data', e.stanza, e.kind);
});
this.on('end', () => {
this.isEnded = true;
clearTimeout(this.idleTimeout);
if (this.client.transport === this) {
this.client.emit('--transport-disconnected');
}
});
}
public _write(chunk: string, encoding: string, done: (err?: Error) => void): void {
this.queue.push([chunk, done]);
this.scheduleRequests();
}
public _writev(
chunks: Array<{ chunk: string; encoding: string }>,
done: (err?: Error) => void
): void {
this.queue.push([chunks.map(c => c.chunk).join(''), done]);
this.scheduleRequests();
}
public _read(): void {
return;
}
public process(result: string): void {
const parser = new StreamParser({
acceptLanguages: this.config.acceptLanguages,
allowComments: false,
lang: this.config.lang,
registry: this.stanzas,
rootKey: 'bosh',
wrappedStream: true
});
parser.on('error', (err: any) => {
const streamError = {
condition: StreamErrorCondition.InvalidXML
};
this.client.emit('stream:error', streamError, err);
this.send('error', streamError);
return this.disconnect();
});
parser.on('data', (e: ParsedData) => {
if (e.event === 'stream-start') {
this.stream = e.stanza;
if (e.stanza.type === 'terminate') {
this.hasStream = false;
this.rid = undefined;
this.sid = undefined;
if (!this.isEnded) {
this.isEnded = true;
this.client.emit('bosh:terminate', e.stanza);
this.client.emit('stream:end');
this.push(null);
}
} else if (!this.hasStream) {
this.hasStream = true;
this.stream = e.stanza;
this.sid = e.stanza.sid || this.sid;
this.maxWaitTime = e.stanza.maxWaitTime || this.maxWaitTime;
this.client.emit('stream:start', e.stanza);
}
return;
}
if (!e.event) {
this.push({ kind: e.kind, stanza: e.stanza });
}
});
this.client.emit('raw', 'incoming', result);
parser.write(result);
this.scheduleRequests();
}
public connect(opts: TransportConfig): void {
this.config = opts;
this.url = opts.url!;
if (opts.rid) {
this.rid = opts.rid;
}
if (opts.sid) {
this.sid = opts.sid;
}
if (opts.wait) {
this.maxWaitTime = opts.wait;
}
if (opts.maxHoldOpen) {
this.maxHoldOpen = opts.maxHoldOpen;
}
if (this.sid) |
this._send({
lang: opts.lang,
maxHoldOpen: this.maxHoldOpen,
maxWaitTime: this.maxWaitTime,
to: opts.server,
version: '1.6',
xmppVersion: '1.0'
});
}
public restart(): void {
this.hasStream = false;
this._send({
to: this.config.server,
xmppRestart: true
});
}
public disconnect(clean = true): void {
if (this.hasStream && clean) {
this._send({
type: 'terminate'
});
} else {
this.stream = undefined;
this.sid = undefined;
this.rid = undefined;
this.client.emit('--transport-disconnected');
}
}
public async send(dataOrName: string, data?: JSONData): Promise<void> {
let output: string | undefined;
if (data) {
output = this.stanzas.export(dataOrName, data)?.toString();
}
if (!output) {
return;
}
return new Promise<void>((resolve, reject) => {
this.write(output, 'utf8', err => (err ? reject(err) : resolve()));
});
}
private get sendingChannel() {
return this.channels[this.activeChannelID];
}
private get pollingChannel() {
return this.channels[this.activeChannelID === 0 ? 1 : 0];
}
private toggleChannel() {
this.activeChannelID = this.activeChannelID === 0 ? 1 : 0;
}
private async _send(boshData: any, payload = ''): Promise<void> {
if (this.isEnded) {
return;
}
const rid = this.rid!++;
const header = this.stanzas.export('bosh', {
...boshData,
rid,
sid: this.sid
})!;
let body: string;
if (payload) {
body = [header.openTag(), payload, header.closeTag()].join('');
} else {
body = header.toString();
}
this.client.emit('raw', 'outgoing', body);
this.sendingChannel
.send(rid, body)
.then(result => {
this.process(result);
})
.catch(err => {
this.end(err);
});
this.toggleChannel();
}
private async _poll(): Promise<void> {
if (this.isEnded) {
return;
}
const rid = this.rid!++;
const body = this.stanzas
.export('bosh', {
rid,
sid: this.sid
})!
.toString();
this.client.emit('raw', 'outgoing', body);
this.pollingChannel
.send(rid, body)
.then(result => {
this.process(result);
})
.catch(err => {
this.end(err);
});
}
private scheduleRequests() {
clearTimeout(this.idleTimeout);
this.idleTimeout = setTimeout(() => {
this.fireRequests();
}, 10);
}
private fireRequests() {
if (this.isEnded) {
return;
}
if (this.queue.length) {
if (!this.sendingChannel.active) {
const [data, done] = this.queue.shift()!;
this._send({}, data);
done();
} else {
this.scheduleRequests();
}
return;
}
if (this.authenticated && !(this.channels[0].active || this.channels[1].active)) {
this._poll();
}
}
}
| {
this.hasStream = true;
this.stream = {};
this.client.emit('connected');
this.client.emit('session:prebind', this.config.jid);
this.client.emit('session:started');
return;
} | conditional_block |
bosh.ts | import { Agent, Transport, TransportConfig } from '../';
import { StreamErrorCondition } from '../Constants';
import StreamManagement from '../helpers/StreamManagement';
import { fetch, Duplex } from '../platform';
import { Stream } from '../protocol';
import { JSONData, ParsedData, Registry, StreamParser } from '../jxt';
import { sleep, timeoutPromise } from '../Utils';
class RequestChannel {
public rid!: number;
public maxTimeout: number;
public active = false;
private stream: BOSH;
private maxRetries = 5;
constructor(stream: BOSH) {
this.stream = stream;
this.maxTimeout = 1000 * 1.1 * this.stream.maxWaitTime;
}
public async send(rid: number, body: string): Promise<string> {
this.rid = rid;
this.active = true;
let attempts = 0;
while (attempts <= this.maxRetries) {
attempts += 1;
try {
const res = await timeoutPromise(
fetch(this.stream.url, {
body,
headers: {
'Content-Type': this.stream.contentType
},
method: 'POST'
}),
this.maxTimeout,
() => new Error('Request timed out')
);
if (!res.ok) {
throw new Error('HTTP Status Error: ' + res.status);
}
const result = await res.text();
this.active = false;
return result;
} catch (err) {
if (attempts === 1) {
continue;
} else if (attempts < this.maxRetries) {
const backoff = Math.min(this.maxTimeout, Math.pow(attempts, 2) * 1000);
await sleep(backoff + Math.random() * 1000);
continue;
} else {
this.active = false;
throw err;
}
}
}
throw new Error('Request failed');
}
}
export default class BOSH extends Duplex implements Transport {
public hasStream?: boolean;
public stream?: Stream;
public authenticated?: boolean;
public url!: string;
public rid?: number = Math.floor(Math.random() * 0xffffffff);
public sid?: string = '';
public maxHoldOpen = 2;
public maxWaitTime = 30;
public contentType = 'text/xml; charset=utf-8';
private channels: RequestChannel[] = [new RequestChannel(this), new RequestChannel(this)];
private activeChannelID = 0;
private client: Agent;
private config!: TransportConfig;
private sm: StreamManagement;
private stanzas: Registry;
private idleTimeout: any;
private queue: Array<[any, (err?: Error) => void]> = [];
private isEnded = false;
constructor(client: Agent, sm: StreamManagement, stanzas: Registry) {
super({
objectMode: true
});
this.client = client;
this.sm = sm;
this.stanzas = stanzas;
this.on('data', e => {
this.client.emit('stream:data', e.stanza, e.kind);
});
this.on('end', () => {
this.isEnded = true;
clearTimeout(this.idleTimeout);
if (this.client.transport === this) {
this.client.emit('--transport-disconnected');
}
});
}
public _write(chunk: string, encoding: string, done: (err?: Error) => void): void {
this.queue.push([chunk, done]);
this.scheduleRequests();
}
public _writev(
chunks: Array<{ chunk: string; encoding: string }>,
done: (err?: Error) => void
): void {
this.queue.push([chunks.map(c => c.chunk).join(''), done]);
this.scheduleRequests();
}
public _read(): void {
return;
}
public process(result: string): void {
const parser = new StreamParser({
acceptLanguages: this.config.acceptLanguages,
allowComments: false,
lang: this.config.lang,
registry: this.stanzas,
rootKey: 'bosh',
wrappedStream: true
});
parser.on('error', (err: any) => {
const streamError = {
condition: StreamErrorCondition.InvalidXML
};
this.client.emit('stream:error', streamError, err);
this.send('error', streamError);
return this.disconnect();
});
parser.on('data', (e: ParsedData) => {
if (e.event === 'stream-start') {
this.stream = e.stanza;
if (e.stanza.type === 'terminate') {
this.hasStream = false;
this.rid = undefined;
this.sid = undefined;
if (!this.isEnded) {
this.isEnded = true;
this.client.emit('bosh:terminate', e.stanza);
this.client.emit('stream:end');
this.push(null);
}
} else if (!this.hasStream) {
this.hasStream = true;
this.stream = e.stanza;
this.sid = e.stanza.sid || this.sid;
this.maxWaitTime = e.stanza.maxWaitTime || this.maxWaitTime;
this.client.emit('stream:start', e.stanza);
}
return;
}
if (!e.event) {
this.push({ kind: e.kind, stanza: e.stanza });
}
});
this.client.emit('raw', 'incoming', result);
parser.write(result);
this.scheduleRequests();
}
public connect(opts: TransportConfig): void {
this.config = opts;
this.url = opts.url!;
if (opts.rid) {
this.rid = opts.rid;
}
if (opts.sid) {
this.sid = opts.sid;
}
if (opts.wait) {
this.maxWaitTime = opts.wait;
}
if (opts.maxHoldOpen) {
this.maxHoldOpen = opts.maxHoldOpen;
}
if (this.sid) {
this.hasStream = true;
this.stream = {};
this.client.emit('connected');
this.client.emit('session:prebind', this.config.jid);
this.client.emit('session:started');
return;
}
this._send({
lang: opts.lang,
maxHoldOpen: this.maxHoldOpen,
maxWaitTime: this.maxWaitTime,
to: opts.server,
version: '1.6',
xmppVersion: '1.0'
});
}
public restart(): void {
this.hasStream = false;
this._send({
to: this.config.server,
xmppRestart: true
});
}
public disconnect(clean = true): void {
if (this.hasStream && clean) {
this._send({
type: 'terminate'
});
} else {
this.stream = undefined;
this.sid = undefined;
this.rid = undefined;
this.client.emit('--transport-disconnected');
}
}
public async send(dataOrName: string, data?: JSONData): Promise<void> {
let output: string | undefined;
if (data) {
output = this.stanzas.export(dataOrName, data)?.toString();
}
if (!output) {
return;
}
return new Promise<void>((resolve, reject) => {
this.write(output, 'utf8', err => (err ? reject(err) : resolve()));
});
}
private get sendingChannel() {
return this.channels[this.activeChannelID];
}
private get pollingChannel() {
return this.channels[this.activeChannelID === 0 ? 1 : 0];
}
private toggleChannel() {
this.activeChannelID = this.activeChannelID === 0 ? 1 : 0;
}
private async _send(boshData: any, payload = ''): Promise<void> {
if (this.isEnded) {
return;
}
const rid = this.rid!++;
const header = this.stanzas.export('bosh', {
...boshData,
rid,
sid: this.sid
})!;
let body: string;
if (payload) {
body = [header.openTag(), payload, header.closeTag()].join('');
} else {
body = header.toString();
}
this.client.emit('raw', 'outgoing', body);
this.sendingChannel
.send(rid, body)
.then(result => {
this.process(result);
})
.catch(err => {
this.end(err);
});
this.toggleChannel();
}
private async | (): Promise<void> {
if (this.isEnded) {
return;
}
const rid = this.rid!++;
const body = this.stanzas
.export('bosh', {
rid,
sid: this.sid
})!
.toString();
this.client.emit('raw', 'outgoing', body);
this.pollingChannel
.send(rid, body)
.then(result => {
this.process(result);
})
.catch(err => {
this.end(err);
});
}
private scheduleRequests() {
clearTimeout(this.idleTimeout);
this.idleTimeout = setTimeout(() => {
this.fireRequests();
}, 10);
}
private fireRequests() {
if (this.isEnded) {
return;
}
if (this.queue.length) {
if (!this.sendingChannel.active) {
const [data, done] = this.queue.shift()!;
this._send({}, data);
done();
} else {
this.scheduleRequests();
}
return;
}
if (this.authenticated && !(this.channels[0].active || this.channels[1].active)) {
this._poll();
}
}
}
| _poll | identifier_name |
mod.rs | pub(crate) mod css;
mod css_function;
mod error;
pub mod formalargs;
mod imports;
mod media;
pub mod selectors;
mod span;
pub(crate) mod strings;
mod unit;
pub(crate) mod util;
pub mod value;
pub(crate) use self::strings::name;
pub use error::ParseError;
pub(crate) use span::DebugBytes;
pub(crate) use span::{position, Span};
use self::formalargs::{call_args, formal_args};
use self::selectors::selectors;
use self::strings::{
custom_value, sass_string, sass_string_dq, sass_string_sq,
};
use self::util::{
comment2, ignore_comments, ignore_space, opt_spacelike, semi_or_end,
spacelike,
};
use self::value::{
dictionary, function_call_or_string, single_value, value_expression,
};
use crate::input::{SourceFile, SourceName, SourcePos};
use crate::sass::parser::{variable_declaration2, variable_declaration_mod};
use crate::sass::{Callable, FormalArgs, Item, Name, Selectors, Value};
use crate::value::ListSeparator;
#[cfg(test)]
use crate::value::{Numeric, Unit};
use crate::Error;
use imports::{forward2, import2, use2};
use nom::branch::alt;
use nom::bytes::complete::{is_a, is_not, tag};
use nom::character::complete::one_of;
use nom::combinator::{
all_consuming, into, map, map_res, opt, peek, value, verify,
};
use nom::multi::{many0, many_till, separated_list0, separated_list1};
use nom::sequence::{delimited, pair, preceded, terminated};
use nom::IResult;
use std::str::{from_utf8, Utf8Error};
/// A Parsing Result; ok gives a span for the rest of the data and a parsed T.
pub(crate) type PResult<'a, T> = IResult<Span<'a>, T>;
pub(crate) fn code_span(value: &[u8]) -> SourcePos {
SourceFile::scss_bytes(value, SourceName::root("(rsass)")).into()
}
pub(crate) fn input_span(value: impl Into<Vec<u8>>) -> SourcePos {
SourceFile::scss_bytes(value, SourceName::root("-")).into()
}
/// Parse a scss value.
///
/// Returns a single value (or an error).
pub fn parse_value_data(data: &[u8]) -> Result<Value, Error> {
let data = code_span(data);
let value = all_consuming(value_expression)(data.borrow());
Ok(ParseError::check(value)?)
}
#[test]
fn test_parse_value_data_1() -> Result<(), Error> {
let v = parse_value_data(b"17em")?;
assert_eq!(Value::Numeric(Numeric::new(17, Unit::Em)), v);
Ok(())
}
#[test]
fn test_parse_value_data_2() -> Result<(), Error> {
let v = parse_value_data(b"17em;");
assert!(v.is_err());
Ok(())
}
pub(crate) fn sassfile(input: Span) -> PResult<Vec<Item>> {
preceded(
opt(tag("\u{feff}".as_bytes())),
map(
many_till(
preceded(opt_spacelike, top_level_item),
all_consuming(opt_spacelike),
),
|(v, _eof)| v,
),
)(input)
}
fn top_level_item(input: Span) -> PResult<Item> {
let (rest, tag) = alt((tag("$"), tag("/*"), tag("@"), tag("")))(input)?;
match tag.fragment() {
b"$" => into(variable_declaration2)(rest),
b"/*" => comment_item(rest),
b"@" => at_rule2(input),
b"" => alt((into(variable_declaration_mod), rule))(input),
_ => unreachable!(),
}
}
fn comment_item(input: Span) -> PResult<Item> {
map(comment2, Item::Comment)(input)
}
fn rule(input: Span) -> PResult<Item> {
map(pair(rule_start, body_block2), |(selectors, body)| {
Item::Rule(selectors, body)
})(input)
}
fn rule_start(input: Span) -> PResult<Selectors> {
terminated(selectors, terminated(opt(is_a(", \t\r\n")), tag("{")))(input)
}
fn body_item(input: Span) -> PResult<Item> {
let (rest, tag) =
alt((tag("$"), tag("/*"), tag(";"), tag("@"), tag("--"), tag("")))(
input,
)?;
match tag.fragment() {
b"$" => into(variable_declaration2)(rest),
b"/*" => comment_item(rest),
b";" => Ok((rest, Item::None)),
b"@" => at_rule2(input),
b"--" => {
let result = custom_property(rest);
if result.is_err() {
// Note use of `input` rather than `rest` here.
if let Ok((rest, rule)) = rule(input) {
return Ok((rest, rule));
}
}
result
}
b"" => match rule_start(rest) {
Ok((rest, selectors)) => {
let (rest, body) = body_block2(rest)?;
Ok((rest, Item::Rule(selectors, body)))
}
Err(_) => property_or_namespace_rule(rest),
},
_ => unreachable!(),
}
}
/// What follows the `@at-root` tag.
fn at_root2(input: Span) -> PResult<Item> {
preceded(
opt_spacelike,
map(
pair(
map(opt(selectors), |s| s.unwrap_or_else(Selectors::root)),
body_block,
),
|(selectors, body)| Item::AtRoot(selectors, body),
),
)(input)
}
/// What follows the `@include` tag.
fn mixin_call<'a>(start: Span, input: Span<'a>) -> PResult<'a, Item> {
let (rest, n1) = terminated(name, opt_spacelike)(input)?;
let (rest, n2) = opt(preceded(tag("."), name))(rest)?;
let name = n2.map(|n2| format!("{n1}.{n2}")).unwrap_or(n1);
let (rest, _) = opt_spacelike(rest)?;
let (rest0, args) = terminated(opt(call_args), opt_spacelike)(rest)?;
let (rest, t) = alt((tag("using"), tag("{"), tag("")))(rest0)?;
let (end, body) = match t.fragment() {
b"using" => {
let (end, args) = preceded(opt_spacelike, formal_args)(rest)?;
let (rest, body) = preceded(opt_spacelike, body_block)(end)?;
let decl = rest0.up_to(&end).to_owned();
(rest, Some(Callable::new(args, body, decl)))
}
b"{" => {
let (rest, body) = body_block(rest0)?;
let decl = rest0.up_to(&rest).to_owned();
(rest, Some(Callable::no_args(body, decl)))
}
_ => {
let (rest, _) = opt(tag(";"))(rest)?;
(rest, None)
}
};
let pos = start.up_to(&rest).to_owned();
Ok((
end,
Item::MixinCall(name, args.unwrap_or_default(), body, pos),
))
}
/// When we know that `input0` starts with an `@` sign.
fn at_rule2(input0: Span) -> PResult<Item> {
let (input, name) =
delimited(tag("@"), sass_string, opt_spacelike)(input0)?;
match name.single_raw().unwrap_or("") {
"at-root" => at_root2(input),
"charset" => charset2(input),
"content" => content_stmt2(input),
"debug" => map(expression_argument, Item::Debug)(input),
"each" => each_loop2(input),
"error" => {
let (end, v) = value_expression(input)?;
let (rest, _) = opt(tag(";"))(end)?;
let pos = input0.up_to(&end).to_owned();
Ok((rest, Item::Error(v, pos)))
}
"extend" => map(
delimited(
opt_spacelike,
selectors,
preceded(opt_spacelike, tag(";")),
),
Item::Extend,
)(input),
"for" => for_loop2(input),
"forward" => forward2(input0, input),
"function" => function_declaration2(input),
"if" => if_statement2(input),
"import" => import2(input),
"include" => mixin_call(input0, input),
"media" => media::rule(input0, input),
"mixin" => mixin_declaration2(input),
"return" => return_stmt2(input0, input),
"use" => use2(input0, input),
"warn" => map(expression_argument, Item::Warn)(input),
"while" => while_loop2(input),
_ => unknown_atrule(name, input0, input),
}
}
fn unknown_atrule<'a>(
name: SassString,
start: Span,
input: Span<'a>,
) -> PResult<'a, Item> {
let (input, args) =
terminated(opt(unknown_rule_args), opt(ignore_space))(input)?;
fn x_args(value: Value) -> Value {
match value {
Value::Variable(name, _pos) => {
Value::Literal(SassString::from(format!("${name}")))
}
Value::Map(map) => Value::Map(
map.into_iter()
.map(|(k, v)| (x_args(k), x_args(v)))
.collect(),
),
value => value,
}
}
let (rest, body) = if input.first() == Some(&b'{') {
map(body_block, Some)(input)?
} else {
value(None, semi_or_end)(input)?
};
Ok((
rest,
Item::AtRule {
name,
args: args.map_or(Value::Null, x_args),
body,
pos: start.up_to(&input).to_owned(),
},
))
}
fn expression_argument(input: Span) -> PResult<Value> {
terminated(value_expression, opt(tag(";")))(input)
}
fn charset2(input: Span) -> PResult<Item> {
use nom::combinator::map_opt;
map_opt(
terminated(
alt((sass_string_dq, sass_string_sq, sass_string)),
semi_or_end,
),
|s| {
s.single_raw().and_then(|s| {
if s.eq_ignore_ascii_case("UTF-8") {
Some(Item::None)
} else {
None
}
})
},
)(input)
}
/// Arguments to an unkown at rule.
fn unknown_rule_args(input: Span) -> PResult<Value> {
let (input, args) = separated_list0(
preceded(tag(","), opt_spacelike),
map(
many0(preceded(
opt(ignore_space),
alt((
terminated(
alt((
function_call_or_string,
dictionary,
map(
delimited(tag("("), media::args, tag(")")),
|v| Value::Paren(Box::new(v), true),
),
map(sass_string_dq, Value::Literal),
map(sass_string_sq, Value::Literal),
)),
alt((
value((), all_consuming(tag(""))),
value((), peek(one_of(") \r\n\t{,;"))),
)),
),
map(map_res(is_not("\"'{};#"), input_to_str), |s| {
Value::Literal(s.trim_end().into())
}),
)),
)),
|args| list_or_single(args, ListSeparator::Space),
),
)(input)?;
Ok((input, list_or_single(args, ListSeparator::Comma)))
}
#[cfg(test)]
pub(crate) fn check_parse<T>(
parser: impl Fn(Span) -> PResult<T>,
value: &[u8],
) -> Result<T, ParseError> {
ParseError::check(parser(code_span(value).borrow()))
}
fn if_statement_inner(input: Span) -> PResult<Item> {
preceded(
terminated(verify(name, |n: &String| n == "if"), opt_spacelike),
if_statement2,
)(input)
}
fn if_statement2(input: Span) -> PResult<Item> {
let (input, cond) = terminated(value_expression, opt_spacelike)(input)?;
let (input, body) = body_block(input)?;
let (input2, word) = opt(delimited(
preceded(opt_spacelike, tag("@")),
name,
opt_spacelike,
))(input)?;
match word.as_ref().map(AsRef::as_ref) {
Some("else") => {
let (input2, else_body) = alt((
body_block,
map(if_statement_inner, |s| vec![s]),
))(input2)?;
Ok((input2, Item::IfStatement(cond, body, else_body)))
}
Some("elseif") => {
let (input2, else_body) = if_statement2(input2)?;
Ok((input2, Item::IfStatement(cond, body, vec![else_body])))
}
_ => Ok((input, Item::IfStatement(cond, body, vec![]))),
}
}
/// The part of an each look that follows the `@each`.
fn each_loop2(input: Span) -> PResult<Item> {
let (input, names) = separated_list1(
delimited(opt_spacelike, tag(","), opt_spacelike),
map(preceded(tag("$"), name), Name::from),
)(input)?;
let (input, values) = delimited(
delimited(spacelike, tag("in"), spacelike),
value_expression,
opt_spacelike,
)(input)?;
let (input, body) = body_block(input)?;
Ok((input, Item::Each(names, values, body)))
}
/// A for loop after the initial `@for`.
fn for_loop2(input: Span) -> PResult<Item> {
let (input, name) = delimited(tag("$"), name, spacelike)(input)?;
let (input, from) = delimited(
terminated(tag("from"), spacelike),
single_value,
spacelike,
)(input)?;
let (input, inclusive) = terminated(
alt((value(true, tag("through")), value(false, tag("to")))),
spacelike,
)(input)?;
let (input, to) = terminated(single_value, opt_spacelike)(input)?;
let (input, body) = body_block(input)?;
Ok((
input,
Item::For {
name: name.into(),
from: Box::new(from),
to: Box::new(to),
inclusive,
body,
},
))
}
fn while_loop2(input: Span) -> PResult<Item> {
let (input, cond) = terminated(value_expression, opt_spacelike)(input)?;
let (input, body) = body_block(input)?;
Ok((input, Item::While(cond, body)))
}
fn mixin_declaration2(input: Span) -> PResult<Item> {
let (rest, name) = terminated(name, opt_spacelike)(input)?;
let (rest, args) = opt(formal_args)(rest)?;
let (end, body) = preceded(opt_spacelike, body_block)(rest)?;
let args = args.unwrap_or_else(FormalArgs::none);
let decl = input.up_to(&rest).to_owned();
Ok((
end,
Item::MixinDeclaration(name, Callable { args, body, decl }),
))
}
fn function_declaration2(input: Span) -> PResult<Item> {
let (end, name) = terminated(name, opt_spacelike)(input)?;
let (end, args) = formal_args(end)?;
let (rest, body) = preceded(opt_spacelike, body_block)(end)?;
let decl = input.up_to(&end).to_owned();
Ok((
rest,
Item::FunctionDeclaration(name, Callable { args, body, decl }),
))
}
fn return_stmt2<'a>(start: Span, input: Span<'a>) -> PResult<'a, Item> {
let (input, v) =
delimited(opt_spacelike, value_expression, opt_spacelike)(input)?;
let pos = start.up_to(&input).to_owned();
let (input, _) = opt(tag(";"))(input)?;
Ok((input, Item::Return(v, pos)))
}
/// The "rest" of an `@content` statement is just an optional terminator
fn content_stmt2(input: Span) -> PResult<Item> {
let (rest, _) = opt_spacelike(input)?;
let (rest, args) = opt(call_args)(rest)?;
let (rest, _) = opt(tag(";"))(rest)?;
let pos = input.up_to(&rest).to_owned();
Ok((rest, Item::Content(args.unwrap_or_default(), pos)))
}
fn custom_property(input: Span) -> PResult<Item> {
let (rest, name) = terminated(opt(sass_string), tag(":"))(input)?;
let mut name = name.unwrap_or_else(|| SassString::from(""));
// The dashes was parsed before calling this method.
name.prepend("--");
let (rest, value) =
terminated(custom_value, alt((tag(";"), peek(tag("}")))))(rest)?;
Ok((rest, Item::CustomProperty(name, value)))
}
fn property_or_namespace_rule(input: Span) -> PResult<Item> {
let (start_val, name) = terminated(
alt((
map(preceded(tag("*"), sass_string), |mut s| {
s.prepend("*");
s
}),
sass_string,
)),
delimited(ignore_comments, tag(":"), ignore_comments),
)(input)?;
let (input, val) = opt(value_expression)(start_val)?;
let pos = start_val.up_to(&input).to_owned();
let (input, _) = opt_spacelike(input)?;
| };
let (input, body) = match next.fragment() {
b"{" => map(body_block2, Some)(input)?,
b";" => (input, None),
b"" => (input, None),
_ => (input, None), // error?
};
let (input, _) = opt_spacelike(input)?;
Ok((input, ns_or_prop_item(name, val, body, pos)))
}
use crate::sass::SassString;
fn ns_or_prop_item(
name: SassString,
value: Option<Value>,
body: Option<Vec<Item>>,
pos: SourcePos,
) -> Item {
if let Some(body) = body {
Item::NamespaceRule(name, value.unwrap_or(Value::Null), body)
} else if let Some(value) = value {
Item::Property(name, value, pos)
} else {
unreachable!()
}
}
fn body_block(input: Span) -> PResult<Vec<Item>> {
preceded(tag("{"), body_block2)(input)
}
fn body_block2(input: Span) -> PResult<Vec<Item>> {
let (input, (v, _end)) = preceded(
opt_spacelike,
many_till(
terminated(body_item, opt_spacelike),
terminated(terminated(tag("}"), opt_spacelike), opt(tag(";"))),
),
)(input)?;
Ok((input, v))
}
fn input_to_str(s: Span) -> Result<&str, Utf8Error> {
from_utf8(s.fragment())
}
fn input_to_string(s: Span) -> Result<String, Utf8Error> {
from_utf8(s.fragment()).map(String::from)
}
fn list_or_single(list: Vec<Value>, sep: ListSeparator) -> Value {
if list.len() == 1 {
list.into_iter().next().unwrap()
} else {
Value::List(list, Some(sep), false)
}
} | let (input, next) = if val.is_some() {
alt((tag("{"), tag(";"), tag("")))(input)?
} else {
tag("{")(input)? | random_line_split |
mod.rs | pub(crate) mod css;
mod css_function;
mod error;
pub mod formalargs;
mod imports;
mod media;
pub mod selectors;
mod span;
pub(crate) mod strings;
mod unit;
pub(crate) mod util;
pub mod value;
pub(crate) use self::strings::name;
pub use error::ParseError;
pub(crate) use span::DebugBytes;
pub(crate) use span::{position, Span};
use self::formalargs::{call_args, formal_args};
use self::selectors::selectors;
use self::strings::{
custom_value, sass_string, sass_string_dq, sass_string_sq,
};
use self::util::{
comment2, ignore_comments, ignore_space, opt_spacelike, semi_or_end,
spacelike,
};
use self::value::{
dictionary, function_call_or_string, single_value, value_expression,
};
use crate::input::{SourceFile, SourceName, SourcePos};
use crate::sass::parser::{variable_declaration2, variable_declaration_mod};
use crate::sass::{Callable, FormalArgs, Item, Name, Selectors, Value};
use crate::value::ListSeparator;
#[cfg(test)]
use crate::value::{Numeric, Unit};
use crate::Error;
use imports::{forward2, import2, use2};
use nom::branch::alt;
use nom::bytes::complete::{is_a, is_not, tag};
use nom::character::complete::one_of;
use nom::combinator::{
all_consuming, into, map, map_res, opt, peek, value, verify,
};
use nom::multi::{many0, many_till, separated_list0, separated_list1};
use nom::sequence::{delimited, pair, preceded, terminated};
use nom::IResult;
use std::str::{from_utf8, Utf8Error};
/// A Parsing Result; ok gives a span for the rest of the data and a parsed T.
pub(crate) type PResult<'a, T> = IResult<Span<'a>, T>;
pub(crate) fn code_span(value: &[u8]) -> SourcePos {
SourceFile::scss_bytes(value, SourceName::root("(rsass)")).into()
}
pub(crate) fn input_span(value: impl Into<Vec<u8>>) -> SourcePos {
SourceFile::scss_bytes(value, SourceName::root("-")).into()
}
/// Parse a scss value.
///
/// Returns a single value (or an error).
pub fn | (data: &[u8]) -> Result<Value, Error> {
let data = code_span(data);
let value = all_consuming(value_expression)(data.borrow());
Ok(ParseError::check(value)?)
}
#[test]
fn test_parse_value_data_1() -> Result<(), Error> {
let v = parse_value_data(b"17em")?;
assert_eq!(Value::Numeric(Numeric::new(17, Unit::Em)), v);
Ok(())
}
#[test]
fn test_parse_value_data_2() -> Result<(), Error> {
let v = parse_value_data(b"17em;");
assert!(v.is_err());
Ok(())
}
pub(crate) fn sassfile(input: Span) -> PResult<Vec<Item>> {
preceded(
opt(tag("\u{feff}".as_bytes())),
map(
many_till(
preceded(opt_spacelike, top_level_item),
all_consuming(opt_spacelike),
),
|(v, _eof)| v,
),
)(input)
}
fn top_level_item(input: Span) -> PResult<Item> {
let (rest, tag) = alt((tag("$"), tag("/*"), tag("@"), tag("")))(input)?;
match tag.fragment() {
b"$" => into(variable_declaration2)(rest),
b"/*" => comment_item(rest),
b"@" => at_rule2(input),
b"" => alt((into(variable_declaration_mod), rule))(input),
_ => unreachable!(),
}
}
fn comment_item(input: Span) -> PResult<Item> {
map(comment2, Item::Comment)(input)
}
fn rule(input: Span) -> PResult<Item> {
map(pair(rule_start, body_block2), |(selectors, body)| {
Item::Rule(selectors, body)
})(input)
}
fn rule_start(input: Span) -> PResult<Selectors> {
terminated(selectors, terminated(opt(is_a(", \t\r\n")), tag("{")))(input)
}
fn body_item(input: Span) -> PResult<Item> {
let (rest, tag) =
alt((tag("$"), tag("/*"), tag(";"), tag("@"), tag("--"), tag("")))(
input,
)?;
match tag.fragment() {
b"$" => into(variable_declaration2)(rest),
b"/*" => comment_item(rest),
b";" => Ok((rest, Item::None)),
b"@" => at_rule2(input),
b"--" => {
let result = custom_property(rest);
if result.is_err() {
// Note use of `input` rather than `rest` here.
if let Ok((rest, rule)) = rule(input) {
return Ok((rest, rule));
}
}
result
}
b"" => match rule_start(rest) {
Ok((rest, selectors)) => {
let (rest, body) = body_block2(rest)?;
Ok((rest, Item::Rule(selectors, body)))
}
Err(_) => property_or_namespace_rule(rest),
},
_ => unreachable!(),
}
}
/// What follows the `@at-root` tag.
fn at_root2(input: Span) -> PResult<Item> {
preceded(
opt_spacelike,
map(
pair(
map(opt(selectors), |s| s.unwrap_or_else(Selectors::root)),
body_block,
),
|(selectors, body)| Item::AtRoot(selectors, body),
),
)(input)
}
/// What follows the `@include` tag.
fn mixin_call<'a>(start: Span, input: Span<'a>) -> PResult<'a, Item> {
let (rest, n1) = terminated(name, opt_spacelike)(input)?;
let (rest, n2) = opt(preceded(tag("."), name))(rest)?;
let name = n2.map(|n2| format!("{n1}.{n2}")).unwrap_or(n1);
let (rest, _) = opt_spacelike(rest)?;
let (rest0, args) = terminated(opt(call_args), opt_spacelike)(rest)?;
let (rest, t) = alt((tag("using"), tag("{"), tag("")))(rest0)?;
let (end, body) = match t.fragment() {
b"using" => {
let (end, args) = preceded(opt_spacelike, formal_args)(rest)?;
let (rest, body) = preceded(opt_spacelike, body_block)(end)?;
let decl = rest0.up_to(&end).to_owned();
(rest, Some(Callable::new(args, body, decl)))
}
b"{" => {
let (rest, body) = body_block(rest0)?;
let decl = rest0.up_to(&rest).to_owned();
(rest, Some(Callable::no_args(body, decl)))
}
_ => {
let (rest, _) = opt(tag(";"))(rest)?;
(rest, None)
}
};
let pos = start.up_to(&rest).to_owned();
Ok((
end,
Item::MixinCall(name, args.unwrap_or_default(), body, pos),
))
}
/// When we know that `input0` starts with an `@` sign.
fn at_rule2(input0: Span) -> PResult<Item> {
let (input, name) =
delimited(tag("@"), sass_string, opt_spacelike)(input0)?;
match name.single_raw().unwrap_or("") {
"at-root" => at_root2(input),
"charset" => charset2(input),
"content" => content_stmt2(input),
"debug" => map(expression_argument, Item::Debug)(input),
"each" => each_loop2(input),
"error" => {
let (end, v) = value_expression(input)?;
let (rest, _) = opt(tag(";"))(end)?;
let pos = input0.up_to(&end).to_owned();
Ok((rest, Item::Error(v, pos)))
}
"extend" => map(
delimited(
opt_spacelike,
selectors,
preceded(opt_spacelike, tag(";")),
),
Item::Extend,
)(input),
"for" => for_loop2(input),
"forward" => forward2(input0, input),
"function" => function_declaration2(input),
"if" => if_statement2(input),
"import" => import2(input),
"include" => mixin_call(input0, input),
"media" => media::rule(input0, input),
"mixin" => mixin_declaration2(input),
"return" => return_stmt2(input0, input),
"use" => use2(input0, input),
"warn" => map(expression_argument, Item::Warn)(input),
"while" => while_loop2(input),
_ => unknown_atrule(name, input0, input),
}
}
fn unknown_atrule<'a>(
name: SassString,
start: Span,
input: Span<'a>,
) -> PResult<'a, Item> {
let (input, args) =
terminated(opt(unknown_rule_args), opt(ignore_space))(input)?;
fn x_args(value: Value) -> Value {
match value {
Value::Variable(name, _pos) => {
Value::Literal(SassString::from(format!("${name}")))
}
Value::Map(map) => Value::Map(
map.into_iter()
.map(|(k, v)| (x_args(k), x_args(v)))
.collect(),
),
value => value,
}
}
let (rest, body) = if input.first() == Some(&b'{') {
map(body_block, Some)(input)?
} else {
value(None, semi_or_end)(input)?
};
Ok((
rest,
Item::AtRule {
name,
args: args.map_or(Value::Null, x_args),
body,
pos: start.up_to(&input).to_owned(),
},
))
}
fn expression_argument(input: Span) -> PResult<Value> {
terminated(value_expression, opt(tag(";")))(input)
}
fn charset2(input: Span) -> PResult<Item> {
use nom::combinator::map_opt;
map_opt(
terminated(
alt((sass_string_dq, sass_string_sq, sass_string)),
semi_or_end,
),
|s| {
s.single_raw().and_then(|s| {
if s.eq_ignore_ascii_case("UTF-8") {
Some(Item::None)
} else {
None
}
})
},
)(input)
}
/// Arguments to an unkown at rule.
fn unknown_rule_args(input: Span) -> PResult<Value> {
let (input, args) = separated_list0(
preceded(tag(","), opt_spacelike),
map(
many0(preceded(
opt(ignore_space),
alt((
terminated(
alt((
function_call_or_string,
dictionary,
map(
delimited(tag("("), media::args, tag(")")),
|v| Value::Paren(Box::new(v), true),
),
map(sass_string_dq, Value::Literal),
map(sass_string_sq, Value::Literal),
)),
alt((
value((), all_consuming(tag(""))),
value((), peek(one_of(") \r\n\t{,;"))),
)),
),
map(map_res(is_not("\"'{};#"), input_to_str), |s| {
Value::Literal(s.trim_end().into())
}),
)),
)),
|args| list_or_single(args, ListSeparator::Space),
),
)(input)?;
Ok((input, list_or_single(args, ListSeparator::Comma)))
}
#[cfg(test)]
pub(crate) fn check_parse<T>(
parser: impl Fn(Span) -> PResult<T>,
value: &[u8],
) -> Result<T, ParseError> {
ParseError::check(parser(code_span(value).borrow()))
}
fn if_statement_inner(input: Span) -> PResult<Item> {
preceded(
terminated(verify(name, |n: &String| n == "if"), opt_spacelike),
if_statement2,
)(input)
}
fn if_statement2(input: Span) -> PResult<Item> {
let (input, cond) = terminated(value_expression, opt_spacelike)(input)?;
let (input, body) = body_block(input)?;
let (input2, word) = opt(delimited(
preceded(opt_spacelike, tag("@")),
name,
opt_spacelike,
))(input)?;
match word.as_ref().map(AsRef::as_ref) {
Some("else") => {
let (input2, else_body) = alt((
body_block,
map(if_statement_inner, |s| vec![s]),
))(input2)?;
Ok((input2, Item::IfStatement(cond, body, else_body)))
}
Some("elseif") => {
let (input2, else_body) = if_statement2(input2)?;
Ok((input2, Item::IfStatement(cond, body, vec![else_body])))
}
_ => Ok((input, Item::IfStatement(cond, body, vec![]))),
}
}
/// The part of an each look that follows the `@each`.
fn each_loop2(input: Span) -> PResult<Item> {
let (input, names) = separated_list1(
delimited(opt_spacelike, tag(","), opt_spacelike),
map(preceded(tag("$"), name), Name::from),
)(input)?;
let (input, values) = delimited(
delimited(spacelike, tag("in"), spacelike),
value_expression,
opt_spacelike,
)(input)?;
let (input, body) = body_block(input)?;
Ok((input, Item::Each(names, values, body)))
}
/// A for loop after the initial `@for`.
fn for_loop2(input: Span) -> PResult<Item> {
let (input, name) = delimited(tag("$"), name, spacelike)(input)?;
let (input, from) = delimited(
terminated(tag("from"), spacelike),
single_value,
spacelike,
)(input)?;
let (input, inclusive) = terminated(
alt((value(true, tag("through")), value(false, tag("to")))),
spacelike,
)(input)?;
let (input, to) = terminated(single_value, opt_spacelike)(input)?;
let (input, body) = body_block(input)?;
Ok((
input,
Item::For {
name: name.into(),
from: Box::new(from),
to: Box::new(to),
inclusive,
body,
},
))
}
fn while_loop2(input: Span) -> PResult<Item> {
let (input, cond) = terminated(value_expression, opt_spacelike)(input)?;
let (input, body) = body_block(input)?;
Ok((input, Item::While(cond, body)))
}
fn mixin_declaration2(input: Span) -> PResult<Item> {
let (rest, name) = terminated(name, opt_spacelike)(input)?;
let (rest, args) = opt(formal_args)(rest)?;
let (end, body) = preceded(opt_spacelike, body_block)(rest)?;
let args = args.unwrap_or_else(FormalArgs::none);
let decl = input.up_to(&rest).to_owned();
Ok((
end,
Item::MixinDeclaration(name, Callable { args, body, decl }),
))
}
fn function_declaration2(input: Span) -> PResult<Item> {
let (end, name) = terminated(name, opt_spacelike)(input)?;
let (end, args) = formal_args(end)?;
let (rest, body) = preceded(opt_spacelike, body_block)(end)?;
let decl = input.up_to(&end).to_owned();
Ok((
rest,
Item::FunctionDeclaration(name, Callable { args, body, decl }),
))
}
fn return_stmt2<'a>(start: Span, input: Span<'a>) -> PResult<'a, Item> {
let (input, v) =
delimited(opt_spacelike, value_expression, opt_spacelike)(input)?;
let pos = start.up_to(&input).to_owned();
let (input, _) = opt(tag(";"))(input)?;
Ok((input, Item::Return(v, pos)))
}
/// The "rest" of an `@content` statement is just an optional terminator
fn content_stmt2(input: Span) -> PResult<Item> {
let (rest, _) = opt_spacelike(input)?;
let (rest, args) = opt(call_args)(rest)?;
let (rest, _) = opt(tag(";"))(rest)?;
let pos = input.up_to(&rest).to_owned();
Ok((rest, Item::Content(args.unwrap_or_default(), pos)))
}
fn custom_property(input: Span) -> PResult<Item> {
let (rest, name) = terminated(opt(sass_string), tag(":"))(input)?;
let mut name = name.unwrap_or_else(|| SassString::from(""));
// The dashes was parsed before calling this method.
name.prepend("--");
let (rest, value) =
terminated(custom_value, alt((tag(";"), peek(tag("}")))))(rest)?;
Ok((rest, Item::CustomProperty(name, value)))
}
fn property_or_namespace_rule(input: Span) -> PResult<Item> {
let (start_val, name) = terminated(
alt((
map(preceded(tag("*"), sass_string), |mut s| {
s.prepend("*");
s
}),
sass_string,
)),
delimited(ignore_comments, tag(":"), ignore_comments),
)(input)?;
let (input, val) = opt(value_expression)(start_val)?;
let pos = start_val.up_to(&input).to_owned();
let (input, _) = opt_spacelike(input)?;
let (input, next) = if val.is_some() {
alt((tag("{"), tag(";"), tag("")))(input)?
} else {
tag("{")(input)?
};
let (input, body) = match next.fragment() {
b"{" => map(body_block2, Some)(input)?,
b";" => (input, None),
b"" => (input, None),
_ => (input, None), // error?
};
let (input, _) = opt_spacelike(input)?;
Ok((input, ns_or_prop_item(name, val, body, pos)))
}
use crate::sass::SassString;
fn ns_or_prop_item(
name: SassString,
value: Option<Value>,
body: Option<Vec<Item>>,
pos: SourcePos,
) -> Item {
if let Some(body) = body {
Item::NamespaceRule(name, value.unwrap_or(Value::Null), body)
} else if let Some(value) = value {
Item::Property(name, value, pos)
} else {
unreachable!()
}
}
fn body_block(input: Span) -> PResult<Vec<Item>> {
preceded(tag("{"), body_block2)(input)
}
fn body_block2(input: Span) -> PResult<Vec<Item>> {
let (input, (v, _end)) = preceded(
opt_spacelike,
many_till(
terminated(body_item, opt_spacelike),
terminated(terminated(tag("}"), opt_spacelike), opt(tag(";"))),
),
)(input)?;
Ok((input, v))
}
fn input_to_str(s: Span) -> Result<&str, Utf8Error> {
from_utf8(s.fragment())
}
fn input_to_string(s: Span) -> Result<String, Utf8Error> {
from_utf8(s.fragment()).map(String::from)
}
fn list_or_single(list: Vec<Value>, sep: ListSeparator) -> Value {
if list.len() == 1 {
list.into_iter().next().unwrap()
} else {
Value::List(list, Some(sep), false)
}
}
| parse_value_data | identifier_name |
mod.rs | pub(crate) mod css;
mod css_function;
mod error;
pub mod formalargs;
mod imports;
mod media;
pub mod selectors;
mod span;
pub(crate) mod strings;
mod unit;
pub(crate) mod util;
pub mod value;
pub(crate) use self::strings::name;
pub use error::ParseError;
pub(crate) use span::DebugBytes;
pub(crate) use span::{position, Span};
use self::formalargs::{call_args, formal_args};
use self::selectors::selectors;
use self::strings::{
custom_value, sass_string, sass_string_dq, sass_string_sq,
};
use self::util::{
comment2, ignore_comments, ignore_space, opt_spacelike, semi_or_end,
spacelike,
};
use self::value::{
dictionary, function_call_or_string, single_value, value_expression,
};
use crate::input::{SourceFile, SourceName, SourcePos};
use crate::sass::parser::{variable_declaration2, variable_declaration_mod};
use crate::sass::{Callable, FormalArgs, Item, Name, Selectors, Value};
use crate::value::ListSeparator;
#[cfg(test)]
use crate::value::{Numeric, Unit};
use crate::Error;
use imports::{forward2, import2, use2};
use nom::branch::alt;
use nom::bytes::complete::{is_a, is_not, tag};
use nom::character::complete::one_of;
use nom::combinator::{
all_consuming, into, map, map_res, opt, peek, value, verify,
};
use nom::multi::{many0, many_till, separated_list0, separated_list1};
use nom::sequence::{delimited, pair, preceded, terminated};
use nom::IResult;
use std::str::{from_utf8, Utf8Error};
/// A Parsing Result; ok gives a span for the rest of the data and a parsed T.
pub(crate) type PResult<'a, T> = IResult<Span<'a>, T>;
pub(crate) fn code_span(value: &[u8]) -> SourcePos {
SourceFile::scss_bytes(value, SourceName::root("(rsass)")).into()
}
pub(crate) fn input_span(value: impl Into<Vec<u8>>) -> SourcePos {
SourceFile::scss_bytes(value, SourceName::root("-")).into()
}
/// Parse a scss value.
///
/// Returns a single value (or an error).
pub fn parse_value_data(data: &[u8]) -> Result<Value, Error> {
let data = code_span(data);
let value = all_consuming(value_expression)(data.borrow());
Ok(ParseError::check(value)?)
}
#[test]
fn test_parse_value_data_1() -> Result<(), Error> {
let v = parse_value_data(b"17em")?;
assert_eq!(Value::Numeric(Numeric::new(17, Unit::Em)), v);
Ok(())
}
#[test]
fn test_parse_value_data_2() -> Result<(), Error> {
let v = parse_value_data(b"17em;");
assert!(v.is_err());
Ok(())
}
pub(crate) fn sassfile(input: Span) -> PResult<Vec<Item>> {
preceded(
opt(tag("\u{feff}".as_bytes())),
map(
many_till(
preceded(opt_spacelike, top_level_item),
all_consuming(opt_spacelike),
),
|(v, _eof)| v,
),
)(input)
}
fn top_level_item(input: Span) -> PResult<Item> {
let (rest, tag) = alt((tag("$"), tag("/*"), tag("@"), tag("")))(input)?;
match tag.fragment() {
b"$" => into(variable_declaration2)(rest),
b"/*" => comment_item(rest),
b"@" => at_rule2(input),
b"" => alt((into(variable_declaration_mod), rule))(input),
_ => unreachable!(),
}
}
fn comment_item(input: Span) -> PResult<Item> {
map(comment2, Item::Comment)(input)
}
fn rule(input: Span) -> PResult<Item> {
map(pair(rule_start, body_block2), |(selectors, body)| {
Item::Rule(selectors, body)
})(input)
}
fn rule_start(input: Span) -> PResult<Selectors> {
terminated(selectors, terminated(opt(is_a(", \t\r\n")), tag("{")))(input)
}
fn body_item(input: Span) -> PResult<Item> {
let (rest, tag) =
alt((tag("$"), tag("/*"), tag(";"), tag("@"), tag("--"), tag("")))(
input,
)?;
match tag.fragment() {
b"$" => into(variable_declaration2)(rest),
b"/*" => comment_item(rest),
b";" => Ok((rest, Item::None)),
b"@" => at_rule2(input),
b"--" => {
let result = custom_property(rest);
if result.is_err() {
// Note use of `input` rather than `rest` here.
if let Ok((rest, rule)) = rule(input) {
return Ok((rest, rule));
}
}
result
}
b"" => match rule_start(rest) {
Ok((rest, selectors)) => {
let (rest, body) = body_block2(rest)?;
Ok((rest, Item::Rule(selectors, body)))
}
Err(_) => property_or_namespace_rule(rest),
},
_ => unreachable!(),
}
}
/// What follows the `@at-root` tag.
fn at_root2(input: Span) -> PResult<Item> {
preceded(
opt_spacelike,
map(
pair(
map(opt(selectors), |s| s.unwrap_or_else(Selectors::root)),
body_block,
),
|(selectors, body)| Item::AtRoot(selectors, body),
),
)(input)
}
/// What follows the `@include` tag.
fn mixin_call<'a>(start: Span, input: Span<'a>) -> PResult<'a, Item> {
let (rest, n1) = terminated(name, opt_spacelike)(input)?;
let (rest, n2) = opt(preceded(tag("."), name))(rest)?;
let name = n2.map(|n2| format!("{n1}.{n2}")).unwrap_or(n1);
let (rest, _) = opt_spacelike(rest)?;
let (rest0, args) = terminated(opt(call_args), opt_spacelike)(rest)?;
let (rest, t) = alt((tag("using"), tag("{"), tag("")))(rest0)?;
let (end, body) = match t.fragment() {
b"using" => {
let (end, args) = preceded(opt_spacelike, formal_args)(rest)?;
let (rest, body) = preceded(opt_spacelike, body_block)(end)?;
let decl = rest0.up_to(&end).to_owned();
(rest, Some(Callable::new(args, body, decl)))
}
b"{" => {
let (rest, body) = body_block(rest0)?;
let decl = rest0.up_to(&rest).to_owned();
(rest, Some(Callable::no_args(body, decl)))
}
_ => {
let (rest, _) = opt(tag(";"))(rest)?;
(rest, None)
}
};
let pos = start.up_to(&rest).to_owned();
Ok((
end,
Item::MixinCall(name, args.unwrap_or_default(), body, pos),
))
}
/// When we know that `input0` starts with an `@` sign.
fn at_rule2(input0: Span) -> PResult<Item> {
let (input, name) =
delimited(tag("@"), sass_string, opt_spacelike)(input0)?;
match name.single_raw().unwrap_or("") {
"at-root" => at_root2(input),
"charset" => charset2(input),
"content" => content_stmt2(input),
"debug" => map(expression_argument, Item::Debug)(input),
"each" => each_loop2(input),
"error" => {
let (end, v) = value_expression(input)?;
let (rest, _) = opt(tag(";"))(end)?;
let pos = input0.up_to(&end).to_owned();
Ok((rest, Item::Error(v, pos)))
}
"extend" => map(
delimited(
opt_spacelike,
selectors,
preceded(opt_spacelike, tag(";")),
),
Item::Extend,
)(input),
"for" => for_loop2(input),
"forward" => forward2(input0, input),
"function" => function_declaration2(input),
"if" => if_statement2(input),
"import" => import2(input),
"include" => mixin_call(input0, input),
"media" => media::rule(input0, input),
"mixin" => mixin_declaration2(input),
"return" => return_stmt2(input0, input),
"use" => use2(input0, input),
"warn" => map(expression_argument, Item::Warn)(input),
"while" => while_loop2(input),
_ => unknown_atrule(name, input0, input),
}
}
fn unknown_atrule<'a>(
name: SassString,
start: Span,
input: Span<'a>,
) -> PResult<'a, Item> |
fn expression_argument(input: Span) -> PResult<Value> {
terminated(value_expression, opt(tag(";")))(input)
}
fn charset2(input: Span) -> PResult<Item> {
use nom::combinator::map_opt;
map_opt(
terminated(
alt((sass_string_dq, sass_string_sq, sass_string)),
semi_or_end,
),
|s| {
s.single_raw().and_then(|s| {
if s.eq_ignore_ascii_case("UTF-8") {
Some(Item::None)
} else {
None
}
})
},
)(input)
}
/// Arguments to an unkown at rule.
fn unknown_rule_args(input: Span) -> PResult<Value> {
let (input, args) = separated_list0(
preceded(tag(","), opt_spacelike),
map(
many0(preceded(
opt(ignore_space),
alt((
terminated(
alt((
function_call_or_string,
dictionary,
map(
delimited(tag("("), media::args, tag(")")),
|v| Value::Paren(Box::new(v), true),
),
map(sass_string_dq, Value::Literal),
map(sass_string_sq, Value::Literal),
)),
alt((
value((), all_consuming(tag(""))),
value((), peek(one_of(") \r\n\t{,;"))),
)),
),
map(map_res(is_not("\"'{};#"), input_to_str), |s| {
Value::Literal(s.trim_end().into())
}),
)),
)),
|args| list_or_single(args, ListSeparator::Space),
),
)(input)?;
Ok((input, list_or_single(args, ListSeparator::Comma)))
}
#[cfg(test)]
pub(crate) fn check_parse<T>(
parser: impl Fn(Span) -> PResult<T>,
value: &[u8],
) -> Result<T, ParseError> {
ParseError::check(parser(code_span(value).borrow()))
}
fn if_statement_inner(input: Span) -> PResult<Item> {
preceded(
terminated(verify(name, |n: &String| n == "if"), opt_spacelike),
if_statement2,
)(input)
}
fn if_statement2(input: Span) -> PResult<Item> {
let (input, cond) = terminated(value_expression, opt_spacelike)(input)?;
let (input, body) = body_block(input)?;
let (input2, word) = opt(delimited(
preceded(opt_spacelike, tag("@")),
name,
opt_spacelike,
))(input)?;
match word.as_ref().map(AsRef::as_ref) {
Some("else") => {
let (input2, else_body) = alt((
body_block,
map(if_statement_inner, |s| vec![s]),
))(input2)?;
Ok((input2, Item::IfStatement(cond, body, else_body)))
}
Some("elseif") => {
let (input2, else_body) = if_statement2(input2)?;
Ok((input2, Item::IfStatement(cond, body, vec![else_body])))
}
_ => Ok((input, Item::IfStatement(cond, body, vec![]))),
}
}
/// The part of an each look that follows the `@each`.
fn each_loop2(input: Span) -> PResult<Item> {
let (input, names) = separated_list1(
delimited(opt_spacelike, tag(","), opt_spacelike),
map(preceded(tag("$"), name), Name::from),
)(input)?;
let (input, values) = delimited(
delimited(spacelike, tag("in"), spacelike),
value_expression,
opt_spacelike,
)(input)?;
let (input, body) = body_block(input)?;
Ok((input, Item::Each(names, values, body)))
}
/// A for loop after the initial `@for`.
fn for_loop2(input: Span) -> PResult<Item> {
let (input, name) = delimited(tag("$"), name, spacelike)(input)?;
let (input, from) = delimited(
terminated(tag("from"), spacelike),
single_value,
spacelike,
)(input)?;
let (input, inclusive) = terminated(
alt((value(true, tag("through")), value(false, tag("to")))),
spacelike,
)(input)?;
let (input, to) = terminated(single_value, opt_spacelike)(input)?;
let (input, body) = body_block(input)?;
Ok((
input,
Item::For {
name: name.into(),
from: Box::new(from),
to: Box::new(to),
inclusive,
body,
},
))
}
fn while_loop2(input: Span) -> PResult<Item> {
let (input, cond) = terminated(value_expression, opt_spacelike)(input)?;
let (input, body) = body_block(input)?;
Ok((input, Item::While(cond, body)))
}
fn mixin_declaration2(input: Span) -> PResult<Item> {
let (rest, name) = terminated(name, opt_spacelike)(input)?;
let (rest, args) = opt(formal_args)(rest)?;
let (end, body) = preceded(opt_spacelike, body_block)(rest)?;
let args = args.unwrap_or_else(FormalArgs::none);
let decl = input.up_to(&rest).to_owned();
Ok((
end,
Item::MixinDeclaration(name, Callable { args, body, decl }),
))
}
fn function_declaration2(input: Span) -> PResult<Item> {
let (end, name) = terminated(name, opt_spacelike)(input)?;
let (end, args) = formal_args(end)?;
let (rest, body) = preceded(opt_spacelike, body_block)(end)?;
let decl = input.up_to(&end).to_owned();
Ok((
rest,
Item::FunctionDeclaration(name, Callable { args, body, decl }),
))
}
fn return_stmt2<'a>(start: Span, input: Span<'a>) -> PResult<'a, Item> {
let (input, v) =
delimited(opt_spacelike, value_expression, opt_spacelike)(input)?;
let pos = start.up_to(&input).to_owned();
let (input, _) = opt(tag(";"))(input)?;
Ok((input, Item::Return(v, pos)))
}
/// The "rest" of an `@content` statement is just an optional terminator
fn content_stmt2(input: Span) -> PResult<Item> {
let (rest, _) = opt_spacelike(input)?;
let (rest, args) = opt(call_args)(rest)?;
let (rest, _) = opt(tag(";"))(rest)?;
let pos = input.up_to(&rest).to_owned();
Ok((rest, Item::Content(args.unwrap_or_default(), pos)))
}
fn custom_property(input: Span) -> PResult<Item> {
let (rest, name) = terminated(opt(sass_string), tag(":"))(input)?;
let mut name = name.unwrap_or_else(|| SassString::from(""));
// The dashes was parsed before calling this method.
name.prepend("--");
let (rest, value) =
terminated(custom_value, alt((tag(";"), peek(tag("}")))))(rest)?;
Ok((rest, Item::CustomProperty(name, value)))
}
fn property_or_namespace_rule(input: Span) -> PResult<Item> {
let (start_val, name) = terminated(
alt((
map(preceded(tag("*"), sass_string), |mut s| {
s.prepend("*");
s
}),
sass_string,
)),
delimited(ignore_comments, tag(":"), ignore_comments),
)(input)?;
let (input, val) = opt(value_expression)(start_val)?;
let pos = start_val.up_to(&input).to_owned();
let (input, _) = opt_spacelike(input)?;
let (input, next) = if val.is_some() {
alt((tag("{"), tag(";"), tag("")))(input)?
} else {
tag("{")(input)?
};
let (input, body) = match next.fragment() {
b"{" => map(body_block2, Some)(input)?,
b";" => (input, None),
b"" => (input, None),
_ => (input, None), // error?
};
let (input, _) = opt_spacelike(input)?;
Ok((input, ns_or_prop_item(name, val, body, pos)))
}
use crate::sass::SassString;
fn ns_or_prop_item(
name: SassString,
value: Option<Value>,
body: Option<Vec<Item>>,
pos: SourcePos,
) -> Item {
if let Some(body) = body {
Item::NamespaceRule(name, value.unwrap_or(Value::Null), body)
} else if let Some(value) = value {
Item::Property(name, value, pos)
} else {
unreachable!()
}
}
fn body_block(input: Span) -> PResult<Vec<Item>> {
preceded(tag("{"), body_block2)(input)
}
fn body_block2(input: Span) -> PResult<Vec<Item>> {
let (input, (v, _end)) = preceded(
opt_spacelike,
many_till(
terminated(body_item, opt_spacelike),
terminated(terminated(tag("}"), opt_spacelike), opt(tag(";"))),
),
)(input)?;
Ok((input, v))
}
fn input_to_str(s: Span) -> Result<&str, Utf8Error> {
from_utf8(s.fragment())
}
fn input_to_string(s: Span) -> Result<String, Utf8Error> {
from_utf8(s.fragment()).map(String::from)
}
fn list_or_single(list: Vec<Value>, sep: ListSeparator) -> Value {
if list.len() == 1 {
list.into_iter().next().unwrap()
} else {
Value::List(list, Some(sep), false)
}
}
| {
let (input, args) =
terminated(opt(unknown_rule_args), opt(ignore_space))(input)?;
fn x_args(value: Value) -> Value {
match value {
Value::Variable(name, _pos) => {
Value::Literal(SassString::from(format!("${name}")))
}
Value::Map(map) => Value::Map(
map.into_iter()
.map(|(k, v)| (x_args(k), x_args(v)))
.collect(),
),
value => value,
}
}
let (rest, body) = if input.first() == Some(&b'{') {
map(body_block, Some)(input)?
} else {
value(None, semi_or_end)(input)?
};
Ok((
rest,
Item::AtRule {
name,
args: args.map_or(Value::Null, x_args),
body,
pos: start.up_to(&input).to_owned(),
},
))
} | identifier_body |
http2.go | package http2
import (
// Standard
"bytes"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/gob"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
// 3rd Party
"github.com/cretz/gopaque/gopaque"
"github.com/fatih/color"
"github.com/lucas-clemente/quic-go"
"github.com/lucas-clemente/quic-go/h2quic"
"github.com/satori/go.uuid"
"go.dedis.ch/kyber"
"gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
// reaper
"github.com/infosechoudini/reaper/pkg/agents"
"github.com/infosechoudini/reaper/pkg/core"
"github.com/infosechoudini/reaper/pkg/logging"
"github.com/infosechoudini/reaper/pkg/messages"
"github.com/infosechoudini/reaper/pkg/util"
)
// Server is a structure for creating and instantiating new server objects
type Server struct {
ID uuid.UUID // Unique identifier for the Server object
Interface string // The network adapter interface the server will listen on
Port int // The port the server will listen on
Protocol string // The protocol (i.e. HTTP/2 or HTTP/3) the server will use
Key string // The x.509 private key used for TLS encryption
Certificate string // The x.509 public key used for TLS encryption
Server interface{} // A Golang server object (i.e http.Server or h3quic.Server)
Mux *http.ServeMux // The message handler/multiplexer
jwtKey []byte // The password used by the server to create JWTs
psk string // The pre-shared key password used prior to Password Authenticated Key Exchange (PAKE)
opaqueKey kyber.Scalar // OPAQUE server's keys
}
// New instantiates a new server object and returns it
func New(iface string, port int, protocol string, key string, certificate string, psk string) (Server, error) {
s := Server{
ID: uuid.NewV4(),
Protocol: protocol,
Interface: iface,
Port: port,
Mux: http.NewServeMux(),
jwtKey: []byte(core.RandStringBytesMaskImprSrc(32)), // Used to sign and encrypt JWT
psk: psk,
}
// OPAQUE Server Public/Private keys; Can be used with every agent
s.opaqueKey = gopaque.CryptoDefault.NewKey(nil)
var cer tls.Certificate
var err error
// Check if certificate exists on disk
_, errCrt := os.Stat(certificate)
if os.IsNotExist(errCrt) {
// generate a new ephemeral certificate
m := fmt.Sprintf("No certificate found at %s", certificate)
logging.Server(m)
message("note", m)
t := "Creating in-memory x.509 certificate used for this session only."
logging.Server(t)
message("note", t)
message("info", "Additional details: https://github.com/Ne0nd0g/reaper/wiki/TLS-Certificates")
cerp, err := util.GenerateTLSCert(nil, nil, nil, nil, nil, nil, true) //ec certs not supported (yet) :(
if err != nil {
m := fmt.Sprintf("There was an error generating the SSL/TLS certificate:\r\n%s", err.Error())
logging.Server(m)
message("warn", m)
return s, err
}
cer = *cerp
} else {
if errCrt != nil {
m := fmt.Sprintf("There was an error importing the SSL/TLS x509 certificate:\r\n%s", errCrt.Error())
logging.Server(m)
message("warn", m)
return s, errCrt
}
s.Certificate = certificate
_, errKey := os.Stat(key)
if errKey != nil {
m := fmt.Sprintf("There was an error importing the SSL/TLS x509 key:\r\n%s", errKey.Error())
logging.Server(m)
message("warn", m)
return s, errKey
}
s.Key = key
cer, err = tls.LoadX509KeyPair(certificate, key)
if err != nil {
m := fmt.Sprintf("There was an error importing the SSL/TLS x509 key pair\r\n%s", err.Error())
logging.Server(m)
message("warn", m)
message("warn", "Ensure a keypair is located in the data/x509 directory")
return s, err
}
}
if len(cer.Certificate) < 1 || cer.PrivateKey == nil {
m := "Unable to import certificate for use in reaper: empty certificate structure."
logging.Server(m)
message("warn", m)
return s, errors.New("empty certificate structure")
}
// Parse into X.509 format
x, errX509 := x509.ParseCertificate(cer.Certificate[0])
if errX509 != nil {
m := fmt.Sprintf("There was an error parsing the tls.Certificate structure into a x509.Certificate"+
" structure:\r\n%s", errX509.Error())
logging.Server(m)
message("warn", m)
return s, errX509
}
// Create fingerprint
S256 := sha256.Sum256(x.Raw)
sha256Fingerprint := hex.EncodeToString(S256[:])
// reaperCRT is the string representation of the SHA1 fingerprint for the public x.509 certificate distributed with reaper
reaperCRT := "4af9224c77821bc8a46503cfc2764b94b1fc8aa2521afc627e835f0b3c449f50"
// Check to see if the Public Key SHA1 finger print matches the certificate distributed with reaper for testing
if reaperCRT == sha256Fingerprint {
message("warn", "Insecure publicly distributed reaper x.509 testing certificate in use")
message("info", "Additional details: https://github.com/Ne0nd0g/reaper/wiki/TLS-Certificates")
}
// Log certificate information
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a %s signature of %s",
x.SignatureAlgorithm.String(), hex.EncodeToString(x.Signature)))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a public key of %v", x.PublicKey))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a serial number of %d", x.SerialNumber))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certifcate with a subject of %s", x.Subject.String()))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a SHA256 hash, "+
"calculated by reaper, of %s", sha256Fingerprint))
// Configure TLS
TLSConfig := &tls.Config{
Certificates: []tls.Certificate{cer},
MinVersion: tls.VersionTLS12,
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
PreferServerCipherSuites: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
},
//NextProtos: []string{protocol}, //Dont need to specify because server will pick
}
s.Mux.HandleFunc("/", s.agentHandler)
srv := &http.Server{
Addr: s.Interface + ":" + strconv.Itoa(s.Port),
Handler: s.Mux,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 1 << 20,
TLSConfig: TLSConfig,
//TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0), // <- Disables HTTP/2
}
if s.Protocol == "h2" {
s.Server = srv
} else if s.Protocol == "hq" {
s.Server = &h2quic.Server{
Server: srv,
QuicConfig: &quic.Config{
KeepAlive: false,
IdleTimeout: 168 * time.Hour,
RequestConnectionIDOmission: false,
},
}
} else {
return s, fmt.Errorf("%s is an invalid server protocol", s.Protocol)
}
return s, nil
}
// Run function starts the server on the preconfigured port for the preconfigured service
func (s *Server) Run() error {
logging.Server(fmt.Sprintf("Starting %s Listener at %s:%d", s.Protocol, s.Interface, s.Port))
time.Sleep(45 * time.Millisecond) // Sleep to allow the shell to start up
if s.psk == "reaper" {
fmt.Println()
message("warn", "Listener was started using \"reaper\" as the Pre-Shared Key (PSK) allowing anyone"+
" decrypt message traffic.")
message("note", "Consider changing the PSK by using the -psk command line flag.")
}
message("note", fmt.Sprintf("Starting %s listener on %s:%d", s.Protocol, s.Interface, s.Port))
if s.Protocol == "h2" {
server := s.Server.(*http.Server)
defer func() {
err := server.Close()
if err != nil {
m := fmt.Sprintf("There was an error starting the %s server:\r\n%s", s.Protocol, err.Error())
logging.Server(m)
message("warn", m)
return
}
}()
go logging.Server(server.ListenAndServeTLS(s.Certificate, s.Key).Error())
return nil
} else if s.Protocol == "hq" {
server := s.Server.(*h2quic.Server)
defer func() {
err := server.Close()
if err != nil {
m := fmt.Sprintf("There was an error starting the hq server:\r\n%s", err.Error())
logging.Server(m)
message("warn", m)
return
}
}()
go logging.Server(server.ListenAndServeTLS(s.Certificate, s.Key).Error())
return nil
}
return fmt.Errorf("%s is an invalid server protocol", s.Protocol)
}
// agentHandler function is responsible for all reaper agent traffic
func (s *Server) agentHandler(w http.ResponseWriter, r *http.Request) |
// getJWT returns a JSON Web Token for the provided agent using the interface JWT Key.
// The claims are signed with HS256 and then encrypted with A256GCM (direct key
// agreement) using the same key; the token's expiry is tied to the agent's lifetime.
func getJWT(agentID uuid.UUID, key []byte) (string, error) {
	if core.Debug {
		message("debug", "Entering into agents.GetJWT function")
	}
	// Build the JWE encrypter that wraps the signed token
	encOpts := (&jose.EncrypterOptions{}).WithType("JWT").WithContentType("JWT")
	encrypter, encErr := jose.NewEncrypter(jose.A256GCM, jose.Recipient{Algorithm: jose.DIRECT, Key: key}, encOpts)
	if encErr != nil {
		return "", fmt.Errorf("there was an error creating the JWE encryptor:\r\n%s", encErr)
	}
	// Build the JWT signer that authenticates the claims
	sigOpts := (&jose.SignerOptions{}).WithType("JWT")
	signer, errSigner := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, sigOpts)
	if errSigner != nil {
		return "", fmt.Errorf("there was an error creating the JWT signer:\r\n%s", errSigner.Error())
	}
	// GetLifetime errors with this exact string when the agent's sleep is zero;
	// that case is tolerated and handled by the default below
	lifetime, errLifetime := agents.GetLifetime(agentID)
	if errLifetime != nil && errLifetime.Error() != "agent WaitTime is equal to zero" {
		return "", errLifetime
	}
	// This is for when the server hasn't received an AgentInfo struct and doesn't know the agent's lifetime yet or sleep is set to zero
	if lifetime == 0 {
		lifetime = time.Second * 30
	}
	// TODO Add in the rest of the JWT claim info
	cl := jwt.Claims{
		ID:        agentID.String(),
		NotBefore: jwt.NewNumericDate(time.Now()),
		IssuedAt:  jwt.NewNumericDate(time.Now()),
		Expiry:    jwt.NewNumericDate(time.Now().Add(lifetime)),
	}
	agentJWT, err := jwt.SignedAndEncrypted(signer, encrypter).Claims(cl).CompactSerialize()
	if err != nil {
		return "", fmt.Errorf("there was an error serializing the JWT:\r\n%s", err.Error())
	}
	// Round-trip parse the freshly minted token to surface serialization errors early
	if _, errParse := jwt.ParseEncrypted(agentJWT); errParse != nil {
		return "", fmt.Errorf("there was an error parsing the encrypted JWT:\r\n%s", errParse.Error())
	}
	logging.Server(fmt.Sprintf("Created authenticated JWT for %s", agentID))
	if core.Debug {
		message("debug", fmt.Sprintf("Sending agent %s an authenticated JWT with a lifetime of %v:\r\n%v",
			agentID.String(), lifetime, agentJWT))
	}
	return agentJWT, nil
}
// validateJWT validates the provided JSON Web Token and returns the agent ID
// carried in its claims. The token is parsed, decrypted with the provided key,
// signature-checked, and finally its time claims are validated with a leeway
// equal to the agent's configured WaitTime.
func validateJWT(agentJWT string, key []byte) (uuid.UUID, error) {
	var agentID uuid.UUID
	if core.Debug {
		message("debug", "Entering into http2.ValidateJWT")
		message("debug", fmt.Sprintf("Input JWT: %v", agentJWT))
	}
	claims := jwt.Claims{}
	// Parse to make sure it is a valid JWT
	nestedToken, err := jwt.ParseSignedAndEncrypted(agentJWT)
	if err != nil {
		return agentID, fmt.Errorf("there was an error parsing the JWT:\r\n%s", err.Error())
	}
	// Decrypt JWT
	token, errToken := nestedToken.Decrypt(key)
	if errToken != nil {
		return agentID, fmt.Errorf("there was an error decrypting the JWT:\r\n%s", errToken.Error())
	}
	// Deserialize the claims and validate the signature
	errClaims := token.Claims(key, &claims)
	if errClaims != nil {
		return agentID, fmt.Errorf("there was an error deserializing the JWT claims:\r\n%s", errClaims.Error())
	}
	agentID = uuid.FromStringOrNil(claims.ID)
	AgentWaitTime, errWait := agents.GetAgentFieldValue(agentID, "WaitTime")
	// An error will be returned during OPAQUE registration & authentication
	// because the server does not yet know this agent; tolerated on purpose
	if errWait != nil {
		if core.Debug {
			message("debug", fmt.Sprintf("there was an error getting the agent's wait time:\r\n%s", errWait.Error()))
		}
	}
	// Fall back to a 10 second leeway when the agent's WaitTime is unknown
	if AgentWaitTime == "" {
		AgentWaitTime = "10s"
	}
	WaitTime, errParse := time.ParseDuration(AgentWaitTime)
	if errParse != nil {
		return agentID, fmt.Errorf("there was an error parsing the agent's wait time into a duration:\r\n%s", errParse.Error())
	}
	// Validate claims; Default Leeway is 1 minute; Set it to 1x the agent's WaitTime setting
	errValidate := claims.ValidateWithLeeway(jwt.Expected{
		Time: time.Now(),
	}, WaitTime)
	if errValidate != nil {
		if core.Verbose {
			message("warn", fmt.Sprintf("The JWT claims were not valid for %s", agentID))
			message("note", fmt.Sprintf("JWT Claim Expiry: %s", claims.Expiry.Time()))
			message("note", fmt.Sprintf("JWT Claim Issued: %s", claims.IssuedAt.Time()))
		}
		// The (possibly valid) agentID is returned alongside the error so the
		// caller can instruct a known agent to re-authenticate
		return agentID, errValidate
	}
	if core.Debug {
		message("debug", fmt.Sprintf("agentID: %s", agentID.String()))
		message("debug", "Leaving http2.ValidateJWT without error")
	}
	// TODO I need to validate other things like token age/expiry
	return agentID, nil
}
// decryptJWE takes the provided JWE compact-serialization string, decrypts it
// using the per-agent key, and gob-decodes the plaintext into a messages.Base
// struct. The zero-value messages.Base is returned alongside any error.
func decryptJWE(jweString string, key []byte) (messages.Base, error) {
	if core.Debug {
		message("debug", "Entering into http2.DecryptJWE function")
		message("debug", fmt.Sprintf("Input JWE String: %s", jweString))
	}
	var m messages.Base
	// Parse JWE string back into JSONWebEncryption
	jwe, errObject := jose.ParseEncrypted(jweString)
	if errObject != nil {
		return m, fmt.Errorf("there was an error parsing the JWE string into a JSONWebEncryption object:\r\n%s", errObject)
	}
	if core.Debug {
		message("debug", fmt.Sprintf("Parsed JWE:\r\n%+v", jwe))
	}
	// Decrypt the JWE
	jweMessage, errDecrypt := jwe.Decrypt(key)
	if errDecrypt != nil {
		return m, fmt.Errorf("there was an error decrypting the JWE:\r\n%s", errDecrypt.Error())
	}
	// Decode the JWE payload into a messages.Base struct
	errDecode := gob.NewDecoder(bytes.NewReader(jweMessage)).Decode(&m)
	if errDecode != nil {
		return m, fmt.Errorf("there was an error decoding JWE payload message sent by an agent:\r\n%s", errDecode.Error())
	}
	if core.Debug {
		message("debug", "Leaving http2.DecryptJWE function without error")
		message("debug", fmt.Sprintf("Returning message base: %+v", m))
	}
	return m, nil
}
// message is used to print a message to the command line, color-coded and
// prefixed according to the given level; unknown levels are flagged in red.
func message(level string, message string) {
	// Dispatch table mapping each level to its color printer and prefix
	printers := map[string]struct {
		print  func(format string, a ...interface{})
		prefix string
	}{
		"info":    {color.Cyan, "[i]"},
		"note":    {color.Yellow, "[-]"},
		"warn":    {color.Red, "[!]"},
		"debug":   {color.Red, "[DEBUG]"},
		"success": {color.Green, "[+]"},
	}
	if p, ok := printers[level]; ok {
		p.print(p.prefix + message)
		return
	}
	color.Red("[_-_]Invalid message level: " + message)
}
// TODO make sure all errors are logged to server log
| {
if core.Verbose {
message("note", fmt.Sprintf("Received %s %s connection from %s", r.Proto, r.Method, r.RemoteAddr))
logging.Server(fmt.Sprintf("Received HTTP %s connection from %s", r.Method, r.RemoteAddr))
}
if core.Debug {
message("debug", fmt.Sprintf("HTTP Connection Details:"))
message("debug", fmt.Sprintf("Host: %s", r.Host))
message("debug", fmt.Sprintf("URI: %s", r.RequestURI))
message("debug", fmt.Sprintf("Method: %s", r.Method))
message("debug", fmt.Sprintf("Protocol: %s", r.Proto))
message("debug", fmt.Sprintf("Headers: %s", r.Header))
message("debug", fmt.Sprintf("TLS Negotiated Protocol: %s", r.TLS.NegotiatedProtocol))
message("debug", fmt.Sprintf("TLS Cipher Suite: %d", r.TLS.CipherSuite))
message("debug", fmt.Sprintf("TLS Server Name: %s", r.TLS.ServerName))
message("debug", fmt.Sprintf("Content Length: %d", r.ContentLength))
logging.Server(fmt.Sprintf("[DEBUG]HTTP Connection Details:"))
logging.Server(fmt.Sprintf("[DEBUG]Host: %s", r.Host))
logging.Server(fmt.Sprintf("[DEBUG]URI: %s", r.RequestURI))
logging.Server(fmt.Sprintf("[DEBUG]Method: %s", r.Method))
logging.Server(fmt.Sprintf("[DEBUG]Protocol: %s", r.Proto))
logging.Server(fmt.Sprintf("[DEBUG]Headers: %s", r.Header))
logging.Server(fmt.Sprintf("[DEBUG]TLS Negotiated Protocol: %s", r.TLS.NegotiatedProtocol))
logging.Server(fmt.Sprintf("[DEBUG]TLS Cipher Suite: %d", r.TLS.CipherSuite))
logging.Server(fmt.Sprintf("[DEBUG]TLS Server Name: %s", r.TLS.ServerName))
logging.Server(fmt.Sprintf("[DEBUG]Content Length: %d", r.ContentLength))
}
// Check for reaper PRISM activity
if r.UserAgent() == "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.85 Safari/537.36 " {
message("warn", fmt.Sprintf("Someone from %s is attempting to fingerprint this reaper server", r.RemoteAddr))
//w.WriteHeader(404)
}
// Make sure the message has a JWT
token := r.Header.Get("Authorization")
if token == "" {
if core.Verbose {
message("warn", "incoming request did not contain an Authorization header")
}
w.WriteHeader(404)
return
}
if r.Method == http.MethodPost {
var returnMessage messages.Base
var err error
var key []byte
//Read the request message until EOF
requestBytes, errRequestBytes := ioutil.ReadAll(r.Body)
if errRequestBytes != nil {
message("warn", fmt.Sprintf("There was an error reading a POST message sent by an "+
"agent:\r\n%s", errRequestBytes))
return
}
// Decode gob to JWE string
var jweString string
errDecode := gob.NewDecoder(bytes.NewReader(requestBytes)).Decode(&jweString)
if errDecode != nil {
message("warn", fmt.Sprintf("there was an error decoding JWE payload message sent by an agent:\r\n%s", errDecode.Error()))
return
}
// Validate JWT and get claims
var agentID uuid.UUID
var errValidate error
// Set return headers
//w.Header().Set("Content-Type", "application/octet-stream")
// Validate JWT using HTTP interface JWT key; Given to authenticated agents by server
agentID, errValidate = validateJWT(strings.Split(token, " ")[1], s.jwtKey)
// If agentID was returned, then message contained a JWT encrypted with the HTTP interface key
if (errValidate != nil) && (agentID == uuid.Nil) {
if core.Verbose {
message("warn", errValidate.Error())
message("note", "trying again with interface PSK")
}
// Validate JWT using interface PSK; Used by unauthenticated agents
hashedKey := sha256.Sum256([]byte(s.psk))
key = hashedKey[:]
agentID, errValidate = validateJWT(strings.Split(token, " ")[1], key)
if errValidate != nil {
if core.Verbose {
message("warn", errValidate.Error())
}
w.WriteHeader(404)
return
}
if core.Debug {
message("info", "Unauthenticated JWT")
}
// Decrypt the HTTP payload, a JWE, using interface PSK
k, errDecryptPSK := decryptJWE(jweString, key)
// Successfully decrypted JWE with interface PSK
if errDecryptPSK == nil {
if core.Debug {
message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", k))
}
if core.Verbose {
message("note", fmt.Sprintf("Received %s message, decrypted JWE with interface PSK", k.Type))
}
messagePayloadBytes := new(bytes.Buffer)
// Allowed unauthenticated message types w/ PSK signed JWT and PSK encrypted JWT
switch k.Type {
case "AuthInit":
serverAuthInit, err := agents.OPAQUEAuthenticateInit(k)
if err != nil {
logging.Server(err.Error())
message("warn", err.Error())
w.WriteHeader(404)
return
}
logging.Server(fmt.Sprintf("Received new agent OPAQUE authentication from %s", agentID))
// Encode return message into a gob
errAuthInit := gob.NewEncoder(messagePayloadBytes).Encode(serverAuthInit)
if errAuthInit != nil {
m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errAuthInit.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
case "RegInit":
serverRegInit, err := agents.OPAQUERegistrationInit(k, s.opaqueKey)
if err != nil {
logging.Server(err.Error())
message("warn", err.Error())
w.WriteHeader(404)
return
}
logging.Server(fmt.Sprintf("Received new agent OPAQUE user registration initialization from %s", agentID))
// Encode return message into a gob
errRegInit := gob.NewEncoder(messagePayloadBytes).Encode(serverRegInit)
if errRegInit != nil {
m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errRegInit.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
case "RegComplete":
serverRegComplete, err := agents.OPAQUERegistrationComplete(k)
if err != nil {
logging.Server(err.Error())
message("warn", err.Error())
w.WriteHeader(404)
return
}
logging.Server(fmt.Sprintf("Received new agent OPAQUE user registration complete from %s", agentID))
// Encode return message into a gob
errRegInit := gob.NewEncoder(messagePayloadBytes).Encode(serverRegComplete)
if errRegInit != nil {
m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errRegInit.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
default:
message("warn", "invalid message type")
w.WriteHeader(404)
return
}
// Get JWE
jwe, errJWE := core.GetJWESymetric(messagePayloadBytes.Bytes(), key)
if errJWE != nil {
logging.Server(errJWE.Error())
message("warn", errJWE.Error())
w.WriteHeader(404)
return
}
// Set return headers
w.Header().Set("Content-Type", "application/octet-stream")
// Encode JWE into gob
errJWEBuffer := gob.NewEncoder(w).Encode(jwe)
if errJWEBuffer != nil {
m := fmt.Errorf("there was an error writing the %s response message to the HTTP stream:\r\n%s", k.Type, errJWEBuffer.Error())
logging.Server(m.Error())
message("warn", m.Error())
w.WriteHeader(404)
return
}
return
}
if core.Verbose {
message("note", "Unauthenticated JWT w/ Authenticated JWE agent session key")
}
// Decrypt the HTTP payload, a JWE, using agent session key
j, errDecrypt := decryptJWE(jweString, agents.GetEncryptionKey(agentID))
if errDecrypt != nil {
message("warn", errDecrypt.Error())
w.WriteHeader(404)
return
}
if core.Debug {
message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", j))
}
if core.Verbose {
message("info", fmt.Sprintf("Received %s message from %s at %s", j.Type, j.ID, time.Now().UTC().Format(time.RFC3339)))
}
// Allowed authenticated message with PSK JWT and JWE encrypted with derived secret
switch j.Type {
case "AuthComplete":
returnMessage, err = agents.OPAQUEAuthenticateComplete(j)
if err != nil {
logging.Server(fmt.Sprintf("Received new agent OPAQUE authentication from %s", agentID))
}
default:
message("warn", fmt.Sprintf("Invalid Activity: %s", j.Type))
w.WriteHeader(404)
return
}
} else {
// If not using the PSK, the agent has previously authenticated
if core.Debug {
message("info", "Authenticated JWT")
}
// Decrypt JWE
key = agents.GetEncryptionKey(agentID)
j, errDecrypt := decryptJWE(jweString, key)
if errDecrypt != nil {
message("warn", errDecrypt.Error())
w.WriteHeader(404)
return
}
if core.Debug {
message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", j))
}
if core.Verbose {
message("note", "Authenticated JWT w/ Authenticated JWE agent session key")
message("info", fmt.Sprintf("Received %s message from %s at %s", j.Type, j.ID, time.Now().UTC().Format(time.RFC3339)))
}
// If both an agentID and error were returned, then the claims were likely bad and the agent needs to re-authenticate
if (errValidate != nil) && (agentID != uuid.Nil) {
message("warn", fmt.Sprintf("Agent %s connected with expired JWT. Instructing agent to re-authenticate", agentID))
j.Type = "ReAuthenticate"
}
// Authenticated and authorized message types
switch j.Type {
case "KeyExchange":
returnMessage, err = agents.KeyExchange(j)
case "StatusCheckIn":
returnMessage, err = agents.StatusCheckIn(j)
case "CmdResults":
err = agents.JobResults(j)
case "AgentInfo":
err = agents.UpdateInfo(j)
case "FileTransfer":
err = agents.FileTransfer(j)
case "ReAuthenticate":
returnMessage, err = agents.OPAQUEReAuthenticate(agentID)
default:
err = fmt.Errorf("invalid message type: %s", j.Type)
}
}
if err != nil {
m := fmt.Sprintf("There was an error during while handling a message from agent %s:\r\n%s", agentID.String(), err.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
if returnMessage.Type == "" {
returnMessage.Type = "ServerOk"
returnMessage.ID = agentID
}
if core.Verbose {
message("note", fmt.Sprintf("Sending "+returnMessage.Type+" message type to agent"))
}
// Get JWT to add to message.Base for all messages except re-authenticate messages
if returnMessage.Type != "ReAuthenticate" {
jsonWebToken, errJWT := getJWT(agentID, s.jwtKey)
if errJWT != nil {
message("warn", errJWT.Error())
w.WriteHeader(404)
return
}
returnMessage.Token = jsonWebToken
}
// Encode messages.Base into a gob
returnMessageBytes := new(bytes.Buffer)
errReturnMessageBytes := gob.NewEncoder(returnMessageBytes).Encode(returnMessage)
if errReturnMessageBytes != nil {
m := fmt.Sprintf("there was an error encoding the %s return message for agent %s into a GOB:\r\n%s", returnMessage.Type, agentID.String(), errReturnMessageBytes.Error())
logging.Server(m)
message("warn", m)
return
}
// Get JWE
key = agents.GetEncryptionKey(agentID)
jwe, errJWE := core.GetJWESymetric(returnMessageBytes.Bytes(), key)
if errJWE != nil {
logging.Server(errJWE.Error())
message("warn", errJWE.Error())
}
// Set return headers
w.Header().Set("Content-Type", "application/octet-stream")
// Encode JWE to GOB and send it to the agent
errEncode := gob.NewEncoder(w).Encode(jwe)
if errEncode != nil {
m := fmt.Sprintf("There was an error encoding the server AuthComplete GOB message:\r\n%s", errEncode.Error())
logging.Server(m)
message("warn", m)
return
}
// Remove the agent from the server after successfully sending the kill message
if returnMessage.Type == "AgentControl" {
if returnMessage.Payload.(messages.AgentControl).Command == "kill" {
err := agents.RemoveAgent(agentID)
if err != nil {
message("warn", err.Error())
return
}
message("info", fmt.Sprintf("Agent %s was removed from the server", agentID.String()))
return
}
}
} else if r.Method == "GET" {
w.WriteHeader(404)
} else {
w.WriteHeader(404)
}
if core.Debug {
message("debug", "Leaving http2.agentHandler function without error")
}
} | identifier_body |
http2.go | package http2
import (
// Standard
"bytes"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/gob"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
// 3rd Party
"github.com/cretz/gopaque/gopaque"
"github.com/fatih/color"
"github.com/lucas-clemente/quic-go"
"github.com/lucas-clemente/quic-go/h2quic"
"github.com/satori/go.uuid"
"go.dedis.ch/kyber"
"gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
// reaper
"github.com/infosechoudini/reaper/pkg/agents"
"github.com/infosechoudini/reaper/pkg/core"
"github.com/infosechoudini/reaper/pkg/logging"
"github.com/infosechoudini/reaper/pkg/messages"
"github.com/infosechoudini/reaper/pkg/util"
)
// Server is a structure for creating and instantiating new server objects.
// It holds the listener configuration, the TLS key pair paths, and the
// secrets used for JWT issuance and OPAQUE authentication.
type Server struct {
	ID          uuid.UUID      // Unique identifier for the Server object
	Interface   string         // The network adapter interface the server will listen on
	Port        int            // The port the server will listen on
	Protocol    string         // The protocol (i.e. HTTP/2 or HTTP/3) the server will use
	Key         string         // The x.509 private key used for TLS encryption
	Certificate string         // The x.509 public key used for TLS encryption
	Server      interface{}    // A Golang server object (i.e. *http.Server or *h2quic.Server, selected by Protocol)
	Mux         *http.ServeMux // The message handler/multiplexer
	jwtKey      []byte         // The password used by the server to create JWTs
	psk         string         // The pre-shared key password used prior to Password Authenticated Key Exchange (PAKE)
	opaqueKey   kyber.Scalar   // OPAQUE server's keys
}
// New instantiates a new server object and returns it.
// It loads (or, when none exists on disk, generates an in-memory) x.509
// certificate, fingerprints it, builds the TLS configuration, and wires the
// agent handler into either an HTTP/2 (h2) or QUIC (hq) server object.
// An error is returned for an unknown protocol or an unusable key pair.
func New(iface string, port int, protocol string, key string, certificate string, psk string) (Server, error) {
	s := Server{
		ID:        uuid.NewV4(),
		Protocol:  protocol,
		Interface: iface,
		Port:      port,
		Mux:       http.NewServeMux(),
		jwtKey:    []byte(core.RandStringBytesMaskImprSrc(32)), // Used to sign and encrypt JWT
		psk:       psk,
	}
	// OPAQUE Server Public/Private keys; Can be used with every agent
	s.opaqueKey = gopaque.CryptoDefault.NewKey(nil)
	var cer tls.Certificate
	var err error
	// Check if certificate exists on disk
	_, errCrt := os.Stat(certificate)
	if os.IsNotExist(errCrt) {
		// Generate a new ephemeral certificate
		m := fmt.Sprintf("No certificate found at %s", certificate)
		logging.Server(m)
		message("note", m)
		t := "Creating in-memory x.509 certificate used for this session only."
		logging.Server(t)
		message("note", t)
		message("info", "Additional details: https://github.com/Ne0nd0g/reaper/wiki/TLS-Certificates")
		cerp, err := util.GenerateTLSCert(nil, nil, nil, nil, nil, nil, true) //ec certs not supported (yet) :(
		if err != nil {
			m := fmt.Sprintf("There was an error generating the SSL/TLS certificate:\r\n%s", err.Error())
			logging.Server(m)
			message("warn", m)
			return s, err
		}
		cer = *cerp
	} else {
		if errCrt != nil {
			m := fmt.Sprintf("There was an error importing the SSL/TLS x509 certificate:\r\n%s", errCrt.Error())
			logging.Server(m)
			message("warn", m)
			return s, errCrt
		}
		s.Certificate = certificate
		_, errKey := os.Stat(key)
		if errKey != nil {
			m := fmt.Sprintf("There was an error importing the SSL/TLS x509 key:\r\n%s", errKey.Error())
			logging.Server(m)
			message("warn", m)
			return s, errKey
		}
		s.Key = key
		cer, err = tls.LoadX509KeyPair(certificate, key)
		if err != nil {
			m := fmt.Sprintf("There was an error importing the SSL/TLS x509 key pair\r\n%s", err.Error())
			logging.Server(m)
			message("warn", m)
			message("warn", "Ensure a keypair is located in the data/x509 directory")
			return s, err
		}
	}
	if len(cer.Certificate) < 1 || cer.PrivateKey == nil {
		m := "Unable to import certificate for use in reaper: empty certificate structure."
		logging.Server(m)
		message("warn", m)
		return s, errors.New("empty certificate structure")
	}
	// Parse into X.509 format
	x, errX509 := x509.ParseCertificate(cer.Certificate[0])
	if errX509 != nil {
		m := fmt.Sprintf("There was an error parsing the tls.Certificate structure into a x509.Certificate"+
			" structure:\r\n%s", errX509.Error())
		logging.Server(m)
		message("warn", m)
		return s, errX509
	}
	// Create SHA-256 fingerprint of the raw certificate
	S256 := sha256.Sum256(x.Raw)
	sha256Fingerprint := hex.EncodeToString(S256[:])
	// reaperCRT is the string representation of the SHA-256 fingerprint for the public x.509 certificate distributed with reaper
	reaperCRT := "4af9224c77821bc8a46503cfc2764b94b1fc8aa2521afc627e835f0b3c449f50"
	// Check to see if the public key SHA-256 fingerprint matches the certificate distributed with reaper for testing
	if reaperCRT == sha256Fingerprint {
		message("warn", "Insecure publicly distributed reaper x.509 testing certificate in use")
		message("info", "Additional details: https://github.com/Ne0nd0g/reaper/wiki/TLS-Certificates")
	}
	// Log certificate information
	logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a %s signature of %s",
		x.SignatureAlgorithm.String(), hex.EncodeToString(x.Signature)))
	logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a public key of %v", x.PublicKey))
	logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a serial number of %d", x.SerialNumber))
	logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a subject of %s", x.Subject.String()))
	logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a SHA256 hash, "+
		"calculated by reaper, of %s", sha256Fingerprint))
	// Configure TLS
	TLSConfig := &tls.Config{
		Certificates:             []tls.Certificate{cer},
		MinVersion:               tls.VersionTLS12,
		CurvePreferences:         []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
		PreferServerCipherSuites: true,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
		},
		//NextProtos: []string{protocol}, //Dont need to specify because server will pick
	}
	// All agent traffic, regardless of path, is handled by agentHandler
	s.Mux.HandleFunc("/", s.agentHandler)
	srv := &http.Server{
		Addr:           s.Interface + ":" + strconv.Itoa(s.Port),
		Handler:        s.Mux,
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   10 * time.Second,
		MaxHeaderBytes: 1 << 20,
		TLSConfig:      TLSConfig,
		//TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0), // <- Disables HTTP/2
	}
	if s.Protocol == "h2" {
		s.Server = srv
	} else if s.Protocol == "hq" {
		s.Server = &h2quic.Server{
			Server: srv,
			QuicConfig: &quic.Config{
				KeepAlive:                   false,
				IdleTimeout:                 168 * time.Hour,
				RequestConnectionIDOmission: false,
			},
		}
	} else {
		return s, fmt.Errorf("%s is an invalid server protocol", s.Protocol)
	}
	return s, nil
}
// Run function starts the server on the preconfigured port for the preconfigured service.
// The listener is started on a background goroutine and Run returns immediately;
// listener errors are logged and printed from that goroutine.
//
// BUG FIX: the previous implementation deferred server.Close() inside Run, which
// executed as soon as Run returned and shut down the listener it had just started.
// It also called .Error() on the ListenAndServeTLS result unconditionally, which
// would panic if the error were ever nil. Both issues are corrected below.
func (s *Server) Run() error {
	logging.Server(fmt.Sprintf("Starting %s Listener at %s:%d", s.Protocol, s.Interface, s.Port))
	time.Sleep(45 * time.Millisecond) // Sleep to allow the shell to start up
	if s.psk == "reaper" {
		fmt.Println()
		message("warn", "Listener was started using \"reaper\" as the Pre-Shared Key (PSK) allowing anyone to"+
			" decrypt message traffic.")
		message("note", "Consider changing the PSK by using the -psk command line flag.")
	}
	message("note", fmt.Sprintf("Starting %s listener on %s:%d", s.Protocol, s.Interface, s.Port))
	if s.Protocol == "h2" {
		server := s.Server.(*http.Server)
		go func() {
			// ListenAndServeTLS blocks until the server stops; log any failure
			if err := server.ListenAndServeTLS(s.Certificate, s.Key); err != nil {
				m := fmt.Sprintf("There was an error with the %s server:\r\n%s", s.Protocol, err.Error())
				logging.Server(m)
				message("warn", m)
			}
		}()
		return nil
	} else if s.Protocol == "hq" {
		server := s.Server.(*h2quic.Server)
		go func() {
			if err := server.ListenAndServeTLS(s.Certificate, s.Key); err != nil {
				m := fmt.Sprintf("There was an error with the hq server:\r\n%s", err.Error())
				logging.Server(m)
				message("warn", m)
			}
		}()
		return nil
	}
	return fmt.Errorf("%s is an invalid server protocol", s.Protocol)
}
// agentHandler function is responsible for all reaper agent traffic
func (s *Server) agentHandler(w http.ResponseWriter, r *http.Request) {
if core.Verbose {
message("note", fmt.Sprintf("Received %s %s connection from %s", r.Proto, r.Method, r.RemoteAddr))
logging.Server(fmt.Sprintf("Received HTTP %s connection from %s", r.Method, r.RemoteAddr))
}
if core.Debug {
message("debug", fmt.Sprintf("HTTP Connection Details:"))
message("debug", fmt.Sprintf("Host: %s", r.Host))
message("debug", fmt.Sprintf("URI: %s", r.RequestURI))
message("debug", fmt.Sprintf("Method: %s", r.Method))
message("debug", fmt.Sprintf("Protocol: %s", r.Proto))
message("debug", fmt.Sprintf("Headers: %s", r.Header))
message("debug", fmt.Sprintf("TLS Negotiated Protocol: %s", r.TLS.NegotiatedProtocol))
message("debug", fmt.Sprintf("TLS Cipher Suite: %d", r.TLS.CipherSuite))
message("debug", fmt.Sprintf("TLS Server Name: %s", r.TLS.ServerName))
message("debug", fmt.Sprintf("Content Length: %d", r.ContentLength))
logging.Server(fmt.Sprintf("[DEBUG]HTTP Connection Details:"))
logging.Server(fmt.Sprintf("[DEBUG]Host: %s", r.Host))
logging.Server(fmt.Sprintf("[DEBUG]URI: %s", r.RequestURI))
logging.Server(fmt.Sprintf("[DEBUG]Method: %s", r.Method))
logging.Server(fmt.Sprintf("[DEBUG]Protocol: %s", r.Proto))
logging.Server(fmt.Sprintf("[DEBUG]Headers: %s", r.Header))
logging.Server(fmt.Sprintf("[DEBUG]TLS Negotiated Protocol: %s", r.TLS.NegotiatedProtocol))
logging.Server(fmt.Sprintf("[DEBUG]TLS Cipher Suite: %d", r.TLS.CipherSuite))
logging.Server(fmt.Sprintf("[DEBUG]TLS Server Name: %s", r.TLS.ServerName))
logging.Server(fmt.Sprintf("[DEBUG]Content Length: %d", r.ContentLength))
}
// Check for reaper PRISM activity
if r.UserAgent() == "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.85 Safari/537.36 " {
message("warn", fmt.Sprintf("Someone from %s is attempting to fingerprint this reaper server", r.RemoteAddr))
//w.WriteHeader(404)
}
// Make sure the message has a JWT
token := r.Header.Get("Authorization")
if token == "" {
if core.Verbose {
message("warn", "incoming request did not contain an Authorization header")
}
w.WriteHeader(404)
return
}
if r.Method == http.MethodPost {
var returnMessage messages.Base
var err error
var key []byte
//Read the request message until EOF
requestBytes, errRequestBytes := ioutil.ReadAll(r.Body)
if errRequestBytes != nil {
message("warn", fmt.Sprintf("There was an error reading a POST message sent by an "+
"agent:\r\n%s", errRequestBytes))
return
}
// Decode gob to JWE string
var jweString string
errDecode := gob.NewDecoder(bytes.NewReader(requestBytes)).Decode(&jweString)
if errDecode != nil {
message("warn", fmt.Sprintf("there was an error decoding JWE payload message sent by an agent:\r\n%s", errDecode.Error()))
return
}
// Validate JWT and get claims
var agentID uuid.UUID
var errValidate error
// Set return headers
//w.Header().Set("Content-Type", "application/octet-stream")
// Validate JWT using HTTP interface JWT key; Given to authenticated agents by server
agentID, errValidate = validateJWT(strings.Split(token, " ")[1], s.jwtKey)
// If agentID was returned, then message contained a JWT encrypted with the HTTP interface key
if (errValidate != nil) && (agentID == uuid.Nil) {
if core.Verbose {
message("warn", errValidate.Error())
message("note", "trying again with interface PSK")
}
// Validate JWT using interface PSK; Used by unauthenticated agents
hashedKey := sha256.Sum256([]byte(s.psk))
key = hashedKey[:]
agentID, errValidate = validateJWT(strings.Split(token, " ")[1], key)
if errValidate != nil {
if core.Verbose {
message("warn", errValidate.Error())
}
w.WriteHeader(404)
return
}
if core.Debug {
message("info", "Unauthenticated JWT")
}
// Decrypt the HTTP payload, a JWE, using interface PSK
k, errDecryptPSK := decryptJWE(jweString, key)
// Successfully decrypted JWE with interface PSK
if errDecryptPSK == nil {
if core.Debug {
message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", k))
}
if core.Verbose {
message("note", fmt.Sprintf("Received %s message, decrypted JWE with interface PSK", k.Type))
}
messagePayloadBytes := new(bytes.Buffer)
// Allowed unauthenticated message types w/ PSK signed JWT and PSK encrypted JWT
switch k.Type {
case "AuthInit":
serverAuthInit, err := agents.OPAQUEAuthenticateInit(k)
if err != nil {
logging.Server(err.Error())
message("warn", err.Error())
w.WriteHeader(404)
return
}
logging.Server(fmt.Sprintf("Received new agent OPAQUE authentication from %s", agentID))
// Encode return message into a gob
errAuthInit := gob.NewEncoder(messagePayloadBytes).Encode(serverAuthInit)
if errAuthInit != nil {
m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errAuthInit.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
case "RegInit":
serverRegInit, err := agents.OPAQUERegistrationInit(k, s.opaqueKey)
if err != nil {
logging.Server(err.Error())
message("warn", err.Error())
w.WriteHeader(404)
return
}
logging.Server(fmt.Sprintf("Received new agent OPAQUE user registration initialization from %s", agentID))
// Encode return message into a gob
errRegInit := gob.NewEncoder(messagePayloadBytes).Encode(serverRegInit)
if errRegInit != nil {
m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errRegInit.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
case "RegComplete":
serverRegComplete, err := agents.OPAQUERegistrationComplete(k)
if err != nil {
logging.Server(err.Error())
message("warn", err.Error())
w.WriteHeader(404)
return
}
logging.Server(fmt.Sprintf("Received new agent OPAQUE user registration complete from %s", agentID))
// Encode return message into a gob
errRegInit := gob.NewEncoder(messagePayloadBytes).Encode(serverRegComplete)
if errRegInit != nil {
m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errRegInit.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
default:
message("warn", "invalid message type")
w.WriteHeader(404)
return
}
// Get JWE
jwe, errJWE := core.GetJWESymetric(messagePayloadBytes.Bytes(), key)
if errJWE != nil {
logging.Server(errJWE.Error())
message("warn", errJWE.Error())
w.WriteHeader(404)
return
}
// Set return headers
w.Header().Set("Content-Type", "application/octet-stream")
// Encode JWE into gob
errJWEBuffer := gob.NewEncoder(w).Encode(jwe)
if errJWEBuffer != nil {
m := fmt.Errorf("there was an error writing the %s response message to the HTTP stream:\r\n%s", k.Type, errJWEBuffer.Error())
logging.Server(m.Error())
message("warn", m.Error())
w.WriteHeader(404)
return
}
return
}
if core.Verbose {
message("note", "Unauthenticated JWT w/ Authenticated JWE agent session key")
}
// Decrypt the HTTP payload, a JWE, using agent session key
j, errDecrypt := decryptJWE(jweString, agents.GetEncryptionKey(agentID))
if errDecrypt != nil {
message("warn", errDecrypt.Error())
w.WriteHeader(404)
return
}
if core.Debug {
message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", j))
}
if core.Verbose {
message("info", fmt.Sprintf("Received %s message from %s at %s", j.Type, j.ID, time.Now().UTC().Format(time.RFC3339)))
}
// Allowed authenticated message with PSK JWT and JWE encrypted with derived secret
switch j.Type {
case "AuthComplete":
returnMessage, err = agents.OPAQUEAuthenticateComplete(j)
if err != nil {
logging.Server(fmt.Sprintf("Received new agent OPAQUE authentication from %s", agentID))
}
default:
message("warn", fmt.Sprintf("Invalid Activity: %s", j.Type))
w.WriteHeader(404)
return
}
} else {
// If not using the PSK, the agent has previously authenticated
if core.Debug {
message("info", "Authenticated JWT")
}
// Decrypt JWE
key = agents.GetEncryptionKey(agentID)
j, errDecrypt := decryptJWE(jweString, key)
if errDecrypt != nil {
message("warn", errDecrypt.Error())
w.WriteHeader(404)
return
}
if core.Debug {
message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", j))
}
if core.Verbose {
message("note", "Authenticated JWT w/ Authenticated JWE agent session key")
message("info", fmt.Sprintf("Received %s message from %s at %s", j.Type, j.ID, time.Now().UTC().Format(time.RFC3339)))
}
// If both an agentID and error were returned, then the claims were likely bad and the agent needs to re-authenticate
if (errValidate != nil) && (agentID != uuid.Nil) {
message("warn", fmt.Sprintf("Agent %s connected with expired JWT. Instructing agent to re-authenticate", agentID))
j.Type = "ReAuthenticate"
}
// Authenticated and authorized message types
switch j.Type {
case "KeyExchange":
returnMessage, err = agents.KeyExchange(j)
case "StatusCheckIn":
returnMessage, err = agents.StatusCheckIn(j)
case "CmdResults":
err = agents.JobResults(j)
case "AgentInfo":
err = agents.UpdateInfo(j)
case "FileTransfer":
err = agents.FileTransfer(j)
case "ReAuthenticate":
returnMessage, err = agents.OPAQUEReAuthenticate(agentID)
default:
err = fmt.Errorf("invalid message type: %s", j.Type)
}
}
if err != nil {
m := fmt.Sprintf("There was an error during while handling a message from agent %s:\r\n%s", agentID.String(), err.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
if returnMessage.Type == "" {
returnMessage.Type = "ServerOk"
returnMessage.ID = agentID
}
if core.Verbose {
message("note", fmt.Sprintf("Sending "+returnMessage.Type+" message type to agent"))
}
// Get JWT to add to message.Base for all messages except re-authenticate messages
if returnMessage.Type != "ReAuthenticate" {
jsonWebToken, errJWT := getJWT(agentID, s.jwtKey)
if errJWT != nil {
message("warn", errJWT.Error())
w.WriteHeader(404)
return
}
returnMessage.Token = jsonWebToken
}
// Encode messages.Base into a gob
returnMessageBytes := new(bytes.Buffer)
errReturnMessageBytes := gob.NewEncoder(returnMessageBytes).Encode(returnMessage)
if errReturnMessageBytes != nil {
m := fmt.Sprintf("there was an error encoding the %s return message for agent %s into a GOB:\r\n%s", returnMessage.Type, agentID.String(), errReturnMessageBytes.Error())
logging.Server(m)
message("warn", m)
return
}
// Get JWE
key = agents.GetEncryptionKey(agentID)
jwe, errJWE := core.GetJWESymetric(returnMessageBytes.Bytes(), key)
if errJWE != nil {
logging.Server(errJWE.Error())
message("warn", errJWE.Error())
}
// Set return headers
w.Header().Set("Content-Type", "application/octet-stream")
// Encode JWE to GOB and send it to the agent
errEncode := gob.NewEncoder(w).Encode(jwe)
if errEncode != nil {
m := fmt.Sprintf("There was an error encoding the server AuthComplete GOB message:\r\n%s", errEncode.Error())
logging.Server(m)
message("warn", m)
return
}
// Remove the agent from the server after successfully sending the kill message
if returnMessage.Type == "AgentControl" {
if returnMessage.Payload.(messages.AgentControl).Command == "kill" {
err := agents.RemoveAgent(agentID)
if err != nil {
message("warn", err.Error())
return
}
message("info", fmt.Sprintf("Agent %s was removed from the server", agentID.String()))
return
}
}
} else if r.Method == "GET" {
w.WriteHeader(404)
} else {
w.WriteHeader(404)
}
if core.Debug {
message("debug", "Leaving http2.agentHandler function without error")
}
}
// getJWT returns a JSON Web Token for the provided agent using the interface JWT Key
func getJWT(agentID uuid.UUID, key []byte) (string, error) {
if core.Debug {
message("debug", "Entering into agents.GetJWT function")
}
encrypter, encErr := jose.NewEncrypter(jose.A256GCM,
jose.Recipient{
Algorithm: jose.DIRECT,
Key: key},
(&jose.EncrypterOptions{}).WithType("JWT").WithContentType("JWT"))
if encErr != nil {
return "", fmt.Errorf("there was an error creating the JWE encryptor:\r\n%s", encErr)
}
signer, errSigner := jose.NewSigner(jose.SigningKey{
Algorithm: jose.HS256,
Key: key},
(&jose.SignerOptions{}).WithType("JWT"))
if errSigner != nil {
return "", fmt.Errorf("there was an error creating the JWT signer:\r\n%s", errSigner.Error())
}
lifetime, errLifetime := agents.GetLifetime(agentID)
if errLifetime != nil && errLifetime.Error() != "agent WaitTime is equal to zero" {
return "", errLifetime
}
// This is for when the server hasn't received an AgentInfo struct and doesn't know the agent's lifetime yet or sleep is set to zero
if lifetime == 0 {
lifetime = time.Second * 30
}
// TODO Add in the rest of the JWT claim info
cl := jwt.Claims{
ID: agentID.String(),
NotBefore: jwt.NewNumericDate(time.Now()),
IssuedAt: jwt.NewNumericDate(time.Now()),
Expiry: jwt.NewNumericDate(time.Now().Add(lifetime)),
}
agentJWT, err := jwt.SignedAndEncrypted(signer, encrypter).Claims(cl).CompactSerialize()
if err != nil {
return "", fmt.Errorf("there was an error serializing the JWT:\r\n%s", err.Error())
}
// Parse it to check for errors
_, errParse := jwt.ParseEncrypted(agentJWT)
if errParse != nil {
return "", fmt.Errorf("there was an error parsing the encrypted JWT:\r\n%s", errParse.Error())
}
logging.Server(fmt.Sprintf("Created authenticated JWT for %s", agentID))
if core.Debug {
message("debug", fmt.Sprintf("Sending agent %s an authenticated JWT with a lifetime of %v:\r\n%v",
agentID.String(), lifetime, agentJWT))
}
return agentJWT, nil
}
// validateJWT validates the provided JSON Web Token
func validateJWT(agentJWT string, key []byte) (uuid.UUID, error) {
var agentID uuid.UUID
if core.Debug {
message("debug", "Entering into http2.ValidateJWT")
message("debug", fmt.Sprintf("Input JWT: %v", agentJWT))
}
claims := jwt.Claims{}
// Parse to make sure it is a valid JWT
nestedToken, err := jwt.ParseSignedAndEncrypted(agentJWT)
if err != nil {
return agentID, fmt.Errorf("there was an error parsing the JWT:\r\n%s", err.Error())
}
// Decrypt JWT
token, errToken := nestedToken.Decrypt(key)
if errToken != nil {
return agentID, fmt.Errorf("there was an error decrypting the JWT:\r\n%s", errToken.Error())
}
// Deserialize the claims and validate the signature
errClaims := token.Claims(key, &claims)
if errClaims != nil {
return agentID, fmt.Errorf("there was an deserializing the JWT claims:\r\n%s", errClaims.Error())
}
| // An error will be returned during OPAQUE registration & authentication
if errWait != nil {
if core.Debug {
message("debug", fmt.Sprintf("there was an error getting the agent's wait time:\r\n%s", errWait.Error()))
}
}
if AgentWaitTime == "" {
AgentWaitTime = "10s"
}
WaitTime, errParse := time.ParseDuration(AgentWaitTime)
if errParse != nil {
return agentID, fmt.Errorf("there was an error parsing the agent's wait time into a duration:\r\n%s", errParse.Error())
}
// Validate claims; Default Leeway is 1 minute; Set it to 1x the agent's WaitTime setting
errValidate := claims.ValidateWithLeeway(jwt.Expected{
Time: time.Now(),
}, WaitTime)
if errValidate != nil {
if core.Verbose {
message("warn", fmt.Sprintf("The JWT claims were not valid for %s", agentID))
message("note", fmt.Sprintf("JWT Claim Expiry: %s", claims.Expiry.Time()))
message("note", fmt.Sprintf("JWT Claim Issued: %s", claims.IssuedAt.Time()))
}
return agentID, errValidate
}
if core.Debug {
message("debug", fmt.Sprintf("agentID: %s", agentID.String()))
message("debug", "Leaving http2.ValidateJWT without error")
}
// TODO I need to validate other things like token age/expiry
return agentID, nil
}
// decryptJWE takes provided JWE string and decrypts it using the per-agent key
func decryptJWE(jweString string, key []byte) (messages.Base, error) {
if core.Debug {
message("debug", "Entering into http2.DecryptJWE function")
message("debug", fmt.Sprintf("Input JWE String: %s", jweString))
}
var m messages.Base
// Parse JWE string back into JSONWebEncryption
jwe, errObject := jose.ParseEncrypted(jweString)
if errObject != nil {
return m, fmt.Errorf("there was an error parseing the JWE string into a JSONWebEncryption object:\r\n%s", errObject)
}
if core.Debug {
message("debug", fmt.Sprintf("Parsed JWE:\r\n%+v", jwe))
}
// Decrypt the JWE
jweMessage, errDecrypt := jwe.Decrypt(key)
if errDecrypt != nil {
return m, fmt.Errorf("there was an error decrypting the JWE:\r\n%s", errDecrypt.Error())
}
// Decode the JWE payload into a messages.Base struct
errDecode := gob.NewDecoder(bytes.NewReader(jweMessage)).Decode(&m)
if errDecode != nil {
return m, fmt.Errorf("there was an error decoding JWE payload message sent by an agent:\r\n%s", errDecode.Error())
}
if core.Debug {
message("debug", "Leaving http2.DecryptJWE function without error")
message("debug", fmt.Sprintf("Returning message base: %+v", m))
}
return m, nil
}
// message is used to print a message to the command line
func message(level string, message string) {
switch level {
case "info":
color.Cyan("[i]" + message)
case "note":
color.Yellow("[-]" + message)
case "warn":
color.Red("[!]" + message)
case "debug":
color.Red("[DEBUG]" + message)
case "success":
color.Green("[+]" + message)
default:
color.Red("[_-_]Invalid message level: " + message)
}
}
// TODO make sure all errors are logged to server log | agentID = uuid.FromStringOrNil(claims.ID)
AgentWaitTime, errWait := agents.GetAgentFieldValue(agentID, "WaitTime") | random_line_split |
http2.go | package http2
import (
// Standard
"bytes"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/gob"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
// 3rd Party
"github.com/cretz/gopaque/gopaque"
"github.com/fatih/color"
"github.com/lucas-clemente/quic-go"
"github.com/lucas-clemente/quic-go/h2quic"
"github.com/satori/go.uuid"
"go.dedis.ch/kyber"
"gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
// reaper
"github.com/infosechoudini/reaper/pkg/agents"
"github.com/infosechoudini/reaper/pkg/core"
"github.com/infosechoudini/reaper/pkg/logging"
"github.com/infosechoudini/reaper/pkg/messages"
"github.com/infosechoudini/reaper/pkg/util"
)
// Server is a structure for creating and instantiating new server objects
type Server struct {
ID uuid.UUID // Unique identifier for the Server object
Interface string // The network adapter interface the server will listen on
Port int // The port the server will listen on
Protocol string // The protocol (i.e. HTTP/2 or HTTP/3) the server will use
Key string // The x.509 private key used for TLS encryption
Certificate string // The x.509 public key used for TLS encryption
Server interface{} // A Golang server object (i.e http.Server or h3quic.Server)
Mux *http.ServeMux // The message handler/multiplexer
jwtKey []byte // The password used by the server to create JWTs
psk string // The pre-shared key password used prior to Password Authenticated Key Exchange (PAKE)
opaqueKey kyber.Scalar // OPAQUE server's keys
}
// New instantiates a new server object and returns it
func | (iface string, port int, protocol string, key string, certificate string, psk string) (Server, error) {
s := Server{
ID: uuid.NewV4(),
Protocol: protocol,
Interface: iface,
Port: port,
Mux: http.NewServeMux(),
jwtKey: []byte(core.RandStringBytesMaskImprSrc(32)), // Used to sign and encrypt JWT
psk: psk,
}
// OPAQUE Server Public/Private keys; Can be used with every agent
s.opaqueKey = gopaque.CryptoDefault.NewKey(nil)
var cer tls.Certificate
var err error
// Check if certificate exists on disk
_, errCrt := os.Stat(certificate)
if os.IsNotExist(errCrt) {
// generate a new ephemeral certificate
m := fmt.Sprintf("No certificate found at %s", certificate)
logging.Server(m)
message("note", m)
t := "Creating in-memory x.509 certificate used for this session only."
logging.Server(t)
message("note", t)
message("info", "Additional details: https://github.com/Ne0nd0g/reaper/wiki/TLS-Certificates")
cerp, err := util.GenerateTLSCert(nil, nil, nil, nil, nil, nil, true) //ec certs not supported (yet) :(
if err != nil {
m := fmt.Sprintf("There was an error generating the SSL/TLS certificate:\r\n%s", err.Error())
logging.Server(m)
message("warn", m)
return s, err
}
cer = *cerp
} else {
if errCrt != nil {
m := fmt.Sprintf("There was an error importing the SSL/TLS x509 certificate:\r\n%s", errCrt.Error())
logging.Server(m)
message("warn", m)
return s, errCrt
}
s.Certificate = certificate
_, errKey := os.Stat(key)
if errKey != nil {
m := fmt.Sprintf("There was an error importing the SSL/TLS x509 key:\r\n%s", errKey.Error())
logging.Server(m)
message("warn", m)
return s, errKey
}
s.Key = key
cer, err = tls.LoadX509KeyPair(certificate, key)
if err != nil {
m := fmt.Sprintf("There was an error importing the SSL/TLS x509 key pair\r\n%s", err.Error())
logging.Server(m)
message("warn", m)
message("warn", "Ensure a keypair is located in the data/x509 directory")
return s, err
}
}
if len(cer.Certificate) < 1 || cer.PrivateKey == nil {
m := "Unable to import certificate for use in reaper: empty certificate structure."
logging.Server(m)
message("warn", m)
return s, errors.New("empty certificate structure")
}
// Parse into X.509 format
x, errX509 := x509.ParseCertificate(cer.Certificate[0])
if errX509 != nil {
m := fmt.Sprintf("There was an error parsing the tls.Certificate structure into a x509.Certificate"+
" structure:\r\n%s", errX509.Error())
logging.Server(m)
message("warn", m)
return s, errX509
}
// Create fingerprint
S256 := sha256.Sum256(x.Raw)
sha256Fingerprint := hex.EncodeToString(S256[:])
// reaperCRT is the string representation of the SHA1 fingerprint for the public x.509 certificate distributed with reaper
reaperCRT := "4af9224c77821bc8a46503cfc2764b94b1fc8aa2521afc627e835f0b3c449f50"
// Check to see if the Public Key SHA1 finger print matches the certificate distributed with reaper for testing
if reaperCRT == sha256Fingerprint {
message("warn", "Insecure publicly distributed reaper x.509 testing certificate in use")
message("info", "Additional details: https://github.com/Ne0nd0g/reaper/wiki/TLS-Certificates")
}
// Log certificate information
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a %s signature of %s",
x.SignatureAlgorithm.String(), hex.EncodeToString(x.Signature)))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a public key of %v", x.PublicKey))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a serial number of %d", x.SerialNumber))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certifcate with a subject of %s", x.Subject.String()))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a SHA256 hash, "+
"calculated by reaper, of %s", sha256Fingerprint))
// Configure TLS
TLSConfig := &tls.Config{
Certificates: []tls.Certificate{cer},
MinVersion: tls.VersionTLS12,
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
PreferServerCipherSuites: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
},
//NextProtos: []string{protocol}, //Dont need to specify because server will pick
}
s.Mux.HandleFunc("/", s.agentHandler)
srv := &http.Server{
Addr: s.Interface + ":" + strconv.Itoa(s.Port),
Handler: s.Mux,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 1 << 20,
TLSConfig: TLSConfig,
//TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0), // <- Disables HTTP/2
}
if s.Protocol == "h2" {
s.Server = srv
} else if s.Protocol == "hq" {
s.Server = &h2quic.Server{
Server: srv,
QuicConfig: &quic.Config{
KeepAlive: false,
IdleTimeout: 168 * time.Hour,
RequestConnectionIDOmission: false,
},
}
} else {
return s, fmt.Errorf("%s is an invalid server protocol", s.Protocol)
}
return s, nil
}
// Run function starts the server on the preconfigured port for the preconfigured service
func (s *Server) Run() error {
logging.Server(fmt.Sprintf("Starting %s Listener at %s:%d", s.Protocol, s.Interface, s.Port))
time.Sleep(45 * time.Millisecond) // Sleep to allow the shell to start up
if s.psk == "reaper" {
fmt.Println()
message("warn", "Listener was started using \"reaper\" as the Pre-Shared Key (PSK) allowing anyone"+
" decrypt message traffic.")
message("note", "Consider changing the PSK by using the -psk command line flag.")
}
message("note", fmt.Sprintf("Starting %s listener on %s:%d", s.Protocol, s.Interface, s.Port))
if s.Protocol == "h2" {
server := s.Server.(*http.Server)
defer func() {
err := server.Close()
if err != nil {
m := fmt.Sprintf("There was an error starting the %s server:\r\n%s", s.Protocol, err.Error())
logging.Server(m)
message("warn", m)
return
}
}()
go logging.Server(server.ListenAndServeTLS(s.Certificate, s.Key).Error())
return nil
} else if s.Protocol == "hq" {
server := s.Server.(*h2quic.Server)
defer func() {
err := server.Close()
if err != nil {
m := fmt.Sprintf("There was an error starting the hq server:\r\n%s", err.Error())
logging.Server(m)
message("warn", m)
return
}
}()
go logging.Server(server.ListenAndServeTLS(s.Certificate, s.Key).Error())
return nil
}
return fmt.Errorf("%s is an invalid server protocol", s.Protocol)
}
// agentHandler function is responsible for all reaper agent traffic
func (s *Server) agentHandler(w http.ResponseWriter, r *http.Request) {
if core.Verbose {
message("note", fmt.Sprintf("Received %s %s connection from %s", r.Proto, r.Method, r.RemoteAddr))
logging.Server(fmt.Sprintf("Received HTTP %s connection from %s", r.Method, r.RemoteAddr))
}
if core.Debug {
message("debug", fmt.Sprintf("HTTP Connection Details:"))
message("debug", fmt.Sprintf("Host: %s", r.Host))
message("debug", fmt.Sprintf("URI: %s", r.RequestURI))
message("debug", fmt.Sprintf("Method: %s", r.Method))
message("debug", fmt.Sprintf("Protocol: %s", r.Proto))
message("debug", fmt.Sprintf("Headers: %s", r.Header))
message("debug", fmt.Sprintf("TLS Negotiated Protocol: %s", r.TLS.NegotiatedProtocol))
message("debug", fmt.Sprintf("TLS Cipher Suite: %d", r.TLS.CipherSuite))
message("debug", fmt.Sprintf("TLS Server Name: %s", r.TLS.ServerName))
message("debug", fmt.Sprintf("Content Length: %d", r.ContentLength))
logging.Server(fmt.Sprintf("[DEBUG]HTTP Connection Details:"))
logging.Server(fmt.Sprintf("[DEBUG]Host: %s", r.Host))
logging.Server(fmt.Sprintf("[DEBUG]URI: %s", r.RequestURI))
logging.Server(fmt.Sprintf("[DEBUG]Method: %s", r.Method))
logging.Server(fmt.Sprintf("[DEBUG]Protocol: %s", r.Proto))
logging.Server(fmt.Sprintf("[DEBUG]Headers: %s", r.Header))
logging.Server(fmt.Sprintf("[DEBUG]TLS Negotiated Protocol: %s", r.TLS.NegotiatedProtocol))
logging.Server(fmt.Sprintf("[DEBUG]TLS Cipher Suite: %d", r.TLS.CipherSuite))
logging.Server(fmt.Sprintf("[DEBUG]TLS Server Name: %s", r.TLS.ServerName))
logging.Server(fmt.Sprintf("[DEBUG]Content Length: %d", r.ContentLength))
}
// Check for reaper PRISM activity
if r.UserAgent() == "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.85 Safari/537.36 " {
message("warn", fmt.Sprintf("Someone from %s is attempting to fingerprint this reaper server", r.RemoteAddr))
//w.WriteHeader(404)
}
// Make sure the message has a JWT
token := r.Header.Get("Authorization")
if token == "" {
if core.Verbose {
message("warn", "incoming request did not contain an Authorization header")
}
w.WriteHeader(404)
return
}
if r.Method == http.MethodPost {
var returnMessage messages.Base
var err error
var key []byte
//Read the request message until EOF
requestBytes, errRequestBytes := ioutil.ReadAll(r.Body)
if errRequestBytes != nil {
message("warn", fmt.Sprintf("There was an error reading a POST message sent by an "+
"agent:\r\n%s", errRequestBytes))
return
}
// Decode gob to JWE string
var jweString string
errDecode := gob.NewDecoder(bytes.NewReader(requestBytes)).Decode(&jweString)
if errDecode != nil {
message("warn", fmt.Sprintf("there was an error decoding JWE payload message sent by an agent:\r\n%s", errDecode.Error()))
return
}
// Validate JWT and get claims
var agentID uuid.UUID
var errValidate error
// Set return headers
//w.Header().Set("Content-Type", "application/octet-stream")
// Validate JWT using HTTP interface JWT key; Given to authenticated agents by server
agentID, errValidate = validateJWT(strings.Split(token, " ")[1], s.jwtKey)
// If agentID was returned, then message contained a JWT encrypted with the HTTP interface key
if (errValidate != nil) && (agentID == uuid.Nil) {
if core.Verbose {
message("warn", errValidate.Error())
message("note", "trying again with interface PSK")
}
// Validate JWT using interface PSK; Used by unauthenticated agents
hashedKey := sha256.Sum256([]byte(s.psk))
key = hashedKey[:]
agentID, errValidate = validateJWT(strings.Split(token, " ")[1], key)
if errValidate != nil {
if core.Verbose {
message("warn", errValidate.Error())
}
w.WriteHeader(404)
return
}
if core.Debug {
message("info", "Unauthenticated JWT")
}
// Decrypt the HTTP payload, a JWE, using interface PSK
k, errDecryptPSK := decryptJWE(jweString, key)
// Successfully decrypted JWE with interface PSK
if errDecryptPSK == nil {
if core.Debug {
message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", k))
}
if core.Verbose {
message("note", fmt.Sprintf("Received %s message, decrypted JWE with interface PSK", k.Type))
}
messagePayloadBytes := new(bytes.Buffer)
// Allowed unauthenticated message types w/ PSK signed JWT and PSK encrypted JWT
switch k.Type {
case "AuthInit":
serverAuthInit, err := agents.OPAQUEAuthenticateInit(k)
if err != nil {
logging.Server(err.Error())
message("warn", err.Error())
w.WriteHeader(404)
return
}
logging.Server(fmt.Sprintf("Received new agent OPAQUE authentication from %s", agentID))
// Encode return message into a gob
errAuthInit := gob.NewEncoder(messagePayloadBytes).Encode(serverAuthInit)
if errAuthInit != nil {
m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errAuthInit.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
case "RegInit":
serverRegInit, err := agents.OPAQUERegistrationInit(k, s.opaqueKey)
if err != nil {
logging.Server(err.Error())
message("warn", err.Error())
w.WriteHeader(404)
return
}
logging.Server(fmt.Sprintf("Received new agent OPAQUE user registration initialization from %s", agentID))
// Encode return message into a gob
errRegInit := gob.NewEncoder(messagePayloadBytes).Encode(serverRegInit)
if errRegInit != nil {
m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errRegInit.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
case "RegComplete":
serverRegComplete, err := agents.OPAQUERegistrationComplete(k)
if err != nil {
logging.Server(err.Error())
message("warn", err.Error())
w.WriteHeader(404)
return
}
logging.Server(fmt.Sprintf("Received new agent OPAQUE user registration complete from %s", agentID))
// Encode return message into a gob
errRegInit := gob.NewEncoder(messagePayloadBytes).Encode(serverRegComplete)
if errRegInit != nil {
m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errRegInit.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
default:
message("warn", "invalid message type")
w.WriteHeader(404)
return
}
// Get JWE
jwe, errJWE := core.GetJWESymetric(messagePayloadBytes.Bytes(), key)
if errJWE != nil {
logging.Server(errJWE.Error())
message("warn", errJWE.Error())
w.WriteHeader(404)
return
}
// Set return headers
w.Header().Set("Content-Type", "application/octet-stream")
// Encode JWE into gob
errJWEBuffer := gob.NewEncoder(w).Encode(jwe)
if errJWEBuffer != nil {
m := fmt.Errorf("there was an error writing the %s response message to the HTTP stream:\r\n%s", k.Type, errJWEBuffer.Error())
logging.Server(m.Error())
message("warn", m.Error())
w.WriteHeader(404)
return
}
return
}
if core.Verbose {
message("note", "Unauthenticated JWT w/ Authenticated JWE agent session key")
}
// Decrypt the HTTP payload, a JWE, using agent session key
j, errDecrypt := decryptJWE(jweString, agents.GetEncryptionKey(agentID))
if errDecrypt != nil {
message("warn", errDecrypt.Error())
w.WriteHeader(404)
return
}
if core.Debug {
message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", j))
}
if core.Verbose {
message("info", fmt.Sprintf("Received %s message from %s at %s", j.Type, j.ID, time.Now().UTC().Format(time.RFC3339)))
}
// Allowed authenticated message with PSK JWT and JWE encrypted with derived secret
switch j.Type {
case "AuthComplete":
returnMessage, err = agents.OPAQUEAuthenticateComplete(j)
if err != nil {
logging.Server(fmt.Sprintf("Received new agent OPAQUE authentication from %s", agentID))
}
default:
message("warn", fmt.Sprintf("Invalid Activity: %s", j.Type))
w.WriteHeader(404)
return
}
} else {
// If not using the PSK, the agent has previously authenticated
if core.Debug {
message("info", "Authenticated JWT")
}
// Decrypt JWE
key = agents.GetEncryptionKey(agentID)
j, errDecrypt := decryptJWE(jweString, key)
if errDecrypt != nil {
message("warn", errDecrypt.Error())
w.WriteHeader(404)
return
}
if core.Debug {
message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", j))
}
if core.Verbose {
message("note", "Authenticated JWT w/ Authenticated JWE agent session key")
message("info", fmt.Sprintf("Received %s message from %s at %s", j.Type, j.ID, time.Now().UTC().Format(time.RFC3339)))
}
// If both an agentID and error were returned, then the claims were likely bad and the agent needs to re-authenticate
if (errValidate != nil) && (agentID != uuid.Nil) {
message("warn", fmt.Sprintf("Agent %s connected with expired JWT. Instructing agent to re-authenticate", agentID))
j.Type = "ReAuthenticate"
}
// Authenticated and authorized message types
switch j.Type {
case "KeyExchange":
returnMessage, err = agents.KeyExchange(j)
case "StatusCheckIn":
returnMessage, err = agents.StatusCheckIn(j)
case "CmdResults":
err = agents.JobResults(j)
case "AgentInfo":
err = agents.UpdateInfo(j)
case "FileTransfer":
err = agents.FileTransfer(j)
case "ReAuthenticate":
returnMessage, err = agents.OPAQUEReAuthenticate(agentID)
default:
err = fmt.Errorf("invalid message type: %s", j.Type)
}
}
if err != nil {
m := fmt.Sprintf("There was an error during while handling a message from agent %s:\r\n%s", agentID.String(), err.Error())
logging.Server(m)
message("warn", m)
w.WriteHeader(404)
return
}
if returnMessage.Type == "" {
returnMessage.Type = "ServerOk"
returnMessage.ID = agentID
}
if core.Verbose {
message("note", fmt.Sprintf("Sending "+returnMessage.Type+" message type to agent"))
}
// Get JWT to add to message.Base for all messages except re-authenticate messages
if returnMessage.Type != "ReAuthenticate" {
jsonWebToken, errJWT := getJWT(agentID, s.jwtKey)
if errJWT != nil {
message("warn", errJWT.Error())
w.WriteHeader(404)
return
}
returnMessage.Token = jsonWebToken
}
// Encode messages.Base into a gob
returnMessageBytes := new(bytes.Buffer)
errReturnMessageBytes := gob.NewEncoder(returnMessageBytes).Encode(returnMessage)
if errReturnMessageBytes != nil {
m := fmt.Sprintf("there was an error encoding the %s return message for agent %s into a GOB:\r\n%s", returnMessage.Type, agentID.String(), errReturnMessageBytes.Error())
logging.Server(m)
message("warn", m)
return
}
// Get JWE
key = agents.GetEncryptionKey(agentID)
jwe, errJWE := core.GetJWESymetric(returnMessageBytes.Bytes(), key)
if errJWE != nil {
logging.Server(errJWE.Error())
message("warn", errJWE.Error())
}
// Set return headers
w.Header().Set("Content-Type", "application/octet-stream")
// Encode JWE to GOB and send it to the agent
errEncode := gob.NewEncoder(w).Encode(jwe)
if errEncode != nil {
m := fmt.Sprintf("There was an error encoding the server AuthComplete GOB message:\r\n%s", errEncode.Error())
logging.Server(m)
message("warn", m)
return
}
// Remove the agent from the server after successfully sending the kill message
if returnMessage.Type == "AgentControl" {
if returnMessage.Payload.(messages.AgentControl).Command == "kill" {
err := agents.RemoveAgent(agentID)
if err != nil {
message("warn", err.Error())
return
}
message("info", fmt.Sprintf("Agent %s was removed from the server", agentID.String()))
return
}
}
} else if r.Method == "GET" {
w.WriteHeader(404)
} else {
w.WriteHeader(404)
}
if core.Debug {
message("debug", "Leaving http2.agentHandler function without error")
}
}
// getJWT returns a JSON Web Token for the provided agent using the interface JWT Key
func getJWT(agentID uuid.UUID, key []byte) (string, error) {
if core.Debug {
message("debug", "Entering into agents.GetJWT function")
}
encrypter, encErr := jose.NewEncrypter(jose.A256GCM,
jose.Recipient{
Algorithm: jose.DIRECT,
Key: key},
(&jose.EncrypterOptions{}).WithType("JWT").WithContentType("JWT"))
if encErr != nil {
return "", fmt.Errorf("there was an error creating the JWE encryptor:\r\n%s", encErr)
}
signer, errSigner := jose.NewSigner(jose.SigningKey{
Algorithm: jose.HS256,
Key: key},
(&jose.SignerOptions{}).WithType("JWT"))
if errSigner != nil {
return "", fmt.Errorf("there was an error creating the JWT signer:\r\n%s", errSigner.Error())
}
lifetime, errLifetime := agents.GetLifetime(agentID)
if errLifetime != nil && errLifetime.Error() != "agent WaitTime is equal to zero" {
return "", errLifetime
}
// This is for when the server hasn't received an AgentInfo struct and doesn't know the agent's lifetime yet or sleep is set to zero
if lifetime == 0 {
lifetime = time.Second * 30
}
// TODO Add in the rest of the JWT claim info
cl := jwt.Claims{
ID: agentID.String(),
NotBefore: jwt.NewNumericDate(time.Now()),
IssuedAt: jwt.NewNumericDate(time.Now()),
Expiry: jwt.NewNumericDate(time.Now().Add(lifetime)),
}
agentJWT, err := jwt.SignedAndEncrypted(signer, encrypter).Claims(cl).CompactSerialize()
if err != nil {
return "", fmt.Errorf("there was an error serializing the JWT:\r\n%s", err.Error())
}
// Parse it to check for errors
_, errParse := jwt.ParseEncrypted(agentJWT)
if errParse != nil {
return "", fmt.Errorf("there was an error parsing the encrypted JWT:\r\n%s", errParse.Error())
}
logging.Server(fmt.Sprintf("Created authenticated JWT for %s", agentID))
if core.Debug {
message("debug", fmt.Sprintf("Sending agent %s an authenticated JWT with a lifetime of %v:\r\n%v",
agentID.String(), lifetime, agentJWT))
}
return agentJWT, nil
}
// validateJWT validates the provided JSON Web Token
func validateJWT(agentJWT string, key []byte) (uuid.UUID, error) {
var agentID uuid.UUID
if core.Debug {
message("debug", "Entering into http2.ValidateJWT")
message("debug", fmt.Sprintf("Input JWT: %v", agentJWT))
}
claims := jwt.Claims{}
// Parse to make sure it is a valid JWT
nestedToken, err := jwt.ParseSignedAndEncrypted(agentJWT)
if err != nil {
return agentID, fmt.Errorf("there was an error parsing the JWT:\r\n%s", err.Error())
}
// Decrypt JWT
token, errToken := nestedToken.Decrypt(key)
if errToken != nil {
return agentID, fmt.Errorf("there was an error decrypting the JWT:\r\n%s", errToken.Error())
}
// Deserialize the claims and validate the signature
errClaims := token.Claims(key, &claims)
if errClaims != nil {
return agentID, fmt.Errorf("there was an deserializing the JWT claims:\r\n%s", errClaims.Error())
}
agentID = uuid.FromStringOrNil(claims.ID)
AgentWaitTime, errWait := agents.GetAgentFieldValue(agentID, "WaitTime")
// An error will be returned during OPAQUE registration & authentication
if errWait != nil {
if core.Debug {
message("debug", fmt.Sprintf("there was an error getting the agent's wait time:\r\n%s", errWait.Error()))
}
}
if AgentWaitTime == "" {
AgentWaitTime = "10s"
}
WaitTime, errParse := time.ParseDuration(AgentWaitTime)
if errParse != nil {
return agentID, fmt.Errorf("there was an error parsing the agent's wait time into a duration:\r\n%s", errParse.Error())
}
// Validate claims; Default Leeway is 1 minute; Set it to 1x the agent's WaitTime setting
errValidate := claims.ValidateWithLeeway(jwt.Expected{
Time: time.Now(),
}, WaitTime)
if errValidate != nil {
if core.Verbose {
message("warn", fmt.Sprintf("The JWT claims were not valid for %s", agentID))
message("note", fmt.Sprintf("JWT Claim Expiry: %s", claims.Expiry.Time()))
message("note", fmt.Sprintf("JWT Claim Issued: %s", claims.IssuedAt.Time()))
}
return agentID, errValidate
}
if core.Debug {
message("debug", fmt.Sprintf("agentID: %s", agentID.String()))
message("debug", "Leaving http2.ValidateJWT without error")
}
// TODO I need to validate other things like token age/expiry
return agentID, nil
}
// decryptJWE takes provided JWE string and decrypts it using the per-agent key.
// The JWE plaintext is expected to be a gob-encoded messages.Base structure.
// Fix: corrected the "parseing" typo in the parse-failure error message.
func decryptJWE(jweString string, key []byte) (messages.Base, error) {
	if core.Debug {
		message("debug", "Entering into http2.DecryptJWE function")
		message("debug", fmt.Sprintf("Input JWE String: %s", jweString))
	}
	var m messages.Base
	// Parse JWE string back into JSONWebEncryption
	jwe, errObject := jose.ParseEncrypted(jweString)
	if errObject != nil {
		return m, fmt.Errorf("there was an error parsing the JWE string into a JSONWebEncryption object:\r\n%s", errObject)
	}
	if core.Debug {
		message("debug", fmt.Sprintf("Parsed JWE:\r\n%+v", jwe))
	}
	// Decrypt the JWE with the supplied symmetric key
	jweMessage, errDecrypt := jwe.Decrypt(key)
	if errDecrypt != nil {
		return m, fmt.Errorf("there was an error decrypting the JWE:\r\n%s", errDecrypt.Error())
	}
	// Decode the JWE payload into a messages.Base struct
	errDecode := gob.NewDecoder(bytes.NewReader(jweMessage)).Decode(&m)
	if errDecode != nil {
		return m, fmt.Errorf("there was an error decoding JWE payload message sent by an agent:\r\n%s", errDecode.Error())
	}
	if core.Debug {
		message("debug", "Leaving http2.DecryptJWE function without error")
		message("debug", fmt.Sprintf("Returning message base: %+v", m))
	}
	return m, nil
}
// message writes a level-tagged, colorized status line to the command line.
// Unrecognized levels are printed in red so typos at call sites stand out.
func message(level string, message string) {
	// Table of known levels: visual prefix plus the color printer to use.
	printers := map[string]struct {
		prefix string
		write  func(format string, a ...interface{})
	}{
		"info":    {"[i]", color.Cyan},
		"note":    {"[-]", color.Yellow},
		"warn":    {"[!]", color.Red},
		"debug":   {"[DEBUG]", color.Red},
		"success": {"[+]", color.Green},
	}
	if p, ok := printers[level]; ok {
		p.write(p.prefix + message)
		return
	}
	color.Red("[_-_]Invalid message level: " + message)
}
// TODO make sure all errors are logged to server log
| New | identifier_name |
http2.go | package http2
import (
// Standard
"bytes"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/gob"
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
// 3rd Party
"github.com/cretz/gopaque/gopaque"
"github.com/fatih/color"
"github.com/lucas-clemente/quic-go"
"github.com/lucas-clemente/quic-go/h2quic"
"github.com/satori/go.uuid"
"go.dedis.ch/kyber"
"gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
// reaper
"github.com/infosechoudini/reaper/pkg/agents"
"github.com/infosechoudini/reaper/pkg/core"
"github.com/infosechoudini/reaper/pkg/logging"
"github.com/infosechoudini/reaper/pkg/messages"
"github.com/infosechoudini/reaper/pkg/util"
)
// Server is a structure for creating and instantiating new server objects.
// Build one with New and start it with Run; incoming agent traffic is routed
// through agentHandler on Mux.
type Server struct {
	ID          uuid.UUID      // Unique identifier for the Server object
	Interface   string         // The network adapter interface the server will listen on
	Port        int            // The port the server will listen on
	Protocol    string         // The protocol (i.e. HTTP/2 or HTTP/3) the server will use; "h2" or "hq"
	Key         string         // The x.509 private key used for TLS encryption
	Certificate string         // The x.509 public key used for TLS encryption
	Server      interface{}    // A Golang server object (i.e. *http.Server or *h2quic.Server, chosen by Protocol)
	Mux         *http.ServeMux // The message handler/multiplexer
	jwtKey      []byte         // The password used by the server to create JWTs; random per server instance
	psk         string         // The pre-shared key password used prior to Password Authenticated Key Exchange (PAKE)
	opaqueKey   kyber.Scalar   // OPAQUE server's keys
}
// New instantiates a new server object and returns it
func New(iface string, port int, protocol string, key string, certificate string, psk string) (Server, error) {
s := Server{
ID: uuid.NewV4(),
Protocol: protocol,
Interface: iface,
Port: port,
Mux: http.NewServeMux(),
jwtKey: []byte(core.RandStringBytesMaskImprSrc(32)), // Used to sign and encrypt JWT
psk: psk,
}
// OPAQUE Server Public/Private keys; Can be used with every agent
s.opaqueKey = gopaque.CryptoDefault.NewKey(nil)
var cer tls.Certificate
var err error
// Check if certificate exists on disk
_, errCrt := os.Stat(certificate)
if os.IsNotExist(errCrt) {
// generate a new ephemeral certificate
m := fmt.Sprintf("No certificate found at %s", certificate)
logging.Server(m)
message("note", m)
t := "Creating in-memory x.509 certificate used for this session only."
logging.Server(t)
message("note", t)
message("info", "Additional details: https://github.com/Ne0nd0g/reaper/wiki/TLS-Certificates")
cerp, err := util.GenerateTLSCert(nil, nil, nil, nil, nil, nil, true) //ec certs not supported (yet) :(
if err != nil {
m := fmt.Sprintf("There was an error generating the SSL/TLS certificate:\r\n%s", err.Error())
logging.Server(m)
message("warn", m)
return s, err
}
cer = *cerp
} else {
if errCrt != nil {
m := fmt.Sprintf("There was an error importing the SSL/TLS x509 certificate:\r\n%s", errCrt.Error())
logging.Server(m)
message("warn", m)
return s, errCrt
}
s.Certificate = certificate
_, errKey := os.Stat(key)
if errKey != nil {
m := fmt.Sprintf("There was an error importing the SSL/TLS x509 key:\r\n%s", errKey.Error())
logging.Server(m)
message("warn", m)
return s, errKey
}
s.Key = key
cer, err = tls.LoadX509KeyPair(certificate, key)
if err != nil {
m := fmt.Sprintf("There was an error importing the SSL/TLS x509 key pair\r\n%s", err.Error())
logging.Server(m)
message("warn", m)
message("warn", "Ensure a keypair is located in the data/x509 directory")
return s, err
}
}
if len(cer.Certificate) < 1 || cer.PrivateKey == nil |
// Parse into X.509 format
x, errX509 := x509.ParseCertificate(cer.Certificate[0])
if errX509 != nil {
m := fmt.Sprintf("There was an error parsing the tls.Certificate structure into a x509.Certificate"+
" structure:\r\n%s", errX509.Error())
logging.Server(m)
message("warn", m)
return s, errX509
}
// Create fingerprint
S256 := sha256.Sum256(x.Raw)
sha256Fingerprint := hex.EncodeToString(S256[:])
// reaperCRT is the string representation of the SHA1 fingerprint for the public x.509 certificate distributed with reaper
reaperCRT := "4af9224c77821bc8a46503cfc2764b94b1fc8aa2521afc627e835f0b3c449f50"
// Check to see if the Public Key SHA1 finger print matches the certificate distributed with reaper for testing
if reaperCRT == sha256Fingerprint {
message("warn", "Insecure publicly distributed reaper x.509 testing certificate in use")
message("info", "Additional details: https://github.com/Ne0nd0g/reaper/wiki/TLS-Certificates")
}
// Log certificate information
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a %s signature of %s",
x.SignatureAlgorithm.String(), hex.EncodeToString(x.Signature)))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a public key of %v", x.PublicKey))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a serial number of %d", x.SerialNumber))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certifcate with a subject of %s", x.Subject.String()))
logging.Server(fmt.Sprintf("Starting reaper Server using an X.509 certificate with a SHA256 hash, "+
"calculated by reaper, of %s", sha256Fingerprint))
// Configure TLS
TLSConfig := &tls.Config{
Certificates: []tls.Certificate{cer},
MinVersion: tls.VersionTLS12,
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
PreferServerCipherSuites: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
},
//NextProtos: []string{protocol}, //Dont need to specify because server will pick
}
s.Mux.HandleFunc("/", s.agentHandler)
srv := &http.Server{
Addr: s.Interface + ":" + strconv.Itoa(s.Port),
Handler: s.Mux,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 1 << 20,
TLSConfig: TLSConfig,
//TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0), // <- Disables HTTP/2
}
if s.Protocol == "h2" {
s.Server = srv
} else if s.Protocol == "hq" {
s.Server = &h2quic.Server{
Server: srv,
QuicConfig: &quic.Config{
KeepAlive: false,
IdleTimeout: 168 * time.Hour,
RequestConnectionIDOmission: false,
},
}
} else {
return s, fmt.Errorf("%s is an invalid server protocol", s.Protocol)
}
return s, nil
}
// Run function starts the server on the preconfigured port for the preconfigured service
// Warns when the default "reaper" PSK is still in use before serving.
func (s *Server) Run() error {
	logging.Server(fmt.Sprintf("Starting %s Listener at %s:%d", s.Protocol, s.Interface, s.Port))
	time.Sleep(45 * time.Millisecond) // Sleep to allow the shell to start up
	if s.psk == "reaper" {
		fmt.Println()
		message("warn", "Listener was started using \"reaper\" as the Pre-Shared Key (PSK) allowing anyone"+
			" decrypt message traffic.")
		message("note", "Consider changing the PSK by using the -psk command line flag.")
	}
	message("note", fmt.Sprintf("Starting %s listener on %s:%d", s.Protocol, s.Interface, s.Port))
	if s.Protocol == "h2" {
		server := s.Server.(*http.Server)
		// NOTE(review): this defer fires when Run returns and closes the server;
		// combined with the go statement below, the shutdown ordering looks
		// unintended — verify before relying on it.
		defer func() {
			err := server.Close()
			if err != nil {
				m := fmt.Sprintf("There was an error starting the %s server:\r\n%s", s.Protocol, err.Error())
				logging.Server(m)
				message("warn", m)
				return
			}
		}()
		// NOTE(review): arguments of a go statement are evaluated in the CALLING
		// goroutine, so ListenAndServeTLS blocks right here until the server
		// stops; only logging.Server runs asynchronously. Also, .Error() would
		// panic if ListenAndServeTLS ever returned a nil error.
		go logging.Server(server.ListenAndServeTLS(s.Certificate, s.Key).Error())
		return nil
	} else if s.Protocol == "hq" {
		server := s.Server.(*h2quic.Server)
		defer func() {
			err := server.Close()
			if err != nil {
				m := fmt.Sprintf("There was an error starting the hq server:\r\n%s", err.Error())
				logging.Server(m)
				message("warn", m)
				return
			}
		}()
		// NOTE(review): same blocking/nil-panic concern as the h2 branch above.
		go logging.Server(server.ListenAndServeTLS(s.Certificate, s.Key).Error())
		return nil
	}
	return fmt.Errorf("%s is an invalid server protocol", s.Protocol)
}
// agentHandler function is responsible for all reaper agent traffic
// Flow: verify an Authorization JWT (first with the interface jwtKey used for
// authenticated agents, then falling back to the SHA-256 of the PSK for agents
// still doing OPAQUE registration/authentication), decrypt the gob-encoded JWE
// request body, dispatch on the message type, and reply with a gob-encoded JWE.
// All rejections deliberately return 404 so the listener is hard to fingerprint.
func (s *Server) agentHandler(w http.ResponseWriter, r *http.Request) {
	if core.Verbose {
		message("note", fmt.Sprintf("Received %s %s connection from %s", r.Proto, r.Method, r.RemoteAddr))
		logging.Server(fmt.Sprintf("Received HTTP %s connection from %s", r.Method, r.RemoteAddr))
	}
	if core.Debug {
		message("debug", fmt.Sprintf("HTTP Connection Details:"))
		message("debug", fmt.Sprintf("Host: %s", r.Host))
		message("debug", fmt.Sprintf("URI: %s", r.RequestURI))
		message("debug", fmt.Sprintf("Method: %s", r.Method))
		message("debug", fmt.Sprintf("Protocol: %s", r.Proto))
		message("debug", fmt.Sprintf("Headers: %s", r.Header))
		message("debug", fmt.Sprintf("TLS Negotiated Protocol: %s", r.TLS.NegotiatedProtocol))
		message("debug", fmt.Sprintf("TLS Cipher Suite: %d", r.TLS.CipherSuite))
		message("debug", fmt.Sprintf("TLS Server Name: %s", r.TLS.ServerName))
		message("debug", fmt.Sprintf("Content Length: %d", r.ContentLength))
		logging.Server(fmt.Sprintf("[DEBUG]HTTP Connection Details:"))
		logging.Server(fmt.Sprintf("[DEBUG]Host: %s", r.Host))
		logging.Server(fmt.Sprintf("[DEBUG]URI: %s", r.RequestURI))
		logging.Server(fmt.Sprintf("[DEBUG]Method: %s", r.Method))
		logging.Server(fmt.Sprintf("[DEBUG]Protocol: %s", r.Proto))
		logging.Server(fmt.Sprintf("[DEBUG]Headers: %s", r.Header))
		logging.Server(fmt.Sprintf("[DEBUG]TLS Negotiated Protocol: %s", r.TLS.NegotiatedProtocol))
		logging.Server(fmt.Sprintf("[DEBUG]TLS Cipher Suite: %d", r.TLS.CipherSuite))
		logging.Server(fmt.Sprintf("[DEBUG]TLS Server Name: %s", r.TLS.ServerName))
		logging.Server(fmt.Sprintf("[DEBUG]Content Length: %d", r.ContentLength))
	}
	// Check for reaper PRISM activity
	if r.UserAgent() == "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.85 Safari/537.36 " {
		message("warn", fmt.Sprintf("Someone from %s is attempting to fingerprint this reaper server", r.RemoteAddr))
		//w.WriteHeader(404)
	}
	// Make sure the message has a JWT
	token := r.Header.Get("Authorization")
	if token == "" {
		if core.Verbose {
			message("warn", "incoming request did not contain an Authorization header")
		}
		w.WriteHeader(404)
		return
	}
	if r.Method == http.MethodPost {
		var returnMessage messages.Base
		var err error
		var key []byte
		//Read the request message until EOF
		requestBytes, errRequestBytes := ioutil.ReadAll(r.Body)
		if errRequestBytes != nil {
			message("warn", fmt.Sprintf("There was an error reading a POST message sent by an "+
				"agent:\r\n%s", errRequestBytes))
			return
		}
		// Decode gob to JWE string
		var jweString string
		errDecode := gob.NewDecoder(bytes.NewReader(requestBytes)).Decode(&jweString)
		if errDecode != nil {
			message("warn", fmt.Sprintf("there was an error decoding JWE payload message sent by an agent:\r\n%s", errDecode.Error()))
			return
		}
		// Validate JWT and get claims
		var agentID uuid.UUID
		var errValidate error
		// Set return headers
		//w.Header().Set("Content-Type", "application/octet-stream")
		// Validate JWT using HTTP interface JWT key; Given to authenticated agents by server
		// NOTE(review): strings.Split(token, " ")[1] panics if the Authorization
		// header has no space (e.g. missing "Bearer " prefix) — verify input.
		agentID, errValidate = validateJWT(strings.Split(token, " ")[1], s.jwtKey)
		// If agentID was returned, then message contained a JWT encrypted with the HTTP interface key
		if (errValidate != nil) && (agentID == uuid.Nil) {
			if core.Verbose {
				message("warn", errValidate.Error())
				message("note", "trying again with interface PSK")
			}
			// Validate JWT using interface PSK; Used by unauthenticated agents
			hashedKey := sha256.Sum256([]byte(s.psk))
			key = hashedKey[:]
			agentID, errValidate = validateJWT(strings.Split(token, " ")[1], key)
			if errValidate != nil {
				if core.Verbose {
					message("warn", errValidate.Error())
				}
				w.WriteHeader(404)
				return
			}
			if core.Debug {
				message("info", "Unauthenticated JWT")
			}
			// Decrypt the HTTP payload, a JWE, using interface PSK
			k, errDecryptPSK := decryptJWE(jweString, key)
			// Successfully decrypted JWE with interface PSK
			if errDecryptPSK == nil {
				if core.Debug {
					message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", k))
				}
				if core.Verbose {
					message("note", fmt.Sprintf("Received %s message, decrypted JWE with interface PSK", k.Type))
				}
				messagePayloadBytes := new(bytes.Buffer)
				// Allowed unauthenticated message types w/ PSK signed JWT and PSK encrypted JWT
				switch k.Type {
				case "AuthInit":
					serverAuthInit, err := agents.OPAQUEAuthenticateInit(k)
					if err != nil {
						logging.Server(err.Error())
						message("warn", err.Error())
						w.WriteHeader(404)
						return
					}
					logging.Server(fmt.Sprintf("Received new agent OPAQUE authentication from %s", agentID))
					// Encode return message into a gob
					errAuthInit := gob.NewEncoder(messagePayloadBytes).Encode(serverAuthInit)
					if errAuthInit != nil {
						m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errAuthInit.Error())
						logging.Server(m)
						message("warn", m)
						w.WriteHeader(404)
						return
					}
				case "RegInit":
					serverRegInit, err := agents.OPAQUERegistrationInit(k, s.opaqueKey)
					if err != nil {
						logging.Server(err.Error())
						message("warn", err.Error())
						w.WriteHeader(404)
						return
					}
					logging.Server(fmt.Sprintf("Received new agent OPAQUE user registration initialization from %s", agentID))
					// Encode return message into a gob
					errRegInit := gob.NewEncoder(messagePayloadBytes).Encode(serverRegInit)
					if errRegInit != nil {
						m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errRegInit.Error())
						logging.Server(m)
						message("warn", m)
						w.WriteHeader(404)
						return
					}
				case "RegComplete":
					serverRegComplete, err := agents.OPAQUERegistrationComplete(k)
					if err != nil {
						logging.Server(err.Error())
						message("warn", err.Error())
						w.WriteHeader(404)
						return
					}
					logging.Server(fmt.Sprintf("Received new agent OPAQUE user registration complete from %s", agentID))
					// Encode return message into a gob
					errRegInit := gob.NewEncoder(messagePayloadBytes).Encode(serverRegComplete)
					if errRegInit != nil {
						m := fmt.Sprintf("there was an error encoding the return message into a gob:\r\n%s", errRegInit.Error())
						logging.Server(m)
						message("warn", m)
						w.WriteHeader(404)
						return
					}
				default:
					message("warn", "invalid message type")
					w.WriteHeader(404)
					return
				}
				// Get JWE
				jwe, errJWE := core.GetJWESymetric(messagePayloadBytes.Bytes(), key)
				if errJWE != nil {
					logging.Server(errJWE.Error())
					message("warn", errJWE.Error())
					w.WriteHeader(404)
					return
				}
				// Set return headers
				w.Header().Set("Content-Type", "application/octet-stream")
				// Encode JWE into gob
				errJWEBuffer := gob.NewEncoder(w).Encode(jwe)
				if errJWEBuffer != nil {
					m := fmt.Errorf("there was an error writing the %s response message to the HTTP stream:\r\n%s", k.Type, errJWEBuffer.Error())
					logging.Server(m.Error())
					message("warn", m.Error())
					w.WriteHeader(404)
					return
				}
				return
			}
			if core.Verbose {
				message("note", "Unauthenticated JWT w/ Authenticated JWE agent session key")
			}
			// Decrypt the HTTP payload, a JWE, using agent session key
			j, errDecrypt := decryptJWE(jweString, agents.GetEncryptionKey(agentID))
			if errDecrypt != nil {
				message("warn", errDecrypt.Error())
				w.WriteHeader(404)
				return
			}
			if core.Debug {
				message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", j))
			}
			if core.Verbose {
				message("info", fmt.Sprintf("Received %s message from %s at %s", j.Type, j.ID, time.Now().UTC().Format(time.RFC3339)))
			}
			// Allowed authenticated message with PSK JWT and JWE encrypted with derived secret
			switch j.Type {
			case "AuthComplete":
				returnMessage, err = agents.OPAQUEAuthenticateComplete(j)
				// NOTE(review): a success-sounding line is logged only on error
				// here — looks like a copy/paste slip; confirm intent.
				if err != nil {
					logging.Server(fmt.Sprintf("Received new agent OPAQUE authentication from %s", agentID))
				}
			default:
				message("warn", fmt.Sprintf("Invalid Activity: %s", j.Type))
				w.WriteHeader(404)
				return
			}
		} else {
			// If not using the PSK, the agent has previously authenticated
			if core.Debug {
				message("info", "Authenticated JWT")
			}
			// Decrypt JWE
			key = agents.GetEncryptionKey(agentID)
			j, errDecrypt := decryptJWE(jweString, key)
			if errDecrypt != nil {
				message("warn", errDecrypt.Error())
				w.WriteHeader(404)
				return
			}
			if core.Debug {
				message("debug", fmt.Sprintf("[DEBUG]POST DATA: %v", j))
			}
			if core.Verbose {
				message("note", "Authenticated JWT w/ Authenticated JWE agent session key")
				message("info", fmt.Sprintf("Received %s message from %s at %s", j.Type, j.ID, time.Now().UTC().Format(time.RFC3339)))
			}
			// If both an agentID and error were returned, then the claims were likely bad and the agent needs to re-authenticate
			if (errValidate != nil) && (agentID != uuid.Nil) {
				message("warn", fmt.Sprintf("Agent %s connected with expired JWT. Instructing agent to re-authenticate", agentID))
				j.Type = "ReAuthenticate"
			}
			// Authenticated and authorized message types
			switch j.Type {
			case "KeyExchange":
				returnMessage, err = agents.KeyExchange(j)
			case "StatusCheckIn":
				returnMessage, err = agents.StatusCheckIn(j)
			case "CmdResults":
				err = agents.JobResults(j)
			case "AgentInfo":
				err = agents.UpdateInfo(j)
			case "FileTransfer":
				err = agents.FileTransfer(j)
			case "ReAuthenticate":
				returnMessage, err = agents.OPAQUEReAuthenticate(agentID)
			default:
				err = fmt.Errorf("invalid message type: %s", j.Type)
			}
		}
		if err != nil {
			m := fmt.Sprintf("There was an error during while handling a message from agent %s:\r\n%s", agentID.String(), err.Error())
			logging.Server(m)
			message("warn", m)
			w.WriteHeader(404)
			return
		}
		if returnMessage.Type == "" {
			returnMessage.Type = "ServerOk"
			returnMessage.ID = agentID
		}
		if core.Verbose {
			message("note", fmt.Sprintf("Sending "+returnMessage.Type+" message type to agent"))
		}
		// Get JWT to add to message.Base for all messages except re-authenticate messages
		if returnMessage.Type != "ReAuthenticate" {
			jsonWebToken, errJWT := getJWT(agentID, s.jwtKey)
			if errJWT != nil {
				message("warn", errJWT.Error())
				w.WriteHeader(404)
				return
			}
			returnMessage.Token = jsonWebToken
		}
		// Encode messages.Base into a gob
		returnMessageBytes := new(bytes.Buffer)
		errReturnMessageBytes := gob.NewEncoder(returnMessageBytes).Encode(returnMessage)
		if errReturnMessageBytes != nil {
			m := fmt.Sprintf("there was an error encoding the %s return message for agent %s into a GOB:\r\n%s", returnMessage.Type, agentID.String(), errReturnMessageBytes.Error())
			logging.Server(m)
			message("warn", m)
			return
		}
		// Get JWE
		key = agents.GetEncryptionKey(agentID)
		jwe, errJWE := core.GetJWESymetric(returnMessageBytes.Bytes(), key)
		// NOTE(review): unlike other error paths, this one does not return, so an
		// invalid/empty JWE is still encoded and sent below — confirm intent.
		if errJWE != nil {
			logging.Server(errJWE.Error())
			message("warn", errJWE.Error())
		}
		// Set return headers
		w.Header().Set("Content-Type", "application/octet-stream")
		// Encode JWE to GOB and send it to the agent
		errEncode := gob.NewEncoder(w).Encode(jwe)
		if errEncode != nil {
			m := fmt.Sprintf("There was an error encoding the server AuthComplete GOB message:\r\n%s", errEncode.Error())
			logging.Server(m)
			message("warn", m)
			return
		}
		// Remove the agent from the server after successfully sending the kill message
		if returnMessage.Type == "AgentControl" {
			if returnMessage.Payload.(messages.AgentControl).Command == "kill" {
				err := agents.RemoveAgent(agentID)
				if err != nil {
					message("warn", err.Error())
					return
				}
				message("info", fmt.Sprintf("Agent %s was removed from the server", agentID.String()))
				return
			}
		}
	} else if r.Method == "GET" {
		w.WriteHeader(404)
	} else {
		w.WriteHeader(404)
	}
	if core.Debug {
		message("debug", "Leaving http2.agentHandler function without error")
	}
}
// getJWT returns a JSON Web Token for the provided agent using the interface JWT Key
// The token is signed (HS256) then encrypted (A256GCM direct) with the same
// symmetric key, and its expiry is tied to the agent's lifetime.
func getJWT(agentID uuid.UUID, key []byte) (string, error) {
	if core.Debug {
		message("debug", "Entering into agents.GetJWT function")
	}
	// Encrypter for the outer JWE layer of the nested token
	encrypter, encErr := jose.NewEncrypter(jose.A256GCM,
		jose.Recipient{
			Algorithm: jose.DIRECT,
			Key:       key},
		(&jose.EncrypterOptions{}).WithType("JWT").WithContentType("JWT"))
	if encErr != nil {
		return "", fmt.Errorf("there was an error creating the JWE encryptor:\r\n%s", encErr)
	}
	// Signer for the inner JWS layer; same symmetric key as the encrypter
	signer, errSigner := jose.NewSigner(jose.SigningKey{
		Algorithm: jose.HS256,
		Key:       key},
		(&jose.SignerOptions{}).WithType("JWT"))
	if errSigner != nil {
		return "", fmt.Errorf("there was an error creating the JWT signer:\r\n%s", errSigner.Error())
	}
	lifetime, errLifetime := agents.GetLifetime(agentID)
	if errLifetime != nil && errLifetime.Error() != "agent WaitTime is equal to zero" {
		return "", errLifetime
	}
	// This is for when the server hasn't received an AgentInfo struct and doesn't know the agent's lifetime yet or sleep is set to zero
	if lifetime == 0 {
		lifetime = time.Second * 30
	}
	// TODO Add in the rest of the JWT claim info
	cl := jwt.Claims{
		ID:        agentID.String(),
		NotBefore: jwt.NewNumericDate(time.Now()),
		IssuedAt:  jwt.NewNumericDate(time.Now()),
		Expiry:    jwt.NewNumericDate(time.Now().Add(lifetime)),
	}
	agentJWT, err := jwt.SignedAndEncrypted(signer, encrypter).Claims(cl).CompactSerialize()
	if err != nil {
		return "", fmt.Errorf("there was an error serializing the JWT:\r\n%s", err.Error())
	}
	// Parse it to check for errors; the parsed value itself is discarded
	_, errParse := jwt.ParseEncrypted(agentJWT)
	if errParse != nil {
		return "", fmt.Errorf("there was an error parsing the encrypted JWT:\r\n%s", errParse.Error())
	}
	logging.Server(fmt.Sprintf("Created authenticated JWT for %s", agentID))
	if core.Debug {
		message("debug", fmt.Sprintf("Sending agent %s an authenticated JWT with a lifetime of %v:\r\n%v",
			agentID.String(), lifetime, agentJWT))
	}
	return agentJWT, nil
}
// validateJWT validates the provided JSON Web Token.
// It decrypts the nested token with key, verifies the signature, extracts the
// agent ID from the claims, and validates expiry with a leeway equal to the
// agent's WaitTime (defaulting to 10s when unknown).
// Fix: corrected the claims-deserialization error message, which was missing
// the word "error".
func validateJWT(agentJWT string, key []byte) (uuid.UUID, error) {
	var agentID uuid.UUID
	if core.Debug {
		message("debug", "Entering into http2.ValidateJWT")
		message("debug", fmt.Sprintf("Input JWT: %v", agentJWT))
	}
	claims := jwt.Claims{}
	// Parse to make sure it is a valid JWT
	nestedToken, err := jwt.ParseSignedAndEncrypted(agentJWT)
	if err != nil {
		return agentID, fmt.Errorf("there was an error parsing the JWT:\r\n%s", err.Error())
	}
	// Decrypt JWT
	token, errToken := nestedToken.Decrypt(key)
	if errToken != nil {
		return agentID, fmt.Errorf("there was an error decrypting the JWT:\r\n%s", errToken.Error())
	}
	// Deserialize the claims and validate the signature
	errClaims := token.Claims(key, &claims)
	if errClaims != nil {
		return agentID, fmt.Errorf("there was an error deserializing the JWT claims:\r\n%s", errClaims.Error())
	}
	agentID = uuid.FromStringOrNil(claims.ID)
	AgentWaitTime, errWait := agents.GetAgentFieldValue(agentID, "WaitTime")
	// An error will be returned during OPAQUE registration & authentication
	if errWait != nil {
		if core.Debug {
			message("debug", fmt.Sprintf("there was an error getting the agent's wait time:\r\n%s", errWait.Error()))
		}
	}
	if AgentWaitTime == "" {
		AgentWaitTime = "10s"
	}
	WaitTime, errParse := time.ParseDuration(AgentWaitTime)
	if errParse != nil {
		return agentID, fmt.Errorf("there was an error parsing the agent's wait time into a duration:\r\n%s", errParse.Error())
	}
	// Validate claims; Default Leeway is 1 minute; Set it to 1x the agent's WaitTime setting
	errValidate := claims.ValidateWithLeeway(jwt.Expected{
		Time: time.Now(),
	}, WaitTime)
	if errValidate != nil {
		if core.Verbose {
			message("warn", fmt.Sprintf("The JWT claims were not valid for %s", agentID))
			message("note", fmt.Sprintf("JWT Claim Expiry: %s", claims.Expiry.Time()))
			message("note", fmt.Sprintf("JWT Claim Issued: %s", claims.IssuedAt.Time()))
		}
		return agentID, errValidate
	}
	if core.Debug {
		message("debug", fmt.Sprintf("agentID: %s", agentID.String()))
		message("debug", "Leaving http2.ValidateJWT without error")
	}
	// TODO I need to validate other things like token age/expiry
	return agentID, nil
}
// decryptJWE takes provided JWE string and decrypts it using the per-agent key.
// The JWE plaintext is expected to be a gob-encoded messages.Base structure.
// Fix: corrected the "parseing" typo in the parse-failure error message.
func decryptJWE(jweString string, key []byte) (messages.Base, error) {
	if core.Debug {
		message("debug", "Entering into http2.DecryptJWE function")
		message("debug", fmt.Sprintf("Input JWE String: %s", jweString))
	}
	var m messages.Base
	// Parse JWE string back into JSONWebEncryption
	jwe, errObject := jose.ParseEncrypted(jweString)
	if errObject != nil {
		return m, fmt.Errorf("there was an error parsing the JWE string into a JSONWebEncryption object:\r\n%s", errObject)
	}
	if core.Debug {
		message("debug", fmt.Sprintf("Parsed JWE:\r\n%+v", jwe))
	}
	// Decrypt the JWE with the supplied symmetric key
	jweMessage, errDecrypt := jwe.Decrypt(key)
	if errDecrypt != nil {
		return m, fmt.Errorf("there was an error decrypting the JWE:\r\n%s", errDecrypt.Error())
	}
	// Decode the JWE payload into a messages.Base struct
	errDecode := gob.NewDecoder(bytes.NewReader(jweMessage)).Decode(&m)
	if errDecode != nil {
		return m, fmt.Errorf("there was an error decoding JWE payload message sent by an agent:\r\n%s", errDecode.Error())
	}
	if core.Debug {
		message("debug", "Leaving http2.DecryptJWE function without error")
		message("debug", fmt.Sprintf("Returning message base: %+v", m))
	}
	return m, nil
}
// message writes a level-tagged, colorized status line to the command line.
// Unrecognized levels are printed in red so typos at call sites stand out.
func message(level string, message string) {
	// Table of known levels: visual prefix plus the color printer to use.
	printers := map[string]struct {
		prefix string
		write  func(format string, a ...interface{})
	}{
		"info":    {"[i]", color.Cyan},
		"note":    {"[-]", color.Yellow},
		"warn":    {"[!]", color.Red},
		"debug":   {"[DEBUG]", color.Red},
		"success": {"[+]", color.Green},
	}
	if p, ok := printers[level]; ok {
		p.write(p.prefix + message)
		return
	}
	color.Red("[_-_]Invalid message level: " + message)
}
// TODO make sure all errors are logged to server log
| {
m := "Unable to import certificate for use in reaper: empty certificate structure."
logging.Server(m)
message("warn", m)
return s, errors.New("empty certificate structure")
} | conditional_block |
cache.go | package view
import (
"context"
"encoding/json"
"fmt"
"github.com/viant/afs/option"
"github.com/viant/afs/url"
"github.com/viant/datly/converter"
"github.com/viant/datly/shared"
"github.com/viant/sqlx/io/read/cache"
"github.com/viant/sqlx/io/read/cache/aerospike"
"github.com/viant/sqlx/io/read/cache/afs"
rdata "github.com/viant/toolbox/data"
"strconv"
"strings"
"sync"
"time"
)
type (
	// Cache configures read-result caching for a View. Provider selects the
	// backend (aerospike://host:port/namespace, or an afs location otherwise).
	Cache struct {
		shared.Reference
		owner        *View  // view this cache was initialized for
		Name         string `json:",omitempty" yaml:",omitempty"`
		Location     string // cache destination; may contain ${View.…} placeholders expanded at init
		Provider     string
		TimeToLiveMs int // entry TTL in milliseconds
		PartSize     int `json:",omitempty"`
		AerospikeConfig
		Warmup       *Warmup `json:",omitempty" yaml:",omitempty"`
		newCache     func() (cache.Cache, error) // factory set during init; used by Service
		_initialized bool
		mux          sync.Mutex
	}

	// Caches is a named collection of cache configurations.
	Caches []*Cache

	// AerospikeConfig tunes the aerospike client; all durations are in milliseconds.
	AerospikeConfig struct {
		SleepBetweenRetriesInMs int `json:",omitempty"`
		MaxRetries              int `json:",omitempty"`
		TotalTimeoutInMs        int `json:",omitempty"`
		SocketTimeoutInMs       int `json:",omitempty"`
		FailedRequestLimit      int `json:",omitempty"`
		ResetFailuresInMs       int `json:",omitempty"`
	}

	// Warmup describes how to pre-populate the cache from parameter permutations.
	Warmup struct {
		IndexColumn string
		IndexMeta   bool       `json:",omitempty"`
		Connector   *Connector `json:",omitempty"`
		Cases       []*CacheParameters
	}

	// CacheParameters is one warmup case: a set of parameter values to expand.
	CacheParameters struct {
		Set []*ParamValue
	}

	// ParamValue holds candidate values for a single named parameter.
	ParamValue struct {
		Name   string
		Values []interface{}
		_param *Parameter
	}

	// CacheInput is one concrete selector permutation produced for warmup.
	CacheInput struct {
		Selector   *Selector
		Column     string
		MetaColumn string
		IndexMeta  bool
	}

	// CacheInputFn lazily yields cache inputs (or the error that produced them).
	CacheInputFn func() ([]*CacheInput, error)
)
// Supported cache provider scheme names; defaultType falls back to afs storage.
const (
	defaultType   = ""
	afsType       = "afs"
	aerospikeType = "aerospike"
)
func (c Caches) | (name string) bool {
for _, candidate := range c {
if candidate.Name == name {
return true
}
}
return false
}
// Append registers cache, ignoring it when an entry with the same name is
// already present.
func (r *Caches) Append(cache *Cache) {
	if !r.Has(cache.Name) {
		*r = append(*r, cache)
	}
}
// init validates the cache configuration, resolves any Ref-based inheritance,
// builds the backend client factory, and prepares warmup. It is idempotent.
func (c *Cache) init(ctx context.Context, resource *Resource, aView *View) error {
	if c._initialized {
		return nil
	}
	// NOTE(review): the flag is set before validation, so a failed init will
	// not be retried on a later call — confirm that is intended.
	c._initialized = true
	c.owner = aView
	var viewName string
	if aView != nil {
		viewName = aView.Name
	}
	if err := c.inheritIfNeeded(ctx, resource, aView); err != nil {
		return err
	}
	// Location and TTL are mandatory after inheritance has been applied
	if c.Location == "" {
		return fmt.Errorf("View %v cache Location can't be empty", viewName)
	}
	if c.TimeToLiveMs == 0 {
		return fmt.Errorf("View %v cache TimeToLiveMs can't be empty", viewName)
	}
	if err := c.ensureCacheClient(aView, viewName); err != nil {
		return err
	}
	if err := c.initWarmup(ctx, resource); err != nil {
		return err
	}
	return nil
}
// ensureCacheClient lazily builds the backend client factory; it is a no-op
// when a factory already exists or there is no view to derive one from.
func (c *Cache) ensureCacheClient(aView *View, viewName string) error {
	if c.newCache != nil || aView == nil {
		return nil
	}
	factory, err := c.cacheService(viewName, aView)
	if err != nil {
		return err
	}
	c.newCache = factory
	return nil
}
// cacheService builds a cache factory based on the Provider scheme:
// aerospike:// locations use the aerospike backend, anything else falls back
// to an afs (file-storage) cache keyed by the view name.
func (c *Cache) cacheService(name string, aView *View) (func() (cache.Cache, error), error) {
	scheme := url.Scheme(c.Provider, "")
	switch scheme {
	case aerospikeType:
		return c.aerospikeCache(aView)
	default:
		// No factory for anonymous views — presumably resolved later; TODO confirm
		if aView.Name == "" {
			return nil, nil
		}
		expandedLoc, err := c.expandLocation(aView)
		if err != nil {
			return nil, err
		}
		// TTL is configured in milliseconds; PartSize controls streamed part size
		afsCache, err := afs.NewCache(expandedLoc, time.Duration(c.TimeToLiveMs)*time.Millisecond, aView.Name, option.NewStream(c.PartSize, 0))
		if err != nil {
			return nil, err
		}
		// The afs cache is created eagerly and shared by every factory call
		return func() (cache.Cache, error) {
			return afsCache, nil
		}, nil
	}
}
// aerospikeCache builds a factory producing aerospike-backed caches from a
// Provider of the form [scheme://]host:port/namespace.
// Fix: ResetFailuresInMs was converted to time.Duration without a unit, which
// interprets the value as nanoseconds; it is now multiplied by
// time.Millisecond, matching how TimeToLiveMs is converted elsewhere.
func (c *Cache) aerospikeCache(aView *View) (func() (cache.Cache, error), error) {
	if c.Location == "" {
		return nil, fmt.Errorf("aerospike cache SetName cannot be empty")
	}
	host, port, namespace, err := c.split(c.Provider)
	if err != nil {
		return nil, err
	}
	// Clients are pooled per host:port
	clientProvider := aClientPool.Client(host, port)
	expanded, err := c.expandLocation(aView)
	if err != nil {
		return nil, err
	}
	timeoutConfig := &aerospike.TimeoutConfig{
		MaxRetries:            c.AerospikeConfig.MaxRetries,
		TotalTimeoutMs:        c.AerospikeConfig.TotalTimeoutInMs,
		SleepBetweenRetriesMs: c.SleepBetweenRetriesInMs,
	}
	var resetTimout *time.Duration
	if c.AerospikeConfig.ResetFailuresInMs != 0 {
		// Convert the millisecond setting into a proper duration
		resetDuration := time.Duration(c.AerospikeConfig.ResetFailuresInMs) * time.Millisecond
		resetTimout = &resetDuration
	}
	failureHandler := aerospike.NewFailureHandler(int64(c.AerospikeConfig.FailedRequestLimit), resetTimout)
	return func() (cache.Cache, error) {
		client, err := clientProvider()
		if err != nil {
			return nil, err
		}
		// TTL for aerospike is expressed in seconds
		return aerospike.New(namespace, expanded, client, uint32(c.TimeToLiveMs/1000), timeoutConfig, failureHandler)
	}, nil
}
// expandLocation renders the cache Location template, expanding ${View.…}
// placeholders with values taken from the JSON-serialized view parameter.
func (c *Cache) expandLocation(aView *View) (string, error) {
	viewParam := AsViewParam(aView, nil, nil)
	asBytes, err := json.Marshal(viewParam)
	if err != nil {
		return "", err
	}
	viewMap := map[string]interface{}{}
	if err = json.Unmarshal(asBytes, &viewMap); err != nil {
		return "", err
	}
	locationMap := &rdata.Map{}
	locationMap.Put("View", viewMap)
	return locationMap.ExpandAsText(c.Location), nil
}
// Service returns a cache.Cache produced by the configured factory.
// init must have completed successfully first; otherwise newCache is nil and
// this call panics.
func (c *Cache) Service() (cache.Cache, error) {
	return c.newCache()
}
// split parses a provider location of the form [scheme://]host:port/namespace
// into its host, port, and namespace components.
func (c *Cache) split(location string) (host string, port int, namespace string, err error) {
	actualScheme := url.Scheme(location, "")
	hostPart, namespace := url.Split(location, actualScheme)
	if namespace == "" {
		return "", 0, "", c.unsupportedLocationFormat(location)
	}
	// Skip past "scheme://" when a scheme is present
	hostStart := 0
	if actualScheme != "" {
		hostStart = len(actualScheme) + 3
	}
	// The last character of hostPart is dropped — presumably a trailing '/'
	// left by url.Split; TODO confirm against url.Split's contract.
	segments := strings.Split(hostPart[hostStart:len(hostPart)-1], ":")
	if len(segments) != 2 {
		return "", 0, "", c.unsupportedLocationFormat(location)
	}
	port, err = strconv.Atoi(segments[1])
	if err != nil {
		return "", 0, "", err
	}
	return segments[0], port, namespace, nil
}
// unsupportedLocationFormat builds the error returned for provider locations
// that do not match [protocol][hostname]:[port]/[namespace].
func (c *Cache) unsupportedLocationFormat(location string) error {
	const format = "unsupported location format: %v, supported location format: [protocol][hostname]:[port]/[namespace]"
	return fmt.Errorf(format, location)
}
// inheritIfNeeded copies settings from the referenced cache provider (c.Ref)
// into this cache. The referenced provider is initialized first so that the
// inherited values are fully resolved. Returns an error when the reference
// cannot be found or the warmup configuration cannot be copied.
func (c *Cache) inheritIfNeeded(ctx context.Context, resource *Resource, aView *View) error {
	if c.Ref == "" {
		return nil
	}
	source, ok := resource.CacheProvider(c.Ref)
	if !ok {
		return fmt.Errorf("not found cache provider with %v name", c.Ref)
	}
	if c.Warmup == nil && source.Warmup != nil {
		warmupMarshal, err := json.Marshal(source.Warmup)
		if err != nil {
			return err
		}
		// Allocate the destination before unmarshalling: the previous code
		// passed the nil c.Warmup pointer, which makes json.Unmarshal fail
		// with an InvalidUnmarshalError instead of copying the warmup config.
		c.Warmup = &Warmup{}
		if err = json.Unmarshal(warmupMarshal, c.Warmup); err != nil {
			return err
		}
	}
	if err := source.init(ctx, resource, nil); err != nil {
		return err
	}
	return c.inherit(source)
}
// inherit fills only the zero-valued fields of this cache from source;
// explicitly configured values always win. Always returns nil.
func (c *Cache) inherit(source *Cache) error {
	if len(c.Provider) == 0 {
		c.Provider = source.Provider
	}
	if c.PartSize == 0 {
		c.PartSize = source.PartSize
	}
	if len(c.Location) == 0 {
		c.Location = source.Location
	}
	if c.TimeToLiveMs == 0 {
		c.TimeToLiveMs = source.TimeToLiveMs
	}
	return nil
}
// GenerateCacheInput produces every warmup selector permutation. Each warmup
// case is expanded in its own goroutine; results are collected from a
// buffered channel that is closed once all cases have reported back.
// NOTE: assumes c.Warmup is non-nil — callers initialize warmup beforehand.
func (c *Cache) GenerateCacheInput(ctx context.Context) ([]*CacheInput, error) {
	pending := len(c.Warmup.Cases)
	results := make(chan CacheInputFn, pending)
	if pending == 0 {
		close(results)
		// No warmup cases configured: a single input with an empty selector.
		return []*CacheInput{c.NewInput(NewSelector())}, nil
	}
	for _, aCase := range c.Warmup.Cases {
		go c.generateDatasetSelectorsChan(ctx, results, aCase)
	}
	var permutations []*CacheInput
	received := 0
	for fn := range results {
		batch, err := fn()
		if err != nil {
			return nil, err
		}
		permutations = append(permutations, batch...)
		received++
		if received == pending {
			close(results)
		}
	}
	return permutations, nil
}
// generateDatasetSelectorsChan expands one warmup case and publishes the
// outcome (selectors or error) as a closure on selectorChan.
func (c *Cache) generateDatasetSelectorsChan(ctx context.Context, selectorChan chan CacheInputFn, dataSet *CacheParameters) {
	inputs, genErr := c.generateDatasetSelectorsErr(ctx, dataSet)
	selectorChan <- func() ([]*CacheInput, error) { return inputs, genErr }
}
// generateDatasetSelectorsErr converts every parameter of the warmup case to
// its typed values and expands them into one CacheInput per combination.
func (c *Cache) generateDatasetSelectorsErr(ctx context.Context, set *CacheParameters) ([]*CacheInput, error) {
	values := make([][]interface{}, 0, len(set.Set))
	for _, paramValue := range set.Set {
		converted, err := c.getParamValues(ctx, paramValue)
		if err != nil {
			return nil, err
		}
		values = append(values, converted)
	}
	var inputs []*CacheInput
	if err := c.appendSelectors(set, values, &inputs); err != nil {
		return nil, err
	}
	return inputs, nil
}
// getParamValues converts every declared raw value to the parameter's schema
// type; optional parameters additionally get a trailing nil, which later
// stands for "parameter unset" during permutation expansion.
func (c *Cache) getParamValues(ctx context.Context, paramValue *ParamValue) ([]interface{}, error) {
	converted := make([]interface{}, len(paramValue.Values), len(paramValue.Values)+1)
	for i := range paramValue.Values {
		asText := fmt.Sprintf("%v", paramValue.Values[i])
		value, _, err := converter.Convert(asText, paramValue._param.Schema.Type(), false, paramValue._param.DateFormat)
		if err != nil {
			return nil, err
		}
		converted[i] = value
	}
	if !paramValue._param.IsRequired() {
		converted = append(converted, nil)
	}
	return converted, nil
}
// initWarmup validates and initializes the warmup configuration: synthesizes
// a default case when possible, verifies the index column exists on the
// owner view, binds each ParamValue to its template parameter, and
// initializes the optional warmup connector.
func (c *Cache) initWarmup(ctx context.Context, resource *Resource) error {
	if c.owner == nil || c.Warmup == nil {
		return nil
	}
	c.addNonRequiredWarmupIfNeeded()
	_, ok := c.owner.ColumnByName(c.Warmup.IndexColumn)
	if !ok && c.Warmup.IndexColumn != "" {
		// Report the missing column name, not the whole Warmup struct (the
		// original message formatted c.Warmup itself, producing unreadable
		// output like "&{IndexColumn ...}").
		return fmt.Errorf("not found warmup column %v at View %v", c.Warmup.IndexColumn, c.owner.Name)
	}
	for _, dataset := range c.Warmup.Cases {
		for _, paramValue := range dataset.Set {
			if err := c.ensureParam(paramValue); err != nil {
				return err
			}
		}
	}
	if c.Warmup.Connector != nil {
		if err := c.Warmup.Connector.Init(ctx, resource.GetConnectors()); err != nil {
			return err
		}
	}
	return nil
}
// ensureParam lazily resolves paramValue._param from the owner template's
// parameter index; already-resolved values are left untouched.
func (c *Cache) ensureParam(paramValue *ParamValue) error {
	if paramValue._param != nil {
		return nil
	}
	resolved, lookupErr := c.owner.Template._parametersIndex.Lookup(paramValue.Name)
	if lookupErr != nil {
		return lookupErr
	}
	paramValue._param = resolved
	return nil
}
// addNonRequiredWarmupIfNeeded synthesizes a single warmup case covering all
// template parameters, but only when no cases were configured and every
// parameter is optional — encountering a required parameter aborts the
// synthesis entirely.
func (c *Cache) addNonRequiredWarmupIfNeeded() {
	if len(c.Warmup.Cases) > 0 {
		return
	}
	params := c.owner.Template.Parameters
	values := make([]*ParamValue, 0, len(params))
	for i := range params {
		if params[i].IsRequired() {
			return
		}
		values = append(values, &ParamValue{Name: params[i].Name, _param: params[i]})
	}
	if len(values) == 0 {
		return
	}
	c.Warmup.Cases = append(c.Warmup.Cases, &CacheParameters{Set: values})
}
// appendSelectors materializes the cartesian product of paramValues: for each
// combination it builds a Selector, applies every non-nil value to its
// parameter, and appends the resulting CacheInput. Index advancement works
// like an odometer over the value lists.
func (c *Cache) appendSelectors(set *CacheParameters, paramValues [][]interface{}, selectors *[]*CacheInput) error {
// Every parameter must offer at least one value, otherwise no combination exists.
for i, value := range paramValues {
if len(value) == 0 {
return fmt.Errorf("parameter %v is required but there was no data", set.Set[i].Name)
}
}
indexes := make([]int, len(paramValues))
if len(indexes) == 0 {
return nil
}
outer:
for {
selector := &Selector{}
selector.Parameters.Init(c.owner)
for i, possibleValues := range paramValues {
actualValue := possibleValues[indexes[i]]
// nil marks "parameter unset" (appended for optional params) — skip assignment.
if actualValue == nil {
continue
}
if err := set.Set[i]._param.Set(selector, actualValue); err != nil {
return err
}
}
*selectors = append(*selectors, c.NewInput(selector))
// Advance the rightmost index; on overflow reset it and carry left.
// Carrying past index 0 means all combinations have been produced.
for i := len(indexes) - 1; i >= 0; i-- {
if indexes[i] < len(paramValues[i])-1 {
indexes[i]++
break
} else {
if i == 0 {
break outer
}
indexes[i] = 0
}
}
}
return nil
}
// NewInput wraps a selector with this cache's warmup index-column settings.
// IndexMeta is only enabled when the owner view defines a meta template.
func (c *Cache) NewInput(selector *Selector) *CacheInput {
	wantsMeta := c.Warmup.IndexMeta || c.Warmup.IndexColumn != ""
	return &CacheInput{
		Selector:   selector,
		Column:     c.Warmup.IndexColumn,
		MetaColumn: c.Warmup.IndexColumn,
		IndexMeta:  wantsMeta && c.owner.Template.Meta != nil,
	}
}
// Unique returns the caches deduplicated by Name, keeping the first
// occurrence of each name and preserving the original order.
func (c Caches) Unique() []*Cache {
	if len(c) == 0 {
		return []*Cache{}
	}
	seen := make(map[string]bool, len(c))
	result := make([]*Cache, 0, len(c))
	for _, item := range c {
		if seen[item.Name] {
			continue
		}
		seen[item.Name] = true
		result = append(result, item)
	}
	return result
}
| Has | identifier_name |
cache.go | package view
import (
"context"
"encoding/json"
"fmt"
"github.com/viant/afs/option"
"github.com/viant/afs/url"
"github.com/viant/datly/converter"
"github.com/viant/datly/shared"
"github.com/viant/sqlx/io/read/cache"
"github.com/viant/sqlx/io/read/cache/aerospike"
"github.com/viant/sqlx/io/read/cache/afs"
rdata "github.com/viant/toolbox/data"
"strconv"
"strings"
"sync"
"time"
)
type (
// Cache configures view-level result caching: where entries live (Location),
// which backend serves them (Provider), entry lifetime, and the optional
// warmup that pre-populates the cache.
Cache struct {
shared.Reference
owner *View // view this cache belongs to; set during init
Name string `json:",omitempty" yaml:",omitempty"`
Location string // cache destination; may contain ${View...} template vars
Provider string // backend URL; "aerospike://host:port/namespace" selects aerospike, otherwise afs
TimeToLiveMs int // entry TTL in milliseconds; required
PartSize int `json:",omitempty"`
AerospikeConfig
Warmup *Warmup `json:",omitempty" yaml:",omitempty"`
newCache func() (cache.Cache, error) // lazily-built backend factory
_initialized bool
mux sync.Mutex
}
// Caches is a named collection of cache configurations.
Caches []*Cache
// AerospikeConfig carries aerospike client retry/timeout tuning.
// All *InMs fields are expressed in milliseconds.
AerospikeConfig struct {
SleepBetweenRetriesInMs int `json:",omitempty"`
MaxRetries int `json:",omitempty"`
TotalTimeoutInMs int `json:",omitempty"`
SocketTimeoutInMs int `json:",omitempty"`
FailedRequestLimit int `json:",omitempty"`
ResetFailuresInMs int `json:",omitempty"`
}
// Warmup describes how to pre-populate the cache: the column to index by
// and the parameter combinations (Cases) to warm.
Warmup struct {
IndexColumn string
IndexMeta bool `json:",omitempty"`
Connector *Connector `json:",omitempty"`
Cases []*CacheParameters
}
// CacheParameters is one warmup case: a set of parameter values whose
// cartesian product is expanded into cache inputs.
CacheParameters struct {
Set []*ParamValue
}
// ParamValue holds the raw values for one named template parameter;
// _param is resolved lazily against the owner template.
ParamValue struct {
Name string
Values []interface{}
_param *Parameter
}
// CacheInput is a single warmup unit: a selector plus indexing options.
CacheInput struct {
Selector *Selector
Column string
MetaColumn string
IndexMeta bool
}
// CacheInputFn defers delivery of generated inputs (or the error) across
// a channel.
CacheInputFn func() ([]*CacheInput, error)
)
const (
defaultType = ""
afsType = "afs"
aerospikeType = "aerospike"
)
// Has reports whether a cache with the given name is registered.
func (c Caches) Has(name string) bool {
	for i := range c {
		if c[i].Name == name {
			return true
		}
	}
	return false
}
// Append registers the cache unless one with the same name already exists.
func (r *Caches) Append(cache *Cache) {
	if !r.Has(cache.Name) {
		*r = append(*r, cache)
	}
}
// init resolves inheritance, validates the mandatory Location/TimeToLiveMs
// settings, builds the backend factory and initializes warmup. It is
// idempotent via _initialized.
// NOTE(review): _initialized is set before the work completes — presumably to
// break recursion when a Ref chain points back to this cache; confirm. A
// failed init therefore never re-runs.
func (c *Cache) init(ctx context.Context, resource *Resource, aView *View) error {
if c._initialized {
return nil
}
c._initialized = true
c.owner = aView
// aView may be nil when initializing a standalone provider (see inheritIfNeeded).
var viewName string
if aView != nil {
viewName = aView.Name
}
if err := c.inheritIfNeeded(ctx, resource, aView); err != nil {
return err
}
if c.Location == "" {
return fmt.Errorf("View %v cache Location can't be empty", viewName)
}
if c.TimeToLiveMs == 0 {
return fmt.Errorf("View %v cache TimeToLiveMs can't be empty", viewName)
}
if err := c.ensureCacheClient(aView, viewName); err != nil {
return err
}
if err := c.initWarmup(ctx, resource); err != nil {
return err
}
return nil
}
// ensureCacheClient lazily builds the newCache factory; it is a no-op when a
// factory already exists or when there is no owning view to derive it from.
func (c *Cache) ensureCacheClient(aView *View, viewName string) error {
	if c.newCache != nil || aView == nil {
		return nil
	}
	service, err := c.cacheService(viewName, aView)
	if err != nil {
		return err
	}
	c.newCache = service
	return nil
}
// cacheService picks the cache backend based on the Provider scheme:
// "aerospike" gets a dedicated client; anything else falls back to an afs
// (file-system/object-store) cache keyed by the expanded location. Returns
// (nil, nil) when the view has no name to key the afs cache by.
func (c *Cache) cacheService(name string, aView *View) (func() (cache.Cache, error), error) {
	if url.Scheme(c.Provider, "") == aerospikeType {
		return c.aerospikeCache(aView)
	}
	if aView.Name == "" {
		return nil, nil
	}
	location, err := c.expandLocation(aView)
	if err != nil {
		return nil, err
	}
	ttl := time.Duration(c.TimeToLiveMs) * time.Millisecond
	service, err := afs.NewCache(location, ttl, aView.Name, option.NewStream(c.PartSize, 0))
	if err != nil {
		return nil, err
	}
	return func() (cache.Cache, error) { return service, nil }, nil
}
// aerospikeCache builds a cache.Cache factory backed by aerospike. Provider
// carries the connection info ("aerospike://host:port/namespace") and the
// expanded Location is used as the set name. The returned closure resolves a
// pooled client on each invocation.
func (c *Cache) aerospikeCache(aView *View) (func() (cache.Cache, error), error) {
	if c.Location == "" {
		return nil, fmt.Errorf("aerospike cache SetName cannot be empty")
	}
	host, port, namespace, err := c.split(c.Provider)
	if err != nil {
		return nil, err
	}
	clientProvider := aClientPool.Client(host, port)
	expanded, err := c.expandLocation(aView)
	if err != nil {
		return nil, err
	}
	timeoutConfig := &aerospike.TimeoutConfig{
		MaxRetries:            c.AerospikeConfig.MaxRetries,
		TotalTimeoutMs:        c.AerospikeConfig.TotalTimeoutInMs,
		SleepBetweenRetriesMs: c.SleepBetweenRetriesInMs,
	}
	var resetTimeout *time.Duration
	if c.AerospikeConfig.ResetFailuresInMs != 0 {
		// ResetFailuresInMs is expressed in milliseconds; the original code
		// passed the raw value as time.Duration, i.e. nanoseconds, making the
		// failure-reset window ~10^6 times shorter than configured.
		resetDuration := time.Duration(c.AerospikeConfig.ResetFailuresInMs) * time.Millisecond
		resetTimeout = &resetDuration
	}
	failureHandler := aerospike.NewFailureHandler(int64(c.AerospikeConfig.FailedRequestLimit), resetTimeout)
	return func() (cache.Cache, error) {
		client, err := clientProvider()
		if err != nil {
			return nil, err
		}
		// TTL is converted from milliseconds to whole seconds for aerospike.
		return aerospike.New(namespace, expanded, client, uint32(c.TimeToLiveMs/1000), timeoutConfig, failureHandler)
	}, nil
}
func (c *Cache) expandLocation(aView *View) (string, error) {
viewParam := AsViewParam(aView, nil, nil)
asBytes, err := json.Marshal(viewParam)
if err != nil {
return "", err
}
locationMap := &rdata.Map{}
viewMap := map[string]interface{}{}
if err = json.Unmarshal(asBytes, &viewMap); err != nil {
return "", err
}
locationMap.Put("View", viewMap)
expanded := locationMap.ExpandAsText(c.Location)
return expanded, nil
}
func (c *Cache) Service() (cache.Cache, error) {
return c.newCache()
}
func (c *Cache) split(location string) (host string, port int, namespace string, err error) |
func (c *Cache) unsupportedLocationFormat(location string) error {
return fmt.Errorf("unsupported location format: %v, supported location format: [protocol][hostname]:[port]/[namespace]", location)
}
func (c *Cache) inheritIfNeeded(ctx context.Context, resource *Resource, aView *View) error {
if c.Ref == "" {
return nil
}
source, ok := resource.CacheProvider(c.Ref)
if !ok {
return fmt.Errorf("not found cache provider with %v name", c.Ref)
}
if c.Warmup == nil && source.Warmup != nil {
warmupMarshal, err := json.Marshal(source.Warmup)
if err != nil {
return err
}
if err = json.Unmarshal(warmupMarshal, c.Warmup); err != nil {
return err
}
}
if err := source.init(ctx, resource, nil); err != nil {
return err
}
return c.inherit(source)
}
func (c *Cache) inherit(source *Cache) error {
if c.Provider == "" {
c.Provider = source.Provider
}
if c.PartSize == 0 {
c.PartSize = source.PartSize
}
if c.Location == "" {
c.Location = source.Location
}
if c.TimeToLiveMs == 0 {
c.TimeToLiveMs = source.TimeToLiveMs
}
return nil
}
func (c *Cache) GenerateCacheInput(ctx context.Context) ([]*CacheInput, error) {
var cacheInputPermutations []*CacheInput
chanSize := len(c.Warmup.Cases)
selectorChan := make(chan CacheInputFn, chanSize)
if chanSize == 0 {
close(selectorChan)
return []*CacheInput{
c.NewInput(NewSelector()),
}, nil
}
for i := range c.Warmup.Cases {
go c.generateDatasetSelectorsChan(ctx, selectorChan, c.Warmup.Cases[i])
}
counter := 0
for selectorFn := range selectorChan {
selectors, err := selectorFn()
if err != nil {
return nil, err
}
cacheInputPermutations = append(cacheInputPermutations, selectors...)
counter++
if counter == chanSize {
close(selectorChan)
}
}
return cacheInputPermutations, nil
}
func (c *Cache) generateDatasetSelectorsChan(ctx context.Context, selectorChan chan CacheInputFn, dataSet *CacheParameters) {
selectors, err := c.generateDatasetSelectorsErr(ctx, dataSet)
selectorChan <- func() ([]*CacheInput, error) {
return selectors, err
}
}
func (c *Cache) generateDatasetSelectorsErr(ctx context.Context, set *CacheParameters) ([]*CacheInput, error) {
var availableValues [][]interface{}
for i := range set.Set {
paramValues, err := c.getParamValues(ctx, set.Set[i])
if err != nil {
return nil, err
}
availableValues = append(availableValues, paramValues)
}
var result []*CacheInput
if err := c.appendSelectors(set, availableValues, &result); err != nil {
return nil, err
}
return result, nil
}
func (c *Cache) getParamValues(ctx context.Context, paramValue *ParamValue) ([]interface{}, error) {
result := make([]interface{}, len(paramValue.Values), len(paramValue.Values)+1)
for i, value := range paramValue.Values {
marshal := fmt.Sprintf("%v", value)
converted, _, err := converter.Convert(marshal, paramValue._param.Schema.Type(), false, paramValue._param.DateFormat)
if err != nil {
return nil, err
}
result[i] = converted
}
if !paramValue._param.IsRequired() {
result = append(result, nil)
}
return result, nil
}
func (c *Cache) initWarmup(ctx context.Context, resource *Resource) error {
if c.owner == nil || c.Warmup == nil {
return nil
}
c.addNonRequiredWarmupIfNeeded()
_, ok := c.owner.ColumnByName(c.Warmup.IndexColumn)
if !ok && c.Warmup.IndexColumn != "" {
return fmt.Errorf("not found warmup column %v at View %v", c.Warmup, c.owner.Name)
}
for _, dataset := range c.Warmup.Cases {
for _, paramValue := range dataset.Set {
if err := c.ensureParam(paramValue); err != nil {
return err
}
}
}
if c.Warmup.Connector != nil {
if err := c.Warmup.Connector.Init(ctx, resource.GetConnectors()); err != nil {
return err
}
}
return nil
}
func (c *Cache) ensureParam(paramValue *ParamValue) error {
if paramValue._param != nil {
return nil
}
param, err := c.owner.Template._parametersIndex.Lookup(paramValue.Name)
if err != nil {
return err
}
paramValue._param = param
return nil
}
func (c *Cache) addNonRequiredWarmupIfNeeded() {
if len(c.Warmup.Cases) != 0 {
return
}
var values []*ParamValue
for i, parameter := range c.owner.Template.Parameters {
if parameter.IsRequired() {
return
}
values = append(values, &ParamValue{Name: parameter.Name, _param: c.owner.Template.Parameters[i]})
}
if len(values) == 0 {
return
}
c.Warmup.Cases = append(c.Warmup.Cases, &CacheParameters{
Set: values,
})
}
func (c *Cache) appendSelectors(set *CacheParameters, paramValues [][]interface{}, selectors *[]*CacheInput) error {
for i, value := range paramValues {
if len(value) == 0 {
return fmt.Errorf("parameter %v is required but there was no data", set.Set[i].Name)
}
}
indexes := make([]int, len(paramValues))
if len(indexes) == 0 {
return nil
}
outer:
for {
selector := &Selector{}
selector.Parameters.Init(c.owner)
for i, possibleValues := range paramValues {
actualValue := possibleValues[indexes[i]]
if actualValue == nil {
continue
}
if err := set.Set[i]._param.Set(selector, actualValue); err != nil {
return err
}
}
*selectors = append(*selectors, c.NewInput(selector))
for i := len(indexes) - 1; i >= 0; i-- {
if indexes[i] < len(paramValues[i])-1 {
indexes[i]++
break
} else {
if i == 0 {
break outer
}
indexes[i] = 0
}
}
}
return nil
}
func (c *Cache) NewInput(selector *Selector) *CacheInput {
return &CacheInput{
Selector: selector,
Column: c.Warmup.IndexColumn,
MetaColumn: c.Warmup.IndexColumn,
IndexMeta: (c.Warmup.IndexMeta || c.Warmup.IndexColumn != "") && c.owner.Template.Meta != nil,
}
}
func (c Caches) Unique() []*Cache {
if len(c) == 0 {
return []*Cache{}
}
var result []*Cache
var index = make(map[string]bool, len(c))
for i, item := range c {
if index[item.Name] {
continue
}
result = append(result, c[i])
index[item.Name] = true
}
return result
}
| {
actualScheme := url.Scheme(location, "")
hostPart, namespace := url.Split(location, actualScheme)
if namespace == "" {
return "", 0, "", c.unsupportedLocationFormat(location)
}
hostStart := 0
if actualScheme != "" {
hostStart = len(actualScheme) + 3
}
segments := strings.Split(hostPart[hostStart:len(hostPart)-1], ":")
if len(segments) != 2 {
return "", 0, "", c.unsupportedLocationFormat(location)
}
port, err = strconv.Atoi(segments[1])
if err != nil {
return "", 0, "", err
}
return segments[0], port, namespace, nil
} | identifier_body |
cache.go | package view
import (
"context"
"encoding/json"
"fmt"
"github.com/viant/afs/option"
"github.com/viant/afs/url"
"github.com/viant/datly/converter"
"github.com/viant/datly/shared"
"github.com/viant/sqlx/io/read/cache"
"github.com/viant/sqlx/io/read/cache/aerospike"
"github.com/viant/sqlx/io/read/cache/afs"
rdata "github.com/viant/toolbox/data"
"strconv"
"strings"
"sync"
"time"
)
type (
Cache struct {
shared.Reference
owner *View
Name string `json:",omitempty" yaml:",omitempty"`
Location string
Provider string
TimeToLiveMs int
PartSize int `json:",omitempty"`
AerospikeConfig
Warmup *Warmup `json:",omitempty" yaml:",omitempty"`
newCache func() (cache.Cache, error)
_initialized bool
mux sync.Mutex
}
Caches []*Cache
AerospikeConfig struct {
SleepBetweenRetriesInMs int `json:",omitempty"`
MaxRetries int `json:",omitempty"`
TotalTimeoutInMs int `json:",omitempty"`
SocketTimeoutInMs int `json:",omitempty"`
FailedRequestLimit int `json:",omitempty"`
ResetFailuresInMs int `json:",omitempty"`
}
Warmup struct {
IndexColumn string
IndexMeta bool `json:",omitempty"`
Connector *Connector `json:",omitempty"`
Cases []*CacheParameters
}
CacheParameters struct {
Set []*ParamValue | ParamValue struct {
Name string
Values []interface{}
_param *Parameter
}
CacheInput struct {
Selector *Selector
Column string
MetaColumn string
IndexMeta bool
}
CacheInputFn func() ([]*CacheInput, error)
)
const (
defaultType = ""
afsType = "afs"
aerospikeType = "aerospike"
)
func (c Caches) Has(name string) bool {
for _, candidate := range c {
if candidate.Name == name {
return true
}
}
return false
}
func (r *Caches) Append(cache *Cache) {
if r.Has(cache.Name) {
return
}
*r = append(*r, cache)
}
func (c *Cache) init(ctx context.Context, resource *Resource, aView *View) error {
if c._initialized {
return nil
}
c._initialized = true
c.owner = aView
var viewName string
if aView != nil {
viewName = aView.Name
}
if err := c.inheritIfNeeded(ctx, resource, aView); err != nil {
return err
}
if c.Location == "" {
return fmt.Errorf("View %v cache Location can't be empty", viewName)
}
if c.TimeToLiveMs == 0 {
return fmt.Errorf("View %v cache TimeToLiveMs can't be empty", viewName)
}
if err := c.ensureCacheClient(aView, viewName); err != nil {
return err
}
if err := c.initWarmup(ctx, resource); err != nil {
return err
}
return nil
}
func (c *Cache) ensureCacheClient(aView *View, viewName string) error {
if c.newCache != nil {
return nil
}
if aView == nil {
return nil
}
var err error
c.newCache, err = c.cacheService(viewName, aView)
if err != nil {
return err
}
return nil
}
func (c *Cache) cacheService(name string, aView *View) (func() (cache.Cache, error), error) {
scheme := url.Scheme(c.Provider, "")
switch scheme {
case aerospikeType:
return c.aerospikeCache(aView)
default:
if aView.Name == "" {
return nil, nil
}
expandedLoc, err := c.expandLocation(aView)
if err != nil {
return nil, err
}
afsCache, err := afs.NewCache(expandedLoc, time.Duration(c.TimeToLiveMs)*time.Millisecond, aView.Name, option.NewStream(c.PartSize, 0))
if err != nil {
return nil, err
}
return func() (cache.Cache, error) {
return afsCache, nil
}, nil
}
}
func (c *Cache) aerospikeCache(aView *View) (func() (cache.Cache, error), error) {
if c.Location == "" {
return nil, fmt.Errorf("aerospike cache SetName cannot be empty")
}
host, port, namespace, err := c.split(c.Provider)
if err != nil {
return nil, err
}
clientProvider := aClientPool.Client(host, port)
expanded, err := c.expandLocation(aView)
if err != nil {
return nil, err
}
timeoutConfig := &aerospike.TimeoutConfig{
MaxRetries: c.AerospikeConfig.MaxRetries,
TotalTimeoutMs: c.AerospikeConfig.TotalTimeoutInMs,
SleepBetweenRetriesMs: c.SleepBetweenRetriesInMs,
}
var resetTimout *time.Duration
if c.AerospikeConfig.ResetFailuresInMs != 0 {
resetDuration := time.Duration(c.AerospikeConfig.ResetFailuresInMs)
resetTimout = &resetDuration
}
failureHandler := aerospike.NewFailureHandler(int64(c.AerospikeConfig.FailedRequestLimit), resetTimout)
return func() (cache.Cache, error) {
client, err := clientProvider()
if err != nil {
return nil, err
}
return aerospike.New(namespace, expanded, client, uint32(c.TimeToLiveMs/1000), timeoutConfig, failureHandler)
}, nil
}
func (c *Cache) expandLocation(aView *View) (string, error) {
viewParam := AsViewParam(aView, nil, nil)
asBytes, err := json.Marshal(viewParam)
if err != nil {
return "", err
}
locationMap := &rdata.Map{}
viewMap := map[string]interface{}{}
if err = json.Unmarshal(asBytes, &viewMap); err != nil {
return "", err
}
locationMap.Put("View", viewMap)
expanded := locationMap.ExpandAsText(c.Location)
return expanded, nil
}
func (c *Cache) Service() (cache.Cache, error) {
return c.newCache()
}
func (c *Cache) split(location string) (host string, port int, namespace string, err error) {
actualScheme := url.Scheme(location, "")
hostPart, namespace := url.Split(location, actualScheme)
if namespace == "" {
return "", 0, "", c.unsupportedLocationFormat(location)
}
hostStart := 0
if actualScheme != "" {
hostStart = len(actualScheme) + 3
}
segments := strings.Split(hostPart[hostStart:len(hostPart)-1], ":")
if len(segments) != 2 {
return "", 0, "", c.unsupportedLocationFormat(location)
}
port, err = strconv.Atoi(segments[1])
if err != nil {
return "", 0, "", err
}
return segments[0], port, namespace, nil
}
func (c *Cache) unsupportedLocationFormat(location string) error {
return fmt.Errorf("unsupported location format: %v, supported location format: [protocol][hostname]:[port]/[namespace]", location)
}
func (c *Cache) inheritIfNeeded(ctx context.Context, resource *Resource, aView *View) error {
if c.Ref == "" {
return nil
}
source, ok := resource.CacheProvider(c.Ref)
if !ok {
return fmt.Errorf("not found cache provider with %v name", c.Ref)
}
if c.Warmup == nil && source.Warmup != nil {
warmupMarshal, err := json.Marshal(source.Warmup)
if err != nil {
return err
}
if err = json.Unmarshal(warmupMarshal, c.Warmup); err != nil {
return err
}
}
if err := source.init(ctx, resource, nil); err != nil {
return err
}
return c.inherit(source)
}
func (c *Cache) inherit(source *Cache) error {
if c.Provider == "" {
c.Provider = source.Provider
}
if c.PartSize == 0 {
c.PartSize = source.PartSize
}
if c.Location == "" {
c.Location = source.Location
}
if c.TimeToLiveMs == 0 {
c.TimeToLiveMs = source.TimeToLiveMs
}
return nil
}
func (c *Cache) GenerateCacheInput(ctx context.Context) ([]*CacheInput, error) {
var cacheInputPermutations []*CacheInput
chanSize := len(c.Warmup.Cases)
selectorChan := make(chan CacheInputFn, chanSize)
if chanSize == 0 {
close(selectorChan)
return []*CacheInput{
c.NewInput(NewSelector()),
}, nil
}
for i := range c.Warmup.Cases {
go c.generateDatasetSelectorsChan(ctx, selectorChan, c.Warmup.Cases[i])
}
counter := 0
for selectorFn := range selectorChan {
selectors, err := selectorFn()
if err != nil {
return nil, err
}
cacheInputPermutations = append(cacheInputPermutations, selectors...)
counter++
if counter == chanSize {
close(selectorChan)
}
}
return cacheInputPermutations, nil
}
func (c *Cache) generateDatasetSelectorsChan(ctx context.Context, selectorChan chan CacheInputFn, dataSet *CacheParameters) {
selectors, err := c.generateDatasetSelectorsErr(ctx, dataSet)
selectorChan <- func() ([]*CacheInput, error) {
return selectors, err
}
}
func (c *Cache) generateDatasetSelectorsErr(ctx context.Context, set *CacheParameters) ([]*CacheInput, error) {
var availableValues [][]interface{}
for i := range set.Set {
paramValues, err := c.getParamValues(ctx, set.Set[i])
if err != nil {
return nil, err
}
availableValues = append(availableValues, paramValues)
}
var result []*CacheInput
if err := c.appendSelectors(set, availableValues, &result); err != nil {
return nil, err
}
return result, nil
}
func (c *Cache) getParamValues(ctx context.Context, paramValue *ParamValue) ([]interface{}, error) {
result := make([]interface{}, len(paramValue.Values), len(paramValue.Values)+1)
for i, value := range paramValue.Values {
marshal := fmt.Sprintf("%v", value)
converted, _, err := converter.Convert(marshal, paramValue._param.Schema.Type(), false, paramValue._param.DateFormat)
if err != nil {
return nil, err
}
result[i] = converted
}
if !paramValue._param.IsRequired() {
result = append(result, nil)
}
return result, nil
}
func (c *Cache) initWarmup(ctx context.Context, resource *Resource) error {
if c.owner == nil || c.Warmup == nil {
return nil
}
c.addNonRequiredWarmupIfNeeded()
_, ok := c.owner.ColumnByName(c.Warmup.IndexColumn)
if !ok && c.Warmup.IndexColumn != "" {
return fmt.Errorf("not found warmup column %v at View %v", c.Warmup, c.owner.Name)
}
for _, dataset := range c.Warmup.Cases {
for _, paramValue := range dataset.Set {
if err := c.ensureParam(paramValue); err != nil {
return err
}
}
}
if c.Warmup.Connector != nil {
if err := c.Warmup.Connector.Init(ctx, resource.GetConnectors()); err != nil {
return err
}
}
return nil
}
func (c *Cache) ensureParam(paramValue *ParamValue) error {
if paramValue._param != nil {
return nil
}
param, err := c.owner.Template._parametersIndex.Lookup(paramValue.Name)
if err != nil {
return err
}
paramValue._param = param
return nil
}
func (c *Cache) addNonRequiredWarmupIfNeeded() {
if len(c.Warmup.Cases) != 0 {
return
}
var values []*ParamValue
for i, parameter := range c.owner.Template.Parameters {
if parameter.IsRequired() {
return
}
values = append(values, &ParamValue{Name: parameter.Name, _param: c.owner.Template.Parameters[i]})
}
if len(values) == 0 {
return
}
c.Warmup.Cases = append(c.Warmup.Cases, &CacheParameters{
Set: values,
})
}
func (c *Cache) appendSelectors(set *CacheParameters, paramValues [][]interface{}, selectors *[]*CacheInput) error {
for i, value := range paramValues {
if len(value) == 0 {
return fmt.Errorf("parameter %v is required but there was no data", set.Set[i].Name)
}
}
indexes := make([]int, len(paramValues))
if len(indexes) == 0 {
return nil
}
outer:
for {
selector := &Selector{}
selector.Parameters.Init(c.owner)
for i, possibleValues := range paramValues {
actualValue := possibleValues[indexes[i]]
if actualValue == nil {
continue
}
if err := set.Set[i]._param.Set(selector, actualValue); err != nil {
return err
}
}
*selectors = append(*selectors, c.NewInput(selector))
for i := len(indexes) - 1; i >= 0; i-- {
if indexes[i] < len(paramValues[i])-1 {
indexes[i]++
break
} else {
if i == 0 {
break outer
}
indexes[i] = 0
}
}
}
return nil
}
func (c *Cache) NewInput(selector *Selector) *CacheInput {
return &CacheInput{
Selector: selector,
Column: c.Warmup.IndexColumn,
MetaColumn: c.Warmup.IndexColumn,
IndexMeta: (c.Warmup.IndexMeta || c.Warmup.IndexColumn != "") && c.owner.Template.Meta != nil,
}
}
func (c Caches) Unique() []*Cache {
if len(c) == 0 {
return []*Cache{}
}
var result []*Cache
var index = make(map[string]bool, len(c))
for i, item := range c {
if index[item.Name] {
continue
}
result = append(result, c[i])
index[item.Name] = true
}
return result
} | }
| random_line_split |
cache.go | package view
import (
"context"
"encoding/json"
"fmt"
"github.com/viant/afs/option"
"github.com/viant/afs/url"
"github.com/viant/datly/converter"
"github.com/viant/datly/shared"
"github.com/viant/sqlx/io/read/cache"
"github.com/viant/sqlx/io/read/cache/aerospike"
"github.com/viant/sqlx/io/read/cache/afs"
rdata "github.com/viant/toolbox/data"
"strconv"
"strings"
"sync"
"time"
)
type (
Cache struct {
shared.Reference
owner *View
Name string `json:",omitempty" yaml:",omitempty"`
Location string
Provider string
TimeToLiveMs int
PartSize int `json:",omitempty"`
AerospikeConfig
Warmup *Warmup `json:",omitempty" yaml:",omitempty"`
newCache func() (cache.Cache, error)
_initialized bool
mux sync.Mutex
}
Caches []*Cache
AerospikeConfig struct {
SleepBetweenRetriesInMs int `json:",omitempty"`
MaxRetries int `json:",omitempty"`
TotalTimeoutInMs int `json:",omitempty"`
SocketTimeoutInMs int `json:",omitempty"`
FailedRequestLimit int `json:",omitempty"`
ResetFailuresInMs int `json:",omitempty"`
}
Warmup struct {
IndexColumn string
IndexMeta bool `json:",omitempty"`
Connector *Connector `json:",omitempty"`
Cases []*CacheParameters
}
CacheParameters struct {
Set []*ParamValue
}
ParamValue struct {
Name string
Values []interface{}
_param *Parameter
}
CacheInput struct {
Selector *Selector
Column string
MetaColumn string
IndexMeta bool
}
CacheInputFn func() ([]*CacheInput, error)
)
const (
defaultType = ""
afsType = "afs"
aerospikeType = "aerospike"
)
func (c Caches) Has(name string) bool {
for _, candidate := range c {
if candidate.Name == name {
return true
}
}
return false
}
func (r *Caches) Append(cache *Cache) {
if r.Has(cache.Name) {
return
}
*r = append(*r, cache)
}
func (c *Cache) init(ctx context.Context, resource *Resource, aView *View) error {
if c._initialized {
return nil
}
c._initialized = true
c.owner = aView
var viewName string
if aView != nil {
viewName = aView.Name
}
if err := c.inheritIfNeeded(ctx, resource, aView); err != nil {
return err
}
if c.Location == "" {
return fmt.Errorf("View %v cache Location can't be empty", viewName)
}
if c.TimeToLiveMs == 0 {
return fmt.Errorf("View %v cache TimeToLiveMs can't be empty", viewName)
}
if err := c.ensureCacheClient(aView, viewName); err != nil {
return err
}
if err := c.initWarmup(ctx, resource); err != nil {
return err
}
return nil
}
func (c *Cache) ensureCacheClient(aView *View, viewName string) error {
if c.newCache != nil {
return nil
}
if aView == nil {
return nil
}
var err error
c.newCache, err = c.cacheService(viewName, aView)
if err != nil {
return err
}
return nil
}
func (c *Cache) cacheService(name string, aView *View) (func() (cache.Cache, error), error) {
scheme := url.Scheme(c.Provider, "")
switch scheme {
case aerospikeType:
return c.aerospikeCache(aView)
default:
if aView.Name == "" {
return nil, nil
}
expandedLoc, err := c.expandLocation(aView)
if err != nil {
return nil, err
}
afsCache, err := afs.NewCache(expandedLoc, time.Duration(c.TimeToLiveMs)*time.Millisecond, aView.Name, option.NewStream(c.PartSize, 0))
if err != nil {
return nil, err
}
return func() (cache.Cache, error) {
return afsCache, nil
}, nil
}
}
func (c *Cache) aerospikeCache(aView *View) (func() (cache.Cache, error), error) {
if c.Location == "" {
return nil, fmt.Errorf("aerospike cache SetName cannot be empty")
}
host, port, namespace, err := c.split(c.Provider)
if err != nil |
clientProvider := aClientPool.Client(host, port)
expanded, err := c.expandLocation(aView)
if err != nil {
return nil, err
}
timeoutConfig := &aerospike.TimeoutConfig{
MaxRetries: c.AerospikeConfig.MaxRetries,
TotalTimeoutMs: c.AerospikeConfig.TotalTimeoutInMs,
SleepBetweenRetriesMs: c.SleepBetweenRetriesInMs,
}
var resetTimout *time.Duration
if c.AerospikeConfig.ResetFailuresInMs != 0 {
resetDuration := time.Duration(c.AerospikeConfig.ResetFailuresInMs)
resetTimout = &resetDuration
}
failureHandler := aerospike.NewFailureHandler(int64(c.AerospikeConfig.FailedRequestLimit), resetTimout)
return func() (cache.Cache, error) {
client, err := clientProvider()
if err != nil {
return nil, err
}
return aerospike.New(namespace, expanded, client, uint32(c.TimeToLiveMs/1000), timeoutConfig, failureHandler)
}, nil
}
func (c *Cache) expandLocation(aView *View) (string, error) {
viewParam := AsViewParam(aView, nil, nil)
asBytes, err := json.Marshal(viewParam)
if err != nil {
return "", err
}
locationMap := &rdata.Map{}
viewMap := map[string]interface{}{}
if err = json.Unmarshal(asBytes, &viewMap); err != nil {
return "", err
}
locationMap.Put("View", viewMap)
expanded := locationMap.ExpandAsText(c.Location)
return expanded, nil
}
func (c *Cache) Service() (cache.Cache, error) {
return c.newCache()
}
func (c *Cache) split(location string) (host string, port int, namespace string, err error) {
actualScheme := url.Scheme(location, "")
hostPart, namespace := url.Split(location, actualScheme)
if namespace == "" {
return "", 0, "", c.unsupportedLocationFormat(location)
}
hostStart := 0
if actualScheme != "" {
hostStart = len(actualScheme) + 3
}
segments := strings.Split(hostPart[hostStart:len(hostPart)-1], ":")
if len(segments) != 2 {
return "", 0, "", c.unsupportedLocationFormat(location)
}
port, err = strconv.Atoi(segments[1])
if err != nil {
return "", 0, "", err
}
return segments[0], port, namespace, nil
}
func (c *Cache) unsupportedLocationFormat(location string) error {
return fmt.Errorf("unsupported location format: %v, supported location format: [protocol][hostname]:[port]/[namespace]", location)
}
func (c *Cache) inheritIfNeeded(ctx context.Context, resource *Resource, aView *View) error {
if c.Ref == "" {
return nil
}
source, ok := resource.CacheProvider(c.Ref)
if !ok {
return fmt.Errorf("not found cache provider with %v name", c.Ref)
}
if c.Warmup == nil && source.Warmup != nil {
warmupMarshal, err := json.Marshal(source.Warmup)
if err != nil {
return err
}
if err = json.Unmarshal(warmupMarshal, c.Warmup); err != nil {
return err
}
}
if err := source.init(ctx, resource, nil); err != nil {
return err
}
return c.inherit(source)
}
func (c *Cache) inherit(source *Cache) error {
if c.Provider == "" {
c.Provider = source.Provider
}
if c.PartSize == 0 {
c.PartSize = source.PartSize
}
if c.Location == "" {
c.Location = source.Location
}
if c.TimeToLiveMs == 0 {
c.TimeToLiveMs = source.TimeToLiveMs
}
return nil
}
func (c *Cache) GenerateCacheInput(ctx context.Context) ([]*CacheInput, error) {
var cacheInputPermutations []*CacheInput
chanSize := len(c.Warmup.Cases)
selectorChan := make(chan CacheInputFn, chanSize)
if chanSize == 0 {
close(selectorChan)
return []*CacheInput{
c.NewInput(NewSelector()),
}, nil
}
for i := range c.Warmup.Cases {
go c.generateDatasetSelectorsChan(ctx, selectorChan, c.Warmup.Cases[i])
}
counter := 0
for selectorFn := range selectorChan {
selectors, err := selectorFn()
if err != nil {
return nil, err
}
cacheInputPermutations = append(cacheInputPermutations, selectors...)
counter++
if counter == chanSize {
close(selectorChan)
}
}
return cacheInputPermutations, nil
}
func (c *Cache) generateDatasetSelectorsChan(ctx context.Context, selectorChan chan CacheInputFn, dataSet *CacheParameters) {
selectors, err := c.generateDatasetSelectorsErr(ctx, dataSet)
selectorChan <- func() ([]*CacheInput, error) {
return selectors, err
}
}
func (c *Cache) generateDatasetSelectorsErr(ctx context.Context, set *CacheParameters) ([]*CacheInput, error) {
var availableValues [][]interface{}
for i := range set.Set {
paramValues, err := c.getParamValues(ctx, set.Set[i])
if err != nil {
return nil, err
}
availableValues = append(availableValues, paramValues)
}
var result []*CacheInput
if err := c.appendSelectors(set, availableValues, &result); err != nil {
return nil, err
}
return result, nil
}
func (c *Cache) getParamValues(ctx context.Context, paramValue *ParamValue) ([]interface{}, error) {
result := make([]interface{}, len(paramValue.Values), len(paramValue.Values)+1)
for i, value := range paramValue.Values {
marshal := fmt.Sprintf("%v", value)
converted, _, err := converter.Convert(marshal, paramValue._param.Schema.Type(), false, paramValue._param.DateFormat)
if err != nil {
return nil, err
}
result[i] = converted
}
if !paramValue._param.IsRequired() {
result = append(result, nil)
}
return result, nil
}
func (c *Cache) initWarmup(ctx context.Context, resource *Resource) error {
if c.owner == nil || c.Warmup == nil {
return nil
}
c.addNonRequiredWarmupIfNeeded()
_, ok := c.owner.ColumnByName(c.Warmup.IndexColumn)
if !ok && c.Warmup.IndexColumn != "" {
return fmt.Errorf("not found warmup column %v at View %v", c.Warmup, c.owner.Name)
}
for _, dataset := range c.Warmup.Cases {
for _, paramValue := range dataset.Set {
if err := c.ensureParam(paramValue); err != nil {
return err
}
}
}
if c.Warmup.Connector != nil {
if err := c.Warmup.Connector.Init(ctx, resource.GetConnectors()); err != nil {
return err
}
}
return nil
}
func (c *Cache) ensureParam(paramValue *ParamValue) error {
if paramValue._param != nil {
return nil
}
param, err := c.owner.Template._parametersIndex.Lookup(paramValue.Name)
if err != nil {
return err
}
paramValue._param = param
return nil
}
func (c *Cache) addNonRequiredWarmupIfNeeded() {
if len(c.Warmup.Cases) != 0 {
return
}
var values []*ParamValue
for i, parameter := range c.owner.Template.Parameters {
if parameter.IsRequired() {
return
}
values = append(values, &ParamValue{Name: parameter.Name, _param: c.owner.Template.Parameters[i]})
}
if len(values) == 0 {
return
}
c.Warmup.Cases = append(c.Warmup.Cases, &CacheParameters{
Set: values,
})
}
func (c *Cache) appendSelectors(set *CacheParameters, paramValues [][]interface{}, selectors *[]*CacheInput) error {
for i, value := range paramValues {
if len(value) == 0 {
return fmt.Errorf("parameter %v is required but there was no data", set.Set[i].Name)
}
}
indexes := make([]int, len(paramValues))
if len(indexes) == 0 {
return nil
}
outer:
for {
selector := &Selector{}
selector.Parameters.Init(c.owner)
for i, possibleValues := range paramValues {
actualValue := possibleValues[indexes[i]]
if actualValue == nil {
continue
}
if err := set.Set[i]._param.Set(selector, actualValue); err != nil {
return err
}
}
*selectors = append(*selectors, c.NewInput(selector))
for i := len(indexes) - 1; i >= 0; i-- {
if indexes[i] < len(paramValues[i])-1 {
indexes[i]++
break
} else {
if i == 0 {
break outer
}
indexes[i] = 0
}
}
}
return nil
}
func (c *Cache) NewInput(selector *Selector) *CacheInput {
return &CacheInput{
Selector: selector,
Column: c.Warmup.IndexColumn,
MetaColumn: c.Warmup.IndexColumn,
IndexMeta: (c.Warmup.IndexMeta || c.Warmup.IndexColumn != "") && c.owner.Template.Meta != nil,
}
}
func (c Caches) Unique() []*Cache {
if len(c) == 0 {
return []*Cache{}
}
var result []*Cache
var index = make(map[string]bool, len(c))
for i, item := range c {
if index[item.Name] {
continue
}
result = append(result, c[i])
index[item.Name] = true
}
return result
}
| {
return nil, err
} | conditional_block |
torch_MLE_link_pred.py | import torch
import os
from scipy.io import mmread
import torch.optim as optim
import torch.nn as nn
from Adjacency_matrix import Preprocessing
from torch_sparse import spspmm
import pandas as pd
import numpy as np
#Creating dataset
from blobs import *
from sklearn import metrics
os.chdir('Datasets/divorce/')
text_file = 'divorce.mtx'
#Loading data and making adjancency matrix
#raw_data = mmread(text_file)
#A = raw_data.todense()
#A = torch.tensor(A)
class LSM(nn.Module):
def __init__(self, A, input_size, latent_dim, sparse_i_idx, sparse_j_idx, count, sample_i_size, sample_j_size):
super(LSM, self).__init__()
self.A = A
self.input_size = input_size
self.latent_dim = latent_dim
self.beta = torch.nn.Parameter(torch.randn(self.input_size[0]))
self.gamma = torch.nn.Parameter(torch.randn(self.input_size[1]))
self.latent_zi = torch.nn.Parameter(torch.randn(self.input_size[0], self.latent_dim))
self.latent_zj = torch.nn.Parameter(torch.randn(self.input_size[1], self.latent_dim))
#Change sample weights for each partition
self.sampling_i_weights = torch.ones(input_size[0])
self.sampling_j_weights = torch.ones(input_size[1])
#Change sample sizes for each partition
self.sample_i_size = sample_i_size
self.sample_j_size = sample_j_size
self.sparse_i_idx = sparse_i_idx
self.sparse_j_idx = sparse_j_idx
self.count = count
self.z_dist = 0
self.Lambda = 0
def sample_network(self):
# USE torch_sparse lib i.e. : from torch_sparse import spspmm
# sample for bipartite network
sample_i_idx = torch.multinomial(self.sampling_i_weights, self.sample_i_size, replacement=False)
sample_j_idx = torch.multinomial(self.sampling_j_weights, self.sample_j_size, replacement=False)
# translate sampled indices w.r.t. to the full matrix, it is just a diagonal matrix
indices_i_translator = torch.cat([sample_i_idx.unsqueeze(0), sample_i_idx.unsqueeze(0)], 0)
indices_j_translator = torch.cat([sample_j_idx.unsqueeze(0), sample_j_idx.unsqueeze(0)], 0)
# adjacency matrix in edges format
edges = torch.cat([self.sparse_i_idx.unsqueeze(0), self.sparse_j_idx.unsqueeze(0)], 0)
# matrix multiplication B = Adjacency x Indices translator
# see spspmm function, it give a multiplication between two matrices
# indexC is the indices where we have non-zero values and valueC the actual values (in this case ones)
indexC, valueC = spspmm(edges, self.count.float(), indices_j_translator,
torch.ones(indices_j_translator.shape[1]), self.input_size[0], self.input_size[1],
self.input_size[1], coalesced=True)
# second matrix multiplication C = Indices translator x B, indexC returns where we have edges inside the sample
indexC, valueC = spspmm(indices_i_translator, torch.ones(indices_i_translator.shape[1]), indexC, valueC,
self.input_size[0], self.input_size[0], self.input_size[1], coalesced=True)
# edge row position
sparse_i_sample = indexC[0, :]
# edge column position
sparse_j_sample = indexC[1, :]
return sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample, valueC
def log_likelihood(self):
sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample, valueC = self.sample_network()
self.z_dist = (((torch.unsqueeze(self.latent_zi[sample_i_idx], 1) - self.latent_zj[sample_j_idx]+1e-06)**2).sum(-1))**0.5
bias_matrix = torch.unsqueeze(self.beta[sample_i_idx], 1) + self.gamma[sample_j_idx]
self.Lambda = bias_matrix - self.z_dist
z_dist_links = (((self.latent_zi[sparse_i_sample] - self.latent_zj[sparse_j_sample]+1e-06)**2).sum(-1))**0.5
bias_links = self.beta[sparse_i_sample] + self.gamma[sparse_j_sample]
log_Lambda_links = valueC*(bias_links - z_dist_links)
LL = log_Lambda_links.sum() - torch.sum(torch.exp(self.Lambda))
return LL
def link_prediction(self, A_test):
with torch.no_grad():
#Create indexes for test-set relationships
idx_test = torch.where(torch.isnan(A_test) == False)
#Distance measure (euclidian)
z_pdist_test = (((self.latent_zi[idx_test[0]] - self.latent_zj[idx_test[1]]+1e-06)**2).sum(-1))**0.5
#Add bias matrices
logit_u_test = -z_pdist_test + self.beta[idx_test[0]] + self.gamma[idx_test[1]]
#Get the rate
rate = torch.exp(logit_u_test)
#Create target (make sure its in the right order by indexing)
target = A_test[idx_test[0], idx_test[1]]
fpr, tpr, threshold = metrics.roc_curve(target.cpu().data.numpy(), rate.cpu().data.numpy())
#Determining AUC score and precision and recall
auc_score = metrics.roc_auc_score(target.cpu().data.numpy(), rate.cpu().data.numpy())
return auc_score, fpr, tpr
#Implementing test log likelihood without mini batching
def test_log_likelihood(self, A_test):
|
if __name__ == "__main__":
A = adj_m
#Lists to obtain values for AUC, FPR, TPR and loss
AUC_scores = []
tprs = []
base_fpr = np.linspace(0, 1, 101)
plt.figure(figsize=(5,5))
train_loss = []
test_loss = []
#Binarize data-set if True
binarized = False
link_pred = True
if binarized:
A[A > 0] = 1
A = torch.tensor(A)
for i in range(5):
np.random.seed(i)
torch.manual_seed(i)
#Sample test-set from multinomial distribution.
if link_pred:
A_shape = A.shape
num_samples = 400000
idx_i_test = torch.multinomial(input=torch.arange(0,float(A_shape[0])), num_samples=num_samples,
replacement=True)
idx_j_test = torch.multinomial(input=torch.arange(0, float(A_shape[1])), num_samples=num_samples,
replacement=True)
A_test = A.detach().clone()
A_test[:] = 0
A_test[idx_i_test,idx_j_test] = A[idx_i_test,idx_j_test]
A[idx_i_test,idx_j_test] = 0
#Get the counts (only on train data)
idx = torch.where((A > 0) & (torch.isnan(A) == False))
count = A[idx[0],idx[1]]
#Define the model with training data.
#Cross-val loop validating 5 seeds;
model = LSM(A=A, input_size=A.shape, latent_dim=2, sparse_i_idx= idx[0], sparse_j_idx=idx[1], count=count, sample_i_size = 1000, sample_j_size = 500)
#Deine the optimizer.
optimizer = optim.Adam(params=model.parameters(), lr=0.01)
cum_loss = []
cum_loss_test = []
#Run iterations.
iterations = 20000
for _ in range(iterations):
loss = -model.log_likelihood()
if link_pred:
loss_test = -model.test_log_likelihood(A_test) / num_samples
cum_loss_test.append(loss_test.item() / (A_shape[0]*A_shape[1] - num_samples))
print('Test loss at the', _, 'iteration:', loss_test.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
cum_loss.append(loss.item())
print('Loss at the',_,'iteration:',loss.item())
train_loss.append(cum_loss)
test_loss.append(cum_loss_test)
#Binary link-prediction enable and disable;
if binarized:
auc_score, fpr, tpr = model.link_prediction(A_test)
AUC_scores.append(auc_score)
plt.plot(fpr, tpr, 'b', alpha=0.15)
tpr = np.interp(base_fpr, fpr, tpr)
tpr[0] = 0.0
tprs.append(tpr)
#beta = model.beta.cpu().data.numpy()
#gamma = model.gamma.cpu().data.numpy()
#latent_zi = model.latent_zi.cpu().data.numpy()
#latent_zj = model.latent_zj.cpu().data.numpy()
#np.savetxt(f"beta_{i}_link_pred_binary.csv", beta, delimiter=",")
#np.savetxt(f"gamma_{i}_link_pred_binary.csv", gamma, delimiter=",")
#np.savetxt(f"latent_zi_{i}_link_pred_binary.csv", latent_zi, delimiter=",")
#np.savetxt(f"latent_zj_{i}_link_pred_binary.csv", latent_zj, delimiter=",")
#np.savetxt(f"fpr_{i}_link_pred_binary.csv", fpr, delimiter=",")
#np.savetxt(f"tpr_{i}_link_pred_binary.csv", tpr, delimiter=",")
#np.savetxt(f"cum_loss_{i}_link_pred_binary.csv", cum_loss, delimiter=",")
#np.savetxt(f"cum_loss_test_{i}_link_pred_binary.csv", cum_loss_test, delimiter=",")
#np.savetxt(f"AUC_{i}_link_pred_binary.csv", AUC_scores, delimiter=",")
#Plotting the average roc curve as a result of the cross-validation
tprs = np.array(tprs)
mean_tprs = tprs.mean(axis=0)
std = tprs.std(axis=0)
#USing standard deviation as error bars
tprs_upper = np.minimum(mean_tprs + std, 1)
tprs_lower = mean_tprs - std
plt.plot(base_fpr, mean_tprs, 'b', label='Mean ROC-curve')
plt.fill_between(base_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.3)
plt.plot([0, 1], [0, 1],'r--', label='Random classifier')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.axes().set_aspect('equal', 'datalim')
plt.grid()
plt.legend()
plt.show()
plt.savefig('Average_ROC_curve.png')
plt.clf()
#Plotting the average loss based on the cross validation
train_loss = np.array(train_loss)
test_loss = np.array(test_loss)
mean_train_loss = train_loss.mean(axis=0)
std_train_loss = train_loss.std(axis=0)
mean_train_loss_upr = mean_train_loss + std_train_loss
mean_train_loss_lwr = mean_train_loss - std_train_loss
mean_test_loss = test_loss.mean(axis=0)
std_test_loss = test_loss.std(axis=0)
mean_test_loss_upr = mean_test_loss + std_test_loss
mean_test_loss_lwr = mean_test_loss - std_test_loss
plt.plot(np.arange(iterations), mean_train_loss, 'b', label='Mean training loss')
plt.fill_between(np.arange(iterations), mean_train_loss_lwr, mean_train_loss_upr, color='b', alpha=0.3)
plt.plot(np.arange(iterations), mean_test_loss, 'r', label='Mean test loss')
plt.fill_between(np.arange(iterations), mean_test_loss_lwr, mean_test_loss_upr, color='r', alpha=0.3)
plt.xlim([0, iterations])
plt.ylabel('Loss')
plt.xlabel('Iterations')
plt.grid()
plt.legend()
plt.show()
plt.savefig('Average_loss.png') | with torch.no_grad():
idx_test = torch.where(torch.isnan(A_test) == False)
z_dist = (((self.latent_zi[idx_test[0]] - self.latent_zj[idx_test[1]] + 1e-06)**2).sum(-1))**0.5 #Unsqueeze eller ej?
bias_matrix = self.beta[idx_test[0]] + self.gamma[idx_test[1]]
Lambda = (bias_matrix - z_dist) * A_test[idx_test[0],idx_test[1]]
LL_test = (A_test[idx_test[0],idx_test[1]] * Lambda).sum() - torch.sum(torch.exp(Lambda))
return LL_test | identifier_body |
torch_MLE_link_pred.py | import torch
import os
from scipy.io import mmread
import torch.optim as optim
import torch.nn as nn
from Adjacency_matrix import Preprocessing
from torch_sparse import spspmm
import pandas as pd
import numpy as np
#Creating dataset
from blobs import *
from sklearn import metrics
os.chdir('Datasets/divorce/')
text_file = 'divorce.mtx'
#Loading data and making adjancency matrix
#raw_data = mmread(text_file)
#A = raw_data.todense()
#A = torch.tensor(A)
class LSM(nn.Module):
def __init__(self, A, input_size, latent_dim, sparse_i_idx, sparse_j_idx, count, sample_i_size, sample_j_size):
super(LSM, self).__init__()
self.A = A
self.input_size = input_size
self.latent_dim = latent_dim
self.beta = torch.nn.Parameter(torch.randn(self.input_size[0]))
self.gamma = torch.nn.Parameter(torch.randn(self.input_size[1]))
self.latent_zi = torch.nn.Parameter(torch.randn(self.input_size[0], self.latent_dim))
self.latent_zj = torch.nn.Parameter(torch.randn(self.input_size[1], self.latent_dim))
#Change sample weights for each partition
self.sampling_i_weights = torch.ones(input_size[0])
self.sampling_j_weights = torch.ones(input_size[1])
#Change sample sizes for each partition
self.sample_i_size = sample_i_size
self.sample_j_size = sample_j_size
self.sparse_i_idx = sparse_i_idx
self.sparse_j_idx = sparse_j_idx
self.count = count
self.z_dist = 0
self.Lambda = 0
def sample_network(self):
# USE torch_sparse lib i.e. : from torch_sparse import spspmm
# sample for bipartite network
sample_i_idx = torch.multinomial(self.sampling_i_weights, self.sample_i_size, replacement=False)
sample_j_idx = torch.multinomial(self.sampling_j_weights, self.sample_j_size, replacement=False)
# translate sampled indices w.r.t. to the full matrix, it is just a diagonal matrix
indices_i_translator = torch.cat([sample_i_idx.unsqueeze(0), sample_i_idx.unsqueeze(0)], 0)
indices_j_translator = torch.cat([sample_j_idx.unsqueeze(0), sample_j_idx.unsqueeze(0)], 0)
# adjacency matrix in edges format
edges = torch.cat([self.sparse_i_idx.unsqueeze(0), self.sparse_j_idx.unsqueeze(0)], 0)
# matrix multiplication B = Adjacency x Indices translator
# see spspmm function, it give a multiplication between two matrices
# indexC is the indices where we have non-zero values and valueC the actual values (in this case ones)
indexC, valueC = spspmm(edges, self.count.float(), indices_j_translator,
torch.ones(indices_j_translator.shape[1]), self.input_size[0], self.input_size[1],
self.input_size[1], coalesced=True)
# second matrix multiplication C = Indices translator x B, indexC returns where we have edges inside the sample
indexC, valueC = spspmm(indices_i_translator, torch.ones(indices_i_translator.shape[1]), indexC, valueC,
self.input_size[0], self.input_size[0], self.input_size[1], coalesced=True)
# edge row position
sparse_i_sample = indexC[0, :]
# edge column position
sparse_j_sample = indexC[1, :]
return sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample, valueC
def log_likelihood(self):
sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample, valueC = self.sample_network()
self.z_dist = (((torch.unsqueeze(self.latent_zi[sample_i_idx], 1) - self.latent_zj[sample_j_idx]+1e-06)**2).sum(-1))**0.5
bias_matrix = torch.unsqueeze(self.beta[sample_i_idx], 1) + self.gamma[sample_j_idx]
self.Lambda = bias_matrix - self.z_dist
z_dist_links = (((self.latent_zi[sparse_i_sample] - self.latent_zj[sparse_j_sample]+1e-06)**2).sum(-1))**0.5
bias_links = self.beta[sparse_i_sample] + self.gamma[sparse_j_sample]
log_Lambda_links = valueC*(bias_links - z_dist_links)
LL = log_Lambda_links.sum() - torch.sum(torch.exp(self.Lambda))
return LL
def link_prediction(self, A_test):
with torch.no_grad():
#Create indexes for test-set relationships
idx_test = torch.where(torch.isnan(A_test) == False)
#Distance measure (euclidian)
z_pdist_test = (((self.latent_zi[idx_test[0]] - self.latent_zj[idx_test[1]]+1e-06)**2).sum(-1))**0.5
#Add bias matrices
logit_u_test = -z_pdist_test + self.beta[idx_test[0]] + self.gamma[idx_test[1]]
#Get the rate
rate = torch.exp(logit_u_test)
#Create target (make sure its in the right order by indexing)
target = A_test[idx_test[0], idx_test[1]]
fpr, tpr, threshold = metrics.roc_curve(target.cpu().data.numpy(), rate.cpu().data.numpy())
#Determining AUC score and precision and recall
auc_score = metrics.roc_auc_score(target.cpu().data.numpy(), rate.cpu().data.numpy())
return auc_score, fpr, tpr
#Implementing test log likelihood without mini batching
def test_log_likelihood(self, A_test):
with torch.no_grad():
idx_test = torch.where(torch.isnan(A_test) == False)
z_dist = (((self.latent_zi[idx_test[0]] - self.latent_zj[idx_test[1]] + 1e-06)**2).sum(-1))**0.5 #Unsqueeze eller ej?
bias_matrix = self.beta[idx_test[0]] + self.gamma[idx_test[1]]
Lambda = (bias_matrix - z_dist) * A_test[idx_test[0],idx_test[1]]
LL_test = (A_test[idx_test[0],idx_test[1]] * Lambda).sum() - torch.sum(torch.exp(Lambda))
return LL_test
if __name__ == "__main__":
A = adj_m
#Lists to obtain values for AUC, FPR, TPR and loss
AUC_scores = []
tprs = []
base_fpr = np.linspace(0, 1, 101)
plt.figure(figsize=(5,5))
train_loss = []
test_loss = []
#Binarize data-set if True
binarized = False
link_pred = True
if binarized:
A[A > 0] = 1
A = torch.tensor(A)
for i in range(5):
|
#np.savetxt(f"AUC_{i}_link_pred_binary.csv", AUC_scores, delimiter=",")
#Plotting the average roc curve as a result of the cross-validation
tprs = np.array(tprs)
mean_tprs = tprs.mean(axis=0)
std = tprs.std(axis=0)
#USing standard deviation as error bars
tprs_upper = np.minimum(mean_tprs + std, 1)
tprs_lower = mean_tprs - std
plt.plot(base_fpr, mean_tprs, 'b', label='Mean ROC-curve')
plt.fill_between(base_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.3)
plt.plot([0, 1], [0, 1],'r--', label='Random classifier')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.axes().set_aspect('equal', 'datalim')
plt.grid()
plt.legend()
plt.show()
plt.savefig('Average_ROC_curve.png')
plt.clf()
#Plotting the average loss based on the cross validation
train_loss = np.array(train_loss)
test_loss = np.array(test_loss)
mean_train_loss = train_loss.mean(axis=0)
std_train_loss = train_loss.std(axis=0)
mean_train_loss_upr = mean_train_loss + std_train_loss
mean_train_loss_lwr = mean_train_loss - std_train_loss
mean_test_loss = test_loss.mean(axis=0)
std_test_loss = test_loss.std(axis=0)
mean_test_loss_upr = mean_test_loss + std_test_loss
mean_test_loss_lwr = mean_test_loss - std_test_loss
plt.plot(np.arange(iterations), mean_train_loss, 'b', label='Mean training loss')
plt.fill_between(np.arange(iterations), mean_train_loss_lwr, mean_train_loss_upr, color='b', alpha=0.3)
plt.plot(np.arange(iterations), mean_test_loss, 'r', label='Mean test loss')
plt.fill_between(np.arange(iterations), mean_test_loss_lwr, mean_test_loss_upr, color='r', alpha=0.3)
plt.xlim([0, iterations])
plt.ylabel('Loss')
plt.xlabel('Iterations')
plt.grid()
plt.legend()
plt.show()
plt.savefig('Average_loss.png') | np.random.seed(i)
torch.manual_seed(i)
#Sample test-set from multinomial distribution.
if link_pred:
A_shape = A.shape
num_samples = 400000
idx_i_test = torch.multinomial(input=torch.arange(0,float(A_shape[0])), num_samples=num_samples,
replacement=True)
idx_j_test = torch.multinomial(input=torch.arange(0, float(A_shape[1])), num_samples=num_samples,
replacement=True)
A_test = A.detach().clone()
A_test[:] = 0
A_test[idx_i_test,idx_j_test] = A[idx_i_test,idx_j_test]
A[idx_i_test,idx_j_test] = 0
#Get the counts (only on train data)
idx = torch.where((A > 0) & (torch.isnan(A) == False))
count = A[idx[0],idx[1]]
#Define the model with training data.
#Cross-val loop validating 5 seeds;
model = LSM(A=A, input_size=A.shape, latent_dim=2, sparse_i_idx= idx[0], sparse_j_idx=idx[1], count=count, sample_i_size = 1000, sample_j_size = 500)
#Deine the optimizer.
optimizer = optim.Adam(params=model.parameters(), lr=0.01)
cum_loss = []
cum_loss_test = []
#Run iterations.
iterations = 20000
for _ in range(iterations):
loss = -model.log_likelihood()
if link_pred:
loss_test = -model.test_log_likelihood(A_test) / num_samples
cum_loss_test.append(loss_test.item() / (A_shape[0]*A_shape[1] - num_samples))
print('Test loss at the', _, 'iteration:', loss_test.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
cum_loss.append(loss.item())
print('Loss at the',_,'iteration:',loss.item())
train_loss.append(cum_loss)
test_loss.append(cum_loss_test)
#Binary link-prediction enable and disable;
if binarized:
auc_score, fpr, tpr = model.link_prediction(A_test)
AUC_scores.append(auc_score)
plt.plot(fpr, tpr, 'b', alpha=0.15)
tpr = np.interp(base_fpr, fpr, tpr)
tpr[0] = 0.0
tprs.append(tpr)
#beta = model.beta.cpu().data.numpy()
#gamma = model.gamma.cpu().data.numpy()
#latent_zi = model.latent_zi.cpu().data.numpy()
#latent_zj = model.latent_zj.cpu().data.numpy()
#np.savetxt(f"beta_{i}_link_pred_binary.csv", beta, delimiter=",")
#np.savetxt(f"gamma_{i}_link_pred_binary.csv", gamma, delimiter=",")
#np.savetxt(f"latent_zi_{i}_link_pred_binary.csv", latent_zi, delimiter=",")
#np.savetxt(f"latent_zj_{i}_link_pred_binary.csv", latent_zj, delimiter=",")
#np.savetxt(f"fpr_{i}_link_pred_binary.csv", fpr, delimiter=",")
#np.savetxt(f"tpr_{i}_link_pred_binary.csv", tpr, delimiter=",")
#np.savetxt(f"cum_loss_{i}_link_pred_binary.csv", cum_loss, delimiter=",")
#np.savetxt(f"cum_loss_test_{i}_link_pred_binary.csv", cum_loss_test, delimiter=",") | conditional_block |
torch_MLE_link_pred.py | import torch
import os
from scipy.io import mmread
import torch.optim as optim
import torch.nn as nn
from Adjacency_matrix import Preprocessing
from torch_sparse import spspmm
import pandas as pd
import numpy as np
#Creating dataset
from blobs import *
from sklearn import metrics
os.chdir('Datasets/divorce/')
text_file = 'divorce.mtx'
#Loading data and making adjancency matrix
#raw_data = mmread(text_file)
#A = raw_data.todense()
#A = torch.tensor(A)
class LSM(nn.Module):
def __init__(self, A, input_size, latent_dim, sparse_i_idx, sparse_j_idx, count, sample_i_size, sample_j_size):
super(LSM, self).__init__()
self.A = A
self.input_size = input_size
self.latent_dim = latent_dim
self.beta = torch.nn.Parameter(torch.randn(self.input_size[0]))
self.gamma = torch.nn.Parameter(torch.randn(self.input_size[1]))
self.latent_zi = torch.nn.Parameter(torch.randn(self.input_size[0], self.latent_dim))
self.latent_zj = torch.nn.Parameter(torch.randn(self.input_size[1], self.latent_dim))
#Change sample weights for each partition
self.sampling_i_weights = torch.ones(input_size[0])
self.sampling_j_weights = torch.ones(input_size[1])
#Change sample sizes for each partition
self.sample_i_size = sample_i_size
self.sample_j_size = sample_j_size
self.sparse_i_idx = sparse_i_idx
self.sparse_j_idx = sparse_j_idx
self.count = count
self.z_dist = 0
self.Lambda = 0
def sample_network(self):
# USE torch_sparse lib i.e. : from torch_sparse import spspmm
# sample for bipartite network
sample_i_idx = torch.multinomial(self.sampling_i_weights, self.sample_i_size, replacement=False)
sample_j_idx = torch.multinomial(self.sampling_j_weights, self.sample_j_size, replacement=False)
# translate sampled indices w.r.t. to the full matrix, it is just a diagonal matrix
indices_i_translator = torch.cat([sample_i_idx.unsqueeze(0), sample_i_idx.unsqueeze(0)], 0)
indices_j_translator = torch.cat([sample_j_idx.unsqueeze(0), sample_j_idx.unsqueeze(0)], 0)
# adjacency matrix in edges format
edges = torch.cat([self.sparse_i_idx.unsqueeze(0), self.sparse_j_idx.unsqueeze(0)], 0)
# matrix multiplication B = Adjacency x Indices translator
# see spspmm function, it give a multiplication between two matrices
# indexC is the indices where we have non-zero values and valueC the actual values (in this case ones)
indexC, valueC = spspmm(edges, self.count.float(), indices_j_translator,
torch.ones(indices_j_translator.shape[1]), self.input_size[0], self.input_size[1],
self.input_size[1], coalesced=True)
# second matrix multiplication C = Indices translator x B, indexC returns where we have edges inside the sample
indexC, valueC = spspmm(indices_i_translator, torch.ones(indices_i_translator.shape[1]), indexC, valueC,
self.input_size[0], self.input_size[0], self.input_size[1], coalesced=True)
# edge row position
sparse_i_sample = indexC[0, :]
# edge column position
sparse_j_sample = indexC[1, :]
return sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample, valueC
def log_likelihood(self):
sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample, valueC = self.sample_network()
self.z_dist = (((torch.unsqueeze(self.latent_zi[sample_i_idx], 1) - self.latent_zj[sample_j_idx]+1e-06)**2).sum(-1))**0.5
bias_matrix = torch.unsqueeze(self.beta[sample_i_idx], 1) + self.gamma[sample_j_idx]
self.Lambda = bias_matrix - self.z_dist
z_dist_links = (((self.latent_zi[sparse_i_sample] - self.latent_zj[sparse_j_sample]+1e-06)**2).sum(-1))**0.5
bias_links = self.beta[sparse_i_sample] + self.gamma[sparse_j_sample]
log_Lambda_links = valueC*(bias_links - z_dist_links)
LL = log_Lambda_links.sum() - torch.sum(torch.exp(self.Lambda))
return LL
def link_prediction(self, A_test):
with torch.no_grad():
#Create indexes for test-set relationships
idx_test = torch.where(torch.isnan(A_test) == False)
#Distance measure (euclidian)
z_pdist_test = (((self.latent_zi[idx_test[0]] - self.latent_zj[idx_test[1]]+1e-06)**2).sum(-1))**0.5
#Add bias matrices
logit_u_test = -z_pdist_test + self.beta[idx_test[0]] + self.gamma[idx_test[1]]
#Get the rate
rate = torch.exp(logit_u_test)
#Create target (make sure its in the right order by indexing)
target = A_test[idx_test[0], idx_test[1]]
fpr, tpr, threshold = metrics.roc_curve(target.cpu().data.numpy(), rate.cpu().data.numpy())
#Determining AUC score and precision and recall
auc_score = metrics.roc_auc_score(target.cpu().data.numpy(), rate.cpu().data.numpy())
return auc_score, fpr, tpr
#Implementing test log likelihood without mini batching
def test_log_likelihood(self, A_test):
with torch.no_grad():
idx_test = torch.where(torch.isnan(A_test) == False)
z_dist = (((self.latent_zi[idx_test[0]] - self.latent_zj[idx_test[1]] + 1e-06)**2).sum(-1))**0.5 #Unsqueeze eller ej?
bias_matrix = self.beta[idx_test[0]] + self.gamma[idx_test[1]]
Lambda = (bias_matrix - z_dist) * A_test[idx_test[0],idx_test[1]]
LL_test = (A_test[idx_test[0],idx_test[1]] * Lambda).sum() - torch.sum(torch.exp(Lambda))
return LL_test
if __name__ == "__main__":
A = adj_m
#Lists to obtain values for AUC, FPR, TPR and loss
AUC_scores = []
tprs = []
base_fpr = np.linspace(0, 1, 101)
plt.figure(figsize=(5,5))
train_loss = []
test_loss = []
#Binarize data-set if True
binarized = False
link_pred = True
if binarized:
A[A > 0] = 1
A = torch.tensor(A)
for i in range(5):
np.random.seed(i)
torch.manual_seed(i)
#Sample test-set from multinomial distribution.
if link_pred:
A_shape = A.shape
num_samples = 400000
idx_i_test = torch.multinomial(input=torch.arange(0,float(A_shape[0])), num_samples=num_samples,
replacement=True)
idx_j_test = torch.multinomial(input=torch.arange(0, float(A_shape[1])), num_samples=num_samples,
replacement=True)
A_test = A.detach().clone()
A_test[:] = 0
A_test[idx_i_test,idx_j_test] = A[idx_i_test,idx_j_test]
A[idx_i_test,idx_j_test] = 0
#Get the counts (only on train data)
idx = torch.where((A > 0) & (torch.isnan(A) == False))
count = A[idx[0],idx[1]]
| #Define the model with training data.
#Cross-val loop validating 5 seeds;
model = LSM(A=A, input_size=A.shape, latent_dim=2, sparse_i_idx= idx[0], sparse_j_idx=idx[1], count=count, sample_i_size = 1000, sample_j_size = 500)
#Deine the optimizer.
optimizer = optim.Adam(params=model.parameters(), lr=0.01)
cum_loss = []
cum_loss_test = []
#Run iterations.
iterations = 20000
for _ in range(iterations):
loss = -model.log_likelihood()
if link_pred:
loss_test = -model.test_log_likelihood(A_test) / num_samples
cum_loss_test.append(loss_test.item() / (A_shape[0]*A_shape[1] - num_samples))
print('Test loss at the', _, 'iteration:', loss_test.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
cum_loss.append(loss.item())
print('Loss at the',_,'iteration:',loss.item())
train_loss.append(cum_loss)
test_loss.append(cum_loss_test)
#Binary link-prediction enable and disable;
if binarized:
auc_score, fpr, tpr = model.link_prediction(A_test)
AUC_scores.append(auc_score)
plt.plot(fpr, tpr, 'b', alpha=0.15)
tpr = np.interp(base_fpr, fpr, tpr)
tpr[0] = 0.0
tprs.append(tpr)
#beta = model.beta.cpu().data.numpy()
#gamma = model.gamma.cpu().data.numpy()
#latent_zi = model.latent_zi.cpu().data.numpy()
#latent_zj = model.latent_zj.cpu().data.numpy()
#np.savetxt(f"beta_{i}_link_pred_binary.csv", beta, delimiter=",")
#np.savetxt(f"gamma_{i}_link_pred_binary.csv", gamma, delimiter=",")
#np.savetxt(f"latent_zi_{i}_link_pred_binary.csv", latent_zi, delimiter=",")
#np.savetxt(f"latent_zj_{i}_link_pred_binary.csv", latent_zj, delimiter=",")
#np.savetxt(f"fpr_{i}_link_pred_binary.csv", fpr, delimiter=",")
#np.savetxt(f"tpr_{i}_link_pred_binary.csv", tpr, delimiter=",")
#np.savetxt(f"cum_loss_{i}_link_pred_binary.csv", cum_loss, delimiter=",")
#np.savetxt(f"cum_loss_test_{i}_link_pred_binary.csv", cum_loss_test, delimiter=",")
#np.savetxt(f"AUC_{i}_link_pred_binary.csv", AUC_scores, delimiter=",")
#Plotting the average roc curve as a result of the cross-validation
tprs = np.array(tprs)
mean_tprs = tprs.mean(axis=0)
std = tprs.std(axis=0)
#USing standard deviation as error bars
tprs_upper = np.minimum(mean_tprs + std, 1)
tprs_lower = mean_tprs - std
plt.plot(base_fpr, mean_tprs, 'b', label='Mean ROC-curve')
plt.fill_between(base_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.3)
plt.plot([0, 1], [0, 1],'r--', label='Random classifier')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.axes().set_aspect('equal', 'datalim')
plt.grid()
plt.legend()
plt.show()
plt.savefig('Average_ROC_curve.png')
plt.clf()
#Plotting the average loss based on the cross validation
train_loss = np.array(train_loss)
test_loss = np.array(test_loss)
mean_train_loss = train_loss.mean(axis=0)
std_train_loss = train_loss.std(axis=0)
mean_train_loss_upr = mean_train_loss + std_train_loss
mean_train_loss_lwr = mean_train_loss - std_train_loss
mean_test_loss = test_loss.mean(axis=0)
std_test_loss = test_loss.std(axis=0)
mean_test_loss_upr = mean_test_loss + std_test_loss
mean_test_loss_lwr = mean_test_loss - std_test_loss
plt.plot(np.arange(iterations), mean_train_loss, 'b', label='Mean training loss')
plt.fill_between(np.arange(iterations), mean_train_loss_lwr, mean_train_loss_upr, color='b', alpha=0.3)
plt.plot(np.arange(iterations), mean_test_loss, 'r', label='Mean test loss')
plt.fill_between(np.arange(iterations), mean_test_loss_lwr, mean_test_loss_upr, color='r', alpha=0.3)
plt.xlim([0, iterations])
plt.ylabel('Loss')
plt.xlabel('Iterations')
plt.grid()
plt.legend()
plt.show()
plt.savefig('Average_loss.png') | random_line_split | |
torch_MLE_link_pred.py | import torch
import os
from scipy.io import mmread
import torch.optim as optim
import torch.nn as nn
from Adjacency_matrix import Preprocessing
from torch_sparse import spspmm
import pandas as pd
import numpy as np
#Creating dataset
from blobs import *
from sklearn import metrics
os.chdir('Datasets/divorce/')
text_file = 'divorce.mtx'
#Loading data and making adjancency matrix
#raw_data = mmread(text_file)
#A = raw_data.todense()
#A = torch.tensor(A)
class LSM(nn.Module):
def __init__(self, A, input_size, latent_dim, sparse_i_idx, sparse_j_idx, count, sample_i_size, sample_j_size):
super(LSM, self).__init__()
self.A = A
self.input_size = input_size
self.latent_dim = latent_dim
self.beta = torch.nn.Parameter(torch.randn(self.input_size[0]))
self.gamma = torch.nn.Parameter(torch.randn(self.input_size[1]))
self.latent_zi = torch.nn.Parameter(torch.randn(self.input_size[0], self.latent_dim))
self.latent_zj = torch.nn.Parameter(torch.randn(self.input_size[1], self.latent_dim))
#Change sample weights for each partition
self.sampling_i_weights = torch.ones(input_size[0])
self.sampling_j_weights = torch.ones(input_size[1])
#Change sample sizes for each partition
self.sample_i_size = sample_i_size
self.sample_j_size = sample_j_size
self.sparse_i_idx = sparse_i_idx
self.sparse_j_idx = sparse_j_idx
self.count = count
self.z_dist = 0
self.Lambda = 0
def sample_network(self):
# USE torch_sparse lib i.e. : from torch_sparse import spspmm
# sample for bipartite network
sample_i_idx = torch.multinomial(self.sampling_i_weights, self.sample_i_size, replacement=False)
sample_j_idx = torch.multinomial(self.sampling_j_weights, self.sample_j_size, replacement=False)
# translate sampled indices w.r.t. to the full matrix, it is just a diagonal matrix
indices_i_translator = torch.cat([sample_i_idx.unsqueeze(0), sample_i_idx.unsqueeze(0)], 0)
indices_j_translator = torch.cat([sample_j_idx.unsqueeze(0), sample_j_idx.unsqueeze(0)], 0)
# adjacency matrix in edges format
edges = torch.cat([self.sparse_i_idx.unsqueeze(0), self.sparse_j_idx.unsqueeze(0)], 0)
# matrix multiplication B = Adjacency x Indices translator
# see spspmm function, it give a multiplication between two matrices
# indexC is the indices where we have non-zero values and valueC the actual values (in this case ones)
indexC, valueC = spspmm(edges, self.count.float(), indices_j_translator,
torch.ones(indices_j_translator.shape[1]), self.input_size[0], self.input_size[1],
self.input_size[1], coalesced=True)
# second matrix multiplication C = Indices translator x B, indexC returns where we have edges inside the sample
indexC, valueC = spspmm(indices_i_translator, torch.ones(indices_i_translator.shape[1]), indexC, valueC,
self.input_size[0], self.input_size[0], self.input_size[1], coalesced=True)
# edge row position
sparse_i_sample = indexC[0, :]
# edge column position
sparse_j_sample = indexC[1, :]
return sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample, valueC
def | (self):
sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample, valueC = self.sample_network()
self.z_dist = (((torch.unsqueeze(self.latent_zi[sample_i_idx], 1) - self.latent_zj[sample_j_idx]+1e-06)**2).sum(-1))**0.5
bias_matrix = torch.unsqueeze(self.beta[sample_i_idx], 1) + self.gamma[sample_j_idx]
self.Lambda = bias_matrix - self.z_dist
z_dist_links = (((self.latent_zi[sparse_i_sample] - self.latent_zj[sparse_j_sample]+1e-06)**2).sum(-1))**0.5
bias_links = self.beta[sparse_i_sample] + self.gamma[sparse_j_sample]
log_Lambda_links = valueC*(bias_links - z_dist_links)
LL = log_Lambda_links.sum() - torch.sum(torch.exp(self.Lambda))
return LL
def link_prediction(self, A_test):
with torch.no_grad():
#Create indexes for test-set relationships
idx_test = torch.where(torch.isnan(A_test) == False)
#Distance measure (euclidian)
z_pdist_test = (((self.latent_zi[idx_test[0]] - self.latent_zj[idx_test[1]]+1e-06)**2).sum(-1))**0.5
#Add bias matrices
logit_u_test = -z_pdist_test + self.beta[idx_test[0]] + self.gamma[idx_test[1]]
#Get the rate
rate = torch.exp(logit_u_test)
#Create target (make sure its in the right order by indexing)
target = A_test[idx_test[0], idx_test[1]]
fpr, tpr, threshold = metrics.roc_curve(target.cpu().data.numpy(), rate.cpu().data.numpy())
#Determining AUC score and precision and recall
auc_score = metrics.roc_auc_score(target.cpu().data.numpy(), rate.cpu().data.numpy())
return auc_score, fpr, tpr
#Implementing test log likelihood without mini batching
def test_log_likelihood(self, A_test):
with torch.no_grad():
idx_test = torch.where(torch.isnan(A_test) == False)
z_dist = (((self.latent_zi[idx_test[0]] - self.latent_zj[idx_test[1]] + 1e-06)**2).sum(-1))**0.5 #Unsqueeze eller ej?
bias_matrix = self.beta[idx_test[0]] + self.gamma[idx_test[1]]
Lambda = (bias_matrix - z_dist) * A_test[idx_test[0],idx_test[1]]
LL_test = (A_test[idx_test[0],idx_test[1]] * Lambda).sum() - torch.sum(torch.exp(Lambda))
return LL_test
if __name__ == "__main__":
A = adj_m
#Lists to obtain values for AUC, FPR, TPR and loss
AUC_scores = []
tprs = []
base_fpr = np.linspace(0, 1, 101)
plt.figure(figsize=(5,5))
train_loss = []
test_loss = []
#Binarize data-set if True
binarized = False
link_pred = True
if binarized:
A[A > 0] = 1
A = torch.tensor(A)
for i in range(5):
np.random.seed(i)
torch.manual_seed(i)
#Sample test-set from multinomial distribution.
if link_pred:
A_shape = A.shape
num_samples = 400000
idx_i_test = torch.multinomial(input=torch.arange(0,float(A_shape[0])), num_samples=num_samples,
replacement=True)
idx_j_test = torch.multinomial(input=torch.arange(0, float(A_shape[1])), num_samples=num_samples,
replacement=True)
A_test = A.detach().clone()
A_test[:] = 0
A_test[idx_i_test,idx_j_test] = A[idx_i_test,idx_j_test]
A[idx_i_test,idx_j_test] = 0
#Get the counts (only on train data)
idx = torch.where((A > 0) & (torch.isnan(A) == False))
count = A[idx[0],idx[1]]
#Define the model with training data.
#Cross-val loop validating 5 seeds;
model = LSM(A=A, input_size=A.shape, latent_dim=2, sparse_i_idx= idx[0], sparse_j_idx=idx[1], count=count, sample_i_size = 1000, sample_j_size = 500)
#Deine the optimizer.
optimizer = optim.Adam(params=model.parameters(), lr=0.01)
cum_loss = []
cum_loss_test = []
#Run iterations.
iterations = 20000
for _ in range(iterations):
loss = -model.log_likelihood()
if link_pred:
loss_test = -model.test_log_likelihood(A_test) / num_samples
cum_loss_test.append(loss_test.item() / (A_shape[0]*A_shape[1] - num_samples))
print('Test loss at the', _, 'iteration:', loss_test.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
cum_loss.append(loss.item())
print('Loss at the',_,'iteration:',loss.item())
train_loss.append(cum_loss)
test_loss.append(cum_loss_test)
#Binary link-prediction enable and disable;
if binarized:
auc_score, fpr, tpr = model.link_prediction(A_test)
AUC_scores.append(auc_score)
plt.plot(fpr, tpr, 'b', alpha=0.15)
tpr = np.interp(base_fpr, fpr, tpr)
tpr[0] = 0.0
tprs.append(tpr)
#beta = model.beta.cpu().data.numpy()
#gamma = model.gamma.cpu().data.numpy()
#latent_zi = model.latent_zi.cpu().data.numpy()
#latent_zj = model.latent_zj.cpu().data.numpy()
#np.savetxt(f"beta_{i}_link_pred_binary.csv", beta, delimiter=",")
#np.savetxt(f"gamma_{i}_link_pred_binary.csv", gamma, delimiter=",")
#np.savetxt(f"latent_zi_{i}_link_pred_binary.csv", latent_zi, delimiter=",")
#np.savetxt(f"latent_zj_{i}_link_pred_binary.csv", latent_zj, delimiter=",")
#np.savetxt(f"fpr_{i}_link_pred_binary.csv", fpr, delimiter=",")
#np.savetxt(f"tpr_{i}_link_pred_binary.csv", tpr, delimiter=",")
#np.savetxt(f"cum_loss_{i}_link_pred_binary.csv", cum_loss, delimiter=",")
#np.savetxt(f"cum_loss_test_{i}_link_pred_binary.csv", cum_loss_test, delimiter=",")
#np.savetxt(f"AUC_{i}_link_pred_binary.csv", AUC_scores, delimiter=",")
#Plotting the average roc curve as a result of the cross-validation
tprs = np.array(tprs)
mean_tprs = tprs.mean(axis=0)
std = tprs.std(axis=0)
#USing standard deviation as error bars
tprs_upper = np.minimum(mean_tprs + std, 1)
tprs_lower = mean_tprs - std
plt.plot(base_fpr, mean_tprs, 'b', label='Mean ROC-curve')
plt.fill_between(base_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.3)
plt.plot([0, 1], [0, 1],'r--', label='Random classifier')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.axes().set_aspect('equal', 'datalim')
plt.grid()
plt.legend()
plt.show()
plt.savefig('Average_ROC_curve.png')
plt.clf()
#Plotting the average loss based on the cross validation
train_loss = np.array(train_loss)
test_loss = np.array(test_loss)
mean_train_loss = train_loss.mean(axis=0)
std_train_loss = train_loss.std(axis=0)
mean_train_loss_upr = mean_train_loss + std_train_loss
mean_train_loss_lwr = mean_train_loss - std_train_loss
mean_test_loss = test_loss.mean(axis=0)
std_test_loss = test_loss.std(axis=0)
mean_test_loss_upr = mean_test_loss + std_test_loss
mean_test_loss_lwr = mean_test_loss - std_test_loss
plt.plot(np.arange(iterations), mean_train_loss, 'b', label='Mean training loss')
plt.fill_between(np.arange(iterations), mean_train_loss_lwr, mean_train_loss_upr, color='b', alpha=0.3)
plt.plot(np.arange(iterations), mean_test_loss, 'r', label='Mean test loss')
plt.fill_between(np.arange(iterations), mean_test_loss_lwr, mean_test_loss_upr, color='r', alpha=0.3)
plt.xlim([0, iterations])
plt.ylabel('Loss')
plt.xlabel('Iterations')
plt.grid()
plt.legend()
plt.show()
plt.savefig('Average_loss.png') | log_likelihood | identifier_name |
main.py | import os
import sys
from PIL import Image, ImageFont, ImageDraw
from .emoji_directory import INITIAL_UNICODE, UNICODE_TO_PATH
RGB = 'RGB'
RGB_WHITE = (255, 255, 255)
RGB_BLACK = (0, 0, 0)
RGBA = 'RGBA'
RGBA_WHITE = (255, 255, 255, 255)
RGBA_BLACK = (0, 0, 0, 255)
RGBA_TRANSPARENT = (0, 0, 0, 0)
ZERO = 0
NEGATIVE = -1
DEFAULT_FONT_SIZE = 72
DEFAULT_IMAGE_WIDTH = 1080
EMOJI = 4
FULL_WIDTH = 3
HALF_WIDTH = 1
EMOJI_IMG_SIZE = 72
class Emoji2Pic(object):
"""将带有emoji的文本绘制到图片上,返回 'PIL.Image.Image'。
Text with emoji draw to the image.return class 'PIL.Image.Image'
:param text: 文本内容
:param font: 字体文件路径
:param emoji_folder: emoji图片文件夹路径
:param width: 图片宽度(像素)
:param font_size: 文字大小(像素)
:param font_color: 文字颜色
:param color_mode: 图片底色模式
:param background_color: 图片底色
:param line_space: 行间距(像素)
:param left: 左边距(像素) left margins
:param right: 右边距(像素)
:param top: 上边距(像素)
:param bottom: 下边距(像素)
:param half_font: 半角字符字体路径
:param half_font_width: 半角字符字体宽度(像素)
:param half_font_offset: 半角字符纵轴偏移量(像素)
:param emoji_offset: emoji纵轴偏移量(像素)
:param progress_bar: 控制台输出进度条
:return:class 'PIL.Image.Image'
"""
def __init__(self, text, font, emoji_folder,
width=DEFAULT_IMAGE_WIDTH,
font_size=DEFAULT_FONT_SIZE,
font_color=RGB_BLACK,
color_mode=RGB,
background_color=RGB_WHITE,
line_space=DEFAULT_FONT_SIZE,
left=DEFAULT_FONT_SIZE,
right=DEFAULT_FONT_SIZE,
top=DEFAULT_FONT_SIZE,
bottom=ZERO,
half_font=None,
half_font_width=None,
half_font_offset=ZERO,
emoji_offset=ZERO,
progress_bar=True
):
self.text = str(text)
self.font = font
self.emoji_folder = emoji_folder
self.img_width = int(width)
self.font_size = int(font_size)
self.font_color = font_color
self.background_color_mode = color_mode
self.background_color = background_color
self.line_space = int(line_space)
self.margin_left = int(left)
self.margin_right = int(right)
self.margin_top = int(top)
self.margin_bottom = int(bottom)
self.half_font = half_font if half_font is not None else font
self.half_font_width = int(half_font_width) if half_font_width is not None else int(self.font_size / 2)
self.half_font_offset = half_font_offset
self.emoji_offset = int(emoji_offset)
self.need_progress_bar = progress_bar
self.x = ZERO
self.y = ZERO
self.progress_bar_count = ZERO
self.text_length = ZERO
self.paragraph_list = list()
self.img_list = list()
self.img = None
self.paragraph = None
self.char = None
self.char_next = None
self.char_index = None
self.char_kind = None
self.full_width_font_type = ImageFont.truetype(self.font, size=self.font_size)
self.half_font_type = ImageFont.truetype(self.half_font, size=self.font_size)
def split_paragraph(self):
"""
分割段落
Split paragraph
"""
self.paragraph_list = self.text.replace('\n\n', '\n \n').split('\n')
for paragraph in self.paragraph_list:
self.text_length += len(paragraph)
return
def make_blank_img(self, img_width=None, img_height=None):
"""
创建空白图片
Make a blank image
"""
if img_width is None:
img_width = self.img_width
if img_height is None:
img_height = self.font_size + self.line_space
img = Image.new(mode=self.background_color_mode,
size=(img_width, img_height),
color=self.background_color)
return img
def stdout_progress_bar(self):
"""
输出进度条
Progress bar
"""
self.progress_bar_count += 1
display_length = 50
percent_num = int(self.progress_bar_count / self.text_length * 100)
percent_length = int(self.progress_bar_count / self.text_length * display_length)
sys.stdout.write('\r')
sys.stdout.write(
'Drawing | [%s>%s] %s' % ('=' * percent_length,
' ' * (display_length - percent_length),
str(percent_num) + '%'))
sys.stdout.flush()
return
def draw_text(self):
"""
每个字符按坐标绘制
Each character is plotted by coordinates
"""
for paragraph in self.paragraph_list:
self.paragraph = paragraph
self.img = self.make_blank_img()
self.x = self.margin_left
self.y = ZERO
self.char_next = NEGATIVE
for index in range(len(paragraph)):
# 进度条
if self.need_progress_bar is True:
self.stdout_progress_bar()
# 绘制
self.char_index = index
if index >= self.char_next:
self.char = paragraph[index]
char_kind = self.classify_character()
if char_kind == HALF_WIDTH: # 半角字符
self.draw_character(half_width=True)
self.x += self.half_font_width
elif char_kind == FULL_WIDTH: # 全角字符
self.draw_character()
self.x += self.font_size
elif char_kind == EMOJI: # emoji
self.draw_emoji()
self.x += self.font_size
# 换行
if self.x > self.img_width - (
self.margin_right + self.font_size) or index >= len(paragraph) - 1:
self.img_list.append(self.img)
self.img = self.make_blank_img()
self.x = self.margin_left
self.y = ZERO
return
def classify_character(self):
"""字符分类
Character classification
"""
if self.char in INITIAL_UNICODE:
if u'\x2a' <= self.char <= u'\x39' \
and self.paragraph[self.char_index:self.char_index + 3] not in UNICODE_TO_PATH \
and self.paragraph[self.char_index:self.char_index + 2] not in UNICODE_TO_PATH:
return HALF_WIDTH # 半角字符
return EMOJI # emoji
elif u'\x20' <= self.char <= u'\x7e':
return HAL | font_type = self.half_font_type
y = self.y - self.half_font_offset
else:
font_type = self.full_width_font_type
y = self.y
ImageDraw.Draw(self.img).text(xy=(self.x, y),
text=self.char,
fill=self.font_color,
font=font_type)
return
def get_emoji_img(self):
"""
打开emoji图片
Open emoji image
"""
length_list = INITIAL_UNICODE[self.char]
emoji_unicode = None
for length in length_list:
emoji_unicode_temp = self.paragraph[self.char_index:self.char_index + length]
if emoji_unicode_temp in UNICODE_TO_PATH:
emoji_unicode = emoji_unicode_temp
self.char_next = self.char_index + length # 跳过字符
break
if emoji_unicode is None:
self.char_next = NEGATIVE
return None
emoji_file_name = UNICODE_TO_PATH.get(emoji_unicode)
if emoji_file_name is None:
self.char_next = NEGATIVE
return None
emoji_img = Image.open(os.path.join(self.emoji_folder, emoji_file_name))
return emoji_img
def draw_emoji(self):
"""
绘制emoji
Draw emoji
"""
emoji_img = self.get_emoji_img()
if emoji_img is None:
self.x -= self.font_size
return
# 更改尺寸
if self.font_size != EMOJI_IMG_SIZE:
emoji_img = emoji_img.resize((self.font_size, self.font_size), Image.ANTIALIAS)
# 分离通道
if emoji_img.mode == 'RGBA':
r, g, b, a = emoji_img.split() # 分离alpha通道 split alpha channel
elif emoji_img.mode == 'LA':
l, a = emoji_img.split()
else: # image.mode == 'P'
emoji_img = emoji_img.convert('RGBA')
r, g, b, a = emoji_img.split()
# 绘制
self.img.paste(emoji_img, (self.x, self.y + self.emoji_offset), mask=a)
return
def combine_img(self):
"""
合并图片
Merge image
"""
# 创建上边距图片 Create top margin image
img_top = self.make_blank_img(img_width=self.img_width, img_height=self.margin_top)
self.img_list.insert(0, img_top)
# 创建下边距图片 Create bottom margin image
img_bottom = self.make_blank_img(img_width=self.img_width, img_height=self.margin_bottom)
self.img_list.append(img_bottom)
background_height = ZERO
y = ZERO
for img in self.img_list:
background_height += img.size[1]
# 创建背景图片图片 Create background image
background_img = self.make_blank_img(img_width=self.img_width, img_height=background_height)
for img in self.img_list:
if self.background_color_mode == RGB:
background_img.paste(img, (ZERO, y))
y += img.size[1]
elif self.background_color_mode == RGBA:
r, g, b, a = img.split() # 分离alpha通道
background_img.paste(img, (ZERO, y), mask=a)
y += img.size[1]
return background_img
def make_img(self):
"""
Main program
"""
self.split_paragraph()
self.draw_text()
return self.combine_img()
| F_WIDTH # 半角字符
else:
return FULL_WIDTH # 全角字符
def draw_character(self, half_width=False):
"""
绘制文本
Draw character
"""
if self.char in ('\u200d', '\ufe0f', '\u20e3'):
self.x -= self.font_size
return
if half_width is True:
| conditional_block |
main.py | import os
import sys
from PIL import Image, ImageFont, ImageDraw
from .emoji_directory import INITIAL_UNICODE, UNICODE_TO_PATH
RGB = 'RGB'
RGB_WHITE = (255, 255, 255)
RGB_BLACK = (0, 0, 0)
RGBA = 'RGBA'
RGBA_WHITE = (255, 255, 255, 255)
RGBA_BLACK = (0, 0, 0, 255)
RGBA_TRANSPARENT = (0, 0, 0, 0)
ZERO = 0
NEGATIVE = -1
DEFAULT_FONT_SIZE = 72
DEFAULT_IMAGE_WIDTH = 1080
EMOJI = 4
FULL_WIDTH = 3
HALF_WIDTH = 1
EMOJI_IMG_SIZE = 72
class Emoji2Pic(object):
"""将带有emoji的文本绘制到图片上,返回 'PIL.Image.Image'。
Text with emoji draw to the image.return class 'PIL.Image.Image'
:param text: 文本内容
:param font: 字体文件路径
:param emoji_folder: emoji图片文件夹路径
:param width: 图片宽度(像素)
:param font_size: 文字大小(像素)
:param font_color: 文字颜色
:param color_mode: 图片底色模式
:param background_color: 图片底色
:param line_space: 行间距(像素)
:param left: 左边距(像素) left margins
:param right: 右边距(像素)
:param top: 上边距(像素)
:param bottom: 下边距(像素)
:param half_font: 半角字符字体路径
:param half_font_width: 半角字符字体宽度(像素)
:param half_font_offset: 半角字符纵轴偏移量(像素)
:param emoji_offset: emoji纵轴偏移量(像素)
:param progress_bar: 控制台输出进度条
:return:class 'PIL.Image.Image'
"""
def __init__(self, text, font, emoji_folder,
width=DEFAULT_IMAGE_WIDTH,
font_size=DEFAULT_FONT_SIZE,
font_color=RGB_BLACK,
color_mode=RGB,
background_color=RGB_WHITE,
line_space=DEFAULT_FONT_SIZE,
left=DEFAULT_FONT_SIZE,
right=DEFAULT_FONT_SIZE,
top=DEFAULT_FONT_SIZE,
bottom=ZERO,
half_font=None,
half_font_width=None,
half_font_offset=ZERO,
emoji_offset=ZERO,
progress_bar=True
):
self.text = str(text)
self.font = font
self.emoji_folder = emoji_folder
self.img_width = int(width)
self.font_size = int(font_size)
self.font_color = font_color
self.background_color_mode = color_mode
self.background_color = background_color
self.line_space = int(line_space)
self.margin_left = int(left)
self.margin_right = int(right)
self.margin_top = int(top)
self.margin_bottom = int(bottom)
self.half_font = half_font if half_font is not None else font
self.half_font_width = int(half_font_width) if half_font_width is not None else int(self.font_size / 2)
self.half_font_offset = half_font_offset
self.emoji_offset = int(emoji_offset)
self.need_progress_bar = progress_bar
self.x = ZERO
self.y = ZERO
self.progress_bar_count = ZERO
self.text_length = ZERO
self.paragraph_list = list()
self.img_list = list()
self.img = None
self.paragraph = None
self.char = None
self.char_next = None
self.char_index = None
self.char_kind = None
self.full_width_font_type = ImageFont.truetype(self.font, size=self.font_size)
self.half_font_type = ImageFont.truetype(self.half_font, size=self.font_size)
def split_paragraph(self):
"""
分割段落
Split paragraph
"""
self.paragraph_list = self.text.replace('\n\n', '\n \n').split('\n')
for paragraph in self.paragraph_list:
self.text_length += len(paragraph)
return
def make_blank_img(self, img_width=None, img_height=None):
"""
创建空白图片
Make a blank image
"""
if img_width is None:
img_width = self.img_width
if img_height is None:
img_height = self.font_size + self.line_space
img = Image.new(mode=self.background_color_mode,
size=(img_width, img_height),
color=self.background_color)
return img
def stdout_progress_bar(self):
"""
输出进度条
Progress bar
"""
self.progress_bar_count += 1
display_length = 50
percent_num = int(self.progress_bar_count / self.text_length * 100)
percent_length = int(self.progress_bar_count / self.text_length * display_length)
sys.stdout.write('\r')
sys.stdout.write(
'Drawing | [%s>%s] %s' % ('=' * percent_length,
' ' * (display_length - percent_length),
str(percent_num) + '%'))
sys.stdout.flush()
return
def draw_text(self):
"""
每个字符按坐标绘制
Each character is plotted by coordinates
"""
for paragraph in self.paragraph_list:
self.paragraph = paragraph
self.img = self.make_blank_img()
self.x = self.margin_left
self.y = ZERO
self.char_next = NEGATIVE
for index in range(len(paragraph)):
# 进度条
if self.need_progress_bar is True:
self.stdout_progress_bar()
# 绘制
self.char_index = index
if index >= self.char_next:
self.char = paragraph[index]
char_kind = self.classify_character()
if char_kind == HALF_WIDTH: # 半角字符
self.draw_character(half_width=True)
self.x += self.half_font_width
elif char_kind == FULL_WIDTH: # 全角字符
self.draw_character()
self.x += self.font_size
elif char_kind == EMOJI: # emoji
self.draw_emoji()
self.x += self.font_size
# 换行
if self.x > self.img_width - (
self.margin_right + self.font_size) or index >= len(paragraph) - 1:
self.img_list.append(self.img)
self.img = self.make_blank_img()
self.x = self.margin_left
self.y = ZERO
return
def classify_character(self):
"""字符分类
Character classification
"""
if self.char in INITIAL_UNICODE:
if u'\x2a' <= self.char <= u'\x39' \
and self.paragraph[self.char_index:self.char_index + 3] not in UNICODE_TO_PATH \
and self.paragraph[self.char_index:self.char_index + 2] not in UNICODE_TO_PATH:
return HALF_WIDTH # 半角字符
return EMOJI # emoji
elif u'\x20' <= self.char <= u'\x7e':
return HALF_WIDTH # 半角字符
else:
return FULL_WIDTH # 全角字符
def draw_character(self, half_width=False):
"""
绘制文本
Draw character
"""
if self.char in ('\u200d', '\ufe0f', '\u20e3'):
self.x -= self.font_size
return
if half_width is True: | else:
font_type = self.full_width_font_type
y = self.y
ImageDraw.Draw(self.img).text(xy=(self.x, y),
text=self.char,
fill=self.font_color,
font=font_type)
return
def get_emoji_img(self):
"""
打开emoji图片
Open emoji image
"""
length_list = INITIAL_UNICODE[self.char]
emoji_unicode = None
for length in length_list:
emoji_unicode_temp = self.paragraph[self.char_index:self.char_index + length]
if emoji_unicode_temp in UNICODE_TO_PATH:
emoji_unicode = emoji_unicode_temp
self.char_next = self.char_index + length # 跳过字符
break
if emoji_unicode is None:
self.char_next = NEGATIVE
return None
emoji_file_name = UNICODE_TO_PATH.get(emoji_unicode)
if emoji_file_name is None:
self.char_next = NEGATIVE
return None
emoji_img = Image.open(os.path.join(self.emoji_folder, emoji_file_name))
return emoji_img
def draw_emoji(self):
"""
绘制emoji
Draw emoji
"""
emoji_img = self.get_emoji_img()
if emoji_img is None:
self.x -= self.font_size
return
# 更改尺寸
if self.font_size != EMOJI_IMG_SIZE:
emoji_img = emoji_img.resize((self.font_size, self.font_size), Image.ANTIALIAS)
# 分离通道
if emoji_img.mode == 'RGBA':
r, g, b, a = emoji_img.split() # 分离alpha通道 split alpha channel
elif emoji_img.mode == 'LA':
l, a = emoji_img.split()
else: # image.mode == 'P'
emoji_img = emoji_img.convert('RGBA')
r, g, b, a = emoji_img.split()
# 绘制
self.img.paste(emoji_img, (self.x, self.y + self.emoji_offset), mask=a)
return
def combine_img(self):
"""
合并图片
Merge image
"""
# 创建上边距图片 Create top margin image
img_top = self.make_blank_img(img_width=self.img_width, img_height=self.margin_top)
self.img_list.insert(0, img_top)
# 创建下边距图片 Create bottom margin image
img_bottom = self.make_blank_img(img_width=self.img_width, img_height=self.margin_bottom)
self.img_list.append(img_bottom)
background_height = ZERO
y = ZERO
for img in self.img_list:
background_height += img.size[1]
# 创建背景图片图片 Create background image
background_img = self.make_blank_img(img_width=self.img_width, img_height=background_height)
for img in self.img_list:
if self.background_color_mode == RGB:
background_img.paste(img, (ZERO, y))
y += img.size[1]
elif self.background_color_mode == RGBA:
r, g, b, a = img.split() # 分离alpha通道
background_img.paste(img, (ZERO, y), mask=a)
y += img.size[1]
return background_img
def make_img(self):
"""
Main program
"""
self.split_paragraph()
self.draw_text()
return self.combine_img() | font_type = self.half_font_type
y = self.y - self.half_font_offset | random_line_split |
main.py | import os
import sys
from PIL import Image, ImageFont, ImageDraw
from .emoji_directory import INITIAL_UNICODE, UNICODE_TO_PATH
RGB = 'RGB'
RGB_WHITE = (255, 255, 255)
RGB_BLACK = (0, 0, 0)
RGBA = 'RGBA'
RGBA_WHITE = (255, 255, 255, 255)
RGBA_BLACK = (0, 0, 0, 255)
RGBA_TRANSPARENT = (0, 0, 0, 0)
ZERO = 0
NEGATIVE = -1
DEFAULT_FONT_SIZE = 72
DEFAULT_IMAGE_WIDTH = 1080
EMOJI = 4
FULL_WIDTH = 3
HALF_WIDTH = 1
EMOJI_IMG_SIZE = 72
class Emoji2Pic(object):
"""将带有emoji的文本绘制到图片上,返回 'PIL.Image.Image'。
Text with emoji draw to the image.return class 'PIL.Image.Image'
:param text: 文本内容
:param font: 字体文件路径
:param emoji_folder: emoji图片文件夹路径
:param width: 图片宽度(像素)
:param font_size: 文字大小(像素)
:param font_color: 文字颜色
:param color_mode: 图片底色模式
:param background_color: 图片底色
:param line_space: 行间距(像素)
:param left: 左边距(像素) left margins
:param right: 右边距(像素)
:param top: 上边距(像素)
:param bottom: 下边距(像素)
:param half_font: 半角字符字体路径
:param half_font_width: 半角字符字体宽度(像素)
:param half_font_offset: 半角字符纵轴偏移量(像素)
:param emoji_offset: emoji纵轴偏移量(像素)
:param progress_bar: 控制台输出进度条
:return:class 'PIL.Image.Image'
"""
def __init__(self, text, font, emoji_folder,
width=DEFAULT_IMAGE_WIDTH,
font_size=DEFAULT_FONT_SIZE,
font_color=RGB_BLACK,
color_mode=RGB,
background_color=RGB_WHITE,
line_space=DEFAULT_FONT_SIZE,
left=DEFAULT_FONT_SIZE,
right=DEFAULT_FONT_SIZE,
top=DEFAULT_FONT_SIZE,
bottom=ZERO,
half_font=None,
half_font_width=None,
half_font_offset=ZERO,
emoji_offset=ZERO,
progress_bar=True
):
self.text = str(text)
self.font = font
self.emoji_folder = emoji_folder
self.img_width = int(width)
self.font_size = int(font_size)
self.font_color = font_color
self.background_color_mode = color_mode
self.background_color = background_color
self.line_space = int(line_space)
self.margin_left = int(left)
self.margin_right = int(right)
self.margin_top = int(top)
self.margin_bottom = int(bottom)
self.half_font = half_font if half_font is not None else font
self.half_font_width = int(half_font_width) if half_font_width is not None else int(self.font_size / 2)
self.half_font_offset = half_font_offset
self.emoji_offset = int(emoji_offset)
self.need_progress_bar = progress_bar
self.x = ZERO
self.y = ZERO
self.progress_bar_count = ZERO
self.text_length = ZERO
self.paragraph_list = list()
self.img_list = list()
self.img = None
self.paragraph = None
self.char = None
self.char_next = None
self.char_index = None
self.char_kind = None
self.full_width_font_type = ImageFont.truetype(self.font, size=self.font_size)
self.half_font_type = ImageFont.truetype(self.half_font, size=self.font_size)
def split_paragraph(self):
"""
分割段落
Split paragraph
"""
self.paragraph_list = self.text.replace('\n\n', '\n \n').split('\n')
for paragraph in self.paragraph_list:
self.text_length += len(paragraph)
return
def make_blank_img(self, img_width=None, img_height=None):
"""
创建空白图片
Make a blank image
"""
if img_width is None:
img_width = self.img_width
if img_height is None:
img_height = self.font_size + self.line_space
img = Image.new(mode=self.background_color_mode,
size=(img_width, img_height),
color=self.background_color)
return img
def stdout_progress_bar(self):
"""
输出进度条
Progress bar
"""
self.progress_bar_count += 1
display_length = 50
percent_num = int(self.progress_bar_count / self.text_length * 100)
percent_length = int(self.progress_bar_count / self.text_length * display_length)
sys.stdout.write('\r')
sys.stdout.write(
'Drawing | [%s>%s] %s' % ('=' * percent_length,
' ' * (display_length - percent_length),
str(percent_num) + '%'))
sys.stdout.flush()
return
def draw_text(self):
"""
每个字符按坐标绘制
Each character is plotted by coordinates
"""
for paragraph in self.paragraph_list:
self.paragraph = paragraph
self.img = self.make_blank_img()
self.x = self.margin_left
self.y = ZERO
self.char_next = NEGATIVE
for index in range(len(paragraph)):
# 进度条
if self.need_progress_bar is True:
self.stdout_progress_bar()
# 绘制
self.char_index = index
if index >= self.char_next:
self.char = paragraph[index]
char_kind = self.classify_character()
if char_kind == HALF_WIDTH: # 半角字符
self.draw_character(half_width=True)
self.x += self.half_font_width
elif char_kind == FULL_WIDTH: # 全角字符
self.draw_character()
self.x += self.font_size
elif char_kind == EMOJI: # emoji
self.draw_emoji()
self.x += self.font_size
# 换行
if self.x > self.img_width - (
self.margin_right + self.font_size) or index >= len(paragraph) - 1:
self.img_list.append(self.img)
self.img = self.make_blank_img()
self.x = self.margin_left
self.y = ZERO
return
def classify_character(self):
"""字符分类
Character classification
"""
if self.char in INITIAL_UNICODE:
if u'\x2a' <= self.char <= u'\x39' \
and self.paragraph[self.char_index:self.char_index + 3] not in UNICODE_TO_PATH \
and self.paragraph[self.char_index:self.char_index + 2] not in UNICODE_TO_PA | return HALF_WIDTH # 半角字符
return EMOJI # emoji
elif u'\x20' <= self.char <= u'\x7e':
return HALF_WIDTH # 半角字符
else:
return FULL_WIDTH # 全角字符
def draw_character(self, half_width=False):
"""
绘制文本
Draw character
"""
if self.char in ('\u200d', '\ufe0f', '\u20e3'):
self.x -= self.font_size
return
if half_width is True:
font_type = self.half_font_type
y = self.y - self.half_font_offset
else:
font_type = self.full_width_font_type
y = self.y
ImageDraw.Draw(self.img).text(xy=(self.x, y),
text=self.char,
fill=self.font_color,
font=font_type)
return
def get_emoji_img(self):
"""
打开emoji图片
Open emoji image
"""
length_list = INITIAL_UNICODE[self.char]
emoji_unicode = None
for length in length_list:
emoji_unicode_temp = self.paragraph[self.char_index:self.char_index + length]
if emoji_unicode_temp in UNICODE_TO_PATH:
emoji_unicode = emoji_unicode_temp
self.char_next = self.char_index + length # 跳过字符
break
if emoji_unicode is None:
self.char_next = NEGATIVE
return None
emoji_file_name = UNICODE_TO_PATH.get(emoji_unicode)
if emoji_file_name is None:
self.char_next = NEGATIVE
return None
emoji_img = Image.open(os.path.join(self.emoji_folder, emoji_file_name))
return emoji_img
def draw_emoji(self):
"""
绘制emoji
Draw emoji
"""
emoji_img = self.get_emoji_img()
if emoji_img is None:
self.x -= self.font_size
return
# 更改尺寸
if self.font_size != EMOJI_IMG_SIZE:
emoji_img = emoji_img.resize((self.font_size, self.font_size), Image.ANTIALIAS)
# 分离通道
if emoji_img.mode == 'RGBA':
r, g, b, a = emoji_img.split() # 分离alpha通道 split alpha channel
elif emoji_img.mode == 'LA':
l, a = emoji_img.split()
else: # image.mode == 'P'
emoji_img = emoji_img.convert('RGBA')
r, g, b, a = emoji_img.split()
# 绘制
self.img.paste(emoji_img, (self.x, self.y + self.emoji_offset), mask=a)
return
def combine_img(self):
"""
合并图片
Merge image
"""
# 创建上边距图片 Create top margin image
img_top = self.make_blank_img(img_width=self.img_width, img_height=self.margin_top)
self.img_list.insert(0, img_top)
# 创建下边距图片 Create bottom margin image
img_bottom = self.make_blank_img(img_width=self.img_width, img_height=self.margin_bottom)
self.img_list.append(img_bottom)
background_height = ZERO
y = ZERO
for img in self.img_list:
background_height += img.size[1]
# 创建背景图片图片 Create background image
background_img = self.make_blank_img(img_width=self.img_width, img_height=background_height)
for img in self.img_list:
if self.background_color_mode == RGB:
background_img.paste(img, (ZERO, y))
y += img.size[1]
elif self.background_color_mode == RGBA:
r, g, b, a = img.split() # 分离alpha通道
background_img.paste(img, (ZERO, y), mask=a)
y += img.size[1]
return background_img
def make_img(self):
"""
Main program
"""
self.split_paragraph()
self.draw_text()
return self.combine_img()
| TH:
| identifier_name |
main.py | import os
import sys
from PIL import Image, ImageFont, ImageDraw
from .emoji_directory import INITIAL_UNICODE, UNICODE_TO_PATH
RGB = 'RGB'
RGB_WHITE = (255, 255, 255)
RGB_BLACK = (0, 0, 0)
RGBA = 'RGBA'
RGBA_WHITE = (255, 255, 255, 255)
RGBA_BLACK = (0, 0, 0, 255)
RGBA_TRANSPARENT = (0, 0, 0, 0)
ZERO = 0
NEGATIVE = -1
DEFAULT_FONT_SIZE = 72
DEFAULT_IMAGE_WIDTH = 1080
EMOJI = 4
FULL_WIDTH = 3
HALF_WIDTH = 1
EMOJI_IMG_SIZE = 72
class Emoji2Pic(object):
"""将带有emoji的文本绘制到图片上,返回 'PIL.Image.Image'。
Text with emoji draw to the image.return class 'PIL.Image.Image'
:param text: 文本内容
:param font: 字体文件路径
:param emoji_folder: emoji图片文件夹路径
:param width: 图片宽度(像素)
:param font_size: 文字大小(像素)
:param font_color: 文字颜色
:param color_mode: 图片底色模式
:param background_color: 图片底色
:param line_space: 行间距(像素)
:param left: 左边距(像素) left margins
:param right: 右边距(像素)
:param top: 上边距(像素)
:param bottom: 下边距(像素)
:param half_font: 半角字符字体路径
:param half_font_width: 半角字符字体宽度(像素)
:param half_font_offset: 半角字符纵轴偏移量(像素)
:param emoji_offset: emoji纵轴偏移量(像素)
:param progress_bar: 控制台输出进度条
:return:class 'PIL.Image.Image'
"""
def __init__(self, text, font, emoji_folder,
width=DEFAULT_IMAGE_WIDTH,
font_size=DEFAULT_FONT_SIZE,
font_color=RGB_BLACK,
color_mode=RGB,
background_color=RGB_WHITE,
line_space=DEFAULT_FONT_SIZE,
left=DEFAULT_FONT_SIZE,
right=DEFAULT_FONT_SIZE,
top=DEFAULT_FONT_SIZE,
bottom=ZERO,
half_font=None,
half_font_width=None,
half_font_offset=ZERO,
emoji_offset=ZERO,
progress_bar=True
):
self.text = str(text)
self.font = font
self.emoji_folder = emoji_folder
self.img_width = int(width)
self.font_size = int(font_size)
self.font_color = font_color
self.background_color_mode = color_mode
self.background_color = background_color
self.line_space = int(line_space)
self.margin_left = int(left)
self.margin_right = int(right)
self.margin_top = int(top)
self.margin_bottom = int(bottom)
self.half_font = half_font if half_font is not None else font
self.half_font_width = int(half_font_width) if half_font_width is not None else int(self.font_size / 2)
self.half_font_offset = half_font_offset
self.emoji_offset = int(emoji_offset)
self.need_progress_bar = progress_bar
self.x = ZERO
self.y = ZERO
self.progress_bar_count = ZERO
self.text_length = ZERO
self.paragraph_list = list()
self.img_list = list()
self.img = None
self.paragraph = None
self.char = None
self.char_next = None
self.char_index = None
self.char_kind = None
self.full_width_font_type = ImageFont.truetype(self.font, size=self.font_size)
self.half_font_type = ImageFont.truetype(self.half_font, size=self.font_size)
def split_paragraph(self):
"""
分割段落
Split paragraph
"""
self.paragraph_list = self.text.replace('\n\n', '\n \n').split('\n')
for paragraph in self.paragraph_list:
self.text_length += len(paragraph)
return
def make_blank_img(self, img_width=None, img_height=N | mage.new(mode=self.background_color_mode,
size=(img_width, img_height),
color=self.background_color)
return img
def stdout_progress_bar(self):
"""
输出进度条
Progress bar
"""
self.progress_bar_count += 1
display_length = 50
percent_num = int(self.progress_bar_count / self.text_length * 100)
percent_length = int(self.progress_bar_count / self.text_length * display_length)
sys.stdout.write('\r')
sys.stdout.write(
'Drawing | [%s>%s] %s' % ('=' * percent_length,
' ' * (display_length - percent_length),
str(percent_num) + '%'))
sys.stdout.flush()
return
def draw_text(self):
"""
每个字符按坐标绘制
Each character is plotted by coordinates
"""
for paragraph in self.paragraph_list:
self.paragraph = paragraph
self.img = self.make_blank_img()
self.x = self.margin_left
self.y = ZERO
self.char_next = NEGATIVE
for index in range(len(paragraph)):
# 进度条
if self.need_progress_bar is True:
self.stdout_progress_bar()
# 绘制
self.char_index = index
if index >= self.char_next:
self.char = paragraph[index]
char_kind = self.classify_character()
if char_kind == HALF_WIDTH: # 半角字符
self.draw_character(half_width=True)
self.x += self.half_font_width
elif char_kind == FULL_WIDTH: # 全角字符
self.draw_character()
self.x += self.font_size
elif char_kind == EMOJI: # emoji
self.draw_emoji()
self.x += self.font_size
# 换行
if self.x > self.img_width - (
self.margin_right + self.font_size) or index >= len(paragraph) - 1:
self.img_list.append(self.img)
self.img = self.make_blank_img()
self.x = self.margin_left
self.y = ZERO
return
def classify_character(self):
"""字符分类
Character classification
"""
if self.char in INITIAL_UNICODE:
if u'\x2a' <= self.char <= u'\x39' \
and self.paragraph[self.char_index:self.char_index + 3] not in UNICODE_TO_PATH \
and self.paragraph[self.char_index:self.char_index + 2] not in UNICODE_TO_PATH:
return HALF_WIDTH # 半角字符
return EMOJI # emoji
elif u'\x20' <= self.char <= u'\x7e':
return HALF_WIDTH # 半角字符
else:
return FULL_WIDTH # 全角字符
def draw_character(self, half_width=False):
"""
绘制文本
Draw character
"""
if self.char in ('\u200d', '\ufe0f', '\u20e3'):
self.x -= self.font_size
return
if half_width is True:
font_type = self.half_font_type
y = self.y - self.half_font_offset
else:
font_type = self.full_width_font_type
y = self.y
ImageDraw.Draw(self.img).text(xy=(self.x, y),
text=self.char,
fill=self.font_color,
font=font_type)
return
def get_emoji_img(self):
"""
打开emoji图片
Open emoji image
"""
length_list = INITIAL_UNICODE[self.char]
emoji_unicode = None
for length in length_list:
emoji_unicode_temp = self.paragraph[self.char_index:self.char_index + length]
if emoji_unicode_temp in UNICODE_TO_PATH:
emoji_unicode = emoji_unicode_temp
self.char_next = self.char_index + length # 跳过字符
break
if emoji_unicode is None:
self.char_next = NEGATIVE
return None
emoji_file_name = UNICODE_TO_PATH.get(emoji_unicode)
if emoji_file_name is None:
self.char_next = NEGATIVE
return None
emoji_img = Image.open(os.path.join(self.emoji_folder, emoji_file_name))
return emoji_img
def draw_emoji(self):
"""
绘制emoji
Draw emoji
"""
emoji_img = self.get_emoji_img()
if emoji_img is None:
self.x -= self.font_size
return
# 更改尺寸
if self.font_size != EMOJI_IMG_SIZE:
emoji_img = emoji_img.resize((self.font_size, self.font_size), Image.ANTIALIAS)
# 分离通道
if emoji_img.mode == 'RGBA':
r, g, b, a = emoji_img.split() # 分离alpha通道 split alpha channel
elif emoji_img.mode == 'LA':
l, a = emoji_img.split()
else: # image.mode == 'P'
emoji_img = emoji_img.convert('RGBA')
r, g, b, a = emoji_img.split()
# 绘制
self.img.paste(emoji_img, (self.x, self.y + self.emoji_offset), mask=a)
return
def combine_img(self):
"""
合并图片
Merge image
"""
# 创建上边距图片 Create top margin image
img_top = self.make_blank_img(img_width=self.img_width, img_height=self.margin_top)
self.img_list.insert(0, img_top)
# 创建下边距图片 Create bottom margin image
img_bottom = self.make_blank_img(img_width=self.img_width, img_height=self.margin_bottom)
self.img_list.append(img_bottom)
background_height = ZERO
y = ZERO
for img in self.img_list:
background_height += img.size[1]
# 创建背景图片图片 Create background image
background_img = self.make_blank_img(img_width=self.img_width, img_height=background_height)
for img in self.img_list:
if self.background_color_mode == RGB:
background_img.paste(img, (ZERO, y))
y += img.size[1]
elif self.background_color_mode == RGBA:
r, g, b, a = img.split() # 分离alpha通道
background_img.paste(img, (ZERO, y), mask=a)
y += img.size[1]
return background_img
def make_img(self):
"""
Main program
"""
self.split_paragraph()
self.draw_text()
return self.combine_img()
| one):
"""
创建空白图片
Make a blank image
"""
if img_width is None:
img_width = self.img_width
if img_height is None:
img_height = self.font_size + self.line_space
img = I | identifier_body |
smallnorb_input_record.py | """Input utility functions for reading small norb dataset.
Handles reading from small norb dataset saved in binary original format. Scales and
normalizes the images as the preprocessing step. It can distort the images by
random cropping and contrast adjusting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
def _read_input(filename_queue):
"""Reads a single record and converts it to a tensor.
Each record consists the 3x32x32 image with one byte for the label.
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
image: a [32, 32, 3] float32 Tensor with the image data.
label: an int32 Tensor with the label in the range 0..9.
"""
label_bytes = 1
height = 32
depth = 3
image_bytes = height * height * depth
record_bytes = label_bytes + image_bytes
reader = tf.compat.v1.FixedLengthRecordReader(record_bytes=record_bytes)
_, byte_data = reader.read(filename_queue)
uint_data = tf.io.decode_raw(byte_data, tf.uint8)
label = tf.cast(tf.strided_slice(uint_data, [0], [label_bytes]), tf.int32)
label.set_shape([1])
depth_major = tf.reshape(
tf.strided_slice(uint_data, [label_bytes], [record_bytes]),
[depth, height, height])
image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)
return image, label
def _distort_resize(image, image_size):
"""Distorts input images for CIFAR training.
Adds standard distortions such as flipping, cropping and changing brightness
and contrast.
Args:
image: A float32 tensor with last dimmension equal to 3.
image_size: The output image size after cropping.
Returns:
distorted_image: A float32 tensor with shape [image_size, image_size, 3].
"""
distorted_image = tf.image.random_crop(image, [image_size, image_size, 3])
distorted_image = tf.image.random_flip_left_right(distorted_image)
distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
distorted_image = tf.image.random_contrast(
distorted_image, lower=0.2, upper=1.8)
distorted_image.set_shape([image_size, image_size, 3])
return distorted_image
def _batch_features(image, label, batch_size, split, image_size):
"""Constructs the batched feature dictionary.
Batches the images and labels accourding to the split. Shuffles the data only
if split is train. Formats the feature dictionary to be in the format required
by experiment.py.
Args:
image: A float32 tensor with shape [image_size, image_size, 3].
label: An int32 tensor with the label of the image.
batch_size: The number of data points in the output batch.
split: 'train' or 'test'.
image_size: The size of the input image.
Returns:
batched_features: A dictionary of the input data features.
"""
image = tf.transpose(a=image, perm=[2, 0, 1])
features = {
'images': image,
'labels': tf.one_hot(label, 5),
'recons_image': image,
'recons_label': label,
}
if split == 'train':
batched_features = tf.compat.v1.train.shuffle_batch(
features,
batch_size=batch_size,
num_threads=16,
capacity=10000 + 3 * batch_size,
min_after_dequeue=10000)
else:
batched_features = tf.compat.v1.train.batch(
features,
batch_size=batch_size,
num_threads=1,
capacity=10000 + 3 * batch_size)
batched_features['labels'] = tf.reshape(batched_features['labels'],
[batch_size, 5])
batched_features['recons_label'] = tf.reshape(
batched_features['recons_label'], [batch_size])
batched_features['height'] = image_size
batched_features['width'] = image_size
batched_features['depth'] = 3
batched_features['num_targets'] = 1
batched_features['num_classes'] = 5
return batched_features
import os
def _parser(serialized_example):
"""Parse smallNORB example from tfrecord.
Args:
serialized_example: serialized example from tfrecord
Returns:
img: image
lab: label
cat:
category
the instance in the category (0 to 9)
elv:
elevation
the elevation (0 to 8, which mean cameras are 30,
35,40,45,50,55,60,65,70 degrees from the horizontal respectively)
azi:
azimuth
the azimuth (0,2,4,...,34, multiply by 10 to get the azimuth in
degrees)
lit:
lighting
the lighting condition (0 to 5)
"""
features = tf.compat.v1.parse_single_example(
serialized_example,
features={
'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),
'label': tf.compat.v1.FixedLenFeature([], tf.int64),
'category': tf.compat.v1.FixedLenFeature([], tf.int64),
'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),
'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),
'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),
})
img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)
img = tf.reshape(img, [96, 96, 1])
img = tf.cast(img, tf.float32) # * (1. / 255) # left unnormalized
lab = tf.cast(features['label'], tf.int32)
cat = tf.cast(features['category'], tf.int32)
elv = tf.cast(features['elevation'], tf.int32)
azi = tf.cast(features['azimuth'], tf.int32)
lit = tf.cast(features['lighting'], tf.int32)
return img, lab, cat, elv, azi, lit
def _train_preprocess(img, lab, cat, elv, azi, lit):
"""Preprocessing for training.
Preprocessing from Hinton et al. (2018) "Matrix capsules with EM routing."
Hinton2018: "We downsample smallNORB to 48 × 48 pixels and normalize each
image to have zero mean and unit variance. During training, we randomly crop
32 × 32 patches and add random brightness and contrast to the cropped images.
During test, we crop a 32 × 32 patch from the center of the image and
achieve..."
Args:
img: this fn only works on the image
lab, cat, elv, azi, lit: allow these to pass through
Returns:
img: image processed
lab, cat, elv, azi, lit: allow these to pass through
"""
img = img / 255.
img = tf.compat.v1.image.resize_images(img, [48, 48])
#img = tf.image.per_image_standardization(img)
img = tf.compat.v1.random_crop(img, [32, 32, 1])
img = tf.image.random_brightness(img, max_delta=32. / 255.)
# original 0.5, 1.5
img = tf.image.random_contrast(img, lower=0.5, upper=1.5)
# Original
# image = tf.image.random_brightness(image, max_delta=32. / 255.)
# image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
# image = tf.image.resize_images(image, [48, 48])
# image = tf.random_crop(image, [32, 32, 1])
return img, lab, cat, elv, azi, lit
| Hinton2018: "We downsample smallNORB to 48 × 48 pixels and normalize each
image to have zero mean and unit variance. During training, we randomly crop
32 × 32 patches and add random brightness and contrast to the cropped
images. During test, we crop a 32 × 32 patch from the center of the image
and achieve..."
Args:
img: this fn only works on the image
lab, cat, elv, azi, lit: allow these to pass through
Returns:
img: image processed
lab, cat, elv, azi, lit: allow these to pass through
"""
img = img / 255.
img = tf.compat.v1.image.resize_images(img, [48, 48])
#img = tf.image.per_image_standardization(img)
img = tf.slice(img, [8, 8, 0], [32, 32, 1])
# Original
# image = tf.image.resize_images(image, [48, 48])
# image = tf.slice(image, [8, 8, 0], [32, 32, 1])
return img, lab, cat, elv, azi, lit
def input_fn(path, is_train: bool, batch_size = 64, epochs=100):
"""Input pipeline for smallNORB using tf.data.
Author:
Ashley Gritzman 15/11/2018
Args:
is_train:
Returns:
dataset: image tf.data.Dataset
"""
import re
if is_train:
CHUNK_RE = re.compile(r"train.*\.tfrecords")
else:
CHUNK_RE = re.compile(r"test.*\.tfrecords")
chunk_files = [os.path.join(path, fname)
for fname in os.listdir(path)
if CHUNK_RE.match(fname)]
# 1. create the dataset
dataset = tf.data.TFRecordDataset(chunk_files)
# 2. map with the actual work (preprocessing, augmentation…) using multiple
# parallel calls
dataset = dataset.map(_parser, num_parallel_calls=4)
if is_train:
dataset = dataset.map(_train_preprocess,
num_parallel_calls=4)
else:
dataset = dataset.map(_val_preprocess,
num_parallel_calls=4)
# 3. shuffle (with a big enough buffer size)
# In response to a question on OpenReview, Hinton et al. wrote the
# following:
# https://openreview.net/forum?id=HJWLfGWRb¬eId=rJgxonoNnm
# "We did not have any special ordering of training batches and we random
# shuffle. In terms of TF batch:
# capacity=2000 + 3 * batch_size, ensures a minimum amount of shuffling of
# examples. min_after_dequeue=2000."
capacity = 2000 + 3 * batch_size
dataset = dataset.shuffle(buffer_size=capacity)
# 4. batch
dataset = dataset.batch(batch_size, drop_remainder=True)
# 5. repeat
dataset = dataset.repeat(count=epochs)
# 6. prefetch
dataset = dataset.prefetch(1)
return dataset
def create_inputs_norb(path, is_train: bool,batch_size,epochs):
"""Get a batch from the input pipeline.
Author:
Ashley Gritzman 15/11/2018
Args:
is_train:
Returns:
img, lab, cat, elv, azi, lit:
"""
# Create batched dataset
dataset = input_fn(path, is_train,batch_size=batch_size, epochs=epochs)
# Create one-shot iterator
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
img, lab, cat, elv, azi, lit = iterator.get_next()
output_dict = {'image': img,
'label': lab,
'category': cat,
'elevation': elv,
'azimuth': azi,
'lighting': lit}
return output_dict
def inputs(data_dir,
batch_size,
split,
epochs=50):
dict = create_inputs_norb(data_dir, split == "train",batch_size=batch_size, epochs=epochs)
batched_features={}
batched_features['height'] = 32
batched_features['width'] = 32
batched_features['depth'] = 1
batched_features['num_targets'] = 1
batched_features['num_classes'] = 5
batched_features['recons_image'] = dict['image']
batched_features['recons_label'] = dict['label']
batched_features['images'] = dict['image']
batched_features['labels'] = tf.one_hot(dict['label'], 5)
return batched_features |
def _val_preprocess(img, lab, cat, elv, azi, lit):
"""Preprocessing for validation/testing.
Preprocessing from Hinton et al. (2018) "Matrix capsules with EM routing." | random_line_split |
smallnorb_input_record.py | """Input utility functions for reading small norb dataset.
Handles reading from small norb dataset saved in binary original format. Scales and
normalizes the images as the preprocessing step. It can distort the images by
random cropping and contrast adjusting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
def _read_input(filename_queue):
"""Reads a single record and converts it to a tensor.
Each record consists the 3x32x32 image with one byte for the label.
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
image: a [32, 32, 3] float32 Tensor with the image data.
label: an int32 Tensor with the label in the range 0..9.
"""
label_bytes = 1
height = 32
depth = 3
image_bytes = height * height * depth
record_bytes = label_bytes + image_bytes
reader = tf.compat.v1.FixedLengthRecordReader(record_bytes=record_bytes)
_, byte_data = reader.read(filename_queue)
uint_data = tf.io.decode_raw(byte_data, tf.uint8)
label = tf.cast(tf.strided_slice(uint_data, [0], [label_bytes]), tf.int32)
label.set_shape([1])
depth_major = tf.reshape(
tf.strided_slice(uint_data, [label_bytes], [record_bytes]),
[depth, height, height])
image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)
return image, label
def _distort_resize(image, image_size):
"""Distorts input images for CIFAR training.
Adds standard distortions such as flipping, cropping and changing brightness
and contrast.
Args:
image: A float32 tensor with last dimmension equal to 3.
image_size: The output image size after cropping.
Returns:
distorted_image: A float32 tensor with shape [image_size, image_size, 3].
"""
distorted_image = tf.image.random_crop(image, [image_size, image_size, 3])
distorted_image = tf.image.random_flip_left_right(distorted_image)
distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
distorted_image = tf.image.random_contrast(
distorted_image, lower=0.2, upper=1.8)
distorted_image.set_shape([image_size, image_size, 3])
return distorted_image
def _batch_features(image, label, batch_size, split, image_size):
"""Constructs the batched feature dictionary.
Batches the images and labels accourding to the split. Shuffles the data only
if split is train. Formats the feature dictionary to be in the format required
by experiment.py.
Args:
image: A float32 tensor with shape [image_size, image_size, 3].
label: An int32 tensor with the label of the image.
batch_size: The number of data points in the output batch.
split: 'train' or 'test'.
image_size: The size of the input image.
Returns:
batched_features: A dictionary of the input data features.
"""
image = tf.transpose(a=image, perm=[2, 0, 1])
features = {
'images': image,
'labels': tf.one_hot(label, 5),
'recons_image': image,
'recons_label': label,
}
if split == 'train':
batched_features = tf.compat.v1.train.shuffle_batch(
features,
batch_size=batch_size,
num_threads=16,
capacity=10000 + 3 * batch_size,
min_after_dequeue=10000)
else:
|
batched_features['labels'] = tf.reshape(batched_features['labels'],
[batch_size, 5])
batched_features['recons_label'] = tf.reshape(
batched_features['recons_label'], [batch_size])
batched_features['height'] = image_size
batched_features['width'] = image_size
batched_features['depth'] = 3
batched_features['num_targets'] = 1
batched_features['num_classes'] = 5
return batched_features
import os
def _parser(serialized_example):
"""Parse smallNORB example from tfrecord.
Args:
serialized_example: serialized example from tfrecord
Returns:
img: image
lab: label
cat:
category
the instance in the category (0 to 9)
elv:
elevation
the elevation (0 to 8, which mean cameras are 30,
35,40,45,50,55,60,65,70 degrees from the horizontal respectively)
azi:
azimuth
the azimuth (0,2,4,...,34, multiply by 10 to get the azimuth in
degrees)
lit:
lighting
the lighting condition (0 to 5)
"""
features = tf.compat.v1.parse_single_example(
serialized_example,
features={
'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),
'label': tf.compat.v1.FixedLenFeature([], tf.int64),
'category': tf.compat.v1.FixedLenFeature([], tf.int64),
'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),
'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),
'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),
})
img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)
img = tf.reshape(img, [96, 96, 1])
img = tf.cast(img, tf.float32) # * (1. / 255) # left unnormalized
lab = tf.cast(features['label'], tf.int32)
cat = tf.cast(features['category'], tf.int32)
elv = tf.cast(features['elevation'], tf.int32)
azi = tf.cast(features['azimuth'], tf.int32)
lit = tf.cast(features['lighting'], tf.int32)
return img, lab, cat, elv, azi, lit
def _train_preprocess(img, lab, cat, elv, azi, lit):
"""Preprocessing for training.
Preprocessing from Hinton et al. (2018) "Matrix capsules with EM routing."
Hinton2018: "We downsample smallNORB to 48 × 48 pixels and normalize each
image to have zero mean and unit variance. During training, we randomly crop
32 × 32 patches and add random brightness and contrast to the cropped images.
During test, we crop a 32 × 32 patch from the center of the image and
achieve..."
Args:
img: this fn only works on the image
lab, cat, elv, azi, lit: allow these to pass through
Returns:
img: image processed
lab, cat, elv, azi, lit: allow these to pass through
"""
img = img / 255.
img = tf.compat.v1.image.resize_images(img, [48, 48])
#img = tf.image.per_image_standardization(img)
img = tf.compat.v1.random_crop(img, [32, 32, 1])
img = tf.image.random_brightness(img, max_delta=32. / 255.)
# original 0.5, 1.5
img = tf.image.random_contrast(img, lower=0.5, upper=1.5)
# Original
# image = tf.image.random_brightness(image, max_delta=32. / 255.)
# image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
# image = tf.image.resize_images(image, [48, 48])
# image = tf.random_crop(image, [32, 32, 1])
return img, lab, cat, elv, azi, lit
def _val_preprocess(img, lab, cat, elv, azi, lit):
"""Preprocessing for validation/testing.
Preprocessing from Hinton et al. (2018) "Matrix capsules with EM routing."
Hinton2018: "We downsample smallNORB to 48 × 48 pixels and normalize each
image to have zero mean and unit variance. During training, we randomly crop
32 × 32 patches and add random brightness and contrast to the cropped
images. During test, we crop a 32 × 32 patch from the center of the image
and achieve..."
Args:
img: this fn only works on the image
lab, cat, elv, azi, lit: allow these to pass through
Returns:
img: image processed
lab, cat, elv, azi, lit: allow these to pass through
"""
img = img / 255.
img = tf.compat.v1.image.resize_images(img, [48, 48])
#img = tf.image.per_image_standardization(img)
img = tf.slice(img, [8, 8, 0], [32, 32, 1])
# Original
# image = tf.image.resize_images(image, [48, 48])
# image = tf.slice(image, [8, 8, 0], [32, 32, 1])
return img, lab, cat, elv, azi, lit
def input_fn(path, is_train: bool, batch_size = 64, epochs=100):
"""Input pipeline for smallNORB using tf.data.
Author:
Ashley Gritzman 15/11/2018
Args:
is_train:
Returns:
dataset: image tf.data.Dataset
"""
import re
if is_train:
CHUNK_RE = re.compile(r"train.*\.tfrecords")
else:
CHUNK_RE = re.compile(r"test.*\.tfrecords")
chunk_files = [os.path.join(path, fname)
for fname in os.listdir(path)
if CHUNK_RE.match(fname)]
# 1. create the dataset
dataset = tf.data.TFRecordDataset(chunk_files)
# 2. map with the actual work (preprocessing, augmentation…) using multiple
# parallel calls
dataset = dataset.map(_parser, num_parallel_calls=4)
if is_train:
dataset = dataset.map(_train_preprocess,
num_parallel_calls=4)
else:
dataset = dataset.map(_val_preprocess,
num_parallel_calls=4)
# 3. shuffle (with a big enough buffer size)
# In response to a question on OpenReview, Hinton et al. wrote the
# following:
# https://openreview.net/forum?id=HJWLfGWRb¬eId=rJgxonoNnm
# "We did not have any special ordering of training batches and we random
# shuffle. In terms of TF batch:
# capacity=2000 + 3 * batch_size, ensures a minimum amount of shuffling of
# examples. min_after_dequeue=2000."
capacity = 2000 + 3 * batch_size
dataset = dataset.shuffle(buffer_size=capacity)
# 4. batch
dataset = dataset.batch(batch_size, drop_remainder=True)
# 5. repeat
dataset = dataset.repeat(count=epochs)
# 6. prefetch
dataset = dataset.prefetch(1)
return dataset
def create_inputs_norb(path, is_train: bool,batch_size,epochs):
"""Get a batch from the input pipeline.
Author:
Ashley Gritzman 15/11/2018
Args:
is_train:
Returns:
img, lab, cat, elv, azi, lit:
"""
# Create batched dataset
dataset = input_fn(path, is_train,batch_size=batch_size, epochs=epochs)
# Create one-shot iterator
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
img, lab, cat, elv, azi, lit = iterator.get_next()
output_dict = {'image': img,
'label': lab,
'category': cat,
'elevation': elv,
'azimuth': azi,
'lighting': lit}
return output_dict
def inputs(data_dir,
batch_size,
split,
epochs=50):
dict = create_inputs_norb(data_dir, split == "train",batch_size=batch_size, epochs=epochs)
batched_features={}
batched_features['height'] = 32
batched_features['width'] = 32
batched_features['depth'] = 1
batched_features['num_targets'] = 1
batched_features['num_classes'] = 5
batched_features['recons_image'] = dict['image']
batched_features['recons_label'] = dict['label']
batched_features['images'] = dict['image']
batched_features['labels'] = tf.one_hot(dict['label'], 5)
return batched_features | batched_features = tf.compat.v1.train.batch(
features,
batch_size=batch_size,
num_threads=1,
capacity=10000 + 3 * batch_size) | conditional_block |
smallnorb_input_record.py | """Input utility functions for reading small norb dataset.
Handles reading from small norb dataset saved in binary original format. Scales and
normalizes the images as the preprocessing step. It can distort the images by
random cropping and contrast adjusting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
def _read_input(filename_queue):
"""Reads a single record and converts it to a tensor.
Each record consists the 3x32x32 image with one byte for the label.
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
image: a [32, 32, 3] float32 Tensor with the image data.
label: an int32 Tensor with the label in the range 0..9.
"""
label_bytes = 1
height = 32
depth = 3
image_bytes = height * height * depth
record_bytes = label_bytes + image_bytes
reader = tf.compat.v1.FixedLengthRecordReader(record_bytes=record_bytes)
_, byte_data = reader.read(filename_queue)
uint_data = tf.io.decode_raw(byte_data, tf.uint8)
label = tf.cast(tf.strided_slice(uint_data, [0], [label_bytes]), tf.int32)
label.set_shape([1])
depth_major = tf.reshape(
tf.strided_slice(uint_data, [label_bytes], [record_bytes]),
[depth, height, height])
image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)
return image, label
def _distort_resize(image, image_size):
"""Distorts input images for CIFAR training.
Adds standard distortions such as flipping, cropping and changing brightness
and contrast.
Args:
image: A float32 tensor with last dimmension equal to 3.
image_size: The output image size after cropping.
Returns:
distorted_image: A float32 tensor with shape [image_size, image_size, 3].
"""
distorted_image = tf.image.random_crop(image, [image_size, image_size, 3])
distorted_image = tf.image.random_flip_left_right(distorted_image)
distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
distorted_image = tf.image.random_contrast(
distorted_image, lower=0.2, upper=1.8)
distorted_image.set_shape([image_size, image_size, 3])
return distorted_image
def _batch_features(image, label, batch_size, split, image_size):
"""Constructs the batched feature dictionary.
Batches the images and labels accourding to the split. Shuffles the data only
if split is train. Formats the feature dictionary to be in the format required
by experiment.py.
Args:
image: A float32 tensor with shape [image_size, image_size, 3].
label: An int32 tensor with the label of the image.
batch_size: The number of data points in the output batch.
split: 'train' or 'test'.
image_size: The size of the input image.
Returns:
batched_features: A dictionary of the input data features.
"""
image = tf.transpose(a=image, perm=[2, 0, 1])
features = {
'images': image,
'labels': tf.one_hot(label, 5),
'recons_image': image,
'recons_label': label,
}
if split == 'train':
batched_features = tf.compat.v1.train.shuffle_batch(
features,
batch_size=batch_size,
num_threads=16,
capacity=10000 + 3 * batch_size,
min_after_dequeue=10000)
else:
batched_features = tf.compat.v1.train.batch(
features,
batch_size=batch_size,
num_threads=1,
capacity=10000 + 3 * batch_size)
batched_features['labels'] = tf.reshape(batched_features['labels'],
[batch_size, 5])
batched_features['recons_label'] = tf.reshape(
batched_features['recons_label'], [batch_size])
batched_features['height'] = image_size
batched_features['width'] = image_size
batched_features['depth'] = 3
batched_features['num_targets'] = 1
batched_features['num_classes'] = 5
return batched_features
import os
def _parser(serialized_example):
"""Parse smallNORB example from tfrecord.
Args:
serialized_example: serialized example from tfrecord
Returns:
img: image
lab: label
cat:
category
the instance in the category (0 to 9)
elv:
elevation
the elevation (0 to 8, which mean cameras are 30,
35,40,45,50,55,60,65,70 degrees from the horizontal respectively)
azi:
azimuth
the azimuth (0,2,4,...,34, multiply by 10 to get the azimuth in
degrees)
lit:
lighting
the lighting condition (0 to 5)
"""
features = tf.compat.v1.parse_single_example(
serialized_example,
features={
'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),
'label': tf.compat.v1.FixedLenFeature([], tf.int64),
'category': tf.compat.v1.FixedLenFeature([], tf.int64),
'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),
'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),
'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),
})
img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)
img = tf.reshape(img, [96, 96, 1])
img = tf.cast(img, tf.float32) # * (1. / 255) # left unnormalized
lab = tf.cast(features['label'], tf.int32)
cat = tf.cast(features['category'], tf.int32)
elv = tf.cast(features['elevation'], tf.int32)
azi = tf.cast(features['azimuth'], tf.int32)
lit = tf.cast(features['lighting'], tf.int32)
return img, lab, cat, elv, azi, lit
def _train_preprocess(img, lab, cat, elv, azi, lit):
"""Preprocessing for training.
Preprocessing from Hinton et al. (2018) "Matrix capsules with EM routing."
Hinton2018: "We downsample smallNORB to 48 × 48 pixels and normalize each
image to have zero mean and unit variance. During training, we randomly crop
32 × 32 patches and add random brightness and contrast to the cropped images.
During test, we crop a 32 × 32 patch from the center of the image and
achieve..."
Args:
img: this fn only works on the image
lab, cat, elv, azi, lit: allow these to pass through
Returns:
img: image processed
lab, cat, elv, azi, lit: allow these to pass through
"""
img = img / 255.
img = tf.compat.v1.image.resize_images(img, [48, 48])
#img = tf.image.per_image_standardization(img)
img = tf.compat.v1.random_crop(img, [32, 32, 1])
img = tf.image.random_brightness(img, max_delta=32. / 255.)
# original 0.5, 1.5
img = tf.image.random_contrast(img, lower=0.5, upper=1.5)
# Original
# image = tf.image.random_brightness(image, max_delta=32. / 255.)
# image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
# image = tf.image.resize_images(image, [48, 48])
# image = tf.random_crop(image, [32, 32, 1])
return img, lab, cat, elv, azi, lit
def _val_preprocess(img, lab, cat, elv, azi, lit):
"""Preprocessing for validation/testing.
Preprocessing from Hinton et al. (2018) "Matrix capsules with EM routing."
Hinton2018: "We downsample smallNORB to 48 × 48 pixels and normalize each
image to have zero mean and unit variance. During training, we randomly crop
32 × 32 patches and add random brightness and contrast to the cropped
images. During test, we crop a 32 × 32 patch from the center of the image
and achieve..."
Args:
img: this fn only works on the image
lab, cat, elv, azi, lit: allow these to pass through
Returns:
img: image processed
lab, cat, elv, azi, lit: allow these to pass through
"""
img = img / 255.
img = tf.compat.v1.image.resize_images(img, [48, 48])
#img = tf.image.per_image_standardization(img)
img = tf.slice(img, [8, 8, 0], [32, 32, 1])
# Original
# image = tf.image.resize_images(image, [48, 48])
# image = tf.slice(image, [8, 8, 0], [32, 32, 1])
return img, lab, cat, elv, azi, lit
def input_fn(path, is_train: bool, batch_size = 64, epochs=100):
"""Input pipeline for smallNORB using tf.data.
Author:
Ashley Gritzman 15/11/2018
Args:
is_train:
Returns:
dataset: image tf.data.Dataset
"""
import re
if is_train:
CHUNK_RE = re.compile(r"train.*\.tfrecords")
else:
CHUNK_RE = re.compile(r"test.*\.tfrecords")
chunk_files = [os.path.join(path, fname)
for fname in os.listdir(path)
if CHUNK_RE.match(fname)]
# 1. create the dataset
dataset = tf.data.TFRecordDataset(chunk_files)
# 2. map with the actual work (preprocessing, augmentation…) using multiple
# parallel calls
dataset = dataset.map(_parser, num_parallel_calls=4)
if is_train:
dataset = dataset.map(_train_preprocess,
num_parallel_calls=4)
else:
dataset = dataset.map(_val_preprocess,
num_parallel_calls=4)
# 3. shuffle (with a big enough buffer size)
# In response to a question on OpenReview, Hinton et al. wrote the
# following:
# https://openreview.net/forum?id=HJWLfGWRb¬eId=rJgxonoNnm
# "We did not have any special ordering of training batches and we random
# shuffle. In terms of TF batch:
# capacity=2000 + 3 * batch_size, ensures a minimum amount of shuffling of
# examples. min_after_dequeue=2000."
capacity = 2000 + 3 * batch_size
dataset = dataset.shuffle(buffer_size=capacity)
# 4. batch
dataset = dataset.batch(batch_size, drop_remainder=True)
# 5. repeat
dataset = dataset.repeat(count=epochs)
# 6. prefetch
dataset = dataset.prefetch(1)
return dataset
def create_inputs_norb(path, is_train: bool,batch_size,epochs):
"""Get a | nputs(data_dir,
batch_size,
split,
epochs=50):
dict = create_inputs_norb(data_dir, split == "train",batch_size=batch_size, epochs=epochs)
batched_features={}
batched_features['height'] = 32
batched_features['width'] = 32
batched_features['depth'] = 1
batched_features['num_targets'] = 1
batched_features['num_classes'] = 5
batched_features['recons_image'] = dict['image']
batched_features['recons_label'] = dict['label']
batched_features['images'] = dict['image']
batched_features['labels'] = tf.one_hot(dict['label'], 5)
return batched_features | batch from the input pipeline.
Author:
Ashley Gritzman 15/11/2018
Args:
is_train:
Returns:
img, lab, cat, elv, azi, lit:
"""
# Create batched dataset
dataset = input_fn(path, is_train,batch_size=batch_size, epochs=epochs)
# Create one-shot iterator
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
img, lab, cat, elv, azi, lit = iterator.get_next()
output_dict = {'image': img,
'label': lab,
'category': cat,
'elevation': elv,
'azimuth': azi,
'lighting': lit}
return output_dict
def i | identifier_body |
smallnorb_input_record.py | """Input utility functions for reading small norb dataset.
Handles reading from small norb dataset saved in binary original format. Scales and
normalizes the images as the preprocessing step. It can distort the images by
random cropping and contrast adjusting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
def _read_input(filename_queue):
"""Reads a single record and converts it to a tensor.
Each record consists the 3x32x32 image with one byte for the label.
Args:
filename_queue: A queue of strings with the filenames to read from.
Returns:
image: a [32, 32, 3] float32 Tensor with the image data.
label: an int32 Tensor with the label in the range 0..9.
"""
label_bytes = 1
height = 32
depth = 3
image_bytes = height * height * depth
record_bytes = label_bytes + image_bytes
reader = tf.compat.v1.FixedLengthRecordReader(record_bytes=record_bytes)
_, byte_data = reader.read(filename_queue)
uint_data = tf.io.decode_raw(byte_data, tf.uint8)
label = tf.cast(tf.strided_slice(uint_data, [0], [label_bytes]), tf.int32)
label.set_shape([1])
depth_major = tf.reshape(
tf.strided_slice(uint_data, [label_bytes], [record_bytes]),
[depth, height, height])
image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)
return image, label
def _distort_resize(image, image_size):
"""Distorts input images for CIFAR training.
Adds standard distortions such as flipping, cropping and changing brightness
and contrast.
Args:
image: A float32 tensor with last dimmension equal to 3.
image_size: The output image size after cropping.
Returns:
distorted_image: A float32 tensor with shape [image_size, image_size, 3].
"""
distorted_image = tf.image.random_crop(image, [image_size, image_size, 3])
distorted_image = tf.image.random_flip_left_right(distorted_image)
distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
distorted_image = tf.image.random_contrast(
distorted_image, lower=0.2, upper=1.8)
distorted_image.set_shape([image_size, image_size, 3])
return distorted_image
def _batch_features(image, label, batch_size, split, image_size):
"""Constructs the batched feature dictionary.
Batches the images and labels accourding to the split. Shuffles the data only
if split is train. Formats the feature dictionary to be in the format required
by experiment.py.
Args:
image: A float32 tensor with shape [image_size, image_size, 3].
label: An int32 tensor with the label of the image.
batch_size: The number of data points in the output batch.
split: 'train' or 'test'.
image_size: The size of the input image.
Returns:
batched_features: A dictionary of the input data features.
"""
image = tf.transpose(a=image, perm=[2, 0, 1])
features = {
'images': image,
'labels': tf.one_hot(label, 5),
'recons_image': image,
'recons_label': label,
}
if split == 'train':
batched_features = tf.compat.v1.train.shuffle_batch(
features,
batch_size=batch_size,
num_threads=16,
capacity=10000 + 3 * batch_size,
min_after_dequeue=10000)
else:
batched_features = tf.compat.v1.train.batch(
features,
batch_size=batch_size,
num_threads=1,
capacity=10000 + 3 * batch_size)
batched_features['labels'] = tf.reshape(batched_features['labels'],
[batch_size, 5])
batched_features['recons_label'] = tf.reshape(
batched_features['recons_label'], [batch_size])
batched_features['height'] = image_size
batched_features['width'] = image_size
batched_features['depth'] = 3
batched_features['num_targets'] = 1
batched_features['num_classes'] = 5
return batched_features
import os
def _parser(serialized_example):
"""Parse smallNORB example from tfrecord.
Args:
serialized_example: serialized example from tfrecord
Returns:
img: image
lab: label
cat:
category
the instance in the category (0 to 9)
elv:
elevation
the elevation (0 to 8, which mean cameras are 30,
35,40,45,50,55,60,65,70 degrees from the horizontal respectively)
azi:
azimuth
the azimuth (0,2,4,...,34, multiply by 10 to get the azimuth in
degrees)
lit:
lighting
the lighting condition (0 to 5)
"""
features = tf.compat.v1.parse_single_example(
serialized_example,
features={
'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),
'label': tf.compat.v1.FixedLenFeature([], tf.int64),
'category': tf.compat.v1.FixedLenFeature([], tf.int64),
'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),
'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),
'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),
})
img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)
img = tf.reshape(img, [96, 96, 1])
img = tf.cast(img, tf.float32) # * (1. / 255) # left unnormalized
lab = tf.cast(features['label'], tf.int32)
cat = tf.cast(features['category'], tf.int32)
elv = tf.cast(features['elevation'], tf.int32)
azi = tf.cast(features['azimuth'], tf.int32)
lit = tf.cast(features['lighting'], tf.int32)
return img, lab, cat, elv, azi, lit
def | (img, lab, cat, elv, azi, lit):
"""Preprocessing for training.
Preprocessing from Hinton et al. (2018) "Matrix capsules with EM routing."
Hinton2018: "We downsample smallNORB to 48 × 48 pixels and normalize each
image to have zero mean and unit variance. During training, we randomly crop
32 × 32 patches and add random brightness and contrast to the cropped images.
During test, we crop a 32 × 32 patch from the center of the image and
achieve..."
Args:
img: this fn only works on the image
lab, cat, elv, azi, lit: allow these to pass through
Returns:
img: image processed
lab, cat, elv, azi, lit: allow these to pass through
"""
img = img / 255.
img = tf.compat.v1.image.resize_images(img, [48, 48])
#img = tf.image.per_image_standardization(img)
img = tf.compat.v1.random_crop(img, [32, 32, 1])
img = tf.image.random_brightness(img, max_delta=32. / 255.)
# original 0.5, 1.5
img = tf.image.random_contrast(img, lower=0.5, upper=1.5)
# Original
# image = tf.image.random_brightness(image, max_delta=32. / 255.)
# image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
# image = tf.image.resize_images(image, [48, 48])
# image = tf.random_crop(image, [32, 32, 1])
return img, lab, cat, elv, azi, lit
def _val_preprocess(img, lab, cat, elv, azi, lit):
"""Preprocessing for validation/testing.
Preprocessing from Hinton et al. (2018) "Matrix capsules with EM routing."
Hinton2018: "We downsample smallNORB to 48 × 48 pixels and normalize each
image to have zero mean and unit variance. During training, we randomly crop
32 × 32 patches and add random brightness and contrast to the cropped
images. During test, we crop a 32 × 32 patch from the center of the image
and achieve..."
Args:
img: this fn only works on the image
lab, cat, elv, azi, lit: allow these to pass through
Returns:
img: image processed
lab, cat, elv, azi, lit: allow these to pass through
"""
img = img / 255.
img = tf.compat.v1.image.resize_images(img, [48, 48])
#img = tf.image.per_image_standardization(img)
img = tf.slice(img, [8, 8, 0], [32, 32, 1])
# Original
# image = tf.image.resize_images(image, [48, 48])
# image = tf.slice(image, [8, 8, 0], [32, 32, 1])
return img, lab, cat, elv, azi, lit
def input_fn(path, is_train: bool, batch_size = 64, epochs=100):
"""Input pipeline for smallNORB using tf.data.
Author:
Ashley Gritzman 15/11/2018
Args:
is_train:
Returns:
dataset: image tf.data.Dataset
"""
import re
if is_train:
CHUNK_RE = re.compile(r"train.*\.tfrecords")
else:
CHUNK_RE = re.compile(r"test.*\.tfrecords")
chunk_files = [os.path.join(path, fname)
for fname in os.listdir(path)
if CHUNK_RE.match(fname)]
# 1. create the dataset
dataset = tf.data.TFRecordDataset(chunk_files)
# 2. map with the actual work (preprocessing, augmentation…) using multiple
# parallel calls
dataset = dataset.map(_parser, num_parallel_calls=4)
if is_train:
dataset = dataset.map(_train_preprocess,
num_parallel_calls=4)
else:
dataset = dataset.map(_val_preprocess,
num_parallel_calls=4)
# 3. shuffle (with a big enough buffer size)
# In response to a question on OpenReview, Hinton et al. wrote the
# following:
# https://openreview.net/forum?id=HJWLfGWRb¬eId=rJgxonoNnm
# "We did not have any special ordering of training batches and we random
# shuffle. In terms of TF batch:
# capacity=2000 + 3 * batch_size, ensures a minimum amount of shuffling of
# examples. min_after_dequeue=2000."
capacity = 2000 + 3 * batch_size
dataset = dataset.shuffle(buffer_size=capacity)
# 4. batch
dataset = dataset.batch(batch_size, drop_remainder=True)
# 5. repeat
dataset = dataset.repeat(count=epochs)
# 6. prefetch
dataset = dataset.prefetch(1)
return dataset
def create_inputs_norb(path, is_train: bool,batch_size,epochs):
"""Get a batch from the input pipeline.
Author:
Ashley Gritzman 15/11/2018
Args:
is_train:
Returns:
img, lab, cat, elv, azi, lit:
"""
# Create batched dataset
dataset = input_fn(path, is_train,batch_size=batch_size, epochs=epochs)
# Create one-shot iterator
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
img, lab, cat, elv, azi, lit = iterator.get_next()
output_dict = {'image': img,
'label': lab,
'category': cat,
'elevation': elv,
'azimuth': azi,
'lighting': lit}
return output_dict
def inputs(data_dir,
batch_size,
split,
epochs=50):
dict = create_inputs_norb(data_dir, split == "train",batch_size=batch_size, epochs=epochs)
batched_features={}
batched_features['height'] = 32
batched_features['width'] = 32
batched_features['depth'] = 1
batched_features['num_targets'] = 1
batched_features['num_classes'] = 5
batched_features['recons_image'] = dict['image']
batched_features['recons_label'] = dict['label']
batched_features['images'] = dict['image']
batched_features['labels'] = tf.one_hot(dict['label'], 5)
return batched_features | _train_preprocess | identifier_name |
game.rs | use crate::abtest::ABTestMode;
use crate::debug::DebugMode;
use crate::edit::EditMode;
use crate::helpers::ID;
use crate::mission::MissionEditMode;
use crate::render::DrawOptions;
use crate::sandbox::SandboxMode;
use crate::tutorial::TutorialMode;
use crate::ui::{EditorState, Flags, ShowEverything, UI};
use abstutil::elapsed_seconds;
use ezgui::{hotkey, Canvas, EventCtx, EventLoopMode, GfxCtx, Key, UserInput, Wizard, GUI};
use geom::{Duration, Line, Pt2D, Speed};
use map_model::Map;
use rand::seq::SliceRandom;
use rand::Rng;
use rand_xorshift::XorShiftRng;
use std::path::PathBuf;
use std::time::Instant;
// This is the top-level of the GUI logic. This module should just manage interactions between the
// top-level game states.
pub struct GameState {
pub mode: Mode,
pub ui: UI,
}
// TODO Need to reset_sim() when entering Edit, Tutorial, Mission, or ABTest and when leaving
// Tutorial and ABTest. Expressing this manually right now is quite tedious; maybe having on_enter
// and on_exit would be cleaner.
pub enum Mode {
SplashScreen(Wizard, Option<(Screensaver, XorShiftRng)>),
Edit(EditMode),
Tutorial(TutorialMode),
Sandbox(SandboxMode),
Debug(DebugMode),
Mission(MissionEditMode),
ABTest(ABTestMode),
}
impl GameState {
pub fn new(flags: Flags, ctx: &mut EventCtx) -> GameState {
let splash = !flags.no_splash
&& !format!("{}", flags.sim_flags.load.display()).contains("data/save");
let mut rng = flags.sim_flags.make_rng();
let mut game = GameState {
mode: Mode::Sandbox(SandboxMode::new(ctx)),
ui: UI::new(flags, ctx),
};
let rand_focus_pt = game
.ui
.primary
.map
.all_buildings()
.choose(&mut rng)
.and_then(|b| ID::Building(b.id).canonical_point(&game.ui.primary))
.or_else(|| {
game.ui
.primary
.map
.all_lanes()
.choose(&mut rng)
.and_then(|l| ID::Lane(l.id).canonical_point(&game.ui.primary))
})
.expect("Can't get canonical_point of a random building or lane");
if splash {
ctx.canvas.center_on_map_pt(rand_focus_pt);
game.mode = Mode::SplashScreen(
Wizard::new(),
Some((
Screensaver::start_bounce(&mut rng, ctx.canvas, &game.ui.primary.map),
rng,
)),
);
} else {
match abstutil::read_json::<EditorState>("../editor_state.json") {
Ok(ref loaded) if game.ui.primary.map.get_name() == &loaded.map_name => {
println!("Loaded previous editor_state.json");
ctx.canvas.cam_x = loaded.cam_x;
ctx.canvas.cam_y = loaded.cam_y;
ctx.canvas.cam_zoom = loaded.cam_zoom;
}
_ => {
println!("Couldn't load editor_state.json or it's for a different map, so just focusing on an arbitrary building");
ctx.canvas.center_on_map_pt(rand_focus_pt);
}
}
}
game
}
fn save_editor_state(&self, canvas: &Canvas) {
let state = EditorState {
map_name: self.ui.primary.map.get_name().clone(),
cam_x: canvas.cam_x,
cam_y: canvas.cam_y,
cam_zoom: canvas.cam_zoom,
};
// TODO maybe make state line up with the map, so loading from a new map doesn't break
abstutil::write_json("../editor_state.json", &state)
.expect("Saving editor_state.json failed");
println!("Saved editor_state.json");
}
}
impl GUI for GameState {
fn event(&mut self, ctx: &mut EventCtx) -> EventLoopMode {
match self.mode {
Mode::SplashScreen(ref mut wizard, ref mut maybe_screensaver) => {
let anim = maybe_screensaver.is_some();
if let Some((ref mut screensaver, ref mut rng)) = maybe_screensaver {
screensaver.update(rng, ctx.input, ctx.canvas, &self.ui.primary.map);
}
if let Some(new_mode) = splash_screen(wizard, ctx, &mut self.ui, maybe_screensaver)
{
self.mode = new_mode;
} else if wizard.aborted() {
self.before_quit(ctx.canvas);
std::process::exit(0);
}
if anim {
EventLoopMode::Animation
} else {
EventLoopMode::InputOnly
}
}
Mode::Edit(_) => EditMode::event(self, ctx),
Mode::Tutorial(_) => TutorialMode::event(self, ctx),
Mode::Sandbox(_) => SandboxMode::event(self, ctx),
Mode::Debug(_) => DebugMode::event(self, ctx),
Mode::Mission(_) => MissionEditMode::event(self, ctx),
Mode::ABTest(_) => ABTestMode::event(self, ctx),
}
}
fn draw(&self, g: &mut GfxCtx) {
match self.mode {
Mode::SplashScreen(ref wizard, _) => {
self.ui.draw(
g,
DrawOptions::new(),
&self.ui.primary.sim,
&ShowEverything::new(),
);
wizard.draw(g);
}
Mode::Edit(_) => EditMode::draw(self, g),
Mode::Tutorial(_) => TutorialMode::draw(self, g),
Mode::Sandbox(_) => SandboxMode::draw(self, g),
Mode::Debug(_) => DebugMode::draw(self, g),
Mode::Mission(_) => MissionEditMode::draw(self, g),
Mode::ABTest(_) => ABTestMode::draw(self, g),
}
/*println!(
"{} uploads, {} draw calls",
g.get_num_uploads(),
g.num_draw_calls
);*/
}
fn | (&self, canvas: &Canvas) {
println!(
"********************************************************************************"
);
println!("UI broke! Primary sim:");
self.ui.primary.sim.dump_before_abort();
if let Mode::ABTest(ref abtest) = self.mode {
if let Some(ref s) = abtest.secondary {
println!("Secondary sim:");
s.sim.dump_before_abort();
}
}
self.save_editor_state(canvas);
}
fn before_quit(&self, canvas: &Canvas) {
self.save_editor_state(canvas);
self.ui.cs.save();
println!("Saved color_scheme.json");
}
fn profiling_enabled(&self) -> bool {
self.ui.primary.current_flags.enable_profiler
}
}
const SPEED: Speed = Speed::const_meters_per_second(20.0);
pub struct Screensaver {
line: Line,
started: Instant,
}
impl Screensaver {
fn start_bounce(rng: &mut XorShiftRng, canvas: &mut Canvas, map: &Map) -> Screensaver {
let at = canvas.center_to_map_pt();
let bounds = map.get_bounds();
// TODO Ideally bounce off the edge of the map
let goto = Pt2D::new(
rng.gen_range(0.0, bounds.max_x),
rng.gen_range(0.0, bounds.max_y),
);
canvas.cam_zoom = 10.0;
canvas.center_on_map_pt(at);
Screensaver {
line: Line::new(at, goto),
started: Instant::now(),
}
}
fn update(
&mut self,
rng: &mut XorShiftRng,
input: &mut UserInput,
canvas: &mut Canvas,
map: &Map,
) {
if input.nonblocking_is_update_event() {
input.use_update_event();
let dist_along = Duration::seconds(elapsed_seconds(self.started)) * SPEED;
if dist_along < self.line.length() {
canvas.center_on_map_pt(self.line.dist_along(dist_along));
} else {
*self = Screensaver::start_bounce(rng, canvas, map)
}
}
}
}
fn splash_screen(
raw_wizard: &mut Wizard,
ctx: &mut EventCtx,
ui: &mut UI,
maybe_screensaver: &mut Option<(Screensaver, XorShiftRng)>,
) -> Option<Mode> {
let mut wizard = raw_wizard.wrap(ctx);
let sandbox = "Sandbox mode";
let load_map = "Load another map";
let edit = "Edit map";
let tutorial = "Tutorial";
let debug = "Debug mode";
let mission = "Mission Edit Mode";
let abtest = "A/B Test Mode";
let about = "About";
let quit = "Quit";
// Loop because we might go from About -> top-level menu repeatedly, and recursion is scary.
loop {
// TODO No hotkey for quit because it's just the normal menu escape?
match wizard
.choose_string_hotkeys(
"Welcome to A/B Street!",
vec![
(hotkey(Key::S), sandbox),
(hotkey(Key::L), load_map),
(hotkey(Key::E), edit),
(hotkey(Key::T), tutorial),
(hotkey(Key::D), debug),
(hotkey(Key::M), mission),
(hotkey(Key::A), abtest),
(None, about),
(None, quit),
],
)?
.as_str()
{
x if x == sandbox => break Some(Mode::Sandbox(SandboxMode::new(ctx))),
x if x == load_map => {
let current_map = ui.primary.map.get_name().to_string();
if let Some((name, _)) = wizard.choose_something_no_keys::<String>(
"Load which map?",
Box::new(move || {
abstutil::list_all_objects("maps", "")
.into_iter()
.filter(|(n, _)| n != ¤t_map)
.collect()
}),
) {
// This retains no state, but that's probably fine.
let mut flags = ui.primary.current_flags.clone();
flags.sim_flags.load = PathBuf::from(format!("../data/maps/{}.bin", name));
*ui = UI::new(flags, ctx);
break Some(Mode::Sandbox(SandboxMode::new(ctx)));
} else if wizard.aborted() {
break Some(Mode::SplashScreen(Wizard::new(), maybe_screensaver.take()));
} else {
break None;
}
}
x if x == edit => break Some(Mode::Edit(EditMode::new(ctx, ui))),
x if x == tutorial => break Some(Mode::Tutorial(TutorialMode::new(ctx, ui))),
x if x == debug => break Some(Mode::Debug(DebugMode::new(ctx, ui))),
x if x == mission => break Some(Mode::Mission(MissionEditMode::new(ctx, ui))),
x if x == abtest => break Some(Mode::ABTest(ABTestMode::new(ctx, ui))),
x if x == about => {
if wizard.acknowledge(
"About A/B Street",
vec![
"Author: Dustin Carlino (dabreegster@gmail.com)",
"http://github.com/dabreegster/abstreet",
"Map data from OpenStreetMap and King County GIS",
"",
"Press ENTER to continue",
],
) {
continue;
} else {
break None;
}
}
x if x == quit => {
// Not important to call before_quit... if we're here, we're bouncing around
// aimlessly anyway
std::process::exit(0);
}
_ => unreachable!(),
}
}
}
| dump_before_abort | identifier_name |
game.rs | use crate::abtest::ABTestMode;
use crate::debug::DebugMode;
use crate::edit::EditMode;
use crate::helpers::ID;
use crate::mission::MissionEditMode;
use crate::render::DrawOptions;
use crate::sandbox::SandboxMode;
use crate::tutorial::TutorialMode;
use crate::ui::{EditorState, Flags, ShowEverything, UI};
use abstutil::elapsed_seconds;
use ezgui::{hotkey, Canvas, EventCtx, EventLoopMode, GfxCtx, Key, UserInput, Wizard, GUI};
use geom::{Duration, Line, Pt2D, Speed};
use map_model::Map;
use rand::seq::SliceRandom;
use rand::Rng;
use rand_xorshift::XorShiftRng;
use std::path::PathBuf;
use std::time::Instant;
// This is the top-level of the GUI logic. This module should just manage interactions between the
// top-level game states.
pub struct GameState {
pub mode: Mode,
pub ui: UI,
}
// TODO Need to reset_sim() when entering Edit, Tutorial, Mission, or ABTest and when leaving
// Tutorial and ABTest. Expressing this manually right now is quite tedious; maybe having on_enter
// and on_exit would be cleaner.
pub enum Mode {
SplashScreen(Wizard, Option<(Screensaver, XorShiftRng)>),
Edit(EditMode),
Tutorial(TutorialMode),
Sandbox(SandboxMode),
Debug(DebugMode),
Mission(MissionEditMode),
ABTest(ABTestMode),
}
impl GameState {
pub fn new(flags: Flags, ctx: &mut EventCtx) -> GameState {
let splash = !flags.no_splash
&& !format!("{}", flags.sim_flags.load.display()).contains("data/save");
let mut rng = flags.sim_flags.make_rng();
let mut game = GameState {
mode: Mode::Sandbox(SandboxMode::new(ctx)),
ui: UI::new(flags, ctx),
};
let rand_focus_pt = game
.ui
.primary
.map
.all_buildings()
.choose(&mut rng)
.and_then(|b| ID::Building(b.id).canonical_point(&game.ui.primary))
.or_else(|| {
game.ui
.primary
.map
.all_lanes()
.choose(&mut rng)
.and_then(|l| ID::Lane(l.id).canonical_point(&game.ui.primary))
})
.expect("Can't get canonical_point of a random building or lane");
if splash {
ctx.canvas.center_on_map_pt(rand_focus_pt);
game.mode = Mode::SplashScreen(
Wizard::new(),
Some((
Screensaver::start_bounce(&mut rng, ctx.canvas, &game.ui.primary.map),
rng,
)),
);
} else {
match abstutil::read_json::<EditorState>("../editor_state.json") {
Ok(ref loaded) if game.ui.primary.map.get_name() == &loaded.map_name => {
println!("Loaded previous editor_state.json");
ctx.canvas.cam_x = loaded.cam_x;
ctx.canvas.cam_y = loaded.cam_y;
ctx.canvas.cam_zoom = loaded.cam_zoom;
}
_ => {
println!("Couldn't load editor_state.json or it's for a different map, so just focusing on an arbitrary building");
ctx.canvas.center_on_map_pt(rand_focus_pt);
}
}
}
game
}
fn save_editor_state(&self, canvas: &Canvas) {
let state = EditorState {
map_name: self.ui.primary.map.get_name().clone(),
cam_x: canvas.cam_x,
cam_y: canvas.cam_y,
cam_zoom: canvas.cam_zoom,
};
// TODO maybe make state line up with the map, so loading from a new map doesn't break
abstutil::write_json("../editor_state.json", &state)
.expect("Saving editor_state.json failed");
println!("Saved editor_state.json");
}
}
impl GUI for GameState {
fn event(&mut self, ctx: &mut EventCtx) -> EventLoopMode {
match self.mode {
Mode::SplashScreen(ref mut wizard, ref mut maybe_screensaver) => {
let anim = maybe_screensaver.is_some();
if let Some((ref mut screensaver, ref mut rng)) = maybe_screensaver {
screensaver.update(rng, ctx.input, ctx.canvas, &self.ui.primary.map);
}
if let Some(new_mode) = splash_screen(wizard, ctx, &mut self.ui, maybe_screensaver)
{
self.mode = new_mode;
} else if wizard.aborted() {
self.before_quit(ctx.canvas);
std::process::exit(0);
}
if anim {
EventLoopMode::Animation
} else {
EventLoopMode::InputOnly
}
}
Mode::Edit(_) => EditMode::event(self, ctx),
Mode::Tutorial(_) => TutorialMode::event(self, ctx),
Mode::Sandbox(_) => SandboxMode::event(self, ctx),
Mode::Debug(_) => DebugMode::event(self, ctx),
Mode::Mission(_) => MissionEditMode::event(self, ctx),
Mode::ABTest(_) => ABTestMode::event(self, ctx),
}
}
fn draw(&self, g: &mut GfxCtx) {
match self.mode {
Mode::SplashScreen(ref wizard, _) => {
self.ui.draw(
g,
DrawOptions::new(),
&self.ui.primary.sim,
&ShowEverything::new(),
);
wizard.draw(g);
}
Mode::Edit(_) => EditMode::draw(self, g),
Mode::Tutorial(_) => TutorialMode::draw(self, g),
Mode::Sandbox(_) => SandboxMode::draw(self, g),
Mode::Debug(_) => DebugMode::draw(self, g),
Mode::Mission(_) => MissionEditMode::draw(self, g),
Mode::ABTest(_) => ABTestMode::draw(self, g),
}
/*println!(
"{} uploads, {} draw calls",
g.get_num_uploads(),
g.num_draw_calls
);*/
}
fn dump_before_abort(&self, canvas: &Canvas) {
println!(
"********************************************************************************"
);
println!("UI broke! Primary sim:");
self.ui.primary.sim.dump_before_abort();
if let Mode::ABTest(ref abtest) = self.mode {
if let Some(ref s) = abtest.secondary {
println!("Secondary sim:");
s.sim.dump_before_abort();
}
}
self.save_editor_state(canvas);
}
fn before_quit(&self, canvas: &Canvas) {
self.save_editor_state(canvas);
self.ui.cs.save();
println!("Saved color_scheme.json");
}
fn profiling_enabled(&self) -> bool {
self.ui.primary.current_flags.enable_profiler
}
}
const SPEED: Speed = Speed::const_meters_per_second(20.0);
pub struct Screensaver {
line: Line,
started: Instant,
}
impl Screensaver {
fn start_bounce(rng: &mut XorShiftRng, canvas: &mut Canvas, map: &Map) -> Screensaver {
let at = canvas.center_to_map_pt();
let bounds = map.get_bounds();
// TODO Ideally bounce off the edge of the map | rng.gen_range(0.0, bounds.max_y),
);
canvas.cam_zoom = 10.0;
canvas.center_on_map_pt(at);
Screensaver {
line: Line::new(at, goto),
started: Instant::now(),
}
}
fn update(
&mut self,
rng: &mut XorShiftRng,
input: &mut UserInput,
canvas: &mut Canvas,
map: &Map,
) {
if input.nonblocking_is_update_event() {
input.use_update_event();
let dist_along = Duration::seconds(elapsed_seconds(self.started)) * SPEED;
if dist_along < self.line.length() {
canvas.center_on_map_pt(self.line.dist_along(dist_along));
} else {
*self = Screensaver::start_bounce(rng, canvas, map)
}
}
}
}
fn splash_screen(
raw_wizard: &mut Wizard,
ctx: &mut EventCtx,
ui: &mut UI,
maybe_screensaver: &mut Option<(Screensaver, XorShiftRng)>,
) -> Option<Mode> {
let mut wizard = raw_wizard.wrap(ctx);
let sandbox = "Sandbox mode";
let load_map = "Load another map";
let edit = "Edit map";
let tutorial = "Tutorial";
let debug = "Debug mode";
let mission = "Mission Edit Mode";
let abtest = "A/B Test Mode";
let about = "About";
let quit = "Quit";
// Loop because we might go from About -> top-level menu repeatedly, and recursion is scary.
loop {
// TODO No hotkey for quit because it's just the normal menu escape?
match wizard
.choose_string_hotkeys(
"Welcome to A/B Street!",
vec![
(hotkey(Key::S), sandbox),
(hotkey(Key::L), load_map),
(hotkey(Key::E), edit),
(hotkey(Key::T), tutorial),
(hotkey(Key::D), debug),
(hotkey(Key::M), mission),
(hotkey(Key::A), abtest),
(None, about),
(None, quit),
],
)?
.as_str()
{
x if x == sandbox => break Some(Mode::Sandbox(SandboxMode::new(ctx))),
x if x == load_map => {
let current_map = ui.primary.map.get_name().to_string();
if let Some((name, _)) = wizard.choose_something_no_keys::<String>(
"Load which map?",
Box::new(move || {
abstutil::list_all_objects("maps", "")
.into_iter()
.filter(|(n, _)| n != ¤t_map)
.collect()
}),
) {
// This retains no state, but that's probably fine.
let mut flags = ui.primary.current_flags.clone();
flags.sim_flags.load = PathBuf::from(format!("../data/maps/{}.bin", name));
*ui = UI::new(flags, ctx);
break Some(Mode::Sandbox(SandboxMode::new(ctx)));
} else if wizard.aborted() {
break Some(Mode::SplashScreen(Wizard::new(), maybe_screensaver.take()));
} else {
break None;
}
}
x if x == edit => break Some(Mode::Edit(EditMode::new(ctx, ui))),
x if x == tutorial => break Some(Mode::Tutorial(TutorialMode::new(ctx, ui))),
x if x == debug => break Some(Mode::Debug(DebugMode::new(ctx, ui))),
x if x == mission => break Some(Mode::Mission(MissionEditMode::new(ctx, ui))),
x if x == abtest => break Some(Mode::ABTest(ABTestMode::new(ctx, ui))),
x if x == about => {
if wizard.acknowledge(
"About A/B Street",
vec![
"Author: Dustin Carlino (dabreegster@gmail.com)",
"http://github.com/dabreegster/abstreet",
"Map data from OpenStreetMap and King County GIS",
"",
"Press ENTER to continue",
],
) {
continue;
} else {
break None;
}
}
x if x == quit => {
// Not important to call before_quit... if we're here, we're bouncing around
// aimlessly anyway
std::process::exit(0);
}
_ => unreachable!(),
}
}
} | let goto = Pt2D::new(
rng.gen_range(0.0, bounds.max_x), | random_line_split |
game.rs | use crate::abtest::ABTestMode;
use crate::debug::DebugMode;
use crate::edit::EditMode;
use crate::helpers::ID;
use crate::mission::MissionEditMode;
use crate::render::DrawOptions;
use crate::sandbox::SandboxMode;
use crate::tutorial::TutorialMode;
use crate::ui::{EditorState, Flags, ShowEverything, UI};
use abstutil::elapsed_seconds;
use ezgui::{hotkey, Canvas, EventCtx, EventLoopMode, GfxCtx, Key, UserInput, Wizard, GUI};
use geom::{Duration, Line, Pt2D, Speed};
use map_model::Map;
use rand::seq::SliceRandom;
use rand::Rng;
use rand_xorshift::XorShiftRng;
use std::path::PathBuf;
use std::time::Instant;
// This is the top-level of the GUI logic. This module should just manage interactions between the
// top-level game states.
pub struct GameState {
pub mode: Mode,
pub ui: UI,
}
// TODO Need to reset_sim() when entering Edit, Tutorial, Mission, or ABTest and when leaving
// Tutorial and ABTest. Expressing this manually right now is quite tedious; maybe having on_enter
// and on_exit would be cleaner.
pub enum Mode {
SplashScreen(Wizard, Option<(Screensaver, XorShiftRng)>),
Edit(EditMode),
Tutorial(TutorialMode),
Sandbox(SandboxMode),
Debug(DebugMode),
Mission(MissionEditMode),
ABTest(ABTestMode),
}
impl GameState {
pub fn new(flags: Flags, ctx: &mut EventCtx) -> GameState |
fn save_editor_state(&self, canvas: &Canvas) {
let state = EditorState {
map_name: self.ui.primary.map.get_name().clone(),
cam_x: canvas.cam_x,
cam_y: canvas.cam_y,
cam_zoom: canvas.cam_zoom,
};
// TODO maybe make state line up with the map, so loading from a new map doesn't break
abstutil::write_json("../editor_state.json", &state)
.expect("Saving editor_state.json failed");
println!("Saved editor_state.json");
}
}
impl GUI for GameState {
fn event(&mut self, ctx: &mut EventCtx) -> EventLoopMode {
match self.mode {
Mode::SplashScreen(ref mut wizard, ref mut maybe_screensaver) => {
let anim = maybe_screensaver.is_some();
if let Some((ref mut screensaver, ref mut rng)) = maybe_screensaver {
screensaver.update(rng, ctx.input, ctx.canvas, &self.ui.primary.map);
}
if let Some(new_mode) = splash_screen(wizard, ctx, &mut self.ui, maybe_screensaver)
{
self.mode = new_mode;
} else if wizard.aborted() {
self.before_quit(ctx.canvas);
std::process::exit(0);
}
if anim {
EventLoopMode::Animation
} else {
EventLoopMode::InputOnly
}
}
Mode::Edit(_) => EditMode::event(self, ctx),
Mode::Tutorial(_) => TutorialMode::event(self, ctx),
Mode::Sandbox(_) => SandboxMode::event(self, ctx),
Mode::Debug(_) => DebugMode::event(self, ctx),
Mode::Mission(_) => MissionEditMode::event(self, ctx),
Mode::ABTest(_) => ABTestMode::event(self, ctx),
}
}
fn draw(&self, g: &mut GfxCtx) {
match self.mode {
Mode::SplashScreen(ref wizard, _) => {
self.ui.draw(
g,
DrawOptions::new(),
&self.ui.primary.sim,
&ShowEverything::new(),
);
wizard.draw(g);
}
Mode::Edit(_) => EditMode::draw(self, g),
Mode::Tutorial(_) => TutorialMode::draw(self, g),
Mode::Sandbox(_) => SandboxMode::draw(self, g),
Mode::Debug(_) => DebugMode::draw(self, g),
Mode::Mission(_) => MissionEditMode::draw(self, g),
Mode::ABTest(_) => ABTestMode::draw(self, g),
}
/*println!(
"{} uploads, {} draw calls",
g.get_num_uploads(),
g.num_draw_calls
);*/
}
fn dump_before_abort(&self, canvas: &Canvas) {
println!(
"********************************************************************************"
);
println!("UI broke! Primary sim:");
self.ui.primary.sim.dump_before_abort();
if let Mode::ABTest(ref abtest) = self.mode {
if let Some(ref s) = abtest.secondary {
println!("Secondary sim:");
s.sim.dump_before_abort();
}
}
self.save_editor_state(canvas);
}
fn before_quit(&self, canvas: &Canvas) {
self.save_editor_state(canvas);
self.ui.cs.save();
println!("Saved color_scheme.json");
}
fn profiling_enabled(&self) -> bool {
self.ui.primary.current_flags.enable_profiler
}
}
const SPEED: Speed = Speed::const_meters_per_second(20.0);
pub struct Screensaver {
line: Line,
started: Instant,
}
impl Screensaver {
fn start_bounce(rng: &mut XorShiftRng, canvas: &mut Canvas, map: &Map) -> Screensaver {
let at = canvas.center_to_map_pt();
let bounds = map.get_bounds();
// TODO Ideally bounce off the edge of the map
let goto = Pt2D::new(
rng.gen_range(0.0, bounds.max_x),
rng.gen_range(0.0, bounds.max_y),
);
canvas.cam_zoom = 10.0;
canvas.center_on_map_pt(at);
Screensaver {
line: Line::new(at, goto),
started: Instant::now(),
}
}
fn update(
&mut self,
rng: &mut XorShiftRng,
input: &mut UserInput,
canvas: &mut Canvas,
map: &Map,
) {
if input.nonblocking_is_update_event() {
input.use_update_event();
let dist_along = Duration::seconds(elapsed_seconds(self.started)) * SPEED;
if dist_along < self.line.length() {
canvas.center_on_map_pt(self.line.dist_along(dist_along));
} else {
*self = Screensaver::start_bounce(rng, canvas, map)
}
}
}
}
fn splash_screen(
raw_wizard: &mut Wizard,
ctx: &mut EventCtx,
ui: &mut UI,
maybe_screensaver: &mut Option<(Screensaver, XorShiftRng)>,
) -> Option<Mode> {
let mut wizard = raw_wizard.wrap(ctx);
let sandbox = "Sandbox mode";
let load_map = "Load another map";
let edit = "Edit map";
let tutorial = "Tutorial";
let debug = "Debug mode";
let mission = "Mission Edit Mode";
let abtest = "A/B Test Mode";
let about = "About";
let quit = "Quit";
// Loop because we might go from About -> top-level menu repeatedly, and recursion is scary.
loop {
// TODO No hotkey for quit because it's just the normal menu escape?
match wizard
.choose_string_hotkeys(
"Welcome to A/B Street!",
vec![
(hotkey(Key::S), sandbox),
(hotkey(Key::L), load_map),
(hotkey(Key::E), edit),
(hotkey(Key::T), tutorial),
(hotkey(Key::D), debug),
(hotkey(Key::M), mission),
(hotkey(Key::A), abtest),
(None, about),
(None, quit),
],
)?
.as_str()
{
x if x == sandbox => break Some(Mode::Sandbox(SandboxMode::new(ctx))),
x if x == load_map => {
let current_map = ui.primary.map.get_name().to_string();
if let Some((name, _)) = wizard.choose_something_no_keys::<String>(
"Load which map?",
Box::new(move || {
abstutil::list_all_objects("maps", "")
.into_iter()
.filter(|(n, _)| n != ¤t_map)
.collect()
}),
) {
// This retains no state, but that's probably fine.
let mut flags = ui.primary.current_flags.clone();
flags.sim_flags.load = PathBuf::from(format!("../data/maps/{}.bin", name));
*ui = UI::new(flags, ctx);
break Some(Mode::Sandbox(SandboxMode::new(ctx)));
} else if wizard.aborted() {
break Some(Mode::SplashScreen(Wizard::new(), maybe_screensaver.take()));
} else {
break None;
}
}
x if x == edit => break Some(Mode::Edit(EditMode::new(ctx, ui))),
x if x == tutorial => break Some(Mode::Tutorial(TutorialMode::new(ctx, ui))),
x if x == debug => break Some(Mode::Debug(DebugMode::new(ctx, ui))),
x if x == mission => break Some(Mode::Mission(MissionEditMode::new(ctx, ui))),
x if x == abtest => break Some(Mode::ABTest(ABTestMode::new(ctx, ui))),
x if x == about => {
if wizard.acknowledge(
"About A/B Street",
vec![
"Author: Dustin Carlino (dabreegster@gmail.com)",
"http://github.com/dabreegster/abstreet",
"Map data from OpenStreetMap and King County GIS",
"",
"Press ENTER to continue",
],
) {
continue;
} else {
break None;
}
}
x if x == quit => {
// Not important to call before_quit... if we're here, we're bouncing around
// aimlessly anyway
std::process::exit(0);
}
_ => unreachable!(),
}
}
}
| {
let splash = !flags.no_splash
&& !format!("{}", flags.sim_flags.load.display()).contains("data/save");
let mut rng = flags.sim_flags.make_rng();
let mut game = GameState {
mode: Mode::Sandbox(SandboxMode::new(ctx)),
ui: UI::new(flags, ctx),
};
let rand_focus_pt = game
.ui
.primary
.map
.all_buildings()
.choose(&mut rng)
.and_then(|b| ID::Building(b.id).canonical_point(&game.ui.primary))
.or_else(|| {
game.ui
.primary
.map
.all_lanes()
.choose(&mut rng)
.and_then(|l| ID::Lane(l.id).canonical_point(&game.ui.primary))
})
.expect("Can't get canonical_point of a random building or lane");
if splash {
ctx.canvas.center_on_map_pt(rand_focus_pt);
game.mode = Mode::SplashScreen(
Wizard::new(),
Some((
Screensaver::start_bounce(&mut rng, ctx.canvas, &game.ui.primary.map),
rng,
)),
);
} else {
match abstutil::read_json::<EditorState>("../editor_state.json") {
Ok(ref loaded) if game.ui.primary.map.get_name() == &loaded.map_name => {
println!("Loaded previous editor_state.json");
ctx.canvas.cam_x = loaded.cam_x;
ctx.canvas.cam_y = loaded.cam_y;
ctx.canvas.cam_zoom = loaded.cam_zoom;
}
_ => {
println!("Couldn't load editor_state.json or it's for a different map, so just focusing on an arbitrary building");
ctx.canvas.center_on_map_pt(rand_focus_pt);
}
}
}
game
} | identifier_body |
sparse.rs | //! # Sparse vector
//!
//! Wrapping a `Vec<(usize, _)>`, fixed size.
use std::{fmt, mem};
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::HashSet;
use std::fmt::{Debug, Display};
use std::iter::Sum;
use std::marker::PhantomData;
use std::ops::{Add, AddAssign, DivAssign, Mul, MulAssign, Neg};
use std::slice::Iter;
use num::{One, Zero};
use crate::algorithm::utilities::remove_sparse_indices;
use crate::data::linear_algebra::SparseTuple;
use crate::data::linear_algebra::traits::{SparseComparator, SparseElement};
use crate::data::linear_algebra::traits::NotZero;
use crate::data::linear_algebra::vector::{DenseVector, Vector};
/// A sparse vector using a `Vec` with (row, value) combinations as back-end. Indices start at
/// `0`.
///
/// TODO(ENHANCEMENT): Consider making this backed by a `HashMap`.
#[derive(Eq, PartialEq, Clone, Debug)]
pub struct Sparse<F, C> {
data: Vec<SparseTuple<F>>,
len: usize,
/// The level that comparison is done at: a single reference to the underlying data.
phantom_comparison_type: PhantomData<C>,
}
impl<F, C> Sparse<F, C> {
fn get_data_index(&self, i: usize) -> Result<usize, usize> {
self.data.binary_search_by_key(&i, |&(index, _)| index)
}
fn set_zero(&mut self, i: usize) {
if let Ok(index) = self.get_data_index(i) {
self.data.remove(index);
}
}
/// Increase the length of the vector by passing with zeros.
pub fn extend(&mut self, extra_len: usize) {
self.len += extra_len;
}
/// Convert the inner data structure into an iterator, consuming the struct.
pub fn into_iter(self) -> impl Iterator<Item=SparseTuple<F>> {
self.data.into_iter()
}
}
impl<F, C> Vector<F> for Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
type Inner = SparseTuple<F>;
/// Create a vector of length `len` from `data`.
///
/// Requires that values close to zero are already filtered.
fn new(data: Vec<Self::Inner>, len: usize) -> Self {
debug_assert!(data.iter().all(|&(i, _)| i < len));
debug_assert!(data.is_sorted_by_key(|&(i, _)| i));
debug_assert!(data.iter().all(|(_, v)| v.borrow().is_not_zero()));
debug_assert_ne!(len, 0);
debug_assert!(data.len() <= len);
Self {
data,
len,
phantom_comparison_type: PhantomData,
}
}
fn sparse_inner_product<'a, H, G: 'a, I: Iterator<Item=&'a SparseTuple<G>>>(&self, column: I) -> H
where
H: Zero + AddAssign<F>,
for<'r> &'r F: Mul<&'r G, Output=F>,
{
let mut total = H::zero();
let mut i = 0;
for (index, value) in column {
while i < self.data.len() && self.data[i].0 < *index {
i += 1;
}
if i < self.data.len() && self.data[i].0 == *index {
total += &self.data[i].1 * value;
i += 1;
}
if i == self.len {
break;
}
}
total
}
/// Append a non-zero value.
fn push_value(&mut self, value: F) {
debug_assert!(value.borrow().is_not_zero());
self.data.push((self.len, value));
self.len += 1;
}
/// Set the value at index `i` to `value`.
///
/// # Arguments
///
/// * `i`: Index of the value. New tuple will be inserted, potentially causing many values to
/// be shifted.
/// * `value`: Value to be taken at index `i`. Should not be very close to zero to avoid
/// memory usage and numerical error build-up.
fn | (&mut self, i: usize, value: F) {
debug_assert!(i < self.len);
debug_assert!(value.borrow().is_not_zero());
match self.get_data_index(i) {
Ok(index) => self.data[index].1 = value,
Err(index) => self.data.insert(index, (i, value)),
}
}
fn get(&self, index: usize) -> Option<&F> {
debug_assert!(index < self.len);
self.get_data_index(index).ok().map(|i| &self.data[i].1)
}
/// Remove elements.
///
/// # Arguments
///
/// * `indices` is assumed sorted.
fn remove_indices(&mut self, indices: &[usize]) {
debug_assert!(indices.is_sorted());
// All values are unique
debug_assert!(indices.iter().collect::<HashSet<_>>().len() == indices.len());
debug_assert!(indices.iter().all(|&i| i < self.len));
debug_assert!(indices.len() < self.len);
remove_sparse_indices(&mut self.data, indices);
self.len -= indices.len();
}
fn iter_values(&self) -> Iter<Self::Inner> {
self.data.iter()
}
/// The length of this vector.
fn len(&self) -> usize {
self.len
}
/// Whether this vector has zero size.
fn is_empty(&self) -> bool {
self.len == 0
}
/// The size of this vector in memory.
fn size(&self) -> usize {
self.data.len()
}
}
impl<F, C> Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
/// Create a `SparseVector` representation of standard basis unit vector e_i.
///
/// # Arguments
///
/// * `i`: Only index where there should be a 1. Note that indexing starts at zero, and runs
/// until (not through) `len`.
/// * `len`: Size of the `SparseVector`.
#[must_use]
pub fn standard_basis_vector(i: usize, len: usize) -> Self
where
F: One + Clone,
{
debug_assert!(i < len);
Self::new(vec![(i, F::one())], len)
}
/// Add the multiple of another row to this row.
///
/// # Arguments
///
/// * `multiple`: Non-zero constant that all elements of the `other` vector are multiplied with.
/// * `other`: Vector to add a multiple of to this vector.
///
/// # Return value
///
/// A new `SparseVector`.
///
/// # Note
///
/// The implementation of this method doesn't look pretty, but it seems to be reasonably fast.
/// If this method is too slow, it might be wise to consider the switching of the `SparseVector`
/// storage backend from a `Vec` to a `HashMap`.
pub fn add_multiple_of_row<H>(&mut self, multiple: &F, other: &Sparse<F, C>)
where
H: Zero + Add<F, Output=H>,
F: Add<F, Output=H> + From<H>,
for<'r> &'r F: Mul<&'r F, Output=F>,
{
debug_assert_eq!(other.len(), self.len());
debug_assert!(multiple.borrow().is_not_zero());
let mut new_tuples = Vec::new();
let mut j = 0;
let old_data = mem::replace(&mut self.data, Vec::with_capacity(0));
for (i, value) in old_data {
while j < other.data.len() && other.data[j].0 < i {
let new_value = multiple * &other.data[j].1;
new_tuples.push((other.data[j].0, new_value.into()));
j += 1;
}
if j < other.data.len() && i == other.data[j].0 {
let new_value = value + multiple * &other.data[j].1;
if !new_value.is_zero() {
new_tuples.push((i, new_value.into()));
}
j += 1;
} else {
new_tuples.push((i, value));
}
}
for (j, value) in &other.data[j..] {
new_tuples.push((*j, multiple * value));
}
self.data = new_tuples;
}
/// Set the value at index `i` to `value`.
///
/// # Arguments
///
/// * `i`: Index of the value. New tuple will be inserted, potentially causing many values to
/// be shifted.
/// * `value`: Value to be taken at index `i`. Should not be very close to zero to avoid
/// memory usage and numerical error build-up.
pub fn shift_value<G>(&mut self, i: usize, value: G)
where
F: PartialEq<G> + AddAssign<G> + From<G>,
for<'r> &'r G: Neg<Output=G>,
{
debug_assert!(i < self.len);
match self.get_data_index(i) {
Ok(index) => {
if self.data[index].1 == -&value {
self.set_zero(i);
} else {
self.data[index].1 += value;
}
},
Err(index) => self.data.insert(index, (i, From::from(value))),
}
}
/// Multiply each element of the vector by a value.
pub fn element_wise_multiply(&mut self, value: &F)
where
for<'r> F: NotZero + MulAssign<&'r F>,
{
debug_assert!(value.borrow().is_not_zero());
for (_, v) in &mut self.data {
*v *= value;
}
self.data.retain(|(_, v)| v.is_not_zero());
}
/// Divide each element of the vector by a value.
pub fn element_wise_divide(&mut self, value: &F)
where
for<'r> F: NotZero + DivAssign<&'r F>,
{
debug_assert!(value.borrow().is_not_zero());
for (_, v) in &mut self.data {
*v /= value;
}
self.data.retain(|(_, v)| v.is_not_zero());
}
}
impl<F, C> Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
/// Calculate the inner product between two vectors.
///
/// # Arguments
///
/// * `other`: Vector to calculate inner product with.
///
/// # Return value
///
/// The inner product.
#[must_use]
pub fn inner_product_with_dense<'a, F2, O: 'a>(&'a self, other: &'a DenseVector<F2>) -> O
where
F: Borrow<O>,
F2: Borrow<O> + PartialEq + Display + Debug,
O: Sum,
&'a O: Mul<&'a O, Output=O>,
{
debug_assert_eq!(other.len(), self.len());
self.data.iter().map(|(i, value)| other[*i].borrow() * value.borrow()).sum()
}
/// Calculate the inner product between two vectors.
///
/// # Arguments
///
/// * `other`: Vector to calculate inner product with.
///
/// # Return value
///
/// The inner product.
#[must_use]
pub fn inner_product<'a, O, F2>(&'a self, other: &'a Sparse<F2, C>) -> O
where
O: Zero + AddAssign<C>,
F2: SparseElement<C>,
// We choose to have multiplication output at the C level, because it would also be nonzero
// if both F and F2 values are not zero.
&'a C: Mul<&'a C, Output=C>,
{
debug_assert_eq!(other.len(), self.len());
debug_assert!(self.data.iter().all(|(_, v)| v.borrow().is_not_zero()));
debug_assert!(other.data.iter().all(|(_, v)| v.borrow().is_not_zero()));
let mut self_lowest = 0;
let mut other_lowest = 0;
let mut total = O::zero();
while self_lowest < self.data.len() && other_lowest < other.data.len() {
let self_sought = self.data[self_lowest].0;
let other_sought = other.data[other_lowest].0;
match self_sought.cmp(&other_sought) {
Ordering::Less => {
match self.data[self_lowest..].binary_search_by_key(&other_sought, |&(i, _)| i) {
Err(diff) => {
self_lowest += diff;
other_lowest += 1;
},
Ok(diff) => {
total += self.data[self_lowest + diff].1.borrow() * other.data[other_lowest].1.borrow();
self_lowest += diff + 1;
other_lowest += 1;
},
}
},
Ordering::Greater => {
match other.data[other_lowest..].binary_search_by_key(&self_sought, |&(i, _)| i) {
Err(diff) => {
self_lowest += 1;
other_lowest += diff;
},
Ok(diff) => {
total += self.data[self_lowest].1.borrow() * other.data[other_lowest + diff].1.borrow();
self_lowest += 1;
other_lowest += diff + 1;
},
}
},
Ordering::Equal => {
total += self.data[self_lowest].1.borrow() * other.data[other_lowest].1.borrow();
self_lowest += 1;
other_lowest += 1;
},
}
}
total
}
}
impl<F: SparseElement<C>, C: SparseComparator> Display for Sparse<F, C> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for (index, value) in &self.data {
writeln!(f, "({} {}), ", index, value)?;
}
writeln!(f)
}
}
| set | identifier_name |
sparse.rs | //! # Sparse vector
//!
//! Wrapping a `Vec<(usize, _)>`, fixed size.
use std::{fmt, mem};
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::HashSet;
use std::fmt::{Debug, Display};
use std::iter::Sum;
use std::marker::PhantomData;
use std::ops::{Add, AddAssign, DivAssign, Mul, MulAssign, Neg};
use std::slice::Iter;
use num::{One, Zero};
use crate::algorithm::utilities::remove_sparse_indices;
use crate::data::linear_algebra::SparseTuple;
use crate::data::linear_algebra::traits::{SparseComparator, SparseElement};
use crate::data::linear_algebra::traits::NotZero;
use crate::data::linear_algebra::vector::{DenseVector, Vector};
/// A sparse vector using a `Vec` with (row, value) combinations as back-end. Indices start at
/// `0`.
///
/// TODO(ENHANCEMENT): Consider making this backed by a `HashMap`.
#[derive(Eq, PartialEq, Clone, Debug)]
pub struct Sparse<F, C> {
data: Vec<SparseTuple<F>>,
len: usize,
/// The level that comparison is done at: a single reference to the underlying data.
phantom_comparison_type: PhantomData<C>,
}
impl<F, C> Sparse<F, C> {
fn get_data_index(&self, i: usize) -> Result<usize, usize> {
self.data.binary_search_by_key(&i, |&(index, _)| index)
}
fn set_zero(&mut self, i: usize) {
if let Ok(index) = self.get_data_index(i) {
self.data.remove(index);
}
}
/// Increase the length of the vector by passing with zeros.
pub fn extend(&mut self, extra_len: usize) {
self.len += extra_len;
}
/// Convert the inner data structure into an iterator, consuming the struct.
pub fn into_iter(self) -> impl Iterator<Item=SparseTuple<F>> {
self.data.into_iter()
}
}
impl<F, C> Vector<F> for Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
type Inner = SparseTuple<F>;
/// Create a vector of length `len` from `data`.
///
/// Requires that values close to zero are already filtered.
fn new(data: Vec<Self::Inner>, len: usize) -> Self {
debug_assert!(data.iter().all(|&(i, _)| i < len));
debug_assert!(data.is_sorted_by_key(|&(i, _)| i));
debug_assert!(data.iter().all(|(_, v)| v.borrow().is_not_zero()));
debug_assert_ne!(len, 0);
debug_assert!(data.len() <= len);
Self {
data,
len,
phantom_comparison_type: PhantomData,
}
}
fn sparse_inner_product<'a, H, G: 'a, I: Iterator<Item=&'a SparseTuple<G>>>(&self, column: I) -> H
where
H: Zero + AddAssign<F>,
for<'r> &'r F: Mul<&'r G, Output=F>,
{
let mut total = H::zero();
let mut i = 0;
for (index, value) in column {
while i < self.data.len() && self.data[i].0 < *index {
i += 1;
}
if i < self.data.len() && self.data[i].0 == *index {
total += &self.data[i].1 * value;
i += 1;
}
if i == self.len {
break;
}
}
total
}
/// Append a non-zero value.
fn push_value(&mut self, value: F) {
debug_assert!(value.borrow().is_not_zero());
self.data.push((self.len, value));
self.len += 1;
}
/// Set the value at index `i` to `value`.
///
/// # Arguments
///
/// * `i`: Index of the value. New tuple will be inserted, potentially causing many values to
/// be shifted.
/// * `value`: Value to be taken at index `i`. Should not be very close to zero to avoid
/// memory usage and numerical error build-up.
fn set(&mut self, i: usize, value: F) {
debug_assert!(i < self.len);
debug_assert!(value.borrow().is_not_zero());
match self.get_data_index(i) {
Ok(index) => self.data[index].1 = value,
Err(index) => self.data.insert(index, (i, value)),
}
}
fn get(&self, index: usize) -> Option<&F> {
debug_assert!(index < self.len);
self.get_data_index(index).ok().map(|i| &self.data[i].1)
}
/// Remove elements.
///
/// # Arguments
///
/// * `indices` is assumed sorted.
fn remove_indices(&mut self, indices: &[usize]) {
debug_assert!(indices.is_sorted());
// All values are unique
debug_assert!(indices.iter().collect::<HashSet<_>>().len() == indices.len());
debug_assert!(indices.iter().all(|&i| i < self.len));
debug_assert!(indices.len() < self.len);
remove_sparse_indices(&mut self.data, indices);
self.len -= indices.len();
}
fn iter_values(&self) -> Iter<Self::Inner> {
self.data.iter()
}
/// The length of this vector.
fn len(&self) -> usize {
self.len
}
/// Whether this vector has zero size.
fn is_empty(&self) -> bool {
self.len == 0
}
/// The size of this vector in memory.
fn size(&self) -> usize {
self.data.len()
}
}
impl<F, C> Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
/// Create a `SparseVector` representation of standard basis unit vector e_i.
///
/// # Arguments
///
/// * `i`: Only index where there should be a 1. Note that indexing starts at zero, and runs
/// until (not through) `len`.
/// * `len`: Size of the `SparseVector`.
#[must_use]
pub fn standard_basis_vector(i: usize, len: usize) -> Self
where
F: One + Clone,
{
debug_assert!(i < len);
Self::new(vec![(i, F::one())], len)
}
/// Add the multiple of another row to this row.
///
/// # Arguments
///
/// * `multiple`: Non-zero constant that all elements of the `other` vector are multiplied with.
/// * `other`: Vector to add a multiple of to this vector.
///
/// # Return value
///
/// A new `SparseVector`.
///
/// # Note
///
/// The implementation of this method doesn't look pretty, but it seems to be reasonably fast.
/// If this method is too slow, it might be wise to consider the switching of the `SparseVector`
/// storage backend from a `Vec` to a `HashMap`.
pub fn add_multiple_of_row<H>(&mut self, multiple: &F, other: &Sparse<F, C>)
where
H: Zero + Add<F, Output=H>,
F: Add<F, Output=H> + From<H>,
for<'r> &'r F: Mul<&'r F, Output=F>,
{
debug_assert_eq!(other.len(), self.len());
debug_assert!(multiple.borrow().is_not_zero());
let mut new_tuples = Vec::new();
let mut j = 0;
let old_data = mem::replace(&mut self.data, Vec::with_capacity(0));
for (i, value) in old_data {
while j < other.data.len() && other.data[j].0 < i {
let new_value = multiple * &other.data[j].1;
new_tuples.push((other.data[j].0, new_value.into()));
j += 1;
}
if j < other.data.len() && i == other.data[j].0 {
let new_value = value + multiple * &other.data[j].1;
if !new_value.is_zero() {
new_tuples.push((i, new_value.into()));
}
j += 1;
} else {
new_tuples.push((i, value));
}
}
for (j, value) in &other.data[j..] {
new_tuples.push((*j, multiple * value));
}
self.data = new_tuples;
}
/// Set the value at index `i` to `value`.
///
/// # Arguments
///
/// * `i`: Index of the value. New tuple will be inserted, potentially causing many values to
/// be shifted.
/// * `value`: Value to be taken at index `i`. Should not be very close to zero to avoid
/// memory usage and numerical error build-up.
pub fn shift_value<G>(&mut self, i: usize, value: G)
where
F: PartialEq<G> + AddAssign<G> + From<G>,
for<'r> &'r G: Neg<Output=G>,
{
debug_assert!(i < self.len);
match self.get_data_index(i) {
Ok(index) => {
if self.data[index].1 == -&value {
self.set_zero(i);
} else {
self.data[index].1 += value;
}
},
Err(index) => self.data.insert(index, (i, From::from(value))),
}
}
/// Multiply each element of the vector by a value.
pub fn element_wise_multiply(&mut self, value: &F)
where
for<'r> F: NotZero + MulAssign<&'r F>,
{
debug_assert!(value.borrow().is_not_zero());
for (_, v) in &mut self.data {
*v *= value;
}
self.data.retain(|(_, v)| v.is_not_zero());
}
/// Divide each element of the vector by a value.
pub fn element_wise_divide(&mut self, value: &F)
where
for<'r> F: NotZero + DivAssign<&'r F>,
{
debug_assert!(value.borrow().is_not_zero());
for (_, v) in &mut self.data {
*v /= value;
}
self.data.retain(|(_, v)| v.is_not_zero());
}
}
impl<F, C> Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
/// Calculate the inner product between two vectors.
///
/// # Arguments
///
/// * `other`: Vector to calculate inner product with.
///
/// # Return value
///
/// The inner product.
#[must_use]
pub fn inner_product_with_dense<'a, F2, O: 'a>(&'a self, other: &'a DenseVector<F2>) -> O
where
F: Borrow<O>,
F2: Borrow<O> + PartialEq + Display + Debug,
O: Sum,
&'a O: Mul<&'a O, Output=O>,
{
debug_assert_eq!(other.len(), self.len());
self.data.iter().map(|(i, value)| other[*i].borrow() * value.borrow()).sum()
}
/// Calculate the inner product between two vectors.
///
/// # Arguments
///
/// * `other`: Vector to calculate inner product with.
///
/// # Return value
///
/// The inner product.
#[must_use]
pub fn inner_product<'a, O, F2>(&'a self, other: &'a Sparse<F2, C>) -> O
where
O: Zero + AddAssign<C>,
F2: SparseElement<C>,
// We choose to have multiplication output at the C level, because it would also be nonzero
// if both F and F2 values are not zero.
&'a C: Mul<&'a C, Output=C>,
{
debug_assert_eq!(other.len(), self.len());
debug_assert!(self.data.iter().all(|(_, v)| v.borrow().is_not_zero()));
debug_assert!(other.data.iter().all(|(_, v)| v.borrow().is_not_zero()));
let mut self_lowest = 0;
let mut other_lowest = 0;
let mut total = O::zero();
while self_lowest < self.data.len() && other_lowest < other.data.len() {
let self_sought = self.data[self_lowest].0;
let other_sought = other.data[other_lowest].0;
match self_sought.cmp(&other_sought) {
Ordering::Less => {
match self.data[self_lowest..].binary_search_by_key(&other_sought, |&(i, _)| i) {
Err(diff) => {
self_lowest += diff;
other_lowest += 1;
},
Ok(diff) => {
total += self.data[self_lowest + diff].1.borrow() * other.data[other_lowest].1.borrow();
self_lowest += diff + 1;
other_lowest += 1;
},
}
},
Ordering::Greater => {
match other.data[other_lowest..].binary_search_by_key(&self_sought, |&(i, _)| i) {
Err(diff) => {
self_lowest += 1;
other_lowest += diff;
},
Ok(diff) => {
total += self.data[self_lowest].1.borrow() * other.data[other_lowest + diff].1.borrow();
self_lowest += 1;
other_lowest += diff + 1;
},
}
},
Ordering::Equal => {
total += self.data[self_lowest].1.borrow() * other.data[other_lowest].1.borrow();
self_lowest += 1;
other_lowest += 1;
},
}
}
total
}
}
impl<F: SparseElement<C>, C: SparseComparator> Display for Sparse<F, C> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for (index, value) in &self.data {
writeln!(f, "({} {}), ", index, value)?;
} | }
} | writeln!(f) | random_line_split |
sparse.rs | //! # Sparse vector
//!
//! Wrapping a `Vec<(usize, _)>`, fixed size.
use std::{fmt, mem};
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::HashSet;
use std::fmt::{Debug, Display};
use std::iter::Sum;
use std::marker::PhantomData;
use std::ops::{Add, AddAssign, DivAssign, Mul, MulAssign, Neg};
use std::slice::Iter;
use num::{One, Zero};
use crate::algorithm::utilities::remove_sparse_indices;
use crate::data::linear_algebra::SparseTuple;
use crate::data::linear_algebra::traits::{SparseComparator, SparseElement};
use crate::data::linear_algebra::traits::NotZero;
use crate::data::linear_algebra::vector::{DenseVector, Vector};
/// A sparse vector using a `Vec` with (row, value) combinations as back-end. Indices start at
/// `0`.
///
/// TODO(ENHANCEMENT): Consider making this backed by a `HashMap`.
#[derive(Eq, PartialEq, Clone, Debug)]
pub struct Sparse<F, C> {
data: Vec<SparseTuple<F>>,
len: usize,
/// The level that comparison is done at: a single reference to the underlying data.
phantom_comparison_type: PhantomData<C>,
}
impl<F, C> Sparse<F, C> {
fn get_data_index(&self, i: usize) -> Result<usize, usize> {
self.data.binary_search_by_key(&i, |&(index, _)| index)
}
fn set_zero(&mut self, i: usize) {
if let Ok(index) = self.get_data_index(i) {
self.data.remove(index);
}
}
/// Increase the length of the vector by passing with zeros.
pub fn extend(&mut self, extra_len: usize) {
self.len += extra_len;
}
/// Convert the inner data structure into an iterator, consuming the struct.
pub fn into_iter(self) -> impl Iterator<Item=SparseTuple<F>> {
self.data.into_iter()
}
}
impl<F, C> Vector<F> for Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
type Inner = SparseTuple<F>;
/// Create a vector of length `len` from `data`.
///
/// Requires that values close to zero are already filtered.
fn new(data: Vec<Self::Inner>, len: usize) -> Self {
debug_assert!(data.iter().all(|&(i, _)| i < len));
debug_assert!(data.is_sorted_by_key(|&(i, _)| i));
debug_assert!(data.iter().all(|(_, v)| v.borrow().is_not_zero()));
debug_assert_ne!(len, 0);
debug_assert!(data.len() <= len);
Self {
data,
len,
phantom_comparison_type: PhantomData,
}
}
fn sparse_inner_product<'a, H, G: 'a, I: Iterator<Item=&'a SparseTuple<G>>>(&self, column: I) -> H
where
H: Zero + AddAssign<F>,
for<'r> &'r F: Mul<&'r G, Output=F>,
{
let mut total = H::zero();
let mut i = 0;
for (index, value) in column {
while i < self.data.len() && self.data[i].0 < *index {
i += 1;
}
if i < self.data.len() && self.data[i].0 == *index {
total += &self.data[i].1 * value;
i += 1;
}
if i == self.len {
break;
}
}
total
}
/// Append a non-zero value.
fn push_value(&mut self, value: F) {
debug_assert!(value.borrow().is_not_zero());
self.data.push((self.len, value));
self.len += 1;
}
/// Set the value at index `i` to `value`.
///
/// # Arguments
///
/// * `i`: Index of the value. New tuple will be inserted, potentially causing many values to
/// be shifted.
/// * `value`: Value to be taken at index `i`. Should not be very close to zero to avoid
/// memory usage and numerical error build-up.
fn set(&mut self, i: usize, value: F) {
debug_assert!(i < self.len);
debug_assert!(value.borrow().is_not_zero());
match self.get_data_index(i) {
Ok(index) => self.data[index].1 = value,
Err(index) => self.data.insert(index, (i, value)),
}
}
fn get(&self, index: usize) -> Option<&F> {
debug_assert!(index < self.len);
self.get_data_index(index).ok().map(|i| &self.data[i].1)
}
/// Remove elements.
///
/// # Arguments
///
/// * `indices` is assumed sorted.
fn remove_indices(&mut self, indices: &[usize]) {
debug_assert!(indices.is_sorted());
// All values are unique
debug_assert!(indices.iter().collect::<HashSet<_>>().len() == indices.len());
debug_assert!(indices.iter().all(|&i| i < self.len));
debug_assert!(indices.len() < self.len);
remove_sparse_indices(&mut self.data, indices);
self.len -= indices.len();
}
fn iter_values(&self) -> Iter<Self::Inner> {
self.data.iter()
}
/// The length of this vector.
fn len(&self) -> usize |
/// Whether this vector has zero size.
fn is_empty(&self) -> bool {
self.len == 0
}
/// The size of this vector in memory.
fn size(&self) -> usize {
self.data.len()
}
}
impl<F, C> Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
/// Create a `SparseVector` representation of standard basis unit vector e_i.
///
/// # Arguments
///
/// * `i`: Only index where there should be a 1. Note that indexing starts at zero, and runs
/// until (not through) `len`.
/// * `len`: Size of the `SparseVector`.
#[must_use]
pub fn standard_basis_vector(i: usize, len: usize) -> Self
where
F: One + Clone,
{
debug_assert!(i < len);
Self::new(vec![(i, F::one())], len)
}
/// Add the multiple of another row to this row.
///
/// # Arguments
///
/// * `multiple`: Non-zero constant that all elements of the `other` vector are multiplied with.
/// * `other`: Vector to add a multiple of to this vector.
///
/// # Return value
///
/// A new `SparseVector`.
///
/// # Note
///
/// The implementation of this method doesn't look pretty, but it seems to be reasonably fast.
/// If this method is too slow, it might be wise to consider the switching of the `SparseVector`
/// storage backend from a `Vec` to a `HashMap`.
pub fn add_multiple_of_row<H>(&mut self, multiple: &F, other: &Sparse<F, C>)
where
H: Zero + Add<F, Output=H>,
F: Add<F, Output=H> + From<H>,
for<'r> &'r F: Mul<&'r F, Output=F>,
{
debug_assert_eq!(other.len(), self.len());
debug_assert!(multiple.borrow().is_not_zero());
let mut new_tuples = Vec::new();
let mut j = 0;
let old_data = mem::replace(&mut self.data, Vec::with_capacity(0));
for (i, value) in old_data {
while j < other.data.len() && other.data[j].0 < i {
let new_value = multiple * &other.data[j].1;
new_tuples.push((other.data[j].0, new_value.into()));
j += 1;
}
if j < other.data.len() && i == other.data[j].0 {
let new_value = value + multiple * &other.data[j].1;
if !new_value.is_zero() {
new_tuples.push((i, new_value.into()));
}
j += 1;
} else {
new_tuples.push((i, value));
}
}
for (j, value) in &other.data[j..] {
new_tuples.push((*j, multiple * value));
}
self.data = new_tuples;
}
/// Set the value at index `i` to `value`.
///
/// # Arguments
///
/// * `i`: Index of the value. New tuple will be inserted, potentially causing many values to
/// be shifted.
/// * `value`: Value to be taken at index `i`. Should not be very close to zero to avoid
/// memory usage and numerical error build-up.
pub fn shift_value<G>(&mut self, i: usize, value: G)
where
F: PartialEq<G> + AddAssign<G> + From<G>,
for<'r> &'r G: Neg<Output=G>,
{
debug_assert!(i < self.len);
match self.get_data_index(i) {
Ok(index) => {
if self.data[index].1 == -&value {
self.set_zero(i);
} else {
self.data[index].1 += value;
}
},
Err(index) => self.data.insert(index, (i, From::from(value))),
}
}
/// Multiply each element of the vector by a value.
pub fn element_wise_multiply(&mut self, value: &F)
where
for<'r> F: NotZero + MulAssign<&'r F>,
{
debug_assert!(value.borrow().is_not_zero());
for (_, v) in &mut self.data {
*v *= value;
}
self.data.retain(|(_, v)| v.is_not_zero());
}
/// Divide each element of the vector by a value.
pub fn element_wise_divide(&mut self, value: &F)
where
for<'r> F: NotZero + DivAssign<&'r F>,
{
debug_assert!(value.borrow().is_not_zero());
for (_, v) in &mut self.data {
*v /= value;
}
self.data.retain(|(_, v)| v.is_not_zero());
}
}
impl<F, C> Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
/// Calculate the inner product between two vectors.
///
/// # Arguments
///
/// * `other`: Vector to calculate inner product with.
///
/// # Return value
///
/// The inner product.
#[must_use]
pub fn inner_product_with_dense<'a, F2, O: 'a>(&'a self, other: &'a DenseVector<F2>) -> O
where
F: Borrow<O>,
F2: Borrow<O> + PartialEq + Display + Debug,
O: Sum,
&'a O: Mul<&'a O, Output=O>,
{
debug_assert_eq!(other.len(), self.len());
self.data.iter().map(|(i, value)| other[*i].borrow() * value.borrow()).sum()
}
/// Calculate the inner product between two vectors.
///
/// # Arguments
///
/// * `other`: Vector to calculate inner product with.
///
/// # Return value
///
/// The inner product.
#[must_use]
pub fn inner_product<'a, O, F2>(&'a self, other: &'a Sparse<F2, C>) -> O
where
O: Zero + AddAssign<C>,
F2: SparseElement<C>,
// We choose to have multiplication output at the C level, because it would also be nonzero
// if both F and F2 values are not zero.
&'a C: Mul<&'a C, Output=C>,
{
debug_assert_eq!(other.len(), self.len());
debug_assert!(self.data.iter().all(|(_, v)| v.borrow().is_not_zero()));
debug_assert!(other.data.iter().all(|(_, v)| v.borrow().is_not_zero()));
let mut self_lowest = 0;
let mut other_lowest = 0;
let mut total = O::zero();
while self_lowest < self.data.len() && other_lowest < other.data.len() {
let self_sought = self.data[self_lowest].0;
let other_sought = other.data[other_lowest].0;
match self_sought.cmp(&other_sought) {
Ordering::Less => {
match self.data[self_lowest..].binary_search_by_key(&other_sought, |&(i, _)| i) {
Err(diff) => {
self_lowest += diff;
other_lowest += 1;
},
Ok(diff) => {
total += self.data[self_lowest + diff].1.borrow() * other.data[other_lowest].1.borrow();
self_lowest += diff + 1;
other_lowest += 1;
},
}
},
Ordering::Greater => {
match other.data[other_lowest..].binary_search_by_key(&self_sought, |&(i, _)| i) {
Err(diff) => {
self_lowest += 1;
other_lowest += diff;
},
Ok(diff) => {
total += self.data[self_lowest].1.borrow() * other.data[other_lowest + diff].1.borrow();
self_lowest += 1;
other_lowest += diff + 1;
},
}
},
Ordering::Equal => {
total += self.data[self_lowest].1.borrow() * other.data[other_lowest].1.borrow();
self_lowest += 1;
other_lowest += 1;
},
}
}
total
}
}
impl<F: SparseElement<C>, C: SparseComparator> Display for Sparse<F, C> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for (index, value) in &self.data {
writeln!(f, "({} {}), ", index, value)?;
}
writeln!(f)
}
}
| {
self.len
} | identifier_body |
sparse.rs | //! # Sparse vector
//!
//! Wrapping a `Vec<(usize, _)>`, fixed size.
use std::{fmt, mem};
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::HashSet;
use std::fmt::{Debug, Display};
use std::iter::Sum;
use std::marker::PhantomData;
use std::ops::{Add, AddAssign, DivAssign, Mul, MulAssign, Neg};
use std::slice::Iter;
use num::{One, Zero};
use crate::algorithm::utilities::remove_sparse_indices;
use crate::data::linear_algebra::SparseTuple;
use crate::data::linear_algebra::traits::{SparseComparator, SparseElement};
use crate::data::linear_algebra::traits::NotZero;
use crate::data::linear_algebra::vector::{DenseVector, Vector};
/// A sparse vector using a `Vec` with (row, value) combinations as back-end. Indices start at
/// `0`.
///
/// TODO(ENHANCEMENT): Consider making this backed by a `HashMap`.
#[derive(Eq, PartialEq, Clone, Debug)]
pub struct Sparse<F, C> {
data: Vec<SparseTuple<F>>,
len: usize,
/// The level that comparison is done at: a single reference to the underlying data.
phantom_comparison_type: PhantomData<C>,
}
impl<F, C> Sparse<F, C> {
fn get_data_index(&self, i: usize) -> Result<usize, usize> {
self.data.binary_search_by_key(&i, |&(index, _)| index)
}
fn set_zero(&mut self, i: usize) {
if let Ok(index) = self.get_data_index(i) {
self.data.remove(index);
}
}
/// Increase the length of the vector by passing with zeros.
pub fn extend(&mut self, extra_len: usize) {
self.len += extra_len;
}
/// Convert the inner data structure into an iterator, consuming the struct.
pub fn into_iter(self) -> impl Iterator<Item=SparseTuple<F>> {
self.data.into_iter()
}
}
impl<F, C> Vector<F> for Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
type Inner = SparseTuple<F>;
/// Create a vector of length `len` from `data`.
///
/// Requires that values close to zero are already filtered.
fn new(data: Vec<Self::Inner>, len: usize) -> Self {
debug_assert!(data.iter().all(|&(i, _)| i < len));
debug_assert!(data.is_sorted_by_key(|&(i, _)| i));
debug_assert!(data.iter().all(|(_, v)| v.borrow().is_not_zero()));
debug_assert_ne!(len, 0);
debug_assert!(data.len() <= len);
Self {
data,
len,
phantom_comparison_type: PhantomData,
}
}
fn sparse_inner_product<'a, H, G: 'a, I: Iterator<Item=&'a SparseTuple<G>>>(&self, column: I) -> H
where
H: Zero + AddAssign<F>,
for<'r> &'r F: Mul<&'r G, Output=F>,
{
let mut total = H::zero();
let mut i = 0;
for (index, value) in column {
while i < self.data.len() && self.data[i].0 < *index {
i += 1;
}
if i < self.data.len() && self.data[i].0 == *index {
total += &self.data[i].1 * value;
i += 1;
}
if i == self.len {
break;
}
}
total
}
/// Append a non-zero value.
fn push_value(&mut self, value: F) {
debug_assert!(value.borrow().is_not_zero());
self.data.push((self.len, value));
self.len += 1;
}
/// Set the value at index `i` to `value`.
///
/// # Arguments
///
/// * `i`: Index of the value. New tuple will be inserted, potentially causing many values to
/// be shifted.
/// * `value`: Value to be taken at index `i`. Should not be very close to zero to avoid
/// memory usage and numerical error build-up.
fn set(&mut self, i: usize, value: F) {
debug_assert!(i < self.len);
debug_assert!(value.borrow().is_not_zero());
match self.get_data_index(i) {
Ok(index) => self.data[index].1 = value,
Err(index) => self.data.insert(index, (i, value)),
}
}
fn get(&self, index: usize) -> Option<&F> {
debug_assert!(index < self.len);
self.get_data_index(index).ok().map(|i| &self.data[i].1)
}
/// Remove elements.
///
/// # Arguments
///
/// * `indices` is assumed sorted.
fn remove_indices(&mut self, indices: &[usize]) {
debug_assert!(indices.is_sorted());
// All values are unique
debug_assert!(indices.iter().collect::<HashSet<_>>().len() == indices.len());
debug_assert!(indices.iter().all(|&i| i < self.len));
debug_assert!(indices.len() < self.len);
remove_sparse_indices(&mut self.data, indices);
self.len -= indices.len();
}
fn iter_values(&self) -> Iter<Self::Inner> {
self.data.iter()
}
/// The length of this vector.
fn len(&self) -> usize {
self.len
}
/// Whether this vector has zero size.
fn is_empty(&self) -> bool {
self.len == 0
}
/// The size of this vector in memory.
fn size(&self) -> usize {
self.data.len()
}
}
impl<F, C> Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
/// Create a `SparseVector` representation of standard basis unit vector e_i.
///
/// # Arguments
///
/// * `i`: Only index where there should be a 1. Note that indexing starts at zero, and runs
/// until (not through) `len`.
/// * `len`: Size of the `SparseVector`.
#[must_use]
pub fn standard_basis_vector(i: usize, len: usize) -> Self
where
F: One + Clone,
{
debug_assert!(i < len);
Self::new(vec![(i, F::one())], len)
}
/// Add the multiple of another row to this row.
///
/// # Arguments
///
/// * `multiple`: Non-zero constant that all elements of the `other` vector are multiplied with.
/// * `other`: Vector to add a multiple of to this vector.
///
/// # Return value
///
/// A new `SparseVector`.
///
/// # Note
///
/// The implementation of this method doesn't look pretty, but it seems to be reasonably fast.
/// If this method is too slow, it might be wise to consider the switching of the `SparseVector`
/// storage backend from a `Vec` to a `HashMap`.
pub fn add_multiple_of_row<H>(&mut self, multiple: &F, other: &Sparse<F, C>)
where
H: Zero + Add<F, Output=H>,
F: Add<F, Output=H> + From<H>,
for<'r> &'r F: Mul<&'r F, Output=F>,
{
debug_assert_eq!(other.len(), self.len());
debug_assert!(multiple.borrow().is_not_zero());
let mut new_tuples = Vec::new();
let mut j = 0;
let old_data = mem::replace(&mut self.data, Vec::with_capacity(0));
for (i, value) in old_data {
while j < other.data.len() && other.data[j].0 < i {
let new_value = multiple * &other.data[j].1;
new_tuples.push((other.data[j].0, new_value.into()));
j += 1;
}
if j < other.data.len() && i == other.data[j].0 {
let new_value = value + multiple * &other.data[j].1;
if !new_value.is_zero() {
new_tuples.push((i, new_value.into()));
}
j += 1;
} else {
new_tuples.push((i, value));
}
}
for (j, value) in &other.data[j..] {
new_tuples.push((*j, multiple * value));
}
self.data = new_tuples;
}
/// Set the value at index `i` to `value`.
///
/// # Arguments
///
/// * `i`: Index of the value. New tuple will be inserted, potentially causing many values to
/// be shifted.
/// * `value`: Value to be taken at index `i`. Should not be very close to zero to avoid
/// memory usage and numerical error build-up.
pub fn shift_value<G>(&mut self, i: usize, value: G)
where
F: PartialEq<G> + AddAssign<G> + From<G>,
for<'r> &'r G: Neg<Output=G>,
{
debug_assert!(i < self.len);
match self.get_data_index(i) {
Ok(index) => {
if self.data[index].1 == -&value {
self.set_zero(i);
} else {
self.data[index].1 += value;
}
},
Err(index) => self.data.insert(index, (i, From::from(value))),
}
}
/// Multiply each element of the vector by a value.
pub fn element_wise_multiply(&mut self, value: &F)
where
for<'r> F: NotZero + MulAssign<&'r F>,
{
debug_assert!(value.borrow().is_not_zero());
for (_, v) in &mut self.data {
*v *= value;
}
self.data.retain(|(_, v)| v.is_not_zero());
}
/// Divide each element of the vector by a value.
pub fn element_wise_divide(&mut self, value: &F)
where
for<'r> F: NotZero + DivAssign<&'r F>,
{
debug_assert!(value.borrow().is_not_zero());
for (_, v) in &mut self.data {
*v /= value;
}
self.data.retain(|(_, v)| v.is_not_zero());
}
}
impl<F, C> Sparse<F, C>
where
F: SparseElement<C>,
C: SparseComparator,
{
/// Calculate the inner product between two vectors.
///
/// # Arguments
///
/// * `other`: Vector to calculate inner product with.
///
/// # Return value
///
/// The inner product.
#[must_use]
pub fn inner_product_with_dense<'a, F2, O: 'a>(&'a self, other: &'a DenseVector<F2>) -> O
where
F: Borrow<O>,
F2: Borrow<O> + PartialEq + Display + Debug,
O: Sum,
&'a O: Mul<&'a O, Output=O>,
{
debug_assert_eq!(other.len(), self.len());
self.data.iter().map(|(i, value)| other[*i].borrow() * value.borrow()).sum()
}
/// Calculate the inner product between two vectors.
///
/// # Arguments
///
/// * `other`: Vector to calculate inner product with.
///
/// # Return value
///
/// The inner product.
#[must_use]
pub fn inner_product<'a, O, F2>(&'a self, other: &'a Sparse<F2, C>) -> O
where
O: Zero + AddAssign<C>,
F2: SparseElement<C>,
// We choose to have multiplication output at the C level, because it would also be nonzero
// if both F and F2 values are not zero.
&'a C: Mul<&'a C, Output=C>,
{
debug_assert_eq!(other.len(), self.len());
debug_assert!(self.data.iter().all(|(_, v)| v.borrow().is_not_zero()));
debug_assert!(other.data.iter().all(|(_, v)| v.borrow().is_not_zero()));
let mut self_lowest = 0;
let mut other_lowest = 0;
let mut total = O::zero();
while self_lowest < self.data.len() && other_lowest < other.data.len() {
let self_sought = self.data[self_lowest].0;
let other_sought = other.data[other_lowest].0;
match self_sought.cmp(&other_sought) {
Ordering::Less => | ,
Ordering::Greater => {
match other.data[other_lowest..].binary_search_by_key(&self_sought, |&(i, _)| i) {
Err(diff) => {
self_lowest += 1;
other_lowest += diff;
},
Ok(diff) => {
total += self.data[self_lowest].1.borrow() * other.data[other_lowest + diff].1.borrow();
self_lowest += 1;
other_lowest += diff + 1;
},
}
},
Ordering::Equal => {
total += self.data[self_lowest].1.borrow() * other.data[other_lowest].1.borrow();
self_lowest += 1;
other_lowest += 1;
},
}
}
total
}
}
impl<F: SparseElement<C>, C: SparseComparator> Display for Sparse<F, C> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for (index, value) in &self.data {
writeln!(f, "({} {}), ", index, value)?;
}
writeln!(f)
}
}
| {
match self.data[self_lowest..].binary_search_by_key(&other_sought, |&(i, _)| i) {
Err(diff) => {
self_lowest += diff;
other_lowest += 1;
},
Ok(diff) => {
total += self.data[self_lowest + diff].1.borrow() * other.data[other_lowest].1.borrow();
self_lowest += diff + 1;
other_lowest += 1;
},
}
} | conditional_block |
lib.rs | // Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
#![deny(warnings)]
// Enable all clippy lints except for many of the pedantic ones. It's a shame this needs to be copied and pasted across crates, but there doesn't appear to be a way to include inner attributes from a common source.
#![deny(
clippy::all,
clippy::default_trait_access,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::needless_continue,
clippy::unseparated_literal_suffix,
clippy::used_underscore_binding
)]
// It is often more clear to show that nothing is being moved.
#![allow(clippy::match_ref_pats)]
// Subjective style.
#![allow(
clippy::len_without_is_empty,
clippy::redundant_field_names,
clippy::too_many_arguments
)]
// Default isn't as big a deal as people seem to think it is.
#![allow(clippy::new_without_default, clippy::new_ret_no_self)]
// Arc<Mutex> can be more clear than needing to grok Orderings:
#![allow(clippy::mutex_atomic)]
use std::collections::HashMap;
use std::env;
use std::future::Future;
use std::sync::Arc;
use std::time::{Duration, Instant};
use futures::future::FutureExt;
use itertools::Itertools;
use parking_lot::Mutex;
use tokio::runtime::{Builder, Handle, Runtime};
use tokio::task::{Id, JoinError, JoinHandle, JoinSet};
/// Copy our (thread-local or task-local) stdio destination and current workunit parent into
/// the task. The former ensures that when a pantsd thread kicks off a future, any stdio done
/// by it ends up in the pantsd log as we expect. The latter ensures that when a new workunit
/// is created it has an accurate handle to its parent.
fn future_with_correct_context<F: Future>(future: F) -> impl Future<Output = F::Output> {
let stdio_destination = stdio::get_destination();
let workunit_store_handle = workunit_store::get_workunit_store_handle();
// NB: It is important that the first portion of this method is synchronous (meaning that this
// method cannot be `async`), because that means that it will run on the thread that calls it.
// The second, async portion of the method will run in the spawned Task.
stdio::scope_task_destination(stdio_destination, async move {
workunit_store::scope_task_workunit_store_handle(workunit_store_handle, future).await
})
}
///
/// Executors come in two flavors:
/// * "borrowed"
/// * Created with `Self::new()`, or `self::to_borrowed()`.
/// * A borrowed Executor will not be shut down when all handles are dropped, and shutdown
/// methods will have no impact.
/// * Used when multiple runs of Pants will borrow a single Executor owned by `pantsd`, and in
/// unit tests where the Runtime is created by macros.
/// * "owned"
/// * Created with `Self::new_owned()`.
/// * When all handles of a owned Executor are dropped, its Runtime will be shut down.
/// Additionally, the explicit shutdown methods can be used to shut down the Executor for all
/// clones.
///
#[derive(Debug, Clone)]
pub struct Executor {
runtime: Arc<Mutex<Option<Runtime>>>,
handle: Handle,
}
impl Executor {
///
/// Creates an Executor for an existing tokio::Runtime (generally provided by tokio's macros).
///
/// The returned Executor will have a lifecycle independent of the Runtime, meaning that dropping
/// all clones of the Executor will not cause the Runtime to be shut down. Likewise, the owner of
/// the Runtime must ensure that it is kept alive longer than all Executor instances, because
/// existence of a Handle does not prevent a Runtime from shutting down. This is guaranteed by
/// the scope of the tokio::{test, main} macros.
///
pub fn new() -> Executor {
Self {
runtime: Arc::new(Mutex::new(None)),
handle: Handle::current(),
}
}
///
/// Gets a reference to a global static Executor with an owned tokio::Runtime, initializing it
/// with the given thread configuration if this is the first usage.
///
/// NB: The global static Executor eases lifecycle issues when consumed from Python, where we
/// need thread configurability, but also want to know reliably when the Runtime will shutdown
/// (which, because it is static, will only be at the entire process' exit).
///
pub fn new_owned<F>(
num_worker_threads: usize,
max_threads: usize,
on_thread_start: F,
) -> Result<Executor, String>
where
F: Fn() + Send + Sync + 'static,
{
let mut runtime_builder = Builder::new_multi_thread();
runtime_builder
.worker_threads(num_worker_threads)
.max_blocking_threads(max_threads - num_worker_threads)
.enable_all();
if env::var("PANTS_DEBUG").is_ok() {
runtime_builder.on_thread_start(on_thread_start);
};
let runtime = runtime_builder
.build()
.map_err(|e| format!("Failed to start the runtime: {e}"))?;
let handle = runtime.handle().clone();
Ok(Executor {
runtime: Arc::new(Mutex::new(Some(runtime))),
handle,
})
}
///
/// Creates a clone of this Executor which is disconnected from shutdown events. See the `Executor`
/// rustdoc.
///
pub fn to_borrowed(&self) -> Executor {
Self {
runtime: Arc::new(Mutex::new(None)),
handle: self.handle.clone(),
}
}
///
/// Enter the runtime context associated with this Executor. This should be used in situations
/// where threads not started by the runtime need access to it via task-local variables.
///
pub fn enter<F, R>(&self, f: F) -> R
where
F: FnOnce() -> R,
|
///
/// Run a Future on a tokio Runtime as a new Task, and return a Future handle to it.
///
/// If the background Task exits abnormally, the given closure will be called to recover: usually
/// it should convert the resulting Error to a relevant error type.
///
/// If the returned Future is dropped, the computation will still continue to completion: see
/// <https://docs.rs/tokio/0.2.20/tokio/task/struct.JoinHandle.html>
///
pub fn spawn<O: Send + 'static, F: Future<Output = O> + Send + 'static>(
&self,
future: F,
rescue_join_error: impl FnOnce(JoinError) -> O,
) -> impl Future<Output = O> {
self.native_spawn(future).map(|res| match res {
Ok(o) => o,
Err(e) => rescue_join_error(e),
})
}
///
/// Run a Future on a tokio Runtime as a new Task, and return a JoinHandle.
///
pub fn native_spawn<O: Send + 'static, F: Future<Output = O> + Send + 'static>(
&self,
future: F,
) -> JoinHandle<O> {
self.handle.spawn(future_with_correct_context(future))
}
///
/// Run a Future and return its resolved Result.
///
/// This should never be called from in a Future context, and should only ever be called in
/// something that resembles a main method.
///
/// Even after this method returns, work `spawn`ed into the background may continue to run on the
/// threads owned by this Executor.
///
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
// Make sure to copy our (thread-local) logging destination into the task.
// When a daemon thread kicks off a future, it should log like a daemon thread (and similarly
// for a user-facing thread).
self.handle.block_on(future_with_correct_context(future))
}
///
/// Spawn a Future on a threadpool specifically reserved for I/O tasks which are allowed to be
/// long-running.
///
/// If the background Task exits abnormally, the given closure will be called to recover: usually
/// it should convert the resulting Error to a relevant error type.
///
/// If the returned Future is dropped, the computation will still continue to completion: see
/// <https://docs.rs/tokio/0.2.20/tokio/task/struct.JoinHandle.html>
///
pub fn spawn_blocking<F: FnOnce() -> R + Send + 'static, R: Send + 'static>(
&self,
f: F,
rescue_join_error: impl FnOnce(JoinError) -> R,
) -> impl Future<Output = R> {
self.native_spawn_blocking(f).map(|res| match res {
Ok(o) => o,
Err(e) => rescue_join_error(e),
})
}
///
/// Spawn a Future on threads specifically reserved for I/O tasks which are allowed to be
/// long-running and return a JoinHandle
///
pub fn native_spawn_blocking<F: FnOnce() -> R + Send + 'static, R: Send + 'static>(
&self,
f: F,
) -> JoinHandle<R> {
let stdio_destination = stdio::get_destination();
let workunit_store_handle = workunit_store::get_workunit_store_handle();
// NB: We unwrap here because the only thing that should cause an error in a spawned task is a
// panic, in which case we want to propagate that.
self.handle.spawn_blocking(move || {
stdio::set_thread_destination(stdio_destination);
workunit_store::set_thread_workunit_store_handle(workunit_store_handle);
f()
})
}
/// Return a reference to this executor's runtime handle.
pub fn handle(&self) -> &Handle {
&self.handle
}
///
/// A blocking call to shut down the Runtime associated with this "owned" Executor. If tasks do
/// not shut down within the given timeout, they are leaked.
///
/// This method has no effect for "borrowed" Executors: see the `Executor` rustdoc.
///
pub fn shutdown(&self, timeout: Duration) {
let Some(runtime) = self.runtime.lock().take() else {
return;
};
let start = Instant::now();
runtime.shutdown_timeout(timeout + Duration::from_millis(250));
if start.elapsed() > timeout {
// Leaked tasks could lead to panics in some cases (see #16105), so warn for them.
log::warn!("Executor shutdown took unexpectedly long: tasks were likely leaked!");
}
}
/// Returns true if `shutdown` has been called for this Executor. Always returns true for
/// borrowed Executors.
pub fn is_shutdown(&self) -> bool {
self.runtime.lock().is_none()
}
}
/// Store "tail" tasks which are async tasks that can execute concurrently with regular
/// build actions. Tail tasks block completion of a session until all of them have been
/// completed (subject to a timeout).
#[derive(Clone)]
pub struct TailTasks {
inner: Arc<Mutex<Option<TailTasksInner>>>,
}
struct TailTasksInner {
id_to_name: HashMap<Id, String>,
task_set: JoinSet<()>,
}
impl TailTasks {
pub fn new() -> Self {
Self {
inner: Arc::new(Mutex::new(Some(TailTasksInner {
id_to_name: HashMap::new(),
task_set: JoinSet::new(),
}))),
}
}
/// Spawn a tail task with the given name.
pub fn spawn_on<F>(&self, name: &str, handle: &Handle, task: F)
where
F: Future<Output = ()>,
F: Send + 'static,
{
let task = future_with_correct_context(task);
let mut guard = self.inner.lock();
let inner = match &mut *guard {
Some(inner) => inner,
None => {
log::warn!(
"Session end task `{}` submitted after session completed.",
name
);
return;
}
};
let h = inner.task_set.spawn_on(task, handle);
inner.id_to_name.insert(h.id(), name.to_string());
}
/// Wait for all tail tasks to complete subject to the given timeout. If tasks
/// fail or do not complete, log that fact.
pub async fn wait(self, timeout: Duration) {
let mut inner = match self.inner.lock().take() {
Some(inner) => inner,
None => {
log::debug!("Session end tasks awaited multiple times!");
return;
}
};
if inner.task_set.is_empty() {
return;
}
log::debug!(
"waiting for {} session end task(s) to complete",
inner.task_set.len()
);
let mut timeout = tokio::time::sleep(timeout).boxed();
loop {
tokio::select! {
// Use biased mode to prefer an expired timeout over joining on remaining tasks.
biased;
// Exit monitoring loop if timeout expires.
_ = &mut timeout => break,
next_result = inner.task_set.join_next_with_id() => {
match next_result {
Some(Ok((id, _))) => {
if let Some(name) = inner.id_to_name.get(&id) {
log::trace!("Session end task `{name}` completed successfully");
} else {
log::debug!("Session end task completed successfully but name not found.");
}
inner.id_to_name.remove(&id);
},
Some(Err(err)) => {
let name = inner.id_to_name.get(&err.id());
log::error!("Session end task `{name:?}` failed: {err:?}");
}
None => break,
}
}
}
}
if inner.task_set.is_empty() {
log::debug!("all session end tasks completed successfully");
} else {
log::debug!(
"{} session end task(s) failed to complete within timeout: {}",
inner.task_set.len(),
inner.id_to_name.values().join(", "),
);
inner.task_set.abort_all();
}
}
}
| {
let _context = self.handle.enter();
f()
} | identifier_body |
lib.rs | // Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
#![deny(warnings)]
// Enable all clippy lints except for many of the pedantic ones. It's a shame this needs to be copied and pasted across crates, but there doesn't appear to be a way to include inner attributes from a common source.
#![deny(
clippy::all,
clippy::default_trait_access,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::needless_continue,
clippy::unseparated_literal_suffix,
clippy::used_underscore_binding
)]
// It is often more clear to show that nothing is being moved.
#![allow(clippy::match_ref_pats)]
// Subjective style.
#![allow(
clippy::len_without_is_empty,
clippy::redundant_field_names,
clippy::too_many_arguments
)]
// Default isn't as big a deal as people seem to think it is.
#![allow(clippy::new_without_default, clippy::new_ret_no_self)]
// Arc<Mutex> can be more clear than needing to grok Orderings:
#![allow(clippy::mutex_atomic)]
use std::collections::HashMap;
use std::env;
use std::future::Future;
use std::sync::Arc;
use std::time::{Duration, Instant};
use futures::future::FutureExt;
use itertools::Itertools;
use parking_lot::Mutex;
use tokio::runtime::{Builder, Handle, Runtime};
use tokio::task::{Id, JoinError, JoinHandle, JoinSet};
/// Copy our (thread-local or task-local) stdio destination and current workunit parent into
/// the task. The former ensures that when a pantsd thread kicks off a future, any stdio done
/// by it ends up in the pantsd log as we expect. The latter ensures that when a new workunit
/// is created it has an accurate handle to its parent.
fn future_with_correct_context<F: Future>(future: F) -> impl Future<Output = F::Output> {
let stdio_destination = stdio::get_destination();
let workunit_store_handle = workunit_store::get_workunit_store_handle();
// NB: It is important that the first portion of this method is synchronous (meaning that this
// method cannot be `async`), because that means that it will run on the thread that calls it.
// The second, async portion of the method will run in the spawned Task.
stdio::scope_task_destination(stdio_destination, async move {
workunit_store::scope_task_workunit_store_handle(workunit_store_handle, future).await
})
}
///
/// Executors come in two flavors:
/// * "borrowed"
/// * Created with `Self::new()`, or `self::to_borrowed()`.
/// * A borrowed Executor will not be shut down when all handles are dropped, and shutdown
/// methods will have no impact.
/// * Used when multiple runs of Pants will borrow a single Executor owned by `pantsd`, and in
/// unit tests where the Runtime is created by macros.
/// * "owned"
/// * Created with `Self::new_owned()`.
/// * When all handles of a owned Executor are dropped, its Runtime will be shut down.
/// Additionally, the explicit shutdown methods can be used to shut down the Executor for all
/// clones.
///
#[derive(Debug, Clone)]
pub struct Executor {
runtime: Arc<Mutex<Option<Runtime>>>,
handle: Handle,
}
impl Executor {
///
/// Creates an Executor for an existing tokio::Runtime (generally provided by tokio's macros).
///
/// The returned Executor will have a lifecycle independent of the Runtime, meaning that dropping
/// all clones of the Executor will not cause the Runtime to be shut down. Likewise, the owner of
/// the Runtime must ensure that it is kept alive longer than all Executor instances, because
/// existence of a Handle does not prevent a Runtime from shutting down. This is guaranteed by
/// the scope of the tokio::{test, main} macros.
///
pub fn new() -> Executor {
Self {
runtime: Arc::new(Mutex::new(None)),
handle: Handle::current(),
}
}
///
/// Gets a reference to a global static Executor with an owned tokio::Runtime, initializing it
/// with the given thread configuration if this is the first usage.
///
/// NB: The global static Executor eases lifecycle issues when consumed from Python, where we
/// need thread configurability, but also want to know reliably when the Runtime will shutdown
/// (which, because it is static, will only be at the entire process' exit).
///
pub fn new_owned<F>(
num_worker_threads: usize,
max_threads: usize,
on_thread_start: F,
) -> Result<Executor, String>
where
F: Fn() + Send + Sync + 'static,
{
let mut runtime_builder = Builder::new_multi_thread();
runtime_builder
.worker_threads(num_worker_threads)
.max_blocking_threads(max_threads - num_worker_threads)
.enable_all();
if env::var("PANTS_DEBUG").is_ok() {
runtime_builder.on_thread_start(on_thread_start);
};
let runtime = runtime_builder
.build()
.map_err(|e| format!("Failed to start the runtime: {e}"))?;
let handle = runtime.handle().clone();
Ok(Executor {
runtime: Arc::new(Mutex::new(Some(runtime))),
handle,
})
}
///
/// Creates a clone of this Executor which is disconnected from shutdown events. See the `Executor`
/// rustdoc.
///
pub fn to_borrowed(&self) -> Executor {
Self {
runtime: Arc::new(Mutex::new(None)),
handle: self.handle.clone(),
}
}
///
/// Enter the runtime context associated with this Executor. This should be used in situations
/// where threads not started by the runtime need access to it via task-local variables.
///
pub fn enter<F, R>(&self, f: F) -> R
where
F: FnOnce() -> R,
{
let _context = self.handle.enter();
f()
}
///
/// Run a Future on a tokio Runtime as a new Task, and return a Future handle to it.
///
/// If the background Task exits abnormally, the given closure will be called to recover: usually
/// it should convert the resulting Error to a relevant error type.
///
/// If the returned Future is dropped, the computation will still continue to completion: see
/// <https://docs.rs/tokio/0.2.20/tokio/task/struct.JoinHandle.html>
///
pub fn spawn<O: Send + 'static, F: Future<Output = O> + Send + 'static>(
&self,
future: F,
rescue_join_error: impl FnOnce(JoinError) -> O,
) -> impl Future<Output = O> {
self.native_spawn(future).map(|res| match res {
Ok(o) => o,
Err(e) => rescue_join_error(e),
})
}
///
/// Run a Future on a tokio Runtime as a new Task, and return a JoinHandle.
///
pub fn native_spawn<O: Send + 'static, F: Future<Output = O> + Send + 'static>(
&self,
future: F,
) -> JoinHandle<O> {
self.handle.spawn(future_with_correct_context(future))
}
///
/// Run a Future and return its resolved Result.
///
/// This should never be called from in a Future context, and should only ever be called in
/// something that resembles a main method.
///
/// Even after this method returns, work `spawn`ed into the background may continue to run on the
/// threads owned by this Executor.
///
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
// Make sure to copy our (thread-local) logging destination into the task.
// When a daemon thread kicks off a future, it should log like a daemon thread (and similarly
// for a user-facing thread).
self.handle.block_on(future_with_correct_context(future))
}
///
/// Spawn a Future on a threadpool specifically reserved for I/O tasks which are allowed to be
/// long-running.
///
/// If the background Task exits abnormally, the given closure will be called to recover: usually
/// it should convert the resulting Error to a relevant error type.
///
/// If the returned Future is dropped, the computation will still continue to completion: see
/// <https://docs.rs/tokio/0.2.20/tokio/task/struct.JoinHandle.html>
///
pub fn spawn_blocking<F: FnOnce() -> R + Send + 'static, R: Send + 'static>(
&self,
f: F,
rescue_join_error: impl FnOnce(JoinError) -> R,
) -> impl Future<Output = R> {
self.native_spawn_blocking(f).map(|res| match res {
Ok(o) => o,
Err(e) => rescue_join_error(e),
})
}
///
/// Spawn a Future on threads specifically reserved for I/O tasks which are allowed to be
/// long-running and return a JoinHandle
///
pub fn native_spawn_blocking<F: FnOnce() -> R + Send + 'static, R: Send + 'static>(
&self,
f: F,
) -> JoinHandle<R> {
let stdio_destination = stdio::get_destination();
let workunit_store_handle = workunit_store::get_workunit_store_handle();
// NB: We unwrap here because the only thing that should cause an error in a spawned task is a
// panic, in which case we want to propagate that.
self.handle.spawn_blocking(move || {
stdio::set_thread_destination(stdio_destination);
workunit_store::set_thread_workunit_store_handle(workunit_store_handle);
f()
})
}
/// Return a reference to this executor's runtime handle.
pub fn handle(&self) -> &Handle {
&self.handle
}
///
/// A blocking call to shut down the Runtime associated with this "owned" Executor. If tasks do
/// not shut down within the given timeout, they are leaked.
///
/// This method has no effect for "borrowed" Executors: see the `Executor` rustdoc.
///
pub fn shutdown(&self, timeout: Duration) {
let Some(runtime) = self.runtime.lock().take() else {
return;
};
let start = Instant::now();
runtime.shutdown_timeout(timeout + Duration::from_millis(250));
if start.elapsed() > timeout {
// Leaked tasks could lead to panics in some cases (see #16105), so warn for them.
log::warn!("Executor shutdown took unexpectedly long: tasks were likely leaked!");
}
}
/// Returns true if `shutdown` has been called for this Executor. Always returns true for
/// borrowed Executors.
pub fn is_shutdown(&self) -> bool {
self.runtime.lock().is_none()
}
}
/// Store "tail" tasks which are async tasks that can execute concurrently with regular
/// build actions. Tail tasks block completion of a session until all of them have been
/// completed (subject to a timeout).
#[derive(Clone)]
pub struct TailTasks {
inner: Arc<Mutex<Option<TailTasksInner>>>,
}
struct TailTasksInner {
id_to_name: HashMap<Id, String>,
task_set: JoinSet<()>,
}
impl TailTasks {
pub fn new() -> Self {
Self {
inner: Arc::new(Mutex::new(Some(TailTasksInner {
id_to_name: HashMap::new(),
task_set: JoinSet::new(),
}))),
}
}
/// Spawn a tail task with the given name.
pub fn spawn_on<F>(&self, name: &str, handle: &Handle, task: F)
where
F: Future<Output = ()>,
F: Send + 'static,
{
let task = future_with_correct_context(task);
let mut guard = self.inner.lock();
let inner = match &mut *guard {
Some(inner) => inner,
None => {
log::warn!(
"Session end task `{}` submitted after session completed.",
name
);
return;
}
};
let h = inner.task_set.spawn_on(task, handle);
inner.id_to_name.insert(h.id(), name.to_string());
}
/// Wait for all tail tasks to complete subject to the given timeout. If tasks
/// fail or do not complete, log that fact.
pub async fn wait(self, timeout: Duration) {
let mut inner = match self.inner.lock().take() {
Some(inner) => inner,
None => {
log::debug!("Session end tasks awaited multiple times!");
return;
}
}; | log::debug!(
"waiting for {} session end task(s) to complete",
inner.task_set.len()
);
let mut timeout = tokio::time::sleep(timeout).boxed();
loop {
tokio::select! {
// Use biased mode to prefer an expired timeout over joining on remaining tasks.
biased;
// Exit monitoring loop if timeout expires.
_ = &mut timeout => break,
next_result = inner.task_set.join_next_with_id() => {
match next_result {
Some(Ok((id, _))) => {
if let Some(name) = inner.id_to_name.get(&id) {
log::trace!("Session end task `{name}` completed successfully");
} else {
log::debug!("Session end task completed successfully but name not found.");
}
inner.id_to_name.remove(&id);
},
Some(Err(err)) => {
let name = inner.id_to_name.get(&err.id());
log::error!("Session end task `{name:?}` failed: {err:?}");
}
None => break,
}
}
}
}
if inner.task_set.is_empty() {
log::debug!("all session end tasks completed successfully");
} else {
log::debug!(
"{} session end task(s) failed to complete within timeout: {}",
inner.task_set.len(),
inner.id_to_name.values().join(", "),
);
inner.task_set.abort_all();
}
}
} |
if inner.task_set.is_empty() {
return;
}
| random_line_split |
lib.rs | // Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
#![deny(warnings)]
// Enable all clippy lints except for many of the pedantic ones. It's a shame this needs to be copied and pasted across crates, but there doesn't appear to be a way to include inner attributes from a common source.
#![deny(
clippy::all,
clippy::default_trait_access,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::needless_continue,
clippy::unseparated_literal_suffix,
clippy::used_underscore_binding
)]
// It is often more clear to show that nothing is being moved.
#![allow(clippy::match_ref_pats)]
// Subjective style.
#![allow(
clippy::len_without_is_empty,
clippy::redundant_field_names,
clippy::too_many_arguments
)]
// Default isn't as big a deal as people seem to think it is.
#![allow(clippy::new_without_default, clippy::new_ret_no_self)]
// Arc<Mutex> can be more clear than needing to grok Orderings:
#![allow(clippy::mutex_atomic)]
use std::collections::HashMap;
use std::env;
use std::future::Future;
use std::sync::Arc;
use std::time::{Duration, Instant};
use futures::future::FutureExt;
use itertools::Itertools;
use parking_lot::Mutex;
use tokio::runtime::{Builder, Handle, Runtime};
use tokio::task::{Id, JoinError, JoinHandle, JoinSet};
/// Propagate the caller's (thread-local or task-local) stdio destination and
/// current workunit parent into `future`.
///
/// The stdio destination ensures that when a pantsd thread kicks off a future, any
/// stdio done by it ends up in the pantsd log as expected; the workunit handle
/// ensures a newly created workunit has an accurate handle to its parent.
fn future_with_correct_context<F: Future>(future: F) -> impl Future<Output = F::Output> {
    // NB: These two reads must happen synchronously on the spawning thread (which is
    // why this is a plain fn rather than `async fn`); the wrapped future then runs
    // under those scopes wherever it is polled.
    let destination = stdio::get_destination();
    let store_handle = workunit_store::get_workunit_store_handle();
    let scoped = async move {
        workunit_store::scope_task_workunit_store_handle(store_handle, future).await
    };
    stdio::scope_task_destination(destination, scoped)
}
///
/// Executors come in two flavors:
/// * "borrowed"
/// * Created with `Self::new()`, or `self::to_borrowed()`.
/// * A borrowed Executor will not be shut down when all handles are dropped, and shutdown
/// methods will have no impact.
/// * Used when multiple runs of Pants will borrow a single Executor owned by `pantsd`, and in
/// unit tests where the Runtime is created by macros.
/// * "owned"
/// * Created with `Self::new_owned()`.
/// * When all handles of a owned Executor are dropped, its Runtime will be shut down.
/// Additionally, the explicit shutdown methods can be used to shut down the Executor for all
/// clones.
///
#[derive(Debug, Clone)]
pub struct Executor {
    // `Some(Runtime)` for "owned" Executors (created via `new_owned`, shut down by
    // `shutdown` or on drop of the Runtime); `None` for "borrowed" Executors, whose
    // Runtime lives elsewhere and is never shut down through this handle.
    runtime: Arc<Mutex<Option<Runtime>>>,
    // Handle used for spawning and `block_on`; valid for both flavors.
    handle: Handle,
}
impl Executor {
    ///
    /// Creates an Executor for an existing tokio::Runtime (generally provided by tokio's macros).
    ///
    /// The returned Executor will have a lifecycle independent of the Runtime, meaning that dropping
    /// all clones of the Executor will not cause the Runtime to be shut down. Likewise, the owner of
    /// the Runtime must ensure that it is kept alive longer than all Executor instances, because
    /// existence of a Handle does not prevent a Runtime from shutting down. This is guaranteed by
    /// the scope of the tokio::{test, main} macros.
    ///
    pub fn new() -> Executor {
        Self {
            runtime: Arc::new(Mutex::new(None)),
            handle: Handle::current(),
        }
    }

    ///
    /// Creates an "owned" Executor with a freshly built multi-threaded tokio::Runtime using the
    /// given thread configuration. The Runtime is shut down when `shutdown` is called (or when the
    /// owned Runtime is dropped).
    ///
    /// # Errors
    ///
    /// Returns an error if the Runtime cannot be built, or if `max_threads` does not leave room
    /// for at least one blocking thread beyond `num_worker_threads`.
    ///
    pub fn new_owned<F>(
        num_worker_threads: usize,
        max_threads: usize,
        on_thread_start: F,
    ) -> Result<Executor, String>
    where
        F: Fn() + Send + Sync + 'static,
    {
        // Guard against usize underflow (and a zero-sized blocking pool, which tokio's
        // `max_blocking_threads` rejects): the difference must be strictly positive.
        let max_blocking_threads = max_threads
            .checked_sub(num_worker_threads)
            .filter(|&blocking| blocking > 0)
            .ok_or_else(|| {
                format!(
                    "max_threads ({max_threads}) must be greater than num_worker_threads ({num_worker_threads})"
                )
            })?;
        let mut runtime_builder = Builder::new_multi_thread();
        runtime_builder
            .worker_threads(num_worker_threads)
            .max_blocking_threads(max_blocking_threads)
            .enable_all();
        // The thread-start hook is only installed when debugging is requested.
        if env::var("PANTS_DEBUG").is_ok() {
            runtime_builder.on_thread_start(on_thread_start);
        };
        let runtime = runtime_builder
            .build()
            .map_err(|e| format!("Failed to start the runtime: {e}"))?;
        let handle = runtime.handle().clone();
        Ok(Executor {
            runtime: Arc::new(Mutex::new(Some(runtime))),
            handle,
        })
    }

    ///
    /// Creates a clone of this Executor which is disconnected from shutdown events. See the `Executor`
    /// rustdoc.
    ///
    pub fn to_borrowed(&self) -> Executor {
        Self {
            runtime: Arc::new(Mutex::new(None)),
            handle: self.handle.clone(),
        }
    }

    ///
    /// Enter the runtime context associated with this Executor. This should be used in situations
    /// where threads not started by the runtime need access to it via task-local variables.
    ///
    pub fn enter<F, R>(&self, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        let _context = self.handle.enter();
        f()
    }

    ///
    /// Run a Future on a tokio Runtime as a new Task, and return a Future handle to it.
    ///
    /// If the background Task exits abnormally, the given closure will be called to recover: usually
    /// it should convert the resulting Error to a relevant error type.
    ///
    /// If the returned Future is dropped, the computation will still continue to completion: see
    /// <https://docs.rs/tokio/0.2.20/tokio/task/struct.JoinHandle.html>
    ///
    pub fn spawn<O: Send + 'static, F: Future<Output = O> + Send + 'static>(
        &self,
        future: F,
        rescue_join_error: impl FnOnce(JoinError) -> O,
    ) -> impl Future<Output = O> {
        self.native_spawn(future).map(|res| match res {
            Ok(o) => o,
            Err(e) => rescue_join_error(e),
        })
    }

    ///
    /// Run a Future on a tokio Runtime as a new Task, and return a JoinHandle.
    ///
    pub fn native_spawn<O: Send + 'static, F: Future<Output = O> + Send + 'static>(
        &self,
        future: F,
    ) -> JoinHandle<O> {
        self.handle.spawn(future_with_correct_context(future))
    }

    ///
    /// Run a Future and return its resolved Result.
    ///
    /// This should never be called from in a Future context, and should only ever be called in
    /// something that resembles a main method.
    ///
    /// Even after this method returns, work `spawn`ed into the background may continue to run on the
    /// threads owned by this Executor.
    ///
    pub fn block_on<F: Future>(&self, future: F) -> F::Output {
        // Make sure to copy our (thread-local) logging destination into the task.
        // When a daemon thread kicks off a future, it should log like a daemon thread (and similarly
        // for a user-facing thread).
        self.handle.block_on(future_with_correct_context(future))
    }

    ///
    /// Spawn a Future on a threadpool specifically reserved for I/O tasks which are allowed to be
    /// long-running.
    ///
    /// If the background Task exits abnormally, the given closure will be called to recover: usually
    /// it should convert the resulting Error to a relevant error type.
    ///
    /// If the returned Future is dropped, the computation will still continue to completion: see
    /// <https://docs.rs/tokio/0.2.20/tokio/task/struct.JoinHandle.html>
    ///
    pub fn spawn_blocking<F: FnOnce() -> R + Send + 'static, R: Send + 'static>(
        &self,
        f: F,
        rescue_join_error: impl FnOnce(JoinError) -> R,
    ) -> impl Future<Output = R> {
        self.native_spawn_blocking(f).map(|res| match res {
            Ok(o) => o,
            Err(e) => rescue_join_error(e),
        })
    }

    ///
    /// Spawn a Future on threads specifically reserved for I/O tasks which are allowed to be
    /// long-running and return a JoinHandle
    ///
    pub fn native_spawn_blocking<F: FnOnce() -> R + Send + 'static, R: Send + 'static>(
        &self,
        f: F,
    ) -> JoinHandle<R> {
        let stdio_destination = stdio::get_destination();
        let workunit_store_handle = workunit_store::get_workunit_store_handle();
        // NB: Capture the stdio destination and workunit handle on the spawning thread, then
        // install them on the blocking-pool thread before running `f`, so the closure logs and
        // records workunits in the caller's context.
        self.handle.spawn_blocking(move || {
            stdio::set_thread_destination(stdio_destination);
            workunit_store::set_thread_workunit_store_handle(workunit_store_handle);
            f()
        })
    }

    /// Return a reference to this executor's runtime handle.
    pub fn handle(&self) -> &Handle {
        &self.handle
    }

    ///
    /// A blocking call to shut down the Runtime associated with this "owned" Executor. If tasks do
    /// not shut down within the given timeout, they are leaked.
    ///
    /// This method has no effect for "borrowed" Executors: see the `Executor` rustdoc.
    ///
    pub fn shutdown(&self, timeout: Duration) {
        let Some(runtime) = self.runtime.lock().take() else {
            return;
        };
        let start = Instant::now();
        // Grant a small grace period beyond the caller's timeout before giving up.
        runtime.shutdown_timeout(timeout + Duration::from_millis(250));
        if start.elapsed() > timeout {
            // Leaked tasks could lead to panics in some cases (see #16105), so warn for them.
            log::warn!("Executor shutdown took unexpectedly long: tasks were likely leaked!");
        }
    }

    /// Returns true if `shutdown` has been called for this Executor. Always returns true for
    /// borrowed Executors.
    pub fn is_shutdown(&self) -> bool {
        self.runtime.lock().is_none()
    }
}
/// Store "tail" tasks which are async tasks that can execute concurrently with regular
/// build actions. Tail tasks block completion of a session until all of them have been
/// completed (subject to a timeout).
#[derive(Clone)]
pub struct TailTasks {
    // `Some` while the session is live; taken (set to `None`) by `wait`, after which
    // further `spawn_on` calls are rejected with a warning.
    inner: Arc<Mutex<Option<TailTasksInner>>>,
}
// Mutable state behind `TailTasks`: the set of spawned tail tasks plus a
// task-id -> name map used for logging.
struct TailTasksInner {
    // Human-readable names for spawned tasks, keyed by their JoinSet task `Id`.
    id_to_name: HashMap<Id, String>,
    // The tail tasks themselves; all are spawned through this set so they can be
    // joined or aborted as a group.
    task_set: JoinSet<()>,
}
impl TailTasks {
pub fn new() -> Self {
Self {
inner: Arc::new(Mutex::new(Some(TailTasksInner {
id_to_name: HashMap::new(),
task_set: JoinSet::new(),
}))),
}
}
/// Spawn a tail task with the given name.
pub fn | <F>(&self, name: &str, handle: &Handle, task: F)
where
F: Future<Output = ()>,
F: Send + 'static,
{
let task = future_with_correct_context(task);
let mut guard = self.inner.lock();
let inner = match &mut *guard {
Some(inner) => inner,
None => {
log::warn!(
"Session end task `{}` submitted after session completed.",
name
);
return;
}
};
let h = inner.task_set.spawn_on(task, handle);
inner.id_to_name.insert(h.id(), name.to_string());
}
/// Wait for all tail tasks to complete subject to the given timeout. If tasks
/// fail or do not complete, log that fact.
pub async fn wait(self, timeout: Duration) {
let mut inner = match self.inner.lock().take() {
Some(inner) => inner,
None => {
log::debug!("Session end tasks awaited multiple times!");
return;
}
};
if inner.task_set.is_empty() {
return;
}
log::debug!(
"waiting for {} session end task(s) to complete",
inner.task_set.len()
);
let mut timeout = tokio::time::sleep(timeout).boxed();
loop {
tokio::select! {
// Use biased mode to prefer an expired timeout over joining on remaining tasks.
biased;
// Exit monitoring loop if timeout expires.
_ = &mut timeout => break,
next_result = inner.task_set.join_next_with_id() => {
match next_result {
Some(Ok((id, _))) => {
if let Some(name) = inner.id_to_name.get(&id) {
log::trace!("Session end task `{name}` completed successfully");
} else {
log::debug!("Session end task completed successfully but name not found.");
}
inner.id_to_name.remove(&id);
},
Some(Err(err)) => {
let name = inner.id_to_name.get(&err.id());
log::error!("Session end task `{name:?}` failed: {err:?}");
}
None => break,
}
}
}
}
if inner.task_set.is_empty() {
log::debug!("all session end tasks completed successfully");
} else {
log::debug!(
"{} session end task(s) failed to complete within timeout: {}",
inner.task_set.len(),
inner.id_to_name.values().join(", "),
);
inner.task_set.abort_all();
}
}
}
| spawn_on | identifier_name |
lib.rs | // Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
#![deny(warnings)]
// Enable all clippy lints except for many of the pedantic ones. It's a shame this needs to be copied and pasted across crates, but there doesn't appear to be a way to include inner attributes from a common source.
#![deny(
clippy::all,
clippy::default_trait_access,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::needless_continue,
clippy::unseparated_literal_suffix,
clippy::used_underscore_binding
)]
// It is often more clear to show that nothing is being moved.
#![allow(clippy::match_ref_pats)]
// Subjective style.
#![allow(
clippy::len_without_is_empty,
clippy::redundant_field_names,
clippy::too_many_arguments
)]
// Default isn't as big a deal as people seem to think it is.
#![allow(clippy::new_without_default, clippy::new_ret_no_self)]
// Arc<Mutex> can be more clear than needing to grok Orderings:
#![allow(clippy::mutex_atomic)]
use std::collections::HashMap;
use std::env;
use std::future::Future;
use std::sync::Arc;
use std::time::{Duration, Instant};
use futures::future::FutureExt;
use itertools::Itertools;
use parking_lot::Mutex;
use tokio::runtime::{Builder, Handle, Runtime};
use tokio::task::{Id, JoinError, JoinHandle, JoinSet};
/// Copy our (thread-local or task-local) stdio destination and current workunit parent into
/// the task. The former ensures that when a pantsd thread kicks off a future, any stdio done
/// by it ends up in the pantsd log as we expect. The latter ensures that when a new workunit
/// is created it has an accurate handle to its parent.
fn future_with_correct_context<F: Future>(future: F) -> impl Future<Output = F::Output> {
let stdio_destination = stdio::get_destination();
let workunit_store_handle = workunit_store::get_workunit_store_handle();
// NB: It is important that the first portion of this method is synchronous (meaning that this
// method cannot be `async`), because that means that it will run on the thread that calls it.
// The second, async portion of the method will run in the spawned Task.
stdio::scope_task_destination(stdio_destination, async move {
workunit_store::scope_task_workunit_store_handle(workunit_store_handle, future).await
})
}
///
/// Executors come in two flavors:
/// * "borrowed"
/// * Created with `Self::new()`, or `self::to_borrowed()`.
/// * A borrowed Executor will not be shut down when all handles are dropped, and shutdown
/// methods will have no impact.
/// * Used when multiple runs of Pants will borrow a single Executor owned by `pantsd`, and in
/// unit tests where the Runtime is created by macros.
/// * "owned"
/// * Created with `Self::new_owned()`.
/// * When all handles of a owned Executor are dropped, its Runtime will be shut down.
/// Additionally, the explicit shutdown methods can be used to shut down the Executor for all
/// clones.
///
#[derive(Debug, Clone)]
pub struct Executor {
runtime: Arc<Mutex<Option<Runtime>>>,
handle: Handle,
}
impl Executor {
///
/// Creates an Executor for an existing tokio::Runtime (generally provided by tokio's macros).
///
/// The returned Executor will have a lifecycle independent of the Runtime, meaning that dropping
/// all clones of the Executor will not cause the Runtime to be shut down. Likewise, the owner of
/// the Runtime must ensure that it is kept alive longer than all Executor instances, because
/// existence of a Handle does not prevent a Runtime from shutting down. This is guaranteed by
/// the scope of the tokio::{test, main} macros.
///
pub fn new() -> Executor {
Self {
runtime: Arc::new(Mutex::new(None)),
handle: Handle::current(),
}
}
///
/// Gets a reference to a global static Executor with an owned tokio::Runtime, initializing it
/// with the given thread configuration if this is the first usage.
///
/// NB: The global static Executor eases lifecycle issues when consumed from Python, where we
/// need thread configurability, but also want to know reliably when the Runtime will shutdown
/// (which, because it is static, will only be at the entire process' exit).
///
pub fn new_owned<F>(
num_worker_threads: usize,
max_threads: usize,
on_thread_start: F,
) -> Result<Executor, String>
where
F: Fn() + Send + Sync + 'static,
{
let mut runtime_builder = Builder::new_multi_thread();
runtime_builder
.worker_threads(num_worker_threads)
.max_blocking_threads(max_threads - num_worker_threads)
.enable_all();
if env::var("PANTS_DEBUG").is_ok() {
runtime_builder.on_thread_start(on_thread_start);
};
let runtime = runtime_builder
.build()
.map_err(|e| format!("Failed to start the runtime: {e}"))?;
let handle = runtime.handle().clone();
Ok(Executor {
runtime: Arc::new(Mutex::new(Some(runtime))),
handle,
})
}
///
/// Creates a clone of this Executor which is disconnected from shutdown events. See the `Executor`
/// rustdoc.
///
pub fn to_borrowed(&self) -> Executor {
Self {
runtime: Arc::new(Mutex::new(None)),
handle: self.handle.clone(),
}
}
///
/// Enter the runtime context associated with this Executor. This should be used in situations
/// where threads not started by the runtime need access to it via task-local variables.
///
pub fn enter<F, R>(&self, f: F) -> R
where
F: FnOnce() -> R,
{
let _context = self.handle.enter();
f()
}
///
/// Run a Future on a tokio Runtime as a new Task, and return a Future handle to it.
///
/// If the background Task exits abnormally, the given closure will be called to recover: usually
/// it should convert the resulting Error to a relevant error type.
///
/// If the returned Future is dropped, the computation will still continue to completion: see
/// <https://docs.rs/tokio/0.2.20/tokio/task/struct.JoinHandle.html>
///
pub fn spawn<O: Send + 'static, F: Future<Output = O> + Send + 'static>(
&self,
future: F,
rescue_join_error: impl FnOnce(JoinError) -> O,
) -> impl Future<Output = O> {
self.native_spawn(future).map(|res| match res {
Ok(o) => o,
Err(e) => rescue_join_error(e),
})
}
///
/// Run a Future on a tokio Runtime as a new Task, and return a JoinHandle.
///
pub fn native_spawn<O: Send + 'static, F: Future<Output = O> + Send + 'static>(
&self,
future: F,
) -> JoinHandle<O> {
self.handle.spawn(future_with_correct_context(future))
}
///
/// Run a Future and return its resolved Result.
///
/// This should never be called from in a Future context, and should only ever be called in
/// something that resembles a main method.
///
/// Even after this method returns, work `spawn`ed into the background may continue to run on the
/// threads owned by this Executor.
///
pub fn block_on<F: Future>(&self, future: F) -> F::Output {
// Make sure to copy our (thread-local) logging destination into the task.
// When a daemon thread kicks off a future, it should log like a daemon thread (and similarly
// for a user-facing thread).
self.handle.block_on(future_with_correct_context(future))
}
///
/// Spawn a Future on a threadpool specifically reserved for I/O tasks which are allowed to be
/// long-running.
///
/// If the background Task exits abnormally, the given closure will be called to recover: usually
/// it should convert the resulting Error to a relevant error type.
///
/// If the returned Future is dropped, the computation will still continue to completion: see
/// <https://docs.rs/tokio/0.2.20/tokio/task/struct.JoinHandle.html>
///
pub fn spawn_blocking<F: FnOnce() -> R + Send + 'static, R: Send + 'static>(
&self,
f: F,
rescue_join_error: impl FnOnce(JoinError) -> R,
) -> impl Future<Output = R> {
self.native_spawn_blocking(f).map(|res| match res {
Ok(o) => o,
Err(e) => rescue_join_error(e),
})
}
///
/// Spawn a Future on threads specifically reserved for I/O tasks which are allowed to be
/// long-running and return a JoinHandle
///
pub fn native_spawn_blocking<F: FnOnce() -> R + Send + 'static, R: Send + 'static>(
&self,
f: F,
) -> JoinHandle<R> {
let stdio_destination = stdio::get_destination();
let workunit_store_handle = workunit_store::get_workunit_store_handle();
// NB: We unwrap here because the only thing that should cause an error in a spawned task is a
// panic, in which case we want to propagate that.
self.handle.spawn_blocking(move || {
stdio::set_thread_destination(stdio_destination);
workunit_store::set_thread_workunit_store_handle(workunit_store_handle);
f()
})
}
/// Return a reference to this executor's runtime handle.
pub fn handle(&self) -> &Handle {
&self.handle
}
///
/// A blocking call to shut down the Runtime associated with this "owned" Executor. If tasks do
/// not shut down within the given timeout, they are leaked.
///
/// This method has no effect for "borrowed" Executors: see the `Executor` rustdoc.
///
pub fn shutdown(&self, timeout: Duration) {
let Some(runtime) = self.runtime.lock().take() else {
return;
};
let start = Instant::now();
runtime.shutdown_timeout(timeout + Duration::from_millis(250));
if start.elapsed() > timeout {
// Leaked tasks could lead to panics in some cases (see #16105), so warn for them.
log::warn!("Executor shutdown took unexpectedly long: tasks were likely leaked!");
}
}
/// Returns true if `shutdown` has been called for this Executor. Always returns true for
/// borrowed Executors.
pub fn is_shutdown(&self) -> bool {
self.runtime.lock().is_none()
}
}
/// Store "tail" tasks which are async tasks that can execute concurrently with regular
/// build actions. Tail tasks block completion of a session until all of them have been
/// completed (subject to a timeout).
#[derive(Clone)]
pub struct TailTasks {
inner: Arc<Mutex<Option<TailTasksInner>>>,
}
struct TailTasksInner {
id_to_name: HashMap<Id, String>,
task_set: JoinSet<()>,
}
impl TailTasks {
pub fn new() -> Self {
Self {
inner: Arc::new(Mutex::new(Some(TailTasksInner {
id_to_name: HashMap::new(),
task_set: JoinSet::new(),
}))),
}
}
/// Spawn a tail task with the given name.
pub fn spawn_on<F>(&self, name: &str, handle: &Handle, task: F)
where
F: Future<Output = ()>,
F: Send + 'static,
{
let task = future_with_correct_context(task);
let mut guard = self.inner.lock();
let inner = match &mut *guard {
Some(inner) => inner,
None => {
log::warn!(
"Session end task `{}` submitted after session completed.",
name
);
return;
}
};
let h = inner.task_set.spawn_on(task, handle);
inner.id_to_name.insert(h.id(), name.to_string());
}
/// Wait for all tail tasks to complete subject to the given timeout. If tasks
/// fail or do not complete, log that fact.
pub async fn wait(self, timeout: Duration) {
let mut inner = match self.inner.lock().take() {
Some(inner) => inner,
None => {
log::debug!("Session end tasks awaited multiple times!");
return;
}
};
if inner.task_set.is_empty() {
return;
}
log::debug!(
"waiting for {} session end task(s) to complete",
inner.task_set.len()
);
let mut timeout = tokio::time::sleep(timeout).boxed();
loop {
tokio::select! {
// Use biased mode to prefer an expired timeout over joining on remaining tasks.
biased;
// Exit monitoring loop if timeout expires.
_ = &mut timeout => break,
next_result = inner.task_set.join_next_with_id() => {
match next_result {
Some(Ok((id, _))) => {
if let Some(name) = inner.id_to_name.get(&id) {
log::trace!("Session end task `{name}` completed successfully");
} else {
log::debug!("Session end task completed successfully but name not found.");
}
inner.id_to_name.remove(&id);
},
Some(Err(err)) => {
let name = inner.id_to_name.get(&err.id());
log::error!("Session end task `{name:?}` failed: {err:?}");
}
None => break,
}
}
}
}
if inner.task_set.is_empty() {
log::debug!("all session end tasks completed successfully");
} else |
}
}
| {
log::debug!(
"{} session end task(s) failed to complete within timeout: {}",
inner.task_set.len(),
inner.id_to_name.values().join(", "),
);
inner.task_set.abort_all();
} | conditional_block |
traphandlers.rs | //! WebAssembly trap handling, which is built on top of the lower-level
//! signalhandling mechanisms.
use crate::VMInterrupts;
use backtrace::Backtrace;
use std::any::Any;
use std::cell::Cell;
use std::error::Error;
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::sync::Once;
use wasmtime_environ::ir;
pub use self::tls::TlsRestore;
extern "C" {
fn RegisterSetjmp(
jmp_buf: *mut *const u8,
callback: extern "C" fn(*mut u8),
payload: *mut u8,
) -> i32;
fn Unwind(jmp_buf: *const u8) -> !;
}
cfg_if::cfg_if! {
if #[cfg(target_os = "macos")] {
mod macos;
use macos as sys;
} else if #[cfg(unix)] {
mod unix;
use unix as sys;
} else if #[cfg(target_os = "windows")] {
mod windows;
use windows as sys;
}
}
pub use sys::SignalHandler;
/// This function performs the low-overhead platform-specific initialization
/// that we want to do eagerly to ensure a more-deterministic global process
/// state.
///
/// This is especially relevant for signal handlers since handler ordering
/// depends on installation order: the wasm signal handler must run *before*
/// the other crash handlers and since POSIX signal handlers work LIFO, this
/// function needs to be called at the end of the startup process, after other
/// handlers have been installed. This function can thus be called multiple
/// times, having no effect after the first call.
pub fn init_traps() {
static INIT: Once = Once::new();
INIT.call_once(|| unsafe { sys::platform_init() });
}
/// Raises a user-defined trap immediately.
///
/// This function performs as-if a wasm trap was just executed, only the trap
/// has a dynamic payload associated with it which is user-provided. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_user_trap(data: Box<dyn Error + Send + Sync>) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::UserTrap(data)))
}
/// Raises a trap from inside library code immediately.
///
/// This function performs as-if a wasm trap was just executed. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_lib_trap(trap: Trap) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::LibTrap(trap)))
}
/// Carries a Rust panic across wasm code and resumes the panic on the other
/// side.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn resume_panic(payload: Box<dyn Any + Send>) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::Panic(payload)))
}
/// Stores trace message with backtrace.
#[derive(Debug)]
pub enum Trap {
/// A user-raised trap through `raise_user_trap`.
User(Box<dyn Error + Send + Sync>),
/// A trap raised from jit code
Jit {
/// The program counter in JIT code where this trap happened.
pc: usize,
/// Native stack backtrace at the time the trap occurred
backtrace: Backtrace,
/// An indicator for whether this may have been a trap generated from an
/// interrupt, used for switching what would otherwise be a stack
/// overflow trap to be an interrupt trap.
maybe_interrupted: bool,
},
/// A trap raised from a wasm libcall
Wasm {
/// Code of the trap.
trap_code: ir::TrapCode,
/// Native stack backtrace at the time the trap occurred
backtrace: Backtrace,
},
/// A trap indicating that the runtime was unable to allocate sufficient memory.
OOM {
/// Native stack backtrace at the time the OOM occurred
backtrace: Backtrace,
},
}
impl Trap {
/// Construct a new Wasm trap with the given source location and trap code.
///
/// Internally saves a backtrace when constructed.
pub fn wasm(trap_code: ir::TrapCode) -> Self {
let backtrace = Backtrace::new_unresolved();
Trap::Wasm {
trap_code,
backtrace,
}
}
/// Construct a new OOM trap with the given source location and trap code.
///
/// Internally saves a backtrace when constructed.
pub fn oom() -> Self {
let backtrace = Backtrace::new_unresolved();
Trap::OOM { backtrace }
}
}
/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// Highly unsafe since `closure` won't have any dtors run.
pub unsafe fn catch_traps<F>(trap_info: &impl TrapInfo, mut closure: F) -> Result<(), Trap>
where
F: FnMut(),
{
sys::lazy_per_thread_init()?;
return CallThreadState::new(trap_info).with(|cx| {
RegisterSetjmp(
cx.jmp_buf.as_ptr(),
call_closure::<F>,
&mut closure as *mut F as *mut u8,
)
});
extern "C" fn call_closure<F>(payload: *mut u8)
where
F: FnMut(),
{
unsafe { (*(payload as *mut F))() }
}
}
/// Runs `func` with the last `trap_info` object registered by `catch_traps`.
///
/// Calls `func` with `None` if `catch_traps` wasn't previously called from this
/// stack frame.
pub fn with_last_info<R>(func: impl FnOnce(Option<&dyn Any>) -> R) -> R {
tls::with(|state| func(state.map(|s| s.trap_info.as_any())))
}
/// Invokes the contextually-defined context's out-of-gas function.
///
/// (basically delegates to `wasmtime::Store::out_of_gas`)
pub fn out_of_gas() {
tls::with(|state| state.unwrap().trap_info.out_of_gas())
}
/// Temporary state stored on the stack which is registered in the `tls` module
/// below for calls into wasm.
pub struct CallThreadState<'a> {
unwind: Cell<UnwindReason>,
jmp_buf: Cell<*const u8>,
handling_trap: Cell<bool>,
trap_info: &'a (dyn TrapInfo + 'a),
prev: Cell<tls::Ptr>,
}
/// A package of functionality needed by `catch_traps` to figure out what to do
/// when handling a trap.
///
/// Note that this is an `unsafe` trait at least because it's being run in the
/// context of a synchronous signal handler, so it needs to be careful to not
/// access too much state in answering these queries.
pub unsafe trait TrapInfo {
/// Converts this object into an `Any` to dynamically check its type.
fn as_any(&self) -> &dyn Any;
/// Returns whether the given program counter lies within wasm code,
/// indicating whether we should handle a trap or not.
fn is_wasm_trap(&self, pc: usize) -> bool;
/// Uses `call` to call a custom signal handler, if one is specified.
///
/// Returns `true` if `call` returns true, otherwise returns `false`.
fn custom_signal_handler(&self, call: &dyn Fn(&SignalHandler) -> bool) -> bool;
/// Returns the maximum size, in bytes, the wasm native stack is allowed to
/// grow to.
fn max_wasm_stack(&self) -> usize;
/// Callback invoked whenever WebAssembly has entirely consumed the fuel
/// that it was allotted.
///
/// This function may return, and it may also `raise_lib_trap`.
fn out_of_gas(&self);
/// Returns the VM interrupts to use for interrupting Wasm code.
fn interrupts(&self) -> &VMInterrupts;
}
enum UnwindReason {
None,
Panic(Box<dyn Any + Send>),
UserTrap(Box<dyn Error + Send + Sync>),
LibTrap(Trap),
JitTrap { backtrace: Backtrace, pc: usize },
}
impl<'a> CallThreadState<'a> {
fn new(trap_info: &'a (dyn TrapInfo + 'a)) -> CallThreadState<'a> {
CallThreadState {
unwind: Cell::new(UnwindReason::None),
jmp_buf: Cell::new(ptr::null()),
handling_trap: Cell::new(false),
trap_info,
prev: Cell::new(ptr::null()),
}
}
fn with(self, closure: impl FnOnce(&CallThreadState) -> i32) -> Result<(), Trap> |
/// Checks and/or initializes the wasm native call stack limit.
///
/// This function will inspect the current state of the stack and calling
/// context to determine which of three buckets we're in:
///
/// 1. We are the first wasm call on the stack. This means that we need to
/// set up a stack limit where beyond which if the native wasm stack
/// pointer goes beyond forces a trap. For now we simply reserve an
/// arbitrary chunk of bytes (1 MB from roughly the current native stack
/// pointer). This logic will likely get tweaked over time.
///
/// 2. We aren't the first wasm call on the stack. In this scenario the wasm
/// stack limit is already configured. This case of wasm -> host -> wasm
/// we assume that the native stack consumed by the host is accounted for
/// in the initial stack limit calculation. That means that in this
/// scenario we do nothing.
///
/// 3. We were previously interrupted. In this case we consume the interrupt
/// here and return a trap, clearing the interrupt and allowing the next
/// wasm call to proceed.
///
/// The return value here is a trap for case 3, a noop destructor in case 2,
/// and a meaningful destructor in case 1
///
/// For more information about interrupts and stack limits see
/// `crates/environ/src/cranelift.rs`.
///
/// Note that this function must be called with `self` on the stack, not the
/// heap/etc.
fn update_stack_limit(&self) -> Result<impl Drop + '_, Trap> {
// Determine the stack pointer where, after which, any wasm code will
// immediately trap. This is checked on the entry to all wasm functions.
//
// Note that this isn't 100% precise. We are requested to give wasm
// `max_wasm_stack` bytes, but what we're actually doing is giving wasm
// probably a little less than `max_wasm_stack` because we're
// calculating the limit relative to this function's approximate stack
// pointer. Wasm will be executed on a frame beneath this one (or next
// to it). In any case it's expected to be at most a few hundred bytes
// of slop one way or another. When wasm is typically given a MB or so
// (a million bytes) the slop shouldn't matter too much.
let wasm_stack_limit = psm::stack_pointer() as usize - self.trap_info.max_wasm_stack();
let interrupts = self.trap_info.interrupts();
let reset_stack_limit = match interrupts.stack_limit.compare_exchange(
usize::max_value(),
wasm_stack_limit,
SeqCst,
SeqCst,
) {
Ok(_) => {
// We're the first wasm on the stack so we've now reserved the
// `max_wasm_stack` bytes of native stack space for wasm.
// Nothing left to do here now except reset back when we're
// done.
true
}
Err(n) if n == wasmtime_environ::INTERRUPTED => {
// This means that an interrupt happened before we actually
// called this function, which means that we're now
// considered interrupted. Be sure to consume this interrupt
// as part of this process too.
interrupts.stack_limit.store(usize::max_value(), SeqCst);
return Err(Trap::Wasm {
trap_code: ir::TrapCode::Interrupt,
backtrace: Backtrace::new_unresolved(),
});
}
Err(_) => {
// The stack limit was previously set by a previous wasm
// call on the stack. We leave the original stack limit for
// wasm in place in that case, and don't reset the stack
// limit when we're done.
false
}
};
struct Reset<'a>(bool, &'a AtomicUsize);
impl Drop for Reset<'_> {
fn drop(&mut self) {
if self.0 {
self.1.store(usize::max_value(), SeqCst);
}
}
}
Ok(Reset(reset_stack_limit, &interrupts.stack_limit))
}
fn unwind_with(&self, reason: UnwindReason) -> ! {
self.unwind.replace(reason);
unsafe {
Unwind(self.jmp_buf.get());
}
}
/// Trap handler using our thread-local state.
///
/// * `pc` - the program counter the trap happened at
/// * `call_handler` - a closure used to invoke the platform-specific
/// signal handler for each instance, if available.
///
/// Attempts to handle the trap if it's a wasm trap. Returns a few
/// different things:
///
/// * null - the trap didn't look like a wasm trap and should continue as a
/// trap
/// * 1 as a pointer - the trap was handled by a custom trap handler on an
/// instance, and the trap handler should quickly return.
/// * a different pointer - a jmp_buf buffer to longjmp to, meaning that
/// the wasm trap was succesfully handled.
fn jmp_buf_if_trap(
&self,
pc: *const u8,
call_handler: impl Fn(&SignalHandler) -> bool,
) -> *const u8 {
// If we hit a fault while handling a previous trap, that's quite bad,
// so bail out and let the system handle this recursive segfault.
//
// Otherwise flag ourselves as handling a trap, do the trap handling,
// and reset our trap handling flag.
if self.handling_trap.replace(true) {
return ptr::null();
}
let _reset = ResetCell(&self.handling_trap, false);
// If we haven't even started to handle traps yet, bail out.
if self.jmp_buf.get().is_null() {
return ptr::null();
}
// First up see if any instance registered has a custom trap handler,
// in which case run them all. If anything handles the trap then we
// return that the trap was handled.
if self.trap_info.custom_signal_handler(&call_handler) {
return 1 as *const _;
}
// If this fault wasn't in wasm code, then it's not our problem
if !self.trap_info.is_wasm_trap(pc as usize) {
return ptr::null();
}
// If all that passed then this is indeed a wasm trap, so return the
// `jmp_buf` passed to `Unwind` to resume.
self.jmp_buf.get()
}
fn capture_backtrace(&self, pc: *const u8) {
let backtrace = Backtrace::new_unresolved();
self.unwind.replace(UnwindReason::JitTrap {
backtrace,
pc: pc as usize,
});
}
}
struct ResetCell<'a, T: Copy>(&'a Cell<T>, T);
impl<T: Copy> Drop for ResetCell<'_, T> {
fn drop(&mut self) {
self.0.set(self.1);
}
}
// A private inner module for managing the TLS state that we require across
// calls in wasm. The WebAssembly code is called from C++ and then a trap may
// happen which requires us to read some contextual state to figure out what to
// do with the trap. This `tls` module is used to persist that information from
// the caller to the trap site.
mod tls {
use super::CallThreadState;
use std::mem;
use std::ptr;
pub use raw::Ptr;
// An even *more* inner module for dealing with TLS. This actually has the
// thread local variable and has functions to access the variable.
//
// Note that this is specially done to fully encapsulate that the accessors
// for tls must not be inlined. Wasmtime's async support employs stack
// switching which can resume execution on different OS threads. This means
// that borrows of our TLS pointer must never live across accesses because
// otherwise the access may be split across two threads and cause unsafety.
//
// This also means that extra care is taken by the runtime to save/restore
// these TLS values when the runtime may have crossed threads.
mod raw {
use super::CallThreadState;
use std::cell::Cell;
use std::ptr;
pub type Ptr = *const CallThreadState<'static>;
thread_local!(static PTR: Cell<Ptr> = Cell::new(ptr::null()));
#[inline(never)] // see module docs for why this is here
pub fn replace(val: Ptr) -> Ptr {
// Mark the current thread as handling interrupts for this specific
// CallThreadState: may clobber the previous entry.
super::super::sys::register_tls(val);
PTR.with(|p| p.replace(val))
}
#[inline(never)] // see module docs for why this is here
pub fn get() -> Ptr {
PTR.with(|p| p.get())
}
}
/// Opaque state used to help control TLS state across stack switches for
/// async support.
pub struct TlsRestore(raw::Ptr);
impl TlsRestore {
/// Takes the TLS state that is currently configured and returns a
/// token that is used to replace it later.
///
/// This is not a safe operation since it's intended to only be used
/// with stack switching found with fibers and async wasmtime.
pub unsafe fn take() -> TlsRestore {
// Our tls pointer must be set at this time, and it must not be
// null. We need to restore the previous pointer since we're
// removing ourselves from the call-stack, and in the process we
// null out our own previous field for safety in case it's
// accidentally used later.
let raw = raw::get();
assert!(!raw.is_null());
let prev = (*raw).prev.replace(ptr::null());
raw::replace(prev);
TlsRestore(raw)
}
/// Restores a previous tls state back into this thread's TLS.
///
/// This is unsafe because it's intended to only be used within the
/// context of stack switching within wasmtime.
pub unsafe fn replace(self) -> Result<(), super::Trap> {
// When replacing to the previous value of TLS, we might have
// crossed a thread: make sure the trap-handling lazy initializer
// runs.
super::sys::lazy_per_thread_init()?;
// We need to configure our previous TLS pointer to whatever is in
// TLS at this time, and then we set the current state to ourselves.
let prev = raw::get();
assert!((*self.0).prev.get().is_null());
(*self.0).prev.set(prev);
raw::replace(self.0);
Ok(())
}
}
/// Configures thread local state such that for the duration of the
/// execution of `closure` any call to `with` will yield `ptr`, unless this
/// is recursively called again.
pub fn set<R>(state: &CallThreadState<'_>, closure: impl FnOnce() -> R) -> R {
struct Reset<'a, 'b>(&'a CallThreadState<'b>);
impl Drop for Reset<'_, '_> {
fn drop(&mut self) {
raw::replace(self.0.prev.replace(ptr::null()));
}
}
// Note that this extension of the lifetime to `'static` should be
// safe because we only ever access it below with an anonymous
// lifetime, meaning `'static` never leaks out of this module.
let ptr = unsafe {
mem::transmute::<*const CallThreadState<'_>, *const CallThreadState<'static>>(state)
};
let prev = raw::replace(ptr);
state.prev.set(prev);
let _reset = Reset(state);
closure()
}
/// Returns the last pointer configured with `set` above. Panics if `set`
/// has not been previously called.
pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState<'_>>) -> R) -> R {
let p = raw::get();
unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
}
}
| {
let _reset = self.update_stack_limit()?;
let ret = tls::set(&self, || closure(&self));
match self.unwind.replace(UnwindReason::None) {
UnwindReason::None => {
debug_assert_eq!(ret, 1);
Ok(())
}
UnwindReason::UserTrap(data) => {
debug_assert_eq!(ret, 0);
Err(Trap::User(data))
}
UnwindReason::LibTrap(trap) => Err(trap),
UnwindReason::JitTrap { backtrace, pc } => {
debug_assert_eq!(ret, 0);
let interrupts = self.trap_info.interrupts();
let maybe_interrupted =
interrupts.stack_limit.load(SeqCst) == wasmtime_environ::INTERRUPTED;
Err(Trap::Jit {
pc,
backtrace,
maybe_interrupted,
})
}
UnwindReason::Panic(panic) => {
debug_assert_eq!(ret, 0);
std::panic::resume_unwind(panic)
}
}
} | identifier_body |
traphandlers.rs | //! WebAssembly trap handling, which is built on top of the lower-level
//! signalhandling mechanisms.
use crate::VMInterrupts;
use backtrace::Backtrace;
use std::any::Any;
use std::cell::Cell;
use std::error::Error;
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::sync::Once;
use wasmtime_environ::ir;
pub use self::tls::TlsRestore;
extern "C" {
fn RegisterSetjmp(
jmp_buf: *mut *const u8,
callback: extern "C" fn(*mut u8),
payload: *mut u8,
) -> i32;
fn Unwind(jmp_buf: *const u8) -> !;
}
cfg_if::cfg_if! {
if #[cfg(target_os = "macos")] {
mod macos;
use macos as sys;
} else if #[cfg(unix)] {
mod unix;
use unix as sys;
} else if #[cfg(target_os = "windows")] {
mod windows;
use windows as sys;
}
}
pub use sys::SignalHandler;
/// This function performs the low-overhead platform-specific initialization
/// that we want to do eagerly to ensure a more-deterministic global process
/// state.
///
/// This is especially relevant for signal handlers since handler ordering
/// depends on installation order: the wasm signal handler must run *before*
/// the other crash handlers and since POSIX signal handlers work LIFO, this
/// function needs to be called at the end of the startup process, after other
/// handlers have been installed. This function can thus be called multiple
/// times, having no effect after the first call.
pub fn init_traps() {
static INIT: Once = Once::new();
INIT.call_once(|| unsafe { sys::platform_init() });
}
/// Raises a user-defined trap immediately.
///
/// This function performs as-if a wasm trap was just executed, only the trap
/// has a dynamic payload associated with it which is user-provided. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_user_trap(data: Box<dyn Error + Send + Sync>) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::UserTrap(data)))
}
/// Raises a trap from inside library code immediately.
///
/// This function performs as-if a wasm trap was just executed. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_lib_trap(trap: Trap) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::LibTrap(trap)))
}
/// Carries a Rust panic across wasm code and resumes the panic on the other
/// side.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn resume_panic(payload: Box<dyn Any + Send>) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::Panic(payload)))
}
/// Stores trace message with backtrace.
#[derive(Debug)]
pub enum Trap {
/// A user-raised trap through `raise_user_trap`.
User(Box<dyn Error + Send + Sync>),
/// A trap raised from jit code
Jit {
/// The program counter in JIT code where this trap happened.
pc: usize,
/// Native stack backtrace at the time the trap occurred
backtrace: Backtrace,
/// An indicator for whether this may have been a trap generated from an
/// interrupt, used for switching what would otherwise be a stack
/// overflow trap to be an interrupt trap.
maybe_interrupted: bool,
},
/// A trap raised from a wasm libcall
Wasm {
/// Code of the trap.
trap_code: ir::TrapCode,
/// Native stack backtrace at the time the trap occurred
backtrace: Backtrace,
},
/// A trap indicating that the runtime was unable to allocate sufficient memory.
OOM {
/// Native stack backtrace at the time the OOM occurred
backtrace: Backtrace,
},
}
impl Trap {
/// Construct a new Wasm trap with the given source location and trap code.
///
/// Internally saves a backtrace when constructed.
pub fn wasm(trap_code: ir::TrapCode) -> Self {
let backtrace = Backtrace::new_unresolved();
Trap::Wasm {
trap_code,
backtrace,
}
}
/// Construct a new OOM trap with the given source location and trap code.
///
/// Internally saves a backtrace when constructed.
pub fn oom() -> Self {
let backtrace = Backtrace::new_unresolved();
Trap::OOM { backtrace }
}
}
/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// Highly unsafe since `closure` won't have any dtors run.
pub unsafe fn catch_traps<F>(trap_info: &impl TrapInfo, mut closure: F) -> Result<(), Trap>
where
F: FnMut(),
{
sys::lazy_per_thread_init()?;
return CallThreadState::new(trap_info).with(|cx| {
RegisterSetjmp(
cx.jmp_buf.as_ptr(),
call_closure::<F>,
&mut closure as *mut F as *mut u8,
)
});
extern "C" fn call_closure<F>(payload: *mut u8)
where
F: FnMut(),
{
unsafe { (*(payload as *mut F))() }
}
}
/// Runs `func` with the last `trap_info` object registered by `catch_traps`.
///
/// Calls `func` with `None` if `catch_traps` wasn't previously called from this
/// stack frame.
pub fn with_last_info<R>(func: impl FnOnce(Option<&dyn Any>) -> R) -> R {
tls::with(|state| func(state.map(|s| s.trap_info.as_any())))
}
/// Invokes the contextually-defined context's out-of-gas function.
///
/// (basically delegates to `wasmtime::Store::out_of_gas`)
pub fn out_of_gas() {
tls::with(|state| state.unwrap().trap_info.out_of_gas())
}
/// Temporary state stored on the stack which is registered in the `tls` module
/// below for calls into wasm.
pub struct CallThreadState<'a> {
unwind: Cell<UnwindReason>,
jmp_buf: Cell<*const u8>,
handling_trap: Cell<bool>,
trap_info: &'a (dyn TrapInfo + 'a),
prev: Cell<tls::Ptr>,
}
/// A package of functionality needed by `catch_traps` to figure out what to do
/// when handling a trap.
///
/// Note that this is an `unsafe` trait at least because it's being run in the
/// context of a synchronous signal handler, so it needs to be careful to not
/// access too much state in answering these queries.
pub unsafe trait TrapInfo {
/// Converts this object into an `Any` to dynamically check its type.
fn as_any(&self) -> &dyn Any;
/// Returns whether the given program counter lies within wasm code,
/// indicating whether we should handle a trap or not.
fn is_wasm_trap(&self, pc: usize) -> bool;
/// Uses `call` to call a custom signal handler, if one is specified.
///
/// Returns `true` if `call` returns true, otherwise returns `false`.
fn custom_signal_handler(&self, call: &dyn Fn(&SignalHandler) -> bool) -> bool;
/// Returns the maximum size, in bytes, the wasm native stack is allowed to
/// grow to.
fn max_wasm_stack(&self) -> usize;
/// Callback invoked whenever WebAssembly has entirely consumed the fuel
/// that it was allotted.
///
/// This function may return, and it may also `raise_lib_trap`.
fn out_of_gas(&self);
/// Returns the VM interrupts to use for interrupting Wasm code.
fn interrupts(&self) -> &VMInterrupts;
}
enum UnwindReason {
None,
Panic(Box<dyn Any + Send>),
UserTrap(Box<dyn Error + Send + Sync>),
LibTrap(Trap),
JitTrap { backtrace: Backtrace, pc: usize },
}
impl<'a> CallThreadState<'a> {
fn new(trap_info: &'a (dyn TrapInfo + 'a)) -> CallThreadState<'a> {
CallThreadState {
unwind: Cell::new(UnwindReason::None),
jmp_buf: Cell::new(ptr::null()),
handling_trap: Cell::new(false),
trap_info,
prev: Cell::new(ptr::null()),
}
}
fn with(self, closure: impl FnOnce(&CallThreadState) -> i32) -> Result<(), Trap> {
let _reset = self.update_stack_limit()?;
let ret = tls::set(&self, || closure(&self));
match self.unwind.replace(UnwindReason::None) {
UnwindReason::None => {
debug_assert_eq!(ret, 1);
Ok(())
}
UnwindReason::UserTrap(data) => {
debug_assert_eq!(ret, 0);
Err(Trap::User(data))
}
UnwindReason::LibTrap(trap) => Err(trap),
UnwindReason::JitTrap { backtrace, pc } => {
debug_assert_eq!(ret, 0);
let interrupts = self.trap_info.interrupts();
let maybe_interrupted =
interrupts.stack_limit.load(SeqCst) == wasmtime_environ::INTERRUPTED;
Err(Trap::Jit {
pc,
backtrace,
maybe_interrupted,
})
}
UnwindReason::Panic(panic) => {
debug_assert_eq!(ret, 0);
std::panic::resume_unwind(panic)
}
}
}
/// Checks and/or initializes the wasm native call stack limit.
///
/// This function will inspect the current state of the stack and calling
/// context to determine which of three buckets we're in:
///
/// 1. We are the first wasm call on the stack. This means that we need to
/// set up a stack limit where beyond which if the native wasm stack
/// pointer goes beyond forces a trap. For now we simply reserve an
/// arbitrary chunk of bytes (1 MB from roughly the current native stack
/// pointer). This logic will likely get tweaked over time.
///
/// 2. We aren't the first wasm call on the stack. In this scenario the wasm
/// stack limit is already configured. This case of wasm -> host -> wasm
/// we assume that the native stack consumed by the host is accounted for
/// in the initial stack limit calculation. That means that in this
/// scenario we do nothing.
///
/// 3. We were previously interrupted. In this case we consume the interrupt
/// here and return a trap, clearing the interrupt and allowing the next
/// wasm call to proceed.
///
/// The return value here is a trap for case 3, a noop destructor in case 2,
/// and a meaningful destructor in case 1
///
/// For more information about interrupts and stack limits see
/// `crates/environ/src/cranelift.rs`.
///
/// Note that this function must be called with `self` on the stack, not the
/// heap/etc.
fn update_stack_limit(&self) -> Result<impl Drop + '_, Trap> {
// Determine the stack pointer where, after which, any wasm code will
// immediately trap. This is checked on the entry to all wasm functions.
//
// Note that this isn't 100% precise. We are requested to give wasm
// `max_wasm_stack` bytes, but what we're actually doing is giving wasm
// probably a little less than `max_wasm_stack` because we're
// calculating the limit relative to this function's approximate stack
// pointer. Wasm will be executed on a frame beneath this one (or next
// to it). In any case it's expected to be at most a few hundred bytes
// of slop one way or another. When wasm is typically given a MB or so
// (a million bytes) the slop shouldn't matter too much.
let wasm_stack_limit = psm::stack_pointer() as usize - self.trap_info.max_wasm_stack();
let interrupts = self.trap_info.interrupts();
let reset_stack_limit = match interrupts.stack_limit.compare_exchange(
usize::max_value(),
wasm_stack_limit,
SeqCst,
SeqCst,
) {
Ok(_) => {
// We're the first wasm on the stack so we've now reserved the
// `max_wasm_stack` bytes of native stack space for wasm.
// Nothing left to do here now except reset back when we're
// done.
true
}
Err(n) if n == wasmtime_environ::INTERRUPTED => {
// This means that an interrupt happened before we actually
// called this function, which means that we're now
// considered interrupted. Be sure to consume this interrupt
// as part of this process too.
interrupts.stack_limit.store(usize::max_value(), SeqCst);
return Err(Trap::Wasm {
trap_code: ir::TrapCode::Interrupt,
backtrace: Backtrace::new_unresolved(),
});
}
Err(_) => {
// The stack limit was previously set by a previous wasm
// call on the stack. We leave the original stack limit for
// wasm in place in that case, and don't reset the stack
// limit when we're done.
false
}
};
struct Reset<'a>(bool, &'a AtomicUsize);
impl Drop for Reset<'_> {
fn drop(&mut self) {
if self.0 {
self.1.store(usize::max_value(), SeqCst);
}
}
}
Ok(Reset(reset_stack_limit, &interrupts.stack_limit))
}
fn unwind_with(&self, reason: UnwindReason) -> ! {
self.unwind.replace(reason);
unsafe {
Unwind(self.jmp_buf.get());
}
}
/// Trap handler using our thread-local state.
///
/// * `pc` - the program counter the trap happened at
/// * `call_handler` - a closure used to invoke the platform-specific
/// signal handler for each instance, if available.
///
/// Attempts to handle the trap if it's a wasm trap. Returns a few
/// different things:
///
/// * null - the trap didn't look like a wasm trap and should continue as a
/// trap
/// * 1 as a pointer - the trap was handled by a custom trap handler on an
/// instance, and the trap handler should quickly return.
/// * a different pointer - a jmp_buf buffer to longjmp to, meaning that
/// the wasm trap was succesfully handled.
fn jmp_buf_if_trap(
&self,
pc: *const u8,
call_handler: impl Fn(&SignalHandler) -> bool,
) -> *const u8 {
// If we hit a fault while handling a previous trap, that's quite bad,
// so bail out and let the system handle this recursive segfault.
//
// Otherwise flag ourselves as handling a trap, do the trap handling,
// and reset our trap handling flag.
if self.handling_trap.replace(true) {
return ptr::null();
}
let _reset = ResetCell(&self.handling_trap, false);
// If we haven't even started to handle traps yet, bail out.
if self.jmp_buf.get().is_null() {
return ptr::null();
}
// First up see if any instance registered has a custom trap handler,
// in which case run them all. If anything handles the trap then we
// return that the trap was handled.
if self.trap_info.custom_signal_handler(&call_handler) {
return 1 as *const _;
}
// If this fault wasn't in wasm code, then it's not our problem
if !self.trap_info.is_wasm_trap(pc as usize) {
return ptr::null();
}
// If all that passed then this is indeed a wasm trap, so return the
// `jmp_buf` passed to `Unwind` to resume.
self.jmp_buf.get()
}
fn capture_backtrace(&self, pc: *const u8) {
let backtrace = Backtrace::new_unresolved();
self.unwind.replace(UnwindReason::JitTrap {
backtrace,
pc: pc as usize,
});
}
}
struct ResetCell<'a, T: Copy>(&'a Cell<T>, T);
impl<T: Copy> Drop for ResetCell<'_, T> {
fn drop(&mut self) {
self.0.set(self.1);
}
}
// A private inner module for managing the TLS state that we require across
// calls in wasm. The WebAssembly code is called from C++ and then a trap may
// happen which requires us to read some contextual state to figure out what to
// do with the trap. This `tls` module is used to persist that information from
// the caller to the trap site.
mod tls {
use super::CallThreadState;
use std::mem;
use std::ptr;
pub use raw::Ptr;
// An even *more* inner module for dealing with TLS. This actually has the
// thread local variable and has functions to access the variable.
//
// Note that this is specially done to fully encapsulate that the accessors
// for tls must not be inlined. Wasmtime's async support employs stack
// switching which can resume execution on different OS threads. This means
// that borrows of our TLS pointer must never live across accesses because
// otherwise the access may be split across two threads and cause unsafety.
//
// This also means that extra care is taken by the runtime to save/restore
// these TLS values when the runtime may have crossed threads.
mod raw {
use super::CallThreadState;
use std::cell::Cell;
use std::ptr;
pub type Ptr = *const CallThreadState<'static>;
thread_local!(static PTR: Cell<Ptr> = Cell::new(ptr::null()));
#[inline(never)] // see module docs for why this is here
pub fn | (val: Ptr) -> Ptr {
// Mark the current thread as handling interrupts for this specific
// CallThreadState: may clobber the previous entry.
super::super::sys::register_tls(val);
PTR.with(|p| p.replace(val))
}
#[inline(never)] // see module docs for why this is here
pub fn get() -> Ptr {
PTR.with(|p| p.get())
}
}
/// Opaque state used to help control TLS state across stack switches for
/// async support.
pub struct TlsRestore(raw::Ptr);
impl TlsRestore {
/// Takes the TLS state that is currently configured and returns a
/// token that is used to replace it later.
///
/// This is not a safe operation since it's intended to only be used
/// with stack switching found with fibers and async wasmtime.
pub unsafe fn take() -> TlsRestore {
// Our tls pointer must be set at this time, and it must not be
// null. We need to restore the previous pointer since we're
// removing ourselves from the call-stack, and in the process we
// null out our own previous field for safety in case it's
// accidentally used later.
let raw = raw::get();
assert!(!raw.is_null());
let prev = (*raw).prev.replace(ptr::null());
raw::replace(prev);
TlsRestore(raw)
}
/// Restores a previous tls state back into this thread's TLS.
///
/// This is unsafe because it's intended to only be used within the
/// context of stack switching within wasmtime.
pub unsafe fn replace(self) -> Result<(), super::Trap> {
// When replacing to the previous value of TLS, we might have
// crossed a thread: make sure the trap-handling lazy initializer
// runs.
super::sys::lazy_per_thread_init()?;
// We need to configure our previous TLS pointer to whatever is in
// TLS at this time, and then we set the current state to ourselves.
let prev = raw::get();
assert!((*self.0).prev.get().is_null());
(*self.0).prev.set(prev);
raw::replace(self.0);
Ok(())
}
}
/// Configures thread local state such that for the duration of the
/// execution of `closure` any call to `with` will yield `ptr`, unless this
/// is recursively called again.
pub fn set<R>(state: &CallThreadState<'_>, closure: impl FnOnce() -> R) -> R {
struct Reset<'a, 'b>(&'a CallThreadState<'b>);
impl Drop for Reset<'_, '_> {
fn drop(&mut self) {
raw::replace(self.0.prev.replace(ptr::null()));
}
}
// Note that this extension of the lifetime to `'static` should be
// safe because we only ever access it below with an anonymous
// lifetime, meaning `'static` never leaks out of this module.
let ptr = unsafe {
mem::transmute::<*const CallThreadState<'_>, *const CallThreadState<'static>>(state)
};
let prev = raw::replace(ptr);
state.prev.set(prev);
let _reset = Reset(state);
closure()
}
/// Returns the last pointer configured with `set` above. Panics if `set`
/// has not been previously called.
pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState<'_>>) -> R) -> R {
let p = raw::get();
unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
}
}
| replace | identifier_name |
traphandlers.rs | //! WebAssembly trap handling, which is built on top of the lower-level
//! signalhandling mechanisms.
use crate::VMInterrupts;
use backtrace::Backtrace;
use std::any::Any;
use std::cell::Cell;
use std::error::Error;
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::sync::Once;
use wasmtime_environ::ir;
pub use self::tls::TlsRestore;
extern "C" {
fn RegisterSetjmp(
jmp_buf: *mut *const u8,
callback: extern "C" fn(*mut u8),
payload: *mut u8,
) -> i32;
fn Unwind(jmp_buf: *const u8) -> !;
}
cfg_if::cfg_if! {
if #[cfg(target_os = "macos")] {
mod macos;
use macos as sys;
} else if #[cfg(unix)] {
mod unix;
use unix as sys;
} else if #[cfg(target_os = "windows")] {
mod windows;
use windows as sys;
}
}
pub use sys::SignalHandler;
/// This function performs the low-overhead platform-specific initialization
/// that we want to do eagerly to ensure a more-deterministic global process
/// state.
///
/// This is especially relevant for signal handlers since handler ordering
/// depends on installation order: the wasm signal handler must run *before*
/// the other crash handlers and since POSIX signal handlers work LIFO, this
/// function needs to be called at the end of the startup process, after other
/// handlers have been installed. This function can thus be called multiple
/// times, having no effect after the first call.
pub fn init_traps() {
static INIT: Once = Once::new();
INIT.call_once(|| unsafe { sys::platform_init() });
}
/// Raises a user-defined trap immediately.
///
/// This function performs as-if a wasm trap was just executed, only the trap
/// has a dynamic payload associated with it which is user-provided. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_user_trap(data: Box<dyn Error + Send + Sync>) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::UserTrap(data)))
}
/// Raises a trap from inside library code immediately.
///
/// This function performs as-if a wasm trap was just executed. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_lib_trap(trap: Trap) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::LibTrap(trap)))
}
/// Carries a Rust panic across wasm code and resumes the panic on the other
/// side.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn resume_panic(payload: Box<dyn Any + Send>) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::Panic(payload)))
}
/// Stores trace message with backtrace.
#[derive(Debug)]
pub enum Trap {
/// A user-raised trap through `raise_user_trap`.
User(Box<dyn Error + Send + Sync>),
/// A trap raised from jit code
Jit {
/// The program counter in JIT code where this trap happened.
pc: usize,
/// Native stack backtrace at the time the trap occurred
backtrace: Backtrace,
/// An indicator for whether this may have been a trap generated from an
/// interrupt, used for switching what would otherwise be a stack
/// overflow trap to be an interrupt trap.
maybe_interrupted: bool,
},
/// A trap raised from a wasm libcall
Wasm {
/// Code of the trap.
trap_code: ir::TrapCode,
/// Native stack backtrace at the time the trap occurred
backtrace: Backtrace,
},
/// A trap indicating that the runtime was unable to allocate sufficient memory.
OOM {
/// Native stack backtrace at the time the OOM occurred
backtrace: Backtrace,
},
}
impl Trap {
/// Construct a new Wasm trap with the given source location and trap code.
///
/// Internally saves a backtrace when constructed.
pub fn wasm(trap_code: ir::TrapCode) -> Self {
let backtrace = Backtrace::new_unresolved();
Trap::Wasm {
trap_code,
backtrace,
}
}
/// Construct a new OOM trap with the given source location and trap code.
///
/// Internally saves a backtrace when constructed.
pub fn oom() -> Self {
let backtrace = Backtrace::new_unresolved();
Trap::OOM { backtrace }
}
}
/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// Highly unsafe since `closure` won't have any dtors run.
pub unsafe fn catch_traps<F>(trap_info: &impl TrapInfo, mut closure: F) -> Result<(), Trap>
where
F: FnMut(),
{
sys::lazy_per_thread_init()?;
return CallThreadState::new(trap_info).with(|cx| {
RegisterSetjmp(
cx.jmp_buf.as_ptr(),
call_closure::<F>,
&mut closure as *mut F as *mut u8,
)
});
extern "C" fn call_closure<F>(payload: *mut u8)
where
F: FnMut(),
{
unsafe { (*(payload as *mut F))() }
}
}
/// Runs `func` with the last `trap_info` object registered by `catch_traps`.
///
/// Calls `func` with `None` if `catch_traps` wasn't previously called from this
/// stack frame.
pub fn with_last_info<R>(func: impl FnOnce(Option<&dyn Any>) -> R) -> R {
tls::with(|state| func(state.map(|s| s.trap_info.as_any()))) | }
/// Invokes the contextually-defined context's out-of-gas function.
///
/// (basically delegates to `wasmtime::Store::out_of_gas`)
pub fn out_of_gas() {
tls::with(|state| state.unwrap().trap_info.out_of_gas())
}
/// Temporary state stored on the stack which is registered in the `tls` module
/// below for calls into wasm.
pub struct CallThreadState<'a> {
unwind: Cell<UnwindReason>,
jmp_buf: Cell<*const u8>,
handling_trap: Cell<bool>,
trap_info: &'a (dyn TrapInfo + 'a),
prev: Cell<tls::Ptr>,
}
/// A package of functionality needed by `catch_traps` to figure out what to do
/// when handling a trap.
///
/// Note that this is an `unsafe` trait at least because it's being run in the
/// context of a synchronous signal handler, so it needs to be careful to not
/// access too much state in answering these queries.
pub unsafe trait TrapInfo {
/// Converts this object into an `Any` to dynamically check its type.
fn as_any(&self) -> &dyn Any;
/// Returns whether the given program counter lies within wasm code,
/// indicating whether we should handle a trap or not.
fn is_wasm_trap(&self, pc: usize) -> bool;
/// Uses `call` to call a custom signal handler, if one is specified.
///
/// Returns `true` if `call` returns true, otherwise returns `false`.
fn custom_signal_handler(&self, call: &dyn Fn(&SignalHandler) -> bool) -> bool;
/// Returns the maximum size, in bytes, the wasm native stack is allowed to
/// grow to.
fn max_wasm_stack(&self) -> usize;
/// Callback invoked whenever WebAssembly has entirely consumed the fuel
/// that it was allotted.
///
/// This function may return, and it may also `raise_lib_trap`.
fn out_of_gas(&self);
/// Returns the VM interrupts to use for interrupting Wasm code.
fn interrupts(&self) -> &VMInterrupts;
}
enum UnwindReason {
None,
Panic(Box<dyn Any + Send>),
UserTrap(Box<dyn Error + Send + Sync>),
LibTrap(Trap),
JitTrap { backtrace: Backtrace, pc: usize },
}
impl<'a> CallThreadState<'a> {
fn new(trap_info: &'a (dyn TrapInfo + 'a)) -> CallThreadState<'a> {
CallThreadState {
unwind: Cell::new(UnwindReason::None),
jmp_buf: Cell::new(ptr::null()),
handling_trap: Cell::new(false),
trap_info,
prev: Cell::new(ptr::null()),
}
}
fn with(self, closure: impl FnOnce(&CallThreadState) -> i32) -> Result<(), Trap> {
let _reset = self.update_stack_limit()?;
let ret = tls::set(&self, || closure(&self));
match self.unwind.replace(UnwindReason::None) {
UnwindReason::None => {
debug_assert_eq!(ret, 1);
Ok(())
}
UnwindReason::UserTrap(data) => {
debug_assert_eq!(ret, 0);
Err(Trap::User(data))
}
UnwindReason::LibTrap(trap) => Err(trap),
UnwindReason::JitTrap { backtrace, pc } => {
debug_assert_eq!(ret, 0);
let interrupts = self.trap_info.interrupts();
let maybe_interrupted =
interrupts.stack_limit.load(SeqCst) == wasmtime_environ::INTERRUPTED;
Err(Trap::Jit {
pc,
backtrace,
maybe_interrupted,
})
}
UnwindReason::Panic(panic) => {
debug_assert_eq!(ret, 0);
std::panic::resume_unwind(panic)
}
}
}
/// Checks and/or initializes the wasm native call stack limit.
///
/// This function will inspect the current state of the stack and calling
/// context to determine which of three buckets we're in:
///
/// 1. We are the first wasm call on the stack. This means that we need to
/// set up a stack limit where beyond which if the native wasm stack
/// pointer goes beyond forces a trap. For now we simply reserve an
/// arbitrary chunk of bytes (1 MB from roughly the current native stack
/// pointer). This logic will likely get tweaked over time.
///
/// 2. We aren't the first wasm call on the stack. In this scenario the wasm
/// stack limit is already configured. This case of wasm -> host -> wasm
/// we assume that the native stack consumed by the host is accounted for
/// in the initial stack limit calculation. That means that in this
/// scenario we do nothing.
///
/// 3. We were previously interrupted. In this case we consume the interrupt
/// here and return a trap, clearing the interrupt and allowing the next
/// wasm call to proceed.
///
/// The return value here is a trap for case 3, a noop destructor in case 2,
/// and a meaningful destructor in case 1
///
/// For more information about interrupts and stack limits see
/// `crates/environ/src/cranelift.rs`.
///
/// Note that this function must be called with `self` on the stack, not the
/// heap/etc.
fn update_stack_limit(&self) -> Result<impl Drop + '_, Trap> {
// Determine the stack pointer where, after which, any wasm code will
// immediately trap. This is checked on the entry to all wasm functions.
//
// Note that this isn't 100% precise. We are requested to give wasm
// `max_wasm_stack` bytes, but what we're actually doing is giving wasm
// probably a little less than `max_wasm_stack` because we're
// calculating the limit relative to this function's approximate stack
// pointer. Wasm will be executed on a frame beneath this one (or next
// to it). In any case it's expected to be at most a few hundred bytes
// of slop one way or another. When wasm is typically given a MB or so
// (a million bytes) the slop shouldn't matter too much.
let wasm_stack_limit = psm::stack_pointer() as usize - self.trap_info.max_wasm_stack();
let interrupts = self.trap_info.interrupts();
let reset_stack_limit = match interrupts.stack_limit.compare_exchange(
usize::max_value(),
wasm_stack_limit,
SeqCst,
SeqCst,
) {
Ok(_) => {
// We're the first wasm on the stack so we've now reserved the
// `max_wasm_stack` bytes of native stack space for wasm.
// Nothing left to do here now except reset back when we're
// done.
true
}
Err(n) if n == wasmtime_environ::INTERRUPTED => {
// This means that an interrupt happened before we actually
// called this function, which means that we're now
// considered interrupted. Be sure to consume this interrupt
// as part of this process too.
interrupts.stack_limit.store(usize::max_value(), SeqCst);
return Err(Trap::Wasm {
trap_code: ir::TrapCode::Interrupt,
backtrace: Backtrace::new_unresolved(),
});
}
Err(_) => {
// The stack limit was previously set by a previous wasm
// call on the stack. We leave the original stack limit for
// wasm in place in that case, and don't reset the stack
// limit when we're done.
false
}
};
struct Reset<'a>(bool, &'a AtomicUsize);
impl Drop for Reset<'_> {
fn drop(&mut self) {
if self.0 {
self.1.store(usize::max_value(), SeqCst);
}
}
}
Ok(Reset(reset_stack_limit, &interrupts.stack_limit))
}
fn unwind_with(&self, reason: UnwindReason) -> ! {
self.unwind.replace(reason);
unsafe {
Unwind(self.jmp_buf.get());
}
}
/// Trap handler using our thread-local state.
///
/// * `pc` - the program counter the trap happened at
/// * `call_handler` - a closure used to invoke the platform-specific
/// signal handler for each instance, if available.
///
/// Attempts to handle the trap if it's a wasm trap. Returns a few
/// different things:
///
/// * null - the trap didn't look like a wasm trap and should continue as a
/// trap
/// * 1 as a pointer - the trap was handled by a custom trap handler on an
/// instance, and the trap handler should quickly return.
/// * a different pointer - a jmp_buf buffer to longjmp to, meaning that
/// the wasm trap was succesfully handled.
fn jmp_buf_if_trap(
&self,
pc: *const u8,
call_handler: impl Fn(&SignalHandler) -> bool,
) -> *const u8 {
// If we hit a fault while handling a previous trap, that's quite bad,
// so bail out and let the system handle this recursive segfault.
//
// Otherwise flag ourselves as handling a trap, do the trap handling,
// and reset our trap handling flag.
if self.handling_trap.replace(true) {
return ptr::null();
}
let _reset = ResetCell(&self.handling_trap, false);
// If we haven't even started to handle traps yet, bail out.
if self.jmp_buf.get().is_null() {
return ptr::null();
}
// First up see if any instance registered has a custom trap handler,
// in which case run them all. If anything handles the trap then we
// return that the trap was handled.
if self.trap_info.custom_signal_handler(&call_handler) {
return 1 as *const _;
}
// If this fault wasn't in wasm code, then it's not our problem
if !self.trap_info.is_wasm_trap(pc as usize) {
return ptr::null();
}
// If all that passed then this is indeed a wasm trap, so return the
// `jmp_buf` passed to `Unwind` to resume.
self.jmp_buf.get()
}
fn capture_backtrace(&self, pc: *const u8) {
let backtrace = Backtrace::new_unresolved();
self.unwind.replace(UnwindReason::JitTrap {
backtrace,
pc: pc as usize,
});
}
}
struct ResetCell<'a, T: Copy>(&'a Cell<T>, T);
impl<T: Copy> Drop for ResetCell<'_, T> {
fn drop(&mut self) {
self.0.set(self.1);
}
}
// A private inner module for managing the TLS state that we require across
// calls in wasm. The WebAssembly code is called from C++ and then a trap may
// happen which requires us to read some contextual state to figure out what to
// do with the trap. This `tls` module is used to persist that information from
// the caller to the trap site.
mod tls {
use super::CallThreadState;
use std::mem;
use std::ptr;
pub use raw::Ptr;
// An even *more* inner module for dealing with TLS. This actually has the
// thread local variable and has functions to access the variable.
//
// Note that this is specially done to fully encapsulate that the accessors
// for tls must not be inlined. Wasmtime's async support employs stack
// switching which can resume execution on different OS threads. This means
// that borrows of our TLS pointer must never live across accesses because
// otherwise the access may be split across two threads and cause unsafety.
//
// This also means that extra care is taken by the runtime to save/restore
// these TLS values when the runtime may have crossed threads.
mod raw {
use super::CallThreadState;
use std::cell::Cell;
use std::ptr;
pub type Ptr = *const CallThreadState<'static>;
thread_local!(static PTR: Cell<Ptr> = Cell::new(ptr::null()));
#[inline(never)] // see module docs for why this is here
pub fn replace(val: Ptr) -> Ptr {
// Mark the current thread as handling interrupts for this specific
// CallThreadState: may clobber the previous entry.
super::super::sys::register_tls(val);
PTR.with(|p| p.replace(val))
}
#[inline(never)] // see module docs for why this is here
pub fn get() -> Ptr {
PTR.with(|p| p.get())
}
}
/// Opaque state used to help control TLS state across stack switches for
/// async support.
pub struct TlsRestore(raw::Ptr);
impl TlsRestore {
/// Takes the TLS state that is currently configured and returns a
/// token that is used to replace it later.
///
/// This is not a safe operation since it's intended to only be used
/// with stack switching found with fibers and async wasmtime.
pub unsafe fn take() -> TlsRestore {
// Our tls pointer must be set at this time, and it must not be
// null. We need to restore the previous pointer since we're
// removing ourselves from the call-stack, and in the process we
// null out our own previous field for safety in case it's
// accidentally used later.
let raw = raw::get();
assert!(!raw.is_null());
let prev = (*raw).prev.replace(ptr::null());
raw::replace(prev);
TlsRestore(raw)
}
/// Restores a previous tls state back into this thread's TLS.
///
/// This is unsafe because it's intended to only be used within the
/// context of stack switching within wasmtime.
pub unsafe fn replace(self) -> Result<(), super::Trap> {
// When replacing to the previous value of TLS, we might have
// crossed a thread: make sure the trap-handling lazy initializer
// runs.
super::sys::lazy_per_thread_init()?;
// We need to configure our previous TLS pointer to whatever is in
// TLS at this time, and then we set the current state to ourselves.
let prev = raw::get();
assert!((*self.0).prev.get().is_null());
(*self.0).prev.set(prev);
raw::replace(self.0);
Ok(())
}
}
/// Configures thread local state such that for the duration of the
/// execution of `closure` any call to `with` will yield `ptr`, unless this
/// is recursively called again.
pub fn set<R>(state: &CallThreadState<'_>, closure: impl FnOnce() -> R) -> R {
struct Reset<'a, 'b>(&'a CallThreadState<'b>);
impl Drop for Reset<'_, '_> {
fn drop(&mut self) {
raw::replace(self.0.prev.replace(ptr::null()));
}
}
// Note that this extension of the lifetime to `'static` should be
// safe because we only ever access it below with an anonymous
// lifetime, meaning `'static` never leaks out of this module.
let ptr = unsafe {
mem::transmute::<*const CallThreadState<'_>, *const CallThreadState<'static>>(state)
};
let prev = raw::replace(ptr);
state.prev.set(prev);
let _reset = Reset(state);
closure()
}
/// Returns the last pointer configured with `set` above. Panics if `set`
/// has not been previously called.
pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState<'_>>) -> R) -> R {
let p = raw::get();
unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
}
} | random_line_split | |
traphandlers.rs | //! WebAssembly trap handling, which is built on top of the lower-level
//! signalhandling mechanisms.
use crate::VMInterrupts;
use backtrace::Backtrace;
use std::any::Any;
use std::cell::Cell;
use std::error::Error;
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::sync::Once;
use wasmtime_environ::ir;
pub use self::tls::TlsRestore;
extern "C" {
fn RegisterSetjmp(
jmp_buf: *mut *const u8,
callback: extern "C" fn(*mut u8),
payload: *mut u8,
) -> i32;
fn Unwind(jmp_buf: *const u8) -> !;
}
cfg_if::cfg_if! {
if #[cfg(target_os = "macos")] {
mod macos;
use macos as sys;
} else if #[cfg(unix)] {
mod unix;
use unix as sys;
} else if #[cfg(target_os = "windows")] {
mod windows;
use windows as sys;
}
}
pub use sys::SignalHandler;
/// This function performs the low-overhead platform-specific initialization
/// that we want to do eagerly to ensure a more-deterministic global process
/// state.
///
/// This is especially relevant for signal handlers since handler ordering
/// depends on installation order: the wasm signal handler must run *before*
/// the other crash handlers and since POSIX signal handlers work LIFO, this
/// function needs to be called at the end of the startup process, after other
/// handlers have been installed. This function can thus be called multiple
/// times, having no effect after the first call.
pub fn init_traps() {
static INIT: Once = Once::new();
INIT.call_once(|| unsafe { sys::platform_init() });
}
/// Raises a user-defined trap immediately.
///
/// This function performs as-if a wasm trap was just executed, only the trap
/// has a dynamic payload associated with it which is user-provided. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_user_trap(data: Box<dyn Error + Send + Sync>) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::UserTrap(data)))
}
/// Raises a trap from inside library code immediately.
///
/// This function performs as-if a wasm trap was just executed. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_lib_trap(trap: Trap) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::LibTrap(trap)))
}
/// Carries a Rust panic across wasm code and resumes the panic on the other
/// side.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn resume_panic(payload: Box<dyn Any + Send>) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::Panic(payload)))
}
/// Stores trace message with backtrace.
#[derive(Debug)]
pub enum Trap {
/// A user-raised trap through `raise_user_trap`.
User(Box<dyn Error + Send + Sync>),
/// A trap raised from jit code
Jit {
/// The program counter in JIT code where this trap happened.
pc: usize,
/// Native stack backtrace at the time the trap occurred
backtrace: Backtrace,
/// An indicator for whether this may have been a trap generated from an
/// interrupt, used for switching what would otherwise be a stack
/// overflow trap to be an interrupt trap.
maybe_interrupted: bool,
},
/// A trap raised from a wasm libcall
Wasm {
/// Code of the trap.
trap_code: ir::TrapCode,
/// Native stack backtrace at the time the trap occurred
backtrace: Backtrace,
},
/// A trap indicating that the runtime was unable to allocate sufficient memory.
OOM {
/// Native stack backtrace at the time the OOM occurred
backtrace: Backtrace,
},
}
impl Trap {
/// Construct a new Wasm trap with the given source location and trap code.
///
/// Internally saves a backtrace when constructed.
pub fn wasm(trap_code: ir::TrapCode) -> Self {
let backtrace = Backtrace::new_unresolved();
Trap::Wasm {
trap_code,
backtrace,
}
}
/// Construct a new OOM trap with the given source location and trap code.
///
/// Internally saves a backtrace when constructed.
pub fn oom() -> Self {
let backtrace = Backtrace::new_unresolved();
Trap::OOM { backtrace }
}
}
/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// Highly unsafe since `closure` won't have any dtors run.
pub unsafe fn catch_traps<F>(trap_info: &impl TrapInfo, mut closure: F) -> Result<(), Trap>
where
F: FnMut(),
{
sys::lazy_per_thread_init()?;
return CallThreadState::new(trap_info).with(|cx| {
RegisterSetjmp(
cx.jmp_buf.as_ptr(),
call_closure::<F>,
&mut closure as *mut F as *mut u8,
)
});
extern "C" fn call_closure<F>(payload: *mut u8)
where
F: FnMut(),
{
unsafe { (*(payload as *mut F))() }
}
}
/// Runs `func` with the last `trap_info` object registered by `catch_traps`.
///
/// Calls `func` with `None` if `catch_traps` wasn't previously called from this
/// stack frame.
pub fn with_last_info<R>(func: impl FnOnce(Option<&dyn Any>) -> R) -> R {
tls::with(|state| func(state.map(|s| s.trap_info.as_any())))
}
/// Invokes the contextually-defined context's out-of-gas function.
///
/// (basically delegates to `wasmtime::Store::out_of_gas`)
pub fn out_of_gas() {
tls::with(|state| state.unwrap().trap_info.out_of_gas())
}
/// Temporary state stored on the stack which is registered in the `tls` module
/// below for calls into wasm.
pub struct CallThreadState<'a> {
unwind: Cell<UnwindReason>,
jmp_buf: Cell<*const u8>,
handling_trap: Cell<bool>,
trap_info: &'a (dyn TrapInfo + 'a),
prev: Cell<tls::Ptr>,
}
/// A package of functionality needed by `catch_traps` to figure out what to do
/// when handling a trap.
///
/// Note that this is an `unsafe` trait at least because it's being run in the
/// context of a synchronous signal handler, so it needs to be careful to not
/// access too much state in answering these queries.
pub unsafe trait TrapInfo {
/// Converts this object into an `Any` to dynamically check its type.
fn as_any(&self) -> &dyn Any;
/// Returns whether the given program counter lies within wasm code,
/// indicating whether we should handle a trap or not.
fn is_wasm_trap(&self, pc: usize) -> bool;
/// Uses `call` to call a custom signal handler, if one is specified.
///
/// Returns `true` if `call` returns true, otherwise returns `false`.
fn custom_signal_handler(&self, call: &dyn Fn(&SignalHandler) -> bool) -> bool;
/// Returns the maximum size, in bytes, the wasm native stack is allowed to
/// grow to.
fn max_wasm_stack(&self) -> usize;
/// Callback invoked whenever WebAssembly has entirely consumed the fuel
/// that it was allotted.
///
/// This function may return, and it may also `raise_lib_trap`.
fn out_of_gas(&self);
/// Returns the VM interrupts to use for interrupting Wasm code.
fn interrupts(&self) -> &VMInterrupts;
}
enum UnwindReason {
None,
Panic(Box<dyn Any + Send>),
UserTrap(Box<dyn Error + Send + Sync>),
LibTrap(Trap),
JitTrap { backtrace: Backtrace, pc: usize },
}
impl<'a> CallThreadState<'a> {
fn new(trap_info: &'a (dyn TrapInfo + 'a)) -> CallThreadState<'a> {
CallThreadState {
unwind: Cell::new(UnwindReason::None),
jmp_buf: Cell::new(ptr::null()),
handling_trap: Cell::new(false),
trap_info,
prev: Cell::new(ptr::null()),
}
}
fn with(self, closure: impl FnOnce(&CallThreadState) -> i32) -> Result<(), Trap> {
let _reset = self.update_stack_limit()?;
let ret = tls::set(&self, || closure(&self));
match self.unwind.replace(UnwindReason::None) {
UnwindReason::None => {
debug_assert_eq!(ret, 1);
Ok(())
}
UnwindReason::UserTrap(data) => {
debug_assert_eq!(ret, 0);
Err(Trap::User(data))
}
UnwindReason::LibTrap(trap) => Err(trap),
UnwindReason::JitTrap { backtrace, pc } => {
debug_assert_eq!(ret, 0);
let interrupts = self.trap_info.interrupts();
let maybe_interrupted =
interrupts.stack_limit.load(SeqCst) == wasmtime_environ::INTERRUPTED;
Err(Trap::Jit {
pc,
backtrace,
maybe_interrupted,
})
}
UnwindReason::Panic(panic) => {
debug_assert_eq!(ret, 0);
std::panic::resume_unwind(panic)
}
}
}
/// Checks and/or initializes the wasm native call stack limit.
///
/// This function will inspect the current state of the stack and calling
/// context to determine which of three buckets we're in:
///
/// 1. We are the first wasm call on the stack. This means that we need to
/// set up a stack limit where beyond which if the native wasm stack
/// pointer goes beyond forces a trap. For now we simply reserve an
/// arbitrary chunk of bytes (1 MB from roughly the current native stack
/// pointer). This logic will likely get tweaked over time.
///
/// 2. We aren't the first wasm call on the stack. In this scenario the wasm
/// stack limit is already configured. This case of wasm -> host -> wasm
/// we assume that the native stack consumed by the host is accounted for
/// in the initial stack limit calculation. That means that in this
/// scenario we do nothing.
///
/// 3. We were previously interrupted. In this case we consume the interrupt
/// here and return a trap, clearing the interrupt and allowing the next
/// wasm call to proceed.
///
/// The return value here is a trap for case 3, a noop destructor in case 2,
/// and a meaningful destructor in case 1
///
/// For more information about interrupts and stack limits see
/// `crates/environ/src/cranelift.rs`.
///
/// Note that this function must be called with `self` on the stack, not the
/// heap/etc.
fn update_stack_limit(&self) -> Result<impl Drop + '_, Trap> {
// Determine the stack pointer where, after which, any wasm code will
// immediately trap. This is checked on the entry to all wasm functions.
//
// Note that this isn't 100% precise. We are requested to give wasm
// `max_wasm_stack` bytes, but what we're actually doing is giving wasm
// probably a little less than `max_wasm_stack` because we're
// calculating the limit relative to this function's approximate stack
// pointer. Wasm will be executed on a frame beneath this one (or next
// to it). In any case it's expected to be at most a few hundred bytes
// of slop one way or another. When wasm is typically given a MB or so
// (a million bytes) the slop shouldn't matter too much.
let wasm_stack_limit = psm::stack_pointer() as usize - self.trap_info.max_wasm_stack();
let interrupts = self.trap_info.interrupts();
let reset_stack_limit = match interrupts.stack_limit.compare_exchange(
usize::max_value(),
wasm_stack_limit,
SeqCst,
SeqCst,
) {
Ok(_) => {
// We're the first wasm on the stack so we've now reserved the
// `max_wasm_stack` bytes of native stack space for wasm.
// Nothing left to do here now except reset back when we're
// done.
true
}
Err(n) if n == wasmtime_environ::INTERRUPTED => {
// This means that an interrupt happened before we actually
// called this function, which means that we're now
// considered interrupted. Be sure to consume this interrupt
// as part of this process too.
interrupts.stack_limit.store(usize::max_value(), SeqCst);
return Err(Trap::Wasm {
trap_code: ir::TrapCode::Interrupt,
backtrace: Backtrace::new_unresolved(),
});
}
Err(_) => |
};
struct Reset<'a>(bool, &'a AtomicUsize);
impl Drop for Reset<'_> {
fn drop(&mut self) {
if self.0 {
self.1.store(usize::max_value(), SeqCst);
}
}
}
Ok(Reset(reset_stack_limit, &interrupts.stack_limit))
}
fn unwind_with(&self, reason: UnwindReason) -> ! {
self.unwind.replace(reason);
unsafe {
Unwind(self.jmp_buf.get());
}
}
/// Trap handler using our thread-local state.
///
/// * `pc` - the program counter the trap happened at
/// * `call_handler` - a closure used to invoke the platform-specific
/// signal handler for each instance, if available.
///
/// Attempts to handle the trap if it's a wasm trap. Returns a few
/// different things:
///
/// * null - the trap didn't look like a wasm trap and should continue as a
/// trap
/// * 1 as a pointer - the trap was handled by a custom trap handler on an
/// instance, and the trap handler should quickly return.
/// * a different pointer - a jmp_buf buffer to longjmp to, meaning that
/// the wasm trap was succesfully handled.
fn jmp_buf_if_trap(
&self,
pc: *const u8,
call_handler: impl Fn(&SignalHandler) -> bool,
) -> *const u8 {
// If we hit a fault while handling a previous trap, that's quite bad,
// so bail out and let the system handle this recursive segfault.
//
// Otherwise flag ourselves as handling a trap, do the trap handling,
// and reset our trap handling flag.
if self.handling_trap.replace(true) {
return ptr::null();
}
let _reset = ResetCell(&self.handling_trap, false);
// If we haven't even started to handle traps yet, bail out.
if self.jmp_buf.get().is_null() {
return ptr::null();
}
// First up see if any instance registered has a custom trap handler,
// in which case run them all. If anything handles the trap then we
// return that the trap was handled.
if self.trap_info.custom_signal_handler(&call_handler) {
return 1 as *const _;
}
// If this fault wasn't in wasm code, then it's not our problem
if !self.trap_info.is_wasm_trap(pc as usize) {
return ptr::null();
}
// If all that passed then this is indeed a wasm trap, so return the
// `jmp_buf` passed to `Unwind` to resume.
self.jmp_buf.get()
}
fn capture_backtrace(&self, pc: *const u8) {
let backtrace = Backtrace::new_unresolved();
self.unwind.replace(UnwindReason::JitTrap {
backtrace,
pc: pc as usize,
});
}
}
struct ResetCell<'a, T: Copy>(&'a Cell<T>, T);
impl<T: Copy> Drop for ResetCell<'_, T> {
fn drop(&mut self) {
self.0.set(self.1);
}
}
// A private inner module for managing the TLS state that we require across
// calls in wasm. The WebAssembly code is called from C++ and then a trap may
// happen which requires us to read some contextual state to figure out what to
// do with the trap. This `tls` module is used to persist that information from
// the caller to the trap site.
mod tls {
use super::CallThreadState;
use std::mem;
use std::ptr;
pub use raw::Ptr;
// An even *more* inner module for dealing with TLS. This actually has the
// thread local variable and has functions to access the variable.
//
// Note that this is specially done to fully encapsulate that the accessors
// for tls must not be inlined. Wasmtime's async support employs stack
// switching which can resume execution on different OS threads. This means
// that borrows of our TLS pointer must never live across accesses because
// otherwise the access may be split across two threads and cause unsafety.
//
// This also means that extra care is taken by the runtime to save/restore
// these TLS values when the runtime may have crossed threads.
mod raw {
use super::CallThreadState;
use std::cell::Cell;
use std::ptr;
pub type Ptr = *const CallThreadState<'static>;
thread_local!(static PTR: Cell<Ptr> = Cell::new(ptr::null()));
#[inline(never)] // see module docs for why this is here
pub fn replace(val: Ptr) -> Ptr {
// Mark the current thread as handling interrupts for this specific
// CallThreadState: may clobber the previous entry.
super::super::sys::register_tls(val);
PTR.with(|p| p.replace(val))
}
#[inline(never)] // see module docs for why this is here
pub fn get() -> Ptr {
PTR.with(|p| p.get())
}
}
/// Opaque state used to help control TLS state across stack switches for
/// async support.
pub struct TlsRestore(raw::Ptr);
impl TlsRestore {
/// Takes the TLS state that is currently configured and returns a
/// token that is used to replace it later.
///
/// This is not a safe operation since it's intended to only be used
/// with stack switching found with fibers and async wasmtime.
pub unsafe fn take() -> TlsRestore {
// Our tls pointer must be set at this time, and it must not be
// null. We need to restore the previous pointer since we're
// removing ourselves from the call-stack, and in the process we
// null out our own previous field for safety in case it's
// accidentally used later.
let raw = raw::get();
assert!(!raw.is_null());
let prev = (*raw).prev.replace(ptr::null());
raw::replace(prev);
TlsRestore(raw)
}
/// Restores a previous tls state back into this thread's TLS.
///
/// This is unsafe because it's intended to only be used within the
/// context of stack switching within wasmtime.
pub unsafe fn replace(self) -> Result<(), super::Trap> {
// When replacing to the previous value of TLS, we might have
// crossed a thread: make sure the trap-handling lazy initializer
// runs.
super::sys::lazy_per_thread_init()?;
// We need to configure our previous TLS pointer to whatever is in
// TLS at this time, and then we set the current state to ourselves.
let prev = raw::get();
assert!((*self.0).prev.get().is_null());
(*self.0).prev.set(prev);
raw::replace(self.0);
Ok(())
}
}
/// Configures thread local state such that for the duration of the
/// execution of `closure` any call to `with` will yield `ptr`, unless this
/// is recursively called again.
pub fn set<R>(state: &CallThreadState<'_>, closure: impl FnOnce() -> R) -> R {
struct Reset<'a, 'b>(&'a CallThreadState<'b>);
impl Drop for Reset<'_, '_> {
fn drop(&mut self) {
raw::replace(self.0.prev.replace(ptr::null()));
}
}
// Note that this extension of the lifetime to `'static` should be
// safe because we only ever access it below with an anonymous
// lifetime, meaning `'static` never leaks out of this module.
let ptr = unsafe {
mem::transmute::<*const CallThreadState<'_>, *const CallThreadState<'static>>(state)
};
let prev = raw::replace(ptr);
state.prev.set(prev);
let _reset = Reset(state);
closure()
}
/// Returns the last pointer configured with `set` above. Panics if `set`
/// has not been previously called.
pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState<'_>>) -> R) -> R {
let p = raw::get();
unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
}
}
| {
// The stack limit was previously set by a previous wasm
// call on the stack. We leave the original stack limit for
// wasm in place in that case, and don't reset the stack
// limit when we're done.
false
} | conditional_block |
travel-assistance.js | import React from "react"
import Layout from "../components/layout"
import SEO from "../components/seo"
import "../styles/stay.css"
import "../styles/about.css"
import "../styles/travel-assistance.css"
import "../styles/global.css"
const Stay = () => (
<Layout>
<SEO title="Travel Assistance" />
<h1 className="contact">Travel Assistance</h1>
<div className="underline-div">
<div className="separator"></div>
</div>
<p className="lead">
Thinking of booking your scuba holiday with us?
<br /> Need help booking your ferry tickets? <br />
Any other queries regarding your trip? <br />
Feel free to drop a message on whatsapp with your requirements.
</p>
<div className="privacy-content-holder">
<p className="lead left"> When to visit:</p>
<p className="content">
If you want to do an intro to scuba program or an Open Water course, you
can come at any time of the year. The sites that you visit as part of
these programs are accessible year long and not that significantly
affected by weather (tides and phase of the moon play a bigger role). Do
keep in mind that unlike mainland monsoons, rainy season in the Andamans
typically means frequent short showers – only occasionally do we have
long, heavy rains (and that too, usually at night). So you can still
have a very enjoyable holiday here, even in the monsoons.
</p>
<p className="content">
For certified divers, the traditional peak dive season in the Andamans
has been from December to May – the sun is (usually!) shining, the seas
are flat and visibility is at its best during this period. June and July
are monsoons – this means that some days, the seas are very rough and we
cannot go very far. Other days, it is calm and sunny, and the diving is
surprisingly good.
</p>
<p className="content">
The September to November period is becoming the “hidden season” when it
comes to scuba diving in the Andamans – yes, there are occasional to
regular rains, and the seas are marginally choppier than what it would
be in March or April, but the fishlife is mindboggling – enormous
schools of fish occupy your entire vision, turtle and manta sightings
are common and the reefs buzz with excitement and action. All our dive
staff agree that the diving during this period is the best that they’ve
ever done in their years in the Andamans. So if you are interested in
really primo diving, but without crowds and with the moderate risk of
being limited in the choice of dive sites due to weather, consider
coming in this time period.
</p>
<p className="content">
In the past few years, June has not had a lot of rains and has pretty
much become an extension of the peak season. August is a bit of an
unusual month – it has been raining a little more than normal the past
few years, but the seas haven’t been too rough, and the diving has been
very good at this time. For advanced divers, it is likely to be hit or
miss – the diving is superb, but we may not be able to visit our
highlight deep sites at this time.
</p>
<p className="content">
A word of warning, however: weather conditions are becoming more and
more erratic these days, and we are having more “nice days” in the
so-called off-season and more “rough days” in the peak season. The above
is meant to be indicative, based on recent trends, but we really have no
way of predicting the impact of climate change.
</p>
<p className="content">
<b>Travel tip:</b> Please check weather forecast before you start your
journey to andamans and depending on that, you may have to pack some
rain wear.
</p>
<br />
<p className="lead left">Permits and Visas:</p>
<p className="content">
All foreign visitors to India need a visa. No special permits are
required to visit andamans after recent modifications made by the Govt
of India.
</p>
<p className="content">
Indian passport holders do not need any permits to visit the Andamans.
</p>
<br />
<p className="lead left">Getting to the Andamans:</p>
<p className="content">
The quickest and most convenient way to get to the Andamans is to fly.
Sea route is reserved for the more patient as it takes 3-4 days
depending on the weather. If you want to come directly to Havelock on
the day of your arrival, taking a flight that arrives in Port Blair
before 1200hrs would give you time to catch the last ferry to havelock
that usually leaves at around 1400hrs.
</p>
<p className="content">
If you want to come directly to Neil on the day of your arrival, please
make sure you land by 1000hrs in order to catch the 1100 ferry.
</p>
<p className="content">
For the return, if your outbound flight is after 1300hrs, you can come
back from Havelock or Neil on the same morning quite easily. If your
outbound flight is before 1300hrs, you will need to spend the last night
in Port Blair.
</p>
<p className="content">
<b>Note:</b> Please check with us the ferry timings before you finalize
your travel plan.
</p>
<br />
<p className="lead left">Getting from Port Blair to Havelock and back:</p>
<p className="lead left">PORT BLAIR TO HAVELOCK TO PORT BLAIR::</p>
<p className="content">
Along with the government, there are multiple ferry operators that
operate between Port Blair and Havelock (Swarajdweep).
</p>
<p className="content">
If you land in Port Blair by 12:00pm, you can take the last ferry to
Havelock. Otherwise, you have to spend the first night in Port Blair,
and then take the morning ferry the next day.
</p>
<p className="content">
For the return, if you have a flight after 1330, you can take one of the
morning ferries back to Port Blair on the same morning. Otherwise, you
need to go back one day prior, and spend the last night in Port Blair.
</p>
<p className="content">
It is possible to arrange these tickets when you arrive (and Makruzz
tickets can also be bought online). However, for guests staying and
diving with us, we offer (and strongly recommend that you avail of) a
Meet and Greet facility which includes airport pickup, ferry tickets
to/from Havelock and all land transfers (airport to hotel/jetty, hotel
to jetty, etc). Please contact us for pricing and to book this.{" "}
</p>
<br />
<p className="lead left">Reaching our Dive center:</p>
<p className="content">
Our dive center is conviniently located close to the Jetty. Once you
come out of the jetty, a short 5 minute walk towards the Number 3 market
will take you to our dive center. We are located to the right hand side
when you are coming from jetty and you can find us right opposite Virgin
Beach Resort.
</p>
<br />
<p className="lead left">Where to stay in Port Blair and Havelock:</p>
<p className="content">
Now a days, most of the properties are listed online. You can find them
on most of the hotel booking portals. For suggestions on where to stay,
feel free to get in touch with us on Whatsapp.
</p>
<br />
<p className="lead left">Health:</p>
<p className="content">
There is one Primary Health Center, one private pharmacy in havelock and
a good public hospital in Port Blair. So it is recommended that you
bring preferred or prescription medication with you since the options on
the islands are very limited.
</p>
<br />
<p className="lead left">What to bring:</p>
<p className="content">
Most of the Andamans operate on cash, with a few of the bigger hotels
(and us) taking debit/credit cards. So, we suggest that you carry
sufficient cash with you during your visit to the islands. There are a
quite a few ATMs in Port Blair and 3 ATMs in Havelock which cannot
always be relied upon (Very often do you see tourists standing in long
queues to withdraw cash from an ATM because thats the only ATM which is
working at that point of time)
</p>
<p className="content">
Certified divers should bring their C-card and logbook. Divers insurance
is a good idea to have as well – we strongly recommend coverage sold by
Divers Alert Network (DAN). Also, if you are doing a scuba course and
have any pre-existing medical conditions, you will need a doctor’s
clearance. Please download the Medical Statement here or contact us for
a copy, and complete it, along with a doctor’s clearance, before
bringing it.
</p>
<br />
<p className="lead left">Mobile networks:</p> | worst on the islands ( Airtel and Vodafone have decent coverage in Port
Blair and in a few places in Havelock) however, data connectivity is
pretty much non-existent, except for slow GPRS on BSNL.
</p>
<p className="content">
Certified divers should bring their C-card and logbook. Divers insurance
is a good idea to have as well – we strongly recommend coverage sold by
Divers Alert Network (DAN). Also, if you are doing a scuba course and
have any pre-existing medical conditions, you will need a doctor’s
clearance. Please download the Medical Statement here or contact us for
a copy, and complete it, along with a doctor’s clearance, before
bringing it.
</p>
<br />
<p className="lead left">Safety:</p>
<p className="content">
As long as you take the basic precautions – i.e., don’t leave your
valuables lying around, don’t entrust your cash to strangers, etc., you
should be fine. There is virtually no violent crime in the Andamans,
especially against tourists.
</p>
<p className="content">
Same applies for single female travellers. Havelock and Neil are very
safe places, and there isn’t much to worry about in terms of assault,
harassment, etc.
</p>
<br />
<p className="lead left">Language:</p>
<p className="content">
Pretty much everyone on the islands can speak Hindi along with one or
two other regional languages and most of them understand English. So, be
assured that communication isn't going to be a problem on these islands.
</p>
<br />
<br />
</div>
</Layout>
)
export default Stay |
<p className="content">
Indian residents please note - BSNL is considered to be the best of the | random_line_split |
lifecycle.rs | //
// Copyright(c) 2022, Karl Eric Harper
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met :
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Karl Eric Harper nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED.IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
/*
/// Runtime initialization
use std::{
thread_local,
env,
process,
mem::{
MaybeUninit,
},
sync::{
atomic::Ordering,
},
path::{
Path,
},
ffi::{
OsStr,
},
};
use thread_id;
use atomig::{Atom};
use once_cell::{
sync::{
OnceCell,
},
};
use crate::*;
use crate::MrapiStatusFlag::*;
use crate::internal::db::{
MrapiDatabase,
MrapiDomainData,
MrapiDomainState,
MrapiNodeState,
MrapiProcessData,
MrapiProcessState,
MRAPI_MGR,
MRAPI_SEM,
access_database_pre,
access_database_post,
};
//pub static RESOURCE_ROOT: LateStatic<MrapiResource<'static>> = LateStatic::new(); // root of the resource tree
//threadlocal!(ALARM_STRUCT: struct sigaction); // used for testing resource tree
thread_local! {
pub static MRAPI_PID: OnceCell<MrapiUint32> = OnceCell::new();
pub static MRAPI_PROC: OnceCell<MrapiUint32> = OnceCell::new();
pub static MRAPI_TID: OnceCell<MrapiUint32> = OnceCell::new();
pub static MRAPI_NINDEX: OnceCell<MrapiUint> = OnceCell::new();
pub static MRAPI_DINDEX: OnceCell<MrapiUint> = OnceCell::new();
pub static MRAPI_PINDEX: OnceCell<MrapiUint> = OnceCell::new();
pub static MRAPI_NODE_ID: OnceCell<MrapiNode> = OnceCell::new();
pub static MRAPI_DOMAIN_ID: OnceCell<MrapiDomain> = OnceCell::new();
}
// finer grained locks for these sections of the database.
thread_local! {
pub static SEMS_SEM: OnceCell<Semaphore> = OnceCell::new(); // sems array
pub static SHMEMS_SEM: OnceCell<Semaphore> = OnceCell::new(); // shmems array
pub static REQUESTS_SEM: OnceCell<Semaphore> = OnceCell::new(); // requests array
pub static RMEMS_SEM: OnceCell<Semaphore> = OnceCell::new(); // rmems array
}
// Keep copies of global objects for comparison
thread_local! {
pub static REQUESTS_GLOBAL: OnceCell<Semaphore> = OnceCell::new();
pub static SEMS_GLOBAL: OnceCell<Semaphore> = OnceCell::new();
pub static SHMEMS_GLOBAL: OnceCell<Semaphore> = OnceCell::new();
pub static RMEMS_GLOBAL: OnceCell<Semaphore> = OnceCell::new();
}
// Tell the system whether or not to use the finer-grained locking
pub const use_global_only: bool = false;
/// Returns the initialized characteristics of this thread
#[allow(dead_code)]
fn whoami() -> Result<(MrapiNode, MrapiUint, MrapiDomain, MrapiUint), MrapiStatusFlag> {
let mut initialized: bool;
let mut result: Result<(MrapiNode, MrapiUint, MrapiDomain, MrapiUint), MrapiStatusFlag>;
match MRAPI_MGR.get() {
None => {
initialized = false;
result = Err(MrapiErrDbNotInitialized);
},
Some(_) => {
initialized = true;
},
};
if !initialized {
return result;
}
MRAPI_PID.with(|pid| {
match pid.get() {
None => {
initialized = false;
result = Err(MrapiErrProcessNotRegistered);
},
Some(_) => {
initialized = true;
},
}
});
if !initialized {
return result;
}
Ok((
MRAPI_NODE_ID.with(|id| {
match id.get() {
None => 0,
Some(v) => *v,
}
}),
MRAPI_NINDEX.with(|index| {
match index.get() {
None => 0,
Some(v) => *v,
}
}),
MRAPI_DOMAIN_ID.with(|id| {
match id.get() {
None => 0,
Some(v) => *v,
}
}),
MRAPI_DINDEX.with(|index| {
match index.get() {
None => 0,
Some(v) => *v,
}
}),
))
}
/// Checks if the given domain_id/node_id is already assoc w/ this pid/tid
#[allow(dead_code)]
fn initialized() -> bool {
match whoami() {
Ok(_) => true,
Err(_) => false,
}
}
fn finalize_node_locked(d: MrapiUint, n: MrapiUint) {
let mrapi_db = MrapiDatabase::global_db();
let dpacked = &mrapi_db.domains[d].state.load(Ordering::Relaxed);
let dstate: MrapiDomainState = Atom::unpack(*dpacked);
let npacked = &mrapi_db.domains[d].nodes[n].state.load(Ordering::Relaxed);
let mut nstate: MrapiNodeState = Atom::unpack(*npacked);
let domain_id = dstate.domain_id();
let node_num = nstate.node_num();
mrapi_dprintf!(2, "mrapi::internal::lifecycle::finalize_node_locked dindex={} nindex={} domain={} node={}", d, n, domain_id, node_num);
// Mark the node as finalized
nstate.set_valid(MRAPI_FALSE);
nstate.set_allocated(MRAPI_FALSE);
&mrapi_db.domains[d].nodes[n].state.store(Atom::pack(nstate), Ordering::Relaxed);
// Rundown the node's process association
let p = mrapi_db.domains[d].nodes[n].proc_num;
let ppacked = &mrapi_db.processes[p].state.load(Ordering::Relaxed);
let pstate: MrapiProcessState = Atom::unpack(*ppacked);
if pstate.valid() {
let num_nodes = mrapi_db.processes[p].num_nodes.fetch_sub(1, Ordering::Relaxed);
if 0 >= num_nodes {
// Last node in this process, remove process references in shared memory
for s in 0..MRAPI_MAX_SHMEMS {
if mrapi_db.shmems[s].valid {
&mrapi_db.shmems[s].set_process(p, 0);
}
}
unsafe { mrapi_db.processes[p] = MaybeUninit::zeroed().assume_init(); };
}
}
unsafe { mrapi_db.domains[d].nodes[n] = MaybeUninit::zeroed().assume_init(); };
&mrapi_db.domains[d].num_nodes.fetch_sub(1, Ordering::Relaxed);
// Decrement the shmem reference count if necessary
for shmem in 0..MRAPI_MAX_SHMEMS {
if mrapi_db.shmems[shmem].valid == MRAPI_TRUE {
if mrapi_db.domains[d].nodes[n].shmems[shmem] == 1 {
// If this node was a user of this shm, decrement the ref count
mrapi_db.shmems[shmem].refs -= 1;
}
// If the reference count is 0, free the shared memory resource
if mrapi_db.shmems[shmem].refs == 0 {
drop(&mrapi_db.shmems[shmem].mem[p]);
}
}
}
// Decrement the sem reference count if necessary
for sem in 0..MRAPI_MAX_SEMS {
if mrapi_db.sems[sem].valid == MRAPI_TRUE {
if mrapi_db.domains[d].nodes[n].sems[sem] == 1 {
mrapi_db.domains[d].nodes[n].sems[sem] = 0;
// If this node was a user of this sem, decrement the ref count
if mrapi_db.sems[sem].refs.fetch_sub(1, Ordering::Relaxed) <= 0 {
// If the reference count is 0 free the resource
mrapi_db.sems[sem].valid = MRAPI_FALSE;
}
}
}
}
}
fn free_resources(panic: MrapiBoolean) {
let mut last_node_standing = MRAPI_TRUE;
let mut last_node_standing_for_this_process = MRAPI_TRUE;
let pid = process::id();
let semref = MrapiSemRef::new(MrapiDatabase::global_sem(), 0, MRAPI_FALSE);
// Try to lock the database
let locked = access_database_pre(&semref, MRAPI_FALSE);
mrapi_dprintf!(1, "mrapi::internal::lifecycle::free_resources panic: {} freeing any existing resources", panic);
match MRAPI_MGR.get() {
None => { },
Some(_) => {
// Finalize this node
match whoami() {
Ok((node, n, domain_num, d)) => {
finalize_node_locked(d, n);
},
Err(_) => { },
}
// If we are in panic mode, then forcefully finalize all other nodes that belong to this process
if panic {
let mrapi_db = MrapiDatabase::global_db();
for d in 0..MRAPI_MAX_DOMAINS {
for n in 0..MRAPI_MAX_NODES {
let npacked = &mrapi_db.domains[d].nodes[n].state.load(Ordering::Relaxed);
let nstate: MrapiNodeState = Atom::unpack(*npacked);
if nstate.valid() == MRAPI_TRUE {
let p = *&mrapi_db.domains[d].nodes[n].proc_num as usize;
let ppacked = &mrapi_db.processes[p].state.load(Ordering::Relaxed);
let pstate: MrapiProcessState = Atom::unpack(*ppacked);
if pstate.pid() == pid {
finalize_node_locked(d, n);
}
}
}
}
for p in 0..MRAPI_MAX_PROCESSES {
let ppacked = &mrapi_db.processes[p].state.load(Ordering::Relaxed);
let pstate: MrapiProcessState = Atom::unpack(*ppacked);
if pstate.valid() == MRAPI_TRUE &&
pstate.pid() == pid {
let mut process = &mut mrapi_db.processes[p];
process.clear();
break;
}
}
}
}
}
}
/*
{
mrapi_boolean_t rc = MRAPI_TRUE;
uint32_t d, n, p;
mrapi_domain_t domain_num;
mrapi_database* mrapi_db_local = NULL;
mrapi_node_t node;
#if (__unix__)
pid_t pid = getpid();
#else
pid_t pid = (pid_t)GetCurrentProcessId();
#endif // !(__unix__)
mrapi_boolean_t last_man_standing = MRAPI_TRUE;
mrapi_boolean_t last_man_standing_for_this_process = MRAPI_TRUE;
mrapi_boolean_t locked;
// try to lock the database
mrapi_impl_sem_ref_t ref = { semid, 0, MRAPI_FALSE };
locked = mrapi_impl_access_database_pre(ref, MRAPI_FALSE);
mrapi_dprintf(1, "mrapi_impl_free_resources (panic=%d): freeing any existing resources in the database mrapi_db=%p semid=%x shmemid=%x\n",
panic, mrapi_db, semid, shmemid);
if (mrapi_db) {
// finalize this node
if (mrapi_impl_whoami(&node, &n, &domain_num, &d)) {
mrapi_impl_finalize_node_locked(d, n);
}
// if we are in panic mode, then forcefully finalize all other nodes that belong to this process
if (panic) {
for (d = 0; d < MRAPI_MAX_DOMAINS; d++) {
for (n = 0; n < MRAPI_MAX_NODES; n++) {
mrapi_node_state nstate;
mrapi_assert(sys_atomic_read(NULL, &mrapi_db->domains[d].nodes[n].state, &nstate, sizeof(mrapi_db->domains[d].nodes[n].state)));
if (nstate.data.valid == MRAPI_TRUE) {
mrapi_uint_t p = mrapi_db->domains[d].nodes[n].proc_num;
mrapi_process_state pstate;
mrapi_assert(sys_atomic_read(NULL, &mrapi_db->processes[p].state, &pstate, sizeof(mrapi_db->processes[p].state)));
if (pstate.data.pid == pid) {
mrapi_impl_finalize_node_locked(d, n);
}
}
}
}
for (p = 0; p < MRAPI_MAX_PROCESSES; p++) {
mrapi_process_state pstate;
mrapi_assert(sys_atomic_read(NULL, &mrapi_db->processes[p].state, &pstate, sizeof(mrapi_db->processes[p].state)));
if ((pstate.data.valid == MRAPI_TRUE) &&
(pstate.data.pid == pid)) {
#if !(__unix)
if (NULL != mrapi_db->processes[p].hAtomicEvt) {
CloseHandle(mrapi_db->processes[p].hAtomicEvt);
}
#endif // !(__unix)
memset(&mrapi_db->processes[p], 0, sizeof(mrapi_process_data));
break;
}
}
}
// see if there are any valid nodes left in the system and for this process
for (d = 0; d < MRAPI_MAX_DOMAINS; d++) {
for (n = 0; n < MRAPI_MAX_NODES; n++) {
mrapi_node_state nstate;
mrapi_assert(sys_atomic_read(NULL, &mrapi_db->domains[d].nodes[n].state, &nstate, sizeof(mrapi_db->domains[d].nodes[n].state)));
if (nstate.data.valid == MRAPI_TRUE) {
mrapi_process_state pstate;
p = mrapi_db->domains[d].nodes[n].proc_num;
mrapi_assert(sys_atomic_read(NULL, &mrapi_db->processes[p].state, &pstate, sizeof(mrapi_db->processes[p].state)));
last_man_standing = MRAPI_FALSE;
if (pstate.data.pid == pid) {
last_man_standing_for_this_process = MRAPI_FALSE;
}
}
}
}
if (panic) {
mrapi_assert(last_man_standing_for_this_process);
}
// if there are no other valid nodes in the whole system, then free the sems
if (last_man_standing) {
mrapi_dprintf(1, "mrapi_impl_free_resources: freeing mrapi internal semaphore and shared memory\n");
// free the mrapi internal semaphores
if (sems_semid != sems_global) {
rc = sys_sem_delete(sems_semid);
sems_semid = -1;
if (!rc) {
fprintf(stderr, "mrapi_impl_free_resources: ERROR: sys_sem_delete (mrapi_db) failed\n");
}
}
if (shmems_semid != shmems_global) {
rc = sys_sem_delete(shmems_semid);
shmems_semid = -1;
if (!rc) {
fprintf(stderr, "mrapi_impl_free_resources: ERROR: sys_sem_delete (mrapi_db) failed\n");
}
}
if (rmems_semid != rmems_global) {
rc = sys_sem_delete(rmems_semid);
rmems_semid = -1;
if (!rc) {
fprintf(stderr, "mrapi_impl_free_resources: ERROR: sys_sem_delete (mrapi_db) failed\n");
}
}
if (requests_semid != requests_global) {
rc = sys_sem_delete(requests_semid);
requests_semid = -1;
if (!rc) {
fprintf(stderr, "mrapi_impl_free_resources: ERROR: sys_sem_delete (mrapi_db) failed\n");
}
}
}
// if there are no other valid nodes for this process, then detach from shared memory
if (last_man_standing_for_this_process) {
mrapi_status_t status = 0;
mrapi_atomic_op op = { MRAPI_ATOM_CLOSEPROC, 0 };
// Signal remote processes to unlink this process
mrapi_impl_atomic_forward(0, &op, &status);
memset(&mrapi_db->processes[mrapi_pindex], 0, sizeof(mrapi_process_data));
// detach from the mrapi internal shared memory
mrapi_dprintf(1, "mrapi_impl_free_resources: detaching from mrapi internal shared memory\n");
mrapi_db_local = mrapi_db;
sys_atomic_xchg_ptr(NULL, (uintptr_t*)&mrapi_db, (uintptr_t)NULL, (uintptr_t*)NULL);
rc = sys_shmem_detach(mrapi_db_local);
if (!rc) {
fprintf(stderr, "mrapi_impl_free_resources: ERROR: sys_shmem detach (mrapi_db) failed\n");
}
}
// if there are no other valid nodes in the whole system, then free the shared memory
if (last_man_standing) {
// free the mrapi internal shared memory
rc = sys_shmem_delete(shmemid);
if (!rc) {
fprintf(stderr, "mrapi_impl_free_resources: ERROR: sys_shmem_delete (mrapi_db) failed\n");
}
mrapi_db = NULL;
shmemid = -1;
}
// if we locked the database and didn't delete it, then we need to unlock it
if (locked) {
if (!last_man_standing)
{
// unlock the database
mrapi_impl_sem_ref_t ref = { semid, 0 };
mrapi_assert(mrapi_impl_access_database_post(ref));
}
}
if (last_man_standing) {
// free the global semaphore last
rc = sys_sem_delete(semid);
semid = -1;
if (!rc) {
fprintf(stderr, "mrapi_impl_free_resources: ERROR: sys_sem_delete (mrapi_db) failed\n");
}
}
}
return last_man_standing;
}
*/
/// Create or get the semaphore corresponding to the key.
///
/// Tries `sem_create` first; if the semaphore already exists, attaches to it
/// with `sem_get`. When `lock` is true, spins (yielding between attempts)
/// until the semaphore is locked before returning it; when `lock` is false the
/// semaphore is returned as soon as it is obtained.
///
/// Returns `None` if the semaphore could not be obtained (or locked) within
/// `max_tries` attempts.
fn create_sys_semaphore(num_locks: usize, key: u32, lock: MrapiBoolean) -> Option<Semaphore>
{
    let max_tries: u32 = 0xffffffff;
    // Must be mutable: it is incremented below (the original declared it
    // immutable, which does not compile).
    let mut trycount: u32 = 0;
    while trycount < max_tries {
        trycount += 1;
        // Prefer creating; if creation fails (already exists), attach instead.
        let sem = match sem_create(key, num_locks) {
            Some(v) => v,
            None => {
                match sem_get(key, num_locks) {
                    Some(v) => v,
                    None => Semaphore::default(),
                }
            },
        };
        if sem == Semaphore::default() {
            // Neither create nor get succeeded this round; retry.
            continue;
        }
        if !lock {
            // Caller does not want the lock held; hand the semaphore back
            // immediately (the original never returned in this case).
            return Some(sem);
        }
        let sr = MrapiSemRef::new(&sem, 0, false);
        while trycount < max_tries {
            // Count spin attempts so the max_tries bound actually terminates
            // the loop (the original never incremented here).
            trycount += 1;
            if let Ok(true) = sr.trylock() {
                return Some(sem);
            }
            sysvr4::os_yield();
        }
    }
    None
}
/// Initializes the MRAPI internal layer (sets up the database and semaphore)
///
/// # Arguments
///
/// domain_id - collection of nodes that share resources
/// node_id - task that synchronizes with other nodes in a domain
///
/// # Errors
///
/// MrapiDbNotInitialized
/// MrapiNodeInitfailed
/// MrapiAtomOpNoforward
#[allow(unused_variables)]
pub fn initialize(domain_id: MrapiDomain, node_id: MrapiNode) -> Result<MrapiStatus, MrapiStatusFlag> {
    // Derive IPC keys from the process name (true) rather than a file key.
    const USE_UID: MrapiBoolean = MRAPI_TRUE;
    // associate this node w/ a pid,tid pair so that we can recognize the caller on later calls
    mrapi_dprintf!(1, "mrapi::internal::lifecycle::initialize ({},{});", domain_id, node_id);
    if initialized() {
        return Err(MrapiErrNodeInitfailed);
    };
    // Get process name (basename of argv[0]). Bind argv[0] to a local first:
    // borrowing straight from the `env::args().next()` temporary does not
    // outlive the statement (E0716 in the original).
    let arg0 = env::args().next();
    let proc_name = arg0
        .as_ref()
        .map(Path::new)
        .and_then(Path::file_name)
        .and_then(OsStr::to_str)
        .expect("process name unavailable");
    let buff = proc_name.to_owned() + "_mrapi";
    let key: u32;
    let db_key: u32;
    let sems_key: u32;
    let shmems_key: u32;
    let rmems_key: u32;
    let requests_key: u32;
    if USE_UID {
        // Hash the name, then derive per-resource sub-keys. `format!` keeps
        // `buff` borrowed (the original `buff + "_db"` moved it and then
        // reused it, which does not compile).
        key = common::crc::crc32_compute_buf(0, &buff);
        db_key = common::crc::crc32_compute_buf(key, &format!("{}_db", buff));
        sems_key = common::crc::crc32_compute_buf(key, &format!("{}_sems", buff));
        shmems_key = common::crc::crc32_compute_buf(key, &format!("{}_shmems", buff));
        rmems_key = common::crc::crc32_compute_buf(key, &format!("{}_rmems", buff));
        requests_key = common::crc::crc32_compute_buf(key, &format!("{}_requests", buff));
    }
    else {
        key = match os_file_key("", 'z' as u32) {
            Some(v) => v,
            None => {
                mrapi_dprintf!(1, "MRAPI ERROR: Invalid file key");
                0
            },
        };
        db_key = key + 10;
        sems_key = key + 20;
        shmems_key = key + 30;
        rmems_key = key + 40;
        requests_key = key + 50;
    }
    // 1) setup the global database
    // get/create the shared memory database
    MrapiDatabase::initialize_db(domain_id, node_id, db_key);
    // 2) create or get the semaphore and lock it
    // we loop here and inside of create_sys_semaphore because of the following race condition:
    // initialize                  finalize
    //  1: create/get sem          1: lock sem
    //  2: lock sem                2: check db: any valid nodes?
    //  3: setup db & add self     3a: no -> delete db & delete sem
    //  4: unlock sem              3b: yes-> unlock sem
    //
    // finalize-1 can occur between initialize-1 and initialize-2 which will cause initialize-2
    // to fail because the semaphore no longer exists.
    let sem_local = match create_sys_semaphore(1, key, MRAPI_TRUE) {
        None => {
            mrapi_dprintf!(1, "MRAPI ERROR: Unable to get the semaphore key: {}", key);
            return Err(MrapiErrNodeInitfailed);
        },
        Some(v) => v,
    };
    mrapi_dprintf!(1, "mrapi_impl_initialize lock acquired, now adding node to database");
    // At this point we've managed to acquire and lock the semaphore ...
    // NOTE: with use_global_only it's important to write to the globals only while
    // we have the semaphore otherwise we introduce race conditions. This
    // is why we are using the local variable id until everything is set up.
    // set the global semaphore reference
    MrapiDatabase::initialize_sem(sem_local);
    // get or create our finer grained locks
    // in addition to a lock on the sems array, every lock (rwl,sem,mutex) has it's own
    // database semaphore, this allows us to access different locks in parallel
    let sems_sem = create_sys_semaphore(MRAPI_MAX_SEMS + 1, sems_key, MRAPI_FALSE);
    if use_global_only || sems_sem.is_none() {
        // Fall back to the coarse global semaphore for everything.
        SEMS_GLOBAL.with(|sem| { sem.set(MrapiDatabase::global_sem().clone()); });
        SEMS_SEM.with(|sem| { sem.set(MrapiDatabase::global_sem().clone()); });
    }
    else {
        SEMS_SEM.with(|sem| { sem.set(sems_sem.unwrap()); });
    }
    let shmems_sem = create_sys_semaphore(1, shmems_key, MRAPI_FALSE);
    if shmems_sem.is_none() {
        SHMEMS_GLOBAL.with(|sem| { sem.set(MrapiDatabase::global_sem().clone()); });
        SHMEMS_SEM.with(|sem| { sem.set(MrapiDatabase::global_sem().clone()); });
    }
    else {
        SHMEMS_SEM.with(|sem| { sem.set(shmems_sem.unwrap()); });
    }
    let rmems_sem = create_sys_semaphore(1, rmems_key, MRAPI_FALSE);
    if rmems_sem.is_none() {
        RMEMS_GLOBAL.with(|sem| { sem.set(MrapiDatabase::global_sem().clone()); });
        RMEMS_SEM.with(|sem| { sem.set(MrapiDatabase::global_sem().clone()); });
    }
    else {
        RMEMS_SEM.with(|sem| { sem.set(rmems_sem.unwrap()); });
    }
    let requests_sem = create_sys_semaphore(1, requests_key, MRAPI_FALSE);
    if requests_sem.is_none() {
        REQUESTS_GLOBAL.with(|sem| { sem.set(MrapiDatabase::global_sem().clone()); });
        REQUESTS_SEM.with(|sem| { sem.set(MrapiDatabase::global_sem().clone()); });
    }
    else {
        // Fixed copy-paste bug: the original stored `rmems_sem` here.
        REQUESTS_SEM.with(|sem| { sem.set(requests_sem.unwrap()); });
    }
    // Get our identity
    let pid = process::id();
    let tid = thread_id::get() as MrapiUint32;
    MRAPI_PID.with(|id| { id.set(pid); });
    MRAPI_PROC.with(|id| { id.set(pid); });
    MRAPI_TID.with(|id| { id.set(tid); });
    // Seed random number generator
    os_srand(tid);
    let mrapi_db = MrapiDatabase::global_db();
    // 3) Add the process/node/domain to the database.
    // MAX values are "not found" sentinels. The original initialized these to 0
    // and then shadowed them with `for d in ...` loop variables, so the
    // post-loop `== MAX` checks below always saw the stale outer value.
    let mut d: usize = MRAPI_MAX_DOMAINS;
    let mut n: usize = MRAPI_MAX_NODES;
    let mut p: usize = MRAPI_MAX_PROCESSES;
    // First see if this domain already exists
    for i in 0..MRAPI_MAX_DOMAINS {
        let packed = mrapi_db.domains[i].state.load(Ordering::Relaxed);
        let dstate: MrapiDomainState = Atom::unpack(packed);
        if dstate.domain_id() == domain_id as MrapiUint32 {
            d = i;
            break;
        }
    }
    if d == MRAPI_MAX_DOMAINS {
        // Find first available entry and claim it with a CAS so concurrent
        // initializers cannot grab the same slot.
        for i in 0..MRAPI_MAX_DOMAINS {
            let packed = mrapi_db.domains[i].state.load(Ordering::Relaxed);
            let mut oldstate: MrapiDomainState = Atom::unpack(packed);
            let mut newstate = oldstate;
            oldstate.set_allocated(MRAPI_FALSE);
            newstate.set_domain_id(domain_id as MrapiUint32);
            newstate.set_allocated(MRAPI_TRUE);
            if mrapi_db.domains[i].state.compare_exchange(
                Atom::pack(oldstate), Atom::pack(newstate), Ordering::Acquire, Ordering::Relaxed).is_ok() {
                d = i;
                break;
            }
        }
    }
    if d == MRAPI_MAX_DOMAINS {
        // We did not find an available domain index
        mrapi_dprintf!(1, "You have hit MRAPI_MAX_DOMAINS, either use less domains or reconfigure with more domains");
        return Err(MrapiErrNodeInitfailed);
    }
    // now find an available node index...
    for i in 0..MRAPI_MAX_NODES {
        let packed = mrapi_db.domains[d].nodes[i].state.load(Ordering::Relaxed);
        let state: MrapiNodeState = Atom::unpack(packed);
        // Even though initialized() is checked by mrapi, we have to check again here because
        // initialized() and initalize() are not atomic at the top layer
        if state.allocated() && state.node_num() == node_id as MrapiUint32 {
            // this node already exists for this domain
            mrapi_dprintf!(1, "This node ({}) already exists for this domain ({})", node_id, domain_id);
            n = i;
            break;
        }
    }
    if n == MRAPI_MAX_NODES {
        // it didn't exist so find the first available entry
        for i in 0..MRAPI_MAX_NODES {
            let packed = mrapi_db.domains[d].nodes[i].state.load(Ordering::Relaxed);
            let mut oldstate: MrapiNodeState = Atom::unpack(packed);
            let mut newstate = oldstate;
            oldstate.set_allocated(MRAPI_FALSE);
            newstate.set_node_num(node_id as MrapiUint32);
            newstate.set_allocated(MRAPI_TRUE);
            if mrapi_db.domains[d].nodes[i].state.compare_exchange(
                Atom::pack(oldstate), Atom::pack(newstate), Ordering::Acquire, Ordering::Relaxed).is_ok() {
                n = i;
                break;
            }
        }
        if n != MRAPI_MAX_NODES {
            // See if this process exists
            for i in 0..MRAPI_MAX_PROCESSES {
                let packed = mrapi_db.processes[i].state.load(Ordering::Relaxed);
                let pstate: MrapiProcessState = Atom::unpack(packed);
                if pstate.pid() == pid {
                    p = i;
                    break;
                }
            }
            if p == MRAPI_MAX_PROCESSES {
                // It didn't exist so find the first available entry
                for i in 0..MRAPI_MAX_PROCESSES {
                    let packed = mrapi_db.processes[i].state.load(Ordering::Relaxed);
                    let mut oldstate: MrapiProcessState = Atom::unpack(packed);
                    let mut newstate = oldstate;
                    oldstate.set_allocated(MRAPI_FALSE);
                    newstate.set_pid(pid);
                    newstate.set_allocated(MRAPI_TRUE);
                    if mrapi_db.processes[i].state.compare_exchange(
                        Atom::pack(oldstate), Atom::pack(newstate), Ordering::Acquire, Ordering::Relaxed).is_ok() {
                        p = i;
                        break;
                    }
                }
            }
        }
    }
    if n == MRAPI_MAX_NODES {
        // We did not find an available node index
        mrapi_dprintf!(1, "You have hit MRAPI_MAX_NODES, either use less nodes or reconfigure with more nodes");
        return Err(MrapiErrNodeInitfailed);
    }
    if p == MRAPI_MAX_PROCESSES {
        // We did not find an available process index (also reached when the
        // node already existed above, since process lookup is then skipped)
        mrapi_dprintf!(1, "You have hit MRAPI_MAX_PROCESSES, either use less processes or reconfigure with more processes");
        return Err(MrapiErrNodeInitfailed);
    }
    MRAPI_PINDEX.with(|index| { index.set(p); });
    // NOTE(review): registration succeeded at this point, yet the original
    // returns Err unconditionally — the port looks unfinished. Confirm the
    // intended success value (Ok(MrapiStatus...)) before changing this.
    Err(MrapiErrNodeInitfailed)
}
#[allow(unused_imports)]
use more_asserts as ma;
#[cfg(test)]
mod tests {
    use super::*;
    /// Smoke test: `initialize` must not panic. It may legitimately return an
    /// error in the test environment (no shared-memory database), so only the
    /// absence of a panic is asserted. (The original match had invalid syntax:
    /// `Ok(_) => {} | Err(_) => {},`.)
    #[test]
    fn startup() {
        let domain_id: MrapiDomain = 1;
        let node_id: MrapiNode = 1;
        let _ = initialize(domain_id, node_id);
    }
}
*/ | MRAPI_TID.with(|id| { id.set(tid); });
// Seed random number generator
os_srand(tid);
| random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.