file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
lib.rs | //! See the `Bitmap` type.
/// A dense bitmap, intended to store small bitslices (<= width of usize).
pub struct Bitmap {
entries: usize,
width: usize,
// Avoid a vector here because we have our own bounds checking, and
// don't want to duplicate the length, or panic.
data: *mut u8,
}
#[inline(always)]
fn get_n_bits_at(byte: u8, n: u8, start: u8) -> u8 {
(byte >> (8-n-start) as usize) & (0xFF >> (8-n) as usize)
}
impl Drop for Bitmap {
fn drop(&mut self) {
let p = self.data;
if p != 0 as *mut _ {
self.data = 0 as *mut _;
let _ = unsafe { Vec::from_raw_parts(p as *mut u8, 0, self.byte_len()) };
}
}
}
impl Bitmap {
/// Create a new bitmap, returning None if the data can't be allocated or
/// if the width of each slice can't fit in a usize. entries * width must
/// not overflow usize.
pub fn new(entries: usize, width: usize) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|needed| {
let ptr = {
let mut alloc = Vec::<u8>::with_capacity(needed);
let ptr = alloc.as_mut_ptr();
std::mem::forget(alloc);
ptr
};
unsafe { std::ptr::write_bytes(ptr, 0, needed); }
Some(Bitmap {
entries: entries,
width: width,
data: ptr as *mut u8
})
})
}
}
/// Create a new Bitmap from raw parts. Will return None if the given
/// entry and width would overflow the number of bits or bytes needed to
/// store the Bitmap.
pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|_| {
Some(Bitmap {
entries: entries,
width: width,
data: ptr
})
})
}
}
/// Get the `i`th bitslice, returning None on out-of-bounds
pub fn get(&self, i: usize) -> Option<usize> {
if i >= self.entries | else {
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
let mut value: usize = 0;
while bits_left > 0 {
// how many bits can we need to set in this byte?
let can_get = std::cmp::min(8 - in_byte_offset, bits_left);
// alright, pull them out.
let byte = unsafe { *self.data.offset(byte_offset as isize) };
let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize;
// make room for the bits we just read
value <<= can_get;
value |= got;
// update all the state
bit_offset += can_get;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_get;
}
Some(value)
}
}
/// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains
/// bits outside of the least significant `self.width` bits.
pub fn set(&mut self, i: usize, mut value: usize) -> bool {
let usize = std::mem::size_of::<usize>() * 8;
if i >= self.entries || value & !(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width))) != 0 {
false
} else {
// shift over into the high bits
value <<= std::cmp::min(usize - 1, usize - self.width);
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
while bits_left > 0 {
let can_set = std::cmp::min(8 - in_byte_offset, bits_left);
// pull out the highest can_set bits from value
let mut to_set: usize = value >> (usize - can_set);
// move them into where they will live
to_set <<= 8 - can_set - in_byte_offset;
let addr = unsafe { self.data.offset(byte_offset as isize) };
let mut byte = unsafe { *addr };
debug_assert!(to_set <= 255);
// clear the bits we'll be setting
byte &= !(0xFF
>>
(7 - in_byte_offset)
<<
(8usize.saturating_sub(in_byte_offset).saturating_sub(self.width)));
byte |= to_set as u8;
unsafe { *addr = byte };
// update all the state
value <<= can_set;
bit_offset += can_set;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_set;
}
true
}
}
/// Length in number of bitslices cointained.
pub fn len(&self) -> usize {
self.entries
}
/// Size of the internal buffer, in bytes.
pub fn byte_len(&self) -> usize {
// can't overflow, since creation asserts that it doesn't.
let w = self.entries * self.width;
let r = w % 8;
(w + r) / 8
}
pub fn iter(&self) -> Slices {
Slices { idx: 0, bm: self }
}
/// Get the raw pointer to this Bitmap's data.
pub unsafe fn get_ptr(&self) -> *mut u8 {
self.data
}
/// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be free'd
/// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this
/// operation should really be avoided. The destructor will call `Vec`s destructor on the
/// internal pointer.
pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 {
let p = self.data;
self.data = ptr;
p
}
}
/// Iterator over the bitslices in the bitmap
pub struct Slices<'a> {
idx: usize,
bm: &'a Bitmap
}
impl<'a> Iterator for Slices<'a> {
type Item = usize;
/// *NOTE*: This iterator is not "well-behaved", in that if you keep calling
/// `next` after it returns None, eventually it will overflow and start
/// yielding elements again. Use the `fuse` method to make this
/// "well-behaved".
fn next(&mut self) -> Option<usize> {
let rv = self.bm.get(self.idx);
self.idx += 1;
rv
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.bm.len(), Some(self.bm.len()))
}
}
impl<'a> std::iter::IntoIterator for &'a Bitmap {
type Item = usize;
type IntoIter = Slices<'a>;
fn into_iter(self) -> Slices<'a> {
self.iter()
}
}
#[cfg(test)]
mod test {
extern crate quickcheck;
use self::quickcheck::quickcheck;
use super::{get_n_bits_at, Bitmap};
use std;
#[test]
fn empty() {
let bm = Bitmap::new(10, 10).unwrap();
for i in 0..10 {
assert_eq!(bm.get(i), Some(0));
}
assert_eq!(bm.get(11), None);
}
#[test]
fn get() {
let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0];
let bm = Bitmap {
entries: 8,
width: 3,
data: &mut data as *mut [u8; 4] as *mut u8
};
for i in 0..8 {
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), None);
assert_eq!(bm.get(9), None);
// we don't use real data here, so don't bother freeing it
let mut bm = bm;
unsafe { bm.set_ptr(std::ptr::null_mut()); }
}
#[test]
fn set() {
let mut bm = Bitmap::new(10, 3).unwrap();
for i in 0..8 {
assert!(bm.set(i, i));
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), Some(0));
assert_eq!(bm.get(9), Some(0));
assert_eq!(bm.get(10), None);
}
#[test]
fn get_n_bits() {
macro_rules! t {
( $( $e:expr, $n:expr, $s:expr, $g:expr; )* ) => (
{
$(
assert_eq!(get_n_bits_at($e, $n, $s), $g);
)*
}
)
}
t! {
0b00111001, 1, 0, 0b0;
0b00111001, 8, 0, 0b00111001;
0b11010101, 2, 0, 0b11;
0b11010101, 2, 1, 0b10;
0b11010101, 2, 2, 0b01;
0b11010101, 2, 3, 0b10;
0b11010101, 2, 4, 0b01;
0b11010101, 3, 0, 0b110;
0b11010101, 3, 1, 0b101;
0b11010101, 3, 2, 0b010;
}
}
#[test]
fn iter() {
let mut bm = Bitmap::new(10, 3).unwrap();
bm.set(2, 0b101);
bm.set(7, 0b110);
let bs: Vec<usize> = bm.iter().collect();
assert_eq!(bs, [0, 0, 0b101, 0, 0, 0, 0, 0b110, 0, 0]);
}
fn set_then_clear_prop(entries: usize, width: usize) -> bool {
if width >= std::mem::size_of::<usize>() * 8 || width == 0 { return true }
let mut bm = Bitmap::new(entries, width).unwrap();
let all_set = (1 << width) - 1;
for i in 0..entries {
assert!(bm.set(i, all_set));
}
for val in &bm {
println!("should be {}, is {}", all_set, val);
if val != all_set { return false; }
}
for i in 0..entries {
assert!(bm.set(i, 0));
}
for val in &bm {
println!("should be {}, is {}", 0, val);
if val != 0 { return false; }
}
true
}
#[test]
fn set_then_clear_is_identity() {
quickcheck(set_then_clear_prop as fn(usize, usize) -> bool);
}
}
| {
None
} | conditional_block |
lib.rs | //! See the `Bitmap` type.
/// A dense bitmap, intended to store small bitslices (<= width of usize).
pub struct Bitmap {
entries: usize,
width: usize,
// Avoid a vector here because we have our own bounds checking, and
// don't want to duplicate the length, or panic.
data: *mut u8,
}
#[inline(always)]
fn get_n_bits_at(byte: u8, n: u8, start: u8) -> u8 {
(byte >> (8-n-start) as usize) & (0xFF >> (8-n) as usize)
}
impl Drop for Bitmap {
fn drop(&mut self) {
let p = self.data;
if p != 0 as *mut _ {
self.data = 0 as *mut _;
let _ = unsafe { Vec::from_raw_parts(p as *mut u8, 0, self.byte_len()) };
}
}
}
impl Bitmap {
/// Create a new bitmap, returning None if the data can't be allocated or
/// if the width of each slice can't fit in a usize. entries * width must
/// not overflow usize.
pub fn new(entries: usize, width: usize) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|needed| {
let ptr = {
let mut alloc = Vec::<u8>::with_capacity(needed);
let ptr = alloc.as_mut_ptr();
std::mem::forget(alloc);
ptr
};
unsafe { std::ptr::write_bytes(ptr, 0, needed); }
Some(Bitmap {
entries: entries,
width: width,
data: ptr as *mut u8
})
})
}
}
/// Create a new Bitmap from raw parts. Will return None if the given
/// entry and width would overflow the number of bits or bytes needed to
/// store the Bitmap.
pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|_| {
Some(Bitmap {
entries: entries,
width: width,
data: ptr
})
})
}
}
/// Get the `i`th bitslice, returning None on out-of-bounds
pub fn get(&self, i: usize) -> Option<usize> {
if i >= self.entries {
None
} else {
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
let mut value: usize = 0;
while bits_left > 0 {
// how many bits can we need to set in this byte?
let can_get = std::cmp::min(8 - in_byte_offset, bits_left);
// alright, pull them out.
let byte = unsafe { *self.data.offset(byte_offset as isize) };
let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize;
// make room for the bits we just read
value <<= can_get;
value |= got;
// update all the state
bit_offset += can_get;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_get;
}
Some(value)
}
}
/// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains
/// bits outside of the least significant `self.width` bits.
pub fn set(&mut self, i: usize, mut value: usize) -> bool {
let usize = std::mem::size_of::<usize>() * 8;
if i >= self.entries || value & !(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width))) != 0 {
false
} else {
// shift over into the high bits
value <<= std::cmp::min(usize - 1, usize - self.width);
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
|
while bits_left > 0 {
let can_set = std::cmp::min(8 - in_byte_offset, bits_left);
// pull out the highest can_set bits from value
let mut to_set: usize = value >> (usize - can_set);
// move them into where they will live
to_set <<= 8 - can_set - in_byte_offset;
let addr = unsafe { self.data.offset(byte_offset as isize) };
let mut byte = unsafe { *addr };
debug_assert!(to_set <= 255);
// clear the bits we'll be setting
byte &= !(0xFF
>>
(7 - in_byte_offset)
<<
(8usize.saturating_sub(in_byte_offset).saturating_sub(self.width)));
byte |= to_set as u8;
unsafe { *addr = byte };
// update all the state
value <<= can_set;
bit_offset += can_set;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_set;
}
true
}
}
/// Length in number of bitslices cointained.
pub fn len(&self) -> usize {
self.entries
}
/// Size of the internal buffer, in bytes.
pub fn byte_len(&self) -> usize {
// can't overflow, since creation asserts that it doesn't.
let w = self.entries * self.width;
let r = w % 8;
(w + r) / 8
}
pub fn iter(&self) -> Slices {
Slices { idx: 0, bm: self }
}
/// Get the raw pointer to this Bitmap's data.
pub unsafe fn get_ptr(&self) -> *mut u8 {
self.data
}
/// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be free'd
/// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this
/// operation should really be avoided. The destructor will call `Vec`s destructor on the
/// internal pointer.
pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 {
let p = self.data;
self.data = ptr;
p
}
}
/// Iterator over the bitslices in the bitmap
pub struct Slices<'a> {
idx: usize,
bm: &'a Bitmap
}
impl<'a> Iterator for Slices<'a> {
type Item = usize;
/// *NOTE*: This iterator is not "well-behaved", in that if you keep calling
/// `next` after it returns None, eventually it will overflow and start
/// yielding elements again. Use the `fuse` method to make this
/// "well-behaved".
fn next(&mut self) -> Option<usize> {
let rv = self.bm.get(self.idx);
self.idx += 1;
rv
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.bm.len(), Some(self.bm.len()))
}
}
impl<'a> std::iter::IntoIterator for &'a Bitmap {
type Item = usize;
type IntoIter = Slices<'a>;
fn into_iter(self) -> Slices<'a> {
self.iter()
}
}
#[cfg(test)]
mod test {
extern crate quickcheck;
use self::quickcheck::quickcheck;
use super::{get_n_bits_at, Bitmap};
use std;
#[test]
fn empty() {
let bm = Bitmap::new(10, 10).unwrap();
for i in 0..10 {
assert_eq!(bm.get(i), Some(0));
}
assert_eq!(bm.get(11), None);
}
#[test]
fn get() {
let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0];
let bm = Bitmap {
entries: 8,
width: 3,
data: &mut data as *mut [u8; 4] as *mut u8
};
for i in 0..8 {
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), None);
assert_eq!(bm.get(9), None);
// we don't use real data here, so don't bother freeing it
let mut bm = bm;
unsafe { bm.set_ptr(std::ptr::null_mut()); }
}
#[test]
fn set() {
let mut bm = Bitmap::new(10, 3).unwrap();
for i in 0..8 {
assert!(bm.set(i, i));
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), Some(0));
assert_eq!(bm.get(9), Some(0));
assert_eq!(bm.get(10), None);
}
#[test]
fn get_n_bits() {
macro_rules! t {
( $( $e:expr, $n:expr, $s:expr, $g:expr; )* ) => (
{
$(
assert_eq!(get_n_bits_at($e, $n, $s), $g);
)*
}
)
}
t! {
0b00111001, 1, 0, 0b0;
0b00111001, 8, 0, 0b00111001;
0b11010101, 2, 0, 0b11;
0b11010101, 2, 1, 0b10;
0b11010101, 2, 2, 0b01;
0b11010101, 2, 3, 0b10;
0b11010101, 2, 4, 0b01;
0b11010101, 3, 0, 0b110;
0b11010101, 3, 1, 0b101;
0b11010101, 3, 2, 0b010;
}
}
#[test]
fn iter() {
let mut bm = Bitmap::new(10, 3).unwrap();
bm.set(2, 0b101);
bm.set(7, 0b110);
let bs: Vec<usize> = bm.iter().collect();
assert_eq!(bs, [0, 0, 0b101, 0, 0, 0, 0, 0b110, 0, 0]);
}
fn set_then_clear_prop(entries: usize, width: usize) -> bool {
if width >= std::mem::size_of::<usize>() * 8 || width == 0 { return true }
let mut bm = Bitmap::new(entries, width).unwrap();
let all_set = (1 << width) - 1;
for i in 0..entries {
assert!(bm.set(i, all_set));
}
for val in &bm {
println!("should be {}, is {}", all_set, val);
if val != all_set { return false; }
}
for i in 0..entries {
assert!(bm.set(i, 0));
}
for val in &bm {
println!("should be {}, is {}", 0, val);
if val != 0 { return false; }
}
true
}
#[test]
fn set_then_clear_is_identity() {
quickcheck(set_then_clear_prop as fn(usize, usize) -> bool);
}
} | let mut bits_left = self.width; | random_line_split |
lib.rs | //! See the `Bitmap` type.
/// A dense bitmap, intended to store small bitslices (<= width of usize).
pub struct Bitmap {
entries: usize,
width: usize,
// Avoid a vector here because we have our own bounds checking, and
// don't want to duplicate the length, or panic.
data: *mut u8,
}
#[inline(always)]
fn get_n_bits_at(byte: u8, n: u8, start: u8) -> u8 {
(byte >> (8-n-start) as usize) & (0xFF >> (8-n) as usize)
}
impl Drop for Bitmap {
fn drop(&mut self) {
let p = self.data;
if p != 0 as *mut _ {
self.data = 0 as *mut _;
let _ = unsafe { Vec::from_raw_parts(p as *mut u8, 0, self.byte_len()) };
}
}
}
impl Bitmap {
/// Create a new bitmap, returning None if the data can't be allocated or
/// if the width of each slice can't fit in a usize. entries * width must
/// not overflow usize.
pub fn new(entries: usize, width: usize) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|needed| {
let ptr = {
let mut alloc = Vec::<u8>::with_capacity(needed);
let ptr = alloc.as_mut_ptr();
std::mem::forget(alloc);
ptr
};
unsafe { std::ptr::write_bytes(ptr, 0, needed); }
Some(Bitmap {
entries: entries,
width: width,
data: ptr as *mut u8
})
})
}
}
/// Create a new Bitmap from raw parts. Will return None if the given
/// entry and width would overflow the number of bits or bytes needed to
/// store the Bitmap.
pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> {
if width > (std::mem::size_of::<usize>() * 8) || width == 0 {
None
} else {
entries.checked_mul(width)
.and_then(|bits| bits.checked_add(8 - (bits % 8)))
.and_then(|rbits| rbits.checked_div(8))
.and_then(|_| {
Some(Bitmap {
entries: entries,
width: width,
data: ptr
})
})
}
}
/// Get the `i`th bitslice, returning None on out-of-bounds
pub fn get(&self, i: usize) -> Option<usize> {
if i >= self.entries {
None
} else {
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
let mut value: usize = 0;
while bits_left > 0 {
// how many bits can we need to set in this byte?
let can_get = std::cmp::min(8 - in_byte_offset, bits_left);
// alright, pull them out.
let byte = unsafe { *self.data.offset(byte_offset as isize) };
let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize;
// make room for the bits we just read
value <<= can_get;
value |= got;
// update all the state
bit_offset += can_get;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_get;
}
Some(value)
}
}
/// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains
/// bits outside of the least significant `self.width` bits.
pub fn set(&mut self, i: usize, mut value: usize) -> bool {
let usize = std::mem::size_of::<usize>() * 8;
if i >= self.entries || value & !(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width))) != 0 {
false
} else {
// shift over into the high bits
value <<= std::cmp::min(usize - 1, usize - self.width);
let mut bit_offset = i * self.width;
let mut in_byte_offset = bit_offset % 8;
let mut byte_offset = (bit_offset - in_byte_offset) / 8;
let mut bits_left = self.width;
while bits_left > 0 {
let can_set = std::cmp::min(8 - in_byte_offset, bits_left);
// pull out the highest can_set bits from value
let mut to_set: usize = value >> (usize - can_set);
// move them into where they will live
to_set <<= 8 - can_set - in_byte_offset;
let addr = unsafe { self.data.offset(byte_offset as isize) };
let mut byte = unsafe { *addr };
debug_assert!(to_set <= 255);
// clear the bits we'll be setting
byte &= !(0xFF
>>
(7 - in_byte_offset)
<<
(8usize.saturating_sub(in_byte_offset).saturating_sub(self.width)));
byte |= to_set as u8;
unsafe { *addr = byte };
// update all the state
value <<= can_set;
bit_offset += can_set;
in_byte_offset = bit_offset % 8;
byte_offset = (bit_offset - in_byte_offset) / 8;
bits_left -= can_set;
}
true
}
}
/// Length in number of bitslices cointained.
pub fn len(&self) -> usize {
self.entries
}
/// Size of the internal buffer, in bytes.
pub fn byte_len(&self) -> usize |
pub fn iter(&self) -> Slices {
Slices { idx: 0, bm: self }
}
/// Get the raw pointer to this Bitmap's data.
pub unsafe fn get_ptr(&self) -> *mut u8 {
self.data
}
/// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be free'd
/// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this
/// operation should really be avoided. The destructor will call `Vec`s destructor on the
/// internal pointer.
pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 {
let p = self.data;
self.data = ptr;
p
}
}
/// Iterator over the bitslices in the bitmap
pub struct Slices<'a> {
idx: usize,
bm: &'a Bitmap
}
impl<'a> Iterator for Slices<'a> {
type Item = usize;
/// *NOTE*: This iterator is not "well-behaved", in that if you keep calling
/// `next` after it returns None, eventually it will overflow and start
/// yielding elements again. Use the `fuse` method to make this
/// "well-behaved".
fn next(&mut self) -> Option<usize> {
let rv = self.bm.get(self.idx);
self.idx += 1;
rv
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.bm.len(), Some(self.bm.len()))
}
}
impl<'a> std::iter::IntoIterator for &'a Bitmap {
type Item = usize;
type IntoIter = Slices<'a>;
fn into_iter(self) -> Slices<'a> {
self.iter()
}
}
#[cfg(test)]
mod test {
extern crate quickcheck;
use self::quickcheck::quickcheck;
use super::{get_n_bits_at, Bitmap};
use std;
#[test]
fn empty() {
let bm = Bitmap::new(10, 10).unwrap();
for i in 0..10 {
assert_eq!(bm.get(i), Some(0));
}
assert_eq!(bm.get(11), None);
}
#[test]
fn get() {
let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0];
let bm = Bitmap {
entries: 8,
width: 3,
data: &mut data as *mut [u8; 4] as *mut u8
};
for i in 0..8 {
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), None);
assert_eq!(bm.get(9), None);
// we don't use real data here, so don't bother freeing it
let mut bm = bm;
unsafe { bm.set_ptr(std::ptr::null_mut()); }
}
#[test]
fn set() {
let mut bm = Bitmap::new(10, 3).unwrap();
for i in 0..8 {
assert!(bm.set(i, i));
assert_eq!(bm.get(i), Some(i));
}
assert_eq!(bm.get(8), Some(0));
assert_eq!(bm.get(9), Some(0));
assert_eq!(bm.get(10), None);
}
#[test]
fn get_n_bits() {
macro_rules! t {
( $( $e:expr, $n:expr, $s:expr, $g:expr; )* ) => (
{
$(
assert_eq!(get_n_bits_at($e, $n, $s), $g);
)*
}
)
}
t! {
0b00111001, 1, 0, 0b0;
0b00111001, 8, 0, 0b00111001;
0b11010101, 2, 0, 0b11;
0b11010101, 2, 1, 0b10;
0b11010101, 2, 2, 0b01;
0b11010101, 2, 3, 0b10;
0b11010101, 2, 4, 0b01;
0b11010101, 3, 0, 0b110;
0b11010101, 3, 1, 0b101;
0b11010101, 3, 2, 0b010;
}
}
#[test]
fn iter() {
let mut bm = Bitmap::new(10, 3).unwrap();
bm.set(2, 0b101);
bm.set(7, 0b110);
let bs: Vec<usize> = bm.iter().collect();
assert_eq!(bs, [0, 0, 0b101, 0, 0, 0, 0, 0b110, 0, 0]);
}
fn set_then_clear_prop(entries: usize, width: usize) -> bool {
if width >= std::mem::size_of::<usize>() * 8 || width == 0 { return true }
let mut bm = Bitmap::new(entries, width).unwrap();
let all_set = (1 << width) - 1;
for i in 0..entries {
assert!(bm.set(i, all_set));
}
for val in &bm {
println!("should be {}, is {}", all_set, val);
if val != all_set { return false; }
}
for i in 0..entries {
assert!(bm.set(i, 0));
}
for val in &bm {
println!("should be {}, is {}", 0, val);
if val != 0 { return false; }
}
true
}
#[test]
fn set_then_clear_is_identity() {
quickcheck(set_then_clear_prop as fn(usize, usize) -> bool);
}
}
| {
// can't overflow, since creation asserts that it doesn't.
let w = self.entries * self.width;
let r = w % 8;
(w + r) / 8
} | identifier_body |
message.go | package main
import (
"fmt"
"log"
"encoding/json"
"github.com/googollee/go-socket.io"
)
/******************************************************************************
This file contains all functionality that is sent between the communication
channel instances of Lobby and the User structs that it is responsible for.
All of the JSON encoding and Socket.IO events in this document match the
ANEXD 'Message Data Specification' document, included within our project
documentation.
All structs here implement one of the following interfaces:
MessageServer - A message sent to the application server.
Command - A message sent to the lobby instance to execute operations.
MessageUser - A message sent to a User within the lobby.
There is a channel for each interface running for the respective
receipient, and using run time Polymorphism, the respective processing
function is implicit to the underlying struct, providing constant
time complexity to begin execution.
All of the implemented functions are run in a new goroutine, providing them
with their own thread, which upon completion of execution, leave no
remaining references to the struct instance, allowing it to be garbage
collected.
This implementation significantly simplifies the main lobby and user code,
and makes the implementation modular and easy to maintain (such as adding
new functionality by adding a new interface implementation here).
******************************************************************************/
/*
Interface type: For messages sent to the application server.
*/
type MessageServer interface {
tcp(l *Lobby)
socketio(l *Lobby)
}
/*
An application specific message from a User.
*/
type MsgServer struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
func (m MsgServer) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(m)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (m MsgServer) socketio(l *Lobby) {
(*l.socket).Emit("in", m)
}
/*
Sends an event to the Application server with parameters for creating a new
lobby instance.
*/
type NewSession struct {
Event string `json:"event"`
Players float64 `json:"players"`
MaxPlayers float64 `json:"maxplayers"`
}
func (n NewSession) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(n)
if err != nil |
(*l.tcpConn).Write(jsonMsg)
}
func (n NewSession) socketio(l *Lobby) {
(*l.socket).Emit("in", n)
}
/*
Sends an event from the Host User to inform the Application server that
it has loaded the Application and is ready to communicate.
Also implements Command (multiple interface implementation)
*/
type Launch struct {
Event string `json:"event"`
}
func (la Launch) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(la)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (la Launch) socketio(l *Lobby) {
(*l.socket).Emit("in", la)
}
/*
Sends an event to state that the Host has ended the session (so the
server can run it's lobby end functionality)
*/
type End struct {
Event string `json:"event"`
}
func (e End) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(e)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (e End) socketio(l *Lobby) {
(*l.socket).Emit("in", e)
}
/*
Interface type: Commands received by the lobby for accessing/changing
the lobby data structure.
*/
type Command interface {
execute(l *Lobby)
}
/*
Contains all potential fields in the three specified events received from
Application servers: "created", "msgplayer" and "msgall"
Using omitempty, only required fields will be received, however this cannot
be used for Player, as the 0 value for player is the 'empty' value - and
the value 0 might be intentional when a message needs to be sent to a lobby host.
*/
type ServerMessage struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg,omitempty"`
}
func (s ServerMessage) execute(l *Lobby) {
switch s.Event {
case "msgplayer":
l.command <- MsgPlayer{
Player: int(s.Player),
Msg: s.Msg,
}
case "msgall":
l.command <- MsgAll{
Msg: s.Msg,
}
case "created":
l.command <- Created{}
}
}
/*
*** MsgPlayer, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgPlayer struct {
Player int
Msg map[string]interface{}
}
func (m MsgPlayer) execute(l *Lobby) {
if m.Player >= len(l.users) || m.Player < 0 {
log.Print("MsgPlayer: invalid player index")
return
}
l.users[m.Player].send <- m
}
/*
*** MsgAll, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgAll struct {
Room string
Msg map[string]interface{}
}
func (m MsgAll) execute(l *Lobby) {
m.Room = l.lobbyId
l.users[0].send <- m
}
/*
An Event for connecting an Application server over Socket.IO to the lobby.
*/
type ServerSocket struct {
Socket *socketio.Socket
}
func (s ServerSocket) execute(l *Lobby) {
if l.timeout != nil {
if l.socket == nil {
l.socket = s.Socket
(*l.socket).Emit("connectlobby", true)
}
l.timeout <- false
}
}
/*
An event to instantiate a new desktop user as the host user of a lobby.
*/
type HostLobby struct {
Username string
Socket *socketio.Socket
}
func (h HostLobby) execute(l *Lobby) {
if len(l.users) != 0 {
(*h.Socket).Emit("hostlobby", false)
log.Print("manager.desktopSetup: lobby id entered already has a host user.")
return
}
err := l.addNewUser(h.Username, h.Socket)
if err != nil {
(*h.Socket).Emit("hostlobby", false)
log.Print(err)
return
}
(*h.Socket).Emit("hostlobby", true)
l.command <- Update{}
}
/*
An event to attempt to add a new connecting mobile user to the lobby.
*/
type JoinLobby struct {
Username string
Socket *socketio.Socket
}
func (j JoinLobby) execute(l *Lobby) {
if len(l.users) == 0 {
(*j.Socket).Emit("joinlobby", false)
log.Print("manager.desktopSetup: lobby id entered does not have a host user.")
return
}
err := l.addNewUser(j.Username, j.Socket)
if err != nil {
(*j.Socket).Emit("joinlobby", false)
log.Print(err)
return
}
(*j.Socket).Emit("joinlobby", true)
l.command <- Update{}
}
/*
An event to force emit update for the list of users in the lobby.
*/
type Update struct {}
func (u Update) execute(l *Lobby) {
l.updateLobby()
}
/*
An event to attempt to prepare the Application server to begin the Application.
*/
type Start struct {}
func (s Start) execute(l *Lobby) {
room := l.lobbyId
var err error
//establish connection
if l.game.connType == "tcp" {
if l.tcpConn == nil {
err = l.connectTcp()
}
} else { //l.game.connType == "socketio"
if l.socket == nil {
err = l.connectSocketio()
}
}
if err != nil {
log.Print(err)
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: true,
Feedback: "Unable to connect to application server.",
}
return
}
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: false,
Feedback: "Connected to application server.",
}
l.createSession()
}
/*
Confirms success of a Lobby being created on the Application server.
*/
type Created struct {}
func (c Created) execute(l *Lobby) {
if l.timeout != nil {
l.timeout <- false
}
}
/*
Also implements MessageServer (multiple interface implementation)
Sets the Lobby.started to true, locking the slice data structure
for removal of users.
*/
func (la Launch) execute(l *Lobby) {
l.started = true
la.Event = "launch"
l.send <- la
}
/*
Struct to execute the removal of a user from the lobby data structure.
Caused by a leave or a kick event.
*/
type RemovedUser struct {
Player float64 `json:"-"`
Username string `json:"username"`
Reason string `json:"reason,omitempty"`
}
func (r RemovedUser) execute(l *Lobby) {
kicked := false
if r.Player == 0 { //kicked by username
kicked = true
for i := 1; i < len(l.users); i++ {
if r.Username == l.users[i].username {
r.Player = float64(i)
break
}
}
}
if r.Player == 0 { //if still not set, does not exist
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was not found in lobby.", r.Username),
}
return
}
err := l.removeUser(r.Player)
if err != nil {
log.Print(err)
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was unable to be removed.", r.Username),
}
return
}
if kicked {
l.users[0].send <- Kick{
Response: true,
Feedback: fmt.Sprintf("%s was removed from the lobby.", r.Username),
}
}
//message removed user:
if r.Reason != "" {
l.users[int(r.Player)].send <- Kicked{
Reason: r.Reason,
}
}
l.users[int(r.Player)].send <- Leave{}
}
/*
Interface type: Messages to be processed by a User instance.
*/
type MessageUser interface {
process(u *User)
}
/*
Emits the leave event to execute a soft leave.
*/
type Leave struct {}
func (l Leave) process(u *User) {
(*u.socket).Emit("leave")//, "You have been removed from the lobby.")
(*u.socket).Emit("disconnect")
}
/*
Emits the application ID matching the one stored in the database.
*/
type GetAppId struct {
Appid float64
}
func (g GetAppId) process(u *User) {
(*u.socket).Emit("getappid", g.Appid)
}
/*
Emits the reason why the user was kicked by the host.
*/
type Kicked struct {
Reason string
}
func (k Kicked) process(u *User) {
(*u.socket).Emit("kicked", k.Reason)
}
/*
Emits the success response to the attempted kick to the host.
*/
type Kick struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
func (k Kick) process(u *User) {
(*u.socket).Emit("kick", k)
}
/*
Emits feedback for the operation of starting an Application on the
respective Application server.
*/
type GameStart struct {
Room string `json:"-"`
Complete bool `json:"complete"`
Failed bool `json:"failed"`
Feedback string `json:"feedback"`
}
func (g GameStart) process(u *User) {
(*u.socket).Emit("start", g)
if g.Complete {
(*u.socket).BroadcastTo(g.Room, "start", g)
}
}
/*
Struct used to Emit an update of users in the lobby, in a JSON array.
*/
type LobbyUsers struct {
Room string
List []LobbyUser
}
func (l LobbyUsers) process(u *User) {
(*u.socket).Emit("updatelobby", l.List)
(*u.socket).BroadcastTo(l.Room, "updatelobby", l.List)
}
/*
*** MsgPlayer, also implementing Command type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
func (m MsgPlayer) process(u *User) {
(*u.socket).Emit("msgplayer", m.Msg)
}
/*
*** MsgAll, also implementing Command type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
func (m MsgAll) process(u *User) {
(*u.socket).Emit("msgall", m.Msg)
(*u.socket).BroadcastTo(m.Room, "msgall", m.Msg)
} | {
log.Print(err)
return
} | conditional_block |
message.go | package main
import (
"fmt"
"log"
"encoding/json"
"github.com/googollee/go-socket.io"
)
/******************************************************************************
This file contains all functionality that is sent between the communication
channel instances of Lobby and the User structs that it is responsible for.
All of the JSON encoding and Socket.IO events in this document match the
ANEXD 'Message Data Specification' document, included within our project
documentation.
All structs here implement one of the following interfaces:
MessageServer - A message sent to the application server.
Command - A message sent to the lobby instance to execute operations.
MessageUser - A message sent to a User within the lobby.
There is a channel for each interface running for the respective
receipient, and using run time Polymorphism, the respective processing
function is implicit to the underlying struct, providing constant
time complexity to begin execution.
All of the implemented functions are run in a new goroutine, providing them
with their own thread, which upon completion of execution, leave no
remaining references to the struct instance, allowing it to be garbage
collected.
This implementation significantly simplifies the main lobby and user code,
and makes the implementation modular and easy to maintain (such as adding
new functionality by adding a new interface implementation here).
******************************************************************************/
/*
Interface type: For messages sent to the application server.
*/
type MessageServer interface {
tcp(l *Lobby)
socketio(l *Lobby)
}
/*
An application specific message from a User.
*/
type MsgServer struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
func (m MsgServer) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(m)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (m MsgServer) socketio(l *Lobby) {
(*l.socket).Emit("in", m)
}
/*
Sends an event to the Application server with parameters for creating a new
lobby instance.
*/
type NewSession struct {
Event string `json:"event"`
Players float64 `json:"players"`
MaxPlayers float64 `json:"maxplayers"`
}
func (n NewSession) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(n)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (n NewSession) socketio(l *Lobby) {
(*l.socket).Emit("in", n)
}
/*
Sends an event from the Host User to inform the Application server that
it has loaded the Application and is ready to communicate.
Also implements Command (multiple interface implementation)
*/
type Launch struct {
Event string `json:"event"`
}
func (la Launch) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(la)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (la Launch) socketio(l *Lobby) {
(*l.socket).Emit("in", la)
}
/*
Sends an event to state that the Host has ended the session (so the
server can run it's lobby end functionality)
*/
type End struct {
Event string `json:"event"`
}
func (e End) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(e)
if err != nil {
log.Print(err)
return
} | }
/*
Interface type: Commands received by the lobby for accessing/changing
the lobby data structure.
*/
type Command interface {
execute(l *Lobby)
}
/*
Contains all potential fields in the three specified events received from
Application servers: "created", "msgplayer" and "msgall"
Using omitempty, only required fields will be received, however this cannot
be used for Player, as the 0 value for player is the 'empty' value - and
the value 0 might be intentional when a message needs to be sent to a lobby host.
*/
type ServerMessage struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg,omitempty"`
}
func (s ServerMessage) execute(l *Lobby) {
switch s.Event {
case "msgplayer":
l.command <- MsgPlayer{
Player: int(s.Player),
Msg: s.Msg,
}
case "msgall":
l.command <- MsgAll{
Msg: s.Msg,
}
case "created":
l.command <- Created{}
}
}
/*
*** MsgPlayer, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgPlayer struct {
Player int
Msg map[string]interface{}
}
func (m MsgPlayer) execute(l *Lobby) {
if m.Player >= len(l.users) || m.Player < 0 {
log.Print("MsgPlayer: invalid player index")
return
}
l.users[m.Player].send <- m
}
/*
*** MsgAll, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgAll struct {
Room string
Msg map[string]interface{}
}
func (m MsgAll) execute(l *Lobby) {
m.Room = l.lobbyId
l.users[0].send <- m
}
/*
An Event for connecting an Application server over Socket.IO to the lobby.
*/
type ServerSocket struct {
Socket *socketio.Socket
}
func (s ServerSocket) execute(l *Lobby) {
if l.timeout != nil {
if l.socket == nil {
l.socket = s.Socket
(*l.socket).Emit("connectlobby", true)
}
l.timeout <- false
}
}
/*
An event to instantiate a new desktop user as the host user of a lobby.
*/
type HostLobby struct {
Username string
Socket *socketio.Socket
}
func (h HostLobby) execute(l *Lobby) {
if len(l.users) != 0 {
(*h.Socket).Emit("hostlobby", false)
log.Print("manager.desktopSetup: lobby id entered already has a host user.")
return
}
err := l.addNewUser(h.Username, h.Socket)
if err != nil {
(*h.Socket).Emit("hostlobby", false)
log.Print(err)
return
}
(*h.Socket).Emit("hostlobby", true)
l.command <- Update{}
}
/*
An event to attempt to add a new connecting mobile user to the lobby.
*/
type JoinLobby struct {
Username string
Socket *socketio.Socket
}
func (j JoinLobby) execute(l *Lobby) {
if len(l.users) == 0 {
(*j.Socket).Emit("joinlobby", false)
log.Print("manager.desktopSetup: lobby id entered does not have a host user.")
return
}
err := l.addNewUser(j.Username, j.Socket)
if err != nil {
(*j.Socket).Emit("joinlobby", false)
log.Print(err)
return
}
(*j.Socket).Emit("joinlobby", true)
l.command <- Update{}
}
/*
An event to force emit update for the list of users in the lobby.
*/
type Update struct {}
func (u Update) execute(l *Lobby) {
l.updateLobby()
}
/*
An event to attempt to prepare the Application server to begin the Application.
*/
type Start struct {}
func (s Start) execute(l *Lobby) {
room := l.lobbyId
var err error
//establish connection
if l.game.connType == "tcp" {
if l.tcpConn == nil {
err = l.connectTcp()
}
} else { //l.game.connType == "socketio"
if l.socket == nil {
err = l.connectSocketio()
}
}
if err != nil {
log.Print(err)
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: true,
Feedback: "Unable to connect to application server.",
}
return
}
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: false,
Feedback: "Connected to application server.",
}
l.createSession()
}
/*
Confirms success of a Lobby being created on the Application server.
*/
type Created struct {}
func (c Created) execute(l *Lobby) {
if l.timeout != nil {
l.timeout <- false
}
}
/*
Also implements MessageServer (multiple interface implementation)
Sets the Lobby.started to true, locking the slice data structure
for removal of users.
*/
func (la Launch) execute(l *Lobby) {
l.started = true
la.Event = "launch"
l.send <- la
}
/*
Struct to execute the removal of a user from the lobby data structure.
Caused by a leave or a kick event.
*/
type RemovedUser struct {
Player float64 `json:"-"`
Username string `json:"username"`
Reason string `json:"reason,omitempty"`
}
func (r RemovedUser) execute(l *Lobby) {
kicked := false
if r.Player == 0 { //kicked by username
kicked = true
for i := 1; i < len(l.users); i++ {
if r.Username == l.users[i].username {
r.Player = float64(i)
break
}
}
}
if r.Player == 0 { //if still not set, does not exist
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was not found in lobby.", r.Username),
}
return
}
err := l.removeUser(r.Player)
if err != nil {
log.Print(err)
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was unable to be removed.", r.Username),
}
return
}
if kicked {
l.users[0].send <- Kick{
Response: true,
Feedback: fmt.Sprintf("%s was removed from the lobby.", r.Username),
}
}
//message removed user:
if r.Reason != "" {
l.users[int(r.Player)].send <- Kicked{
Reason: r.Reason,
}
}
l.users[int(r.Player)].send <- Leave{}
}
/*
Interface type: Messages to be processed by a User instance.
*/
type MessageUser interface {
process(u *User)
}
/*
Emits the leave event to execute a soft leave.
*/
type Leave struct {}
func (l Leave) process(u *User) {
(*u.socket).Emit("leave")//, "You have been removed from the lobby.")
(*u.socket).Emit("disconnect")
}
/*
Emits the application ID matching the one stored in the database.
*/
type GetAppId struct {
Appid float64
}
func (g GetAppId) process(u *User) {
(*u.socket).Emit("getappid", g.Appid)
}
/*
Emits the reason why the user was kicked by the host.
*/
type Kicked struct {
Reason string
}
func (k Kicked) process(u *User) {
(*u.socket).Emit("kicked", k.Reason)
}
/*
Emits the success response to the attempted kick to the host.
*/
type Kick struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
func (k Kick) process(u *User) {
(*u.socket).Emit("kick", k)
}
/*
Emits feedback for the operation of starting an Application on the
respective Application server.
*/
type GameStart struct {
Room string `json:"-"`
Complete bool `json:"complete"`
Failed bool `json:"failed"`
Feedback string `json:"feedback"`
}
func (g GameStart) process(u *User) {
(*u.socket).Emit("start", g)
if g.Complete {
(*u.socket).BroadcastTo(g.Room, "start", g)
}
}
/*
Struct used to Emit an update of users in the lobby, in a JSON array.
*/
type LobbyUsers struct {
Room string
List []LobbyUser
}
func (l LobbyUsers) process(u *User) {
(*u.socket).Emit("updatelobby", l.List)
(*u.socket).BroadcastTo(l.Room, "updatelobby", l.List)
}
/*
*** MsgPlayer, also implementing Command type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
func (m MsgPlayer) process(u *User) {
(*u.socket).Emit("msgplayer", m.Msg)
}
/*
*** MsgAll, also implementing Command type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
func (m MsgAll) process(u *User) {
(*u.socket).Emit("msgall", m.Msg)
(*u.socket).BroadcastTo(m.Room, "msgall", m.Msg)
} | (*l.tcpConn).Write(jsonMsg)
}
func (e End) socketio(l *Lobby) {
(*l.socket).Emit("in", e) | random_line_split |
message.go | package main
import (
"fmt"
"log"
"encoding/json"
"github.com/googollee/go-socket.io"
)
/******************************************************************************
This file contains all functionality that is sent between the communication
channel instances of Lobby and the User structs that it is responsible for.
All of the JSON encoding and Socket.IO events in this document match the
ANEXD 'Message Data Specification' document, included within our project
documentation.
All structs here implement one of the following interfaces:
MessageServer - A message sent to the application server.
Command - A message sent to the lobby instance to execute operations.
MessageUser - A message sent to a User within the lobby.
There is a channel for each interface running for the respective
receipient, and using run time Polymorphism, the respective processing
function is implicit to the underlying struct, providing constant
time complexity to begin execution.
All of the implemented functions are run in a new goroutine, providing them
with their own thread, which upon completion of execution, leave no
remaining references to the struct instance, allowing it to be garbage
collected.
This implementation significantly simplifies the main lobby and user code,
and makes the implementation modular and easy to maintain (such as adding
new functionality by adding a new interface implementation here).
******************************************************************************/
/*
Interface type: For messages sent to the application server.
*/
type MessageServer interface {
tcp(l *Lobby)
socketio(l *Lobby)
}
/*
An application specific message from a User.
*/
type MsgServer struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
func (m MsgServer) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(m)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (m MsgServer) socketio(l *Lobby) {
(*l.socket).Emit("in", m)
}
/*
Sends an event to the Application server with parameters for creating a new
lobby instance.
*/
type NewSession struct {
Event string `json:"event"`
Players float64 `json:"players"`
MaxPlayers float64 `json:"maxplayers"`
}
func (n NewSession) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(n)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (n NewSession) socketio(l *Lobby) {
(*l.socket).Emit("in", n)
}
/*
Sends an event from the Host User to inform the Application server that
it has loaded the Application and is ready to communicate.
Also implements Command (multiple interface implementation)
*/
type Launch struct {
Event string `json:"event"`
}
func (la Launch) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(la)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (la Launch) socketio(l *Lobby) {
(*l.socket).Emit("in", la)
}
/*
Sends an event to state that the Host has ended the session (so the
server can run it's lobby end functionality)
*/
type End struct {
Event string `json:"event"`
}
func (e End) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(e)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (e End) socketio(l *Lobby) {
(*l.socket).Emit("in", e)
}
/*
Interface type: Commands received by the lobby for accessing/changing
the lobby data structure.
*/
type Command interface {
execute(l *Lobby)
}
/*
Contains all potential fields in the three specified events received from
Application servers: "created", "msgplayer" and "msgall"
Using omitempty, only required fields will be received, however this cannot
be used for Player, as the 0 value for player is the 'empty' value - and
the value 0 might be intentional when a message needs to be sent to a lobby host.
*/
type ServerMessage struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg,omitempty"`
}
func (s ServerMessage) execute(l *Lobby) {
switch s.Event {
case "msgplayer":
l.command <- MsgPlayer{
Player: int(s.Player),
Msg: s.Msg,
}
case "msgall":
l.command <- MsgAll{
Msg: s.Msg,
}
case "created":
l.command <- Created{}
}
}
/*
*** MsgPlayer, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgPlayer struct {
Player int
Msg map[string]interface{}
}
func (m MsgPlayer) execute(l *Lobby) {
if m.Player >= len(l.users) || m.Player < 0 {
log.Print("MsgPlayer: invalid player index")
return
}
l.users[m.Player].send <- m
}
/*
*** MsgAll, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgAll struct {
Room string
Msg map[string]interface{}
}
func (m MsgAll) execute(l *Lobby) {
m.Room = l.lobbyId
l.users[0].send <- m
}
/*
An Event for connecting an Application server over Socket.IO to the lobby.
*/
type ServerSocket struct {
Socket *socketio.Socket
}
func (s ServerSocket) execute(l *Lobby) {
if l.timeout != nil {
if l.socket == nil {
l.socket = s.Socket
(*l.socket).Emit("connectlobby", true)
}
l.timeout <- false
}
}
/*
An event to instantiate a new desktop user as the host user of a lobby.
*/
type HostLobby struct {
Username string
Socket *socketio.Socket
}
func (h HostLobby) | (l *Lobby) {
if len(l.users) != 0 {
(*h.Socket).Emit("hostlobby", false)
log.Print("manager.desktopSetup: lobby id entered already has a host user.")
return
}
err := l.addNewUser(h.Username, h.Socket)
if err != nil {
(*h.Socket).Emit("hostlobby", false)
log.Print(err)
return
}
(*h.Socket).Emit("hostlobby", true)
l.command <- Update{}
}
/*
An event to attempt to add a new connecting mobile user to the lobby.
*/
type JoinLobby struct {
Username string
Socket *socketio.Socket
}
func (j JoinLobby) execute(l *Lobby) {
if len(l.users) == 0 {
(*j.Socket).Emit("joinlobby", false)
log.Print("manager.desktopSetup: lobby id entered does not have a host user.")
return
}
err := l.addNewUser(j.Username, j.Socket)
if err != nil {
(*j.Socket).Emit("joinlobby", false)
log.Print(err)
return
}
(*j.Socket).Emit("joinlobby", true)
l.command <- Update{}
}
/*
An event to force emit update for the list of users in the lobby.
*/
type Update struct {}
func (u Update) execute(l *Lobby) {
l.updateLobby()
}
/*
An event to attempt to prepare the Application server to begin the Application.
*/
type Start struct {}
func (s Start) execute(l *Lobby) {
room := l.lobbyId
var err error
//establish connection
if l.game.connType == "tcp" {
if l.tcpConn == nil {
err = l.connectTcp()
}
} else { //l.game.connType == "socketio"
if l.socket == nil {
err = l.connectSocketio()
}
}
if err != nil {
log.Print(err)
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: true,
Feedback: "Unable to connect to application server.",
}
return
}
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: false,
Feedback: "Connected to application server.",
}
l.createSession()
}
/*
Confirms success of a Lobby being created on the Application server.
*/
type Created struct {}
func (c Created) execute(l *Lobby) {
if l.timeout != nil {
l.timeout <- false
}
}
/*
Also implements MessageServer (multiple interface implementation)
Sets the Lobby.started to true, locking the slice data structure
for removal of users.
*/
func (la Launch) execute(l *Lobby) {
l.started = true
la.Event = "launch"
l.send <- la
}
/*
Struct to execute the removal of a user from the lobby data structure.
Caused by a leave or a kick event.
*/
type RemovedUser struct {
Player float64 `json:"-"`
Username string `json:"username"`
Reason string `json:"reason,omitempty"`
}
func (r RemovedUser) execute(l *Lobby) {
kicked := false
if r.Player == 0 { //kicked by username
kicked = true
for i := 1; i < len(l.users); i++ {
if r.Username == l.users[i].username {
r.Player = float64(i)
break
}
}
}
if r.Player == 0 { //if still not set, does not exist
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was not found in lobby.", r.Username),
}
return
}
err := l.removeUser(r.Player)
if err != nil {
log.Print(err)
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was unable to be removed.", r.Username),
}
return
}
if kicked {
l.users[0].send <- Kick{
Response: true,
Feedback: fmt.Sprintf("%s was removed from the lobby.", r.Username),
}
}
//message removed user:
if r.Reason != "" {
l.users[int(r.Player)].send <- Kicked{
Reason: r.Reason,
}
}
l.users[int(r.Player)].send <- Leave{}
}
/*
Interface type: Messages to be processed by a User instance.
*/
type MessageUser interface {
process(u *User)
}
/*
Emits the leave event to execute a soft leave.
*/
type Leave struct {}
func (l Leave) process(u *User) {
(*u.socket).Emit("leave")//, "You have been removed from the lobby.")
(*u.socket).Emit("disconnect")
}
/*
Emits the application ID matching the one stored in the database.
*/
type GetAppId struct {
Appid float64
}
func (g GetAppId) process(u *User) {
(*u.socket).Emit("getappid", g.Appid)
}
/*
Emits the reason why the user was kicked by the host.
*/
type Kicked struct {
Reason string
}
func (k Kicked) process(u *User) {
(*u.socket).Emit("kicked", k.Reason)
}
/*
Emits the success response to the attempted kick to the host.
*/
type Kick struct {
Response bool `json:"response"`
Feedback string `json:"feedback"`
}
func (k Kick) process(u *User) {
(*u.socket).Emit("kick", k)
}
/*
Emits feedback for the operation of starting an Application on the
respective Application server.
*/
type GameStart struct {
Room string `json:"-"`
Complete bool `json:"complete"`
Failed bool `json:"failed"`
Feedback string `json:"feedback"`
}
func (g GameStart) process(u *User) {
(*u.socket).Emit("start", g)
if g.Complete {
(*u.socket).BroadcastTo(g.Room, "start", g)
}
}
/*
Struct used to Emit an update of users in the lobby, in a JSON array.
*/
type LobbyUsers struct {
Room string
List []LobbyUser
}
func (l LobbyUsers) process(u *User) {
(*u.socket).Emit("updatelobby", l.List)
(*u.socket).BroadcastTo(l.Room, "updatelobby", l.List)
}
/*
*** MsgPlayer, also implementing Command type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
func (m MsgPlayer) process(u *User) {
(*u.socket).Emit("msgplayer", m.Msg)
}
/*
*** MsgAll, also implementing Command type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
func (m MsgAll) process(u *User) {
(*u.socket).Emit("msgall", m.Msg)
(*u.socket).BroadcastTo(m.Room, "msgall", m.Msg)
} | execute | identifier_name |
message.go | package main
import (
"fmt"
"log"
"encoding/json"
"github.com/googollee/go-socket.io"
)
/******************************************************************************
This file contains all functionality that is sent between the communication
channel instances of Lobby and the User structs that it is responsible for.
All of the JSON encoding and Socket.IO events in this document match the
ANEXD 'Message Data Specification' document, included within our project
documentation.
All structs here implement one of the following interfaces:
MessageServer - A message sent to the application server.
Command - A message sent to the lobby instance to execute operations.
MessageUser - A message sent to a User within the lobby.
There is a channel for each interface running for the respective
receipient, and using run time Polymorphism, the respective processing
function is implicit to the underlying struct, providing constant
time complexity to begin execution.
All of the implemented functions are run in a new goroutine, providing them
with their own thread, which upon completion of execution, leave no
remaining references to the struct instance, allowing it to be garbage
collected.
This implementation significantly simplifies the main lobby and user code,
and makes the implementation modular and easy to maintain (such as adding
new functionality by adding a new interface implementation here).
******************************************************************************/
/*
Interface type: For messages sent to the application server.
*/
type MessageServer interface {
tcp(l *Lobby)
socketio(l *Lobby)
}
/*
An application specific message from a User.
*/
type MsgServer struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg"`
}
func (m MsgServer) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(m)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (m MsgServer) socketio(l *Lobby) {
(*l.socket).Emit("in", m)
}
/*
Sends an event to the Application server with parameters for creating a new
lobby instance.
*/
type NewSession struct {
Event string `json:"event"`
Players float64 `json:"players"`
MaxPlayers float64 `json:"maxplayers"`
}
func (n NewSession) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(n)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (n NewSession) socketio(l *Lobby) {
(*l.socket).Emit("in", n)
}
/*
Sends an event from the Host User to inform the Application server that
it has loaded the Application and is ready to communicate.
Also implements Command (multiple interface implementation)
*/
type Launch struct {
Event string `json:"event"`
}
func (la Launch) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(la)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (la Launch) socketio(l *Lobby) {
(*l.socket).Emit("in", la)
}
/*
Sends an event to state that the Host has ended the session (so the
server can run it's lobby end functionality)
*/
type End struct {
Event string `json:"event"`
}
func (e End) tcp(l *Lobby) {
jsonMsg, err := json.Marshal(e)
if err != nil {
log.Print(err)
return
}
(*l.tcpConn).Write(jsonMsg)
}
func (e End) socketio(l *Lobby) {
(*l.socket).Emit("in", e)
}
/*
Interface type: Commands received by the lobby for accessing/changing
the lobby data structure.
*/
type Command interface {
execute(l *Lobby)
}
/*
Contains all potential fields in the three specified events received from
Application servers: "created", "msgplayer" and "msgall"
Using omitempty, only required fields will be received, however this cannot
be used for Player, as the 0 value for player is the 'empty' value - and
the value 0 might be intentional when a message needs to be sent to a lobby host.
*/
type ServerMessage struct {
Event string `json:"event"`
Player float64 `json:"player"`
Msg map[string]interface{} `json:"msg,omitempty"`
}
func (s ServerMessage) execute(l *Lobby) {
switch s.Event {
case "msgplayer":
l.command <- MsgPlayer{
Player: int(s.Player),
Msg: s.Msg,
}
case "msgall":
l.command <- MsgAll{
Msg: s.Msg,
}
case "created":
l.command <- Created{}
}
}
/*
*** MsgPlayer, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgPlayer struct {
Player int
Msg map[string]interface{}
}
func (m MsgPlayer) execute(l *Lobby) {
if m.Player >= len(l.users) || m.Player < 0 {
log.Print("MsgPlayer: invalid player index")
return
}
l.users[m.Player].send <- m
}
/*
*** MsgAll, also implementing MessageUser type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
type MsgAll struct {
Room string
Msg map[string]interface{}
}
func (m MsgAll) execute(l *Lobby) {
m.Room = l.lobbyId
l.users[0].send <- m
}
/*
An Event for connecting an Application server over Socket.IO to the lobby.
*/
type ServerSocket struct {
Socket *socketio.Socket
}
func (s ServerSocket) execute(l *Lobby) {
if l.timeout != nil {
if l.socket == nil {
l.socket = s.Socket
(*l.socket).Emit("connectlobby", true)
}
l.timeout <- false
}
}
/*
An event to instantiate a new desktop user as the host user of a lobby.
*/
type HostLobby struct {
Username string
Socket *socketio.Socket
}
func (h HostLobby) execute(l *Lobby) {
if len(l.users) != 0 {
(*h.Socket).Emit("hostlobby", false)
log.Print("manager.desktopSetup: lobby id entered already has a host user.")
return
}
err := l.addNewUser(h.Username, h.Socket)
if err != nil {
(*h.Socket).Emit("hostlobby", false)
log.Print(err)
return
}
(*h.Socket).Emit("hostlobby", true)
l.command <- Update{}
}
/*
An event to attempt to add a new connecting mobile user to the lobby.
*/
type JoinLobby struct {
Username string
Socket *socketio.Socket
}
func (j JoinLobby) execute(l *Lobby) |
/*
An event to force emit update for the list of users in the lobby.
*/
type Update struct {}
func (u Update) execute(l *Lobby) {
l.updateLobby()
}
/*
An event to attempt to prepare the Application server to begin the Application.
*/
type Start struct {}
func (s Start) execute(l *Lobby) {
room := l.lobbyId
var err error
//establish connection
if l.game.connType == "tcp" {
if l.tcpConn == nil {
err = l.connectTcp()
}
} else { //l.game.connType == "socketio"
if l.socket == nil {
err = l.connectSocketio()
}
}
if err != nil {
log.Print(err)
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: true,
Feedback: "Unable to connect to application server.",
}
return
}
l.users[0].send <- GameStart{
Room: room,
Complete: false,
Failed: false,
Feedback: "Connected to application server.",
}
l.createSession()
}
/*
Confirms success of a Lobby being created on the Application server.
*/
type Created struct {}
func (c Created) execute(l *Lobby) {
if l.timeout != nil {
l.timeout <- false
}
}
/*
Also implements MessageServer (multiple interface implementation)
Sets the Lobby.started to true, locking the slice data structure
for removal of users.
*/
func (la Launch) execute(l *Lobby) {
l.started = true
la.Event = "launch"
l.send <- la
}
/*
Struct to execute the removal of a user from the lobby data structure.
Caused by a leave or a kick event.
*/
type RemovedUser struct {
Player float64 `json:"-"`
Username string `json:"username"`
Reason string `json:"reason,omitempty"`
}
func (r RemovedUser) execute(l *Lobby) {
kicked := false
if r.Player == 0 { //kicked by username
kicked = true
for i := 1; i < len(l.users); i++ {
if r.Username == l.users[i].username {
r.Player = float64(i)
break
}
}
}
if r.Player == 0 { //if still not set, does not exist
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was not found in lobby.", r.Username),
}
return
}
err := l.removeUser(r.Player)
if err != nil {
log.Print(err)
l.users[0].send <- Kick{
Response: false,
Feedback: fmt.Sprintf("%s was unable to be removed.", r.Username),
}
return
}
if kicked {
l.users[0].send <- Kick{
Response: true,
Feedback: fmt.Sprintf("%s was removed from the lobby.", r.Username),
}
}
//message removed user:
if r.Reason != "" {
l.users[int(r.Player)].send <- Kicked{
Reason: r.Reason,
}
}
l.users[int(r.Player)].send <- Leave{}
}
/*
Interface type: Messages to be processed by a User instance.
*/
type MessageUser interface {
process(u *User)
}
/*
Emits the leave event to execute a soft leave.
*/
type Leave struct {}
func (l Leave) process(u *User) {
(*u.socket).Emit("leave")//, "You have been removed from the lobby.")
(*u.socket).Emit("disconnect")
}
/*
Emits the application ID matching the one stored in the database, as a
"getappid" event on the user's socket.
*/
type GetAppId struct {
	Appid float64
}

func (g GetAppId) process(u *User) {
	(*u.socket).Emit("getappid", g.Appid)
}
/*
Emits the reason why the user was kicked by the host, as a "kicked"
event on the kicked user's socket.
*/
type Kicked struct {
	Reason string
}

func (k Kicked) process(u *User) {
	(*u.socket).Emit("kicked", k.Reason)
}
/*
Emits the success response of an attempted kick to the host, as a
"kick" event. The whole struct is serialized to JSON for the client.
*/
type Kick struct {
	Response bool   `json:"response"`
	Feedback string `json:"feedback"`
}

func (k Kick) process(u *User) {
	(*u.socket).Emit("kick", k)
}
/*
Emits feedback for the operation of starting an Application on the
respective Application server. On completion the event is additionally
broadcast to every other user in the room.
*/
type GameStart struct {
	Room     string `json:"-"`
	Complete bool   `json:"complete"`
	Failed   bool   `json:"failed"`
	Feedback string `json:"feedback"`
}

func (g GameStart) process(u *User) {
	(*u.socket).Emit("start", g)
	if g.Complete {
		(*u.socket).BroadcastTo(g.Room, "start", g)
	}
}
/*
Struct used to Emit an update of users in the lobby, in a JSON array:
sent to this user directly and broadcast to everyone else in the room
as an "updatelobby" event.
*/
type LobbyUsers struct {
	Room string
	List []LobbyUser
}

func (l LobbyUsers) process(u *User) {
	(*u.socket).Emit("updatelobby", l.List)
	(*u.socket).BroadcastTo(l.Room, "updatelobby", l.List)
}
/*
*** MsgPlayer, also implementing Command type,
uses multiple interface implementation and is used
polymorphically in both the Command and MessageUser channels.
Emits the message payload to a single user's socket.
*/
func (m MsgPlayer) process(u *User) {
	(*u.socket).Emit("msgplayer", m.Msg)
}
/*
*** MsgAll, also implementing Command type,
uses multiple interface implementation and is used
Polymorphically in both the Command and MessageUser channels
*/
func (m MsgAll) process(u *User) {
(*u.socket).Emit("msgall", m.Msg)
(*u.socket).BroadcastTo(m.Room, "msgall", m.Msg)
} | {
if len(l.users) == 0 {
(*j.Socket).Emit("joinlobby", false)
log.Print("manager.desktopSetup: lobby id entered does not have a host user.")
return
}
err := l.addNewUser(j.Username, j.Socket)
if err != nil {
(*j.Socket).Emit("joinlobby", false)
log.Print(err)
return
}
(*j.Socket).Emit("joinlobby", true)
l.command <- Update{}
} | identifier_body |
ja-jp.ts | declare var define: any;
define([], (): IM365LPStrings => {
return {
WebpartTitleLabel: "Web パーツ タイトル (コンテンツのみモード)",
LPViewerPropertyDescription: "カスタム学習プロパティ",
LPViewerPropertyGroupName: "既定のビュー",
WebpartModeLabel: "Web パーツ モードを選択する",
WebpartModeDescription: "Web パーツの表示方法",
ShowNavigationLabel: "カスタム学習ナビゲーション ヘッダーを表示する",
DefaultFilterLabel: "フィルターを選択する",
DefaultFilterDescription: "結果を絞り込むフィルターを選択します。一度に 1 つの種類のみ、フィルター処理できます。",
DefaultCategoryLabel: "カテゴリにフィルターを適用する",
DefaultCategoryDescription: "結果を絞り込むカテゴリを選択します。",
DefaultSubCategoryLabel: "サブカテゴリにフィルターを適用する",
DefaultSubCategoryDescription: "結果を絞り込むサブカテゴリを選択します。",
DefaultPlaylistLabel: "再生リストにフィルターを適用する",
DefaultPlaylistDescription: "結果を絞り込むカテゴリを選択します。",
DefaultAssetLabel: "資産を選択する",
DefaultAssetDescription: "再生リストから表示する資産を選択します。",
FilterPanelHeader: "最適なコースを選択する",
FilterPanelAudienceLabel: "私は",
FilterPanelSkillsetLabel: "自分のスキル セットは",
NavigationHome: "ホーム",
SearchResultItemPlayListLabel: "再生リスト:",
SearchResultItemCategoryLabel: "カテゴリ:",
SearchPanelPlaceHolderLabel: "再生リストまたは資産を検索", | PlaylistOverviewDesignMessage: "使用中、この領域には、表示されている再生リスト内の他の資産が表示されます。",
PlaylistOverviewHeading: "再生リストのすべての手順",
BurgerButton: "バーガー",
LinkButton: "リンク",
SearchButton: "検索",
AdministerPlaylist: "再生リストの管理",
NoSearchResults: "検索結果がクエリと一致しません。",
DefaultCDNLabel: "学習ソースを選択する",
DefaultCDNDescription: "学習コンテンツの代替ソースを選択します。",
DescriptionFieldLabel: "説明フィールド",
ImageSelectorLabel: "画像を選択する",
ImageSelectorButton: "画像を選択する",
DetailEditTitle: "タイトル",
DetailEditDescription: "説明",
DetailEditUrl: "Url",
DetailEditCategory: "カテゴリ",
DetailEditCategoryNoData: "使用できるカテゴリはありません",
DetailEditTechnology: "テクノロジ",
DetailEditLevel: "レベル",
DetailEditAudience: "対象ユーザー",
AssetSearchPlaceHolderLabel: "既存の資産を検索する",
CategoryHeading: "新しい下位カテゴリを追加する",
CategoryHeadingLabel: "下位カテゴリ",
CategoryPlaylistSavedMessage: "再生リストが保存されました。",
CategoryPlaylistSaveFailedMessage: "再生リストを保存できませんでした。",
CategoryNewPlayListMessage: "新しい再生リストを変更しました。変更を破棄して続行しますか?",
CategoryEditedPlayListMessage: "この再生リストを変更しました。変更を破棄して続行しますか?",
PlaylistEditAssetSavedMessage: "資産が保存されました。",
PlaylistEditAssetSaveFailedMessage: "資産を保存できませんでした。",
PlaylistEditCreatePlaylistHeader: "新しい再生リストを作成する",
PlaylistEditPlaylistDetailsHeader: "再生リストの詳細:",
PlaylistEditPlaylistAssetsHeader: "再生リストの資産",
PlaylistEditEditLabel: "詳細を編集する",
PlaylistEditSaveLabel: "詳細を保存する",
PlaylistEditCancelLabel: "詳細をキャンセルする",
PlaylistEditCloseLabel: "再生リストを閉じる",
AssetDetailsCreateHeader: "新しい資産を作成する",
AssetDetailsManageHeader: "カスタム資産の管理:",
AssetDetailsDetailsHeader: "資産の詳細:",
AssetDetailsSaveLabel: "資産を保存する",
AssetDetailsCancelLabel: "資産をキャンセルする",
AssetDetailsCloseLabel: "資産を閉じる",
AssetDetailsOpenPage: "ページを開く",
TechnologyHeadingLabel: "テクノロジ",
SubcategoryHeadingLabel: "下位カテゴリの見出し",
PlaylistItemPlaylistHeadingLabel: "再生リスト",
PlaylistItemPlaylistDelete: "再生リストを削除する",
PlaylistEditAssetNewLabel: "新しい資産",
PlaylistRemove: "再生リストから削除する",
ImageSelectorImageAlt: "カスタム再生リスト",
ImageSelectorUrlPlaceholder: "イメージ Url",
CategoryHeadingAddPlaylistToSubcategory: "新しい再生リストを下位カテゴリに追加する",
AdminMenuTechnologyLabel: "テクノロジ",
AdminMenuCategoryLabel: "カテゴリ",
Show: "表示",
Hide: "非表示",
Add: "追加",
Edit: "編集",
CloseButton: "閉じる",
EditButton: "編集",
StepButton: "手順",
MoveUpButton: "上に移動",
MoveDownButton: "下に移動",
SaveButton: "保存",
CancelButton: "キャンセル",
UpdateButton: "更新する",
DeleteButton: "削除",
DetailEditNewPageMessage: "サイト ページ ライブラリの新しい資産ページ。",
DetailEditExistingPageMessage: "既存ページの URL を入力します。",
DetailEditNewPageButton: "資産ページを作成する",
DetailEditExistingPageButton: "Url を入力する",
AdminAddCdnLabel: "コンテンツ パックを追加する",
AdminEditCdnLabel: "コンテンツ パックを編集する",
AdminDeleteCdnLabel: "コンテンツ パックを削除する",
AdminCdnIdLabel: "コンテンツ パック ID/パートナー ID",
AdminCdnDisplayName: "表示名",
AdminCdnBaseUrl: "ベース URL",
AdminCustomCdnTitle: "カスタム コンテンツ パック",
AdminCustomCdnDescription: "カスタム コンテンツ パックは、高度な機能です。Web コンテンツの管理経験のある管理者だけが使用してください。信頼できないコンテンツ ソースにより、サイトに安全でないコンテンツが取り込まれる場合があります。信頼できるソースのみを追加する必要があります。詳細については、「学習経路の文書化のパートナー ガイダンス」」を参照してください。",
AdminConfirmContentPack: "新しいコンテンツ パックをプロビジョニングできる新しいブラウザー タブが開始されました。正常に完了したら、下の [完了] をクリックして代替コンテンツ リストを更新します。エラーがある場合、またはコンテンツ パックをインストールしない場合は、[キャンセル] をクリックしてください。いつでも戻ることができます。",
AdminCdnCompleteButton: "完了",
AdminCdnCancelButton: "キャンセル",
AdminCdnSaveButton: "保存",
AdminCdnUpdateButton: "更新する",
AdminRemoveCdn: "選択されている CDN を削除しますか?",
AdminAbout: "Web パーツについて",
DocumentationLinkLabel: "M365 の学習経路の文書を開く",
CategoryCopyPlaylistFail: "再生リストをコピーできませんでした。詳細については、管理者にお問い合わせるか、ブラウザー コンソールを参照してください。",
PlaylistEditCopyLabel: "再生リストをコピーする",
PlaylistNext: "次へ",
PlaylistPrevious: "前へ",
PlaylistFullScreen: "全画面表示モードを切り替える",
FilterNotSet: "すべて",
AdminVersionUpdateNotice: "インストールされている Microsoft 365 の学習経路 Web パーツは最新ではありません。現在、バージョン %0% を実行しており、最新バージョンは %1% です。",
AdminVersionUpdateInstructions: "「ソリューションの更新」の手順を参照してください。",
AdminSecurityMessage: "Microsoft 365の学習経路を管理する権限がありません。所有者またはメンバー グループへの参加について管理者に相談してください。",
AdminConfigIssueMessage: "Microsoft 365 の学習経路には構成の問題があります。管理者に問い合わせてください。[管理者:詳細なログについては、ブラウザー コンソールをご覧ください。技術サポートについては、https://github.com/pnp/custom-learning-office-365/issues の問題の一覧をご確認ください。]",
AdminAwaitingUrlPrompt: "Url:タイトルを待っています...",
CreatingPage: "ページを作成する",
TryAgain: "もう一度お試しください",
AddLanguagePlaceholder: "言語の追加",
NotApplicable: "該当なし",
DataUpgradeTitle: "データのアップグレード",
DataUpgradeIntro: "Microsoft 365 の学習経路をメジャーな新しいリリースに更新しました。アップグレード プロセスを開始するには、[開始] を押します。",
DataUpgradeStart: "開始",
DataUpgradeClose: "閉じる",
DataUpgradeIssue: "アップデートの実行中に問題が発生した場合は、Microsoft 365 の学習経路 GitHub の問題リストに ",
DataUpgradeIssueLink: "問題を送信してサポートを依頼できます。",
DataUpgradeLog: "アップグレード ログ",
DataUpgradeComplete: "アップグレードが完了しました。",
DataUpgradeErrors: "エラー",
LogLevel: "レベル",
LogMessage: "メッセージ",
AboutGroupHeader: "M365 の学習経路",
AboutGroupTitle: "現在の構成*",
AboutGroupTitle2: "(* は現在のコンテンツ パック固有のものです)",
AboutLearningSiteUrl: "学習サイトの Url:",
AboutBaseCDNPath: "ベース CDN パス:",
AboutTelemetryStatus: "テレメトリの状態:",
AboutCurrentWPVersion: "現在の Web パーツのバージョン:",
AboutMultilingualEnabled: "多言語ページの有効化:",
AboutMultilingualLanguages: "多言語ページの翻訳言語:",
AboutConfiguredLanguages: "構成された言語:",
AboutSupportedLanguages: "コンテンツパックでサポートされる言語:",
AboutDefaultSiteLanguage: "既定のサイトの言語:",
AboutCurrentUserLanguage: "現在のユーザーの言語:",
AboutContentPackAssetOrigins: "コンテンツ パックの資産の元:",
AboutCurrentCDN: "現在の CDN:",
AboutAllCDNs: "すべての CDN:",
AboutCacheLastUpdate: "キャッシュの最終更新日:",
DocumentLibraries: "ドキュメント ライブラリ",
OpenButtonLabel: "開く",
CancelButtonLabel: "キャンセル",
UploadFileHeader: "ファイルをアップロードする",
ChangeFileLinkLabel: "ファイルを変更する",
ChooseFileLinkLabel: "ファイルを選択する",
AddFileButtonLabel: "ファイルを追加する",
WebSearchLinkLabel: "Web 検索",
CreativeCommonsMessage: "これらの結果には Creative Commons ライセンスがタグ付けされています。ライセンスを確認し、準拠していることを確認します。",
CopyrightWarning: "著作権など他人の権利を尊重する責任があります。",
CopyrightUrl: "https://www.microsoft.com/ja-JP/legal/copyright/default.aspx",
LearnMoreLink: "詳細については、こちらを参照してください。",
NoResultsBadEnglish: "***該当する結果はありません。フィルター オプションの変更を試す",
SearchResultAlt: "{0} 件の画像結果。",
SearchResultAriaLabel: "Enter キーを押して、新しいタブで画像ソースを開きます。",
SearchBoxPlaceholder: "Web 検索",
PoweredByBing: "Bing 提供",
SizeOptionAll: "すべて",
SizeOptionExtraLarge: "巨大",
SizeOptionLarge: "大",
SizeOptionMedium: "中",
SizeOptionSmall: "小",
LayoutOptionAll: "すべて",
LayoutOptionSquare: "正方形",
LayoutOptionTall: "高い",
LayoutOptionWide: "広い",
LicenseOptionAll: "すべて",
LicenseOptionAny: "Creative Commons のみ",
RecentDocumentsHeader: "最近使用したドキュメント",
Loading: "読み込み中...",
NoRecentFiles: "最近使ったファイルはありません",
NoRecentFilesDescription: "サイトでファイルを選択するか、デバイスからファイルをアップロードしてください。",
EditedByNamePlate: "編集者 ",
LinkHeader: "リンクから",
LinkFileInstructions: "OneDrive for Business または SharePoint Online のファイルへのリンクを貼り付ける",
NoExternalLinksValidationMessage: "組織内のファイルへのリンクのみをサポートします。",
ProvidedValueIsInvalid: "指定した値は無効です。",
ImageAriaLabelTemplate: ".{0} 件の画像",
DocumentLabelTemplate: "{0}、ドキュメント、修正済み {1}、{2} によって編集、非公開",
ODPhotoIconUrl: "https://spoprod-a.akamaihd.net/files/odsp-next-prod_2019-01-18_20190124.001/odsp-media/images/itemtypes/16_2x/photo.png",
FolderLabelTemplate: "{0}、フォルダー、修正済み {1}、{2} によって編集、{3} 件のアイテム、非公開",
FolderBackPlate: "https://static2.sharepointonline.com/files/fabric/office-ui-fabric-react-assets/foldericons/folder-large_backplate.svg",
FolderFrontPlate: "https://static2.sharepointonline.com/files/fabric/office-ui-fabric-react-assets/foldericons/folder-large_frontplate_nopreview.svg",
EmptyFileSize: "0 バイト",
SizeUnit: [
"バイト",
"KB",
"MB",
"GB",
"TB",
"PB",
"EB",
"ZB",
"YB"
],
FilePickerHeader: "ファイル ピッカー",
OrgAssetsTabLabel: "組織から提供される画像とファイル",
RecentLinkLabel: "最近使った項目",
SiteLinkLabel: "サイト",
UploadLinkLabel: "アップロード",
FromLinkLinkLabel: "リンクから",
TypeAriaLabel: "ファイルの種類の列の操作、押してファイルの種類を並べ替える",
FolderIconUrl: "https://spoprod-a.akamaihd.net/files/odsp-next-prod_2019-01-11_20190116.001/odsp-media/images/itemtypes/20/folder.svg",
PhotoIconUrl: "https://spoprod-a.akamaihd.net/files/odsp-next-prod_2019-01-11_20190116.001/odsp-media/images/itemtypes/20_2x/photo.png",
FolderAltText: "フォルダー",
ImageAltText: ".{0} 件の画像",
NameField: "名前",
SortedAscending: "昇順で並べ替え",
SortedDescending: "降順で並べ替え",
ModifiedField: "更新日時",
ModifiedByField: "変更者",
FileSizeField: "ファイル サイズ",
OneDriveEmptyFolderAlt: "フォルダーを空にする",
OneDriveEmptyFolderDescription: "ファイルを追加するには、OneDrive に移動します。コンピューター用の OneDrive アプリを使用して、このフォルダーにファイルを追加することもできます。",
OneDriveEmptyFolderIconUrl: "https://spoprod-a.akamaihd.net/files/odsp-next-prod_2019-01-18_20190124.001/odsp-media/images/emptyfolder/empty_folder.svg",
OneDriveEmptyFolderTitle: "このフォルダーは空です",
ListLayoutAriaLabel: "表示オプション。{0} {1}。",
ListLayoutCompact: "コンパクト表示",
ListLayoutCompactDescription: "コンパクト リスト内のアイテムと詳細を表示する",
ListLayoutList: "リスト ビュー",
ListLayoutListDescription: "リスト内のアイテムと詳細を表示する",
ListLayoutTile: "タイル ビュー",
ListLayoutTileDescription: "タイル プレビューでアイテムを表示する",
ListOptionsAlt: "表示オプション。{0} 件が選択されました。",
ListOptionsTitle: "表示オプション メニューを開く",
Selected: "選択されました",
CategoryTranslationNotAvailable: "選択した下位カテゴリの翻訳は、この言語では使用できません。",
Abandoned: "中止",
AbandonedPlaylist: "再生リスト",
RemoveLanguageLabel: "言語を削除する",
PropertyPaneNone: "なし",
WebPartModeFull: "完全",
WebPartModeContentOnly: "コンテンツのみ",
PropertyPaneFilterCategory: "カテゴリ",
PropertyPaneFilterSubCategory: "サブカテゴリ",
PropertyPaneFilterPlaylist: "再生リスト",
AdminSavingNotification: "保存中..",
DataUpgradeReview: "レビュー",
DataUpgradeMultilingual: "多国語サポートはラーニング パスに対して有効になっています。サイトで有効になっている言語を確認し、サポートの必要がない言語をオフにします。",
M365Title: "Microsoft 365 学習経路",
ValidateBase: "The base url for the CDN must end in a (/).",
CustomizeSort: "Customize Sort",
ResetSort: "Reset Sort"
};
}); | LinkPanelCopyLabel: "コピー",
HeaderPlaylistPanelCurrentPlaylistLabel: "現在の再生リスト:",
HeaderPlaylistPanelAdminHeader: "管理", | random_line_split |
torrent.go | /*
* Copyright (c) 2016 Mark Samman <https://github.com/marksamman/gotorrent>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package main
import (
"bufio"
"bytes"
"crypto/sha1"
"encoding/binary"
"errors"
"fmt"
"math"
"net"
"os"
"path/filepath"
"github.com/marksamman/bencode"
)
// Torrent holds the full state of a single torrent: metadata parsed
// from the .torrent file, piece bookkeeping, connected peers, trackers,
// and the channels that funnel all events into the download() loop,
// which owns every mutable field (no locks are used).
type Torrent struct {
	totalPeerCount  int    // peers handed to us by trackers, minus skipped duplicates
	files           []File // on-disk files with absolute byte offsets into the torrent data
	pieces          []TorrentPiece
	activePeers     []*Peer
	completedPieces int
	knownPeers      map[string]struct{} // peer IPs already attempted, to avoid duplicates
	name            string
	comment         string
	pieceLength     int64 // length of every piece except possibly the last
	totalSize       int64 // sum of all file lengths
	// Event channels consumed by download(); peers and trackers are the
	// producers.
	pieceChannel               chan *PieceMessage
	bitfieldChannel            chan *BitfieldMessage
	havePieceChannel           chan *HavePieceMessage
	addPeerChannel             chan *Peer
	removePeerChannel          chan *Peer
	blockRequestChannel        chan *BlockRequestMessage
	activeTrackerChannel       chan int
	stoppedTrackerChannel      chan int
	requestAnnounceDataChannel chan int
	peersChannel               chan interface{}
	fileWriteDone              chan struct{}
	decrementPeerCount         chan struct{}
	handshake                  []byte // prebuilt handshake; bytes 28..48 are the info hash
	uploaded                   uint64 // total bytes served to peers
	pendingFileWrites          int
	trackers                   []*Tracker
	activeTrackers             []int // indices into trackers that reported themselves active
}
// TorrentPiece is the per-piece bookkeeping entry.
type TorrentPiece struct {
	done     bool   // downloaded and hash-verified
	busyness int    // number of peers currently asked to fetch this piece
	hash     string // 20-byte SHA-1 digest from the .torrent "pieces" string
}

// File is one file of the torrent; begin is its starting byte offset
// within the concatenated torrent data.
type File struct {
	path   string
	begin  int64
	length int64
}

// PieceMessage carries a fully assembled piece received from a peer.
type PieceMessage struct {
	from  *Peer
	index uint32
	data  []byte
}

// BitfieldMessage carries a peer's piece-availability bitfield.
type BitfieldMessage struct {
	from *Peer
	data []byte
}

// HavePieceMessage announces that a peer has obtained one piece.
type HavePieceMessage struct {
	from  *Peer
	index uint32
}

// BlockRequestMessage is a peer's request for a block of a piece;
// begin is the byte offset within the piece.
type BlockRequestMessage struct {
	from   *Peer
	index  uint32
	begin  uint32
	length uint32
}
// validatePath ensures path, once made absolute, refers to base itself
// or to a location strictly inside base. It guards file creation
// against directory traversal via crafted names in torrent metadata.
//
// Returns a non-nil error when the path cannot be made absolute or
// escapes base.
func (torrent *Torrent) validatePath(base string, path string) error {
	absolutePath, err := filepath.Abs(path)
	if err != nil {
		return err
	}
	if len(absolutePath) < len(base) {
		return errors.New("path is too short")
	}
	if base != absolutePath[:len(base)] {
		return errors.New("path mismatch")
	}
	// A pure prefix match is not enough: "/x/DownloadsEvil" shares the
	// prefix "/x/Downloads". Require the match to end exactly at base
	// or to continue with a path separator.
	if len(absolutePath) > len(base) && absolutePath[len(base)] != filepath.Separator {
		return errors.New("path mismatch")
	}
	return nil
}
// parseTrackerURL registers url as a new Tracker on the torrent,
// choosing the tracker protocol from the URL scheme. Only http(s) and
// udp announce URLs are accepted; anything else yields an error.
func (torrent *Torrent) parseTrackerURL(url string) error {
	if len(url) < 7 {
		return errors.New("announce URL is too short")
	}
	protocol := TrackerHTTP
	switch {
	case url[0:4] == "http":
		// protocol already set to TrackerHTTP
	case url[0:3] == "udp":
		protocol = TrackerUDP
	default:
		return errors.New("unsupported tracker protocol")
	}
	torrent.trackers = append(torrent.trackers, &Tracker{
		id:          len(torrent.trackers),
		announceURL: url,
		protocol:    protocol,
		torrent:     torrent,
	})
	return nil
}
// open parses the .torrent file at filename and initializes the
// torrent: trackers, piece hashes, the peer handshake, and the on-disk
// file list under the Downloads directory. Files that already exist
// are scanned so previously completed pieces are not re-downloaded.
func (torrent *Torrent) open(filename string) error {
	file, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer file.Close()
	data, err := bencode.Decode(file)
	if err != nil {
		return err
	}
	// Prefer the multi-tracker "announce-list" extension; fall back to
	// the single "announce" URL. parseTrackerURL errors (unsupported
	// protocols) are deliberately ignored so one bad URL does not abort
	// the whole load.
	if announceLists, ok := data["announce-list"].([]interface{}); ok {
		for _, announceList := range announceLists {
			for _, announceURL := range announceList.([]interface{}) {
				torrent.parseTrackerURL(announceURL.(string))
			}
		}
	} else {
		torrent.parseTrackerURL(data["announce"].(string))
	}
	if comment, ok := data["comment"]; ok {
		torrent.comment = comment.(string)
	}
	info := data["info"].(map[string]interface{})
	torrent.name = info["name"].(string)
	torrent.pieceLength = info["piece length"].(int64)
	infoHash := sha1.Sum(bencode.Encode(info))
	// Build the BitTorrent handshake once; it is reused for every peer.
	var buffer bytes.Buffer
	buffer.WriteByte(19) // length of the string "BitTorrent protocol"
	buffer.WriteString("BitTorrent protocol")
	buffer.WriteString("\x00\x00\x00\x00\x00\x00\x00\x00") // reserved
	buffer.Write(infoHash[:])
	buffer.Write(client.peerID)
	torrent.handshake = buffer.Bytes()
	// "pieces" is a concatenation of 20-byte SHA-1 digests, one per piece.
	pieces := info["pieces"].(string)
	for i := 0; i < len(pieces); i += 20 {
		torrent.pieces = append(torrent.pieces, TorrentPiece{
			hash: pieces[i : i+20],
		})
	}
	if err := os.Mkdir("Downloads", 0700); err != nil && !os.IsExist(err) {
		return err
	}
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	base := filepath.Join(cwd, "Downloads")
	// Set files
	if files, exists := info["files"]; exists {
		// Multi-file torrent: everything goes under Downloads/<name>/.
		dirName := filepath.Join("Downloads", info["name"].(string))
		if err := torrent.validatePath(base, dirName); err != nil {
			return err
		}
		base := filepath.Join(cwd, dirName)
		for _, v := range files.([]interface{}) {
			v := v.(map[string]interface{})
			torrent.totalSize += v["length"].(int64)
		}
		// Multiple files: each entry carries a path list and a length;
		// begin tracks the running byte offset within the torrent.
		var begin int64
		for k, v := range files.([]interface{}) {
			v := v.(map[string]interface{})
			// Set up directory structure
			pathList := v["path"].([]interface{})
			pathElements := []string{dirName}
			for i := 0; i < len(pathList)-1; i++ {
				pathElements = append(pathElements, pathList[i].(string))
			}
			path := filepath.Join(pathElements...)
			fullPath := filepath.Join(path, pathList[len(pathList)-1].(string))
			if err := torrent.validatePath(base, fullPath); err != nil {
				return err
			}
			if len(path) != 0 {
				if err := os.MkdirAll(path, 0700); err != nil {
					return err
				}
			}
			length := v["length"].(int64)
			// If the file already exists, scan it so completed pieces
			// are skipped during download.
			file, err := os.OpenFile(fullPath, os.O_RDWR, 0600)
			if err == nil {
				torrent.findCompletedPieces(file, begin, length, k)
				file.Close()
			}
			torrent.files = append(torrent.files, File{fullPath, begin, length})
			begin += length
		}
	} else {
		// Single file
		fileName := filepath.Join("Downloads", info["name"].(string))
		if err := torrent.validatePath(base, fileName); err != nil {
			return err
		}
		length := info["length"].(int64)
		torrent.totalSize = length
		file, err := os.OpenFile(fileName, os.O_RDWR, 0600)
		if err == nil {
			torrent.findCompletedPieces(file, 0, length, 0)
			file.Close()
		}
		torrent.files = []File{{fileName, 0, length}}
	}
	return nil
}
// findCompletedPieces scans an already-existing file (covering torrent
// bytes [begin, begin+length)) and marks every piece whose content
// hashes correctly as done, so it is not re-downloaded. fileIndex is
// this file's position in torrent.files and is used to walk backwards
// into earlier files when the first overlapping piece starts before
// this file. All failures simply abort the scan (best effort).
func (torrent *Torrent) findCompletedPieces(file *os.File, begin, length int64, fileIndex int) {
	fi, err := file.Stat()
	if err != nil {
		return
	}
	size := fi.Size()
	if size == 0 {
		return
	} else if size > length {
		// File is larger than the metadata says it should be; discard
		// its contents rather than trust them.
		file.Truncate(0)
		return
	}
	buf := make([]byte, torrent.pieceLength)
	var pieceIndex uint32
	if begin != 0 {
		pieceIndex = uint32(begin / torrent.pieceLength)
	}
	fileEnd := begin + length
	pos := int64(pieceIndex) * torrent.pieceLength
	pieceLength := torrent.getPieceLength(pieceIndex)
	if pos+pieceLength > fileEnd {
		return
	}
	if pos < begin {
		// The first piece overlapping this file starts inside an
		// earlier file: read this piece's tail from the current file,
		// then fill its head by walking backwards through the
		// preceding files.
		bufPos := begin - pos
		if _, err := file.Read(buf[bufPos:]); err != nil {
			return
		}
		for bufPos != 0 {
			fileIndex--
			f := torrent.files[fileIndex]
			handle, err := os.OpenFile(f.path, os.O_RDONLY, 0600)
			if err != nil {
				return
			}
			defer handle.Close()
			if bufPos > f.length {
				if n, err := handle.Read(buf[bufPos-f.length : bufPos]); err != nil || int64(n) != f.length {
					return
				}
				bufPos -= f.length
			} else {
				if n, err := handle.ReadAt(buf[:bufPos], f.length-bufPos); err != nil || int64(n) != bufPos {
					return
				}
				break
			}
		}
		if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
			torrent.pieces[pieceIndex].done = true
			torrent.completedPieces++
		}
		pos += pieceLength
		pieceIndex++
	}
	if _, err := file.Seek(pos-begin, os.SEEK_SET); err != nil {
		return
	}
	// Hash every piece lying entirely within this file.
	// NOTE(review): Read is not guaranteed to fill buf in one call; a
	// short read is treated as failure and stops the scan early —
	// confirm this is acceptable (it only costs re-downloading).
	reader := bufio.NewReaderSize(file, int(pieceLength))
	for pos+torrent.pieceLength <= fileEnd {
		if n, err := reader.Read(buf); err != nil || n != len(buf) {
			return
		}
		if torrent.checkPieceHash(buf, pieceIndex) {
			torrent.pieces[pieceIndex].done = true
			torrent.completedPieces++
		}
		pos += torrent.pieceLength
		pieceIndex++
	}
	// Handle the final (possibly shorter) piece of the torrent when
	// this file reaches the torrent's end.
	if int(pieceIndex) == len(torrent.pieces)-1 {
		pieceLength = torrent.getLastPieceLength()
		if n, err := reader.Read(buf[:pieceLength]); err != nil || int64(n) != pieceLength {
			return
		}
		if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
			torrent.pieces[pieceIndex].done = true
			torrent.completedPieces++
		}
	}
}
// getTrackerRequestData builds the stats payload for a tracker
// announce: event code, bytes downloaded/uploaded, and bytes remaining.
func (torrent *Torrent) getTrackerRequestData(event uint32) *TrackerRequestData {
	done := torrent.getDownloadedSize()
	remaining := uint64(torrent.totalSize - done)
	return &TrackerRequestData{
		event:      event,
		downloaded: uint64(done),
		uploaded:   torrent.uploaded,
		remaining:  remaining,
	}
}
// startTrackers announces the "started" event to every configured
// tracker, launching each tracker in its own goroutine.
func (torrent *Torrent) startTrackers() {
	data := torrent.getTrackerRequestData(TrackerEventStarted)
	for _, tracker := range torrent.trackers {
		go tracker.start(data)
	}
}
// stopTrackers sends a final "stopped" announce to every active
// tracker and then signals each one to shut down. Both channel sends
// happen in a helper goroutine per tracker so the caller (the download
// event loop) never blocks.
func (torrent *Torrent) stopTrackers() {
	data := torrent.getTrackerRequestData(TrackerEventStopped)
	for _, trackerID := range torrent.activeTrackers {
		go func(announceChannel chan *TrackerRequestData, stopChannel chan struct{}) {
			announceChannel <- data
			stopChannel <- struct{}{}
		}(torrent.trackers[trackerID].announceChannel, torrent.trackers[trackerID].stopChannel)
	}
}
// announceToTrackers pushes an announce carrying the given event code
// to every active tracker, one goroutine per tracker so the download
// event loop is never blocked by a slow tracker.
func (torrent *Torrent) announceToTrackers(event uint32) {
	data := torrent.getTrackerRequestData(event)
	for _, trackerID := range torrent.activeTrackers {
		go func(channel chan *TrackerRequestData) {
			channel <- data
		}(torrent.trackers[trackerID].announceChannel)
	}
}
// download runs the torrent's central event loop. All torrent state is
// mutated only inside this goroutine; peers and trackers communicate
// with it exclusively over channels, so no locking is needed. The loop
// runs until every piece is complete and no known peers remain, then
// drains pending disk writes and waits for the trackers to stop.
func (torrent *Torrent) download() {
	// Nothing to do if the on-disk scan already found everything.
	if torrent.completedPieces == len(torrent.pieces) {
		return
	}
	torrent.activeTrackerChannel = make(chan int)
	torrent.stoppedTrackerChannel = make(chan int)
	torrent.requestAnnounceDataChannel = make(chan int)
	torrent.peersChannel = make(chan interface{})
	torrent.startTrackers()
	torrent.pieceChannel = make(chan *PieceMessage)
	torrent.bitfieldChannel = make(chan *BitfieldMessage)
	torrent.havePieceChannel = make(chan *HavePieceMessage)
	torrent.addPeerChannel = make(chan *Peer)
	torrent.removePeerChannel = make(chan *Peer)
	torrent.blockRequestChannel = make(chan *BlockRequestMessage)
	torrent.fileWriteDone = make(chan struct{})
	torrent.decrementPeerCount = make(chan struct{})
	torrent.knownPeers = make(map[string]struct{})
	for torrent.completedPieces != len(torrent.pieces) || torrent.totalPeerCount != 0 {
		select {
		case havePieceMessage := <-torrent.havePieceChannel:
			torrent.handleHaveMessage(havePieceMessage)
		case bitfieldMessage := <-torrent.bitfieldChannel:
			torrent.handleBitfieldMessage(bitfieldMessage)
		case pieceMessage := <-torrent.pieceChannel:
			torrent.handlePieceMessage(pieceMessage)
		case blockRequestMessage := <-torrent.blockRequestChannel:
			torrent.handleBlockRequestMessage(blockRequestMessage)
		case peer := <-torrent.addPeerChannel:
			torrent.handleAddPeer(peer)
		case peer := <-torrent.removePeerChannel:
			torrent.handleRemovePeer(peer)
		case <-torrent.fileWriteDone:
			torrent.pendingFileWrites--
		case <-torrent.decrementPeerCount:
			torrent.totalPeerCount--
		case peers := <-torrent.peersChannel:
			torrent.connectToPeers(peers)
		case trackerID := <-torrent.activeTrackerChannel:
			torrent.activeTrackers = append(torrent.activeTrackers, trackerID)
			fmt.Printf("[%s] %d active trackers\n", torrent.name, len(torrent.activeTrackers))
		case trackerID := <-torrent.requestAnnounceDataChannel:
			// A tracker wants fresh announce stats; reply from a
			// goroutine so the event loop never blocks on a tracker.
			go func(channel chan *TrackerRequestData, data *TrackerRequestData) {
				channel <- data
			}(torrent.trackers[trackerID].announceChannel, torrent.getTrackerRequestData(TrackerEventNone))
		}
	}
	torrent.stopTrackers()
	// Wait for the file writer to flush everything we scheduled.
	if torrent.pendingFileWrites != 0 {
		fmt.Printf("[%s] Waiting for %d pending file writes...\n", torrent.name, torrent.pendingFileWrites)
		for torrent.pendingFileWrites != 0 {
			<-torrent.fileWriteDone
			torrent.pendingFileWrites--
		}
	}
	// Wait for every active tracker to acknowledge the stop, draining
	// any other tracker messages that may still arrive meanwhile.
	if len(torrent.activeTrackers) != 0 {
		fmt.Printf("[%s] Waiting for %d trackers to stop...\n", torrent.name, len(torrent.activeTrackers))
		for len(torrent.activeTrackers) != 0 {
			select {
			case trackerID := <-torrent.stoppedTrackerChannel:
				for k, v := range torrent.activeTrackers {
					if v == trackerID {
						torrent.activeTrackers = append(torrent.activeTrackers[:k], torrent.activeTrackers[k+1:]...)
						break
					}
				}
			// Handle other messages that a Tracker may send
			case <-torrent.activeTrackerChannel:
			case <-torrent.peersChannel:
			case <-torrent.requestAnnounceDataChannel:
			}
		}
	}
}
// checkPieceHash reports whether data's SHA-1 digest matches the
// expected 20-byte hash recorded for piece pieceIndex.
func (torrent *Torrent) checkPieceHash(data []byte, pieceIndex uint32) bool {
	expected := []byte(torrent.pieces[pieceIndex].hash)
	actual := sha1.Sum(data)
	return bytes.Equal(actual[:], expected)
}
// getPieceLength returns the byte length of piece pieceIndex: the
// standard piece length for every piece except the last, which only
// covers whatever remains of the torrent's total size.
func (torrent *Torrent) getPieceLength(pieceIndex uint32) int64 {
	last := uint32(len(torrent.pieces)) - 1
	if pieceIndex != last {
		return torrent.pieceLength
	}
	if remainder := torrent.totalSize % torrent.pieceLength; remainder != 0 {
		return remainder
	}
	return torrent.pieceLength
}
func (torrent *Torrent) getLastPieceLength() int64 |
// getDownloadedSize returns the number of bytes covered by pieces that
// have been downloaded and hash-verified so far.
func (torrent *Torrent) getDownloadedSize() int64 {
	var total int64
	for i := range torrent.pieces {
		if !torrent.pieces[i].done {
			continue
		}
		total += torrent.getPieceLength(uint32(i))
	}
	return total
}
// connectToPeers dials the peers returned by a tracker announce. peers
// is either a compact string (6 bytes per peer: IPv4 address followed
// by a big-endian port) or a list of bencode dictionaries carrying
// "ip", "port" and "peer id". Peers whose IP was already attempted are
// skipped, and totalPeerCount is adjusted accordingly.
func (torrent *Torrent) connectToPeers(peers interface{}) {
	switch peers := peers.(type) {
	case string:
		// Compact format: fixed 6-byte records.
		torrent.totalPeerCount += len(peers) / 6
		for i := 0; i < len(peers); i += 6 {
			peer := NewPeer(torrent)
			peer.ip = net.IPv4(peers[i], peers[i+1], peers[i+2], peers[i+3])
			if _, known := torrent.knownPeers[peer.ip.String()]; known {
				torrent.totalPeerCount--
				continue
			}
			torrent.knownPeers[peer.ip.String()] = struct{}{}
			peer.port = binary.BigEndian.Uint16([]byte(peers[i+4:]))
			go peer.connect()
		}
	case []interface{}:
		// Dictionary format: one map per peer.
		torrent.totalPeerCount += len(peers)
		for _, dict := range peers {
			dict := dict.(map[string]interface{})
			addr, err := net.ResolveIPAddr("ip", dict["ip"].(string))
			if err != nil {
				torrent.totalPeerCount--
				continue
			}
			if _, known := torrent.knownPeers[addr.IP.String()]; known {
				torrent.totalPeerCount--
				continue
			}
			torrent.knownPeers[addr.IP.String()] = struct{}{}
			peer := NewPeer(torrent)
			peer.id = dict["peer id"].(string)
			peer.ip = addr.IP
			peer.port = uint16(dict["port"].(int64))
			go peer.connect()
		}
	}
}
// handleHaveMessage records that the sending peer now has the
// announced piece.
func (torrent *Torrent) handleHaveMessage(havePieceMessage *HavePieceMessage) {
	havePieceMessage.from.pieces[havePieceMessage.index] = struct{}{}
	// NOTE(review): requesting a piece here is disabled; piece requests
	// are driven from bitfield and completed-piece events instead.
	// torrent.requestPieceFromPeer(peer)
}
// handleBitfieldMessage records which pieces a peer claims to have by
// decoding its bitfield (most-significant bit first == lowest piece
// index), then asks the peer for a piece to download.
func (torrent *Torrent) handleBitfieldMessage(bitfieldMessage *BitfieldMessage) {
	peer := bitfieldMessage.from
	piecesLen := uint32(len(torrent.pieces))
	var index uint32
	for i := 0; i < len(bitfieldMessage.data); i++ {
		b := bitfieldMessage.data[i]
		for v := byte(128); v != 0; v >>= 1 {
			if b&v != v {
				index++
				continue
			}
			if index >= piecesLen {
				// Set bits past the last piece are padding; ignore.
				break
			}
			peer.pieces[index] = struct{}{}
			index++
		}
	}
	torrent.requestPieceFromPeer(peer)
}
// handlePieceMessage verifies a fully assembled piece received from a
// peer, marks it complete, schedules its bytes for writing into every
// file it spans, and notifies peers/trackers of the progress.
func (torrent *Torrent) handlePieceMessage(pieceMessage *PieceMessage) {
	if torrent.pieces[pieceMessage.index].done {
		return
	}
	if !torrent.checkPieceHash(pieceMessage.data, pieceMessage.index) {
		// Hash mismatch: signal the sending peer's done channel
		// (presumably disconnecting it — confirm in Peer).
		pieceMessage.from.done <- struct{}{}
		return
	}
	torrent.pieces[pieceMessage.index].done = true
	torrent.completedPieces++
	if torrent.completedPieces != len(torrent.pieces) {
		torrent.requestPieceFromPeer(pieceMessage.from)
	}
	// Slice the piece across every file it overlaps and hand each part
	// to the file-writer goroutine; pendingFileWrites tracks completion.
	beginPos := int64(pieceMessage.index) * torrent.pieceLength
	for k := range torrent.files {
		file := &torrent.files[k]
		if beginPos < file.begin {
			break
		}
		fileEnd := file.begin + file.length
		if beginPos >= fileEnd {
			continue
		}
		amountWrite := fileEnd - beginPos
		if amountWrite > int64(len(pieceMessage.data)) {
			amountWrite = int64(len(pieceMessage.data))
		}
		torrent.pendingFileWrites++
		go func(path string, offset int64, data []byte) {
			fileWriterChannel <- &FileWriterMessage{path, offset, data, torrent}
		}(file.path, beginPos-file.begin, pieceMessage.data[:amountWrite])
		pieceMessage.data = pieceMessage.data[amountWrite:]
		beginPos += amountWrite
	}
	fmt.Printf("[%s] Downloaded: %.2f%c\n", torrent.name, float64(torrent.completedPieces)*100/float64(len(torrent.pieces)), '%')
	if torrent.completedPieces == len(torrent.pieces) {
		// Download finished: stop all peers and tell the trackers.
		for _, peer := range torrent.activePeers {
			peer.done <- struct{}{}
		}
		torrent.announceToTrackers(TrackerEventCompleted)
	} else {
		// Advertise the new piece to every connected peer.
		for _, peer := range torrent.activePeers {
			peer.sendHaveChannel <- pieceMessage.index
		}
	}
}
// requestPieceFromPeer asks peer to fetch the least busy incomplete
// piece it claims to have. A completely idle piece is claimed
// immediately; otherwise the piece with the fewest outstanding
// requests wins. Sends nothing when the peer has no useful piece.
func (torrent *Torrent) requestPieceFromPeer(peer *Peer) {
	bestIndex := -1
	bestBusyness := math.MaxInt32
	for i := range torrent.pieces {
		piece := &torrent.pieces[i]
		if piece.done {
			continue
		}
		if _, has := peer.pieces[uint32(i)]; !has {
			continue
		}
		if piece.busyness == 0 {
			// Nobody is working on this piece yet — take it right away.
			piece.busyness = 1
			peer.requestPieceChannel <- uint32(i)
			return
		}
		if piece.busyness < bestBusyness {
			bestBusyness = piece.busyness
			bestIndex = i
		}
	}
	if bestIndex >= 0 {
		torrent.pieces[bestIndex].busyness++
		peer.requestPieceChannel <- uint32(bestIndex)
	}
}
// handleAddPeer registers a freshly connected peer. If the download is
// already complete the peer is immediately told it is done.
func (torrent *Torrent) handleAddPeer(peer *Peer) {
	torrent.activePeers = append(torrent.activePeers, peer)
	fmt.Printf("[%s] %d active peers\n", torrent.name, len(torrent.activePeers))
	if torrent.completedPieces == len(torrent.pieces) {
		peer.done <- struct{}{}
	}
}
// handleRemovePeer drops peer from the active set and signals its done
// channel so the peer goroutine shuts down.
func (torrent *Torrent) handleRemovePeer(peer *Peer) {
	active := torrent.activePeers
	for i, candidate := range active {
		if candidate != peer {
			continue
		}
		torrent.activePeers = append(active[:i], active[i+1:]...)
		break
	}
	peer.done <- struct{}{}
	fmt.Printf("[%s] %d active peers\n", torrent.name, len(torrent.activePeers))
}
// handleBlockRequestMessage serves an upload request from a peer: it
// validates the request against our completed pieces, reads the block
// from disk (possibly spanning several files) and sends it back.
// Invalid requests signal the peer's done channel instead.
func (torrent *Torrent) handleBlockRequestMessage(blockRequestMessage *BlockRequestMessage) {
	if blockRequestMessage.index >= uint32(len(torrent.pieces)) {
		blockRequestMessage.from.done <- struct{}{}
		return
	}
	if !torrent.pieces[blockRequestMessage.index].done {
		blockRequestMessage.from.done <- struct{}{}
		return
	}
	end := int64(blockRequestMessage.begin) + int64(blockRequestMessage.length)
	if end > torrent.getPieceLength(blockRequestMessage.index) {
		blockRequestMessage.from.done <- struct{}{}
		return
	}
	block := make([]byte, blockRequestMessage.length)
	var pos int64
	// Absolute offset of the requested block within the torrent data.
	fileOffset := int64(blockRequestMessage.index)*torrent.pieceLength + int64(blockRequestMessage.begin)
	for k := range torrent.files {
		file := &torrent.files[k]
		cursor := fileOffset + pos
		if cursor < file.begin {
			break
		}
		fileEnd := file.begin + file.length
		if cursor >= fileEnd {
			continue
		}
		// n = bytes of this block that live inside the current file.
		n := fileEnd - cursor
		if n > int64(blockRequestMessage.length)-pos {
			n = int64(blockRequestMessage.length) - pos
		}
		handle, err := os.OpenFile(file.path, os.O_RDONLY, 0600)
		if err != nil {
			// NOTE(review): returning here leaves the peer without any
			// reply for this request — confirm the peer side tolerates
			// a missing block response.
			return
		}
		handle.Seek(cursor-file.begin, os.SEEK_SET)
		end := pos + n
		for pos < end {
			count, err := handle.Read(block[pos:end])
			if err != nil {
				handle.Close()
				return
			}
			pos += int64(count)
		}
		handle.Close()
	}
	torrent.uploaded += uint64(blockRequestMessage.length)
	blockRequestMessage.from.sendPieceBlockChannel <- &BlockMessage{
		blockRequestMessage.index,
		blockRequestMessage.begin,
		block,
	}
}
// getInfoHash returns the torrent's 20-byte SHA-1 info hash, stored at
// bytes 28..48 of the prebuilt handshake message (see open).
func (torrent *Torrent) getInfoHash() []byte {
	return torrent.handshake[28:48]
}
| {
if res := torrent.totalSize % torrent.pieceLength; res != 0 {
return res
}
return torrent.pieceLength
} | identifier_body |
torrent.go | /*
* Copyright (c) 2016 Mark Samman <https://github.com/marksamman/gotorrent>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package main
import (
"bufio"
"bytes"
"crypto/sha1"
"encoding/binary"
"errors"
"fmt"
"math"
"net"
"os"
"path/filepath"
"github.com/marksamman/bencode"
)
type Torrent struct {
totalPeerCount int
files []File
pieces []TorrentPiece
activePeers []*Peer
completedPieces int
knownPeers map[string]struct{}
name string
comment string
pieceLength int64
totalSize int64
pieceChannel chan *PieceMessage
bitfieldChannel chan *BitfieldMessage
havePieceChannel chan *HavePieceMessage
addPeerChannel chan *Peer
removePeerChannel chan *Peer
blockRequestChannel chan *BlockRequestMessage
activeTrackerChannel chan int
stoppedTrackerChannel chan int
requestAnnounceDataChannel chan int
peersChannel chan interface{}
fileWriteDone chan struct{}
decrementPeerCount chan struct{}
handshake []byte
uploaded uint64
pendingFileWrites int
trackers []*Tracker
activeTrackers []int
}
type TorrentPiece struct {
done bool
busyness int
hash string
}
type File struct {
path string
begin int64
length int64
}
type PieceMessage struct {
from *Peer
index uint32
data []byte
}
type BitfieldMessage struct {
from *Peer
data []byte
}
type HavePieceMessage struct {
from *Peer
index uint32
}
type BlockRequestMessage struct {
from *Peer
index uint32
begin uint32
length uint32
}
func (torrent *Torrent) validatePath(base string, path string) error {
if absolutePath, err := filepath.Abs(path); err != nil {
return err
} else if len(absolutePath) < len(base) {
return errors.New("path is too short")
} else if base != absolutePath[:len(base)] {
return errors.New("path mismatch")
}
return nil
}
func (torrent *Torrent) parseTrackerURL(url string) error {
if len(url) < 7 {
return errors.New("announce URL is too short")
}
if url[0:4] == "http" {
torrent.trackers = append(torrent.trackers, &Tracker{
id: len(torrent.trackers),
announceURL: url,
protocol: TrackerHTTP,
torrent: torrent,
})
} else if url[0:3] == "udp" {
torrent.trackers = append(torrent.trackers, &Tracker{
id: len(torrent.trackers),
announceURL: url,
protocol: TrackerUDP,
torrent: torrent,
})
} else {
return errors.New("unsupported tracker protocol")
}
return nil
}
func (torrent *Torrent) open(filename string) error {
file, err := os.Open(filename)
if err != nil {
return err
}
defer file.Close()
data, err := bencode.Decode(file)
if err != nil {
return err
}
if announceLists, ok := data["announce-list"].([]interface{}); ok {
for _, announceList := range announceLists {
for _, announceURL := range announceList.([]interface{}) {
torrent.parseTrackerURL(announceURL.(string))
}
}
} else {
torrent.parseTrackerURL(data["announce"].(string))
}
if comment, ok := data["comment"]; ok {
torrent.comment = comment.(string)
}
info := data["info"].(map[string]interface{})
torrent.name = info["name"].(string)
torrent.pieceLength = info["piece length"].(int64)
infoHash := sha1.Sum(bencode.Encode(info))
// Set handshake
var buffer bytes.Buffer
buffer.WriteByte(19) // length of the string "BitTorrent Protocol"
buffer.WriteString("BitTorrent protocol")
buffer.WriteString("\x00\x00\x00\x00\x00\x00\x00\x00") // reserved
buffer.Write(infoHash[:])
buffer.Write(client.peerID)
torrent.handshake = buffer.Bytes()
// Set pieces
pieces := info["pieces"].(string)
for i := 0; i < len(pieces); i += 20 {
torrent.pieces = append(torrent.pieces, TorrentPiece{
hash: pieces[i : i+20],
})
}
if err := os.Mkdir("Downloads", 0700); err != nil && !os.IsExist(err) {
return err
}
cwd, err := os.Getwd()
if err != nil {
return err
}
base := filepath.Join(cwd, "Downloads")
// Set files
if files, exists := info["files"]; exists {
dirName := filepath.Join("Downloads", info["name"].(string))
if err := torrent.validatePath(base, dirName); err != nil {
return err
}
base := filepath.Join(cwd, dirName)
for _, v := range files.([]interface{}) {
v := v.(map[string]interface{})
torrent.totalSize += v["length"].(int64)
}
// Multiple files
var begin int64
for k, v := range files.([]interface{}) {
v := v.(map[string]interface{})
// Set up directory structure
pathList := v["path"].([]interface{})
pathElements := []string{dirName}
for i := 0; i < len(pathList)-1; i++ {
pathElements = append(pathElements, pathList[i].(string))
}
path := filepath.Join(pathElements...)
fullPath := filepath.Join(path, pathList[len(pathList)-1].(string))
if err := torrent.validatePath(base, fullPath); err != nil {
return err
}
if len(path) != 0 {
if err := os.MkdirAll(path, 0700); err != nil {
return err
}
}
length := v["length"].(int64)
file, err := os.OpenFile(fullPath, os.O_RDWR, 0600)
if err == nil {
torrent.findCompletedPieces(file, begin, length, k)
file.Close()
}
torrent.files = append(torrent.files, File{fullPath, begin, length})
begin += length
}
} else {
// Single file
fileName := filepath.Join("Downloads", info["name"].(string))
if err := torrent.validatePath(base, fileName); err != nil {
return err
}
length := info["length"].(int64)
torrent.totalSize = length
file, err := os.OpenFile(fileName, os.O_RDWR, 0600)
if err == nil {
torrent.findCompletedPieces(file, 0, length, 0)
file.Close()
}
torrent.files = []File{{fileName, 0, length}}
}
return nil
}
func (torrent *Torrent) findCompletedPieces(file *os.File, begin, length int64, fileIndex int) {
fi, err := file.Stat()
if err != nil {
return
}
size := fi.Size()
if size == 0 {
return
} else if size > length {
file.Truncate(0)
return
}
buf := make([]byte, torrent.pieceLength)
var pieceIndex uint32
if begin != 0 {
pieceIndex = uint32(begin / torrent.pieceLength)
}
fileEnd := begin + length
pos := int64(pieceIndex) * torrent.pieceLength
pieceLength := torrent.getPieceLength(pieceIndex)
if pos+pieceLength > fileEnd {
return
}
if pos < begin {
bufPos := begin - pos
if _, err := file.Read(buf[bufPos:]); err != nil {
return
}
for bufPos != 0 {
fileIndex--
f := torrent.files[fileIndex]
handle, err := os.OpenFile(f.path, os.O_RDONLY, 0600)
if err != nil {
return
}
defer handle.Close()
if bufPos > f.length {
if n, err := handle.Read(buf[bufPos-f.length : bufPos]); err != nil || int64(n) != f.length {
return | } else {
if n, err := handle.ReadAt(buf[:bufPos], f.length-bufPos); err != nil || int64(n) != bufPos {
return
}
break
}
}
if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
pos += pieceLength
pieceIndex++
}
if _, err := file.Seek(pos-begin, os.SEEK_SET); err != nil {
return
}
reader := bufio.NewReaderSize(file, int(pieceLength))
for pos+torrent.pieceLength <= fileEnd {
if n, err := reader.Read(buf); err != nil || n != len(buf) {
return
}
if torrent.checkPieceHash(buf, pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
pos += torrent.pieceLength
pieceIndex++
}
if int(pieceIndex) == len(torrent.pieces)-1 {
pieceLength = torrent.getLastPieceLength()
if n, err := reader.Read(buf[:pieceLength]); err != nil || int64(n) != pieceLength {
return
}
if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
}
}
func (torrent *Torrent) getTrackerRequestData(event uint32) *TrackerRequestData {
downloaded := torrent.getDownloadedSize()
return &TrackerRequestData{
event: event,
downloaded: uint64(downloaded),
uploaded: torrent.uploaded,
remaining: uint64(torrent.totalSize - downloaded),
}
}
func (torrent *Torrent) startTrackers() {
data := torrent.getTrackerRequestData(TrackerEventStarted)
for _, tracker := range torrent.trackers {
go tracker.start(data)
}
}
func (torrent *Torrent) stopTrackers() {
data := torrent.getTrackerRequestData(TrackerEventStopped)
for _, trackerID := range torrent.activeTrackers {
go func(announceChannel chan *TrackerRequestData, stopChannel chan struct{}) {
announceChannel <- data
stopChannel <- struct{}{}
}(torrent.trackers[trackerID].announceChannel, torrent.trackers[trackerID].stopChannel)
}
}
func (torrent *Torrent) announceToTrackers(event uint32) {
data := torrent.getTrackerRequestData(event)
for _, trackerID := range torrent.activeTrackers {
go func(channel chan *TrackerRequestData) {
channel <- data
}(torrent.trackers[trackerID].announceChannel)
}
}
func (torrent *Torrent) download() {
if torrent.completedPieces == len(torrent.pieces) {
return
}
torrent.activeTrackerChannel = make(chan int)
torrent.stoppedTrackerChannel = make(chan int)
torrent.requestAnnounceDataChannel = make(chan int)
torrent.peersChannel = make(chan interface{})
torrent.startTrackers()
torrent.pieceChannel = make(chan *PieceMessage)
torrent.bitfieldChannel = make(chan *BitfieldMessage)
torrent.havePieceChannel = make(chan *HavePieceMessage)
torrent.addPeerChannel = make(chan *Peer)
torrent.removePeerChannel = make(chan *Peer)
torrent.blockRequestChannel = make(chan *BlockRequestMessage)
torrent.fileWriteDone = make(chan struct{})
torrent.decrementPeerCount = make(chan struct{})
torrent.knownPeers = make(map[string]struct{})
for torrent.completedPieces != len(torrent.pieces) || torrent.totalPeerCount != 0 {
select {
case havePieceMessage := <-torrent.havePieceChannel:
torrent.handleHaveMessage(havePieceMessage)
case bitfieldMessage := <-torrent.bitfieldChannel:
torrent.handleBitfieldMessage(bitfieldMessage)
case pieceMessage := <-torrent.pieceChannel:
torrent.handlePieceMessage(pieceMessage)
case blockRequestMessage := <-torrent.blockRequestChannel:
torrent.handleBlockRequestMessage(blockRequestMessage)
case peer := <-torrent.addPeerChannel:
torrent.handleAddPeer(peer)
case peer := <-torrent.removePeerChannel:
torrent.handleRemovePeer(peer)
case <-torrent.fileWriteDone:
torrent.pendingFileWrites--
case <-torrent.decrementPeerCount:
torrent.totalPeerCount--
case peers := <-torrent.peersChannel:
torrent.connectToPeers(peers)
case trackerID := <-torrent.activeTrackerChannel:
torrent.activeTrackers = append(torrent.activeTrackers, trackerID)
fmt.Printf("[%s] %d active trackers\n", torrent.name, len(torrent.activeTrackers))
case trackerID := <-torrent.requestAnnounceDataChannel:
go func(channel chan *TrackerRequestData, data *TrackerRequestData) {
channel <- data
}(torrent.trackers[trackerID].announceChannel, torrent.getTrackerRequestData(TrackerEventNone))
}
}
torrent.stopTrackers()
if torrent.pendingFileWrites != 0 {
fmt.Printf("[%s] Waiting for %d pending file writes...\n", torrent.name, torrent.pendingFileWrites)
for torrent.pendingFileWrites != 0 {
<-torrent.fileWriteDone
torrent.pendingFileWrites--
}
}
if len(torrent.activeTrackers) != 0 {
fmt.Printf("[%s] Waiting for %d trackers to stop...\n", torrent.name, len(torrent.activeTrackers))
for len(torrent.activeTrackers) != 0 {
select {
case trackerID := <-torrent.stoppedTrackerChannel:
for k, v := range torrent.activeTrackers {
if v == trackerID {
torrent.activeTrackers = append(torrent.activeTrackers[:k], torrent.activeTrackers[k+1:]...)
break
}
}
// Handle other messages that a Tracker may send
case <-torrent.activeTrackerChannel:
case <-torrent.peersChannel:
case <-torrent.requestAnnounceDataChannel:
}
}
}
}
func (torrent *Torrent) checkPieceHash(data []byte, pieceIndex uint32) bool {
dataHash := sha1.Sum(data)
return bytes.Equal(dataHash[:], []byte(torrent.pieces[pieceIndex].hash))
}
func (torrent *Torrent) getPieceLength(pieceIndex uint32) int64 {
if pieceIndex == uint32(len(torrent.pieces))-1 {
if res := torrent.totalSize % torrent.pieceLength; res != 0 {
return res
}
}
return torrent.pieceLength
}
func (torrent *Torrent) getLastPieceLength() int64 {
if res := torrent.totalSize % torrent.pieceLength; res != 0 {
return res
}
return torrent.pieceLength
}
func (torrent *Torrent) getDownloadedSize() int64 {
var downloadedSize int64
for k := range torrent.pieces {
if torrent.pieces[k].done {
downloadedSize += torrent.getPieceLength(uint32(k))
}
}
return downloadedSize
}
func (torrent *Torrent) connectToPeers(peers interface{}) {
switch peers := peers.(type) {
case string:
torrent.totalPeerCount += len(peers) / 6
for i := 0; i < len(peers); i += 6 {
peer := NewPeer(torrent)
peer.ip = net.IPv4(peers[i], peers[i+1], peers[i+2], peers[i+3])
if _, known := torrent.knownPeers[peer.ip.String()]; known {
torrent.totalPeerCount--
continue
}
torrent.knownPeers[peer.ip.String()] = struct{}{}
peer.port = binary.BigEndian.Uint16([]byte(peers[i+4:]))
go peer.connect()
}
case []interface{}:
torrent.totalPeerCount += len(peers)
for _, dict := range peers {
dict := dict.(map[string]interface{})
addr, err := net.ResolveIPAddr("ip", dict["ip"].(string))
if err != nil {
torrent.totalPeerCount--
continue
}
if _, known := torrent.knownPeers[addr.IP.String()]; known {
torrent.totalPeerCount--
continue
}
torrent.knownPeers[addr.IP.String()] = struct{}{}
peer := NewPeer(torrent)
peer.id = dict["peer id"].(string)
peer.ip = addr.IP
peer.port = uint16(dict["port"].(int64))
go peer.connect()
}
}
}
func (torrent *Torrent) handleHaveMessage(havePieceMessage *HavePieceMessage) {
havePieceMessage.from.pieces[havePieceMessage.index] = struct{}{}
// torrent.requestPieceFromPeer(peer)
}
func (torrent *Torrent) handleBitfieldMessage(bitfieldMessage *BitfieldMessage) {
peer := bitfieldMessage.from
piecesLen := uint32(len(torrent.pieces))
var index uint32
for i := 0; i < len(bitfieldMessage.data); i++ {
b := bitfieldMessage.data[i]
for v := byte(128); v != 0; v >>= 1 {
if b&v != v {
index++
continue
}
if index >= piecesLen {
break
}
peer.pieces[index] = struct{}{}
index++
}
}
torrent.requestPieceFromPeer(peer)
}
func (torrent *Torrent) handlePieceMessage(pieceMessage *PieceMessage) {
if torrent.pieces[pieceMessage.index].done {
return
}
if !torrent.checkPieceHash(pieceMessage.data, pieceMessage.index) {
pieceMessage.from.done <- struct{}{}
return
}
torrent.pieces[pieceMessage.index].done = true
torrent.completedPieces++
if torrent.completedPieces != len(torrent.pieces) {
torrent.requestPieceFromPeer(pieceMessage.from)
}
beginPos := int64(pieceMessage.index) * torrent.pieceLength
for k := range torrent.files {
file := &torrent.files[k]
if beginPos < file.begin {
break
}
fileEnd := file.begin + file.length
if beginPos >= fileEnd {
continue
}
amountWrite := fileEnd - beginPos
if amountWrite > int64(len(pieceMessage.data)) {
amountWrite = int64(len(pieceMessage.data))
}
torrent.pendingFileWrites++
go func(path string, offset int64, data []byte) {
fileWriterChannel <- &FileWriterMessage{path, offset, data, torrent}
}(file.path, beginPos-file.begin, pieceMessage.data[:amountWrite])
pieceMessage.data = pieceMessage.data[amountWrite:]
beginPos += amountWrite
}
fmt.Printf("[%s] Downloaded: %.2f%c\n", torrent.name, float64(torrent.completedPieces)*100/float64(len(torrent.pieces)), '%')
if torrent.completedPieces == len(torrent.pieces) {
for _, peer := range torrent.activePeers {
peer.done <- struct{}{}
}
torrent.announceToTrackers(TrackerEventCompleted)
} else {
for _, peer := range torrent.activePeers {
peer.sendHaveChannel <- pieceMessage.index
}
}
}
func (torrent *Torrent) requestPieceFromPeer(peer *Peer) {
// Find the least busy piece that the peer claims to have
busyness := math.MaxInt32
var pieceIndex uint32
pieces := uint32(len(torrent.pieces))
for i := uint32(0); i < pieces; i++ {
piece := &torrent.pieces[i]
if piece.done {
continue
}
if _, ok := peer.pieces[i]; ok {
if piece.busyness == 0 {
piece.busyness = 1
peer.requestPieceChannel <- i
return
} else if busyness > piece.busyness {
busyness = piece.busyness
pieceIndex = i
}
}
}
if busyness != math.MaxInt32 {
torrent.pieces[pieceIndex].busyness++
peer.requestPieceChannel <- pieceIndex
}
}
func (torrent *Torrent) handleAddPeer(peer *Peer) {
torrent.activePeers = append(torrent.activePeers, peer)
fmt.Printf("[%s] %d active peers\n", torrent.name, len(torrent.activePeers))
if torrent.completedPieces == len(torrent.pieces) {
peer.done <- struct{}{}
}
}
func (torrent *Torrent) handleRemovePeer(peer *Peer) {
for k, p := range torrent.activePeers {
if p == peer {
torrent.activePeers = append(torrent.activePeers[:k], torrent.activePeers[k+1:]...)
break
}
}
peer.done <- struct{}{}
fmt.Printf("[%s] %d active peers\n", torrent.name, len(torrent.activePeers))
}
func (torrent *Torrent) handleBlockRequestMessage(blockRequestMessage *BlockRequestMessage) {
if blockRequestMessage.index >= uint32(len(torrent.pieces)) {
blockRequestMessage.from.done <- struct{}{}
return
}
if !torrent.pieces[blockRequestMessage.index].done {
blockRequestMessage.from.done <- struct{}{}
return
}
end := int64(blockRequestMessage.begin) + int64(blockRequestMessage.length)
if end > torrent.getPieceLength(blockRequestMessage.index) {
blockRequestMessage.from.done <- struct{}{}
return
}
block := make([]byte, blockRequestMessage.length)
var pos int64
fileOffset := int64(blockRequestMessage.index)*torrent.pieceLength + int64(blockRequestMessage.begin)
for k := range torrent.files {
file := &torrent.files[k]
cursor := fileOffset + pos
if cursor < file.begin {
break
}
fileEnd := file.begin + file.length
if cursor >= fileEnd {
continue
}
n := fileEnd - cursor
if n > int64(blockRequestMessage.length)-pos {
n = int64(blockRequestMessage.length) - pos
}
handle, err := os.OpenFile(file.path, os.O_RDONLY, 0600)
if err != nil {
return
}
handle.Seek(cursor-file.begin, os.SEEK_SET)
end := pos + n
for pos < end {
count, err := handle.Read(block[pos:end])
if err != nil {
handle.Close()
return
}
pos += int64(count)
}
handle.Close()
}
torrent.uploaded += uint64(blockRequestMessage.length)
blockRequestMessage.from.sendPieceBlockChannel <- &BlockMessage{
blockRequestMessage.index,
blockRequestMessage.begin,
block,
}
}
func (torrent *Torrent) getInfoHash() []byte {
return torrent.handshake[28:48]
} | }
bufPos -= f.length | random_line_split |
torrent.go | /*
* Copyright (c) 2016 Mark Samman <https://github.com/marksamman/gotorrent>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package main
import (
"bufio"
"bytes"
"crypto/sha1"
"encoding/binary"
"errors"
"fmt"
"math"
"net"
"os"
"path/filepath"
"github.com/marksamman/bencode"
)
type Torrent struct {
totalPeerCount int
files []File
pieces []TorrentPiece
activePeers []*Peer
completedPieces int
knownPeers map[string]struct{}
name string
comment string
pieceLength int64
totalSize int64
pieceChannel chan *PieceMessage
bitfieldChannel chan *BitfieldMessage
havePieceChannel chan *HavePieceMessage
addPeerChannel chan *Peer
removePeerChannel chan *Peer
blockRequestChannel chan *BlockRequestMessage
activeTrackerChannel chan int
stoppedTrackerChannel chan int
requestAnnounceDataChannel chan int
peersChannel chan interface{}
fileWriteDone chan struct{}
decrementPeerCount chan struct{}
handshake []byte
uploaded uint64
pendingFileWrites int
trackers []*Tracker
activeTrackers []int
}
type TorrentPiece struct {
done bool
busyness int
hash string
}
type File struct {
path string
begin int64
length int64
}
type PieceMessage struct {
from *Peer
index uint32
data []byte
}
type BitfieldMessage struct {
from *Peer
data []byte
}
type HavePieceMessage struct {
from *Peer
index uint32
}
type BlockRequestMessage struct {
from *Peer
index uint32
begin uint32
length uint32
}
func (torrent *Torrent) validatePath(base string, path string) error {
if absolutePath, err := filepath.Abs(path); err != nil {
return err
} else if len(absolutePath) < len(base) {
return errors.New("path is too short")
} else if base != absolutePath[:len(base)] {
return errors.New("path mismatch")
}
return nil
}
func (torrent *Torrent) parseTrackerURL(url string) error {
if len(url) < 7 {
return errors.New("announce URL is too short")
}
if url[0:4] == "http" {
torrent.trackers = append(torrent.trackers, &Tracker{
id: len(torrent.trackers),
announceURL: url,
protocol: TrackerHTTP,
torrent: torrent,
})
} else if url[0:3] == "udp" {
torrent.trackers = append(torrent.trackers, &Tracker{
id: len(torrent.trackers),
announceURL: url,
protocol: TrackerUDP,
torrent: torrent,
})
} else {
return errors.New("unsupported tracker protocol")
}
return nil
}
func (torrent *Torrent) open(filename string) error {
file, err := os.Open(filename)
if err != nil {
return err
}
defer file.Close()
data, err := bencode.Decode(file)
if err != nil {
return err
}
if announceLists, ok := data["announce-list"].([]interface{}); ok {
for _, announceList := range announceLists {
for _, announceURL := range announceList.([]interface{}) {
torrent.parseTrackerURL(announceURL.(string))
}
}
} else {
torrent.parseTrackerURL(data["announce"].(string))
}
if comment, ok := data["comment"]; ok {
torrent.comment = comment.(string)
}
info := data["info"].(map[string]interface{})
torrent.name = info["name"].(string)
torrent.pieceLength = info["piece length"].(int64)
infoHash := sha1.Sum(bencode.Encode(info))
// Set handshake
var buffer bytes.Buffer
buffer.WriteByte(19) // length of the string "BitTorrent Protocol"
buffer.WriteString("BitTorrent protocol")
buffer.WriteString("\x00\x00\x00\x00\x00\x00\x00\x00") // reserved
buffer.Write(infoHash[:])
buffer.Write(client.peerID)
torrent.handshake = buffer.Bytes()
// Set pieces
pieces := info["pieces"].(string)
for i := 0; i < len(pieces); i += 20 {
torrent.pieces = append(torrent.pieces, TorrentPiece{
hash: pieces[i : i+20],
})
}
if err := os.Mkdir("Downloads", 0700); err != nil && !os.IsExist(err) {
return err
}
cwd, err := os.Getwd()
if err != nil {
return err
}
base := filepath.Join(cwd, "Downloads")
// Set files
if files, exists := info["files"]; exists {
dirName := filepath.Join("Downloads", info["name"].(string))
if err := torrent.validatePath(base, dirName); err != nil {
return err
}
base := filepath.Join(cwd, dirName)
for _, v := range files.([]interface{}) {
v := v.(map[string]interface{})
torrent.totalSize += v["length"].(int64)
}
// Multiple files
var begin int64
for k, v := range files.([]interface{}) {
v := v.(map[string]interface{})
// Set up directory structure
pathList := v["path"].([]interface{})
pathElements := []string{dirName}
for i := 0; i < len(pathList)-1; i++ {
pathElements = append(pathElements, pathList[i].(string))
}
path := filepath.Join(pathElements...)
fullPath := filepath.Join(path, pathList[len(pathList)-1].(string))
if err := torrent.validatePath(base, fullPath); err != nil {
return err
}
if len(path) != 0 {
if err := os.MkdirAll(path, 0700); err != nil {
return err
}
}
length := v["length"].(int64)
file, err := os.OpenFile(fullPath, os.O_RDWR, 0600)
if err == nil {
torrent.findCompletedPieces(file, begin, length, k)
file.Close()
}
torrent.files = append(torrent.files, File{fullPath, begin, length})
begin += length
}
} else {
// Single file
fileName := filepath.Join("Downloads", info["name"].(string))
if err := torrent.validatePath(base, fileName); err != nil {
return err
}
length := info["length"].(int64)
torrent.totalSize = length
file, err := os.OpenFile(fileName, os.O_RDWR, 0600)
if err == nil {
torrent.findCompletedPieces(file, 0, length, 0)
file.Close()
}
torrent.files = []File{{fileName, 0, length}}
}
return nil
}
func (torrent *Torrent) findCompletedPieces(file *os.File, begin, length int64, fileIndex int) {
fi, err := file.Stat()
if err != nil {
return
}
size := fi.Size()
if size == 0 {
return
} else if size > length {
file.Truncate(0)
return
}
buf := make([]byte, torrent.pieceLength)
var pieceIndex uint32
if begin != 0 {
pieceIndex = uint32(begin / torrent.pieceLength)
}
fileEnd := begin + length
pos := int64(pieceIndex) * torrent.pieceLength
pieceLength := torrent.getPieceLength(pieceIndex)
if pos+pieceLength > fileEnd {
return
}
if pos < begin {
bufPos := begin - pos
if _, err := file.Read(buf[bufPos:]); err != nil {
return
}
for bufPos != 0 {
fileIndex--
f := torrent.files[fileIndex]
handle, err := os.OpenFile(f.path, os.O_RDONLY, 0600)
if err != nil {
return
}
defer handle.Close()
if bufPos > f.length {
if n, err := handle.Read(buf[bufPos-f.length : bufPos]); err != nil || int64(n) != f.length {
return
}
bufPos -= f.length
} else {
if n, err := handle.ReadAt(buf[:bufPos], f.length-bufPos); err != nil || int64(n) != bufPos {
return
}
break
}
}
if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
pos += pieceLength
pieceIndex++
}
if _, err := file.Seek(pos-begin, os.SEEK_SET); err != nil {
return
}
reader := bufio.NewReaderSize(file, int(pieceLength))
for pos+torrent.pieceLength <= fileEnd {
if n, err := reader.Read(buf); err != nil || n != len(buf) {
return
}
if torrent.checkPieceHash(buf, pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
pos += torrent.pieceLength
pieceIndex++
}
if int(pieceIndex) == len(torrent.pieces)-1 {
pieceLength = torrent.getLastPieceLength()
if n, err := reader.Read(buf[:pieceLength]); err != nil || int64(n) != pieceLength {
return
}
if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
torrent.pieces[pieceIndex].done = true
torrent.completedPieces++
}
}
}
func (torrent *Torrent) getTrackerRequestData(event uint32) *TrackerRequestData {
downloaded := torrent.getDownloadedSize()
return &TrackerRequestData{
event: event,
downloaded: uint64(downloaded),
uploaded: torrent.uploaded,
remaining: uint64(torrent.totalSize - downloaded),
}
}
func (torrent *Torrent) startTrackers() {
data := torrent.getTrackerRequestData(TrackerEventStarted)
for _, tracker := range torrent.trackers {
go tracker.start(data)
}
}
func (torrent *Torrent) stopTrackers() {
data := torrent.getTrackerRequestData(TrackerEventStopped)
for _, trackerID := range torrent.activeTrackers {
go func(announceChannel chan *TrackerRequestData, stopChannel chan struct{}) {
announceChannel <- data
stopChannel <- struct{}{}
}(torrent.trackers[trackerID].announceChannel, torrent.trackers[trackerID].stopChannel)
}
}
func (torrent *Torrent) announceToTrackers(event uint32) {
data := torrent.getTrackerRequestData(event)
for _, trackerID := range torrent.activeTrackers {
go func(channel chan *TrackerRequestData) {
channel <- data
}(torrent.trackers[trackerID].announceChannel)
}
}
func (torrent *Torrent) download() {
if torrent.completedPieces == len(torrent.pieces) {
return
}
torrent.activeTrackerChannel = make(chan int)
torrent.stoppedTrackerChannel = make(chan int)
torrent.requestAnnounceDataChannel = make(chan int)
torrent.peersChannel = make(chan interface{})
torrent.startTrackers()
torrent.pieceChannel = make(chan *PieceMessage)
torrent.bitfieldChannel = make(chan *BitfieldMessage)
torrent.havePieceChannel = make(chan *HavePieceMessage)
torrent.addPeerChannel = make(chan *Peer)
torrent.removePeerChannel = make(chan *Peer)
torrent.blockRequestChannel = make(chan *BlockRequestMessage)
torrent.fileWriteDone = make(chan struct{})
torrent.decrementPeerCount = make(chan struct{})
torrent.knownPeers = make(map[string]struct{})
for torrent.completedPieces != len(torrent.pieces) || torrent.totalPeerCount != 0 {
select {
case havePieceMessage := <-torrent.havePieceChannel:
torrent.handleHaveMessage(havePieceMessage)
case bitfieldMessage := <-torrent.bitfieldChannel:
torrent.handleBitfieldMessage(bitfieldMessage)
case pieceMessage := <-torrent.pieceChannel:
torrent.handlePieceMessage(pieceMessage)
case blockRequestMessage := <-torrent.blockRequestChannel:
torrent.handleBlockRequestMessage(blockRequestMessage)
case peer := <-torrent.addPeerChannel:
torrent.handleAddPeer(peer)
case peer := <-torrent.removePeerChannel:
torrent.handleRemovePeer(peer)
case <-torrent.fileWriteDone:
torrent.pendingFileWrites--
case <-torrent.decrementPeerCount:
torrent.totalPeerCount--
case peers := <-torrent.peersChannel:
torrent.connectToPeers(peers)
case trackerID := <-torrent.activeTrackerChannel:
torrent.activeTrackers = append(torrent.activeTrackers, trackerID)
fmt.Printf("[%s] %d active trackers\n", torrent.name, len(torrent.activeTrackers))
case trackerID := <-torrent.requestAnnounceDataChannel:
go func(channel chan *TrackerRequestData, data *TrackerRequestData) {
channel <- data
}(torrent.trackers[trackerID].announceChannel, torrent.getTrackerRequestData(TrackerEventNone))
}
}
torrent.stopTrackers()
if torrent.pendingFileWrites != 0 {
fmt.Printf("[%s] Waiting for %d pending file writes...\n", torrent.name, torrent.pendingFileWrites)
for torrent.pendingFileWrites != 0 {
<-torrent.fileWriteDone
torrent.pendingFileWrites--
}
}
if len(torrent.activeTrackers) != 0 {
fmt.Printf("[%s] Waiting for %d trackers to stop...\n", torrent.name, len(torrent.activeTrackers))
for len(torrent.activeTrackers) != 0 |
}
}
func (torrent *Torrent) checkPieceHash(data []byte, pieceIndex uint32) bool {
dataHash := sha1.Sum(data)
return bytes.Equal(dataHash[:], []byte(torrent.pieces[pieceIndex].hash))
}
func (torrent *Torrent) getPieceLength(pieceIndex uint32) int64 {
if pieceIndex == uint32(len(torrent.pieces))-1 {
if res := torrent.totalSize % torrent.pieceLength; res != 0 {
return res
}
}
return torrent.pieceLength
}
func (torrent *Torrent) getLastPieceLength() int64 {
if res := torrent.totalSize % torrent.pieceLength; res != 0 {
return res
}
return torrent.pieceLength
}
func (torrent *Torrent) getDownloadedSize() int64 {
var downloadedSize int64
for k := range torrent.pieces {
if torrent.pieces[k].done {
downloadedSize += torrent.getPieceLength(uint32(k))
}
}
return downloadedSize
}
func (torrent *Torrent) connectToPeers(peers interface{}) {
switch peers := peers.(type) {
case string:
torrent.totalPeerCount += len(peers) / 6
for i := 0; i < len(peers); i += 6 {
peer := NewPeer(torrent)
peer.ip = net.IPv4(peers[i], peers[i+1], peers[i+2], peers[i+3])
if _, known := torrent.knownPeers[peer.ip.String()]; known {
torrent.totalPeerCount--
continue
}
torrent.knownPeers[peer.ip.String()] = struct{}{}
peer.port = binary.BigEndian.Uint16([]byte(peers[i+4:]))
go peer.connect()
}
case []interface{}:
torrent.totalPeerCount += len(peers)
for _, dict := range peers {
dict := dict.(map[string]interface{})
addr, err := net.ResolveIPAddr("ip", dict["ip"].(string))
if err != nil {
torrent.totalPeerCount--
continue
}
if _, known := torrent.knownPeers[addr.IP.String()]; known {
torrent.totalPeerCount--
continue
}
torrent.knownPeers[addr.IP.String()] = struct{}{}
peer := NewPeer(torrent)
peer.id = dict["peer id"].(string)
peer.ip = addr.IP
peer.port = uint16(dict["port"].(int64))
go peer.connect()
}
}
}
func (torrent *Torrent) handleHaveMessage(havePieceMessage *HavePieceMessage) {
havePieceMessage.from.pieces[havePieceMessage.index] = struct{}{}
// torrent.requestPieceFromPeer(peer)
}
func (torrent *Torrent) handleBitfieldMessage(bitfieldMessage *BitfieldMessage) {
peer := bitfieldMessage.from
piecesLen := uint32(len(torrent.pieces))
var index uint32
for i := 0; i < len(bitfieldMessage.data); i++ {
b := bitfieldMessage.data[i]
for v := byte(128); v != 0; v >>= 1 {
if b&v != v {
index++
continue
}
if index >= piecesLen {
break
}
peer.pieces[index] = struct{}{}
index++
}
}
torrent.requestPieceFromPeer(peer)
}
// handlePieceMessage processes a fully downloaded piece: verifies its hash,
// marks it done, dispatches asynchronous writes for each file the piece
// spans, and either announces HAVE to all peers or, once every piece is
// done, tells the peers to shut down and announces completion.
func (torrent *Torrent) handlePieceMessage(pieceMessage *PieceMessage) {
	// Ignore duplicates arriving from racing peers.
	if torrent.pieces[pieceMessage.index].done {
		return
	}
	// Bad hash: disconnect the peer via its done channel.
	if !torrent.checkPieceHash(pieceMessage.data, pieceMessage.index) {
		pieceMessage.from.done <- struct{}{}
		return
	}
	torrent.pieces[pieceMessage.index].done = true
	torrent.completedPieces++
	if torrent.completedPieces != len(torrent.pieces) {
		torrent.requestPieceFromPeer(pieceMessage.from)
	}
	// A piece may straddle several files. files is ordered by begin offset,
	// so we can stop at the first file starting past beginPos.
	beginPos := int64(pieceMessage.index) * torrent.pieceLength
	for k := range torrent.files {
		file := &torrent.files[k]
		if beginPos < file.begin {
			break
		}
		fileEnd := file.begin + file.length
		if beginPos >= fileEnd {
			continue
		}
		amountWrite := fileEnd - beginPos
		if amountWrite > int64(len(pieceMessage.data)) {
			amountWrite = int64(len(pieceMessage.data))
		}
		// The file-writer goroutine performs the write and later signals
		// fileWriteDone so download() can track pendingFileWrites.
		torrent.pendingFileWrites++
		go func(path string, offset int64, data []byte) {
			fileWriterChannel <- &FileWriterMessage{path, offset, data, torrent}
		}(file.path, beginPos-file.begin, pieceMessage.data[:amountWrite])
		pieceMessage.data = pieceMessage.data[amountWrite:]
		beginPos += amountWrite
	}
	fmt.Printf("[%s] Downloaded: %.2f%c\n", torrent.name, float64(torrent.completedPieces)*100/float64(len(torrent.pieces)), '%')
	if torrent.completedPieces == len(torrent.pieces) {
		for _, peer := range torrent.activePeers {
			peer.done <- struct{}{}
		}
		torrent.announceToTrackers(TrackerEventCompleted)
	} else {
		for _, peer := range torrent.activePeers {
			peer.sendHaveChannel <- pieceMessage.index
		}
	}
}
// requestPieceFromPeer asks peer for the least-busy unfinished piece it
// advertises. A completely idle piece is requested immediately; otherwise
// the piece with the lowest busyness wins. If the peer has nothing useful,
// no request is made.
func (torrent *Torrent) requestPieceFromPeer(peer *Peer) {
	best := math.MaxInt32
	var bestIndex uint32
	total := uint32(len(torrent.pieces))
	for i := uint32(0); i < total; i++ {
		piece := &torrent.pieces[i]
		if piece.done {
			continue
		}
		if _, has := peer.pieces[i]; !has {
			continue
		}
		if piece.busyness == 0 {
			// Nobody is working on this piece yet — take it right away.
			piece.busyness = 1
			peer.requestPieceChannel <- i
			return
		}
		if piece.busyness < best {
			best = piece.busyness
			bestIndex = i
		}
	}
	if best != math.MaxInt32 {
		torrent.pieces[bestIndex].busyness++
		peer.requestPieceChannel <- bestIndex
	}
}
// handleAddPeer registers a newly connected peer. If the download completed
// while the peer was still connecting, it is told to shut down right away.
func (torrent *Torrent) handleAddPeer(peer *Peer) {
	torrent.activePeers = append(torrent.activePeers, peer)
	fmt.Printf("[%s] %d active peers\n", torrent.name, len(torrent.activePeers))
	if torrent.completedPieces == len(torrent.pieces) {
		peer.done <- struct{}{}
	}
}
// handleRemovePeer drops a peer from the active list (if present) and
// signals its done channel so the peer goroutine can exit.
func (torrent *Torrent) handleRemovePeer(peer *Peer) {
	for k, p := range torrent.activePeers {
		if p == peer {
			torrent.activePeers = append(torrent.activePeers[:k], torrent.activePeers[k+1:]...)
			break
		}
	}
	peer.done <- struct{}{}
	fmt.Printf("[%s] %d active peers\n", torrent.name, len(torrent.activePeers))
}
// handleBlockRequestMessage serves an upload request for a block of a piece
// we already hold. Invalid requests (unknown piece, piece not done, range
// past the piece end) disconnect the requesting peer. The block may span
// several files on disk, which are read sequentially into one buffer.
func (torrent *Torrent) handleBlockRequestMessage(blockRequestMessage *BlockRequestMessage) {
	if blockRequestMessage.index >= uint32(len(torrent.pieces)) {
		blockRequestMessage.from.done <- struct{}{}
		return
	}
	if !torrent.pieces[blockRequestMessage.index].done {
		blockRequestMessage.from.done <- struct{}{}
		return
	}
	end := int64(blockRequestMessage.begin) + int64(blockRequestMessage.length)
	if end > torrent.getPieceLength(blockRequestMessage.index) {
		blockRequestMessage.from.done <- struct{}{}
		return
	}
	block := make([]byte, blockRequestMessage.length)
	var pos int64 // bytes of the block filled so far
	// Absolute offset of the requested range within the whole torrent.
	fileOffset := int64(blockRequestMessage.index)*torrent.pieceLength + int64(blockRequestMessage.begin)
	for k := range torrent.files {
		file := &torrent.files[k]
		cursor := fileOffset + pos
		// files is ordered by begin offset; a file starting past the
		// cursor means the block is fully read.
		if cursor < file.begin {
			break
		}
		fileEnd := file.begin + file.length
		if cursor >= fileEnd {
			continue
		}
		// Read as much as this file contributes, capped at what remains
		// of the requested length.
		n := fileEnd - cursor
		if n > int64(blockRequestMessage.length)-pos {
			n = int64(blockRequestMessage.length) - pos
		}
		handle, err := os.OpenFile(file.path, os.O_RDONLY, 0600)
		if err != nil {
			// NOTE(review): returning here sends the peer nothing at all;
			// confirm the peer side times out rather than hanging.
			return
		}
		handle.Seek(cursor-file.begin, os.SEEK_SET)
		end := pos + n
		for pos < end {
			count, err := handle.Read(block[pos:end])
			if err != nil {
				handle.Close()
				return
			}
			pos += int64(count)
		}
		handle.Close()
	}
	torrent.uploaded += uint64(blockRequestMessage.length)
	blockRequestMessage.from.sendPieceBlockChannel <- &BlockMessage{
		blockRequestMessage.index,
		blockRequestMessage.begin,
		block,
	}
}
// getInfoHash returns the 20-byte SHA-1 info hash, stored at bytes 28..47
// of the prebuilt handshake (after pstrlen, pstr and the reserved bytes).
func (torrent *Torrent) getInfoHash() []byte {
	return torrent.handshake[28:48]
}
| {
select {
case trackerID := <-torrent.stoppedTrackerChannel:
for k, v := range torrent.activeTrackers {
if v == trackerID {
torrent.activeTrackers = append(torrent.activeTrackers[:k], torrent.activeTrackers[k+1:]...)
break
}
}
// Handle other messages that a Tracker may send
case <-torrent.activeTrackerChannel:
case <-torrent.peersChannel:
case <-torrent.requestAnnounceDataChannel:
}
} | conditional_block |
torrent.go | /*
* Copyright (c) 2016 Mark Samman <https://github.com/marksamman/gotorrent>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package main
import (
	"bufio"
	"bytes"
	"crypto/sha1"
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"net"
	"os"
	"path/filepath"
	"strings"

	"github.com/marksamman/bencode"
)
// Torrent holds the full state of a single torrent: metadata parsed from
// the .torrent file, piece/peer/tracker bookkeeping, and the channels
// through which the download() event loop serializes all state mutation.
type Torrent struct {
	totalPeerCount int // peers learned from trackers, minus dupes/failures
	files []File // output files, ordered by begin offset within the torrent
	pieces []TorrentPiece
	activePeers []*Peer
	completedPieces int
	knownPeers map[string]struct{} // peer IPs already contacted (dedup)
	name string
	comment string
	pieceLength int64 // nominal piece size; the final piece may be shorter
	totalSize int64
	pieceChannel chan *PieceMessage
	bitfieldChannel chan *BitfieldMessage
	havePieceChannel chan *HavePieceMessage
	addPeerChannel chan *Peer
	removePeerChannel chan *Peer
	blockRequestChannel chan *BlockRequestMessage
	activeTrackerChannel chan int
	stoppedTrackerChannel chan int
	requestAnnounceDataChannel chan int
	peersChannel chan interface{}
	fileWriteDone chan struct{}
	decrementPeerCount chan struct{}
	handshake []byte // prebuilt peer handshake; info hash lives at bytes 28..47
	uploaded uint64
	pendingFileWrites int
	trackers []*Tracker
	activeTrackers []int // ids of trackers currently running
}
// TorrentPiece tracks the download state of a single piece.
type TorrentPiece struct {
	done bool // downloaded and hash-verified
	busyness int // number of peers currently asked for this piece
	hash string // expected 20-byte SHA-1 digest (raw bytes)
}
// File describes one output file and its byte span within the torrent.
type File struct {
	path string
	begin int64 // offset of the file's first byte within the torrent data
	length int64
}
// PieceMessage carries a fully assembled piece from a peer to the
// torrent's event loop.
type PieceMessage struct {
	from *Peer
	index uint32
	data []byte
}
// BitfieldMessage carries a peer's initial piece-availability bitfield.
type BitfieldMessage struct {
	from *Peer
	data []byte // one bit per piece, most significant bit first
}
// HavePieceMessage announces that a peer obtained the piece at index.
type HavePieceMessage struct {
	from *Peer
	index uint32
}
// BlockRequestMessage is a peer's upload request for length bytes starting
// at offset begin within piece index.
type BlockRequestMessage struct {
	from *Peer
	index uint32
	begin uint32
	length uint32
}
// validatePath verifies that path, once made absolute, stays inside base.
// This guards against directory traversal through malicious file names in a
// torrent (e.g. "../../something").
func (torrent *Torrent) validatePath(base string, path string) error {
	absolutePath, err := filepath.Abs(path)
	if err != nil {
		return err
	}
	// Require an exact match or a real child path. A plain string-prefix
	// comparison (as previously used) would wrongly accept sibling
	// directories such as "C:\base-evil" for base "C:\base".
	if absolutePath != base && !strings.HasPrefix(absolutePath, base+string(filepath.Separator)) {
		return errors.New("path mismatch")
	}
	return nil
}
// parseTrackerURL registers a tracker for this torrent based on the URL
// scheme. Only http(s) and udp trackers are supported; anything else is
// rejected with an error.
func (torrent *Torrent) parseTrackerURL(url string) error {
	if len(url) < 7 {
		// Shortest plausible announce URL ("udp://h"); also keeps the
		// scheme checks below safe.
		return errors.New("announce URL is too short")
	}
	// Pick the protocol first so the Tracker is constructed in one place
	// instead of duplicating the literal per branch.
	protocol := TrackerHTTP
	switch {
	case strings.HasPrefix(url, "http"):
		protocol = TrackerHTTP
	case strings.HasPrefix(url, "udp"):
		protocol = TrackerUDP
	default:
		return errors.New("unsupported tracker protocol")
	}
	// A tracker's id is its index in torrent.trackers so it can identify
	// itself on the torrent's channels.
	torrent.trackers = append(torrent.trackers, &Tracker{
		id:          len(torrent.trackers),
		announceURL: url,
		protocol:    protocol,
		torrent:     torrent,
	})
	return nil
}
// open parses a .torrent file: trackers, metadata, piece hashes and the
// target file layout under ./Downloads. It also prebuilds the peer
// handshake and scans any files already on disk for completed pieces.
func (torrent *Torrent) open(filename string) error {
	file, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer file.Close()
	data, err := bencode.Decode(file)
	if err != nil {
		return err
	}
	// Prefer the multi-tracker "announce-list" extension over the single
	// "announce" key when present.
	if announceLists, ok := data["announce-list"].([]interface{}); ok {
		for _, announceList := range announceLists {
			for _, announceURL := range announceList.([]interface{}) {
				torrent.parseTrackerURL(announceURL.(string))
			}
		}
	} else {
		torrent.parseTrackerURL(data["announce"].(string))
	}
	if comment, ok := data["comment"]; ok {
		torrent.comment = comment.(string)
	}
	info := data["info"].(map[string]interface{})
	torrent.name = info["name"].(string)
	torrent.pieceLength = info["piece length"].(int64)
	infoHash := sha1.Sum(bencode.Encode(info))
	// Build the peer-protocol handshake once; it is reused for every peer.
	var buffer bytes.Buffer
	buffer.WriteByte(19) // length of the string "BitTorrent protocol"
	buffer.WriteString("BitTorrent protocol")
	buffer.WriteString("\x00\x00\x00\x00\x00\x00\x00\x00") // reserved
	buffer.Write(infoHash[:])
	buffer.Write(client.peerID)
	torrent.handshake = buffer.Bytes()
	// "pieces" is a concatenation of 20-byte SHA-1 digests, one per piece.
	pieces := info["pieces"].(string)
	for i := 0; i < len(pieces); i += 20 {
		torrent.pieces = append(torrent.pieces, TorrentPiece{
			hash: pieces[i : i+20],
		})
	}
	if err := os.Mkdir("Downloads", 0700); err != nil && !os.IsExist(err) {
		return err
	}
	cwd, err := os.Getwd()
	if err != nil {
		return err
	}
	base := filepath.Join(cwd, "Downloads")
	// Set files; validatePath guards every constructed path against
	// escaping the Downloads directory.
	if files, exists := info["files"]; exists {
		// Multi-file torrent: everything lives under Downloads/<name>/.
		dirName := filepath.Join("Downloads", info["name"].(string))
		if err := torrent.validatePath(base, dirName); err != nil {
			return err
		}
		base := filepath.Join(cwd, dirName)
		// First pass: total size is needed before scanning for pieces.
		for _, v := range files.([]interface{}) {
			v := v.(map[string]interface{})
			torrent.totalSize += v["length"].(int64)
		}
		// Second pass: create directories, record spans, scan existing data.
		var begin int64
		for k, v := range files.([]interface{}) {
			v := v.(map[string]interface{})
			// Set up directory structure
			pathList := v["path"].([]interface{})
			pathElements := []string{dirName}
			for i := 0; i < len(pathList)-1; i++ {
				pathElements = append(pathElements, pathList[i].(string))
			}
			path := filepath.Join(pathElements...)
			fullPath := filepath.Join(path, pathList[len(pathList)-1].(string))
			if err := torrent.validatePath(base, fullPath); err != nil {
				return err
			}
			if len(path) != 0 {
				if err := os.MkdirAll(path, 0700); err != nil {
					return err
				}
			}
			length := v["length"].(int64)
			// If the file already exists, mark pieces that verify as done.
			file, err := os.OpenFile(fullPath, os.O_RDWR, 0600)
			if err == nil {
				torrent.findCompletedPieces(file, begin, length, k)
				file.Close()
			}
			torrent.files = append(torrent.files, File{fullPath, begin, length})
			begin += length
		}
	} else {
		// Single file
		fileName := filepath.Join("Downloads", info["name"].(string))
		if err := torrent.validatePath(base, fileName); err != nil {
			return err
		}
		length := info["length"].(int64)
		torrent.totalSize = length
		file, err := os.OpenFile(fileName, os.O_RDWR, 0600)
		if err == nil {
			torrent.findCompletedPieces(file, 0, length, 0)
			file.Close()
		}
		torrent.files = []File{{fileName, 0, length}}
	}
	return nil
}
// findCompletedPieces scans an already-existing file on disk and marks every
// piece whose stored data matches its expected hash as done, so data from a
// previous run is not downloaded again.
//
// begin/length describe the file's byte span within the torrent, and
// fileIndex is its position in torrent.files; both are needed because the
// first piece overlapping this file may start inside a preceding file.
func (torrent *Torrent) findCompletedPieces(file *os.File, begin, length int64, fileIndex int) {
	fi, err := file.Stat()
	if err != nil {
		return
	}
	size := fi.Size()
	if size == 0 {
		return
	} else if size > length {
		// On-disk file is larger than the torrent says it should be:
		// the content cannot be trusted, discard it.
		file.Truncate(0)
		return
	}
	buf := make([]byte, torrent.pieceLength)
	// Index of the first piece overlapping this file.
	var pieceIndex uint32
	if begin != 0 {
		pieceIndex = uint32(begin / torrent.pieceLength)
	}
	fileEnd := begin + length
	pos := int64(pieceIndex) * torrent.pieceLength
	pieceLength := torrent.getPieceLength(pieceIndex)
	if pos+pieceLength > fileEnd {
		return
	}
	if pos < begin {
		// The first piece starts inside preceding files: read this file's
		// contribution, then walk backwards through earlier files to fill
		// the front of the buffer.
		bufPos := begin - pos
		if _, err := file.Read(buf[bufPos:]); err != nil {
			return
		}
		for bufPos != 0 {
			fileIndex--
			f := torrent.files[fileIndex]
			handle, err := os.OpenFile(f.path, os.O_RDONLY, 0600)
			if err != nil {
				return
			}
			// Close each handle as soon as it has been read. The previous
			// code used defer inside this loop, which kept every handle
			// open until the whole function returned.
			if bufPos > f.length {
				n, err := handle.Read(buf[bufPos-f.length : bufPos])
				handle.Close()
				if err != nil || int64(n) != f.length {
					return
				}
				bufPos -= f.length
			} else {
				n, err := handle.ReadAt(buf[:bufPos], f.length-bufPos)
				handle.Close()
				if err != nil || int64(n) != bufPos {
					return
				}
				break
			}
		}
		if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
			torrent.pieces[pieceIndex].done = true
			torrent.completedPieces++
		}
		pos += pieceLength
		pieceIndex++
	}
	if _, err := file.Seek(pos-begin, os.SEEK_SET); err != nil {
		return
	}
	// Hash every full piece wholly contained inside this file.
	reader := bufio.NewReaderSize(file, int(pieceLength))
	for pos+torrent.pieceLength <= fileEnd {
		if n, err := reader.Read(buf); err != nil || n != len(buf) {
			return
		}
		if torrent.checkPieceHash(buf, pieceIndex) {
			torrent.pieces[pieceIndex].done = true
			torrent.completedPieces++
		}
		pos += torrent.pieceLength
		pieceIndex++
	}
	// The (possibly shorter) final piece, if this file ends the torrent.
	if int(pieceIndex) == len(torrent.pieces)-1 {
		pieceLength = torrent.getLastPieceLength()
		if n, err := reader.Read(buf[:pieceLength]); err != nil || int64(n) != pieceLength {
			return
		}
		if torrent.checkPieceHash(buf[:pieceLength], pieceIndex) {
			torrent.pieces[pieceIndex].done = true
			torrent.completedPieces++
		}
	}
}
// getTrackerRequestData builds the announce payload for the given event,
// snapshotting the current download/upload progress.
func (torrent *Torrent) getTrackerRequestData(event uint32) *TrackerRequestData {
	done := torrent.getDownloadedSize()
	data := &TrackerRequestData{}
	data.event = event
	data.downloaded = uint64(done)
	data.uploaded = torrent.uploaded
	data.remaining = uint64(torrent.totalSize - done)
	return data
}
// startTrackers launches every configured tracker in its own goroutine with
// an initial "started" announce.
func (torrent *Torrent) startTrackers() {
	data := torrent.getTrackerRequestData(TrackerEventStarted)
	for _, tracker := range torrent.trackers {
		go tracker.start(data)
	}
}
// stopTrackers asynchronously sends a final "stopped" announce to every
// active tracker and then signals it to shut down. The channel arguments
// are passed as goroutine parameters so the loop variable is not captured.
func (torrent *Torrent) stopTrackers() {
	data := torrent.getTrackerRequestData(TrackerEventStopped)
	for _, trackerID := range torrent.activeTrackers {
		go func(announceChannel chan *TrackerRequestData, stopChannel chan struct{}) {
			announceChannel <- data
			stopChannel <- struct{}{}
		}(torrent.trackers[trackerID].announceChannel, torrent.trackers[trackerID].stopChannel)
	}
}
// announceToTrackers pushes an announce with the given event to every active
// tracker. Each send runs in its own goroutine so a slow tracker cannot
// block the caller (the download event loop).
func (torrent *Torrent) announceToTrackers(event uint32) {
	data := torrent.getTrackerRequestData(event)
	for _, trackerID := range torrent.activeTrackers {
		go func(channel chan *TrackerRequestData) {
			channel <- data
		}(torrent.trackers[trackerID].announceChannel)
	}
}
// download runs the torrent's central event loop. All mutable torrent state
// is owned by this goroutine; peers, trackers and the file writer talk to
// it exclusively through channels, so no locking is needed. The function
// returns once every piece is done, outstanding file writes have drained,
// and all trackers acknowledged the final "stopped" announce.
func (torrent *Torrent) download() {
	// Everything was already on disk — nothing to download.
	if torrent.completedPieces == len(torrent.pieces) {
		return
	}
	torrent.activeTrackerChannel = make(chan int)
	torrent.stoppedTrackerChannel = make(chan int)
	torrent.requestAnnounceDataChannel = make(chan int)
	torrent.peersChannel = make(chan interface{})
	torrent.startTrackers()
	torrent.pieceChannel = make(chan *PieceMessage)
	torrent.bitfieldChannel = make(chan *BitfieldMessage)
	torrent.havePieceChannel = make(chan *HavePieceMessage)
	torrent.addPeerChannel = make(chan *Peer)
	torrent.removePeerChannel = make(chan *Peer)
	torrent.blockRequestChannel = make(chan *BlockRequestMessage)
	torrent.fileWriteDone = make(chan struct{})
	torrent.decrementPeerCount = make(chan struct{})
	torrent.knownPeers = make(map[string]struct{})
	// Serve events until the data is complete and every announced peer has
	// been accounted for.
	for torrent.completedPieces != len(torrent.pieces) || torrent.totalPeerCount != 0 {
		select {
		case havePieceMessage := <-torrent.havePieceChannel:
			torrent.handleHaveMessage(havePieceMessage)
		case bitfieldMessage := <-torrent.bitfieldChannel:
			torrent.handleBitfieldMessage(bitfieldMessage)
		case pieceMessage := <-torrent.pieceChannel:
			torrent.handlePieceMessage(pieceMessage)
		case blockRequestMessage := <-torrent.blockRequestChannel:
			torrent.handleBlockRequestMessage(blockRequestMessage)
		case peer := <-torrent.addPeerChannel:
			torrent.handleAddPeer(peer)
		case peer := <-torrent.removePeerChannel:
			torrent.handleRemovePeer(peer)
		case <-torrent.fileWriteDone:
			torrent.pendingFileWrites--
		case <-torrent.decrementPeerCount:
			torrent.totalPeerCount--
		case peers := <-torrent.peersChannel:
			torrent.connectToPeers(peers)
		case trackerID := <-torrent.activeTrackerChannel:
			torrent.activeTrackers = append(torrent.activeTrackers, trackerID)
			fmt.Printf("[%s] %d active trackers\n", torrent.name, len(torrent.activeTrackers))
		case trackerID := <-torrent.requestAnnounceDataChannel:
			// Reply asynchronously so a blocked tracker cannot deadlock
			// this loop.
			go func(channel chan *TrackerRequestData, data *TrackerRequestData) {
				channel <- data
			}(torrent.trackers[trackerID].announceChannel, torrent.getTrackerRequestData(TrackerEventNone))
		}
	}
	torrent.stopTrackers()
	// Drain outstanding asynchronous file writes before reporting done.
	if torrent.pendingFileWrites != 0 {
		fmt.Printf("[%s] Waiting for %d pending file writes...\n", torrent.name, torrent.pendingFileWrites)
		for torrent.pendingFileWrites != 0 {
			<-torrent.fileWriteDone
			torrent.pendingFileWrites--
		}
	}
	// Wait for every tracker to confirm it stopped, while still consuming
	// (and discarding) any other messages trackers may send meanwhile.
	if len(torrent.activeTrackers) != 0 {
		fmt.Printf("[%s] Waiting for %d trackers to stop...\n", torrent.name, len(torrent.activeTrackers))
		for len(torrent.activeTrackers) != 0 {
			select {
			case trackerID := <-torrent.stoppedTrackerChannel:
				for k, v := range torrent.activeTrackers {
					if v == trackerID {
						torrent.activeTrackers = append(torrent.activeTrackers[:k], torrent.activeTrackers[k+1:]...)
						break
					}
				}
			// Handle other messages that a Tracker may send
			case <-torrent.activeTrackerChannel:
			case <-torrent.peersChannel:
			case <-torrent.requestAnnounceDataChannel:
			}
		}
	}
}
// checkPieceHash reports whether data's SHA-1 digest matches the expected
// hash of the piece at pieceIndex.
func (torrent *Torrent) checkPieceHash(data []byte, pieceIndex uint32) bool {
	sum := sha1.Sum(data)
	return bytes.Equal(sum[:], []byte(torrent.pieces[pieceIndex].hash))
}
// getPieceLength returns the byte length of piece pieceIndex. Every piece is
// pieceLength bytes except possibly the last, which only holds whatever
// remains of totalSize.
func (torrent *Torrent) getPieceLength(pieceIndex uint32) int64 {
	last := uint32(len(torrent.pieces)) - 1
	if pieceIndex == last {
		if rem := torrent.totalSize % torrent.pieceLength; rem != 0 {
			return rem
		}
	}
	return torrent.pieceLength
}
func (torrent *Torrent) | () int64 {
if res := torrent.totalSize % torrent.pieceLength; res != 0 {
return res
}
return torrent.pieceLength
}
// getDownloadedSize returns the total number of bytes covered by pieces
// that have been downloaded and hash-verified.
func (torrent *Torrent) getDownloadedSize() int64 {
	var total int64
	for idx := range torrent.pieces {
		if !torrent.pieces[idx].done {
			continue
		}
		total += torrent.getPieceLength(uint32(idx))
	}
	return total
}
// connectToPeers spawns outbound connections for peers returned by a tracker.
// The tracker response is either a compact string (6 bytes per peer: 4-byte
// IPv4 address followed by a 2-byte big-endian port) or a list of bencoded
// dictionaries with "ip", "port" and "peer id" keys. Peers already present
// in knownPeers are skipped and totalPeerCount is adjusted accordingly.
func (torrent *Torrent) connectToPeers(peers interface{}) {
	switch peers := peers.(type) {
	case string:
		// Compact form: optimistically count every entry, then decrement
		// below for duplicates.
		torrent.totalPeerCount += len(peers) / 6
		for i := 0; i < len(peers); i += 6 {
			peer := NewPeer(torrent)
			peer.ip = net.IPv4(peers[i], peers[i+1], peers[i+2], peers[i+3])
			if _, known := torrent.knownPeers[peer.ip.String()]; known {
				torrent.totalPeerCount--
				continue
			}
			torrent.knownPeers[peer.ip.String()] = struct{}{}
			peer.port = binary.BigEndian.Uint16([]byte(peers[i+4:]))
			go peer.connect()
		}
	case []interface{}:
		// Dictionary form: resolve each peer's hostname/IP first; failures
		// and duplicates reduce the expected peer count.
		torrent.totalPeerCount += len(peers)
		for _, dict := range peers {
			dict := dict.(map[string]interface{})
			addr, err := net.ResolveIPAddr("ip", dict["ip"].(string))
			if err != nil {
				torrent.totalPeerCount--
				continue
			}
			if _, known := torrent.knownPeers[addr.IP.String()]; known {
				torrent.totalPeerCount--
				continue
			}
			torrent.knownPeers[addr.IP.String()] = struct{}{}
			peer := NewPeer(torrent)
			peer.id = dict["peer id"].(string)
			peer.ip = addr.IP
			peer.port = uint16(dict["port"].(int64))
			go peer.connect()
		}
	}
}
// handleHaveMessage records that the sending peer now has the announced piece.
func (torrent *Torrent) handleHaveMessage(havePieceMessage *HavePieceMessage) {
	havePieceMessage.from.pieces[havePieceMessage.index] = struct{}{}
	// torrent.requestPieceFromPeer(peer)
}
// handleBitfieldMessage decodes a peer's availability bitfield (one bit per
// piece, most significant bit first), records every advertised piece, and
// then immediately tries to request work from that peer.
func (torrent *Torrent) handleBitfieldMessage(bitfieldMessage *BitfieldMessage) {
	peer := bitfieldMessage.from
	pieceCount := uint32(len(torrent.pieces))
	var index uint32
	for _, octet := range bitfieldMessage.data {
		for mask := byte(0x80); mask != 0; mask >>= 1 {
			if octet&mask == 0 {
				index++
				continue
			}
			// Trailing pad bits past the last piece are ignored.
			if index >= pieceCount {
				break
			}
			peer.pieces[index] = struct{}{}
			index++
		}
	}
	torrent.requestPieceFromPeer(peer)
}
// handlePieceMessage processes a fully downloaded piece: verifies its hash,
// marks it done, dispatches asynchronous writes for each file the piece
// spans, and either announces HAVE to all peers or, once every piece is
// done, tells the peers to shut down and announces completion.
func (torrent *Torrent) handlePieceMessage(pieceMessage *PieceMessage) {
	// Ignore duplicates arriving from racing peers.
	if torrent.pieces[pieceMessage.index].done {
		return
	}
	// Bad hash: disconnect the peer via its done channel.
	if !torrent.checkPieceHash(pieceMessage.data, pieceMessage.index) {
		pieceMessage.from.done <- struct{}{}
		return
	}
	torrent.pieces[pieceMessage.index].done = true
	torrent.completedPieces++
	if torrent.completedPieces != len(torrent.pieces) {
		torrent.requestPieceFromPeer(pieceMessage.from)
	}
	// A piece may straddle several files. files is ordered by begin offset,
	// so we can stop at the first file starting past beginPos.
	beginPos := int64(pieceMessage.index) * torrent.pieceLength
	for k := range torrent.files {
		file := &torrent.files[k]
		if beginPos < file.begin {
			break
		}
		fileEnd := file.begin + file.length
		if beginPos >= fileEnd {
			continue
		}
		amountWrite := fileEnd - beginPos
		if amountWrite > int64(len(pieceMessage.data)) {
			amountWrite = int64(len(pieceMessage.data))
		}
		// The file-writer goroutine performs the write and later signals
		// fileWriteDone so download() can track pendingFileWrites.
		torrent.pendingFileWrites++
		go func(path string, offset int64, data []byte) {
			fileWriterChannel <- &FileWriterMessage{path, offset, data, torrent}
		}(file.path, beginPos-file.begin, pieceMessage.data[:amountWrite])
		pieceMessage.data = pieceMessage.data[amountWrite:]
		beginPos += amountWrite
	}
	fmt.Printf("[%s] Downloaded: %.2f%c\n", torrent.name, float64(torrent.completedPieces)*100/float64(len(torrent.pieces)), '%')
	if torrent.completedPieces == len(torrent.pieces) {
		for _, peer := range torrent.activePeers {
			peer.done <- struct{}{}
		}
		torrent.announceToTrackers(TrackerEventCompleted)
	} else {
		for _, peer := range torrent.activePeers {
			peer.sendHaveChannel <- pieceMessage.index
		}
	}
}
// requestPieceFromPeer asks peer for the least-busy unfinished piece it
// advertises. A completely idle piece is requested immediately; otherwise
// the piece with the lowest busyness wins. If the peer has nothing useful,
// no request is made.
func (torrent *Torrent) requestPieceFromPeer(peer *Peer) {
	best := math.MaxInt32
	var bestIndex uint32
	total := uint32(len(torrent.pieces))
	for i := uint32(0); i < total; i++ {
		piece := &torrent.pieces[i]
		if piece.done {
			continue
		}
		if _, has := peer.pieces[i]; !has {
			continue
		}
		if piece.busyness == 0 {
			// Nobody is working on this piece yet — take it right away.
			piece.busyness = 1
			peer.requestPieceChannel <- i
			return
		}
		if piece.busyness < best {
			best = piece.busyness
			bestIndex = i
		}
	}
	if best != math.MaxInt32 {
		torrent.pieces[bestIndex].busyness++
		peer.requestPieceChannel <- bestIndex
	}
}
// handleAddPeer registers a newly connected peer. If the download completed
// while the peer was still connecting, it is told to shut down right away.
func (torrent *Torrent) handleAddPeer(peer *Peer) {
	torrent.activePeers = append(torrent.activePeers, peer)
	fmt.Printf("[%s] %d active peers\n", torrent.name, len(torrent.activePeers))
	if torrent.completedPieces == len(torrent.pieces) {
		peer.done <- struct{}{}
	}
}
// handleRemovePeer drops a peer from the active list (if present) and
// signals its done channel so the peer goroutine can exit.
func (torrent *Torrent) handleRemovePeer(peer *Peer) {
	for k, p := range torrent.activePeers {
		if p == peer {
			torrent.activePeers = append(torrent.activePeers[:k], torrent.activePeers[k+1:]...)
			break
		}
	}
	peer.done <- struct{}{}
	fmt.Printf("[%s] %d active peers\n", torrent.name, len(torrent.activePeers))
}
// handleBlockRequestMessage serves an upload request for a block of a piece
// we already hold. Invalid requests (unknown piece, piece not done, range
// past the piece end) disconnect the requesting peer. The block may span
// several files on disk, which are read sequentially into one buffer.
func (torrent *Torrent) handleBlockRequestMessage(blockRequestMessage *BlockRequestMessage) {
	if blockRequestMessage.index >= uint32(len(torrent.pieces)) {
		blockRequestMessage.from.done <- struct{}{}
		return
	}
	if !torrent.pieces[blockRequestMessage.index].done {
		blockRequestMessage.from.done <- struct{}{}
		return
	}
	end := int64(blockRequestMessage.begin) + int64(blockRequestMessage.length)
	if end > torrent.getPieceLength(blockRequestMessage.index) {
		blockRequestMessage.from.done <- struct{}{}
		return
	}
	block := make([]byte, blockRequestMessage.length)
	var pos int64 // bytes of the block filled so far
	// Absolute offset of the requested range within the whole torrent.
	fileOffset := int64(blockRequestMessage.index)*torrent.pieceLength + int64(blockRequestMessage.begin)
	for k := range torrent.files {
		file := &torrent.files[k]
		cursor := fileOffset + pos
		// files is ordered by begin offset; a file starting past the
		// cursor means the block is fully read.
		if cursor < file.begin {
			break
		}
		fileEnd := file.begin + file.length
		if cursor >= fileEnd {
			continue
		}
		// Read as much as this file contributes, capped at what remains
		// of the requested length.
		n := fileEnd - cursor
		if n > int64(blockRequestMessage.length)-pos {
			n = int64(blockRequestMessage.length) - pos
		}
		handle, err := os.OpenFile(file.path, os.O_RDONLY, 0600)
		if err != nil {
			// NOTE(review): returning here sends the peer nothing at all;
			// confirm the peer side times out rather than hanging.
			return
		}
		handle.Seek(cursor-file.begin, os.SEEK_SET)
		end := pos + n
		for pos < end {
			count, err := handle.Read(block[pos:end])
			if err != nil {
				handle.Close()
				return
			}
			pos += int64(count)
		}
		handle.Close()
	}
	torrent.uploaded += uint64(blockRequestMessage.length)
	blockRequestMessage.from.sendPieceBlockChannel <- &BlockMessage{
		blockRequestMessage.index,
		blockRequestMessage.begin,
		block,
	}
}
// getInfoHash returns the 20-byte SHA-1 info hash, stored at bytes 28..47
// of the prebuilt handshake (after pstrlen, pstr and the reserved bytes).
func (torrent *Torrent) getInfoHash() []byte {
	return torrent.handshake[28:48]
}
| getLastPieceLength | identifier_name |
project_config.py | # -*- coding: utf-8 -*-
# author: Guixinyu
# create Time: 2019-10-17 18:15:39
from PyQt5.QtWidgets import *
import sys
import first
import fileselect
import shutil
from first import Ui_MainWindow
import AddLibraryPath
import Enterlibraries
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import os
import re
import subprocess
import time
# Worker thread that polls the build output (.o files) to report progress
class BackendTread(QThread):
    """Worker thread that tracks build progress.

    Periodically counts the ``.o`` object files under the current working
    directory and emits the count via ``setvalue`` so the UI progress bar can
    update.  Runs until ``working`` is cleared or the count reaches the
    module-global ``NUM`` target (set by initBar before the thread starts).
    """
    setvalue = pyqtSignal(int)

    def __init__(self, parent=None):
        super(BackendTread, self).__init__(parent)
        self.working = True  # cleared externally (or via stopSig) to stop run()

    def stopSig(self):
        """Ask the polling loop to terminate."""
        self.working = False

    def run(self):
        while VAL < NUM and self.working:
            num = 0
            for path, dirs, files in os.walk(os.getcwd()):
                for file in files:
                    if file.endswith('.o'):
                        num = num + 1
            self.setvalue.emit(num)
            # Throttle the filesystem scan: the original tight loop pegged a
            # CPU core for the whole build.
            self.msleep(200)
# Worker thread that runs the compile (make) subprocess
class BackendTread1(QThread):
    """Worker thread that runs the make subprocess and relays its output.

    stdout lines are emitted one at a time via ``startcompile1``; stderr is
    redirected into ``conerr.err`` for EndResult to scan afterwards.
    ``endSig`` fires once the build (including post-build steps) finishes.
    """
    startcompile1 = pyqtSignal(str)
    endSig = pyqtSignal()
    def __init__(self, parent=None):
        super(BackendTread1, self).__init__(parent)
    def startCom(self):
        # Launch make using the module-global command line set by StartCompile.
        self.process = subprocess.Popen(cmd1)
    def run(self):
        # Collect stderr in a file so errors survive until EndResult reads them.
        f=open('conerr.err','w+')
        self.process = subprocess.Popen(cmd1,stdout=subprocess.PIPE,stderr=f,bufsize=1)
        '''self.bt=BackendTread()
        self.bt.startcompile.connect(self.PrintConsole)
        self.bt.start()'''
        self.sleep(3)
        while self.process.poll() is None:
            # NOTE(review): decode('gbk') assumes a Chinese Windows console
            # code page — confirm for other locales.
            r = self.process.stdout.readline().decode('gbk')
            if r:
                self.startcompile1.emit(r)
            if 'tool>pause'in r:
                break
        # The AL2 post-build tool waits for a key press before exiting, so the
        # make process tree is killed here to force the build to finish.
        os.system(r"taskkill /f /t /im make.exe")
        self.endSig.emit()
class basePage(QMainWindow,Ui_MainWindow):
    def __init__(self):
        """Construct the main window.

        Loads the Qt Designer UI, wires menu and toolbar actions, installs a
        custom delete context menu on the include/exclude path lists,
        initializes the stacked pages and library-selection dialogs, and
        prepares the status-bar label.
        """
        super(basePage, self).__init__()
        self.setupUi(self)
        self.startpath=os.getcwd()
        self.actionbuild.triggered.connect(self.checkFLAG)
        #self.menuclean.triggered.connect(self.CleanProject)
        self.actionclean.triggered.connect(self.CleanProject)
        self.actionopen_project.triggered.connect(self.ChooseProDir)
        self.actionsave_project.triggered.connect(self.modifyFLAG)
        #self.quitApp.triggered.connect(QCoreApplication.instance().quit) # first way to close the app
        self.actionexit.triggered.connect(qApp.quit)# second way to close the app
        # Toolbar: open project, stop build, exit
        self.tb1=self.addToolBar('tool')
        actionopen1=QAction(QIcon('./Compile/file.png'),"打开工程",self)
        self.tb1.addAction(actionopen1)
        actionopen1.triggered.connect(self.ChooseProDir)
        self.tb1.addSeparator()
        actionstop=QAction(QIcon('./Compile/stop.png'),"停止",self)
        self.tb1.addAction(actionstop)
        actionstop.triggered.connect(self.KillProcess)
        self.tb1.addSeparator()
        actionExit=QAction(QIcon('./Compile/exit.png'),"退出",self)
        self.tb1.addAction(actionExit)
        actionExit.triggered.connect(qApp.quit)
        ## Right-click menu for the path lists (earlier experiments kept below)
        #self.includeList.setContextMenuPolicy(Qt.CustomContextMenu)
        #self.includeList.customContextMenuRequested.connect(self.showRightMenu)
        #self.includeList.customContextMenuRequested[QPoint].connect(self.remove)
        #self.f=""
        #self.includeList.clicked.connect(self.check)
        self.includeList.setContextMenuPolicy(Qt.CustomContextMenu)
        self.excludeList.setContextMenuPolicy(Qt.CustomContextMenu)
        self.contextMenu=QMenu(self)
        self.actionA=self.contextMenu.addAction("删除")
        self.actionA.triggered.connect(self.remove)
        self.includeList.customContextMenuRequested.connect(lambda :self.showContextMenu(1))
        #self.contextMenu.triggered[QAction].connect(self.remove)
        #self.includeList.customContextMenuRequested[QPoint].connect(self.remove1)# [] names the argument type Qt passes in
        self.excludeList.customContextMenuRequested.connect(lambda :self.showContextMenu(2))
        #self.excludeList.customContextMenuRequested[QPoint].connect(self.remove2)
        self.delPath1.clicked.connect(self.includeList.clear)
        self.delPath2.clicked.connect(self.excludeList.clear)
        self.addPath1.clicked.connect(lambda :self.ShowDialog(1))
        self.addPath2.clicked.connect(self.AddExpath)
        self.fileselect = fileselect.Ui_Dialog()
        # Stacked pages: side list drives which page is shown
        self.listWidget.currentRowChanged.connect(self.display)
        # Library page setup; both lists allow multi-selection
        self.initLibraryWindow()
        self.Llist.setSelectionMode(3)
        self.llist.setSelectionMode(3)
        #self.add2.clidken.connect(self.ShowLWindow)
        # Status-bar widget (text replaced by initUI/initBar)
        self.barlabel = QLabel('barlabel')
        #self.initDialog()
        #self.fileselect.buttonBox
        #print(self.fileselect.treeWidget.currentItem().text(0))
    def initUI(self):
        """Populate every widget from the currently loaded project config.

        Expects ChooseProDir to have set the project attributes
        (DebugName, HighTecDir, CCFLAG, LINKFLAG, includepath, ...).
        """
        self.includeList.clear()
        self.excludeList.clear()
        self.Llist.clear()
        self.llist.clear()
        self.ProjectName.setText(self.DebugName)
        self.HithTecDir.setText(self.HighTecDir)
        self.GCCFLAGName.setText(self.CCFLAG)
        self.LINKFLAGName.setText(self.LINKFLAG)
        self.ProjectName_2.setText(self.PROJECTDIR)
        self.ProjectName_2.setEnabled(False)
        self.barlabel.setText('准备中')
        self.statusBar.addPermanentWidget(self.barlabel)
        self.Result.clear()
        # Only non-empty lists are added; addItems on None would raise.
        if self.includepath:
            self.includeList.addItems(self.includepath)
        if self.excludefiles:
            self.excludeList.addItems(self.excludefiles)
        if self.LibraryPath:
            self.Llist.addItems(self.LibraryPath)
        if self.libraties:
            self.llist.addItems(self.libraties)
    def display(self,index):
        """Switch the stacked widget to the page selected in the side list."""
        self.index=index  # remembered so remove() knows which page is visible
        self.stackedWidget.setCurrentIndex(index)
    def initLibraryWindow(self):
        """Create the two modal helper windows for library configuration:
        LWin for library search paths and lWin for library names, and wire
        their OK/Cancel buttons."""
        self.LWUI=AddLibraryPath.Ui_LSelect()
        self.LWin=QWidget()
        self.LWin.setWindowModality(Qt.ApplicationModal)# modal dialog
        self.LWUI.setupUi(self.LWin)
        self.LWUI.LibraryP.setText("")
        self.add1.clicked.connect(self.LWin.show)
        self.LWUI.L_Cancel.clicked.connect(self.LWin.close)
        self.LWUI.L_Workspace.clicked.connect(lambda: self.ShowDialog(1))
        self.LWUI.L_OK.clicked.connect(self.AddLibraryPath)
        self.del1.clicked.connect(self.DelLibraryPath)
        self.lWUI = Enterlibraries.Ui_LSelect()
        self.lWin = QWidget()
        self.lWin.setWindowModality(Qt.ApplicationModal)
        self.lWUI.setupUi(self.lWin)
        # NOTE(review): clears LWUI's field again — probably meant
        # self.lWUI here; verify against the Enterlibraries UI.
        self.LWUI.LibraryP.setText("")
        self.add2.clicked.connect(self.lWin.show)
        self.lWUI.l_OK.clicked.connect(self.AddLibraries)
        self.lWUI.l_Cancel.clicked.connect(self.lWin.close)
        self.del2.clicked.connect(self.DelLibraries)
    def KillProcess(self):
        """Forcefully terminate the running make process tree (Windows only)
        and note the user-initiated abort in the console pane."""
        os.system(r"taskkill /f /t /im make.exe")
        self.Result.append('用户终止执行')
def ChooseProDir(self):
dir=QFileDialog.getExistingDirectory()
dir=dir.replace('/','\\')
self.ProjectName_2.setText(dir)
if dir!='':
os.chdir(dir)
import automake_config as ac
(DebugName, HighTecDir, CCFLAG, LINKFLAG, includepath, excludefiles, g_except_dir_list,
g_except_file_list,LibraryPath,libraties) = ac.maininit()
self.includepath=includepath
self.excludefiles=excludefiles
self.DebugName=DebugName
self.CCFLAG=CCFLAG
self.LINKFLAG=LINKFLAG
self.HighTecDir=HighTecDir
self.PROJECTDIR=dir
self.LibraryPath=LibraryPath
self.libraties=libraties
#print(os.getcwd())
self.AllPath=ac.FindAllPath(dir)
#print(self.AllPath)
self.initDialog()
#对Dialog按钮的设置
self.fileselect.buttonBox.accepted.connect(self.GetPath)
self.fileselect.treeWidget.setSelectionMode(3)
self.fileselect.buttonBox.rejected.connect(self.Cleartree)
#self.adds(dir,self.child0)
a.initUI()
    def initDialog(self):
        """Build the file-selection dialog tree rooted at the project name,
        populate it from the current working directory, and append a fixed
        TOOLS node."""
        self.di = QDialog()
        fileselect1 = self.fileselect
        fileselect1.setupUi(self.di)
        # self.di.show()
        child0 = QTreeWidgetItem(fileselect1.treeWidget)
        child0.setText(0, self.DebugName)
        child0.setIcon(0, QIcon('./Compile/01.png'))
        self.adds(os.getcwd(), child0)
        child1 = QTreeWidgetItem(child0)
        child1.setText(0, 'TOOLS')
        child1.setIcon(0, QIcon('./Compile/01.png'))
        # Expand all nodes so the project structure is visible immediately
        fileselect1.treeWidget.expandAll()
    def showContextMenu(self,id):
        """Show the delete context menu at the cursor when either path list
        has a selection.

        ``id`` identifies the originating list (1=include, 2=exclude) but is
        currently unused: remove() decides by the visible page instead.
        """
        items1 = self.includeList.selectedIndexes()
        items2 = self.excludeList.selectedIndexes()
        if items1 or items2:
            self.contextMenu.show()
            self.contextMenu.exec_(QCursor.pos())  # display at the mouse position
def remove(self):
items1 = self.includeList.selectedIndexes()
items2 = self.excludeList.selectedIndexes()
if self.index==3:
if items1:
for jj in items1:
self.includeList.removeItemWidget(self.includeList.takeItem(jj.row()))
if self.index == 4:
if items2:
for jj in items2:
self.excludeList.removeItemWidget(self.excludeList.takeItem(jj.row()))
def EndResult(self):
print(os.getcwd())
f=open('./conerr.err','r')
lines=f.readlines()
j=0
for ii in lines:
if "error:"in ii:
self.Result.append("<font color=\"#FF0000\">%s</font> "%ii)
j=1
if j!=1:
self.Result.append("<font color=\"#FF0000\">finished!!!!!!!!</font> ")
self.barlabel.setText('已完成')
f.close()
os.remove('./conerr.err')
self.backend.working=False
self.statusBar.removeWidget(self.progressBar)
self.barlabel.setText('准备中')
os.chdir(self.ProjectName_2.text())
def initBar(self):
global NUM
self.progressBar = QProgressBar()
self.Result.clear()
self.barlabel.setText('正在编译:')
self.statusBar.addPermanentWidget(self.progressBar, stretch=2)
f = open('./Default/Default.objectlist','r')
lines = f.readlines()
f.close()
NUM=len(lines)
#self.progressBar.setGeometry(0,0,100,5)
self.progressBar.setRange(0,len(lines))
global VAL
VAL=0
def SetProgressBarVal(self,val):
#global VAL
n=VAL+val
self.progressBar.setValue(n)
def StartCompile(self,Hdir):
global cmd1
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
cmd1 = r'''%s\bin\make -j8 all''' % Hdir
#cmd1 = self.startpath+'\Compile\compile.bat '+Hdir
# cmd1='cd ..'
# print(includepath)
# self.process =subprocess.Popen(self.startpath+ '\Compile\compile.bat ' + cmd1)
os.chdir(self.ProjectName_2.text() + '/Default')
#f=open('ccccc.txt','w')
#self.process = subprocess.Popen(cmd1)
self.backend1 = BackendTread1()
self.backend1.startcompile1.connect(self.PrintConsole)
self.backend1.endSig.connect(self.EndResult)
#time.sleep(3)
self.backend1.start()
self.backend = BackendTread()
self.backend.setvalue.connect(self.SetProgressBarVal)
#self.backend.endSig.connect(self.EndResult)
# time.sleep(3)
self.backend.start()
'''self.process = subprocess.call(cmd1)
self.process.wait()
f= open('console.log','r')
lines =f.readlines()
for ii in lines:
if 'error:'in ii:
self.Result.insertText(ii+'\n')'''
#os.chdir(self.ProjectName_2.text())
def PrintConsole(self,r):
#print(2222)
# None表示正在执行中
#r = self.process.stdout.readline()
#self.Result.append(r)
self.Result.append("<font color=\"#000000\">%s</font> "%r)
#self.backend.stopSig()
# 可修改输出方式,比如控制台、文件等
#print(self.process.poll())
# 重定向错误输出
def checkFLAG(self):
CCFLAG1 = self.GCCFLAGName.toPlainText()
#CCFLAG1 = CCFLAG1[0:len(CCFLAG1) - 1]
LINKFLAG1 = self.LINKFLAGName.toPlainText()
#LINKFLAG1 = LINKFLAG1[0:len(LINKFLAG1) - 1]
Hdir = self.HithTecDir.text()
DebugName1 = self.ProjectName.text()
inn=self.includeList.count()
inpath=[]
exn = self.excludeList.count()
expath = []
for i in range(inn):
inpath.append(self.includeList.item(i).text())
for i in range(exn):
expath.append(self.excludeList.item(i).text())
#print(CCFLAG1)
# POSTBUILD1 = pb.get()
# Hdir = Hdir[0:len(Hdir) - 1]
#if CCFLAG1 != self.CCFLAG or self.LINKFLAG != LINKFLAG1 or Hdir != self.HighTecDir or DebugName1 != self.DebugName or expath != self.excludefiles or inpath != self.includepath:
self.modifyFLAG()
'''for i in range(0,len(CCFALG)):
if CCFALG1[i]!=CCFALG[i]:
print(i)'''
cmd=self.startpath+'\Compile\python '+self.startpath+"\Compile/automake.py "+self.startpath
a=subprocess.call(cmd)
self.initBar()
#a.wait()
#cmd1 = Hdir + r'\bin\make'
#self.backend.update_date.connect(self.handleDisplay)
try:
self.StartCompile(Hdir)
except BaseException as e:
print(333333)
f=open('cons.log','w')
f.write(e.args)
f.close()
#def
def modifyFLAG(self):
# f=open('./TOOLS/Compile/automake_config.py','r',encoding='utf-8')
CCFLAGNOW = self.GCCFLAGName.toPlainText()
# CCFLAG1 = CCFLAG1[0:len(CCFLAG1) - 1]
LINKFLAGNOW = self.LINKFLAGName.toPlainText()
# LINKFLAG1 = LINKFLAG1[0:len(LINKFLAG1) - 1]
HighTecDirNOW = self.HithTecDir.text()
DebugNameNOW = self.ProjectName.text()
inn = self.includeList.count()
inpathNOW = []
exn = self.excludeList.count()
expathNOW = []
Ln = self.Llist.count()
LnNOW = []
ln = self.llist.count()
lnNOW = []
try:
for i in range(inn):
inpathNOW.append(self.includeList.item(i).text())
for i in range(exn):
expathNOW.append(self.excludeList.item(i).text())
f = open('./py.pyconfig', 'w', encoding='utf-8')
# lines=f.readlines()
tLink = re.split(' ',LINKFLAGNOW)
Linkchange=''
for iii in tLink:
if '-L' not in iii and '-l:' not in iii:
Linkchange+=iii+' '
for i in range(Ln):
p = re.split('{workspace}/',self.Llist.item(i).text())
#print(p)
if len(p)==1:
Linkchange+='''-L"'''+os.path.abspath(p[0])+'''" '''
else:
Linkchange += '''-L"''' + os.path.abspath(p[1]) + '''" '''
LnNOW.append(self.Llist.item(i).text())
for i in range(ln):
Linkchange+='-l'+self.llist.item(i).text()+' '
lnNOW.append(self.llist.item(i).text())
f.write('CCFLAG=' + CCFLAGNOW + "\n")
f.write('LINKFLAG=' + Linkchange + "\n")
f.write('HighTecDir=' + HighTecDirNOW + "\n")
f.write('DebugName=' + DebugNameNOW + "\n")
aa = "includepath="
for a in inpathNOW:
if a != "":
aa += a + ','
f.write(aa + '\n')
bb = "excludefiles="
for b in expathNOW:
if b != "":
bb += b + ','
f.write(bb + '\n')
cc = "LibraryPath="
for c in LnNOW:
if c != "":
cc += c + ','
dd = "libraties="
for d in lnNOW:
if d != "":
dd += d + ','
f.write(cc + '\n')
f.write(dd + '\n')
f.close()
self.LINKFLAGName.setText('')
self.LINKFLAGName.setText(Linkchange)
except:
f.close()
def CleanProject(self):
print('Cleanning project...... ')
if os.path.exists('./Default'):
shutil.rmtree('./Default')
if os.path.exists('./delivery'):
shutil.rmtree('./delivery')
QMessageBox.about(self, "消息", "Clean has finished!")
#tkinter.messagebox.showinfo('提示','Clean has finished!')
print('Clean has finished!')
def testaa(self):
print("1")
def CloseTools(self):
print(1)
def delPath(self,id):
if id==1:
self.includeList.clear()
if id == 2:
self.excludeList.clear()
def ShowDialog(self,id):
#self.di=QDialog()
#fileselect1 = fileselect.Ui_Dialog()
#fileselect1.setupUi(self.di)
self.idPath=id
self.di.exec()
# for path,dir,files in os.walk(os.getcwd()):
# for file in files:
# i=i+1
# if file.endswith('.h') and "TOOLS" not in path:
# if "TOOLS" not in path:
# a='child'+str(i)
# a=QTreeWidgetItem(child0)
def adds(self,paths, root):
if os.path.isdir(paths):
list = os.listdir(paths)
for i in list:
# j=0
# for path1 ,dirs,files in os.walk(os.path.join(paths,i)):
# for file in files:
# if file.endswith('.h') or file.endswith('.c'):
# j=1
if 'Default' not in i and '.' not in i and '_pycache_' not in os.path.join(paths,i) and os.path.join(
paths, i) in self.AllPath:
# self.adds(os.path.join(paths, i),root)
if os.path.isdir(os.path.join(paths, i)):
childs = QTreeWidgetItem(root)
childs.setText(0, i)
childs.setIcon(0, QIcon('./Compile/01.png'))
self.adds(os.path.join(paths, i), childs)
#注意:是对QDialog对象show(),并不是自己生成的Ui_Dialog对象 show(),开始没有写self.di,弹窗总是一闪而过,类的的函数加上self之后成功
#print(QFileDialog.getExistingDirectory(None, "请选择要添加的文件", os.getcwd()))
def GetPath(self):
if self.index==3:
pathlist = self.fileselect.treeWidget.selectedItems()
# pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
# print(pathlist.value().childCount())
tempinclude = []
for pathss in pathlist:
tpathss = pathss
tp = ""
while 1:
if tpathss.text(0)!=self.DebugName:
tp = tpathss.text(0) + tp
if tpathss.parent():
tpathss = tpathss.parent()
tp = '/' + tp
else:
break
if tp not in tempinclude and tp!="":
tempinclude.append(tp)
pathss.setSelected(False)
self.includeList.addItems(sorted(tempinclude))
elif self.idPath==2:
pathlist = self.fileselect.treeWidget.selectedItems()
#pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
#print(pathlist.value().childCount())
tempexclude=[]
for pathss in pathlist:
tpathss=pathss
tp=""
while 1:
if tpathss.text(0) != self.DebugName:
tp = tpathss.text(0)+tp
if tpathss.parent():
tpathss=tpathss.parent()
tp='/'+tp
else:
break
if tp not in tempexclude and tp!="":
tempexclude.append(tp)
self.excludeList.addItems(sorted(tempexclude))
elif self.index==2:
pathlist = self.fileselect.treeWidget.selectedItems()
# pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
# print(pathlist.value().childCount())
tempexclude = []
for pathss in pathlist:
tpathss = pathss
tp = ""
while 1:
if tpathss.text(0) != self.DebugName:
tp = tpathss.text(0) + tp
if tpathss.parent():
tpathss = tpathss.parent()
tp = '/' + tp
else:
break
if tp not in tempexclude and tp != "":
tempexclude.append("{workspace}"+tp)
pathss.setSelected(False)
self.Llist.addItems(tempexclude)
self.LWin.close()#如果是通过workspace选的直接关掉选择框
self.di.close()
'''for selectedPath in pathlist:
print(selectedPath.text(0))
print(pathlist)'''
#if pathlist.value().checkState(0) == Qt.Checked:
#n=self.fileselect.treeWidget.topLevelItemCount()
'''while pathlist.value():
if pathlist.value().checkState(0)==Qt.Checked:
print(pathlist.value.text(0))
break'''
def Cleartree(self):
pathlist = self.fileselect.treeWidget.selectedItems()
for pathss in pathlist:
pathss.setSelected(False)
self.di.close()
def AddExpath(self):
dir1,file1 = QFileDialog.getOpenFileNames (self,'选择过滤文件',os.getcwd(),"C FILES(*.c)")
#print(dir1,file1)
for ii in dir1:
if ii!='' :
dir2 = re.split(os.getcwd().replace('\\','/'),ii)[1]
self.excludeList.addItem(dir2)
#Library的具体操作
def AddLibraryPath(self):
txt=self.LWUI.LibraryP.text()
if txt:
self.Llist.addItem(txt)
self.LWin.close()
def AddLibraries(self):
txt = self.lWUI.libraries.text()
if txt:
self.llist.addItem(txt)
self.lWin.close()
def DelLibraryPath(self):
items1 = self.Llist.selectedIndexes()
if items1:
for jj in items1:
self.Llist.removeItemWidget(self.Llist.takeItem(jj.row()))
def DelLibraries(self):
items1 = self.llist.selectedIndexes()
if items1:
for jj in items1:
self.llist.removeItemWidget(self.llist.takeItem(jj.row()))
if __name__ == '__main__':
cmd1 = ""
NUM=0
VAL=0
app = QApplication(sys.argv)
app.setWindowIcon(QIcon('./Compile/mainwindowIcon.png'))
a=basePage()
a.ChooseProDir()
a.show()
#进入程序的主循环,并通过exit函数确保主循环安全结束
sys.exit(app.exec_()) | conditional_block | ||
project_config.py | # -*- coding: utf-8 -*-
# author: Guixinyu
# create Time: 2019-10-17 18:15:39
from PyQt5.QtWidgets import *
import sys
import first
import fileselect
import shutil
from first import Ui_MainWindow
import AddLibraryPath
import Enterlibraries
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import os
import re
import subprocess
import time
#读取log的线程
class BackendTread(QThread):
setvalue = |
class BackendTread1(QThread):
startcompile1 = pyqtSignal(str)
endSig = pyqtSignal()
def __init__(self, parent=None):
super(BackendTread1, self).__init__(parent)
def startCom(self):
self.process = subprocess.Popen(cmd1)
def run(self):
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
'''os.chdir(self.ProjectName_2.text() + '/Default')
self.process = subprocess.call(cmd1)'''
f=open('conerr.err','w+')
self.process = subprocess.Popen(cmd1,stdout=subprocess.PIPE,stderr=f,bufsize=1)
'''self.bt=BackendTread()
self.bt.startcompile.connect(self.PrintConsole)
self.bt.start()'''
self.sleep(3)
while self.process.poll() is None:
#print(1)
r = self.process.stdout.readline().decode('gbk')
if r:
self.startcompile1.emit(r)
if 'tool>pause'in r:
break
os.system(r"taskkill /f /t /im make.exe")#因为在做post-build的时候,al2的工具需要按回车键才能结束进程,因为在这里强制性的使其结束
self.endSig.emit()
class basePage(QMainWindow,Ui_MainWindow):
def __init__(self):
super(basePage, self).__init__()
self.setupUi(self)
self.startpath=os.getcwd()
self.actionbuild.triggered.connect(self.checkFLAG)
#self.menuclean.triggered.connect(self.CleanProject)
self.actionclean.triggered.connect(self.CleanProject)
self.actionopen_project.triggered.connect(self.ChooseProDir)
self.actionsave_project.triggered.connect(self.modifyFLAG)
#self.quitApp.triggered.connect(QCoreApplication.instance().quit) #关闭程序的第一种方式
self.actionexit.triggered.connect(qApp.quit)#关闭程序的第二种方式
#添加工具栏:停止和退出
self.tb1=self.addToolBar('tool')
actionopen1=QAction(QIcon('./Compile/file.png'),"打开工程",self)
self.tb1.addAction(actionopen1)
actionopen1.triggered.connect(self.ChooseProDir)
self.tb1.addSeparator()
actionstop=QAction(QIcon('./Compile/stop.png'),"停止",self)
self.tb1.addAction(actionstop)
actionstop.triggered.connect(self.KillProcess)
self.tb1.addSeparator()
actionExit=QAction(QIcon('./Compile/exit.png'),"退出",self)
self.tb1.addAction(actionExit)
actionExit.triggered.connect(qApp.quit)
##创建右键菜单
#self.includeList.setContextMenuPolicy(Qt.CustomContextMenu)
#self.includeList.customContextMenuRequested.connect(self.showRightMenu)
#self.includeList.customContextMenuRequested[QPoint].connect(self.remove)
#单击一个选项
#self.f=""
#self.includeList.clicked.connect(self.check)
self.includeList.setContextMenuPolicy(Qt.CustomContextMenu)
self.excludeList.setContextMenuPolicy(Qt.CustomContextMenu)
self.contextMenu=QMenu(self)
self.actionA=self.contextMenu.addAction("删除")
self.actionA.triggered.connect(self.remove)
self.includeList.customContextMenuRequested.connect(lambda :self.showContextMenu(1))
#self.contextMenu.triggered[QAction].connect(self.remove)
#self.includeList.customContextMenuRequested[QPoint].connect(self.remove1)#[]里的代表传入的参数,自带的
self.excludeList.customContextMenuRequested.connect(lambda :self.showContextMenu(2))
#self.excludeList.customContextMenuRequested[QPoint].connect(self.remove2) # []里的代表传入的参数,自带的
self.delPath1.clicked.connect(self.includeList.clear)
self.delPath2.clicked.connect(self.excludeList.clear)
self.addPath1.clicked.connect(lambda :self.ShowDialog(1))
self.addPath2.clicked.connect(self.AddExpath)
self.fileselect = fileselect.Ui_Dialog()
#初始化page
self.listWidget.currentRowChanged.connect(self.display)
#Library的初始化
self.initLibraryWindow()
self.Llist.setSelectionMode(3)
self.llist.setSelectionMode(3)
#self.add2.clidken.connect(self.ShowLWindow)
#状态栏的部件
self.barlabel = QLabel('barlabel')
#self.initDialog()
#self.fileselect.buttonBox
#print(self.fileselect.treeWidget.currentItem().text(0))
def initUI(self):
self.includeList.clear()
self.excludeList.clear()
self.Llist.clear()
self.llist.clear()
self.ProjectName.setText(self.DebugName)
self.HithTecDir.setText(self.HighTecDir)
self.GCCFLAGName.setText(self.CCFLAG)
self.LINKFLAGName.setText(self.LINKFLAG)
self.ProjectName_2.setText(self.PROJECTDIR)
self.ProjectName_2.setEnabled(False)
self.barlabel.setText('准备中')
self.statusBar.addPermanentWidget(self.barlabel)
self.Result.clear()
if self.includepath:
#a=1
self.includeList.addItems(self.includepath)
if self.excludefiles:
#a=1
self.excludeList.addItems(self.excludefiles)
if self.LibraryPath:
#a=1
self.Llist.addItems(self.LibraryPath)
if self.libraties:
#a=1
self.llist.addItems(self.libraties)
def display(self,index):
self.index=index
self.stackedWidget.setCurrentIndex(index)
def initLibraryWindow(self):
self.LWUI=AddLibraryPath.Ui_LSelect()
self.LWin=QWidget()
self.LWin.setWindowModality(Qt.ApplicationModal)#设置模态对话框
self.LWUI.setupUi(self.LWin)
self.LWUI.LibraryP.setText("")
self.add1.clicked.connect(self.LWin.show)
self.LWUI.L_Cancel.clicked.connect(self.LWin.close)
self.LWUI.L_Workspace.clicked.connect(lambda: self.ShowDialog(1))
self.LWUI.L_OK.clicked.connect(self.AddLibraryPath)
self.del1.clicked.connect(self.DelLibraryPath)
self.lWUI = Enterlibraries.Ui_LSelect()
self.lWin = QWidget()
self.lWin.setWindowModality(Qt.ApplicationModal)
self.lWUI.setupUi(self.lWin)
self.LWUI.LibraryP.setText("")
self.add2.clicked.connect(self.lWin.show)
self.lWUI.l_OK.clicked.connect(self.AddLibraries)
self.lWUI.l_Cancel.clicked.connect(self.lWin.close)
self.del2.clicked.connect(self.DelLibraries)
def KillProcess(self):
#self.process.kill()
#self.process.pid
os.system(r"taskkill /f /t /im make.exe")
self.Result.append('用户终止执行')
def ChooseProDir(self):
dir=QFileDialog.getExistingDirectory()
dir=dir.replace('/','\\')
self.ProjectName_2.setText(dir)
if dir!='':
os.chdir(dir)
import automake_config as ac
(DebugName, HighTecDir, CCFLAG, LINKFLAG, includepath, excludefiles, g_except_dir_list,
g_except_file_list,LibraryPath,libraties) = ac.maininit()
self.includepath=includepath
self.excludefiles=excludefiles
self.DebugName=DebugName
self.CCFLAG=CCFLAG
self.LINKFLAG=LINKFLAG
self.HighTecDir=HighTecDir
self.PROJECTDIR=dir
self.LibraryPath=LibraryPath
self.libraties=libraties
#print(os.getcwd())
self.AllPath=ac.FindAllPath(dir)
#print(self.AllPath)
self.initDialog()
#对Dialog按钮的设置
self.fileselect.buttonBox.accepted.connect(self.GetPath)
self.fileselect.treeWidget.setSelectionMode(3)
self.fileselect.buttonBox.rejected.connect(self.Cleartree)
#self.adds(dir,self.child0)
a.initUI()
def initDialog(self):
self.di = QDialog()
fileselect1 = self.fileselect
fileselect1.setupUi(self.di)
# self.di.show()
child0 = QTreeWidgetItem(fileselect1.treeWidget)
child0.setText(0, self.DebugName)
child0.setIcon(0, QIcon('./Compile/01.png'))
self.adds(os.getcwd(), child0)
child1 = QTreeWidgetItem(child0)
child1.setText(0, 'TOOLS')
child1.setIcon(0, QIcon('./Compile/01.png'))
#展开所有节点
fileselect1.treeWidget.expandAll()
def showContextMenu(self,id):
# 如果有选中项,则显示显示菜单
#if id==1:
items1 = self.includeList.selectedIndexes()
#self.idRm=id
#print(items)
#elif id==2:
items2 = self.excludeList.selectedIndexes()
#self.idRm = id
if items1 or items2:
self.contextMenu.show()
#self.f=QPoint
self.contextMenu.exec_(QCursor.pos()) # 在鼠标位置显示
def remove(self):
items1 = self.includeList.selectedIndexes()
items2 = self.excludeList.selectedIndexes()
if self.index==3:
if items1:
for jj in items1:
self.includeList.removeItemWidget(self.includeList.takeItem(jj.row()))
if self.index == 4:
if items2:
for jj in items2:
self.excludeList.removeItemWidget(self.excludeList.takeItem(jj.row()))
def EndResult(self):
print(os.getcwd())
f=open('./conerr.err','r')
lines=f.readlines()
j=0
for ii in lines:
if "error:"in ii:
self.Result.append("<font color=\"#FF0000\">%s</font> "%ii)
j=1
if j!=1:
self.Result.append("<font color=\"#FF0000\">finished!!!!!!!!</font> ")
self.barlabel.setText('已完成')
f.close()
os.remove('./conerr.err')
self.backend.working=False
self.statusBar.removeWidget(self.progressBar)
self.barlabel.setText('准备中')
os.chdir(self.ProjectName_2.text())
def initBar(self):
global NUM
self.progressBar = QProgressBar()
self.Result.clear()
self.barlabel.setText('正在编译:')
self.statusBar.addPermanentWidget(self.progressBar, stretch=2)
f = open('./Default/Default.objectlist','r')
lines = f.readlines()
f.close()
NUM=len(lines)
#self.progressBar.setGeometry(0,0,100,5)
self.progressBar.setRange(0,len(lines))
global VAL
VAL=0
def SetProgressBarVal(self,val):
#global VAL
n=VAL+val
self.progressBar.setValue(n)
def StartCompile(self,Hdir):
global cmd1
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
cmd1 = r'''%s\bin\make -j8 all''' % Hdir
#cmd1 = self.startpath+'\Compile\compile.bat '+Hdir
# cmd1='cd ..'
# print(includepath)
# self.process =subprocess.Popen(self.startpath+ '\Compile\compile.bat ' + cmd1)
os.chdir(self.ProjectName_2.text() + '/Default')
#f=open('ccccc.txt','w')
#self.process = subprocess.Popen(cmd1)
self.backend1 = BackendTread1()
self.backend1.startcompile1.connect(self.PrintConsole)
self.backend1.endSig.connect(self.EndResult)
#time.sleep(3)
self.backend1.start()
self.backend = BackendTread()
self.backend.setvalue.connect(self.SetProgressBarVal)
#self.backend.endSig.connect(self.EndResult)
# time.sleep(3)
self.backend.start()
'''self.process = subprocess.call(cmd1)
self.process.wait()
f= open('console.log','r')
lines =f.readlines()
for ii in lines:
if 'error:'in ii:
self.Result.insertText(ii+'\n')'''
#os.chdir(self.ProjectName_2.text())
def PrintConsole(self,r):
#print(2222)
# None表示正在执行中
#r = self.process.stdout.readline()
#self.Result.append(r)
self.Result.append("<font color=\"#000000\">%s</font> "%r)
#self.backend.stopSig()
# 可修改输出方式,比如控制台、文件等
#print(self.process.poll())
# 重定向错误输出
def checkFLAG(self):
CCFLAG1 = self.GCCFLAGName.toPlainText()
#CCFLAG1 = CCFLAG1[0:len(CCFLAG1) - 1]
LINKFLAG1 = self.LINKFLAGName.toPlainText()
#LINKFLAG1 = LINKFLAG1[0:len(LINKFLAG1) - 1]
Hdir = self.HithTecDir.text()
DebugName1 = self.ProjectName.text()
inn=self.includeList.count()
inpath=[]
exn = self.excludeList.count()
expath = []
for i in range(inn):
inpath.append(self.includeList.item(i).text())
for i in range(exn):
expath.append(self.excludeList.item(i).text())
#print(CCFLAG1)
# POSTBUILD1 = pb.get()
# Hdir = Hdir[0:len(Hdir) - 1]
#if CCFLAG1 != self.CCFLAG or self.LINKFLAG != LINKFLAG1 or Hdir != self.HighTecDir or DebugName1 != self.DebugName or expath != self.excludefiles or inpath != self.includepath:
self.modifyFLAG()
'''for i in range(0,len(CCFALG)):
if CCFALG1[i]!=CCFALG[i]:
print(i)'''
cmd=self.startpath+'\Compile\python '+self.startpath+"\Compile/automake.py "+self.startpath
a=subprocess.call(cmd)
self.initBar()
#a.wait()
#cmd1 = Hdir + r'\bin\make'
#self.backend.update_date.connect(self.handleDisplay)
try:
self.StartCompile(Hdir)
except BaseException as e:
print(333333)
f=open('cons.log','w')
f.write(e.args)
f.close()
#def
def modifyFLAG(self):
# f=open('./TOOLS/Compile/automake_config.py','r',encoding='utf-8')
CCFLAGNOW = self.GCCFLAGName.toPlainText()
# CCFLAG1 = CCFLAG1[0:len(CCFLAG1) - 1]
LINKFLAGNOW = self.LINKFLAGName.toPlainText()
# LINKFLAG1 = LINKFLAG1[0:len(LINKFLAG1) - 1]
HighTecDirNOW = self.HithTecDir.text()
DebugNameNOW = self.ProjectName.text()
inn = self.includeList.count()
inpathNOW = []
exn = self.excludeList.count()
expathNOW = []
Ln = self.Llist.count()
LnNOW = []
ln = self.llist.count()
lnNOW = []
try:
for i in range(inn):
inpathNOW.append(self.includeList.item(i).text())
for i in range(exn):
expathNOW.append(self.excludeList.item(i).text())
f = open('./py.pyconfig', 'w', encoding='utf-8')
# lines=f.readlines()
tLink = re.split(' ',LINKFLAGNOW)
Linkchange=''
for iii in tLink:
if '-L' not in iii and '-l:' not in iii:
Linkchange+=iii+' '
for i in range(Ln):
p = re.split('{workspace}/',self.Llist.item(i).text())
#print(p)
if len(p)==1:
Linkchange+='''-L"'''+os.path.abspath(p[0])+'''" '''
else:
Linkchange += '''-L"''' + os.path.abspath(p[1]) + '''" '''
LnNOW.append(self.Llist.item(i).text())
for i in range(ln):
Linkchange+='-l'+self.llist.item(i).text()+' '
lnNOW.append(self.llist.item(i).text())
f.write('CCFLAG=' + CCFLAGNOW + "\n")
f.write('LINKFLAG=' + Linkchange + "\n")
f.write('HighTecDir=' + HighTecDirNOW + "\n")
f.write('DebugName=' + DebugNameNOW + "\n")
aa = "includepath="
for a in inpathNOW:
if a != "":
aa += a + ','
f.write(aa + '\n')
bb = "excludefiles="
for b in expathNOW:
if b != "":
bb += b + ','
f.write(bb + '\n')
cc = "LibraryPath="
for c in LnNOW:
if c != "":
cc += c + ','
dd = "libraties="
for d in lnNOW:
if d != "":
dd += d + ','
f.write(cc + '\n')
f.write(dd + '\n')
f.close()
self.LINKFLAGName.setText('')
self.LINKFLAGName.setText(Linkchange)
except:
f.close()
def CleanProject(self):
print('Cleanning project...... ')
if os.path.exists('./Default'):
shutil.rmtree('./Default')
if os.path.exists('./delivery'):
shutil.rmtree('./delivery')
QMessageBox.about(self, "消息", "Clean has finished!")
#tkinter.messagebox.showinfo('提示','Clean has finished!')
print('Clean has finished!')
def testaa(self):
print("1")
def CloseTools(self):
print(1)
def delPath(self,id):
if id==1:
self.includeList.clear()
if id == 2:
self.excludeList.clear()
def ShowDialog(self,id):
#self.di=QDialog()
#fileselect1 = fileselect.Ui_Dialog()
#fileselect1.setupUi(self.di)
self.idPath=id
self.di.exec()
# for path,dir,files in os.walk(os.getcwd()):
# for file in files:
# i=i+1
# if file.endswith('.h') and "TOOLS" not in path:
# if "TOOLS" not in path:
# a='child'+str(i)
# a=QTreeWidgetItem(child0)
def adds(self,paths, root):
if os.path.isdir(paths):
list = os.listdir(paths)
for i in list:
# j=0
# for path1 ,dirs,files in os.walk(os.path.join(paths,i)):
# for file in files:
# if file.endswith('.h') or file.endswith('.c'):
# j=1
if 'Default' not in i and '.' not in i and '_pycache_' not in os.path.join(paths,i) and os.path.join(
paths, i) in self.AllPath:
# self.adds(os.path.join(paths, i),root)
if os.path.isdir(os.path.join(paths, i)):
childs = QTreeWidgetItem(root)
childs.setText(0, i)
childs.setIcon(0, QIcon('./Compile/01.png'))
self.adds(os.path.join(paths, i), childs)
#注意:是对QDialog对象show(),并不是自己生成的Ui_Dialog对象 show(),开始没有写self.di,弹窗总是一闪而过,类的的函数加上self之后成功
#print(QFileDialog.getExistingDirectory(None, "请选择要添加的文件", os.getcwd()))
def GetPath(self):
if self.index==3:
pathlist = self.fileselect.treeWidget.selectedItems()
# pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
# print(pathlist.value().childCount())
tempinclude = []
for pathss in pathlist:
tpathss = pathss
tp = ""
while 1:
if tpathss.text(0)!=self.DebugName:
tp = tpathss.text(0) + tp
if tpathss.parent():
tpathss = tpathss.parent()
tp = '/' + tp
else:
break
if tp not in tempinclude and tp!="":
tempinclude.append(tp)
pathss.setSelected(False)
self.includeList.addItems(sorted(tempinclude))
elif self.idPath==2:
pathlist = self.fileselect.treeWidget.selectedItems()
#pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
#print(pathlist.value().childCount())
tempexclude=[]
for pathss in pathlist:
tpathss=pathss
tp=""
while 1:
if tpathss.text(0) != self.DebugName:
tp = tpathss.text(0)+tp
if tpathss.parent():
tpathss=tpathss.parent()
tp='/'+tp
else:
break
if tp not in tempexclude and tp!="":
tempexclude.append(tp)
self.excludeList.addItems(sorted(tempexclude))
elif self.index==2:
pathlist = self.fileselect.treeWidget.selectedItems()
# pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
# print(pathlist.value().childCount())
tempexclude = []
for pathss in pathlist:
tpathss = pathss
tp = ""
while 1:
if tpathss.text(0) != self.DebugName:
tp = tpathss.text(0) + tp
if tpathss.parent():
tpathss = tpathss.parent()
tp = '/' + tp
else:
break
if tp not in tempexclude and tp != "":
tempexclude.append("{workspace}"+tp)
pathss.setSelected(False)
self.Llist.addItems(tempexclude)
self.LWin.close()#如果是通过workspace选的直接关掉选择框
self.di.close()
'''for selectedPath in pathlist:
print(selectedPath.text(0))
print(pathlist)'''
#if pathlist.value().checkState(0) == Qt.Checked:
#n=self.fileselect.treeWidget.topLevelItemCount()
'''while pathlist.value():
if pathlist.value().checkState(0)==Qt.Checked:
print(pathlist.value.text(0))
break'''
def Cleartree(self):
pathlist = self.fileselect.treeWidget.selectedItems()
for pathss in pathlist:
pathss.setSelected(False)
self.di.close()
def AddExpath(self):
dir1,file1 = QFileDialog.getOpenFileNames (self,'选择过滤文件',os.getcwd(),"C FILES(*.c)")
#print(dir1,file1)
for ii in dir1:
if ii!='' :
dir2 = re.split(os.getcwd().replace('\\','/'),ii)[1]
self.excludeList.addItem(dir2)
#Library的具体操作
def AddLibraryPath(self):
txt=self.LWUI.LibraryP.text()
if txt:
self.Llist.addItem(txt)
self.LWin.close()
def AddLibraries(self):
txt = self.lWUI.libraries.text()
if txt:
self.llist.addItem(txt)
self.lWin.close()
def DelLibraryPath(self):
items1 = self.Llist.selectedIndexes()
if items1:
for jj in items1:
self.Llist.removeItemWidget(self.Llist.takeItem(jj.row()))
def DelLibraries(self):
items1 = self.llist.selectedIndexes()
if items1:
for jj in items1:
self.llist.removeItemWidget(self.llist.takeItem(jj.row()))
if __name__ == '__main__':
cmd1 = ""
NUM=0
VAL=0
app = QApplication(sys.argv)
app.setWindowIcon(QIcon('./Compile/mainwindowIcon.png'))
a=basePage()
a.ChooseProDir()
a.show()
#进入程序的主循环,并通过exit函数确保主循环安全结束
sys.exit(app.exec_()) | pyqtSignal(int)
def __init__(self, parent=None):
super(BackendTread, self).__init__(parent)
self.working=True
def stopSig(self):
self.working=False
def run(self):
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
'''os.chdir(self.ProjectName_2.text() + '/Default')
self.process = subprocess.call(cmd1)'''
while VAL<NUM and self.working:
num=0
for path,dir,files in os.walk(os.getcwd()):
for file in files:
if file.endswith('.o'):
num=num+1
self.setvalue.emit(num)
#开编译的线程 | identifier_body |
project_config.py | # -*- coding: utf-8 -*-
# author: Guixinyu
# create Time: 2019-10-17 18:15:39
from PyQt5.QtWidgets import *
import sys
import first
import fileselect
import shutil
from first import Ui_MainWindow
import AddLibraryPath
import Enterlibraries
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import os
import re
import subprocess
import time
#读取log的线程
class BackendTread(QThread):
setvalue = pyqtSignal(int)
def __init__(self, parent=None):
super(BackendTread, self).__init__(parent)
self.working=True
def stopSig(se | self.working=False
def run(self):
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
'''os.chdir(self.ProjectName_2.text() + '/Default')
self.process = subprocess.call(cmd1)'''
while VAL<NUM and self.working:
num=0
for path,dir,files in os.walk(os.getcwd()):
for file in files:
if file.endswith('.o'):
num=num+1
self.setvalue.emit(num)
#开编译的线程
class BackendTread1(QThread):
startcompile1 = pyqtSignal(str)
endSig = pyqtSignal()
def __init__(self, parent=None):
super(BackendTread1, self).__init__(parent)
def startCom(self):
self.process = subprocess.Popen(cmd1)
def run(self):
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
'''os.chdir(self.ProjectName_2.text() + '/Default')
self.process = subprocess.call(cmd1)'''
f=open('conerr.err','w+')
self.process = subprocess.Popen(cmd1,stdout=subprocess.PIPE,stderr=f,bufsize=1)
'''self.bt=BackendTread()
self.bt.startcompile.connect(self.PrintConsole)
self.bt.start()'''
self.sleep(3)
while self.process.poll() is None:
#print(1)
r = self.process.stdout.readline().decode('gbk')
if r:
self.startcompile1.emit(r)
if 'tool>pause'in r:
break
os.system(r"taskkill /f /t /im make.exe")#因为在做post-build的时候,al2的工具需要按回车键才能结束进程,因为在这里强制性的使其结束
self.endSig.emit()
class basePage(QMainWindow,Ui_MainWindow):
def __init__(self):
super(basePage, self).__init__()
self.setupUi(self)
self.startpath=os.getcwd()
self.actionbuild.triggered.connect(self.checkFLAG)
#self.menuclean.triggered.connect(self.CleanProject)
self.actionclean.triggered.connect(self.CleanProject)
self.actionopen_project.triggered.connect(self.ChooseProDir)
self.actionsave_project.triggered.connect(self.modifyFLAG)
#self.quitApp.triggered.connect(QCoreApplication.instance().quit) #关闭程序的第一种方式
self.actionexit.triggered.connect(qApp.quit)#关闭程序的第二种方式
#添加工具栏:停止和退出
self.tb1=self.addToolBar('tool')
actionopen1=QAction(QIcon('./Compile/file.png'),"打开工程",self)
self.tb1.addAction(actionopen1)
actionopen1.triggered.connect(self.ChooseProDir)
self.tb1.addSeparator()
actionstop=QAction(QIcon('./Compile/stop.png'),"停止",self)
self.tb1.addAction(actionstop)
actionstop.triggered.connect(self.KillProcess)
self.tb1.addSeparator()
actionExit=QAction(QIcon('./Compile/exit.png'),"退出",self)
self.tb1.addAction(actionExit)
actionExit.triggered.connect(qApp.quit)
##创建右键菜单
#self.includeList.setContextMenuPolicy(Qt.CustomContextMenu)
#self.includeList.customContextMenuRequested.connect(self.showRightMenu)
#self.includeList.customContextMenuRequested[QPoint].connect(self.remove)
#单击一个选项
#self.f=""
#self.includeList.clicked.connect(self.check)
self.includeList.setContextMenuPolicy(Qt.CustomContextMenu)
self.excludeList.setContextMenuPolicy(Qt.CustomContextMenu)
self.contextMenu=QMenu(self)
self.actionA=self.contextMenu.addAction("删除")
self.actionA.triggered.connect(self.remove)
self.includeList.customContextMenuRequested.connect(lambda :self.showContextMenu(1))
#self.contextMenu.triggered[QAction].connect(self.remove)
#self.includeList.customContextMenuRequested[QPoint].connect(self.remove1)#[]里的代表传入的参数,自带的
self.excludeList.customContextMenuRequested.connect(lambda :self.showContextMenu(2))
#self.excludeList.customContextMenuRequested[QPoint].connect(self.remove2) # []里的代表传入的参数,自带的
self.delPath1.clicked.connect(self.includeList.clear)
self.delPath2.clicked.connect(self.excludeList.clear)
self.addPath1.clicked.connect(lambda :self.ShowDialog(1))
self.addPath2.clicked.connect(self.AddExpath)
self.fileselect = fileselect.Ui_Dialog()
#初始化page
self.listWidget.currentRowChanged.connect(self.display)
#Library的初始化
self.initLibraryWindow()
self.Llist.setSelectionMode(3)
self.llist.setSelectionMode(3)
#self.add2.clidken.connect(self.ShowLWindow)
#状态栏的部件
self.barlabel = QLabel('barlabel')
#self.initDialog()
#self.fileselect.buttonBox
#print(self.fileselect.treeWidget.currentItem().text(0))
def initUI(self):
self.includeList.clear()
self.excludeList.clear()
self.Llist.clear()
self.llist.clear()
self.ProjectName.setText(self.DebugName)
self.HithTecDir.setText(self.HighTecDir)
self.GCCFLAGName.setText(self.CCFLAG)
self.LINKFLAGName.setText(self.LINKFLAG)
self.ProjectName_2.setText(self.PROJECTDIR)
self.ProjectName_2.setEnabled(False)
self.barlabel.setText('准备中')
self.statusBar.addPermanentWidget(self.barlabel)
self.Result.clear()
if self.includepath:
#a=1
self.includeList.addItems(self.includepath)
if self.excludefiles:
#a=1
self.excludeList.addItems(self.excludefiles)
if self.LibraryPath:
#a=1
self.Llist.addItems(self.LibraryPath)
if self.libraties:
#a=1
self.llist.addItems(self.libraties)
def display(self,index):
self.index=index
self.stackedWidget.setCurrentIndex(index)
def initLibraryWindow(self):
self.LWUI=AddLibraryPath.Ui_LSelect()
self.LWin=QWidget()
self.LWin.setWindowModality(Qt.ApplicationModal)#设置模态对话框
self.LWUI.setupUi(self.LWin)
self.LWUI.LibraryP.setText("")
self.add1.clicked.connect(self.LWin.show)
self.LWUI.L_Cancel.clicked.connect(self.LWin.close)
self.LWUI.L_Workspace.clicked.connect(lambda: self.ShowDialog(1))
self.LWUI.L_OK.clicked.connect(self.AddLibraryPath)
self.del1.clicked.connect(self.DelLibraryPath)
self.lWUI = Enterlibraries.Ui_LSelect()
self.lWin = QWidget()
self.lWin.setWindowModality(Qt.ApplicationModal)
self.lWUI.setupUi(self.lWin)
self.LWUI.LibraryP.setText("")
self.add2.clicked.connect(self.lWin.show)
self.lWUI.l_OK.clicked.connect(self.AddLibraries)
self.lWUI.l_Cancel.clicked.connect(self.lWin.close)
self.del2.clicked.connect(self.DelLibraries)
def KillProcess(self):
#self.process.kill()
#self.process.pid
os.system(r"taskkill /f /t /im make.exe")
self.Result.append('用户终止执行')
def ChooseProDir(self):
dir=QFileDialog.getExistingDirectory()
dir=dir.replace('/','\\')
self.ProjectName_2.setText(dir)
if dir!='':
os.chdir(dir)
import automake_config as ac
(DebugName, HighTecDir, CCFLAG, LINKFLAG, includepath, excludefiles, g_except_dir_list,
g_except_file_list,LibraryPath,libraties) = ac.maininit()
self.includepath=includepath
self.excludefiles=excludefiles
self.DebugName=DebugName
self.CCFLAG=CCFLAG
self.LINKFLAG=LINKFLAG
self.HighTecDir=HighTecDir
self.PROJECTDIR=dir
self.LibraryPath=LibraryPath
self.libraties=libraties
#print(os.getcwd())
self.AllPath=ac.FindAllPath(dir)
#print(self.AllPath)
self.initDialog()
#对Dialog按钮的设置
self.fileselect.buttonBox.accepted.connect(self.GetPath)
self.fileselect.treeWidget.setSelectionMode(3)
self.fileselect.buttonBox.rejected.connect(self.Cleartree)
#self.adds(dir,self.child0)
a.initUI()
def initDialog(self):
self.di = QDialog()
fileselect1 = self.fileselect
fileselect1.setupUi(self.di)
# self.di.show()
child0 = QTreeWidgetItem(fileselect1.treeWidget)
child0.setText(0, self.DebugName)
child0.setIcon(0, QIcon('./Compile/01.png'))
self.adds(os.getcwd(), child0)
child1 = QTreeWidgetItem(child0)
child1.setText(0, 'TOOLS')
child1.setIcon(0, QIcon('./Compile/01.png'))
#展开所有节点
fileselect1.treeWidget.expandAll()
def showContextMenu(self,id):
# 如果有选中项,则显示显示菜单
#if id==1:
items1 = self.includeList.selectedIndexes()
#self.idRm=id
#print(items)
#elif id==2:
items2 = self.excludeList.selectedIndexes()
#self.idRm = id
if items1 or items2:
self.contextMenu.show()
#self.f=QPoint
self.contextMenu.exec_(QCursor.pos()) # 在鼠标位置显示
def remove(self):
items1 = self.includeList.selectedIndexes()
items2 = self.excludeList.selectedIndexes()
if self.index==3:
if items1:
for jj in items1:
self.includeList.removeItemWidget(self.includeList.takeItem(jj.row()))
if self.index == 4:
if items2:
for jj in items2:
self.excludeList.removeItemWidget(self.excludeList.takeItem(jj.row()))
def EndResult(self):
print(os.getcwd())
f=open('./conerr.err','r')
lines=f.readlines()
j=0
for ii in lines:
if "error:"in ii:
self.Result.append("<font color=\"#FF0000\">%s</font> "%ii)
j=1
if j!=1:
self.Result.append("<font color=\"#FF0000\">finished!!!!!!!!</font> ")
self.barlabel.setText('已完成')
f.close()
os.remove('./conerr.err')
self.backend.working=False
self.statusBar.removeWidget(self.progressBar)
self.barlabel.setText('准备中')
os.chdir(self.ProjectName_2.text())
def initBar(self):
global NUM
self.progressBar = QProgressBar()
self.Result.clear()
self.barlabel.setText('正在编译:')
self.statusBar.addPermanentWidget(self.progressBar, stretch=2)
f = open('./Default/Default.objectlist','r')
lines = f.readlines()
f.close()
NUM=len(lines)
#self.progressBar.setGeometry(0,0,100,5)
self.progressBar.setRange(0,len(lines))
global VAL
VAL=0
def SetProgressBarVal(self,val):
#global VAL
n=VAL+val
self.progressBar.setValue(n)
def StartCompile(self,Hdir):
global cmd1
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
cmd1 = r'''%s\bin\make -j8 all''' % Hdir
#cmd1 = self.startpath+'\Compile\compile.bat '+Hdir
# cmd1='cd ..'
# print(includepath)
# self.process =subprocess.Popen(self.startpath+ '\Compile\compile.bat ' + cmd1)
os.chdir(self.ProjectName_2.text() + '/Default')
#f=open('ccccc.txt','w')
#self.process = subprocess.Popen(cmd1)
self.backend1 = BackendTread1()
self.backend1.startcompile1.connect(self.PrintConsole)
self.backend1.endSig.connect(self.EndResult)
#time.sleep(3)
self.backend1.start()
self.backend = BackendTread()
self.backend.setvalue.connect(self.SetProgressBarVal)
#self.backend.endSig.connect(self.EndResult)
# time.sleep(3)
self.backend.start()
'''self.process = subprocess.call(cmd1)
self.process.wait()
f= open('console.log','r')
lines =f.readlines()
for ii in lines:
if 'error:'in ii:
self.Result.insertText(ii+'\n')'''
#os.chdir(self.ProjectName_2.text())
def PrintConsole(self,r):
#print(2222)
# None表示正在执行中
#r = self.process.stdout.readline()
#self.Result.append(r)
self.Result.append("<font color=\"#000000\">%s</font> "%r)
#self.backend.stopSig()
# 可修改输出方式,比如控制台、文件等
#print(self.process.poll())
# 重定向错误输出
def checkFLAG(self):
CCFLAG1 = self.GCCFLAGName.toPlainText()
#CCFLAG1 = CCFLAG1[0:len(CCFLAG1) - 1]
LINKFLAG1 = self.LINKFLAGName.toPlainText()
#LINKFLAG1 = LINKFLAG1[0:len(LINKFLAG1) - 1]
Hdir = self.HithTecDir.text()
DebugName1 = self.ProjectName.text()
inn=self.includeList.count()
inpath=[]
exn = self.excludeList.count()
expath = []
for i in range(inn):
inpath.append(self.includeList.item(i).text())
for i in range(exn):
expath.append(self.excludeList.item(i).text())
#print(CCFLAG1)
# POSTBUILD1 = pb.get()
# Hdir = Hdir[0:len(Hdir) - 1]
#if CCFLAG1 != self.CCFLAG or self.LINKFLAG != LINKFLAG1 or Hdir != self.HighTecDir or DebugName1 != self.DebugName or expath != self.excludefiles or inpath != self.includepath:
self.modifyFLAG()
'''for i in range(0,len(CCFALG)):
if CCFALG1[i]!=CCFALG[i]:
print(i)'''
cmd=self.startpath+'\Compile\python '+self.startpath+"\Compile/automake.py "+self.startpath
a=subprocess.call(cmd)
self.initBar()
#a.wait()
#cmd1 = Hdir + r'\bin\make'
#self.backend.update_date.connect(self.handleDisplay)
try:
self.StartCompile(Hdir)
except BaseException as e:
print(333333)
f=open('cons.log','w')
f.write(e.args)
f.close()
#def
def modifyFLAG(self):
# f=open('./TOOLS/Compile/automake_config.py','r',encoding='utf-8')
CCFLAGNOW = self.GCCFLAGName.toPlainText()
# CCFLAG1 = CCFLAG1[0:len(CCFLAG1) - 1]
LINKFLAGNOW = self.LINKFLAGName.toPlainText()
# LINKFLAG1 = LINKFLAG1[0:len(LINKFLAG1) - 1]
HighTecDirNOW = self.HithTecDir.text()
DebugNameNOW = self.ProjectName.text()
inn = self.includeList.count()
inpathNOW = []
exn = self.excludeList.count()
expathNOW = []
Ln = self.Llist.count()
LnNOW = []
ln = self.llist.count()
lnNOW = []
try:
for i in range(inn):
inpathNOW.append(self.includeList.item(i).text())
for i in range(exn):
expathNOW.append(self.excludeList.item(i).text())
f = open('./py.pyconfig', 'w', encoding='utf-8')
# lines=f.readlines()
tLink = re.split(' ',LINKFLAGNOW)
Linkchange=''
for iii in tLink:
if '-L' not in iii and '-l:' not in iii:
Linkchange+=iii+' '
for i in range(Ln):
p = re.split('{workspace}/',self.Llist.item(i).text())
#print(p)
if len(p)==1:
Linkchange+='''-L"'''+os.path.abspath(p[0])+'''" '''
else:
Linkchange += '''-L"''' + os.path.abspath(p[1]) + '''" '''
LnNOW.append(self.Llist.item(i).text())
for i in range(ln):
Linkchange+='-l'+self.llist.item(i).text()+' '
lnNOW.append(self.llist.item(i).text())
f.write('CCFLAG=' + CCFLAGNOW + "\n")
f.write('LINKFLAG=' + Linkchange + "\n")
f.write('HighTecDir=' + HighTecDirNOW + "\n")
f.write('DebugName=' + DebugNameNOW + "\n")
aa = "includepath="
for a in inpathNOW:
if a != "":
aa += a + ','
f.write(aa + '\n')
bb = "excludefiles="
for b in expathNOW:
if b != "":
bb += b + ','
f.write(bb + '\n')
cc = "LibraryPath="
for c in LnNOW:
if c != "":
cc += c + ','
dd = "libraties="
for d in lnNOW:
if d != "":
dd += d + ','
f.write(cc + '\n')
f.write(dd + '\n')
f.close()
self.LINKFLAGName.setText('')
self.LINKFLAGName.setText(Linkchange)
except:
f.close()
def CleanProject(self):
print('Cleanning project...... ')
if os.path.exists('./Default'):
shutil.rmtree('./Default')
if os.path.exists('./delivery'):
shutil.rmtree('./delivery')
QMessageBox.about(self, "消息", "Clean has finished!")
#tkinter.messagebox.showinfo('提示','Clean has finished!')
print('Clean has finished!')
def testaa(self):
print("1")
def CloseTools(self):
print(1)
def delPath(self,id):
if id==1:
self.includeList.clear()
if id == 2:
self.excludeList.clear()
def ShowDialog(self,id):
#self.di=QDialog()
#fileselect1 = fileselect.Ui_Dialog()
#fileselect1.setupUi(self.di)
self.idPath=id
self.di.exec()
# for path,dir,files in os.walk(os.getcwd()):
# for file in files:
# i=i+1
# if file.endswith('.h') and "TOOLS" not in path:
# if "TOOLS" not in path:
# a='child'+str(i)
# a=QTreeWidgetItem(child0)
def adds(self,paths, root):
if os.path.isdir(paths):
list = os.listdir(paths)
for i in list:
# j=0
# for path1 ,dirs,files in os.walk(os.path.join(paths,i)):
# for file in files:
# if file.endswith('.h') or file.endswith('.c'):
# j=1
if 'Default' not in i and '.' not in i and '_pycache_' not in os.path.join(paths,i) and os.path.join(
paths, i) in self.AllPath:
# self.adds(os.path.join(paths, i),root)
if os.path.isdir(os.path.join(paths, i)):
childs = QTreeWidgetItem(root)
childs.setText(0, i)
childs.setIcon(0, QIcon('./Compile/01.png'))
self.adds(os.path.join(paths, i), childs)
#注意:是对QDialog对象show(),并不是自己生成的Ui_Dialog对象 show(),开始没有写self.di,弹窗总是一闪而过,类的的函数加上self之后成功
#print(QFileDialog.getExistingDirectory(None, "请选择要添加的文件", os.getcwd()))
def GetPath(self):
if self.index==3:
pathlist = self.fileselect.treeWidget.selectedItems()
# pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
# print(pathlist.value().childCount())
tempinclude = []
for pathss in pathlist:
tpathss = pathss
tp = ""
while 1:
if tpathss.text(0)!=self.DebugName:
tp = tpathss.text(0) + tp
if tpathss.parent():
tpathss = tpathss.parent()
tp = '/' + tp
else:
break
if tp not in tempinclude and tp!="":
tempinclude.append(tp)
pathss.setSelected(False)
self.includeList.addItems(sorted(tempinclude))
elif self.idPath==2:
pathlist = self.fileselect.treeWidget.selectedItems()
#pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
#print(pathlist.value().childCount())
tempexclude=[]
for pathss in pathlist:
tpathss=pathss
tp=""
while 1:
if tpathss.text(0) != self.DebugName:
tp = tpathss.text(0)+tp
if tpathss.parent():
tpathss=tpathss.parent()
tp='/'+tp
else:
break
if tp not in tempexclude and tp!="":
tempexclude.append(tp)
self.excludeList.addItems(sorted(tempexclude))
elif self.index==2:
pathlist = self.fileselect.treeWidget.selectedItems()
# pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
# print(pathlist.value().childCount())
tempexclude = []
for pathss in pathlist:
tpathss = pathss
tp = ""
while 1:
if tpathss.text(0) != self.DebugName:
tp = tpathss.text(0) + tp
if tpathss.parent():
tpathss = tpathss.parent()
tp = '/' + tp
else:
break
if tp not in tempexclude and tp != "":
tempexclude.append("{workspace}"+tp)
pathss.setSelected(False)
self.Llist.addItems(tempexclude)
self.LWin.close()#如果是通过workspace选的直接关掉选择框
self.di.close()
'''for selectedPath in pathlist:
print(selectedPath.text(0))
print(pathlist)'''
#if pathlist.value().checkState(0) == Qt.Checked:
#n=self.fileselect.treeWidget.topLevelItemCount()
'''while pathlist.value():
if pathlist.value().checkState(0)==Qt.Checked:
print(pathlist.value.text(0))
break'''
def Cleartree(self):
pathlist = self.fileselect.treeWidget.selectedItems()
for pathss in pathlist:
pathss.setSelected(False)
self.di.close()
def AddExpath(self):
dir1,file1 = QFileDialog.getOpenFileNames (self,'选择过滤文件',os.getcwd(),"C FILES(*.c)")
#print(dir1,file1)
for ii in dir1:
if ii!='' :
dir2 = re.split(os.getcwd().replace('\\','/'),ii)[1]
self.excludeList.addItem(dir2)
#Library的具体操作
def AddLibraryPath(self):
txt=self.LWUI.LibraryP.text()
if txt:
self.Llist.addItem(txt)
self.LWin.close()
def AddLibraries(self):
txt = self.lWUI.libraries.text()
if txt:
self.llist.addItem(txt)
self.lWin.close()
def DelLibraryPath(self):
items1 = self.Llist.selectedIndexes()
if items1:
for jj in items1:
self.Llist.removeItemWidget(self.Llist.takeItem(jj.row()))
def DelLibraries(self):
items1 = self.llist.selectedIndexes()
if items1:
for jj in items1:
self.llist.removeItemWidget(self.llist.takeItem(jj.row()))
if __name__ == '__main__':
cmd1 = ""
NUM=0
VAL=0
app = QApplication(sys.argv)
app.setWindowIcon(QIcon('./Compile/mainwindowIcon.png'))
a=basePage()
a.ChooseProDir()
a.show()
#进入程序的主循环,并通过exit函数确保主循环安全结束
sys.exit(app.exec_()) | lf):
| identifier_name |
project_config.py | # -*- coding: utf-8 -*-
# author: Guixinyu
# create Time: 2019-10-17 18:15:39
from PyQt5.QtWidgets import *
import sys
import first
import fileselect
import shutil
from first import Ui_MainWindow
import AddLibraryPath
import Enterlibraries
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import os
import re
import subprocess
import time
#读取log的线程
class BackendTread(QThread):
setvalue = pyqtSignal(int)
def __init__(self, parent=None):
super(BackendTread, self).__init__(parent)
self.working=True
def stopSig(self):
self.working=False
def run(self):
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
'''os.chdir(self.ProjectName_2.text() + '/Default')
self.process = subprocess.call(cmd1)'''
while VAL<NUM and self.working:
num=0
for path,dir,files in os.walk(os.getcwd()):
for file in files:
if file.endswith('.o'):
num=num+1
self.setvalue.emit(num)
#开编译的线程
class BackendTread1(QThread):
startcompile1 = pyqtSignal(str)
endSig = pyqtSignal()
def __init__(self, parent=None):
super(BackendTread1, self).__init__(parent)
def startCom(self):
self.process = subprocess.Popen(cmd1)
def run(self):
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
'''os.chdir(self.ProjectName_2.text() + '/Default')
self.process = subprocess.call(cmd1)'''
f=open('conerr.err','w+')
self.process = subprocess.Popen(cmd1,stdout=subprocess.PIPE,stderr=f,bufsize=1)
'''self.bt=BackendTread()
self.bt.startcompile.connect(self.PrintConsole)
self.bt.start()'''
self.sleep(3)
while self.process.poll() is None:
#print(1)
r = self.process.stdout.readline().decode('gbk')
if r:
self.startcompile1.emit(r)
if 'tool>pause'in r:
break
os.system(r"taskkill /f /t /im make.exe")#因为在做post-build的时候,al2的工具需要按回车键才能结束进程,因为在这里强制性的使其结束
self.endSig.emit()
class basePage(QMainWindow,Ui_MainWindow):
def __init__(self):
super(basePage, self).__init__()
self.setupUi(self)
self.startpath=os.getcwd()
self.actionbuild.triggered.connect(self.checkFLAG)
#self.menuclean.triggered.connect(self.CleanProject)
self.actionclean.triggered.connect(self.CleanProject)
self.actionopen_project.triggered.connect(self.ChooseProDir)
self.actionsave_project.triggered.connect(self.modifyFLAG)
#self.quitApp.triggered.connect(QCoreApplication.instance().quit) #关闭程序的第一种方式
self.actionexit.triggered.connect(qApp.quit)#关闭程序的第二种方式
#添加工具栏:停止和退出
self.tb1=self.addToolBar('tool')
actionopen1=QAction(QIcon('./Compile/file.png'),"打开工程",self)
self.tb1.addAction(actionopen1)
actionopen1.triggered.connect(self.ChooseProDir)
self.tb1.addSeparator()
actionstop=QAction(QIcon('./Compile/stop.png'),"停止",self)
self.tb1.addAction(actionstop)
actionstop.triggered.connect(self.KillProcess)
self.tb1.addSeparator()
actionExit=QAction(QIcon('./Compile/exit.png'),"退出",self)
self.tb1.addAction(actionExit)
actionExit.triggered.connect(qApp.quit)
##创建右键菜单
#self.includeList.setContextMenuPolicy(Qt.CustomContextMenu)
#self.includeList.customContextMenuRequested.connect(self.showRightMenu)
#self.includeList.customContextMenuRequested[QPoint].connect(self.remove)
#单击一个选项
#self.f=""
#self.includeList.clicked.connect(self.check)
self.includeList.setContextMenuPolicy(Qt.CustomContextMenu)
self.excludeList.setContextMenuPolicy(Qt.CustomContextMenu)
self.contextMenu=QMenu(self)
self.actionA=self.contextMenu.addAction("删除")
self.actionA.triggered.connect(self.remove)
self.includeList.customContextMenuRequested.connect(lambda :self.showContextMenu(1))
#self.contextMenu.triggered[QAction].connect(self.remove)
#self.includeList.customContextMenuRequested[QPoint].connect(self.remove1)#[]里的代表传入的参数,自带的
self.excludeList.customContextMenuRequested.connect(lambda :self.showContextMenu(2))
#self.excludeList.customContextMenuRequested[QPoint].connect(self.remove2) # []里的代表传入的参数,自带的
self.delPath1.clicked.connect(self.includeList.clear)
self.delPath2.clicked.connect(self.excludeList.clear)
self.addPath1.clicked.connect(lambda :self.ShowDialog(1))
self.addPath2.clicked.connect(self.AddExpath)
self.fileselect = fileselect.Ui_Dialog()
#初始化page
self.listWidget.currentRowChanged.connect(self.display)
#Library的初始化
self.initLibraryWindow()
self.Llist.setSelectionMode(3)
self.llist.setSelectionMode(3)
#self.add2.clidken.connect(self.ShowLWindow)
#状态栏的部件
self.barlabel = QLabel('barlabel')
#self.initDialog()
#self.fileselect.buttonBox
#print(self.fileselect.treeWidget.currentItem().text(0))
def initUI(self):
self.includeList.clear()
self.excludeList.clear()
self.Llist.clear()
self.llist.clear()
self.ProjectName.setText(self.DebugName)
self.HithTecDir.setText(self.HighTecDir)
self.GCCFLAGName.setText(self.CCFLAG)
self.LINKFLAGName.setText(self.LINKFLAG)
self.ProjectName_2.setText(self.PROJECTDIR)
self.ProjectName_2.setEnabled(False)
self.barlabel.setText('准备中')
self.statusBar.addPermanentWidget(self.barlabel)
self.Result.clear()
if self.includepath:
#a=1
self.includeList.addItems(self.includepath)
if self.excludefiles:
#a=1
self.excludeList.addItems(self.excludefiles)
if self.LibraryPath:
#a=1
self.Llist.addItems(self.LibraryPath)
if self.libraties:
#a=1
self.llist.addItems(self.libraties)
def display(self,index):
self.index=index
self.stackedWidget.setCurrentIndex(index)
def initLibraryWindow(self):
self.LWUI=AddLibraryPath.Ui_LSelect()
self.LWin=QWidget()
self.LWin.setWindowModality(Qt.ApplicationModal)#设置模态对话框
self.LWUI.setupUi(self.LWin)
self.LWUI.LibraryP.setText("")
self.add1.clicked.connect(self.LWin.show)
self.LWUI.L_Cancel.clicked.connect(self.LWin.close)
self.LWUI.L_Workspace.clicked.connect(lambda: self.ShowDialog(1))
self.LWUI.L_OK.clicked.connect(self.AddLibraryPath)
self.del1.clicked.connect(self.DelLibraryPath)
self.lWUI = Enterlibraries.Ui_LSelect()
self.lWin = QWidget()
self.lWin.setWindowModality(Qt.ApplicationModal)
self.lWUI.setupUi(self.lWin)
self.LWUI.LibraryP.setText("")
self.add2.clicked.connect(self.lWin.show)
self.lWUI.l_OK.clicked.connect(self.AddLibraries)
self.lWUI.l_Cancel.clicked.connect(self.lWin.close)
self.del2.clicked.connect(self.DelLibraries)
def KillProcess(self):
#self.process.kill()
#self.process.pid
os.system(r"taskkill /f /t /im make.exe")
self.Result.append('用户终止执行')
def ChooseProDir(self):
dir=QFileDialog.getExistingDirectory()
dir=dir.replace('/','\\')
self.ProjectName_2.setText(dir)
if dir!='':
os.chdir(dir)
import automake_config as ac
(DebugName, HighTecDir, CCFLAG, LINKFLAG, includepath, excludefiles, g_except_dir_list,
g_except_file_list,LibraryPath,libraties) = ac.maininit()
self.includepath=includepath
self.excludefiles=excludefiles
self.DebugName=DebugName
self.CCFLAG=CCFLAG
self.LINKFLAG=LINKFLAG
self.HighTecDir=HighTecDir
self.PROJECTDIR=dir
self.LibraryPath=LibraryPath
self.libraties=libraties
#print(os.getcwd())
self.AllPath=ac.FindAllPath(dir)
#print(self.AllPath)
self.initDialog()
#对Dialog按钮的设置
self.fileselect.buttonBox.accepted.connect(self.GetPath)
self.fileselect.treeWidget.setSelectionMode(3)
self.fileselect.buttonBox.rejected.connect(self.Cleartree)
#self.adds(dir,self.child0)
a.initUI()
def initDialog(self):
self.di = QDialog()
fileselect1 = self.fileselect
fileselect1.setupUi(self.di)
# self.di.show()
child0 = QTreeWidgetItem(fileselect1.treeWidget)
child0.setText(0, self.DebugName)
child0.setIcon(0, QIcon('./Compile/01.png'))
self.adds(os.getcwd(), child0)
child1 = QTreeWidgetItem(child0)
child1.setText(0, 'TOOLS')
child1.setIcon(0, QIcon('./Compile/01.png'))
#展开所有节点
fileselect1.treeWidget.expandAll()
def showContextMenu(self,id):
# 如果有选中项,则显示显示菜单
#if id==1:
items1 = self.includeList.selectedIndexes()
#self.idRm=id
#print(items)
#elif id==2:
items2 = self.excludeList.selectedIndexes()
#self.idRm = id
if items1 or items2:
self.contextMenu.show()
#self.f=QPoint
self.contextMenu.exec_(QCursor.pos()) # 在鼠标位置显示
def remove(self):
items1 = self.includeList.selectedIndexes()
items2 = self.excludeList.selectedIndexes()
if self.index==3:
if items1:
for jj in items1:
self.includeList.removeItemWidget(self.includeList.takeItem(jj.row()))
if self.index == 4:
if items2:
for jj in items2:
self.excludeList.removeItemWidget(self.excludeList.takeItem(jj.row()))
def EndResult(self):
print(os.getcwd())
f=open('./conerr.err','r')
lines=f.readlines()
j=0
for ii in lines:
if "error:"in ii:
self.Result.append("<font color=\"#FF0000\">%s</font> "%ii)
j=1
if j!=1:
self.Result.append("<font color=\"#FF0000\">finished!!!!!!!!</font> ")
self.barlabel.setText('已完成')
f.close()
os.remove('./conerr.err')
self.backend.working=False
self.statusBar.removeWidget(self.progressBar)
self.barlabel.setText('准备中')
os.chdir(self.ProjectName_2.text())
def initBar(self):
global NUM
self.progressBar = QProgressBar()
self.Result.clear()
self.barlabel.setText('正在编译:')
self.statusBar.addPermanentWidget(self.progressBar, stretch=2)
f = open('./Default/Default.objectlist','r')
lines = f.readlines()
f.close()
NUM=len(lines)
#self.progressBar.setGeometry(0,0,100,5)
self.progressBar.setRange(0,len(lines))
global VAL
VAL=0
def SetProgressBarVal(self,val):
#global VAL
n=VAL+val
self.progressBar.setValue(n)
def StartCompile(self,Hdir):
global cmd1
#cmd1 = r'''%s\bin\make -j8 all >console.log 2>&1''' % Hdir
cmd1 = r'''%s\bin\make -j8 all''' % Hdir
#cmd1 = self.startpath+'\Compile\compile.bat '+Hdir
# cmd1='cd ..'
# print(includepath)
# self.process =subprocess.Popen(self.startpath+ '\Compile\compile.bat ' + cmd1)
os.chdir(self.ProjectName_2.text() + '/Default')
#f=open('ccccc.txt','w')
#self.process = subprocess.Popen(cmd1)
self.backend1 = BackendTread1()
self.backend1.startcompile1.connect(self.PrintConsole)
self.backend1.endSig.connect(self.EndResult)
#time.sleep(3)
self.backend1.start()
self.backend = BackendTread()
self.backend.setvalue.connect(self.SetProgressBarVal)
#self.backend.endSig.connect(self.EndResult)
# time.sleep(3)
self.backend.start()
'''self.process = subprocess.call(cmd1)
self.process.wait()
f= open('console.log','r')
lines =f.readlines()
for ii in lines:
if 'error:'in ii:
self.Result.insertText(ii+'\n')'''
#os.chdir(self.ProjectName_2.text())
def PrintConsole(self,r):
#print(2222)
# None表示正在执行中
#r = self.process.stdout.readline()
#self.Result.append(r)
self.Result.append("<font color=\"#000000\">%s</font> "%r)
#self.backend.stopSig()
# 可修改输出方式,比如控制台、文件等
#print(self.process.poll())
# 重定向错误输出
def checkFLAG(self):
CCFLAG1 = self.GCCFLAGName.toPlainText()
#CCFLAG1 = CCFLAG1[0:len(CCFLAG1) - 1]
LINKFLAG1 = self.LINKFLAGName.toPlainText()
#LINKFLAG1 = LINKFLAG1[0:len(LINKFLAG1) - 1]
Hdir = self.HithTecDir.text()
DebugName1 = self.ProjectName.text()
inn=self.includeList.count()
inpath=[]
exn = self.excludeList.count()
expath = []
for i in range(inn):
inpath.append(self.includeList.item(i).text())
for i in range(exn):
expath.append(self.excludeList.item(i).text())
#print(CCFLAG1)
# POSTBUILD1 = pb.get()
# Hdir = Hdir[0:len(Hdir) - 1]
#if CCFLAG1 != self.CCFLAG or self.LINKFLAG != LINKFLAG1 or Hdir != self.HighTecDir or DebugName1 != self.DebugName or expath != self.excludefiles or inpath != self.includepath:
self.modifyFLAG()
'''for i in range(0,len(CCFALG)):
if CCFALG1[i]!=CCFALG[i]:
print(i)'''
cmd=self.startpath+'\Compile\python '+self.startpath+"\Compile/automake.py "+self.startpath
a=subprocess.call(cmd)
self.initBar()
#a.wait()
#cmd1 = Hdir + r'\bin\make'
#self.backend.update_date.connect(self.handleDisplay)
try:
self.StartCompile(Hdir)
except BaseException as e:
print(333333)
f=open('cons.log','w')
f.write(e.args)
f.close()
#def
def modifyFLAG(self):
# f=open('./TOOLS/Compile/automake_config.py','r',encoding='utf-8')
CCFLAGNOW = self.GCCFLAGName.toPlainText()
# CCFLAG1 = CCFLAG1[0:len(CCFLAG1) - 1]
LINKFLAGNOW = self.LINKFLAGName.toPlainText()
# LINKFLAG1 = LINKFLAG1[0:len(LINKFLAG1) - 1]
HighTecDirNOW = self.HithTecDir.text()
DebugNameNOW = self.ProjectName.text()
inn = self.includeList.count()
inpathNOW = []
exn = self.excludeList.count()
expathNOW = []
Ln = self.Llist.count()
LnNOW = [] | for i in range(inn):
inpathNOW.append(self.includeList.item(i).text())
for i in range(exn):
expathNOW.append(self.excludeList.item(i).text())
f = open('./py.pyconfig', 'w', encoding='utf-8')
# lines=f.readlines()
tLink = re.split(' ',LINKFLAGNOW)
Linkchange=''
for iii in tLink:
if '-L' not in iii and '-l:' not in iii:
Linkchange+=iii+' '
for i in range(Ln):
p = re.split('{workspace}/',self.Llist.item(i).text())
#print(p)
if len(p)==1:
Linkchange+='''-L"'''+os.path.abspath(p[0])+'''" '''
else:
Linkchange += '''-L"''' + os.path.abspath(p[1]) + '''" '''
LnNOW.append(self.Llist.item(i).text())
for i in range(ln):
Linkchange+='-l'+self.llist.item(i).text()+' '
lnNOW.append(self.llist.item(i).text())
f.write('CCFLAG=' + CCFLAGNOW + "\n")
f.write('LINKFLAG=' + Linkchange + "\n")
f.write('HighTecDir=' + HighTecDirNOW + "\n")
f.write('DebugName=' + DebugNameNOW + "\n")
aa = "includepath="
for a in inpathNOW:
if a != "":
aa += a + ','
f.write(aa + '\n')
bb = "excludefiles="
for b in expathNOW:
if b != "":
bb += b + ','
f.write(bb + '\n')
cc = "LibraryPath="
for c in LnNOW:
if c != "":
cc += c + ','
dd = "libraties="
for d in lnNOW:
if d != "":
dd += d + ','
f.write(cc + '\n')
f.write(dd + '\n')
f.close()
self.LINKFLAGName.setText('')
self.LINKFLAGName.setText(Linkchange)
except:
f.close()
def CleanProject(self):
print('Cleanning project...... ')
if os.path.exists('./Default'):
shutil.rmtree('./Default')
if os.path.exists('./delivery'):
shutil.rmtree('./delivery')
QMessageBox.about(self, "消息", "Clean has finished!")
#tkinter.messagebox.showinfo('提示','Clean has finished!')
print('Clean has finished!')
def testaa(self):
print("1")
def CloseTools(self):
print(1)
def delPath(self,id):
if id==1:
self.includeList.clear()
if id == 2:
self.excludeList.clear()
def ShowDialog(self,id):
#self.di=QDialog()
#fileselect1 = fileselect.Ui_Dialog()
#fileselect1.setupUi(self.di)
self.idPath=id
self.di.exec()
# for path,dir,files in os.walk(os.getcwd()):
# for file in files:
# i=i+1
# if file.endswith('.h') and "TOOLS" not in path:
# if "TOOLS" not in path:
# a='child'+str(i)
# a=QTreeWidgetItem(child0)
def adds(self,paths, root):
if os.path.isdir(paths):
list = os.listdir(paths)
for i in list:
# j=0
# for path1 ,dirs,files in os.walk(os.path.join(paths,i)):
# for file in files:
# if file.endswith('.h') or file.endswith('.c'):
# j=1
if 'Default' not in i and '.' not in i and '_pycache_' not in os.path.join(paths,i) and os.path.join(
paths, i) in self.AllPath:
# self.adds(os.path.join(paths, i),root)
if os.path.isdir(os.path.join(paths, i)):
childs = QTreeWidgetItem(root)
childs.setText(0, i)
childs.setIcon(0, QIcon('./Compile/01.png'))
self.adds(os.path.join(paths, i), childs)
#注意:是对QDialog对象show(),并不是自己生成的Ui_Dialog对象 show(),开始没有写self.di,弹窗总是一闪而过,类的的函数加上self之后成功
#print(QFileDialog.getExistingDirectory(None, "请选择要添加的文件", os.getcwd()))
def GetPath(self):
if self.index==3:
pathlist = self.fileselect.treeWidget.selectedItems()
# pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
# print(pathlist.value().childCount())
tempinclude = []
for pathss in pathlist:
tpathss = pathss
tp = ""
while 1:
if tpathss.text(0)!=self.DebugName:
tp = tpathss.text(0) + tp
if tpathss.parent():
tpathss = tpathss.parent()
tp = '/' + tp
else:
break
if tp not in tempinclude and tp!="":
tempinclude.append(tp)
pathss.setSelected(False)
self.includeList.addItems(sorted(tempinclude))
elif self.idPath==2:
pathlist = self.fileselect.treeWidget.selectedItems()
#pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
#print(pathlist.value().childCount())
tempexclude=[]
for pathss in pathlist:
tpathss=pathss
tp=""
while 1:
if tpathss.text(0) != self.DebugName:
tp = tpathss.text(0)+tp
if tpathss.parent():
tpathss=tpathss.parent()
tp='/'+tp
else:
break
if tp not in tempexclude and tp!="":
tempexclude.append(tp)
self.excludeList.addItems(sorted(tempexclude))
elif self.index==2:
pathlist = self.fileselect.treeWidget.selectedItems()
# pathlist = QTreeWidgetItemIterator(self.fileselect.treeWidget)
# print(pathlist.value().childCount())
tempexclude = []
for pathss in pathlist:
tpathss = pathss
tp = ""
while 1:
if tpathss.text(0) != self.DebugName:
tp = tpathss.text(0) + tp
if tpathss.parent():
tpathss = tpathss.parent()
tp = '/' + tp
else:
break
if tp not in tempexclude and tp != "":
tempexclude.append("{workspace}"+tp)
pathss.setSelected(False)
self.Llist.addItems(tempexclude)
self.LWin.close()#如果是通过workspace选的直接关掉选择框
self.di.close()
'''for selectedPath in pathlist:
print(selectedPath.text(0))
print(pathlist)'''
#if pathlist.value().checkState(0) == Qt.Checked:
#n=self.fileselect.treeWidget.topLevelItemCount()
'''while pathlist.value():
if pathlist.value().checkState(0)==Qt.Checked:
print(pathlist.value.text(0))
break'''
def Cleartree(self):
pathlist = self.fileselect.treeWidget.selectedItems()
for pathss in pathlist:
pathss.setSelected(False)
self.di.close()
def AddExpath(self):
dir1,file1 = QFileDialog.getOpenFileNames (self,'选择过滤文件',os.getcwd(),"C FILES(*.c)")
#print(dir1,file1)
for ii in dir1:
if ii!='' :
dir2 = re.split(os.getcwd().replace('\\','/'),ii)[1]
self.excludeList.addItem(dir2)
#Library的具体操作
def AddLibraryPath(self):
txt=self.LWUI.LibraryP.text()
if txt:
self.Llist.addItem(txt)
self.LWin.close()
def AddLibraries(self):
txt = self.lWUI.libraries.text()
if txt:
self.llist.addItem(txt)
self.lWin.close()
def DelLibraryPath(self):
items1 = self.Llist.selectedIndexes()
if items1:
for jj in items1:
self.Llist.removeItemWidget(self.Llist.takeItem(jj.row()))
def DelLibraries(self):
items1 = self.llist.selectedIndexes()
if items1:
for jj in items1:
self.llist.removeItemWidget(self.llist.takeItem(jj.row()))
if __name__ == '__main__':
cmd1 = ""
NUM=0
VAL=0
app = QApplication(sys.argv)
app.setWindowIcon(QIcon('./Compile/mainwindowIcon.png'))
a=basePage()
a.ChooseProDir()
a.show()
#进入程序的主循环,并通过exit函数确保主循环安全结束
sys.exit(app.exec_()) | ln = self.llist.count()
lnNOW = []
try: | random_line_split |
feature.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: tensorflow/core/example/feature.proto
/*
Package tensorflow is a generated protocol buffer package.
It is generated from these files:
tensorflow/core/example/feature.proto
It has these top-level messages:
BytesList
FloatList
Int64List
Feature
Features
FeatureList
FeatureLists
*/
package tensorflow
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Containers to hold repeated fundamental values.
type BytesList struct {
Value [][]byte `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
}
func (m *BytesList) Reset() { *m = BytesList{} }
func (m *BytesList) String() string { return proto.CompactTextString(m) }
func (*BytesList) ProtoMessage() {}
func (*BytesList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *BytesList) GetValue() [][]byte {
if m != nil {
return m.Value
}
return nil
}
type FloatList struct {
Value []float32 `protobuf:"fixed32,1,rep,packed,name=value" json:"value,omitempty"`
}
func (m *FloatList) Reset() { *m = FloatList{} }
func (m *FloatList) String() string { return proto.CompactTextString(m) }
func (*FloatList) ProtoMessage() {}
func (*FloatList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *FloatList) GetValue() []float32 {
if m != nil {
return m.Value
}
return nil
}
type Int64List struct {
Value []int64 `protobuf:"varint,1,rep,packed,name=value" json:"value,omitempty"`
}
func (m *Int64List) Reset() { *m = Int64List{} }
func (m *Int64List) String() string { return proto.CompactTextString(m) }
func (*Int64List) ProtoMessage() {}
func (*Int64List) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Int64List) GetValue() []int64 {
if m != nil {
return m.Value
}
return nil
}
// Containers for non-sequential data.
type Feature struct {
// Each feature can be exactly one kind.
//
// Types that are valid to be assigned to Kind:
// *Feature_BytesList
// *Feature_FloatList
// *Feature_Int64List
Kind isFeature_Kind `protobuf_oneof:"kind"`
}
func (m *Feature) Reset() { *m = Feature{} }
func (m *Feature) String() string { return proto.CompactTextString(m) }
func (*Feature) ProtoMessage() {}
func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type isFeature_Kind interface {
isFeature_Kind()
}
type Feature_BytesList struct {
BytesList *BytesList `protobuf:"bytes,1,opt,name=bytes_list,json=bytesList,oneof"`
}
type Feature_FloatList struct {
FloatList *FloatList `protobuf:"bytes,2,opt,name=float_list,json=floatList,oneof"`
}
type Feature_Int64List struct {
Int64List *Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,oneof"`
}
func (*Feature_BytesList) isFeature_Kind() {}
func (*Feature_FloatList) isFeature_Kind() {}
func (*Feature_Int64List) isFeature_Kind() {}
func (m *Feature) GetKind() isFeature_Kind {
if m != nil {
return m.Kind
}
return nil
}
func (m *Feature) GetBytesList() *BytesList {
if x, ok := m.GetKind().(*Feature_BytesList); ok {
return x.BytesList
}
return nil
}
func (m *Feature) GetFloatList() *FloatList {
if x, ok := m.GetKind().(*Feature_FloatList); ok {
return x.FloatList
}
return nil
}
func (m *Feature) GetInt64List() *Int64List {
if x, ok := m.GetKind().(*Feature_Int64List); ok {
return x.Int64List
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Feature) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Feature_OneofMarshaler, _Feature_OneofUnmarshaler, _Feature_OneofSizer, []interface{}{
(*Feature_BytesList)(nil),
(*Feature_FloatList)(nil),
(*Feature_Int64List)(nil),
}
}
func _Feature_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.BytesList); err != nil {
return err
}
case *Feature_FloatList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.FloatList); err != nil {
return err
}
case *Feature_Int64List:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Int64List); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Feature.Kind has unexpected type %T", x)
}
return nil
}
func _Feature_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Feature)
switch tag {
case 1: // kind.bytes_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(BytesList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_BytesList{msg}
return true, err
case 2: // kind.float_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(FloatList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_FloatList{msg}
return true, err
case 3: // kind.int64_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Int64List)
err := b.DecodeMessage(msg)
m.Kind = &Feature_Int64List{msg}
return true, err
default:
return false, nil
}
}
func _Feature_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
s := proto.Size(x.BytesList)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_FloatList:
s := proto.Size(x.FloatList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_Int64List:
s := proto.Size(x.Int64List)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type Features struct {
// Map from feature name to feature.
Feature map[string]*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Features) Reset() { *m = Features{} }
func (m *Features) String() string { return proto.CompactTextString(m) }
func (*Features) ProtoMessage() {}
func (*Features) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Features) GetFeature() map[string]*Feature {
if m != nil {
return m.Feature
}
return nil
}
// Containers for sequential data.
//
// A FeatureList contains lists of Features. These may hold zero or more
// Feature values.
//
// FeatureLists are organized into categories by name. The FeatureLists message
// contains the mapping from name to FeatureList.
//
type FeatureList struct {
Feature []*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty"`
}
func (m *FeatureList) Reset() { *m = FeatureList{} }
func (m *FeatureList) | () string { return proto.CompactTextString(m) }
func (*FeatureList) ProtoMessage() {}
func (*FeatureList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FeatureList) GetFeature() []*Feature {
if m != nil {
return m.Feature
}
return nil
}
type FeatureLists struct {
// Map from feature name to feature list.
FeatureList map[string]*FeatureList `protobuf:"bytes,1,rep,name=feature_list,json=featureList" json:"feature_list,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *FeatureLists) Reset() { *m = FeatureLists{} }
func (m *FeatureLists) String() string { return proto.CompactTextString(m) }
func (*FeatureLists) ProtoMessage() {}
func (*FeatureLists) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *FeatureLists) GetFeatureList() map[string]*FeatureList {
if m != nil {
return m.FeatureList
}
return nil
}
func init() {
proto.RegisterType((*BytesList)(nil), "tensorflow.BytesList")
proto.RegisterType((*FloatList)(nil), "tensorflow.FloatList")
proto.RegisterType((*Int64List)(nil), "tensorflow.Int64List")
proto.RegisterType((*Feature)(nil), "tensorflow.Feature")
proto.RegisterType((*Features)(nil), "tensorflow.Features")
proto.RegisterType((*FeatureList)(nil), "tensorflow.FeatureList")
proto.RegisterType((*FeatureLists)(nil), "tensorflow.FeatureLists")
}
func init() { proto.RegisterFile("tensorflow/core/example/feature.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 371 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0x4a, 0xc3, 0x30,
0x14, 0xc6, 0x4d, 0xab, 0x9b, 0x3d, 0x9d, 0x30, 0xe2, 0xbf, 0xb1, 0xab, 0xad, 0x30, 0xd8, 0xc0,
0x6d, 0x30, 0xa5, 0x88, 0x7a, 0x55, 0x70, 0x28, 0x0c, 0x1c, 0xbd, 0xf1, 0x52, 0x3a, 0x4d, 0xa5,
0xac, 0x36, 0xa3, 0xc9, 0xd4, 0xbd, 0x89, 0x2f, 0xe2, 0x85, 0x6f, 0xe6, 0xa5, 0xa4, 0x4d, 0xbb,
0x6c, 0xad, 0x77, 0x3d, 0xc9, 0xf7, 0x9d, 0xfc, 0xbe, 0xd3, 0x03, 0x1d, 0x4e, 0x22, 0x46, 0x63,
0x3f, 0xa4, 0x1f, 0xc3, 0x67, 0x1a, 0x93, 0x21, 0xf9, 0xf4, 0xde, 0x16, 0x21, 0x19, 0xfa, 0xc4,
0xe3, 0xcb, 0x98, 0x0c, 0x16, 0x31, 0xe5, 0x14, 0xc3, 0x5a, 0x66, 0xb5, 0xc1, 0x70, 0x56, 0x9c,
0xb0, 0x49, 0xc0, 0x38, 0x3e, 0x82, 0xbd, 0x77, 0x2f, 0x5c, 0x92, 0x06, 0x6a, 0xe9, 0xdd, 0x9a,
0x9b, 0x16, 0x56, 0x07, 0x8c, 0x71, 0x48, 0x3d, 0x9e, 0x48, 0x1a, 0xaa, 0x44, 0x73, 0xb4, 0x3a,
0x52, 0x64, 0xf7, 0x11, 0xb7, 0x2f, 0x8a, 0x32, 0x5d, 0x95, 0xfd, 0x20, 0xa8, 0x8e, 0x53, 0x1c,
0x6c, 0x03, 0xcc, 0xc4, 0xe3, 0x4f, 0x61, 0xc0, 0x78, 0x03, 0xb5, 0x50, 0xd7, 0x1c, 0x1d, 0x0f,
0xd6, 0x74, 0x83, 0x1c, 0xed, 0x6e, 0xc7, 0x35, 0x66, 0x39, 0xa7, 0x0d, 0xe0, 0x0b, 0xa2, 0xd4,
0xa7, 0x15, 0x7d, 0x39, 0xaf, 0xf0, 0xf9, 0x39, 0xbc, 0x0d, 0x10, 0x08, 0xc4, 0xd4, 0xa7, 0x17,
0x7d, 0x79, 0x00, 0xe1, 0x0b, 0xb2, 0xc2, 0xa9, 0xc0, 0xee, 0x3c, 0x88, 0x5e, 0xac, 0x2f, 0x04,
0xfb, 0x92, 0x9d, 0xe1, 0x6b, 0xa8, 0xca, 0xb1, 0x26, 0x21, 0xcd, 0x51, 0x7b, 0x83, 0x40, 0xca,
0xb2, 0x8f, 0xdb, 0x88, 0xc7, 0x2b, 0x37, 0x73, 0x34, 0x1f, 0xa0, 0xa6, 0x5e, 0xe0, 0x3a, 0xe8,
0x73, 0xb2, 0x4a, 0x46, 0x60, 0xb8, 0xe2, 0x13, 0xf7, 0xb2, 0x09, 0xa6, 0xf1, 0x0e, 0x4b, 0x9a,
0xcb, 0x91, 0x5e, 0x69, 0x97, 0xc8, 0xba, 0x01, 0x53, 0x9e, 0x26, 0x49, 0xfb, 0xdb, 0x70, 0xa5,
0xfe, 0x4c, 0x63, 0x7d, 0xa3, 0x9c, 0x47, 0xd8, 0x19, 0x9e, 0x40, 0x4d, 0xde, 0x65, 0xff, 0x46,
0x34, 0xe9, 0x95, 0x34, 0x49, 0xf4, 0x6a, 0x91, 0x26, 0x35, 0xfd, 0xf5, 0x49, 0xf3, 0x11, 0xea,
0xdb, 0x82, 0x92, 0xc4, 0xfd, 0xcd, 0xc4, 0xa7, 0xff, 0x3c, 0xa6, 0xa4, 0x76, 0xce, 0xe0, 0x84,
0xc6, 0xaf, 0xaa, 0x50, 0x6e, 0xbc, 0x73, 0x20, 0x1d, 0x53, 0xb1, 0xf1, 0x6c, 0x8a, 0x7e, 0x11,
0x9a, 0x55, 0x92, 0xf5, 0x3f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x38, 0x1c, 0x54, 0x27,
0x03, 0x00, 0x00,
}
| String | identifier_name |
feature.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: tensorflow/core/example/feature.proto
/*
Package tensorflow is a generated protocol buffer package.
It is generated from these files:
tensorflow/core/example/feature.proto
It has these top-level messages:
BytesList
FloatList
Int64List
Feature
Features
FeatureList
FeatureLists
*/
package tensorflow
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Containers to hold repeated fundamental values.
type BytesList struct {
Value [][]byte `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
}
func (m *BytesList) Reset() { *m = BytesList{} }
func (m *BytesList) String() string { return proto.CompactTextString(m) }
func (*BytesList) ProtoMessage() {}
func (*BytesList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *BytesList) GetValue() [][]byte {
if m != nil {
return m.Value
}
return nil
}
type FloatList struct {
Value []float32 `protobuf:"fixed32,1,rep,packed,name=value" json:"value,omitempty"`
}
func (m *FloatList) Reset() { *m = FloatList{} }
func (m *FloatList) String() string { return proto.CompactTextString(m) }
func (*FloatList) ProtoMessage() {}
func (*FloatList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *FloatList) GetValue() []float32 {
if m != nil {
return m.Value
}
return nil
}
type Int64List struct {
Value []int64 `protobuf:"varint,1,rep,packed,name=value" json:"value,omitempty"`
}
func (m *Int64List) Reset() { *m = Int64List{} }
func (m *Int64List) String() string { return proto.CompactTextString(m) }
func (*Int64List) ProtoMessage() {}
func (*Int64List) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Int64List) GetValue() []int64 {
if m != nil {
return m.Value
}
return nil
}
// Containers for non-sequential data.
type Feature struct {
// Each feature can be exactly one kind.
//
// Types that are valid to be assigned to Kind:
// *Feature_BytesList
// *Feature_FloatList
// *Feature_Int64List
Kind isFeature_Kind `protobuf_oneof:"kind"`
}
func (m *Feature) Reset() { *m = Feature{} }
func (m *Feature) String() string { return proto.CompactTextString(m) }
func (*Feature) ProtoMessage() {}
func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type isFeature_Kind interface {
isFeature_Kind()
}
type Feature_BytesList struct {
BytesList *BytesList `protobuf:"bytes,1,opt,name=bytes_list,json=bytesList,oneof"`
}
type Feature_FloatList struct {
FloatList *FloatList `protobuf:"bytes,2,opt,name=float_list,json=floatList,oneof"`
}
type Feature_Int64List struct {
Int64List *Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,oneof"`
}
func (*Feature_BytesList) isFeature_Kind() {}
func (*Feature_FloatList) isFeature_Kind() {}
func (*Feature_Int64List) isFeature_Kind() {}
func (m *Feature) GetKind() isFeature_Kind {
if m != nil {
return m.Kind
}
return nil
}
func (m *Feature) GetBytesList() *BytesList {
if x, ok := m.GetKind().(*Feature_BytesList); ok {
return x.BytesList
}
return nil
}
func (m *Feature) GetFloatList() *FloatList {
if x, ok := m.GetKind().(*Feature_FloatList); ok {
return x.FloatList
}
return nil
}
func (m *Feature) GetInt64List() *Int64List {
if x, ok := m.GetKind().(*Feature_Int64List); ok {
return x.Int64List
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Feature) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Feature_OneofMarshaler, _Feature_OneofUnmarshaler, _Feature_OneofSizer, []interface{}{
(*Feature_BytesList)(nil),
(*Feature_FloatList)(nil),
(*Feature_Int64List)(nil),
}
}
func _Feature_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.BytesList); err != nil {
return err
}
case *Feature_FloatList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.FloatList); err != nil {
return err
}
case *Feature_Int64List:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Int64List); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Feature.Kind has unexpected type %T", x)
}
return nil
}
func _Feature_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Feature)
switch tag {
case 1: // kind.bytes_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(BytesList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_BytesList{msg}
return true, err
case 2: // kind.float_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(FloatList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_FloatList{msg}
return true, err
case 3: // kind.int64_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Int64List)
err := b.DecodeMessage(msg)
m.Kind = &Feature_Int64List{msg}
return true, err
default:
return false, nil
}
}
func _Feature_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
s := proto.Size(x.BytesList)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_FloatList:
s := proto.Size(x.FloatList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_Int64List:
s := proto.Size(x.Int64List)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type Features struct {
// Map from feature name to feature.
Feature map[string]*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Features) Reset() { *m = Features{} }
func (m *Features) String() string { return proto.CompactTextString(m) }
func (*Features) ProtoMessage() {}
func (*Features) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Features) GetFeature() map[string]*Feature {
if m != nil {
return m.Feature
}
return nil
}
// Containers for sequential data.
//
// A FeatureList contains lists of Features. These may hold zero or more
// Feature values.
//
// FeatureLists are organized into categories by name. The FeatureLists message
// contains the mapping from name to FeatureList.
//
type FeatureList struct {
Feature []*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty"`
}
func (m *FeatureList) Reset() { *m = FeatureList{} }
func (m *FeatureList) String() string { return proto.CompactTextString(m) }
func (*FeatureList) ProtoMessage() {}
func (*FeatureList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FeatureList) GetFeature() []*Feature {
if m != nil {
return m.Feature
}
return nil
}
type FeatureLists struct {
// Map from feature name to feature list.
FeatureList map[string]*FeatureList `protobuf:"bytes,1,rep,name=feature_list,json=featureList" json:"feature_list,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *FeatureLists) Reset() { *m = FeatureLists{} }
func (m *FeatureLists) String() string { return proto.CompactTextString(m) }
func (*FeatureLists) ProtoMessage() {}
func (*FeatureLists) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *FeatureLists) GetFeatureList() map[string]*FeatureList {
if m != nil {
return m.FeatureList
}
return nil
}
func init() |
func init() { proto.RegisterFile("tensorflow/core/example/feature.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 371 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0x4a, 0xc3, 0x30,
0x14, 0xc6, 0x4d, 0xab, 0x9b, 0x3d, 0x9d, 0x30, 0xe2, 0xbf, 0xb1, 0xab, 0xad, 0x30, 0xd8, 0xc0,
0x6d, 0x30, 0xa5, 0x88, 0x7a, 0x55, 0x70, 0x28, 0x0c, 0x1c, 0xbd, 0xf1, 0x52, 0x3a, 0x4d, 0xa5,
0xac, 0x36, 0xa3, 0xc9, 0xd4, 0xbd, 0x89, 0x2f, 0xe2, 0x85, 0x6f, 0xe6, 0xa5, 0xa4, 0x4d, 0xbb,
0x6c, 0xad, 0x77, 0x3d, 0xc9, 0xf7, 0x9d, 0xfc, 0xbe, 0xd3, 0x03, 0x1d, 0x4e, 0x22, 0x46, 0x63,
0x3f, 0xa4, 0x1f, 0xc3, 0x67, 0x1a, 0x93, 0x21, 0xf9, 0xf4, 0xde, 0x16, 0x21, 0x19, 0xfa, 0xc4,
0xe3, 0xcb, 0x98, 0x0c, 0x16, 0x31, 0xe5, 0x14, 0xc3, 0x5a, 0x66, 0xb5, 0xc1, 0x70, 0x56, 0x9c,
0xb0, 0x49, 0xc0, 0x38, 0x3e, 0x82, 0xbd, 0x77, 0x2f, 0x5c, 0x92, 0x06, 0x6a, 0xe9, 0xdd, 0x9a,
0x9b, 0x16, 0x56, 0x07, 0x8c, 0x71, 0x48, 0x3d, 0x9e, 0x48, 0x1a, 0xaa, 0x44, 0x73, 0xb4, 0x3a,
0x52, 0x64, 0xf7, 0x11, 0xb7, 0x2f, 0x8a, 0x32, 0x5d, 0x95, 0xfd, 0x20, 0xa8, 0x8e, 0x53, 0x1c,
0x6c, 0x03, 0xcc, 0xc4, 0xe3, 0x4f, 0x61, 0xc0, 0x78, 0x03, 0xb5, 0x50, 0xd7, 0x1c, 0x1d, 0x0f,
0xd6, 0x74, 0x83, 0x1c, 0xed, 0x6e, 0xc7, 0x35, 0x66, 0x39, 0xa7, 0x0d, 0xe0, 0x0b, 0xa2, 0xd4,
0xa7, 0x15, 0x7d, 0x39, 0xaf, 0xf0, 0xf9, 0x39, 0xbc, 0x0d, 0x10, 0x08, 0xc4, 0xd4, 0xa7, 0x17,
0x7d, 0x79, 0x00, 0xe1, 0x0b, 0xb2, 0xc2, 0xa9, 0xc0, 0xee, 0x3c, 0x88, 0x5e, 0xac, 0x2f, 0x04,
0xfb, 0x92, 0x9d, 0xe1, 0x6b, 0xa8, 0xca, 0xb1, 0x26, 0x21, 0xcd, 0x51, 0x7b, 0x83, 0x40, 0xca,
0xb2, 0x8f, 0xdb, 0x88, 0xc7, 0x2b, 0x37, 0x73, 0x34, 0x1f, 0xa0, 0xa6, 0x5e, 0xe0, 0x3a, 0xe8,
0x73, 0xb2, 0x4a, 0x46, 0x60, 0xb8, 0xe2, 0x13, 0xf7, 0xb2, 0x09, 0xa6, 0xf1, 0x0e, 0x4b, 0x9a,
0xcb, 0x91, 0x5e, 0x69, 0x97, 0xc8, 0xba, 0x01, 0x53, 0x9e, 0x26, 0x49, 0xfb, 0xdb, 0x70, 0xa5,
0xfe, 0x4c, 0x63, 0x7d, 0xa3, 0x9c, 0x47, 0xd8, 0x19, 0x9e, 0x40, 0x4d, 0xde, 0x65, 0xff, 0x46,
0x34, 0xe9, 0x95, 0x34, 0x49, 0xf4, 0x6a, 0x91, 0x26, 0x35, 0xfd, 0xf5, 0x49, 0xf3, 0x11, 0xea,
0xdb, 0x82, 0x92, 0xc4, 0xfd, 0xcd, 0xc4, 0xa7, 0xff, 0x3c, 0xa6, 0xa4, 0x76, 0xce, 0xe0, 0x84,
0xc6, 0xaf, 0xaa, 0x50, 0x6e, 0xbc, 0x73, 0x20, 0x1d, 0x53, 0xb1, 0xf1, 0x6c, 0x8a, 0x7e, 0x11,
0x9a, 0x55, 0x92, 0xf5, 0x3f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x38, 0x1c, 0x54, 0x27,
0x03, 0x00, 0x00,
}
| {
proto.RegisterType((*BytesList)(nil), "tensorflow.BytesList")
proto.RegisterType((*FloatList)(nil), "tensorflow.FloatList")
proto.RegisterType((*Int64List)(nil), "tensorflow.Int64List")
proto.RegisterType((*Feature)(nil), "tensorflow.Feature")
proto.RegisterType((*Features)(nil), "tensorflow.Features")
proto.RegisterType((*FeatureList)(nil), "tensorflow.FeatureList")
proto.RegisterType((*FeatureLists)(nil), "tensorflow.FeatureLists")
} | identifier_body |
feature.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: tensorflow/core/example/feature.proto
/*
Package tensorflow is a generated protocol buffer package.
It is generated from these files:
tensorflow/core/example/feature.proto
It has these top-level messages:
BytesList
FloatList
Int64List
Feature
Features
FeatureList
FeatureLists
*/
package tensorflow
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Containers to hold repeated fundamental values.
type BytesList struct {
Value [][]byte `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
}
func (m *BytesList) Reset() { *m = BytesList{} }
func (m *BytesList) String() string { return proto.CompactTextString(m) }
func (*BytesList) ProtoMessage() {}
func (*BytesList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *BytesList) GetValue() [][]byte {
if m != nil {
return m.Value
}
return nil
}
type FloatList struct {
Value []float32 `protobuf:"fixed32,1,rep,packed,name=value" json:"value,omitempty"`
}
func (m *FloatList) Reset() { *m = FloatList{} }
func (m *FloatList) String() string { return proto.CompactTextString(m) }
func (*FloatList) ProtoMessage() {}
func (*FloatList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *FloatList) GetValue() []float32 {
if m != nil {
return m.Value
}
return nil
}
type Int64List struct {
Value []int64 `protobuf:"varint,1,rep,packed,name=value" json:"value,omitempty"`
}
func (m *Int64List) Reset() { *m = Int64List{} }
func (m *Int64List) String() string { return proto.CompactTextString(m) }
func (*Int64List) ProtoMessage() {}
func (*Int64List) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Int64List) GetValue() []int64 {
if m != nil {
return m.Value
}
return nil
}
// Containers for non-sequential data.
type Feature struct {
// Each feature can be exactly one kind.
//
// Types that are valid to be assigned to Kind:
// *Feature_BytesList
// *Feature_FloatList
// *Feature_Int64List
Kind isFeature_Kind `protobuf_oneof:"kind"`
}
func (m *Feature) Reset() { *m = Feature{} }
func (m *Feature) String() string { return proto.CompactTextString(m) }
func (*Feature) ProtoMessage() {}
func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type isFeature_Kind interface {
isFeature_Kind()
}
type Feature_BytesList struct {
BytesList *BytesList `protobuf:"bytes,1,opt,name=bytes_list,json=bytesList,oneof"`
}
type Feature_FloatList struct {
FloatList *FloatList `protobuf:"bytes,2,opt,name=float_list,json=floatList,oneof"`
}
type Feature_Int64List struct {
Int64List *Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,oneof"`
}
func (*Feature_BytesList) isFeature_Kind() {}
func (*Feature_FloatList) isFeature_Kind() {}
func (*Feature_Int64List) isFeature_Kind() {}
func (m *Feature) GetKind() isFeature_Kind {
if m != nil {
return m.Kind
}
return nil
}
func (m *Feature) GetBytesList() *BytesList {
if x, ok := m.GetKind().(*Feature_BytesList); ok {
return x.BytesList
}
return nil
}
func (m *Feature) GetFloatList() *FloatList {
if x, ok := m.GetKind().(*Feature_FloatList); ok {
return x.FloatList
}
return nil
}
func (m *Feature) GetInt64List() *Int64List {
if x, ok := m.GetKind().(*Feature_Int64List); ok {
return x.Int64List
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Feature) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Feature_OneofMarshaler, _Feature_OneofUnmarshaler, _Feature_OneofSizer, []interface{}{
(*Feature_BytesList)(nil),
(*Feature_FloatList)(nil),
(*Feature_Int64List)(nil),
}
}
func _Feature_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.BytesList); err != nil {
return err
}
case *Feature_FloatList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.FloatList); err != nil {
return err
}
case *Feature_Int64List:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Int64List); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Feature.Kind has unexpected type %T", x)
}
return nil
}
func _Feature_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Feature)
switch tag {
case 1: // kind.bytes_list
if wire != proto.WireBytes |
msg := new(BytesList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_BytesList{msg}
return true, err
case 2: // kind.float_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(FloatList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_FloatList{msg}
return true, err
case 3: // kind.int64_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Int64List)
err := b.DecodeMessage(msg)
m.Kind = &Feature_Int64List{msg}
return true, err
default:
return false, nil
}
}
func _Feature_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
s := proto.Size(x.BytesList)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_FloatList:
s := proto.Size(x.FloatList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_Int64List:
s := proto.Size(x.Int64List)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type Features struct {
// Map from feature name to feature.
Feature map[string]*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Features) Reset() { *m = Features{} }
func (m *Features) String() string { return proto.CompactTextString(m) }
func (*Features) ProtoMessage() {}
func (*Features) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Features) GetFeature() map[string]*Feature {
if m != nil {
return m.Feature
}
return nil
}
// Containers for sequential data.
//
// A FeatureList contains lists of Features. These may hold zero or more
// Feature values.
//
// FeatureLists are organized into categories by name. The FeatureLists message
// contains the mapping from name to FeatureList.
//
type FeatureList struct {
Feature []*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty"`
}
func (m *FeatureList) Reset() { *m = FeatureList{} }
func (m *FeatureList) String() string { return proto.CompactTextString(m) }
func (*FeatureList) ProtoMessage() {}
func (*FeatureList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FeatureList) GetFeature() []*Feature {
if m != nil {
return m.Feature
}
return nil
}
type FeatureLists struct {
// Map from feature name to feature list.
FeatureList map[string]*FeatureList `protobuf:"bytes,1,rep,name=feature_list,json=featureList" json:"feature_list,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *FeatureLists) Reset() { *m = FeatureLists{} }
func (m *FeatureLists) String() string { return proto.CompactTextString(m) }
func (*FeatureLists) ProtoMessage() {}
func (*FeatureLists) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *FeatureLists) GetFeatureList() map[string]*FeatureList {
if m != nil {
return m.FeatureList
}
return nil
}
func init() {
proto.RegisterType((*BytesList)(nil), "tensorflow.BytesList")
proto.RegisterType((*FloatList)(nil), "tensorflow.FloatList")
proto.RegisterType((*Int64List)(nil), "tensorflow.Int64List")
proto.RegisterType((*Feature)(nil), "tensorflow.Feature")
proto.RegisterType((*Features)(nil), "tensorflow.Features")
proto.RegisterType((*FeatureList)(nil), "tensorflow.FeatureList")
proto.RegisterType((*FeatureLists)(nil), "tensorflow.FeatureLists")
}
func init() { proto.RegisterFile("tensorflow/core/example/feature.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 371 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0x4a, 0xc3, 0x30,
0x14, 0xc6, 0x4d, 0xab, 0x9b, 0x3d, 0x9d, 0x30, 0xe2, 0xbf, 0xb1, 0xab, 0xad, 0x30, 0xd8, 0xc0,
0x6d, 0x30, 0xa5, 0x88, 0x7a, 0x55, 0x70, 0x28, 0x0c, 0x1c, 0xbd, 0xf1, 0x52, 0x3a, 0x4d, 0xa5,
0xac, 0x36, 0xa3, 0xc9, 0xd4, 0xbd, 0x89, 0x2f, 0xe2, 0x85, 0x6f, 0xe6, 0xa5, 0xa4, 0x4d, 0xbb,
0x6c, 0xad, 0x77, 0x3d, 0xc9, 0xf7, 0x9d, 0xfc, 0xbe, 0xd3, 0x03, 0x1d, 0x4e, 0x22, 0x46, 0x63,
0x3f, 0xa4, 0x1f, 0xc3, 0x67, 0x1a, 0x93, 0x21, 0xf9, 0xf4, 0xde, 0x16, 0x21, 0x19, 0xfa, 0xc4,
0xe3, 0xcb, 0x98, 0x0c, 0x16, 0x31, 0xe5, 0x14, 0xc3, 0x5a, 0x66, 0xb5, 0xc1, 0x70, 0x56, 0x9c,
0xb0, 0x49, 0xc0, 0x38, 0x3e, 0x82, 0xbd, 0x77, 0x2f, 0x5c, 0x92, 0x06, 0x6a, 0xe9, 0xdd, 0x9a,
0x9b, 0x16, 0x56, 0x07, 0x8c, 0x71, 0x48, 0x3d, 0x9e, 0x48, 0x1a, 0xaa, 0x44, 0x73, 0xb4, 0x3a,
0x52, 0x64, 0xf7, 0x11, 0xb7, 0x2f, 0x8a, 0x32, 0x5d, 0x95, 0xfd, 0x20, 0xa8, 0x8e, 0x53, 0x1c,
0x6c, 0x03, 0xcc, 0xc4, 0xe3, 0x4f, 0x61, 0xc0, 0x78, 0x03, 0xb5, 0x50, 0xd7, 0x1c, 0x1d, 0x0f,
0xd6, 0x74, 0x83, 0x1c, 0xed, 0x6e, 0xc7, 0x35, 0x66, 0x39, 0xa7, 0x0d, 0xe0, 0x0b, 0xa2, 0xd4,
0xa7, 0x15, 0x7d, 0x39, 0xaf, 0xf0, 0xf9, 0x39, 0xbc, 0x0d, 0x10, 0x08, 0xc4, 0xd4, 0xa7, 0x17,
0x7d, 0x79, 0x00, 0xe1, 0x0b, 0xb2, 0xc2, 0xa9, 0xc0, 0xee, 0x3c, 0x88, 0x5e, 0xac, 0x2f, 0x04,
0xfb, 0x92, 0x9d, 0xe1, 0x6b, 0xa8, 0xca, 0xb1, 0x26, 0x21, 0xcd, 0x51, 0x7b, 0x83, 0x40, 0xca,
0xb2, 0x8f, 0xdb, 0x88, 0xc7, 0x2b, 0x37, 0x73, 0x34, 0x1f, 0xa0, 0xa6, 0x5e, 0xe0, 0x3a, 0xe8,
0x73, 0xb2, 0x4a, 0x46, 0x60, 0xb8, 0xe2, 0x13, 0xf7, 0xb2, 0x09, 0xa6, 0xf1, 0x0e, 0x4b, 0x9a,
0xcb, 0x91, 0x5e, 0x69, 0x97, 0xc8, 0xba, 0x01, 0x53, 0x9e, 0x26, 0x49, 0xfb, 0xdb, 0x70, 0xa5,
0xfe, 0x4c, 0x63, 0x7d, 0xa3, 0x9c, 0x47, 0xd8, 0x19, 0x9e, 0x40, 0x4d, 0xde, 0x65, 0xff, 0x46,
0x34, 0xe9, 0x95, 0x34, 0x49, 0xf4, 0x6a, 0x91, 0x26, 0x35, 0xfd, 0xf5, 0x49, 0xf3, 0x11, 0xea,
0xdb, 0x82, 0x92, 0xc4, 0xfd, 0xcd, 0xc4, 0xa7, 0xff, 0x3c, 0xa6, 0xa4, 0x76, 0xce, 0xe0, 0x84,
0xc6, 0xaf, 0xaa, 0x50, 0x6e, 0xbc, 0x73, 0x20, 0x1d, 0x53, 0xb1, 0xf1, 0x6c, 0x8a, 0x7e, 0x11,
0x9a, 0x55, 0x92, 0xf5, 0x3f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x38, 0x1c, 0x54, 0x27,
0x03, 0x00, 0x00,
}
| {
return true, proto.ErrInternalBadWireType
} | conditional_block |
feature.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: tensorflow/core/example/feature.proto
/*
Package tensorflow is a generated protocol buffer package.
It is generated from these files:
tensorflow/core/example/feature.proto
It has these top-level messages:
BytesList
FloatList
Int64List
Feature
Features
FeatureList
FeatureLists
*/
package tensorflow
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Containers to hold repeated fundamental values.
type BytesList struct {
Value [][]byte `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
}
func (m *BytesList) Reset() { *m = BytesList{} }
func (m *BytesList) String() string { return proto.CompactTextString(m) }
func (*BytesList) ProtoMessage() {}
func (*BytesList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *BytesList) GetValue() [][]byte {
if m != nil {
return m.Value
}
return nil
}
type FloatList struct {
Value []float32 `protobuf:"fixed32,1,rep,packed,name=value" json:"value,omitempty"`
}
func (m *FloatList) Reset() { *m = FloatList{} }
func (m *FloatList) String() string { return proto.CompactTextString(m) }
func (*FloatList) ProtoMessage() {}
func (*FloatList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *FloatList) GetValue() []float32 {
if m != nil {
return m.Value
} | type Int64List struct {
Value []int64 `protobuf:"varint,1,rep,packed,name=value" json:"value,omitempty"`
}
func (m *Int64List) Reset() { *m = Int64List{} }
func (m *Int64List) String() string { return proto.CompactTextString(m) }
func (*Int64List) ProtoMessage() {}
func (*Int64List) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Int64List) GetValue() []int64 {
if m != nil {
return m.Value
}
return nil
}
// Containers for non-sequential data.
type Feature struct {
// Each feature can be exactly one kind.
//
// Types that are valid to be assigned to Kind:
// *Feature_BytesList
// *Feature_FloatList
// *Feature_Int64List
Kind isFeature_Kind `protobuf_oneof:"kind"`
}
func (m *Feature) Reset() { *m = Feature{} }
func (m *Feature) String() string { return proto.CompactTextString(m) }
func (*Feature) ProtoMessage() {}
func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type isFeature_Kind interface {
isFeature_Kind()
}
type Feature_BytesList struct {
BytesList *BytesList `protobuf:"bytes,1,opt,name=bytes_list,json=bytesList,oneof"`
}
type Feature_FloatList struct {
FloatList *FloatList `protobuf:"bytes,2,opt,name=float_list,json=floatList,oneof"`
}
type Feature_Int64List struct {
Int64List *Int64List `protobuf:"bytes,3,opt,name=int64_list,json=int64List,oneof"`
}
func (*Feature_BytesList) isFeature_Kind() {}
func (*Feature_FloatList) isFeature_Kind() {}
func (*Feature_Int64List) isFeature_Kind() {}
func (m *Feature) GetKind() isFeature_Kind {
if m != nil {
return m.Kind
}
return nil
}
func (m *Feature) GetBytesList() *BytesList {
if x, ok := m.GetKind().(*Feature_BytesList); ok {
return x.BytesList
}
return nil
}
func (m *Feature) GetFloatList() *FloatList {
if x, ok := m.GetKind().(*Feature_FloatList); ok {
return x.FloatList
}
return nil
}
func (m *Feature) GetInt64List() *Int64List {
if x, ok := m.GetKind().(*Feature_Int64List); ok {
return x.Int64List
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Feature) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Feature_OneofMarshaler, _Feature_OneofUnmarshaler, _Feature_OneofSizer, []interface{}{
(*Feature_BytesList)(nil),
(*Feature_FloatList)(nil),
(*Feature_Int64List)(nil),
}
}
func _Feature_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.BytesList); err != nil {
return err
}
case *Feature_FloatList:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.FloatList); err != nil {
return err
}
case *Feature_Int64List:
b.EncodeVarint(3<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.Int64List); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Feature.Kind has unexpected type %T", x)
}
return nil
}
func _Feature_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Feature)
switch tag {
case 1: // kind.bytes_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(BytesList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_BytesList{msg}
return true, err
case 2: // kind.float_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(FloatList)
err := b.DecodeMessage(msg)
m.Kind = &Feature_FloatList{msg}
return true, err
case 3: // kind.int64_list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Int64List)
err := b.DecodeMessage(msg)
m.Kind = &Feature_Int64List{msg}
return true, err
default:
return false, nil
}
}
func _Feature_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Feature)
// kind
switch x := m.Kind.(type) {
case *Feature_BytesList:
s := proto.Size(x.BytesList)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_FloatList:
s := proto.Size(x.FloatList)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *Feature_Int64List:
s := proto.Size(x.Int64List)
n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type Features struct {
// Map from feature name to feature.
Feature map[string]*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *Features) Reset() { *m = Features{} }
func (m *Features) String() string { return proto.CompactTextString(m) }
func (*Features) ProtoMessage() {}
func (*Features) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Features) GetFeature() map[string]*Feature {
if m != nil {
return m.Feature
}
return nil
}
// Containers for sequential data.
//
// A FeatureList contains lists of Features. These may hold zero or more
// Feature values.
//
// FeatureLists are organized into categories by name. The FeatureLists message
// contains the mapping from name to FeatureList.
//
type FeatureList struct {
Feature []*Feature `protobuf:"bytes,1,rep,name=feature" json:"feature,omitempty"`
}
func (m *FeatureList) Reset() { *m = FeatureList{} }
func (m *FeatureList) String() string { return proto.CompactTextString(m) }
func (*FeatureList) ProtoMessage() {}
func (*FeatureList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FeatureList) GetFeature() []*Feature {
if m != nil {
return m.Feature
}
return nil
}
type FeatureLists struct {
// Map from feature name to feature list.
FeatureList map[string]*FeatureList `protobuf:"bytes,1,rep,name=feature_list,json=featureList" json:"feature_list,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}
func (m *FeatureLists) Reset() { *m = FeatureLists{} }
func (m *FeatureLists) String() string { return proto.CompactTextString(m) }
func (*FeatureLists) ProtoMessage() {}
func (*FeatureLists) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *FeatureLists) GetFeatureList() map[string]*FeatureList {
if m != nil {
return m.FeatureList
}
return nil
}
func init() {
proto.RegisterType((*BytesList)(nil), "tensorflow.BytesList")
proto.RegisterType((*FloatList)(nil), "tensorflow.FloatList")
proto.RegisterType((*Int64List)(nil), "tensorflow.Int64List")
proto.RegisterType((*Feature)(nil), "tensorflow.Feature")
proto.RegisterType((*Features)(nil), "tensorflow.Features")
proto.RegisterType((*FeatureList)(nil), "tensorflow.FeatureList")
proto.RegisterType((*FeatureLists)(nil), "tensorflow.FeatureLists")
}
func init() { proto.RegisterFile("tensorflow/core/example/feature.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 371 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0x4a, 0xc3, 0x30,
0x14, 0xc6, 0x4d, 0xab, 0x9b, 0x3d, 0x9d, 0x30, 0xe2, 0xbf, 0xb1, 0xab, 0xad, 0x30, 0xd8, 0xc0,
0x6d, 0x30, 0xa5, 0x88, 0x7a, 0x55, 0x70, 0x28, 0x0c, 0x1c, 0xbd, 0xf1, 0x52, 0x3a, 0x4d, 0xa5,
0xac, 0x36, 0xa3, 0xc9, 0xd4, 0xbd, 0x89, 0x2f, 0xe2, 0x85, 0x6f, 0xe6, 0xa5, 0xa4, 0x4d, 0xbb,
0x6c, 0xad, 0x77, 0x3d, 0xc9, 0xf7, 0x9d, 0xfc, 0xbe, 0xd3, 0x03, 0x1d, 0x4e, 0x22, 0x46, 0x63,
0x3f, 0xa4, 0x1f, 0xc3, 0x67, 0x1a, 0x93, 0x21, 0xf9, 0xf4, 0xde, 0x16, 0x21, 0x19, 0xfa, 0xc4,
0xe3, 0xcb, 0x98, 0x0c, 0x16, 0x31, 0xe5, 0x14, 0xc3, 0x5a, 0x66, 0xb5, 0xc1, 0x70, 0x56, 0x9c,
0xb0, 0x49, 0xc0, 0x38, 0x3e, 0x82, 0xbd, 0x77, 0x2f, 0x5c, 0x92, 0x06, 0x6a, 0xe9, 0xdd, 0x9a,
0x9b, 0x16, 0x56, 0x07, 0x8c, 0x71, 0x48, 0x3d, 0x9e, 0x48, 0x1a, 0xaa, 0x44, 0x73, 0xb4, 0x3a,
0x52, 0x64, 0xf7, 0x11, 0xb7, 0x2f, 0x8a, 0x32, 0x5d, 0x95, 0xfd, 0x20, 0xa8, 0x8e, 0x53, 0x1c,
0x6c, 0x03, 0xcc, 0xc4, 0xe3, 0x4f, 0x61, 0xc0, 0x78, 0x03, 0xb5, 0x50, 0xd7, 0x1c, 0x1d, 0x0f,
0xd6, 0x74, 0x83, 0x1c, 0xed, 0x6e, 0xc7, 0x35, 0x66, 0x39, 0xa7, 0x0d, 0xe0, 0x0b, 0xa2, 0xd4,
0xa7, 0x15, 0x7d, 0x39, 0xaf, 0xf0, 0xf9, 0x39, 0xbc, 0x0d, 0x10, 0x08, 0xc4, 0xd4, 0xa7, 0x17,
0x7d, 0x79, 0x00, 0xe1, 0x0b, 0xb2, 0xc2, 0xa9, 0xc0, 0xee, 0x3c, 0x88, 0x5e, 0xac, 0x2f, 0x04,
0xfb, 0x92, 0x9d, 0xe1, 0x6b, 0xa8, 0xca, 0xb1, 0x26, 0x21, 0xcd, 0x51, 0x7b, 0x83, 0x40, 0xca,
0xb2, 0x8f, 0xdb, 0x88, 0xc7, 0x2b, 0x37, 0x73, 0x34, 0x1f, 0xa0, 0xa6, 0x5e, 0xe0, 0x3a, 0xe8,
0x73, 0xb2, 0x4a, 0x46, 0x60, 0xb8, 0xe2, 0x13, 0xf7, 0xb2, 0x09, 0xa6, 0xf1, 0x0e, 0x4b, 0x9a,
0xcb, 0x91, 0x5e, 0x69, 0x97, 0xc8, 0xba, 0x01, 0x53, 0x9e, 0x26, 0x49, 0xfb, 0xdb, 0x70, 0xa5,
0xfe, 0x4c, 0x63, 0x7d, 0xa3, 0x9c, 0x47, 0xd8, 0x19, 0x9e, 0x40, 0x4d, 0xde, 0x65, 0xff, 0x46,
0x34, 0xe9, 0x95, 0x34, 0x49, 0xf4, 0x6a, 0x91, 0x26, 0x35, 0xfd, 0xf5, 0x49, 0xf3, 0x11, 0xea,
0xdb, 0x82, 0x92, 0xc4, 0xfd, 0xcd, 0xc4, 0xa7, 0xff, 0x3c, 0xa6, 0xa4, 0x76, 0xce, 0xe0, 0x84,
0xc6, 0xaf, 0xaa, 0x50, 0x6e, 0xbc, 0x73, 0x20, 0x1d, 0x53, 0xb1, 0xf1, 0x6c, 0x8a, 0x7e, 0x11,
0x9a, 0x55, 0x92, 0xf5, 0x3f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x38, 0x1c, 0x54, 0x27,
0x03, 0x00, 0x00,
} | return nil
}
| random_line_split |
GridSave.py | """
Model to use the GridClass to make an image of radio astronomical observations
"""
# Functions to create a grid and place astronomical data on that
# grid with a convolving function
# HISTORY
# 20MAY15 GIL one more time on coordiante header updates in FITS
# 20MAY14 GIL update coordinates in FITS header
# 20MAY13 GIL update coordinates in FITS header
# 20MAY12 GIL compute gain ratios for different images.
# 20MAY01 GIL grid different telescopes to different images for gain and offset merging.
# 20APR30 GIL initial version based on GridObs.py
import sys
import os
import copy
import numpy as np
from matplotlib import pyplot as plt
import datetime
import GridClass
from astropy.io import fits
from astropy.wcs import wcs
import radioastronomy
import gainfactor
EPSILON = 0.01
# minimum intensity to include for gain normalization
EPSILON = 1000.0
doRatio = False
# special telescope factors for 2020 March + April
telescopefactors = [ 1.05216, 0.94350, 1.02153, 0.98935]
telescopefactors = [ 1.09216, 0.94350, 1.02153, 0.98935]
tsum = 0.
for iii in range( 4):
tsum = tsum + telescopefactors[iii]
tsum = tsum/4.
print( "Check of Telescope Factors: TSum = %f" % (tsum))
def fixImageCoordinates( filename, projection):
"""
fixImageCoordinates() interpolates pixels to proper reference frame
"""
printcount = 0
inname = filename
nchar = len(inname)
# strip off the end of the previous image and add the new projection name
outname = inname[0:nchar-8]
outname = outname + projection + ".fit"
# get the input image coordinate transform, Usually Cartesian
win = wcs.WCS(filename)
hdu = fits.open(filename)[0]
imageData = fits.getdata( filename)
imageCopy = copy.deepcopy( imageData)
#
header = hdu.header
nx = header['NAXIS1']
ny = header['NAXIS2']
crval1 = header['CRVAL1']
crval2 = header['CRVAL2']
crpix1 = header['CRPIX1']
crpix2 = header['CRPIX2']
cdelt1 = header['CDELT1']
cdelt2 = header['CDELT2']
ctype1 = header['CTYPE1']
ctype2 = header['CTYPE2']
xmin = crval1 + (1. - crpix1)*cdelt1
xmax = crval1 + (nx - crpix1)*cdelt1
ymin = crval2 + (1. - crpix2)*cdelt2
ymax = crval2 + (nx - crpix2)*cdelt2
print( "fixImage: %.2f,%2f %.1f,%.1f %.3f,%.3f" % (crval1,crval2,crpix1,crpix2,cdelt1,cdelt2))
print( "fixImage: %s,%s" % (ctype1,ctype2))
# redefine for new projection desired
ctype1 = ctype1[0:4]+projection
ctype2 = ctype2[0:4]+projection
print( "=> %s, %s" % (ctype1, ctype2))
header['CTYPE1'] = ctype1
header['CTYPE2'] = ctype2
# for output image the reference coordinate x pixel can be anywhere
# move the center to zero
header['CRVAL1'] = 0.
header['LONPOLE'] = 0.0
header['LATPOLE'] = 90.0
header.update()
tempname = "GridSave.fits"
hdu = fits.PrimaryHDU(header=header, data=imageCopy)
print("Outname: %s" % (tempname))
if os.path.exists(tempname):
os.remove(tempname)
hdu.writeto(tempname)
wout = wcs.WCS(tempname)
# now that coordinates are defined, remove temporary file
if os.path.exists(tempname):
os.remove(tempname)
pixin = np.array([[0, 0], [nx-1, ny-1]], dtype=np.float64)
pixout = np.array([[0, 0], [nx-1, ny-1]], dtype=np.float64)
print("NX, NY: %d,%d" % (nx, ny))
nan = float("NAN")
# print("Nan = %f" % (nan))
# assume no data until found
for jjj in range (ny):
for iii in range (nx):
imageCopy[jjj][iii] = nan
# now for output image check all pixel values
for jjj in range (ny):
for iii in range (nx):
# if this image pixal has no value
pixout[0] = (iii,jjj)
oworld = wout.wcs_pix2world(pixout, 0)
xy = oworld[0]
if np.isnan(xy[0]):
continue
# print("pixout: %d,%d : world %.f,%.2f" % (iii,jjj,xy[0],xy[1]))
pixin[0] = oworld[0]
ipixels = win.wcs_world2pix(pixin, 0)
# get input pixels for coordinate
ixy = ipixels[0]
# if outside of current image skip this pixel
if np.isnan( ixy[0]):
continue
ix = int(ixy[0])
iy = int(ixy[1])
ix = max( min( nx-1, ix), 0)
iy = max( min( ny-1, iy), 0)
ix = int(ix)
iy = int(iy)
# print("pixin : %d,%d : world %.f,%.2f" % (ix,iy,xy[0],xy[1]))
# print("OX,OY:%d,%d <= IX,IY:%d,%d" %( ox,oy, ix,iy))
imageCopy[jjj][iii] = imageData[iy][ix]
print("Preparing to write new coordiante transform: %s" % (outname))
if os.path.exists(outname):
os.remove(outname)
newhdu = fits.PrimaryHDU(header=header, data=imageCopy)
newhdu.writeto(outname)
print("Wrote new")
return
def writeFitsImage( rs, cpuIndex, grid, projection):
"""
writeFitsImage() takes a spectrum for describing the observation and a 2 dimensinoal
array of image data and writes a FITS image
This program produces two images. It expects an grid that is in cartisian format.
The second format described by the input: projection
"""
# print("Image: ", imageData)
imageData = grid.image
size = imageData.shape
imageCopy = copy.deepcopy( imageData)
nx = size[1]
ny = size[0]
# now flip the Y axis of the image to match the FITS Convention
iy = ny - 1
for iii in range(ny):
imageCopy[iii][:] = imageData[iy][:]
iy = iy - 1
pixcrd = np.array([[0, 0], [24, 38]], dtype=np.float64)
# Create a new WCS object. The number of axes must be set
# from the start
w = wcs.WCS(naxis=2)
gridtype = grid.gridtype.upper()
print("Grid Type: %s %d" % (gridtype, gridtype.find('RA')))
# gridtype = "RA"
if gridtype.find('RA') > -1:
maptype = 'RA'
XTYPE = 'RA--'
YTYPE = 'DEC-'
else:
maptype = 'GAL'
XTYPE = 'GLON'
YTYPE = 'GLAT'
xstart = 360.
ystart = 90.
# select the projection here:
# projection = "-CYP"
# projection = "-CAR"
crval1 = grid.crval1
crval2 = grid.crval2
crpix1 = grid.crpix1
crpix2 = grid.crpix2
cdelt1 = grid.cdelt1
cdelt2 = grid.cdelt2
print('--------- Grid Type: %s (%f,%f %f,%f ' % (gridtype, crval1, crval2, cdelt1, cdelt2))
hdu = fits.PrimaryHDU()
header = hdu.header
dateobs = "%s" % (rs.utc)
dateobs = dateobs.replace(" ","T")
mydate = datetime.datetime.now()
mydate = "%s" % (mydate)
mydate = mydate[2:10]
mydate.replace('-','/')
header['NAXIS1'] = int(nx)
header['NAXIS2'] = int(ny)
header['BUNIT'] = 'K-km/s/BEAM'
maptype = "RA"
if maptype[0:2] == "RA":
maptype = "RA"
header['CTYPE1'] = 'RA---CAR'
else:
maptype = "GAL"
header['CTYPE1'] = 'GLON-CAR'
# create a cartesian x centered iamge
header['CRPIX1'] = nx/2.
header['CRVAL1'] = 180.
grid.crval1 = header['CRVAL1']
header['CDELT1'] = cdelt1
header['CUNIT1'] = 'deg'
header['CRVAL2'] = (grid.ymax+grid.ymin)/2.
grid.crval2 = header['CRVAL2']
header['CRPIX2'] = ny/2.
header['CDELT2'] = cdelt2
header['CUNIT2'] = 'deg'
grid.gridtype = maptype
if maptype[0:2] == "RA":
print("RA: writeFits: %s" % (maptype))
header['CTYPE2'] = 'DEC--CAR'
else:
print("GAL: writeFits: %s" % (maptype))
header['CTYPE2'] = 'GLAT-CAR'
header['WCAXES'] = 2
header['RADESYS'] ='FK5'
# temporarily replace ref coordinate iwth zero
crval2 = header['CRVAL2']
crpix2 = header['CRPIX2']
# redefine the reference for the best cartisian format
referencevalue = 0.
dpix = (referencevalue - crval2)/cdelt2
crpix2 = crpix2 + dpix
# change x axis
header['CRVAL2'] = referencevalue
header['CRPIX2'] = crpix2
header['EQUINOX'] = 2.000000000000E+03 # Equinox of equatorial coordinates
header['BMAJ'] = 18.1 # Beam major axis in degrees: 80cm horn at 21.1cm
header['BMIN'] = 18.1 # Beam minor axis in degrees
header['BPA'] = 0.000000000000E+00 # Beam position angle in degrees
header['RESTFRQ'] = 1.42040575177E+09 # Line rest frequency, Hz
header['RESTWAV'] = 0.211061140551 # Line wavelength (m)
header['DATE-OBS'] = dateobs
header['DATE'] = mydate
header['OBSERVER'] = 'Science Aficionado'
header['OBJECT'] = 'Milky Way'
header['TELESCOP'] = 'Aficionado Horn'
header['HISTORY'] = "GridSave.py -- Glen Langston -- 20 May 13"
header['HISTORY'] = "Observations in March + April 2020"
# while len(header) < (36 * 4 - 1):
# header.append() # Adds a blank card to the end
# header.delval("EXTEND")
header.update()
# hdu = fits.PrimaryHDU(header=header, data=imageData)
hdu = fits.PrimaryHDU(header=header, data=imageCopy)
# As file at filePath is deleted now, so we should check if file exists or not not before deleting them
outname = ("Aficionado_T%d" % (cpuIndex)) + "-" + maptype + projection + ".fit"
if os.path.exists(outname):
os.remove(outname)
hdu.writeto(outname)
# create a second file with new projection
fixImageCoordinates( outname, projection)
return
def gridratio( grid1, grid2):
"""
gridratio computes the ratio of two grids when the values in both grids are non-zero
This function is used to compute gain ratios
The average and rms of the ratios are provided along as the grid of ratios
"""
nx1 = grid1.img_width
ny1 = grid1.img_height
nx2 = grid2.img_width
ny2 = grid2.img_height
ratio = 0.
rms = 0.
if nx1 != nx2:
print("GridRatio: Nx1 != Nx2 (%d, %d)" % (nx1, nx2))
return ratio, rms
if ny1 != ny2:
print("GridRatio: Ny1 != Ny2 (%d, %d)" % (ny1, ny2))
return ratio, rms
count = 0
nonzero = np.zeros(nx1*ny1)
# copy to ratio array
gridratio = copy.deepcopy( grid1)
for iii in range(nx1):
for jjj in range(ny1):
# put in zero as default
gridratio.image[jjj,iii] = 0.
if grid1.image[jjj,iii] > EPSILON:
if grid2.image[jjj,iii] > EPSILON:
nonzero[count] = grid1.image[jjj,iii]/grid2.image[jjj,iii]
count = count + 1
if count < 2:
print ("No overlap in non-zero samples")
return ratio, rms, gridratio
nonzero = nonzero[0:count]
asum = np.sum( nonzero)
ratio = asum/float(count)
rms = np.std( nonzero)
print ("Grid Ratio: %.4f +/- %.4f for %d samples" % (ratio, rms/np.sqrt(count), count))
# return the ratio grid
return ratio, rms, gridratio
def main():
"""
Main executable for gridding astronomical data
"""
dpi = 1
dpi = 2
width = int(360)
height = int(130)
mywidth = int(width*dpi)
myheight = int(height*dpi)
FWHM = 7.5 # degrees
FWHM = 10.0 # degrees
FWHM = 5.0 # degrees
FWHM = 3.0 # degrees
FWHM = 1.0 # degrees
weight = 1.
nargs = len(sys.argv)
if nargs < 2:
print('GR: GRid Observations of integrated intensity produced by the T Command')
print('GR produces fits images for each of the horns used for the observations.')
print('For observations at the same coordinates, the ratios of intensities are also produced.')
print('The FITS format files require header information, which is copied from the')
print('Cold Load File provided by the user')
print('GR RA|GAL <cold file name> <savefile1> [<savefile2> ... <savefileN>]')
print("")
print('Glen Langston, National Science Foundation -- 20 May 12')
exit()
gridtype = sys.argv[1]
gridtype = gridtype.upper()
print('Grid Type: ', gridtype)
# enable having ra going from 24 to 0 hours == 360 to 0 degrees
xsign = 1.
xoffset = 0.
if gridtype == 'RA':
xmin = 0.
xmax = 360.
ymin = -40.
ymax = 90.
maptype = 'RA'
elif gridtype == '-RA':
xmin = 0.
xmax = 360.
ymin = -40.
ymax = 90.
xsign = -1.
xoffset = 360. # when x = 360. should be at zero.
maptype = 'RA'
elif gridtype == '-EL':
xmin = 0.
xmax = 360.
ymin = 0.
ymax = 90.
xsign = -1.
xoffset = 360. # when x = 360. should be at zero.
maptype = 'AZEL'
elif gridtype == 'RA0':
xmin = 0.
xmax = 360.
ymin = -41.
ymax = 89.
xsign = -1.
xoffset = 180. # when x = 360. should be at zero.
gridtype = 'RA'
elif gridtype == 'GAL':
xmin = -180.
xmax = 180.
ymin = -90.
ymax = 90.
maptype = 'GAL'
if gridtype != 'RA' and gridtype != 'GAL' and gridtype != '-RA' and gridtype != "RA0":
print('Error parsing grid type: ', gridtype)
print('1st argument should be either RA, -RA or GAL')
exit()
rs = radioastronomy.Spectrum()
if doRatio:
#create the grid with map parameters
grid1 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
grid2 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
grid3 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
grid4 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
# put each telescope in a different grid
grids = [grid1, grid2, grid3, grid4]
gridall = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
projection = "-AIT"
# coldfile
coldfile = sys.argv[2]
# get telescope geographic location etc | rs.read_spec_ast(coldfile)
print("Observer: %s " % (rs.observer))
# first read through all data and find hot load
names = sys.argv[3:]
names = sorted(names)
firsttime = ""
lasttime = ""
count = 0
# setup grid indicies so that cpuIndex goes to the correct grid
# This assumes telescopes 2,3,4,5 are being used]
gridIndex = [0,0,0,1,2,3]
# for all save Files to Grid
for filename in names:
print("File: %s" % (filename))
f = open(filename)
date = "Unknown"
while date != "":
date, time, cpuIndex, telaz, telel, tSys, tRx, tRms, tint, KperC, tSourcemax, velSource, dV, tVSum, tVSumRms, tSumKmSec, dTSumKmSec, gainFactor = gainfactor.readSaveValues( f)
dlen = len(date)
if dlen < 1:
break
if date[0] == "#":
continue
# else not a comment process the line
count = count + 1
isodate = "20"+date+"T"+time
# print("DateTime: %s" % (isodate))
rs.utc = datetime.datetime.strptime(isodate,"%Y-%m-%dT%H:%M:%S")
# print("Utc: %s" % (rs.utc))
rs.telaz = telaz
rs.telel = telel
rs.azel2radec()
ra = rs.ra
dec = rs.dec
lon = rs.gallon
lat = rs.gallat
tsum = tSumKmSec
tsdv = dTSumKmSec
tmax = tSourcemax
vave = tVSum
vsdv = tVSumRms
if firsttime == "":
firsttime = date
else:
lasttime = date
# if vave > -100. and vave < 100:
# mygrid.convolve( lon, lat, vave, 1.)
iGrid = gridIndex[cpuIndex]
gainCorr = telescopefactors[iGrid]
tsum = tsum * gainCorr
if gridtype == 'RA':
if doRatio:
grids[iGrid].convolve(ra, dec, tsum, weight)
gridall.convolve( ra, dec, tsum, weight)
elif gridtype == '-RA':
x = (ra*xsign) + xoffset
if doRatio:
grids[iGrid].convolve(x, dec, tsum, weight)
gridall.convolve( x, dec, tsum, weight)
elif gridtype == 'RA0':
x = (ra*xsign) + xoffset
if x < 0:
x = x + xmax
elif x > xmax:
x = x - xmax
if doRatio:
grids[iGrid].convolve(x, dec, tsum, weight)
gridall.convolve( x, dec, tsum, weight)
else:
if doRatio:
grids[iGrid].convolve(lon, lat, tsum, weight)
gridall.convolve( lon, lat, tsum, weight)
if count == 0:
print('Convolving Coordinates: ', ra, dec, lon, lat)
print('Convolving Intensities: ', tsum, tsdv, vave, vsdv)
print('Convolvign Parameters : ', n, time)
count = count + 1
# end reading all lines in save file
f.close()
# normalize each of the gridded images
if doRatio:
grids[0].normalize()
grids[1].normalize()
grids[2].normalize()
grids[3].normalize()
gridall.normalize()
# mygrid.check()
# zmin = -1000.
# zmax = 3000.
# limit grid intensities for plotting
# mygrid.set_ij( 0, 0, zmax, 1.)
# mygrid.set_ij( 1, 1, zmin, 1.)
# mygrid.limit(zmin, zmax)
subplots = False
if subplots:
fig, ax = plt.subplots(figsize=(myheight, mywidth), dpi=dpi)
if gridtype == 'RA':
cax = fig.add_axes([-180, 180], [-90, 90])
else:
cax = fig.add_axes([0, 24], [-90, 90])
cbar = fig.colorbar(cax, ticks=[zmin, zmax], orientation='horizontal')
cbar.ax.set_yticklabels([str(zmin), str(zmax)])
ax.set_title("Citizen Science: Horn observations of our Galaxy")
else:
#y_ticks = ymin + (ymax-ymin)*ticks/myheight
ticks = np.arange(0, mywidth, 30*dpi)
x_ticks = xmin + ((xmax-xmin)*ticks/mywidth)
plt.imshow(gridall.image, interpolation='nearest', cmap=plt.get_cmap('jet'))
if firsttime != lasttime:
plt.title("Citizen Science: Observing our Galaxy: %s to %s" % (firsttime, lasttime))
else:
plt.title("Citizen Science: Observing our Galaxy: %s" % (firsttime))
if gridtype == 'RA':
plt.xlabel("Right Ascension (hours)")
plt.ylabel("Declination (degrees)")
labels = ticks/(mywidth/24)
yticks = np.arange(0, myheight, 15*dpi)
elif gridtype == '-RA':
plt.xlabel("Right Ascension (hours)")
plt.ylabel("Declination (degrees)")
labels = 24 - (ticks/(mywidth/24))
labels[0] = 0
labels[0] = 24
yticks = np.arange(0, myheight, 15*dpi)
elif gridtype == '-EL':
plt.xlabel("Right Ascension (hours)")
plt.ylabel("Elevation (degrees)")
labels = 24 - (ticks/(mywidth/24))
labels[0] = 0
labels[0] = 24
yticks = np.arange(0, myheight, 15*dpi)
elif gridtype == 'RA0': # put 0 hours in middle of plot
plt.xlabel("Right Ascension (hours)")
plt.ylabel("Declination (degrees)")
labels = 12 - (ticks/(mywidth/24))
nlabels = len(labels)
for iii in range(nlabels):
if labels[iii] < 0:
labels[iii] = 24 + labels[iii]
if labels[iii] == 24:
labels[iii] = 0
yticks = np.arange(0, myheight, 15*dpi)
else:
yticks = np.arange(0, myheight, 30*dpi)
ticks = np.arange(0, mywidth, 30*dpi)
x_ticks = xmin + (xmax-xmin)*ticks/mywidth
labels = x_ticks
plt.xlabel("Galactic Longitude (degrees)")
plt.ylabel("Galactic Latitude (degrees)")
# wnat an integer list of labels
# slabels = str(labels)
print(ticks, labels)
y_ticks = ymax - (ymax-ymin)*yticks/myheight
plt.yticks(yticks, y_ticks)
plt.xticks(ticks, labels, rotation='horizontal')
plt.colorbar()
crval2 = (xmin + xmax)/2.
crval1 = (ymin + ymax)/2.
cdelt1 = (-1./float(dpi)) - .001
cdelt2 = (1./float(dpi)) + .001
if doRatio:
# now show eacsh of the images
for iGrid in range(4):
imagetemp = copy.deepcopy(grids[iGrid].image)
imagetemp2 = copy.deepcopy(grids[iGrid].image)
kkk = myheight - 1
for jjj in range(myheight):
imagetemp[:][kkk] = imagetemp2[:][jjj]
kkk = kkk - 1
grids[iGrid].image = imagetemp
writeFitsImage( rs, iGrid+2, grids[iGrid], projection)
# put each telescope in a different grid
ratio1 = copy.deepcopy(grid1)
ratio2 = copy.deepcopy(grid1)
ratio3 = copy.deepcopy(grid1)
gratios = [ratio1, ratio2, ratio3]
ratios = np.zeros(3)
rmss = np.zeros(3)
jGrid = 3
for iGrid in range(3):
print("Gain Ratios for Telescopes T%d and T%d" % (iGrid+2, jGrid+2))
ratio, rms, aratio = gridratio(grids[iGrid], grids[jGrid])
ratios[iGrid] = ratio
rmss[iGrid] = rms
writeFitsImage( rs, iGrid+2, aratio, projection)
writeFitsImage( rs, 0, gridall, projection)
plt.show()
if __name__ == "__main__":
main()
#SIMPLE = T / conforms to FITS standard
#BITPIX = -32 / array data type
#NAXIS = 2 / number of array dimensions
#NAXIS1 = 4323
#NAXIS2 = 2163
#OBJECT = 'HI4PI ' / The HI 4-PI Survey
#TELESCOP= 'Effelsberg 100m RT; ATNF Parkes 64-m' / Telescope names
#ORIGIN = 'AIfA/MPIfR Bonn; ATNF Sydney' / Organisations or Institutions
#REFERENC= 'HI4PI Collaboration 2016' / A&A
#RESTFRQ = 1420405751.77
#RESTWAV = 0.211061140541
#CDELT1 = -0.08333333330000001
#CRPIX1 = 2162.0
#CRVAL1 = 0.0
#CTYPE1 = 'RA---CAR'
#CUNIT1 = 'deg '
#CDELT2 = 0.08333333330000001
#CRPIX2 = 1082.0
#CRVAL2 = 0.0
#CTYPE2 = 'DEC--CAR'
#CUNIT2 = 'deg '
#WCSAXES = 2
#RADESYS = 'FK5 '
#EQUINOX = 2000.0
#LONPOLE = 0.0
#LATPOLE = 90.0
#BUNIT = 'cm^(-2) '
#BPA = 0.0
#BMAJ = 0.2706
#BMIN = 0.2706
#VMIN = -1.5169915133210E+21
#VMAX = 2.39529868415209E+22
#CHECKSUM= '9eqZJcnY9cnYGcnY' / HDU checksum updated 2016-09-15T23:38:45
#DATASUM = '3638685465' / data unit checksum updated 2016-09-15T23:38:45
#END | print("Reading Observing parameters from: %s" % (coldfile)) | random_line_split |
GridSave.py | """
Model to use the GridClass to make an image of radio astronomical observations
"""
# Functions to create a grid and place astronomical data on that
# grid with a convolving function
# HISTORY
# 20MAY15 GIL one more time on coordiante header updates in FITS
# 20MAY14 GIL update coordinates in FITS header
# 20MAY13 GIL update coordinates in FITS header
# 20MAY12 GIL compute gain ratios for different images.
# 20MAY01 GIL grid different telescopes to different images for gain and offset merging.
# 20APR30 GIL initial version based on GridObs.py
import sys
import os
import copy
import numpy as np
from matplotlib import pyplot as plt
import datetime
import GridClass
from astropy.io import fits
from astropy.wcs import wcs
import radioastronomy
import gainfactor
EPSILON = 0.01
# minimum intensity to include for gain normalization
EPSILON = 1000.0
doRatio = False
# special telescope factors for 2020 March + April
telescopefactors = [ 1.05216, 0.94350, 1.02153, 0.98935]
telescopefactors = [ 1.09216, 0.94350, 1.02153, 0.98935]
tsum = 0.
for iii in range( 4):
tsum = tsum + telescopefactors[iii]
tsum = tsum/4.
print( "Check of Telescope Factors: TSum = %f" % (tsum))
def fixImageCoordinates( filename, projection):
    """
    Re-grid a FITS image onto a new projection (e.g. "-AIT").

    For every pixel of the output image, the new WCS maps the pixel to a
    sky coordinate, and the input WCS (usually Cartesian "-CAR") maps that
    coordinate back to an input pixel whose value is copied.  The result
    is written to a file whose name is the input name with the projection
    code substituted for the last "-XXX" before ".fit".

    Parameters
    ----------
    filename : str
        Name of the input FITS image; assumed to end in "-XXX.fit".
    projection : str
        FITS projection code including the leading '-', e.g. "-AIT".
    """
    inname = filename
    nchar = len(inname)
    # strip off the old "-XXX.fit" suffix and add the new projection name
    outname = inname[0:nchar-8]
    outname = outname + projection + ".fit"
    # get the input image coordinate transform, usually Cartesian
    win = wcs.WCS(filename)
    imageData = fits.getdata( filename)
    imageCopy = copy.deepcopy( imageData)
    # read the header and close the file promptly (the handle was
    # previously left open for the life of the process)
    with fits.open(filename) as hdulist:
        header = hdulist[0].header.copy()
    nx = header['NAXIS1']
    ny = header['NAXIS2']
    crval1 = header['CRVAL1']
    crval2 = header['CRVAL2']
    crpix1 = header['CRPIX1']
    crpix2 = header['CRPIX2']
    cdelt1 = header['CDELT1']
    cdelt2 = header['CDELT2']
    ctype1 = header['CTYPE1']
    ctype2 = header['CTYPE2']
    print( "fixImage: %.2f,%2f %.1f,%.1f %.3f,%.3f" % (crval1,crval2,crpix1,crpix2,cdelt1,cdelt2))
    print( "fixImage: %s,%s" % (ctype1,ctype2))
    # redefine the axis types for the new projection desired
    ctype1 = ctype1[0:4]+projection
    ctype2 = ctype2[0:4]+projection
    print( "=> %s, %s" % (ctype1, ctype2))
    header['CTYPE1'] = ctype1
    header['CTYPE2'] = ctype2
    # for the output image the reference x coordinate can be anywhere;
    # move the center to zero
    header['CRVAL1'] = 0.
    header['LONPOLE'] = 0.0
    header['LATPOLE'] = 90.0
    header.update()
    # Build the output coordinate transform directly from the updated
    # header.  (Previously a temporary FITS file was written to disk and
    # re-read only to construct this WCS.)
    wout = wcs.WCS(header)
    pixin = np.array([[0, 0], [nx-1, ny-1]], dtype=np.float64)
    pixout = np.array([[0, 0], [nx-1, ny-1]], dtype=np.float64)
    print("NX, NY: %d,%d" % (nx, ny))
    nan = float("NAN")
    # assume no data until a matching input pixel is found
    imageCopy[:, :] = nan
    # for every output pixel, find and copy the matching input pixel
    for jjj in range (ny):
        for iii in range (nx):
            pixout[0] = (iii,jjj)
            oworld = wout.wcs_pix2world(pixout, 0)
            xy = oworld[0]
            # pixels outside the projection boundary have no sky coordinate
            if np.isnan(xy[0]):
                continue
            pixin[0] = oworld[0]
            ipixels = win.wcs_world2pix(pixin, 0)
            ixy = ipixels[0]
            # if outside of the input image, skip this pixel
            if np.isnan( ixy[0]):
                continue
            # clip to the valid input pixel range
            ix = max( min( nx-1, int(ixy[0])), 0)
            iy = max( min( ny-1, int(ixy[1])), 0)
            imageCopy[jjj][iii] = imageData[iy][ix]
    print("Preparing to write new coordinate transform: %s" % (outname))
    # writeto() refuses to overwrite, so remove any previous output first
    if os.path.exists(outname):
        os.remove(outname)
    newhdu = fits.PrimaryHDU(header=header, data=imageCopy)
    newhdu.writeto(outname)
    print("Wrote new")
    return
def writeFitsImage( rs, cpuIndex, grid, projection):
    """
    Write a gridded sky image as a FITS file, then re-project it.

    Parameters
    ----------
    rs : radioastronomy.Spectrum
        Observation description; only rs.utc is read here (for DATE-OBS).
    cpuIndex : int
        Telescope/processor index used in the output file name.
    grid : GridClass.Grid
        Grid holding the image; expected to be in Cartesian ("-CAR") form.
        Its crval1/crval2/gridtype attributes are updated as a side effect.
    projection : str
        Projection code (e.g. "-AIT") for the second output image, which is
        produced by fixImageCoordinates().
    """
    imageData = grid.image
    size = imageData.shape
    nx = size[1]
    ny = size[0]
    # flip the Y axis of the image to match the FITS convention
    # (row 0 of a FITS image is the bottom of the map)
    imageCopy = imageData[::-1].copy()
    gridtype = grid.gridtype.upper()
    print("Grid Type: %s %d" % (gridtype, gridtype.find('RA')))
    if gridtype.find('RA') > -1:
        maptype = 'RA'
    else:
        maptype = 'GAL'
    crval1 = grid.crval1
    crval2 = grid.crval2
    cdelt1 = grid.cdelt1
    cdelt2 = grid.cdelt2
    print('--------- Grid Type: %s (%f,%f %f,%f ' % (gridtype, crval1, crval2, cdelt1, cdelt2))
    hdu = fits.PrimaryHDU()
    header = hdu.header
    # observation date in FITS ISO form, e.g. 2020-05-15T01:02:03
    dateobs = "%s" % (rs.utc)
    dateobs = dateobs.replace(" ","T")
    # file-creation date in YY/MM/DD form
    mydate = datetime.datetime.now()
    mydate = "%s" % (mydate)
    mydate = mydate[2:10]
    # FIX: the replace() result was previously discarded (strings are
    # immutable), leaving '-' separators in the DATE keyword.
    mydate = mydate.replace('-','/')
    header['NAXIS1'] = int(nx)
    header['NAXIS2'] = int(ny)
    header['BUNIT'] = 'K-km/s/BEAM'
    # NOTE(review): this forces RA coordinates regardless of the grid type,
    # making the GAL branches below dead code.  It looks like a debugging
    # override -- confirm before gridding galactic maps.
    maptype = "RA"
    if maptype[0:2] == "RA":
        maptype = "RA"
        header['CTYPE1'] = 'RA---CAR'
    else:
        maptype = "GAL"
        header['CTYPE1'] = 'GLON-CAR'
    # create a Cartesian x-centered image
    header['CRPIX1'] = nx/2.
    header['CRVAL1'] = 180.
    grid.crval1 = header['CRVAL1']
    header['CDELT1'] = cdelt1
    header['CUNIT1'] = 'deg'
    header['CRVAL2'] = (grid.ymax+grid.ymin)/2.
    grid.crval2 = header['CRVAL2']
    header['CRPIX2'] = ny/2.
    header['CDELT2'] = cdelt2
    header['CUNIT2'] = 'deg'
    grid.gridtype = maptype
    if maptype[0:2] == "RA":
        print("RA: writeFits: %s" % (maptype))
        header['CTYPE2'] = 'DEC--CAR'
    else:
        print("GAL: writeFits: %s" % (maptype))
        header['CTYPE2'] = 'GLAT-CAR'
    # FIX: keyword was misspelled 'WCAXES'; the FITS/WCS standard keyword
    # is WCSAXES (see the sample HI4PI header at the end of this file).
    header['WCSAXES'] = 2
    header['RADESYS'] = 'FK5'
    # shift the y reference pixel so the reference value is 0 degrees,
    # the best-behaved reference for the Cartesian projection
    crval2 = header['CRVAL2']
    crpix2 = header['CRPIX2']
    referencevalue = 0.
    dpix = (referencevalue - crval2)/cdelt2
    crpix2 = crpix2 + dpix
    header['CRVAL2'] = referencevalue
    header['CRPIX2'] = crpix2
    header['EQUINOX'] = 2.000000000000E+03  # Equinox of equatorial coordinates
    header['BMAJ'] = 18.1  # Beam major axis in degrees: 80cm horn at 21.1cm
    header['BMIN'] = 18.1  # Beam minor axis in degrees
    header['BPA'] = 0.000000000000E+00  # Beam position angle in degrees
    header['RESTFRQ'] = 1.42040575177E+09  # Line rest frequency, Hz
    header['RESTWAV'] = 0.211061140551  # Line wavelength (m)
    header['DATE-OBS'] = dateobs
    header['DATE'] = mydate
    header['OBSERVER'] = 'Science Aficionado'
    header['OBJECT'] = 'Milky Way'
    header['TELESCOP'] = 'Aficionado Horn'
    header['HISTORY'] = "GridSave.py -- Glen Langston -- 20 May 13"
    header['HISTORY'] = "Observations in March + April 2020"
    header.update()
    hdu = fits.PrimaryHDU(header=header, data=imageCopy)
    outname = ("Aficionado_T%d" % (cpuIndex)) + "-" + maptype + projection + ".fit"
    # writeto() refuses to overwrite, so remove any previous image first
    if os.path.exists(outname):
        os.remove(outname)
    hdu.writeto(outname)
    # create a second file with the requested projection
    fixImageCoordinates( outname, projection)
    return
def | ( grid1, grid2):
"""
gridratio computes the ratio of two grids when the values in both grids are non-zero
This function is used to compute gain ratios
The average and rms of the ratios are provided along as the grid of ratios
"""
nx1 = grid1.img_width
ny1 = grid1.img_height
nx2 = grid2.img_width
ny2 = grid2.img_height
ratio = 0.
rms = 0.
if nx1 != nx2:
print("GridRatio: Nx1 != Nx2 (%d, %d)" % (nx1, nx2))
return ratio, rms
if ny1 != ny2:
print("GridRatio: Ny1 != Ny2 (%d, %d)" % (ny1, ny2))
return ratio, rms
count = 0
nonzero = np.zeros(nx1*ny1)
# copy to ratio array
gridratio = copy.deepcopy( grid1)
for iii in range(nx1):
for jjj in range(ny1):
# put in zero as default
gridratio.image[jjj,iii] = 0.
if grid1.image[jjj,iii] > EPSILON:
if grid2.image[jjj,iii] > EPSILON:
nonzero[count] = grid1.image[jjj,iii]/grid2.image[jjj,iii]
count = count + 1
if count < 2:
print ("No overlap in non-zero samples")
return ratio, rms, gridratio
nonzero = nonzero[0:count]
asum = np.sum( nonzero)
ratio = asum/float(count)
rms = np.std( nonzero)
print ("Grid Ratio: %.4f +/- %.4f for %d samples" % (ratio, rms/np.sqrt(count), count))
# return the ratio grid
return ratio, rms, gridratio
def main():
    """
    Main executable for gridding astronomical data.

    Usage: GR RA|-RA|RA0|GAL <cold file> <savefile1> [... <savefileN>]
    Reads integrated-intensity save files, convolves each observation onto
    an all-sky grid, plots the result and writes FITS images.  When the
    module flag doRatio is True, per-telescope grids and their gain
    ratios are also produced.
    """
    # For the repeated assignments below, only the LAST value is in
    # effect; earlier values record previously-used settings.
    dpi = 1
    dpi = 2
    width = int(360)
    height = int(130)
    mywidth = int(width*dpi)
    myheight = int(height*dpi)
    FWHM = 7.5 # degrees
    FWHM = 10.0 # degrees
    FWHM = 5.0 # degrees
    FWHM = 3.0 # degrees
    FWHM = 1.0 # degrees
    weight = 1.
    nargs = len(sys.argv)
    # with no arguments, print usage and exit
    if nargs < 2:
        print('GR: GRid Observations of integrated intensity produced by the T Command')
        print('GR produces fits images for each of the horns used for the observations.')
        print('For observations at the same coordinates, the ratios of intensities are also produced.')
        print('The FITS format files require header information, which is copied from the')
        print('Cold Load File provided by the user')
        print('GR RA|GAL <cold file name> <savefile1> [<savefile2> ... <savefileN>]')
        print("")
        print('Glen Langston, National Science Foundation -- 20 May 12')
        exit()
    gridtype = sys.argv[1]
    gridtype = gridtype.upper()
    print('Grid Type: ', gridtype)
    # enable having ra going from 24 to 0 hours == 360 to 0 degrees
    xsign = 1.
    xoffset = 0.
    if gridtype == 'RA':
        xmin = 0.
        xmax = 360.
        ymin = -40.
        ymax = 90.
        maptype = 'RA'
    elif gridtype == '-RA':
        xmin = 0.
        xmax = 360.
        ymin = -40.
        ymax = 90.
        xsign = -1.
        xoffset = 360. # when x = 360. should be at zero.
        maptype = 'RA'
    elif gridtype == '-EL':
        # NOTE(review): '-EL' sets up AZEL parameters but is rejected by
        # the validity check below, so this branch appears unreachable
        # in practice -- confirm intent.
        xmin = 0.
        xmax = 360.
        ymin = 0.
        ymax = 90.
        xsign = -1.
        xoffset = 360. # when x = 360. should be at zero.
        maptype = 'AZEL'
    elif gridtype == 'RA0':
        # NOTE(review): gridtype is remapped to 'RA' here, so the
        # 'RA0' convolution branch further below can never run, and
        # maptype is not set on this path -- confirm.
        xmin = 0.
        xmax = 360.
        ymin = -41.
        ymax = 89.
        xsign = -1.
        xoffset = 180. # when x = 360. should be at zero.
        gridtype = 'RA'
    elif gridtype == 'GAL':
        xmin = -180.
        xmax = 180.
        ymin = -90.
        ymax = 90.
        maptype = 'GAL'
    if gridtype != 'RA' and gridtype != 'GAL' and gridtype != '-RA' and gridtype != "RA0":
        print('Error parsing grid type: ', gridtype)
        print('1st argument should be either RA, -RA or GAL')
        exit()
    rs = radioastronomy.Spectrum()
    if doRatio:
        #create the grid with map parameters
        grid1 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
            height=height, dpi=dpi, FWHM=FWHM, \
            projection="-CAR", gridtype=maptype)
        grid2 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
            height=height, dpi=dpi, FWHM=FWHM, \
            projection="-CAR", gridtype=maptype)
        grid3 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
            height=height, dpi=dpi, FWHM=FWHM, \
            projection="-CAR", gridtype=maptype)
        grid4 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
            height=height, dpi=dpi, FWHM=FWHM, \
            projection="-CAR", gridtype=maptype)
        # put each telescope in a different grid
        grids = [grid1, grid2, grid3, grid4]
    # the combined (all-telescope) grid is always produced
    gridall = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
        height=height, dpi=dpi, FWHM=FWHM, \
        projection="-CAR", gridtype=maptype)
    projection = "-AIT"
    # coldfile provides telescope geographic location etc.
    coldfile = sys.argv[2]
    print("Reading Observing parameters from: %s" % (coldfile))
    rs.read_spec_ast(coldfile)
    print("Observer: %s " % (rs.observer))
    # remaining arguments are the save files to grid
    names = sys.argv[3:]
    names = sorted(names)
    firsttime = ""
    lasttime = ""
    count = 0
    # setup grid indicies so that cpuIndex goes to the correct grid
    # This assumes telescopes 2,3,4,5 are being used]
    gridIndex = [0,0,0,1,2,3]
    # for all save Files to Grid
    for filename in names:
        print("File: %s" % (filename))
        f = open(filename)
        date = "Unknown"
        # read save-file lines until an empty date marks end of file
        while date != "":
            date, time, cpuIndex, telaz, telel, tSys, tRx, tRms, tint, KperC, tSourcemax, velSource, dV, tVSum, tVSumRms, tSumKmSec, dTSumKmSec, gainFactor = gainfactor.readSaveValues( f)
            dlen = len(date)
            if dlen < 1:
                break
            # skip comment lines
            if date[0] == "#":
                continue
            # else not a comment process the line
            count = count + 1
            # dates are stored as YY-MM-DD; prepend century
            isodate = "20"+date+"T"+time
            rs.utc = datetime.datetime.strptime(isodate,"%Y-%m-%dT%H:%M:%S")
            # compute sky coordinates from the telescope pointing + time
            rs.telaz = telaz
            rs.telel = telel
            rs.azel2radec()
            ra = rs.ra
            dec = rs.dec
            lon = rs.gallon
            lat = rs.gallat
            tsum = tSumKmSec
            tsdv = dTSumKmSec
            tmax = tSourcemax
            vave = tVSum
            vsdv = tVSumRms
            # remember first and last observation dates for the title
            if firsttime == "":
                firsttime = date
            else:
                lasttime = date
            # apply the per-telescope gain correction
            iGrid = gridIndex[cpuIndex]
            gainCorr = telescopefactors[iGrid]
            tsum = tsum * gainCorr
            if gridtype == 'RA':
                if doRatio:
                    grids[iGrid].convolve(ra, dec, tsum, weight)
                gridall.convolve( ra, dec, tsum, weight)
            elif gridtype == '-RA':
                x = (ra*xsign) + xoffset
                if doRatio:
                    grids[iGrid].convolve(x, dec, tsum, weight)
                gridall.convolve( x, dec, tsum, weight)
            elif gridtype == 'RA0':
                # NOTE(review): unreachable -- 'RA0' was remapped to 'RA'
                # during argument parsing above.
                x = (ra*xsign) + xoffset
                if x < 0:
                    x = x + xmax
                elif x > xmax:
                    x = x - xmax
                if doRatio:
                    grids[iGrid].convolve(x, dec, tsum, weight)
                gridall.convolve( x, dec, tsum, weight)
            else:
                if doRatio:
                    grids[iGrid].convolve(lon, lat, tsum, weight)
                gridall.convolve( lon, lat, tsum, weight)
            # NOTE(review): count was already incremented above, so this
            # branch never runs; if it did, 'n' is undefined here and
            # would raise NameError -- confirm intended variable.
            if count == 0:
                print('Convolving Coordinates: ', ra, dec, lon, lat)
                print('Convolving Intensities: ', tsum, tsdv, vave, vsdv)
                print('Convolvign Parameters : ', n, time)
            count = count + 1
        # end reading all lines in save file
        f.close()
    # normalize each of the gridded images
    if doRatio:
        grids[0].normalize()
        grids[1].normalize()
        grids[2].normalize()
        grids[3].normalize()
    gridall.normalize()
    # mygrid.check()
    # zmin = -1000.
    # zmax = 3000.
    # limit grid intensities for plotting
    # mygrid.set_ij( 0, 0, zmax, 1.)
    # mygrid.set_ij( 1, 1, zmin, 1.)
    # mygrid.limit(zmin, zmax)
    subplots = False
    if subplots:
        # NOTE(review): disabled path; references zmin/zmax which are
        # only defined in the commented-out lines above -- would raise
        # NameError if enabled as-is.
        fig, ax = plt.subplots(figsize=(myheight, mywidth), dpi=dpi)
        if gridtype == 'RA':
            cax = fig.add_axes([-180, 180], [-90, 90])
        else:
            cax = fig.add_axes([0, 24], [-90, 90])
        cbar = fig.colorbar(cax, ticks=[zmin, zmax], orientation='horizontal')
        cbar.ax.set_yticklabels([str(zmin), str(zmax)])
        ax.set_title("Citizen Science: Horn observations of our Galaxy")
    else:
        #y_ticks = ymin + (ymax-ymin)*ticks/myheight
        ticks = np.arange(0, mywidth, 30*dpi)
        x_ticks = xmin + ((xmax-xmin)*ticks/mywidth)
        plt.imshow(gridall.image, interpolation='nearest', cmap=plt.get_cmap('jet'))
        if firsttime != lasttime:
            plt.title("Citizen Science: Observing our Galaxy: %s to %s" % (firsttime, lasttime))
        else:
            plt.title("Citizen Science: Observing our Galaxy: %s" % (firsttime))
        # per-grid-type axis labels; tick labels convert pixels to hours
        # (RA maps) or degrees (GAL maps)
        if gridtype == 'RA':
            plt.xlabel("Right Ascension (hours)")
            plt.ylabel("Declination (degrees)")
            labels = ticks/(mywidth/24)
            yticks = np.arange(0, myheight, 15*dpi)
        elif gridtype == '-RA':
            plt.xlabel("Right Ascension (hours)")
            plt.ylabel("Declination (degrees)")
            labels = 24 - (ticks/(mywidth/24))
            # NOTE(review): the first assignment is immediately overwritten
            labels[0] = 0
            labels[0] = 24
            yticks = np.arange(0, myheight, 15*dpi)
        elif gridtype == '-EL':
            plt.xlabel("Right Ascension (hours)")
            plt.ylabel("Elevation (degrees)")
            labels = 24 - (ticks/(mywidth/24))
            labels[0] = 0
            labels[0] = 24
            yticks = np.arange(0, myheight, 15*dpi)
        elif gridtype == 'RA0': # put 0 hours in middle of plot
            plt.xlabel("Right Ascension (hours)")
            plt.ylabel("Declination (degrees)")
            labels = 12 - (ticks/(mywidth/24))
            nlabels = len(labels)
            # wrap negative hour labels into the 0..24 range
            for iii in range(nlabels):
                if labels[iii] < 0:
                    labels[iii] = 24 + labels[iii]
                if labels[iii] == 24:
                    labels[iii] = 0
            yticks = np.arange(0, myheight, 15*dpi)
        else:
            yticks = np.arange(0, myheight, 30*dpi)
            ticks = np.arange(0, mywidth, 30*dpi)
            x_ticks = xmin + (xmax-xmin)*ticks/mywidth
            labels = x_ticks
            plt.xlabel("Galactic Longitude (degrees)")
            plt.ylabel("Galactic Latitude (degrees)")
        # wnat an integer list of labels
        # slabels = str(labels)
        print(ticks, labels)
        y_ticks = ymax - (ymax-ymin)*yticks/myheight
        plt.yticks(yticks, y_ticks)
        plt.xticks(ticks, labels, rotation='horizontal')
        plt.colorbar()
    crval2 = (xmin + xmax)/2.
    crval1 = (ymin + ymax)/2.
    cdelt1 = (-1./float(dpi)) - .001
    cdelt2 = (1./float(dpi)) + .001
    if doRatio:
        # now show each of the images
        for iGrid in range(4):
            # flip each per-telescope image vertically before writing
            imagetemp = copy.deepcopy(grids[iGrid].image)
            imagetemp2 = copy.deepcopy(grids[iGrid].image)
            kkk = myheight - 1
            for jjj in range(myheight):
                # imagetemp[:] is a view, so this assigns row kkk in place
                imagetemp[:][kkk] = imagetemp2[:][jjj]
                kkk = kkk - 1
            grids[iGrid].image = imagetemp
            writeFitsImage( rs, iGrid+2, grids[iGrid], projection)
        # put each telescope in a different grid
        ratio1 = copy.deepcopy(grid1)
        ratio2 = copy.deepcopy(grid1)
        ratio3 = copy.deepcopy(grid1)
        gratios = [ratio1, ratio2, ratio3]
        ratios = np.zeros(3)
        rmss = np.zeros(3)
        # compare telescopes T2..T4 against T5 (grid index 3)
        jGrid = 3
        for iGrid in range(3):
            print("Gain Ratios for Telescopes T%d and T%d" % (iGrid+2, jGrid+2))
            ratio, rms, aratio = gridratio(grids[iGrid], grids[jGrid])
            ratios[iGrid] = ratio
            rmss[iGrid] = rms
            writeFitsImage( rs, iGrid+2, aratio, projection)
    # always write the combined image
    writeFitsImage( rs, 0, gridall, projection)
    plt.show()
if __name__ == "__main__":
main()
#SIMPLE = T / conforms to FITS standard
#BITPIX = -32 / array data type
#NAXIS = 2 / number of array dimensions
#NAXIS1 = 4323
#NAXIS2 = 2163
#OBJECT = 'HI4PI ' / The HI 4-PI Survey
#TELESCOP= 'Effelsberg 100m RT; ATNF Parkes 64-m' / Telescope names
#ORIGIN = 'AIfA/MPIfR Bonn; ATNF Sydney' / Organisations or Institutions
#REFERENC= 'HI4PI Collaboration 2016' / A&A
#RESTFRQ = 1420405751.77
#RESTWAV = 0.211061140541
#CDELT1 = -0.08333333330000001
#CRPIX1 = 2162.0
#CRVAL1 = 0.0
#CTYPE1 = 'RA---CAR'
#CUNIT1 = 'deg '
#CDELT2 = 0.08333333330000001
#CRPIX2 = 1082.0
#CRVAL2 = 0.0
#CTYPE2 = 'DEC--CAR'
#CUNIT2 = 'deg '
#WCSAXES = 2
#RADESYS = 'FK5 '
#EQUINOX = 2000.0
#LONPOLE = 0.0
#LATPOLE = 90.0
#BUNIT = 'cm^(-2) '
#BPA = 0.0
#BMAJ = 0.2706
#BMIN = 0.2706
#VMIN = -1.5169915133210E+21
#VMAX = 2.39529868415209E+22
#CHECKSUM= '9eqZJcnY9cnYGcnY' / HDU checksum updated 2016-09-15T23:38:45
#DATASUM = '3638685465' / data unit checksum updated 2016-09-15T23:38:45
#END
| gridratio | identifier_name |
GridSave.py | """
Model to use the GridClass to make an image of radio astronomical observations
"""
# Functions to create a grid and place astronomical data on that
# grid with a convolving function
# HISTORY
# 20MAY15 GIL one more time on coordiante header updates in FITS
# 20MAY14 GIL update coordinates in FITS header
# 20MAY13 GIL update coordinates in FITS header
# 20MAY12 GIL compute gain ratios for different images.
# 20MAY01 GIL grid different telescopes to different images for gain and offset merging.
# 20APR30 GIL initial version based on GridObs.py
import sys
import os
import copy
import numpy as np
from matplotlib import pyplot as plt
import datetime
import GridClass
from astropy.io import fits
from astropy.wcs import wcs
import radioastronomy
import gainfactor
EPSILON = 0.01
# minimum intensity to include for gain normalization
EPSILON = 1000.0
doRatio = False
# special telescope factors for 2020 March + April
telescopefactors = [ 1.05216, 0.94350, 1.02153, 0.98935]
telescopefactors = [ 1.09216, 0.94350, 1.02153, 0.98935]
tsum = 0.
for iii in range( 4):
tsum = tsum + telescopefactors[iii]
tsum = tsum/4.
print( "Check of Telescope Factors: TSum = %f" % (tsum))
def fixImageCoordinates( filename, projection):
"""
fixImageCoordinates() interpolates pixels to proper reference frame
"""
printcount = 0
inname = filename
nchar = len(inname)
# strip off the end of the previous image and add the new projection name
outname = inname[0:nchar-8]
outname = outname + projection + ".fit"
# get the input image coordinate transform, Usually Cartesian
win = wcs.WCS(filename)
hdu = fits.open(filename)[0]
imageData = fits.getdata( filename)
imageCopy = copy.deepcopy( imageData)
#
header = hdu.header
nx = header['NAXIS1']
ny = header['NAXIS2']
crval1 = header['CRVAL1']
crval2 = header['CRVAL2']
crpix1 = header['CRPIX1']
crpix2 = header['CRPIX2']
cdelt1 = header['CDELT1']
cdelt2 = header['CDELT2']
ctype1 = header['CTYPE1']
ctype2 = header['CTYPE2']
xmin = crval1 + (1. - crpix1)*cdelt1
xmax = crval1 + (nx - crpix1)*cdelt1
ymin = crval2 + (1. - crpix2)*cdelt2
ymax = crval2 + (nx - crpix2)*cdelt2
print( "fixImage: %.2f,%2f %.1f,%.1f %.3f,%.3f" % (crval1,crval2,crpix1,crpix2,cdelt1,cdelt2))
print( "fixImage: %s,%s" % (ctype1,ctype2))
# redefine for new projection desired
ctype1 = ctype1[0:4]+projection
ctype2 = ctype2[0:4]+projection
print( "=> %s, %s" % (ctype1, ctype2))
header['CTYPE1'] = ctype1
header['CTYPE2'] = ctype2
# for output image the reference coordinate x pixel can be anywhere
# move the center to zero
header['CRVAL1'] = 0.
header['LONPOLE'] = 0.0
header['LATPOLE'] = 90.0
header.update()
tempname = "GridSave.fits"
hdu = fits.PrimaryHDU(header=header, data=imageCopy)
print("Outname: %s" % (tempname))
if os.path.exists(tempname):
os.remove(tempname)
hdu.writeto(tempname)
wout = wcs.WCS(tempname)
# now that coordinates are defined, remove temporary file
if os.path.exists(tempname):
os.remove(tempname)
pixin = np.array([[0, 0], [nx-1, ny-1]], dtype=np.float64)
pixout = np.array([[0, 0], [nx-1, ny-1]], dtype=np.float64)
print("NX, NY: %d,%d" % (nx, ny))
nan = float("NAN")
# print("Nan = %f" % (nan))
# assume no data until found
for jjj in range (ny):
for iii in range (nx):
imageCopy[jjj][iii] = nan
# now for output image check all pixel values
for jjj in range (ny):
for iii in range (nx):
# if this image pixal has no value
pixout[0] = (iii,jjj)
oworld = wout.wcs_pix2world(pixout, 0)
xy = oworld[0]
if np.isnan(xy[0]):
continue
# print("pixout: %d,%d : world %.f,%.2f" % (iii,jjj,xy[0],xy[1]))
pixin[0] = oworld[0]
ipixels = win.wcs_world2pix(pixin, 0)
# get input pixels for coordinate
ixy = ipixels[0]
# if outside of current image skip this pixel
if np.isnan( ixy[0]):
continue
ix = int(ixy[0])
iy = int(ixy[1])
ix = max( min( nx-1, ix), 0)
iy = max( min( ny-1, iy), 0)
ix = int(ix)
iy = int(iy)
# print("pixin : %d,%d : world %.f,%.2f" % (ix,iy,xy[0],xy[1]))
# print("OX,OY:%d,%d <= IX,IY:%d,%d" %( ox,oy, ix,iy))
imageCopy[jjj][iii] = imageData[iy][ix]
print("Preparing to write new coordiante transform: %s" % (outname))
if os.path.exists(outname):
os.remove(outname)
newhdu = fits.PrimaryHDU(header=header, data=imageCopy)
newhdu.writeto(outname)
print("Wrote new")
return
def writeFitsImage( rs, cpuIndex, grid, projection):
"""
writeFitsImage() takes a spectrum for describing the observation and a 2 dimensinoal
array of image data and writes a FITS image
This program produces two images. It expects an grid that is in cartisian format.
The second format described by the input: projection
"""
# print("Image: ", imageData)
imageData = grid.image
size = imageData.shape
imageCopy = copy.deepcopy( imageData)
nx = size[1]
ny = size[0]
# now flip the Y axis of the image to match the FITS Convention
iy = ny - 1
for iii in range(ny):
imageCopy[iii][:] = imageData[iy][:]
iy = iy - 1
pixcrd = np.array([[0, 0], [24, 38]], dtype=np.float64)
# Create a new WCS object. The number of axes must be set
# from the start
w = wcs.WCS(naxis=2)
gridtype = grid.gridtype.upper()
print("Grid Type: %s %d" % (gridtype, gridtype.find('RA')))
# gridtype = "RA"
if gridtype.find('RA') > -1:
maptype = 'RA'
XTYPE = 'RA--'
YTYPE = 'DEC-'
else:
maptype = 'GAL'
XTYPE = 'GLON'
YTYPE = 'GLAT'
xstart = 360.
ystart = 90.
# select the projection here:
# projection = "-CYP"
# projection = "-CAR"
crval1 = grid.crval1
crval2 = grid.crval2
crpix1 = grid.crpix1
crpix2 = grid.crpix2
cdelt1 = grid.cdelt1
cdelt2 = grid.cdelt2
print('--------- Grid Type: %s (%f,%f %f,%f ' % (gridtype, crval1, crval2, cdelt1, cdelt2))
hdu = fits.PrimaryHDU()
header = hdu.header
dateobs = "%s" % (rs.utc)
dateobs = dateobs.replace(" ","T")
mydate = datetime.datetime.now()
mydate = "%s" % (mydate)
mydate = mydate[2:10]
mydate.replace('-','/')
header['NAXIS1'] = int(nx)
header['NAXIS2'] = int(ny)
header['BUNIT'] = 'K-km/s/BEAM'
maptype = "RA"
if maptype[0:2] == "RA":
maptype = "RA"
header['CTYPE1'] = 'RA---CAR'
else:
maptype = "GAL"
header['CTYPE1'] = 'GLON-CAR'
# create a cartesian x centered iamge
header['CRPIX1'] = nx/2.
header['CRVAL1'] = 180.
grid.crval1 = header['CRVAL1']
header['CDELT1'] = cdelt1
header['CUNIT1'] = 'deg'
header['CRVAL2'] = (grid.ymax+grid.ymin)/2.
grid.crval2 = header['CRVAL2']
header['CRPIX2'] = ny/2.
header['CDELT2'] = cdelt2
header['CUNIT2'] = 'deg'
grid.gridtype = maptype
if maptype[0:2] == "RA":
print("RA: writeFits: %s" % (maptype))
header['CTYPE2'] = 'DEC--CAR'
else:
print("GAL: writeFits: %s" % (maptype))
header['CTYPE2'] = 'GLAT-CAR'
header['WCAXES'] = 2
header['RADESYS'] ='FK5'
# temporarily replace ref coordinate iwth zero
crval2 = header['CRVAL2']
crpix2 = header['CRPIX2']
# redefine the reference for the best cartisian format
referencevalue = 0.
dpix = (referencevalue - crval2)/cdelt2
crpix2 = crpix2 + dpix
# change x axis
header['CRVAL2'] = referencevalue
header['CRPIX2'] = crpix2
header['EQUINOX'] = 2.000000000000E+03 # Equinox of equatorial coordinates
header['BMAJ'] = 18.1 # Beam major axis in degrees: 80cm horn at 21.1cm
header['BMIN'] = 18.1 # Beam minor axis in degrees
header['BPA'] = 0.000000000000E+00 # Beam position angle in degrees
header['RESTFRQ'] = 1.42040575177E+09 # Line rest frequency, Hz
header['RESTWAV'] = 0.211061140551 # Line wavelength (m)
header['DATE-OBS'] = dateobs
header['DATE'] = mydate
header['OBSERVER'] = 'Science Aficionado'
header['OBJECT'] = 'Milky Way'
header['TELESCOP'] = 'Aficionado Horn'
header['HISTORY'] = "GridSave.py -- Glen Langston -- 20 May 13"
header['HISTORY'] = "Observations in March + April 2020"
# while len(header) < (36 * 4 - 1):
# header.append() # Adds a blank card to the end
# header.delval("EXTEND")
header.update()
# hdu = fits.PrimaryHDU(header=header, data=imageData)
hdu = fits.PrimaryHDU(header=header, data=imageCopy)
# As file at filePath is deleted now, so we should check if file exists or not not before deleting them
outname = ("Aficionado_T%d" % (cpuIndex)) + "-" + maptype + projection + ".fit"
if os.path.exists(outname):
os.remove(outname)
hdu.writeto(outname)
# create a second file with new projection
fixImageCoordinates( outname, projection)
return
def gridratio( grid1, grid2):
    """
    Compute the per-pixel ratio of two grids where both have significant values.

    Used to compute gain ratios between telescopes.  A pixel contributes
    only when both input pixels exceed the module-level EPSILON threshold;
    all other pixels of the output ratio grid are zero.

    Returns
    -------
    ratio : float
        Average of the contributing per-pixel ratios (0. when none).
    rms : float
        Standard deviation of the contributing ratios (0. when none).
    ratiogrid : Grid or None
        Deep copy of grid1 holding the per-pixel ratios, or None when
        the two grids have different dimensions.
    """
    nx1 = grid1.img_width
    ny1 = grid1.img_height
    nx2 = grid2.img_width
    ny2 = grid2.img_height
    ratio = 0.
    rms = 0.
    # FIX: always return a 3-tuple; the dimension-mismatch paths previously
    # returned only (ratio, rms), which broke callers unpacking 3 values.
    if nx1 != nx2:
        print("GridRatio: Nx1 != Nx2 (%d, %d)" % (nx1, nx2))
        return ratio, rms, None
    if ny1 != ny2:
        print("GridRatio: Ny1 != Ny2 (%d, %d)" % (ny1, ny2))
        return ratio, rms, None
    count = 0
    nonzero = np.zeros(nx1*ny1)
    # the output grid starts as a copy of grid1; every pixel is overwritten
    ratiogrid = copy.deepcopy( grid1)
    for iii in range(nx1):
        for jjj in range(ny1):
            # put in zero as default
            ratiogrid.image[jjj,iii] = 0.
            if grid1.image[jjj,iii] > EPSILON and grid2.image[jjj,iii] > EPSILON:
                pixelratio = grid1.image[jjj,iii]/grid2.image[jjj,iii]
                # FIX: store the per-pixel ratio; previously the ratio grid
                # was left all zero, so written ratio images were blank.
                ratiogrid.image[jjj,iii] = pixelratio
                nonzero[count] = pixelratio
                count = count + 1
    if count < 2:
        print ("No overlap in non-zero samples")
        return ratio, rms, ratiogrid
    # statistics over only the contributing pixels
    nonzero = nonzero[0:count]
    asum = np.sum( nonzero)
    ratio = asum/float(count)
    rms = np.std( nonzero)
    print ("Grid Ratio: %.4f +/- %.4f for %d samples" % (ratio, rms/np.sqrt(count), count))
    # return the ratio grid
    return ratio, rms, ratiogrid
def main():
|
if __name__ == "__main__":
main()
#SIMPLE = T / conforms to FITS standard
#BITPIX = -32 / array data type
#NAXIS = 2 / number of array dimensions
#NAXIS1 = 4323
#NAXIS2 = 2163
#OBJECT = 'HI4PI ' / The HI 4-PI Survey
#TELESCOP= 'Effelsberg 100m RT; ATNF Parkes 64-m' / Telescope names
#ORIGIN = 'AIfA/MPIfR Bonn; ATNF Sydney' / Organisations or Institutions
#REFERENC= 'HI4PI Collaboration 2016' / A&A
#RESTFRQ = 1420405751.77
#RESTWAV = 0.211061140541
#CDELT1 = -0.08333333330000001
#CRPIX1 = 2162.0
#CRVAL1 = 0.0
#CTYPE1 = 'RA---CAR'
#CUNIT1 = 'deg '
#CDELT2 = 0.08333333330000001
#CRPIX2 = 1082.0
#CRVAL2 = 0.0
#CTYPE2 = 'DEC--CAR'
#CUNIT2 = 'deg '
#WCSAXES = 2
#RADESYS = 'FK5 '
#EQUINOX = 2000.0
#LONPOLE = 0.0
#LATPOLE = 90.0
#BUNIT = 'cm^(-2) '
#BPA = 0.0
#BMAJ = 0.2706
#BMIN = 0.2706
#VMIN = -1.5169915133210E+21
#VMAX = 2.39529868415209E+22
#CHECKSUM= '9eqZJcnY9cnYGcnY' / HDU checksum updated 2016-09-15T23:38:45
#DATASUM = '3638685465' / data unit checksum updated 2016-09-15T23:38:45
#END
| """
Main executable for gridding astronomical data
"""
dpi = 1
dpi = 2
width = int(360)
height = int(130)
mywidth = int(width*dpi)
myheight = int(height*dpi)
FWHM = 7.5 # degrees
FWHM = 10.0 # degrees
FWHM = 5.0 # degrees
FWHM = 3.0 # degrees
FWHM = 1.0 # degrees
weight = 1.
nargs = len(sys.argv)
if nargs < 2:
print('GR: GRid Observations of integrated intensity produced by the T Command')
print('GR produces fits images for each of the horns used for the observations.')
print('For observations at the same coordinates, the ratios of intensities are also produced.')
print('The FITS format files require header information, which is copied from the')
print('Cold Load File provided by the user')
print('GR RA|GAL <cold file name> <savefile1> [<savefile2> ... <savefileN>]')
print("")
print('Glen Langston, National Science Foundation -- 20 May 12')
exit()
gridtype = sys.argv[1]
gridtype = gridtype.upper()
print('Grid Type: ', gridtype)
# enable having ra going from 24 to 0 hours == 360 to 0 degrees
xsign = 1.
xoffset = 0.
if gridtype == 'RA':
xmin = 0.
xmax = 360.
ymin = -40.
ymax = 90.
maptype = 'RA'
elif gridtype == '-RA':
xmin = 0.
xmax = 360.
ymin = -40.
ymax = 90.
xsign = -1.
xoffset = 360. # when x = 360. should be at zero.
maptype = 'RA'
elif gridtype == '-EL':
xmin = 0.
xmax = 360.
ymin = 0.
ymax = 90.
xsign = -1.
xoffset = 360. # when x = 360. should be at zero.
maptype = 'AZEL'
elif gridtype == 'RA0':
xmin = 0.
xmax = 360.
ymin = -41.
ymax = 89.
xsign = -1.
xoffset = 180. # when x = 360. should be at zero.
gridtype = 'RA'
elif gridtype == 'GAL':
xmin = -180.
xmax = 180.
ymin = -90.
ymax = 90.
maptype = 'GAL'
if gridtype != 'RA' and gridtype != 'GAL' and gridtype != '-RA' and gridtype != "RA0":
print('Error parsing grid type: ', gridtype)
print('1st argument should be either RA, -RA or GAL')
exit()
rs = radioastronomy.Spectrum()
if doRatio:
#create the grid with map parameters
grid1 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
grid2 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
grid3 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
grid4 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
# put each telescope in a different grid
grids = [grid1, grid2, grid3, grid4]
gridall = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
height=height, dpi=dpi, FWHM=FWHM, \
projection="-CAR", gridtype=maptype)
projection = "-AIT"
# coldfile
coldfile = sys.argv[2]
# get telescope geographic location etc
print("Reading Observing parameters from: %s" % (coldfile))
rs.read_spec_ast(coldfile)
print("Observer: %s " % (rs.observer))
# first read through all data and find hot load
names = sys.argv[3:]
names = sorted(names)
firsttime = ""
lasttime = ""
count = 0
# setup grid indicies so that cpuIndex goes to the correct grid
# This assumes telescopes 2,3,4,5 are being used]
gridIndex = [0,0,0,1,2,3]
# for all save Files to Grid
for filename in names:
print("File: %s" % (filename))
f = open(filename)
date = "Unknown"
while date != "":
date, time, cpuIndex, telaz, telel, tSys, tRx, tRms, tint, KperC, tSourcemax, velSource, dV, tVSum, tVSumRms, tSumKmSec, dTSumKmSec, gainFactor = gainfactor.readSaveValues( f)
dlen = len(date)
if dlen < 1:
break
if date[0] == "#":
continue
# else not a comment process the line
count = count + 1
isodate = "20"+date+"T"+time
# print("DateTime: %s" % (isodate))
rs.utc = datetime.datetime.strptime(isodate,"%Y-%m-%dT%H:%M:%S")
# print("Utc: %s" % (rs.utc))
rs.telaz = telaz
rs.telel = telel
rs.azel2radec()
ra = rs.ra
dec = rs.dec
lon = rs.gallon
lat = rs.gallat
tsum = tSumKmSec
tsdv = dTSumKmSec
tmax = tSourcemax
vave = tVSum
vsdv = tVSumRms
if firsttime == "":
firsttime = date
else:
lasttime = date
# if vave > -100. and vave < 100:
# mygrid.convolve( lon, lat, vave, 1.)
iGrid = gridIndex[cpuIndex]
gainCorr = telescopefactors[iGrid]
tsum = tsum * gainCorr
if gridtype == 'RA':
if doRatio:
grids[iGrid].convolve(ra, dec, tsum, weight)
gridall.convolve( ra, dec, tsum, weight)
elif gridtype == '-RA':
x = (ra*xsign) + xoffset
if doRatio:
grids[iGrid].convolve(x, dec, tsum, weight)
gridall.convolve( x, dec, tsum, weight)
elif gridtype == 'RA0':
x = (ra*xsign) + xoffset
if x < 0:
x = x + xmax
elif x > xmax:
x = x - xmax
if doRatio:
grids[iGrid].convolve(x, dec, tsum, weight)
gridall.convolve( x, dec, tsum, weight)
else:
if doRatio:
grids[iGrid].convolve(lon, lat, tsum, weight)
gridall.convolve( lon, lat, tsum, weight)
if count == 0:
print('Convolving Coordinates: ', ra, dec, lon, lat)
print('Convolving Intensities: ', tsum, tsdv, vave, vsdv)
print('Convolvign Parameters : ', n, time)
count = count + 1
# end reading all lines in save file
f.close()
# normalize each of the gridded images
if doRatio:
grids[0].normalize()
grids[1].normalize()
grids[2].normalize()
grids[3].normalize()
gridall.normalize()
# mygrid.check()
# zmin = -1000.
# zmax = 3000.
# limit grid intensities for plotting
# mygrid.set_ij( 0, 0, zmax, 1.)
# mygrid.set_ij( 1, 1, zmin, 1.)
# mygrid.limit(zmin, zmax)
subplots = False
if subplots:
fig, ax = plt.subplots(figsize=(myheight, mywidth), dpi=dpi)
if gridtype == 'RA':
cax = fig.add_axes([-180, 180], [-90, 90])
else:
cax = fig.add_axes([0, 24], [-90, 90])
cbar = fig.colorbar(cax, ticks=[zmin, zmax], orientation='horizontal')
cbar.ax.set_yticklabels([str(zmin), str(zmax)])
ax.set_title("Citizen Science: Horn observations of our Galaxy")
else:
#y_ticks = ymin + (ymax-ymin)*ticks/myheight
ticks = np.arange(0, mywidth, 30*dpi)
x_ticks = xmin + ((xmax-xmin)*ticks/mywidth)
plt.imshow(gridall.image, interpolation='nearest', cmap=plt.get_cmap('jet'))
if firsttime != lasttime:
plt.title("Citizen Science: Observing our Galaxy: %s to %s" % (firsttime, lasttime))
else:
plt.title("Citizen Science: Observing our Galaxy: %s" % (firsttime))
if gridtype == 'RA':
plt.xlabel("Right Ascension (hours)")
plt.ylabel("Declination (degrees)")
labels = ticks/(mywidth/24)
yticks = np.arange(0, myheight, 15*dpi)
elif gridtype == '-RA':
plt.xlabel("Right Ascension (hours)")
plt.ylabel("Declination (degrees)")
labels = 24 - (ticks/(mywidth/24))
labels[0] = 0
labels[0] = 24
yticks = np.arange(0, myheight, 15*dpi)
elif gridtype == '-EL':
plt.xlabel("Right Ascension (hours)")
plt.ylabel("Elevation (degrees)")
labels = 24 - (ticks/(mywidth/24))
labels[0] = 0
labels[0] = 24
yticks = np.arange(0, myheight, 15*dpi)
elif gridtype == 'RA0': # put 0 hours in middle of plot
plt.xlabel("Right Ascension (hours)")
plt.ylabel("Declination (degrees)")
labels = 12 - (ticks/(mywidth/24))
nlabels = len(labels)
for iii in range(nlabels):
if labels[iii] < 0:
labels[iii] = 24 + labels[iii]
if labels[iii] == 24:
labels[iii] = 0
yticks = np.arange(0, myheight, 15*dpi)
else:
yticks = np.arange(0, myheight, 30*dpi)
ticks = np.arange(0, mywidth, 30*dpi)
x_ticks = xmin + (xmax-xmin)*ticks/mywidth
labels = x_ticks
plt.xlabel("Galactic Longitude (degrees)")
plt.ylabel("Galactic Latitude (degrees)")
# wnat an integer list of labels
# slabels = str(labels)
print(ticks, labels)
y_ticks = ymax - (ymax-ymin)*yticks/myheight
plt.yticks(yticks, y_ticks)
plt.xticks(ticks, labels, rotation='horizontal')
plt.colorbar()
crval2 = (xmin + xmax)/2.
crval1 = (ymin + ymax)/2.
cdelt1 = (-1./float(dpi)) - .001
cdelt2 = (1./float(dpi)) + .001
if doRatio:
# now show eacsh of the images
for iGrid in range(4):
imagetemp = copy.deepcopy(grids[iGrid].image)
imagetemp2 = copy.deepcopy(grids[iGrid].image)
kkk = myheight - 1
for jjj in range(myheight):
imagetemp[:][kkk] = imagetemp2[:][jjj]
kkk = kkk - 1
grids[iGrid].image = imagetemp
writeFitsImage( rs, iGrid+2, grids[iGrid], projection)
# put each telescope in a different grid
ratio1 = copy.deepcopy(grid1)
ratio2 = copy.deepcopy(grid1)
ratio3 = copy.deepcopy(grid1)
gratios = [ratio1, ratio2, ratio3]
ratios = np.zeros(3)
rmss = np.zeros(3)
jGrid = 3
for iGrid in range(3):
print("Gain Ratios for Telescopes T%d and T%d" % (iGrid+2, jGrid+2))
ratio, rms, aratio = gridratio(grids[iGrid], grids[jGrid])
ratios[iGrid] = ratio
rmss[iGrid] = rms
writeFitsImage( rs, iGrid+2, aratio, projection)
writeFitsImage( rs, 0, gridall, projection)
plt.show() | identifier_body |
GridSave.py | """
Model to use the GridClass to make an image of radio astronomical observations
"""
# Functions to create a grid and place astronomical data on that
# grid with a convolving function
# HISTORY
# 20MAY15 GIL one more time on coordiante header updates in FITS
# 20MAY14 GIL update coordinates in FITS header
# 20MAY13 GIL update coordinates in FITS header
# 20MAY12 GIL compute gain ratios for different images.
# 20MAY01 GIL grid different telescopes to different images for gain and offset merging.
# 20APR30 GIL initial version based on GridObs.py
import sys
import os
import copy
import numpy as np
from matplotlib import pyplot as plt
import datetime
import GridClass
from astropy.io import fits
from astropy.wcs import wcs
import radioastronomy
import gainfactor
# minimum intensity cutoff; this first value is overridden just below
EPSILON = 0.01
# minimum intensity to include for gain normalization (value actually in effect)
EPSILON = 1000.0
# when True, also grid each telescope separately and compute gain ratios
doRatio = False
# special telescope factors for 2020 March + April
# (the second list overrides the first; earlier values kept for history)
telescopefactors = [ 1.05216, 0.94350, 1.02153, 0.98935]
telescopefactors = [ 1.09216, 0.94350, 1.02153, 0.98935]
# sanity check: report the average gain factor (should be near 1.0)
tsum = 0.
for iii in range( 4):
    tsum = tsum + telescopefactors[iii]
tsum = tsum/4.
print( "Check of Telescope Factors: TSum = %f" % (tsum))
def fixImageCoordinates( filename, projection):
"""
fixImageCoordinates() interpolates pixels to proper reference frame
"""
printcount = 0
inname = filename
nchar = len(inname)
# strip off the end of the previous image and add the new projection name
outname = inname[0:nchar-8]
outname = outname + projection + ".fit"
# get the input image coordinate transform, Usually Cartesian
win = wcs.WCS(filename)
hdu = fits.open(filename)[0]
imageData = fits.getdata( filename)
imageCopy = copy.deepcopy( imageData)
#
header = hdu.header
nx = header['NAXIS1']
ny = header['NAXIS2']
crval1 = header['CRVAL1']
crval2 = header['CRVAL2']
crpix1 = header['CRPIX1']
crpix2 = header['CRPIX2']
cdelt1 = header['CDELT1']
cdelt2 = header['CDELT2']
ctype1 = header['CTYPE1']
ctype2 = header['CTYPE2']
xmin = crval1 + (1. - crpix1)*cdelt1
xmax = crval1 + (nx - crpix1)*cdelt1
ymin = crval2 + (1. - crpix2)*cdelt2
ymax = crval2 + (nx - crpix2)*cdelt2
print( "fixImage: %.2f,%2f %.1f,%.1f %.3f,%.3f" % (crval1,crval2,crpix1,crpix2,cdelt1,cdelt2))
print( "fixImage: %s,%s" % (ctype1,ctype2))
# redefine for new projection desired
ctype1 = ctype1[0:4]+projection
ctype2 = ctype2[0:4]+projection
print( "=> %s, %s" % (ctype1, ctype2))
header['CTYPE1'] = ctype1
header['CTYPE2'] = ctype2
# for output image the reference coordinate x pixel can be anywhere
# move the center to zero
header['CRVAL1'] = 0.
header['LONPOLE'] = 0.0
header['LATPOLE'] = 90.0
header.update()
tempname = "GridSave.fits"
hdu = fits.PrimaryHDU(header=header, data=imageCopy)
print("Outname: %s" % (tempname))
if os.path.exists(tempname):
os.remove(tempname)
hdu.writeto(tempname)
wout = wcs.WCS(tempname)
# now that coordinates are defined, remove temporary file
if os.path.exists(tempname):
os.remove(tempname)
pixin = np.array([[0, 0], [nx-1, ny-1]], dtype=np.float64)
pixout = np.array([[0, 0], [nx-1, ny-1]], dtype=np.float64)
print("NX, NY: %d,%d" % (nx, ny))
nan = float("NAN")
# print("Nan = %f" % (nan))
# assume no data until found
for jjj in range (ny):
|
# now for output image check all pixel values
for jjj in range (ny):
for iii in range (nx):
# if this image pixal has no value
pixout[0] = (iii,jjj)
oworld = wout.wcs_pix2world(pixout, 0)
xy = oworld[0]
if np.isnan(xy[0]):
continue
# print("pixout: %d,%d : world %.f,%.2f" % (iii,jjj,xy[0],xy[1]))
pixin[0] = oworld[0]
ipixels = win.wcs_world2pix(pixin, 0)
# get input pixels for coordinate
ixy = ipixels[0]
# if outside of current image skip this pixel
if np.isnan( ixy[0]):
continue
ix = int(ixy[0])
iy = int(ixy[1])
ix = max( min( nx-1, ix), 0)
iy = max( min( ny-1, iy), 0)
ix = int(ix)
iy = int(iy)
# print("pixin : %d,%d : world %.f,%.2f" % (ix,iy,xy[0],xy[1]))
# print("OX,OY:%d,%d <= IX,IY:%d,%d" %( ox,oy, ix,iy))
imageCopy[jjj][iii] = imageData[iy][ix]
print("Preparing to write new coordiante transform: %s" % (outname))
if os.path.exists(outname):
os.remove(outname)
newhdu = fits.PrimaryHDU(header=header, data=imageCopy)
newhdu.writeto(outname)
print("Wrote new")
return
def writeFitsImage( rs, cpuIndex, grid, projection):
    """
    Write a gridded image as a FITS file, then resample it to a projection.

    Takes a spectrum (rs) describing the observation and a Grid holding a
    2-dimensional image and writes a Cartesian (-CAR) FITS image.  A second
    image in the requested projection is then produced by calling
    fixImageCoordinates() on the file just written.

    Parameters
    ----------
    rs : radioastronomy.Spectrum
        Observation description; only rs.utc is read here (for DATE-OBS).
    cpuIndex : int
        Telescope index used to build the output file name.
    grid : GridClass.Grid
        Grid whose .image array is written; its coordinate attributes
        (crval1/2, crpix1/2, cdelt1/2, gridtype, ymin, ymax) are read and
        some are updated in place.
    projection : str
        FITS projection suffix (e.g. "-AIT") for the second image.
    """
    # print("Image: ", imageData)
    imageData = grid.image
    size = imageData.shape
    imageCopy = copy.deepcopy( imageData)
    nx = size[1]
    ny = size[0]
    # now flip the Y axis of the image to match the FITS convention
    # (FITS row 1 is at the bottom of the image)
    iy = ny - 1
    for iii in range(ny):
        imageCopy[iii][:] = imageData[iy][:]
        iy = iy - 1
    # NOTE(review): pixcrd and w are computed but never used below
    pixcrd = np.array([[0, 0], [24, 38]], dtype=np.float64)
    # Create a new WCS object.  The number of axes must be set
    # from the start
    w = wcs.WCS(naxis=2)
    gridtype = grid.gridtype.upper()
    print("Grid Type: %s %d" % (gridtype, gridtype.find('RA')))
    # gridtype = "RA"
    # pick axis names from the grid type (RA/DEC vs. galactic LON/LAT)
    if gridtype.find('RA') > -1:
        maptype = 'RA'
        XTYPE = 'RA--'
        YTYPE = 'DEC-'
    else:
        maptype = 'GAL'
        XTYPE = 'GLON'
        YTYPE = 'GLAT'
    xstart = 360.
    ystart = 90.
    # select the projection here:
    # projection = "-CYP"
    # projection = "-CAR"
    crval1 = grid.crval1
    crval2 = grid.crval2
    crpix1 = grid.crpix1
    crpix2 = grid.crpix2
    cdelt1 = grid.cdelt1
    cdelt2 = grid.cdelt2
    print('--------- Grid Type: %s (%f,%f %f,%f ' % (gridtype, crval1, crval2, cdelt1, cdelt2))
    hdu = fits.PrimaryHDU()
    header = hdu.header
    # ISO-format observation date for the DATE-OBS card
    dateobs = "%s" % (rs.utc)
    dateobs = dateobs.replace(" ","T")
    mydate = datetime.datetime.now()
    mydate = "%s" % (mydate)
    mydate = mydate[2:10]
    # NOTE(review): str.replace returns a new string; this result is
    # discarded, so '-' is never actually replaced by '/' -- confirm intent
    mydate.replace('-','/')
    header['NAXIS1'] = int(nx)
    header['NAXIS2'] = int(ny)
    header['BUNIT'] = 'K-km/s/BEAM'
    # NOTE(review): maptype is forced to "RA" here, overriding the GAL
    # detection above, so galactic grids also get RA---CAR axes -- confirm
    maptype = "RA"
    if maptype[0:2] == "RA":
        maptype = "RA"
        header['CTYPE1'] = 'RA---CAR'
    else:
        maptype = "GAL"
        header['CTYPE1'] = 'GLON-CAR'
    # create a cartesian x-centered image
    header['CRPIX1'] = nx/2.
    header['CRVAL1'] = 180.
    grid.crval1 = header['CRVAL1']
    header['CDELT1'] = cdelt1
    header['CUNIT1'] = 'deg'
    header['CRVAL2'] = (grid.ymax+grid.ymin)/2.
    grid.crval2 = header['CRVAL2']
    header['CRPIX2'] = ny/2.
    header['CDELT2'] = cdelt2
    header['CUNIT2'] = 'deg'
    grid.gridtype = maptype
    if maptype[0:2] == "RA":
        print("RA: writeFits: %s" % (maptype))
        header['CTYPE2'] = 'DEC--CAR'
    else:
        print("GAL: writeFits: %s" % (maptype))
        header['CTYPE2'] = 'GLAT-CAR'
    header['WCAXES'] = 2
    header['RADESYS'] ='FK5'
    # temporarily replace ref coordinate with zero
    crval2 = header['CRVAL2']
    crpix2 = header['CRPIX2']
    # redefine the reference for the best cartesian format:
    # shift the reference pixel so CRVAL2 can be exactly 0 degrees
    referencevalue = 0.
    dpix = (referencevalue - crval2)/cdelt2
    crpix2 = crpix2 + dpix
    # change y-axis reference
    header['CRVAL2'] = referencevalue
    header['CRPIX2'] = crpix2
    header['EQUINOX'] = 2.000000000000E+03 # Equinox of equatorial coordinates
    header['BMAJ'] = 18.1 # Beam major axis in degrees: 80cm horn at 21.1cm
    header['BMIN'] = 18.1 # Beam minor axis in degrees
    header['BPA'] = 0.000000000000E+00 # Beam position angle in degrees
    header['RESTFRQ'] = 1.42040575177E+09 # Line rest frequency, Hz
    header['RESTWAV'] = 0.211061140551 # Line wavelength (m)
    header['DATE-OBS'] = dateobs
    header['DATE'] = mydate
    header['OBSERVER'] = 'Science Aficionado'
    header['OBJECT'] = 'Milky Way'
    header['TELESCOP'] = 'Aficionado Horn'
    header['HISTORY'] = "GridSave.py -- Glen Langston -- 20 May 13"
    header['HISTORY'] = "Observations in March + April 2020"
    # while len(header) < (36 * 4 - 1):
    #     header.append() # Adds a blank card to the end
    # header.delval("EXTEND")
    header.update()
    # hdu = fits.PrimaryHDU(header=header, data=imageData)
    hdu = fits.PrimaryHDU(header=header, data=imageCopy)
    # remove any existing output file before writing (writeto will not overwrite)
    outname = ("Aficionado_T%d" % (cpuIndex)) + "-" + maptype + projection + ".fit"
    if os.path.exists(outname):
        os.remove(outname)
    hdu.writeto(outname)
    # create a second file with new projection
    fixImageCoordinates( outname, projection)
    return
def gridratio( grid1, grid2):
    """
    Compute the pixel-by-pixel ratio grid1/grid2 where both grids are non-zero.

    Used to estimate gain ratios between two telescopes' gridded images.
    Only pixels where BOTH images exceed the module constant EPSILON
    contribute to the statistics.

    Parameters
    ----------
    grid1, grid2 : GridClass.Grid
        Grids of identical dimensions (img_width x img_height).

    Returns
    -------
    ratio : float
        Mean of the pixel ratios; 0. if the grids are incompatible or
        fewer than 2 overlapping non-zero pixels were found.
    rms : float
        Standard deviation of the pixel ratios; 0. on failure.
    ratiogrid : GridClass.Grid
        Grid whose image holds the per-pixel ratios (zero where either
        input is below EPSILON); on failure a plain copy of grid1.
    """
    nx1 = grid1.img_width
    ny1 = grid1.img_height
    nx2 = grid2.img_width
    ny2 = grid2.img_height
    ratio = 0.
    rms = 0.
    # a copy of grid1 carries the coordinate metadata for the ratio image
    ratiogrid = copy.deepcopy( grid1)
    # Bug fix: the original returned only (ratio, rms) on dimension
    # mismatch, which crashed callers that unpack three values.
    if nx1 != nx2:
        print("GridRatio: Nx1 != Nx2 (%d, %d)" % (nx1, nx2))
        return ratio, rms, ratiogrid
    if ny1 != ny2:
        print("GridRatio: Ny1 != Ny2 (%d, %d)" % (ny1, ny2))
        return ratio, rms, ratiogrid
    count = 0
    nonzero = np.zeros(nx1*ny1)
    for iii in range(nx1):
        for jjj in range(ny1):
            # put in zero as default
            ratiogrid.image[jjj, iii] = 0.
            if grid1.image[jjj, iii] > EPSILON and grid2.image[jjj, iii] > EPSILON:
                nonzero[count] = grid1.image[jjj, iii]/grid2.image[jjj, iii]
                count = count + 1
    if count < 2:
        print ("No overlap in non-zero samples")
        return ratio, rms, ratiogrid
    # statistics over the valid overlapping pixels only
    nonzero = nonzero[0:count]
    asum = np.sum( nonzero)
    ratio = asum/float(count)
    rms = np.std( nonzero)
    print ("Grid Ratio: %.4f +/- %.4f for %d samples" % (ratio, rms/np.sqrt(count), count))
    # return the ratio grid
    return ratio, rms, ratiogrid
def main():
    """
    Main executable for gridding astronomical data.

    Usage: GR RA|-RA|RA0|GAL <cold file name> <savefile1> [... <savefileN>]
    Reads save files produced by the T command, convolves each observation
    onto an all-telescope grid (and, when the module flag doRatio is True,
    onto one grid per telescope), plots the combined image and writes FITS
    images via writeFitsImage().
    """
    # repeated assignments below are retained "try values"; only the last wins
    dpi = 1
    dpi = 2
    width = int(360)
    height = int(130)
    mywidth = int(width*dpi)
    myheight = int(height*dpi)
    # convolving function width; the final assignment (1.0 degree) is in effect
    FWHM = 7.5 # degrees
    FWHM = 10.0 # degrees
    FWHM = 5.0 # degrees
    FWHM = 3.0 # degrees
    FWHM = 1.0 # degrees
    weight = 1.
    nargs = len(sys.argv)
    # print a usage summary when no arguments are given
    if nargs < 2:
        print('GR: GRid Observations of integrated intensity produced by the T Command')
        print('GR produces fits images for each of the horns used for the observations.')
        print('For observations at the same coordinates, the ratios of intensities are also produced.')
        print('The FITS format files require header information, which is copied from the')
        print('Cold Load File provided by the user')
        print('GR RA|GAL <cold file name> <savefile1> [<savefile2> ... <savefileN>]')
        print("")
        print('Glen Langston, National Science Foundation -- 20 May 12')
        exit()
    gridtype = sys.argv[1]
    gridtype = gridtype.upper()
    print('Grid Type: ', gridtype)
    # enable having ra going from 24 to 0 hours == 360 to 0 degrees
    xsign = 1.
    xoffset = 0.
    # per-grid-type plot limits and coordinate transform parameters
    if gridtype == 'RA':
        xmin = 0.
        xmax = 360.
        ymin = -40.
        ymax = 90.
        maptype = 'RA'
    elif gridtype == '-RA':
        xmin = 0.
        xmax = 360.
        ymin = -40.
        ymax = 90.
        xsign = -1.
        xoffset = 360. # when x = 360. should be at zero.
        maptype = 'RA'
    elif gridtype == '-EL':
        # NOTE(review): '-EL' is accepted here but rejected by the validity
        # check below (it is not in the allowed list), so this branch can
        # never be used end-to-end -- confirm whether '-EL' should be allowed
        xmin = 0.
        xmax = 360.
        ymin = 0.
        ymax = 90.
        xsign = -1.
        xoffset = 360. # when x = 360. should be at zero.
        maptype = 'AZEL'
    elif gridtype == 'RA0':
        # put 0 hours in the middle of the plot
        xmin = 0.
        xmax = 360.
        ymin = -41.
        ymax = 89.
        xsign = -1.
        xoffset = 180. # when x = 360. should be at zero.
        gridtype = 'RA'
    elif gridtype == 'GAL':
        xmin = -180.
        xmax = 180.
        ymin = -90.
        ymax = 90.
        maptype = 'GAL'
    if gridtype != 'RA' and gridtype != 'GAL' and gridtype != '-RA' and gridtype != "RA0":
        print('Error parsing grid type: ', gridtype)
        print('1st argument should be either RA, -RA or GAL')
        exit()
    rs = radioastronomy.Spectrum()
    if doRatio:
        #create the grid with map parameters, one grid per telescope
        grid1 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
                               height=height, dpi=dpi, FWHM=FWHM, \
                               projection="-CAR", gridtype=maptype)
        grid2 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
                               height=height, dpi=dpi, FWHM=FWHM, \
                               projection="-CAR", gridtype=maptype)
        grid3 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
                               height=height, dpi=dpi, FWHM=FWHM, \
                               projection="-CAR", gridtype=maptype)
        grid4 = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
                               height=height, dpi=dpi, FWHM=FWHM, \
                               projection="-CAR", gridtype=maptype)
        # put each telescope in a different grid
        grids = [grid1, grid2, grid3, grid4]
    # the all-telescope grid is always created
    gridall = GridClass.Grid(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, width=width, \
                             height=height, dpi=dpi, FWHM=FWHM, \
                             projection="-CAR", gridtype=maptype)
    projection = "-AIT"
    # coldfile provides observing parameters (telescope location etc.)
    coldfile = sys.argv[2]
    # get telescope geographic location etc
    print("Reading Observing parameters from: %s" % (coldfile))
    rs.read_spec_ast(coldfile)
    print("Observer: %s " % (rs.observer))
    # first read through all data and find hot load
    names = sys.argv[3:]
    names = sorted(names)
    firsttime = ""
    lasttime = ""
    count = 0
    # setup grid indicies so that cpuIndex goes to the correct grid
    # This assumes telescopes 2,3,4,5 are being used]
    gridIndex = [0,0,0,1,2,3]
    # for all save Files to Grid
    for filename in names:
        print("File: %s" % (filename))
        f = open(filename)
        date = "Unknown"
        # read rows until readSaveValues returns an empty date (EOF)
        while date != "":
            date, time, cpuIndex, telaz, telel, tSys, tRx, tRms, tint, KperC, tSourcemax, velSource, dV, tVSum, tVSumRms, tSumKmSec, dTSumKmSec, gainFactor = gainfactor.readSaveValues( f)
            dlen = len(date)
            if dlen < 1:
                break
            if date[0] == "#":
                continue
            # else not a comment, process the line
            count = count + 1
            # dates in the save file lack the century; prepend "20"
            isodate = "20"+date+"T"+time
            # print("DateTime: %s" % (isodate))
            rs.utc = datetime.datetime.strptime(isodate,"%Y-%m-%dT%H:%M:%S")
            # print("Utc: %s" % (rs.utc))
            rs.telaz = telaz
            rs.telel = telel
            # convert az/el pointing to RA/Dec and galactic coordinates
            rs.azel2radec()
            ra = rs.ra
            dec = rs.dec
            lon = rs.gallon
            lat = rs.gallat
            tsum = tSumKmSec
            tsdv = dTSumKmSec
            tmax = tSourcemax
            vave = tVSum
            vsdv = tVSumRms
            # track the first and last observation dates for the plot title
            if firsttime == "":
                firsttime = date
            else:
                lasttime = date
            # if vave > -100. and vave < 100:
            #     mygrid.convolve( lon, lat, vave, 1.)
            iGrid = gridIndex[cpuIndex]
            gainCorr = telescopefactors[iGrid]
            tsum = tsum * gainCorr
            # convolve this observation onto the selected coordinate system
            if gridtype == 'RA':
                if doRatio:
                    grids[iGrid].convolve(ra, dec, tsum, weight)
                gridall.convolve( ra, dec, tsum, weight)
            elif gridtype == '-RA':
                x = (ra*xsign) + xoffset
                if doRatio:
                    grids[iGrid].convolve(x, dec, tsum, weight)
                gridall.convolve( x, dec, tsum, weight)
            elif gridtype == 'RA0':
                x = (ra*xsign) + xoffset
                # wrap into the [0, xmax] range
                if x < 0:
                    x = x + xmax
                elif x > xmax:
                    x = x - xmax
                if doRatio:
                    grids[iGrid].convolve(x, dec, tsum, weight)
                gridall.convolve( x, dec, tsum, weight)
            else:
                if doRatio:
                    grids[iGrid].convolve(lon, lat, tsum, weight)
                gridall.convolve( lon, lat, tsum, weight)
            # NOTE(review): count was already incremented above, so this
            # branch is unreachable; it also references an undefined name
            # `n` and would raise NameError if ever executed -- confirm
            if count == 0:
                print('Convolving Coordinates: ', ra, dec, lon, lat)
                print('Convolving Intensities: ', tsum, tsdv, vave, vsdv)
                print('Convolvign Parameters : ', n, time)
            count = count + 1
        # end reading all lines in save file
        f.close()
    # normalize each of the gridded images
    if doRatio:
        grids[0].normalize()
        grids[1].normalize()
        grids[2].normalize()
        grids[3].normalize()
    gridall.normalize()
    # mygrid.check()
    # zmin = -1000.
    # zmax = 3000.
    # limit grid intensities for plotting
    # mygrid.set_ij( 0, 0, zmax, 1.)
    # mygrid.set_ij( 1, 1, zmin, 1.)
    # mygrid.limit(zmin, zmax)
    subplots = False
    if subplots:
        # NOTE(review): this disabled branch references zmin/zmax, which are
        # only defined in the commented-out lines above; it would raise
        # NameError if enabled -- confirm before turning subplots on
        fig, ax = plt.subplots(figsize=(myheight, mywidth), dpi=dpi)
        if gridtype == 'RA':
            cax = fig.add_axes([-180, 180], [-90, 90])
        else:
            cax = fig.add_axes([0, 24], [-90, 90])
        cbar = fig.colorbar(cax, ticks=[zmin, zmax], orientation='horizontal')
        cbar.ax.set_yticklabels([str(zmin), str(zmax)])
        ax.set_title("Citizen Science: Horn observations of our Galaxy")
    else:
        #y_ticks = ymin + (ymax-ymin)*ticks/myheight
        ticks = np.arange(0, mywidth, 30*dpi)
        x_ticks = xmin + ((xmax-xmin)*ticks/mywidth)
        plt.imshow(gridall.image, interpolation='nearest', cmap=plt.get_cmap('jet'))
    # title shows the observation date range
    if firsttime != lasttime:
        plt.title("Citizen Science: Observing our Galaxy: %s to %s" % (firsttime, lasttime))
    else:
        plt.title("Citizen Science: Observing our Galaxy: %s" % (firsttime))
    # axis labels and tick values depend on the coordinate system plotted
    if gridtype == 'RA':
        plt.xlabel("Right Ascension (hours)")
        plt.ylabel("Declination (degrees)")
        labels = ticks/(mywidth/24)
        yticks = np.arange(0, myheight, 15*dpi)
    elif gridtype == '-RA':
        plt.xlabel("Right Ascension (hours)")
        plt.ylabel("Declination (degrees)")
        labels = 24 - (ticks/(mywidth/24))
        # NOTE(review): the second assignment overrides the first -- confirm
        # the leftmost label is meant to read 24, not 0
        labels[0] = 0
        labels[0] = 24
        yticks = np.arange(0, myheight, 15*dpi)
    elif gridtype == '-EL':
        plt.xlabel("Right Ascension (hours)")
        plt.ylabel("Elevation (degrees)")
        labels = 24 - (ticks/(mywidth/24))
        labels[0] = 0
        labels[0] = 24
        yticks = np.arange(0, myheight, 15*dpi)
    elif gridtype == 'RA0': # put 0 hours in middle of plot
        plt.xlabel("Right Ascension (hours)")
        plt.ylabel("Declination (degrees)")
        labels = 12 - (ticks/(mywidth/24))
        nlabels = len(labels)
        # wrap negative hour labels into the 0..24 range
        for iii in range(nlabels):
            if labels[iii] < 0:
                labels[iii] = 24 + labels[iii]
            if labels[iii] == 24:
                labels[iii] = 0
        yticks = np.arange(0, myheight, 15*dpi)
    else:
        # galactic coordinates
        yticks = np.arange(0, myheight, 30*dpi)
        ticks = np.arange(0, mywidth, 30*dpi)
        x_ticks = xmin + (xmax-xmin)*ticks/mywidth
        labels = x_ticks
        plt.xlabel("Galactic Longitude (degrees)")
        plt.ylabel("Galactic Latitude (degrees)")
    # want an integer list of labels
    # slabels = str(labels)
    print(ticks, labels)
    y_ticks = ymax - (ymax-ymin)*yticks/myheight
    plt.yticks(yticks, y_ticks)
    plt.xticks(ticks, labels, rotation='horizontal')
    plt.colorbar()
    # FITS-style reference values for the image center
    crval2 = (xmin + xmax)/2.
    crval1 = (ymin + ymax)/2.
    cdelt1 = (-1./float(dpi)) - .001
    cdelt2 = (1./float(dpi)) + .001
    if doRatio:
        # now show each of the per-telescope images
        for iGrid in range(4):
            # flip the image top-to-bottom before writing the FITS file
            imagetemp = copy.deepcopy(grids[iGrid].image)
            imagetemp2 = copy.deepcopy(grids[iGrid].image)
            kkk = myheight - 1
            for jjj in range(myheight):
                imagetemp[:][kkk] = imagetemp2[:][jjj]
                kkk = kkk - 1
            grids[iGrid].image = imagetemp
            writeFitsImage( rs, iGrid+2, grids[iGrid], projection)
        # put each telescope in a different grid
        ratio1 = copy.deepcopy(grid1)
        ratio2 = copy.deepcopy(grid1)
        ratio3 = copy.deepcopy(grid1)
        gratios = [ratio1, ratio2, ratio3]
        ratios = np.zeros(3)
        rmss = np.zeros(3)
        # compare telescopes 0..2 against telescope 3
        jGrid = 3
        for iGrid in range(3):
            print("Gain Ratios for Telescopes T%d and T%d" % (iGrid+2, jGrid+2))
            ratio, rms, aratio = gridratio(grids[iGrid], grids[jGrid])
            ratios[iGrid] = ratio
            rmss[iGrid] = rms
            writeFitsImage( rs, iGrid+2, aratio, projection)
    # always write the all-telescope image
    writeFitsImage( rs, 0, gridall, projection)
    plt.show()
# run the gridder when executed as a script
if __name__ == "__main__":
    main()
#SIMPLE = T / conforms to FITS standard
#BITPIX = -32 / array data type
#NAXIS = 2 / number of array dimensions
#NAXIS1 = 4323
#NAXIS2 = 2163
#OBJECT = 'HI4PI ' / The HI 4-PI Survey
#TELESCOP= 'Effelsberg 100m RT; ATNF Parkes 64-m' / Telescope names
#ORIGIN = 'AIfA/MPIfR Bonn; ATNF Sydney' / Organisations or Institutions
#REFERENC= 'HI4PI Collaboration 2016' / A&A
#RESTFRQ = 1420405751.77
#RESTWAV = 0.211061140541
#CDELT1 = -0.08333333330000001
#CRPIX1 = 2162.0
#CRVAL1 = 0.0
#CTYPE1 = 'RA---CAR'
#CUNIT1 = 'deg '
#CDELT2 = 0.08333333330000001
#CRPIX2 = 1082.0
#CRVAL2 = 0.0
#CTYPE2 = 'DEC--CAR'
#CUNIT2 = 'deg '
#WCSAXES = 2
#RADESYS = 'FK5 '
#EQUINOX = 2000.0
#LONPOLE = 0.0
#LATPOLE = 90.0
#BUNIT = 'cm^(-2) '
#BPA = 0.0
#BMAJ = 0.2706
#BMIN = 0.2706
#VMIN = -1.5169915133210E+21
#VMAX = 2.39529868415209E+22
#CHECKSUM= '9eqZJcnY9cnYGcnY' / HDU checksum updated 2016-09-15T23:38:45
#DATASUM = '3638685465' / data unit checksum updated 2016-09-15T23:38:45
#END
| for iii in range (nx):
imageCopy[jjj][iii] = nan | conditional_block |
prec_climber.rs | // pest. The Elegant Parser
// Copyright (c) 2018 Dragoș Tiselice
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Constructs useful in infix operator parsing with the precedence climbing method.
use alloc::borrow::Cow;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::iter::Peekable;
use core::ops::BitOr;
use crate::iterators::Pair;
use crate::RuleType;
/// Macro for more convenient const fn definition of `prec_climber::PrecClimber`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # use pest::prec_climber;
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// #     plus,
/// #     minus,
/// #     times,
/// #     divide,
/// #     power
/// # }
/// static CLIMBER: PrecClimber<Rule> = prec_climber![
///     L   plus | minus,
///     L   times | divide,
///     R   power,
/// ];
/// ```
#[cfg(feature = "const_prec_climber")]
#[macro_export]
macro_rules! prec_climber {
    // Entry arm: one `|`-separated group per precedence level; earlier
    // groups bind loosest (precedence 1), later groups bind tighter.
    (
        $( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)?
    ) => {{
        prec_climber!(
            @precedences { 1u32 }
            $( [ $rule $( $rules )* ] )*
        );

        $crate::prec_climber::PrecClimber::new_const(
            prec_climber!(
                @array
                $( $assoc $rule $(, $assoc $rules )* ),*
            )
        )
    }};

    // Map the `L`/`R` shorthand to the `Assoc` enum variants.
    ( @assoc L ) => { $crate::prec_climber::Assoc::Left };
    ( @assoc R ) => { $crate::prec_climber::Assoc::Right };

    // Build the `&[(Rule, u32, Assoc)]` slice literal, pairing each rule
    // with the per-rule precedence constant generated by @precedences.
    (
        @array
        $(
            $assoc:ident $rule:ident
        ),*
    ) => {
        &[
            $(
                (
                    Rule::$rule,
                    $rule,
                    prec_climber!( @assoc $assoc ),
                )
            ),*
        ]
    };

    // Recursion terminator for @precedences.
    (
        @precedences { $precedence:expr }
    ) => {};

    // Emit `const $rule: u32 = N;` for every rule in the current level,
    // then recurse with N + 1 for the remaining levels.
    (
        @precedences { $precedence:expr }
        [ $( $rule:ident )* ]
        $( [ $( $rules:ident )* ] )*
    ) => {
        $(
            #[allow(non_upper_case_globals)]
            const $rule: u32 = $precedence;
        )*
        prec_climber!(
            @precedences { 1u32 + $precedence }
            $( [ $( $rules )* ] )*
        );
    };
}
/// Associativity of an [`Operator`].
///
/// Determines how operators of equal precedence group: left-associative
/// operators evaluate `a - b - c` as `(a - b) - c`, while right-associative
/// operators evaluate `a ^ b ^ c` as `a ^ (b ^ c)`.
///
/// [`Operator`]: struct.Operator.html
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Assoc {
    /// Left `Operator` associativity
    Left,
    /// Right `Operator` associativity
    Right,
}
/// Infix operator used in [`PrecClimber`].
///
/// [`PrecClimber`]: struct.PrecClimber.html
#[derive(Debug)]
pub struct Operator<R: RuleType> {
    // the rule this operator matches
    rule: R,
    // associativity of this operator
    assoc: Assoc,
    // next operator in the same precedence level; built by `|` (see BitOr)
    next: Option<Box<Operator<R>>>,
}
impl<R: RuleType> Operator<R> {
    /// Creates a new `Operator` from a `Rule` and `Assoc`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, Operator};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus
    /// # }
    /// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right);
    /// ```
    pub fn new(rule: R, assoc: Assoc) -> Operator<R> {
        Operator {
            rule,
            assoc,
            // `next` is filled in later when operators are chained with `|`
            next: None,
        }
    }
}
impl<R: RuleType> BitOr for Operator<R> {
    type Output = Self;

    /// Chains `rhs` onto the end of this operator's linked list, so that
    /// `a | b | c` builds a single precedence group containing all three.
    fn bitor(mut self, rhs: Self) -> Self {
        // Recursively walk to the tail of the chain and attach `tail` there.
        fn append<R: RuleType>(node: &mut Operator<R>, tail: Operator<R>) {
            match node.next {
                Some(ref mut child) => append(child, tail),
                None => node.next = Some(Box::new(tail)),
            }
        }

        append(&mut self, rhs);
        self
    }
}
/// List of operators and precedences, which can perform [precedence climbing][1] on infix
/// expressions contained in a [`Pairs`]. The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
    // (rule, precedence, associativity) triples; precedence starts at 1 and
    // higher values bind tighter. Borrowed for `new_const`, owned for `new`.
    ops: Cow<'static, [(R, u32, Assoc)]>,
}
#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
    /// Creates a new `PrecClimber` directly from a static slice of
    /// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
    ///
    /// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
    /// sorted.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, PrecClimber};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus,
    /// #     times,
    /// #     divide,
    /// #     power
    /// # }
    /// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
    ///     (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
    ///     (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
    ///     (Rule::power, 3, Assoc::Right)
    /// ]);
    /// ```
    pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
        PrecClimber {
            // borrow the static slice: no allocation, usable in const context
            ops: Cow::Borrowed(ops),
        }
    }
}
impl<R: RuleType> PrecClimber<R> {
    // find matching operator by `rule`, returning its (precedence, assoc)
    fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
        self.ops
            .iter()
            .find(|(r, _, _)| r == rule)
            .map(|(_, precedence, assoc)| (*precedence, *assoc))
    }

    /// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
    /// `Vec` has precedence *index + 1*. In order to have operators with same precedence, they need
    /// to be chained with `|` between them.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus,
    /// #     times,
    /// #     divide,
    /// #     power
    /// # }
    /// PrecClimber::new(vec![
    ///     Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
    ///     Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
    ///     Operator::new(Rule::power, Assoc::Right)
    /// ]);
    /// ```
    pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
        // Flatten each `|`-chained group into (rule, precedence, assoc)
        // triples; the group's 1-based position is its precedence.
        let ops = ops
            .into_iter()
            .zip(1..)
            .fold(Vec::new(), |mut vec, (op, prec)| {
                let mut next = Some(op);
                // walk the linked list that `BitOr` built
                while let Some(op) = next.take() {
                    let Operator {
                        rule,
                        assoc,
                        next: op_next,
                    } = op;

                    vec.push((rule, prec, assoc));
                    next = op_next.map(|op| *op);
                }
                vec
            });

        PrecClimber {
            ops: Cow::Owned(ops),
        }
    }

    /// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
    /// *Primary* pairs are mapped with `primary` and then reduced to one single result with
    /// `infix`.
    ///
    /// # Panics
    ///
    /// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
    /// *primary* order is not respected.
    ///
    /// # Examples
    ///
    /// ```ignore
    /// let primary = |pair| {
    ///     consume(pair, climber)
    /// };
    /// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| {
    ///     match op.rule() {
    ///         Rule::plus => lhs + rhs,
    ///         Rule::minus => lhs - rhs,
    ///         Rule::times => lhs * rhs,
    ///         Rule::divide => lhs / rhs,
    ///         Rule::power => lhs.pow(rhs as u32),
    ///         _ => unreachable!()
    ///     }
    /// };
    ///
    /// let result = climber.climb(pairs, primary, infix);
    /// ```
    pub fn climb<'i, P, F, G, T>(&self, mut pairs: P, mut primary: F, mut infix: G) -> T
    where
        P: Iterator<Item = Pair<'i, R>>,
        F: FnMut(Pair<'i, R>) -> T,
        G: FnMut(T, Pair<'i, R>, T) -> T,
    {
        // the expression must start with a primary pair
        let lhs = primary(
            pairs
                .next()
                .expect("precedence climbing requires a non-empty Pairs"),
        );

        self.climb_rec(lhs, 0, &mut pairs.peekable(), &mut primary, &mut infix)
    }

    // Recursive core of the climbing algorithm: consumes operators whose
    // precedence is at least `min_prec`, folding their operands into `lhs`.
    fn climb_rec<'i, P, F, G, T>(
        &self,
        mut lhs: T,
        min_prec: u32,
        pairs: &mut Peekable<P>,
        primary: &mut F,
        infix: &mut G,
    ) -> T
    where
        P: Iterator<Item = Pair<'i, R>>,
        F: FnMut(Pair<'i, R>) -> T,
        G: FnMut(T, Pair<'i, R>, T) -> T,
    {
        while pairs.peek().is_some() {
            let rule = pairs.peek().unwrap().as_rule();
            if let Some((prec, _)) = self.get(&rule) {
                if prec >= min_prec {
                    // consume the operator and its right-hand primary
                    let op = pairs.next().unwrap();
                    let mut rhs = primary(pairs.next().expect(
                        "infix operator must be followed by \
                         a primary expression",
                    ));

                    // climb into the right-hand side while the upcoming
                    // operator binds tighter (or equally, when it is
                    // right-associative)
                    while pairs.peek().is_some() {
                        let rule = pairs.peek().unwrap().as_rule();
                        if let Some((new_prec, assoc)) = self.get(&rule) {
                            if new_prec > prec || assoc == Assoc::Right && new_prec == prec {
                                rhs = self.climb_rec(rhs, new_prec, pairs, primary, infix);
                            } else {
                                // looser binding: fold at this level first
                                break;
                            }
                        } else {
                            // not a known operator: stop climbing
                            break;
                        }
                    }

                    lhs = infix(lhs, op, rhs);
                } else {
                    // operator binds less tightly than this level allows
                    break;
                }
            } else {
                break;
            }
        }

        lhs
    }
} | random_line_split | |
prec_climber.rs | // pest. The Elegant Parser
// Copyright (c) 2018 Dragoș Tiselice
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Constructs useful in infix operator parsing with the precedence climbing method.
use alloc::borrow::Cow;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::iter::Peekable;
use core::ops::BitOr;
use crate::iterators::Pair;
use crate::RuleType;
/// Macro for more convenient const fn definition of `prec_climber::PrecClimber`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # use pest::prec_climber;
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = prec_climber![
/// L plus | minus,
/// L times | divide,
/// R power,
/// ];
/// ```
#[cfg(feature = "const_prec_climber")]
#[macro_export]
macro_rules! prec_climber {
(
$( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)?
) => {{
prec_climber!(
@precedences { 1u32 }
$( [ $rule $( $rules )* ] )*
);
$crate::prec_climber::PrecClimber::new_const(
prec_climber!(
@array
$( $assoc $rule $(, $assoc $rules )* ),*
)
)
}};
( @assoc L ) => { $crate::prec_climber::Assoc::Left };
( @assoc R ) => { $crate::prec_climber::Assoc::Right };
(
@array
$(
$assoc:ident $rule:ident
),*
) => {
&[
$(
(
Rule::$rule,
$rule,
prec_climber!( @assoc $assoc ),
)
),*
]
};
(
@precedences { $precedence:expr }
) => {};
(
@precedences { $precedence:expr }
[ $( $rule:ident )* ]
$( [ $( $rules:ident )* ] )*
) => {
$(
#[allow(non_upper_case_globals)]
const $rule: u32 = $precedence;
)*
prec_climber!(
@precedences { 1u32 + $precedence }
$( [ $( $rules )* ] )*
);
};
}
/// Associativity of an [`Operator`].
///
/// [`Operator`]: struct.Operator.html
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Assoc {
/// Left `Operator` associativity
Left,
/// Right `Operator` associativity
Right,
}
/// Infix operator used in [`PrecClimber`].
///
/// [`PrecClimber`]: struct.PrecClimber.html
#[derive(Debug)]
pub struct Operator<R: RuleType> {
rule: R,
assoc: Assoc,
next: Option<Box<Operator<R>>>,
}
impl<R: RuleType> Operator<R> {
/// Creates a new `Operator` from a `Rule` and `Assoc`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus
/// # }
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right);
/// ```
pub fn new(rule: R, assoc: Assoc) -> Operator<R> {
Operator {
rule,
assoc,
next: None,
}
}
}
impl<R: RuleType> BitOr for Operator<R> {
type Output = Self;
fn bitor(mut self, rhs: Self) -> Self { | }
/// List of operators and precedences, which can perform [precedence climbing][1] on infix
/// expressions contained in a [`Pairs`]. The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
ops: Cow<'static, [(R, u32, Assoc)]>,
}
#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
/// Creates a new `PrecClimber` directly from a static slice of
/// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
///
/// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
/// sorted.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
/// (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
/// (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
/// (Rule::power, 3, Assoc::Right)
/// ]);
/// ```
pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
PrecClimber {
ops: Cow::Borrowed(ops),
}
}
}
impl<R: RuleType> PrecClimber<R> {
// find matching operator by `rule`
fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
self.ops
.iter()
.find(|(r, _, _)| r == rule)
.map(|(_, precedence, assoc)| (*precedence, *assoc))
}
/// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
/// `Vec` has precedence *index + 1*. In order to have operators with same precedence, they need
/// to be chained with `|` between them.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// PrecClimber::new(vec![
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
/// Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
/// Operator::new(Rule::power, Assoc::Right)
/// ]);
/// ```
pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
let ops = ops
.into_iter()
.zip(1..)
.fold(Vec::new(), |mut vec, (op, prec)| {
let mut next = Some(op);
while let Some(op) = next.take() {
let Operator {
rule,
assoc,
next: op_next,
} = op;
vec.push((rule, prec, assoc));
next = op_next.map(|op| *op);
}
vec
});
PrecClimber {
ops: Cow::Owned(ops),
}
}
/// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
/// *Primary* pairs are mapped with `primary` and then reduced to one single result with
/// `infix`.
///
/// # Panics
///
/// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
/// *primary* order is not respected.
///
/// # Examples
///
/// ```ignore
/// let primary = |pair| {
/// consume(pair, climber)
/// };
/// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| {
/// match op.rule() {
/// Rule::plus => lhs + rhs,
/// Rule::minus => lhs - rhs,
/// Rule::times => lhs * rhs,
/// Rule::divide => lhs / rhs,
/// Rule::power => lhs.pow(rhs as u32),
/// _ => unreachable!()
/// }
/// };
///
/// let result = climber.climb(pairs, primary, infix);
/// ```
pub fn climb<'i, P, F, G, T>(&self, mut pairs: P, mut primary: F, mut infix: G) -> T
where
P: Iterator<Item = Pair<'i, R>>,
F: FnMut(Pair<'i, R>) -> T,
G: FnMut(T, Pair<'i, R>, T) -> T,
{
let lhs = primary(
pairs
.next()
.expect("precedence climbing requires a non-empty Pairs"),
);
self.climb_rec(lhs, 0, &mut pairs.peekable(), &mut primary, &mut infix)
}
fn climb_rec<'i, P, F, G, T>(
&self,
mut lhs: T,
min_prec: u32,
pairs: &mut Peekable<P>,
primary: &mut F,
infix: &mut G,
) -> T
where
P: Iterator<Item = Pair<'i, R>>,
F: FnMut(Pair<'i, R>) -> T,
G: FnMut(T, Pair<'i, R>, T) -> T,
{
while pairs.peek().is_some() {
let rule = pairs.peek().unwrap().as_rule();
if let Some((prec, _)) = self.get(&rule) {
if prec >= min_prec {
let op = pairs.next().unwrap();
let mut rhs = primary(pairs.next().expect(
"infix operator must be followed by \
a primary expression",
));
while pairs.peek().is_some() {
let rule = pairs.peek().unwrap().as_rule();
if let Some((new_prec, assoc)) = self.get(&rule) {
if new_prec > prec || assoc == Assoc::Right && new_prec == prec {
rhs = self.climb_rec(rhs, new_prec, pairs, primary, infix);
} else {
break;
}
} else {
break;
}
}
lhs = infix(lhs, op, rhs);
} else {
break;
}
} else {
break;
}
}
lhs
}
}
|
fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) {
if let Some(ref mut child) = op.next {
assign_next(child, next);
} else {
op.next = Some(Box::new(next));
}
}
assign_next(&mut self, rhs);
self
}
| identifier_body |
prec_climber.rs | // pest. The Elegant Parser
// Copyright (c) 2018 Dragoș Tiselice
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Constructs useful in infix operator parsing with the precedence climbing method.
use alloc::borrow::Cow;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::iter::Peekable;
use core::ops::BitOr;
use crate::iterators::Pair;
use crate::RuleType;
/// Macro for more convenient const fn definition of `prec_climber::PrecClimber`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # use pest::prec_climber;
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = prec_climber![
/// L plus | minus,
/// L times | divide,
/// R power,
/// ];
/// ```
#[cfg(feature = "const_prec_climber")]
#[macro_export]
macro_rules! prec_climber {
(
$( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)?
) => {{
prec_climber!(
@precedences { 1u32 }
$( [ $rule $( $rules )* ] )*
);
$crate::prec_climber::PrecClimber::new_const(
prec_climber!(
@array
$( $assoc $rule $(, $assoc $rules )* ),*
)
)
}};
( @assoc L ) => { $crate::prec_climber::Assoc::Left };
( @assoc R ) => { $crate::prec_climber::Assoc::Right };
(
@array
$(
$assoc:ident $rule:ident
),*
) => {
&[
$(
(
Rule::$rule,
$rule,
prec_climber!( @assoc $assoc ),
)
),*
]
};
(
@precedences { $precedence:expr }
) => {};
(
@precedences { $precedence:expr }
[ $( $rule:ident )* ]
$( [ $( $rules:ident )* ] )*
) => {
$(
#[allow(non_upper_case_globals)]
const $rule: u32 = $precedence;
)*
prec_climber!(
@precedences { 1u32 + $precedence }
$( [ $( $rules )* ] )*
);
};
}
/// Associativity of an [`Operator`].
///
/// [`Operator`]: struct.Operator.html
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Assoc {
/// Left `Operator` associativity
Left,
/// Right `Operator` associativity
Right,
}
/// Infix operator used in [`PrecClimber`].
///
/// [`PrecClimber`]: struct.PrecClimber.html
#[derive(Debug)]
pub struct Operator<R: RuleType> {
rule: R,
assoc: Assoc,
next: Option<Box<Operator<R>>>,
}
impl<R: RuleType> Operator<R> {
/// Creates a new `Operator` from a `Rule` and `Assoc`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus
/// # }
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right);
/// ```
pub fn new(rule: R, assoc: Assoc) -> Operator<R> {
Operator {
rule,
assoc,
next: None,
}
}
}
impl<R: RuleType> BitOr for Operator<R> {
type Output = Self;
fn bitor(mut self, rhs: Self) -> Self {
fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) {
if let Some(ref mut child) = op.next {
assign_next(child, next);
} else {
op.next = Some(Box::new(next));
}
}
assign_next(&mut self, rhs);
self
}
}
/// List of operators and precedences, which can perform [precedence climbing][1] on infix
/// expressions contained in a [`Pairs`]. The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
ops: Cow<'static, [(R, u32, Assoc)]>,
}
#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
/// Creates a new `PrecClimber` directly from a static slice of
/// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
///
/// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
/// sorted.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
/// (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
/// (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
/// (Rule::power, 3, Assoc::Right)
/// ]);
/// ```
pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
PrecClimber {
ops: Cow::Borrowed(ops),
}
}
}
impl<R: RuleType> PrecClimber<R> {
// find matching operator by `rule`
fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
self.ops
.iter()
.find(|(r, _, _)| r == rule)
.map(|(_, precedence, assoc)| (*precedence, *assoc))
}
/// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
/// `Vec` has precedence *index + 1*. In order to have operators with same precedence, they need
/// to be chained with `|` between them.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// PrecClimber::new(vec![
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
/// Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
/// Operator::new(Rule::power, Assoc::Right)
/// ]);
/// ```
pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
let ops = ops
.into_iter()
.zip(1..)
.fold(Vec::new(), |mut vec, (op, prec)| {
let mut next = Some(op);
while let Some(op) = next.take() {
let Operator {
rule,
assoc,
next: op_next,
} = op;
vec.push((rule, prec, assoc));
next = op_next.map(|op| *op);
}
vec
});
PrecClimber {
ops: Cow::Owned(ops),
}
}
/// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
/// *Primary* pairs are mapped with `primary` and then reduced to one single result with
/// `infix`.
///
/// # Panics
///
/// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
/// *primary* order is not respected.
///
/// # Examples
///
/// ```ignore
/// let primary = |pair| {
/// consume(pair, climber)
/// };
/// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| {
/// match op.rule() {
/// Rule::plus => lhs + rhs,
/// Rule::minus => lhs - rhs,
/// Rule::times => lhs * rhs,
/// Rule::divide => lhs / rhs,
/// Rule::power => lhs.pow(rhs as u32),
/// _ => unreachable!()
/// }
/// };
///
/// let result = climber.climb(pairs, primary, infix);
/// ```
pub fn c | 'i, P, F, G, T>(&self, mut pairs: P, mut primary: F, mut infix: G) -> T
where
P: Iterator<Item = Pair<'i, R>>,
F: FnMut(Pair<'i, R>) -> T,
G: FnMut(T, Pair<'i, R>, T) -> T,
{
let lhs = primary(
pairs
.next()
.expect("precedence climbing requires a non-empty Pairs"),
);
self.climb_rec(lhs, 0, &mut pairs.peekable(), &mut primary, &mut infix)
}
fn climb_rec<'i, P, F, G, T>(
&self,
mut lhs: T,
min_prec: u32,
pairs: &mut Peekable<P>,
primary: &mut F,
infix: &mut G,
) -> T
where
P: Iterator<Item = Pair<'i, R>>,
F: FnMut(Pair<'i, R>) -> T,
G: FnMut(T, Pair<'i, R>, T) -> T,
{
while pairs.peek().is_some() {
let rule = pairs.peek().unwrap().as_rule();
if let Some((prec, _)) = self.get(&rule) {
if prec >= min_prec {
let op = pairs.next().unwrap();
let mut rhs = primary(pairs.next().expect(
"infix operator must be followed by \
a primary expression",
));
while pairs.peek().is_some() {
let rule = pairs.peek().unwrap().as_rule();
if let Some((new_prec, assoc)) = self.get(&rule) {
if new_prec > prec || assoc == Assoc::Right && new_prec == prec {
rhs = self.climb_rec(rhs, new_prec, pairs, primary, infix);
} else {
break;
}
} else {
break;
}
}
lhs = infix(lhs, op, rhs);
} else {
break;
}
} else {
break;
}
}
lhs
}
}
| limb< | identifier_name |
prec_climber.rs | // pest. The Elegant Parser
// Copyright (c) 2018 Dragoș Tiselice
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Constructs useful in infix operator parsing with the precedence climbing method.
use alloc::borrow::Cow;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::iter::Peekable;
use core::ops::BitOr;
use crate::iterators::Pair;
use crate::RuleType;
/// Macro for more convenient const fn definition of `prec_climber::PrecClimber`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # use pest::prec_climber;
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = prec_climber![
/// L plus | minus,
/// L times | divide,
/// R power,
/// ];
/// ```
#[cfg(feature = "const_prec_climber")]
#[macro_export]
macro_rules! prec_climber {
(
$( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)?
) => {{
prec_climber!(
@precedences { 1u32 }
$( [ $rule $( $rules )* ] )*
);
$crate::prec_climber::PrecClimber::new_const(
prec_climber!(
@array
$( $assoc $rule $(, $assoc $rules )* ),*
)
)
}};
( @assoc L ) => { $crate::prec_climber::Assoc::Left };
( @assoc R ) => { $crate::prec_climber::Assoc::Right };
(
@array
$(
$assoc:ident $rule:ident
),*
) => {
&[
$(
(
Rule::$rule,
$rule,
prec_climber!( @assoc $assoc ),
)
),*
]
};
(
@precedences { $precedence:expr }
) => {};
(
@precedences { $precedence:expr }
[ $( $rule:ident )* ]
$( [ $( $rules:ident )* ] )*
) => {
$(
#[allow(non_upper_case_globals)]
const $rule: u32 = $precedence;
)*
prec_climber!(
@precedences { 1u32 + $precedence }
$( [ $( $rules )* ] )*
);
};
}
/// Associativity of an [`Operator`].
///
/// [`Operator`]: struct.Operator.html
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Assoc {
/// Left `Operator` associativity
Left,
/// Right `Operator` associativity
Right,
}
/// Infix operator used in [`PrecClimber`].
///
/// [`PrecClimber`]: struct.PrecClimber.html
#[derive(Debug)]
pub struct Operator<R: RuleType> {
rule: R,
assoc: Assoc,
next: Option<Box<Operator<R>>>,
}
impl<R: RuleType> Operator<R> {
/// Creates a new `Operator` from a `Rule` and `Assoc`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus
/// # }
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right);
/// ```
pub fn new(rule: R, assoc: Assoc) -> Operator<R> {
Operator {
rule,
assoc,
next: None,
}
}
}
impl<R: RuleType> BitOr for Operator<R> {
type Output = Self;
fn bitor(mut self, rhs: Self) -> Self {
fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) {
if let Some(ref mut child) = op.next {
assign_next(child, next);
} else {
op.next = Some(Box::new(next));
}
}
assign_next(&mut self, rhs);
self
}
}
/// List of operators and precedences, which can perform [precedence climbing][1] on infix
/// expressions contained in a [`Pairs`]. The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
ops: Cow<'static, [(R, u32, Assoc)]>,
}
#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
/// Creates a new `PrecClimber` directly from a static slice of
/// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
///
/// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
/// sorted.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
/// (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
/// (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
/// (Rule::power, 3, Assoc::Right)
/// ]);
/// ```
pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
PrecClimber {
ops: Cow::Borrowed(ops),
}
}
}
impl<R: RuleType> PrecClimber<R> {
// find matching operator by `rule`
fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
self.ops
.iter()
.find(|(r, _, _)| r == rule)
.map(|(_, precedence, assoc)| (*precedence, *assoc))
}
/// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
/// `Vec` has precedence *index + 1*. In order to have operators with same precedence, they need
/// to be chained with `|` between them.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// # plus,
/// # minus,
/// # times,
/// # divide,
/// # power
/// # }
/// PrecClimber::new(vec![
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
/// Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
/// Operator::new(Rule::power, Assoc::Right)
/// ]);
/// ```
pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
let ops = ops
.into_iter()
.zip(1..)
.fold(Vec::new(), |mut vec, (op, prec)| {
let mut next = Some(op);
while let Some(op) = next.take() {
let Operator {
rule,
assoc,
next: op_next,
} = op;
vec.push((rule, prec, assoc));
next = op_next.map(|op| *op);
}
vec
});
PrecClimber {
ops: Cow::Owned(ops),
}
}
/// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
/// *Primary* pairs are mapped with `primary` and then reduced to one single result with
/// `infix`.
///
/// # Panics
///
/// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
/// *primary* order is not respected.
///
/// # Examples
///
/// ```ignore
/// let primary = |pair| {
/// consume(pair, climber)
/// };
/// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| {
/// match op.rule() {
/// Rule::plus => lhs + rhs,
/// Rule::minus => lhs - rhs,
/// Rule::times => lhs * rhs,
/// Rule::divide => lhs / rhs,
/// Rule::power => lhs.pow(rhs as u32),
/// _ => unreachable!()
/// }
/// };
///
/// let result = climber.climb(pairs, primary, infix);
/// ```
pub fn climb<'i, P, F, G, T>(&self, mut pairs: P, mut primary: F, mut infix: G) -> T
where
P: Iterator<Item = Pair<'i, R>>,
F: FnMut(Pair<'i, R>) -> T,
G: FnMut(T, Pair<'i, R>, T) -> T,
{
let lhs = primary(
pairs
.next()
.expect("precedence climbing requires a non-empty Pairs"),
);
self.climb_rec(lhs, 0, &mut pairs.peekable(), &mut primary, &mut infix)
}
fn climb_rec<'i, P, F, G, T>(
&self,
mut lhs: T,
min_prec: u32,
pairs: &mut Peekable<P>,
primary: &mut F,
infix: &mut G,
) -> T
where
P: Iterator<Item = Pair<'i, R>>,
F: FnMut(Pair<'i, R>) -> T,
G: FnMut(T, Pair<'i, R>, T) -> T,
{
while pairs.peek().is_some() {
let rule = pairs.peek().unwrap().as_rule();
if let Some((prec, _)) = self.get(&rule) {
if prec >= min_prec {
let op = pairs.next().unwrap();
let mut rhs = primary(pairs.next().expect(
"infix operator must be followed by \
a primary expression",
));
while pairs.peek().is_some() {
let rule = pairs.peek().unwrap().as_rule();
if let Some((new_prec, assoc)) = self.get(&rule) {
if new_prec > prec || assoc == Assoc::Right && new_prec == prec {
rhs = self.climb_rec(rhs, new_prec, pairs, primary, infix);
} else {
break;
}
} else {
break;
}
}
lhs = infix(lhs, op, rhs);
} else { | } else {
break;
}
}
lhs
}
}
|
break;
}
| conditional_block |
subscriber.rs | use super::error::{ErrorKind, Result, ResultExt};
use super::header::{decode, encode, match_field};
use super::{Message, Topic};
use crate::rosmsg::RosMsg;
use crate::util::lossy_channel::{lossy_channel, LossyReceiver, LossySender};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use crossbeam::channel::{bounded, select, Receiver, Sender, TrySendError};
use log::error;
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::net::{SocketAddr, TcpStream, ToSocketAddrs};
use std::sync::Arc;
use std::thread;
enum DataStreamConnectionChange {
Connect(
usize,
LossySender<MessageInfo>,
Sender<HashMap<String, String>>,
),
Disconnect(usize),
}
pub struct SubscriberRosConnection {
next_data_stream_id: usize,
data_stream_tx: Sender<DataStreamConnectionChange>,
publishers_stream: Sender<SocketAddr>,
topic: Topic,
pub connected_ids: BTreeSet<usize>,
pub connected_publishers: BTreeSet<String>,
}
impl SubscriberRosConnection {
pub fn new(
caller_id: &str,
topic: &str,
msg_definition: String,
msg_type: String,
md5sum: String,
) -> SubscriberRosConnection {
let subscriber_connection_queue_size = 8;
let (data_stream_tx, data_stream_rx) = bounded(subscriber_connection_queue_size);
let publisher_connection_queue_size = 8;
let (pub_tx, pub_rx) = bounded(publisher_connection_queue_size);
let caller_id = String::from(caller_id);
let topic_name = String::from(topic);
thread::spawn({
let msg_type = msg_type.clone();
let md5sum = md5sum.clone();
move || {
join_connections(
data_stream_rx,
pub_rx,
&caller_id,
&topic_name,
&msg_definition,
&md5sum,
&msg_type,
)
}
});
let topic = Topic {
name: String::from(topic),
msg_type,
md5sum,
};
SubscriberRosConnection {
next_data_stream_id: 1,
data_stream_tx,
publishers_stream: pub_tx,
topic,
connected_ids: BTreeSet::new(),
connected_publishers: BTreeSet::new(),
}
}
// TODO: allow synchronous handling for subscribers
// This creates a new thread to call on_message. Next API change should
// allow subscribing with either callback or inline handler of the queue.
// The queue is lossy, so it wouldn't be blocking.
pub fn add_subscriber<T, F, G>(
&mut self,
queue_size: usize,
on_message: F,
on_connect: G,
) -> usize
where
T: Message,
F: Fn(T, &str) + Send + 'static,
G: Fn(HashMap<String, String>) + Send + 'static,
{
let data_stream_id = self.next_data_stream_id;
self.connected_ids.insert(data_stream_id);
self.next_data_stream_id += 1;
let (data_tx, data_rx) = lossy_channel(queue_size);
let (connection_tx, connection_rx) = bounded(8);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Connect(
data_stream_id,
data_tx,
connection_tx,
))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to connect to data stream");
}
thread::spawn(move || {
handle_data::<T, F, G>(data_rx, connection_rx, on_message, on_connect)
});
data_stream_id
}
pub fn remove_subscriber(&mut self, id: usize) {
self.connected_ids.remove(&id);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Disconnect(id))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to disconnect from data stream");
}
}
pub fn has_subscribers(&self) -> bool {
!self.connected_ids.is_empty()
}
#[inline]
pub fn publisher_count(&self) -> usize {
self.connected_publishers.len()
}
#[inline]
pub fn publisher_uris(&self) -> Vec<String> {
self.connected_publishers.iter().cloned().collect()
}
#[allow(clippy::useless_conversion)]
pub fn connect_to<U: ToSocketAddrs>(
&mut self,
publisher: &str,
addresses: U,
) -> std::io::Result<()> {
for address in addresses.to_socket_addrs()? {
// This should never fail, so it's safe to unwrap
// Failure could only be caused by the join_connections
// thread not running, which only happens after
// Subscriber has been deconstructed
self.publishers_stream
.send(address)
.expect("Connected thread died");
}
self.connected_publishers.insert(publisher.to_owned());
Ok(())
}
pub fn is_connected_to(&self, publisher: &str) -> bool {
self.connected_publishers.contains(publisher)
}
pub fn limit_publishers_to(&mut self, publishers: &BTreeSet<String>) {
let difference: Vec<String> = self
.connected_publishers
.difference(publishers)
.cloned()
.collect();
for item in difference {
self.connected_publishers.remove(&item);
}
}
pub fn get_topic(&self) -> &Topic {
&self.topic
}
}
fn handle_data<T, F, G>(
data: LossyReceiver<MessageInfo>,
connections: Receiver<HashMap<String, String>>,
on_message: F,
on_connect: G,
) where
T: Message,
F: Fn(T, &str),
G: Fn(HashMap<String, String>) + Send + 'static,
{
loop {
select! {
recv(data.kill_rx.kill_rx) -> _ => break,
recv(data.data_rx) -> msg => match msg {
Err(_) => break,
Ok(buffer) => match RosMsg::decode_slice(&buffer.data) {
Ok(value) => on_message(value, &buffer.caller_id),
Err(err) => error!("Failed to decode message: {}", err),
},
},
recv(connections) -> msg => match msg {
Err(_) => break,
Ok(conn) => on_connect(conn),
},
}
}
}
fn join_connections(
subscribers: Receiver<DataStreamConnectionChange>,
publishers: Receiver<SocketAddr>,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) {
type Sub = (LossySender<MessageInfo>, Sender<HashMap<String, String>>);
let mut subs: BTreeMap<usize, Sub> = BTreeMap::new();
let mut existing_headers: Vec<HashMap<String, String>> = Vec::new();
let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8);
// Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction
loop {
select! {
recv(data_rx) -> msg => {
match msg {
Err(_) => break,
Ok(v) => for sub in subs.values() {
if sub.0.try_send(v.clone()).is_err() {
error!("Failed to send data to subscriber");
}
}
}
}
recv(subscribers) -> msg => {
match msg {
Err(_) => break,
Ok(DataStreamConnectionChange::Connect(id, data, conn)) => {
for header in &existing_headers {
if conn.send(header.clone()).is_err() {
error!("Failed to send connection info for subscriber");
};
}
subs.insert(id, (data, conn));
}
Ok(DataStreamConnectionChange::Disconnect(id)) => {
if let Some((mut data, _)) = subs.remove(&id) {
if data.close().is_err() {
error!("Subscriber data stream to topic has already been killed");
}
}
}
}
}
recv(publishers) -> msg => {
match msg {
Err(_) => break,
Ok(publisher) => {
let result = join_connection(
&data_tx,
&publisher,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)
.chain_err(|| ErrorKind::TopicConnectionFail(topic.into()));
match result {
Ok(headers) => {
for sub in subs.values() {
if sub.1.send(headers.clone()).is_err() {
error!("Failed to send connection info for subscriber");
}
}
existing_headers.push(headers);
}
Err(err) => {
let info = err
.iter()
.map(|v| format!("{}", v))
.collect::<Vec<_>>()
.join("\nCaused by:");
error!("{}", info);
}
}
}
}
}
}
}
}
fn join_connection(
data_stream: &Sender<MessageInfo>,
publisher: &SocketAddr,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let mut stream = TcpStream::connect(publisher)?;
let headers = exchange_headers::<_>(
&mut stream,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)?;
let pub_caller_id = headers.get("callerid").cloned();
let target = data_stream.clone();
thread::spawn(move || {
let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default());
while let Ok(buffer) = package_to_vector(&mut stream) {
if let Err(TrySendError::Disconnected(_)) =
target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
{
// Data receiver has been destroyed after
// Subscriber destructor's kill signal
break;
}
}
});
Ok(headers)
}
fn write_request<U: std::io::Write>(
mut stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<()> {
let mut fields = HashMap::<String, String>::new();
fields.insert(String::from("message_definition"), msg_definition.into());
fields.insert(String::from("callerid"), caller_id.into());
fields.insert(String::from("topic"), topic.into());
fields.insert(String::from("md5sum"), md5sum.into());
fields.insert(String::from("type"), msg_type.into());
encode(&mut stream, &fields)?;
Ok(())
}
fn read_response<U: std::io::Read>(
mut stream: &mut U,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let fields = decode(&mut stream)?;
if md5sum != "*" {
match_field(&fields, "md5sum", md5sum)?;
}
if msg_type != "*" {
match_field(&fields, "type", msg_type)?;
}
Ok(fields)
}
fn exchange_headers<U>(
stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>>
where
U: std::io::Write + std::io::Read,
{
write_request::<U>(stream, caller_id, topic, msg_definition, md5sum, msg_type)?;
read_response::<U>(stream, md5sum, msg_type)
}
#[inline]
fn package_to_vector<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> {
let length = stream.read_u32::<LittleEndian>()?;
let u32_size = std::mem::size_of::<u32>();
let num_bytes = length as usize + u32_size;
// Allocate memory of the proper size for the incoming message. We
// do not initialize the memory to zero here (as would be safe)
// because it is expensive and ultimately unnecessary. We know the
// length of the message and if the length is incorrect, the
// stream reading functions will bail with an Error rather than
// leaving memory uninitialized.
let mut out = Vec::<u8>::with_capacity(num_bytes);
let out_ptr = out.as_mut_ptr();
// Read length from stream.
std::io::Cursor::new(unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, u32_size) })
.write_u32::<LittleEndian>(length)?;
// Read data from stream.
let read_buf = unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, num_bytes) };
stream.read_exact(&mut read_buf[u32_size..])?;
// Don't drop the original Vec which has size==0 and instead use
// its memory to initialize a new Vec with size == capacity == num_bytes.
std::mem::forget(out);
// Return the new, now full and "safely" initialized.
Ok(unsafe { Vec::from_raw_parts(out_ptr, num_bytes, num_bytes) })
}
#[derive(Clone)]
struct MessageInfo {
caller_id: Arc<String>,
data: Vec<u8>,
}
impl MessageInfo {
fn new(caller_id: Arc<String>, data: Vec<u8>) -> Self {
Self { caller_id, data }
}
}
#[cfg(test)]
mod tests {
use super::*;
static FAILED_TO_READ_WRITE_VECTOR: &str = "Failed to read or write from vector";
#[test]
fn package_to_vector_creates_right_buffer_from_reader() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_respects_provided_length() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_fails_if_stream_is_shorter_than_annotated() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5];
package_to_vector(&mut std::io::Cursor::new(input)).unwrap_err();
}
#[test]
fn package_to_vector_fails_leaves_cursor_at_end_of_reading() |
}
| {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 4, 0, 0, 0, 11, 12, 13, 14];
let mut cursor = std::io::Cursor::new(input);
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [4, 0, 0, 0, 11, 12, 13, 14]);
} | identifier_body |
subscriber.rs | use super::error::{ErrorKind, Result, ResultExt};
use super::header::{decode, encode, match_field};
use super::{Message, Topic};
use crate::rosmsg::RosMsg;
use crate::util::lossy_channel::{lossy_channel, LossyReceiver, LossySender};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use crossbeam::channel::{bounded, select, Receiver, Sender, TrySendError};
use log::error;
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::net::{SocketAddr, TcpStream, ToSocketAddrs};
use std::sync::Arc;
use std::thread;
enum DataStreamConnectionChange {
Connect(
usize,
LossySender<MessageInfo>,
Sender<HashMap<String, String>>,
),
Disconnect(usize),
}
pub struct SubscriberRosConnection {
next_data_stream_id: usize,
data_stream_tx: Sender<DataStreamConnectionChange>,
publishers_stream: Sender<SocketAddr>,
topic: Topic,
pub connected_ids: BTreeSet<usize>,
pub connected_publishers: BTreeSet<String>,
}
impl SubscriberRosConnection {
pub fn new(
caller_id: &str,
topic: &str,
msg_definition: String,
msg_type: String,
md5sum: String,
) -> SubscriberRosConnection {
let subscriber_connection_queue_size = 8;
let (data_stream_tx, data_stream_rx) = bounded(subscriber_connection_queue_size);
let publisher_connection_queue_size = 8;
let (pub_tx, pub_rx) = bounded(publisher_connection_queue_size);
let caller_id = String::from(caller_id);
let topic_name = String::from(topic);
thread::spawn({
let msg_type = msg_type.clone();
let md5sum = md5sum.clone();
move || {
join_connections(
data_stream_rx,
pub_rx,
&caller_id,
&topic_name,
&msg_definition,
&md5sum,
&msg_type,
)
}
});
let topic = Topic {
name: String::from(topic),
msg_type,
md5sum,
};
SubscriberRosConnection {
next_data_stream_id: 1,
data_stream_tx,
publishers_stream: pub_tx,
topic,
connected_ids: BTreeSet::new(),
connected_publishers: BTreeSet::new(),
}
}
// TODO: allow synchronous handling for subscribers
// This creates a new thread to call on_message. Next API change should
// allow subscribing with either callback or inline handler of the queue.
// The queue is lossy, so it wouldn't be blocking.
pub fn add_subscriber<T, F, G>(
&mut self,
queue_size: usize,
on_message: F,
on_connect: G,
) -> usize
where
T: Message,
F: Fn(T, &str) + Send + 'static,
G: Fn(HashMap<String, String>) + Send + 'static,
{
let data_stream_id = self.next_data_stream_id;
self.connected_ids.insert(data_stream_id);
self.next_data_stream_id += 1;
let (data_tx, data_rx) = lossy_channel(queue_size);
let (connection_tx, connection_rx) = bounded(8);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Connect(
data_stream_id,
data_tx,
connection_tx,
))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to connect to data stream");
}
thread::spawn(move || {
handle_data::<T, F, G>(data_rx, connection_rx, on_message, on_connect)
});
data_stream_id
}
pub fn remove_subscriber(&mut self, id: usize) {
self.connected_ids.remove(&id);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Disconnect(id))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to disconnect from data stream");
}
}
pub fn has_subscribers(&self) -> bool {
!self.connected_ids.is_empty()
}
#[inline]
pub fn publisher_count(&self) -> usize {
self.connected_publishers.len()
}
#[inline]
pub fn publisher_uris(&self) -> Vec<String> {
self.connected_publishers.iter().cloned().collect()
}
#[allow(clippy::useless_conversion)]
pub fn connect_to<U: ToSocketAddrs>(
&mut self,
publisher: &str,
addresses: U,
) -> std::io::Result<()> {
for address in addresses.to_socket_addrs()? {
// This should never fail, so it's safe to unwrap
// Failure could only be caused by the join_connections
// thread not running, which only happens after
// Subscriber has been deconstructed
self.publishers_stream
.send(address)
.expect("Connected thread died");
}
self.connected_publishers.insert(publisher.to_owned());
Ok(())
}
pub fn is_connected_to(&self, publisher: &str) -> bool {
self.connected_publishers.contains(publisher)
}
pub fn limit_publishers_to(&mut self, publishers: &BTreeSet<String>) {
let difference: Vec<String> = self
.connected_publishers
.difference(publishers)
.cloned()
.collect();
for item in difference {
self.connected_publishers.remove(&item);
}
}
pub fn get_topic(&self) -> &Topic {
&self.topic
}
}
fn handle_data<T, F, G>(
data: LossyReceiver<MessageInfo>,
connections: Receiver<HashMap<String, String>>,
on_message: F,
on_connect: G,
) where
T: Message,
F: Fn(T, &str),
G: Fn(HashMap<String, String>) + Send + 'static,
{
loop {
select! {
recv(data.kill_rx.kill_rx) -> _ => break,
recv(data.data_rx) -> msg => match msg {
Err(_) => break,
Ok(buffer) => match RosMsg::decode_slice(&buffer.data) {
Ok(value) => on_message(value, &buffer.caller_id),
Err(err) => error!("Failed to decode message: {}", err),
},
},
recv(connections) -> msg => match msg {
Err(_) => break,
Ok(conn) => on_connect(conn),
},
}
}
}
fn join_connections(
subscribers: Receiver<DataStreamConnectionChange>,
publishers: Receiver<SocketAddr>,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) {
type Sub = (LossySender<MessageInfo>, Sender<HashMap<String, String>>);
let mut subs: BTreeMap<usize, Sub> = BTreeMap::new();
let mut existing_headers: Vec<HashMap<String, String>> = Vec::new();
let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8);
// Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction
loop {
select! {
recv(data_rx) -> msg => {
match msg {
Err(_) => break,
Ok(v) => for sub in subs.values() {
if sub.0.try_send(v.clone()).is_err() {
error!("Failed to send data to subscriber");
}
}
}
}
recv(subscribers) -> msg => {
match msg {
Err(_) => break,
Ok(DataStreamConnectionChange::Connect(id, data, conn)) => {
for header in &existing_headers {
if conn.send(header.clone()).is_err() {
error!("Failed to send connection info for subscriber");
};
}
subs.insert(id, (data, conn));
}
Ok(DataStreamConnectionChange::Disconnect(id)) => {
if let Some((mut data, _)) = subs.remove(&id) {
if data.close().is_err() {
error!("Subscriber data stream to topic has already been killed");
}
}
}
}
}
recv(publishers) -> msg => {
match msg {
Err(_) => break,
Ok(publisher) => {
let result = join_connection(
&data_tx,
&publisher,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)
.chain_err(|| ErrorKind::TopicConnectionFail(topic.into()));
match result {
Ok(headers) => {
for sub in subs.values() {
if sub.1.send(headers.clone()).is_err() {
error!("Failed to send connection info for subscriber");
}
}
existing_headers.push(headers);
}
Err(err) => {
let info = err
.iter()
.map(|v| format!("{}", v))
.collect::<Vec<_>>()
.join("\nCaused by:");
error!("{}", info);
}
}
}
}
}
}
}
}
fn join_connection(
data_stream: &Sender<MessageInfo>,
publisher: &SocketAddr,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let mut stream = TcpStream::connect(publisher)?;
let headers = exchange_headers::<_>(
&mut stream,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)?;
let pub_caller_id = headers.get("callerid").cloned();
let target = data_stream.clone();
thread::spawn(move || {
let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default());
while let Ok(buffer) = package_to_vector(&mut stream) {
if let Err(TrySendError::Disconnected(_)) =
target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
{
// Data receiver has been destroyed after
// Subscriber destructor's kill signal
break;
}
}
});
Ok(headers)
}
fn write_request<U: std::io::Write>(
mut stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<()> {
let mut fields = HashMap::<String, String>::new();
fields.insert(String::from("message_definition"), msg_definition.into());
fields.insert(String::from("callerid"), caller_id.into());
fields.insert(String::from("topic"), topic.into());
fields.insert(String::from("md5sum"), md5sum.into());
fields.insert(String::from("type"), msg_type.into());
encode(&mut stream, &fields)?;
Ok(())
}
fn read_response<U: std::io::Read>(
mut stream: &mut U,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let fields = decode(&mut stream)?;
if md5sum != "*" {
match_field(&fields, "md5sum", md5sum)?;
}
if msg_type != "*" {
match_field(&fields, "type", msg_type)?;
}
Ok(fields)
}
fn exchange_headers<U>(
stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>>
where
U: std::io::Write + std::io::Read,
{
write_request::<U>(stream, caller_id, topic, msg_definition, md5sum, msg_type)?;
read_response::<U>(stream, md5sum, msg_type)
}
#[inline]
fn package_to_vector<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> {
let length = stream.read_u32::<LittleEndian>()?;
let u32_size = std::mem::size_of::<u32>();
let num_bytes = length as usize + u32_size;
// Allocate memory of the proper size for the incoming message. We
// do not initialize the memory to zero here (as would be safe)
// because it is expensive and ultimately unnecessary. We know the
// length of the message and if the length is incorrect, the
// stream reading functions will bail with an Error rather than
// leaving memory uninitialized.
let mut out = Vec::<u8>::with_capacity(num_bytes);
let out_ptr = out.as_mut_ptr();
// Read length from stream.
std::io::Cursor::new(unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, u32_size) })
.write_u32::<LittleEndian>(length)?;
// Read data from stream.
let read_buf = unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, num_bytes) };
stream.read_exact(&mut read_buf[u32_size..])?;
// Don't drop the original Vec which has size==0 and instead use
// its memory to initialize a new Vec with size == capacity == num_bytes.
std::mem::forget(out);
// Return the new, now full and "safely" initialized.
Ok(unsafe { Vec::from_raw_parts(out_ptr, num_bytes, num_bytes) })
}
#[derive(Clone)]
struct MessageInfo {
caller_id: Arc<String>,
data: Vec<u8>,
}
impl MessageInfo {
fn new(caller_id: Arc<String>, data: Vec<u8>) -> Self {
Self { caller_id, data }
}
}
#[cfg(test)]
mod tests {
use super::*;
static FAILED_TO_READ_WRITE_VECTOR: &str = "Failed to read or write from vector";
#[test]
fn package_to_vector_creates_right_buffer_from_reader() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_respects_provided_length() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_fails_if_stream_is_shorter_than_annotated() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5];
package_to_vector(&mut std::io::Cursor::new(input)).unwrap_err();
}
#[test]
fn package_to_vector_fails_leaves_cursor_at_end_of_reading() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 4, 0, 0, 0, 11, 12, 13, 14];
let mut cursor = std::io::Cursor::new(input); | assert_eq!(data, [4, 0, 0, 0, 11, 12, 13, 14]);
}
} | let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR); | random_line_split |
subscriber.rs | use super::error::{ErrorKind, Result, ResultExt};
use super::header::{decode, encode, match_field};
use super::{Message, Topic};
use crate::rosmsg::RosMsg;
use crate::util::lossy_channel::{lossy_channel, LossyReceiver, LossySender};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use crossbeam::channel::{bounded, select, Receiver, Sender, TrySendError};
use log::error;
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::net::{SocketAddr, TcpStream, ToSocketAddrs};
use std::sync::Arc;
use std::thread;
enum DataStreamConnectionChange {
Connect(
usize,
LossySender<MessageInfo>,
Sender<HashMap<String, String>>,
),
Disconnect(usize),
}
pub struct SubscriberRosConnection {
next_data_stream_id: usize,
data_stream_tx: Sender<DataStreamConnectionChange>,
publishers_stream: Sender<SocketAddr>,
topic: Topic,
pub connected_ids: BTreeSet<usize>,
pub connected_publishers: BTreeSet<String>,
}
impl SubscriberRosConnection {
pub fn new(
caller_id: &str,
topic: &str,
msg_definition: String,
msg_type: String,
md5sum: String,
) -> SubscriberRosConnection {
let subscriber_connection_queue_size = 8;
let (data_stream_tx, data_stream_rx) = bounded(subscriber_connection_queue_size);
let publisher_connection_queue_size = 8;
let (pub_tx, pub_rx) = bounded(publisher_connection_queue_size);
let caller_id = String::from(caller_id);
let topic_name = String::from(topic);
thread::spawn({
let msg_type = msg_type.clone();
let md5sum = md5sum.clone();
move || {
join_connections(
data_stream_rx,
pub_rx,
&caller_id,
&topic_name,
&msg_definition,
&md5sum,
&msg_type,
)
}
});
let topic = Topic {
name: String::from(topic),
msg_type,
md5sum,
};
SubscriberRosConnection {
next_data_stream_id: 1,
data_stream_tx,
publishers_stream: pub_tx,
topic,
connected_ids: BTreeSet::new(),
connected_publishers: BTreeSet::new(),
}
}
// TODO: allow synchronous handling for subscribers
// This creates a new thread to call on_message. Next API change should
// allow subscribing with either callback or inline handler of the queue.
// The queue is lossy, so it wouldn't be blocking.
pub fn add_subscriber<T, F, G>(
&mut self,
queue_size: usize,
on_message: F,
on_connect: G,
) -> usize
where
T: Message,
F: Fn(T, &str) + Send + 'static,
G: Fn(HashMap<String, String>) + Send + 'static,
{
let data_stream_id = self.next_data_stream_id;
self.connected_ids.insert(data_stream_id);
self.next_data_stream_id += 1;
let (data_tx, data_rx) = lossy_channel(queue_size);
let (connection_tx, connection_rx) = bounded(8);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Connect(
data_stream_id,
data_tx,
connection_tx,
))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to connect to data stream");
}
thread::spawn(move || {
handle_data::<T, F, G>(data_rx, connection_rx, on_message, on_connect)
});
data_stream_id
}
pub fn remove_subscriber(&mut self, id: usize) {
self.connected_ids.remove(&id);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Disconnect(id))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to disconnect from data stream");
}
}
pub fn has_subscribers(&self) -> bool {
!self.connected_ids.is_empty()
}
#[inline]
pub fn publisher_count(&self) -> usize {
self.connected_publishers.len()
}
#[inline]
pub fn | (&self) -> Vec<String> {
self.connected_publishers.iter().cloned().collect()
}
#[allow(clippy::useless_conversion)]
pub fn connect_to<U: ToSocketAddrs>(
&mut self,
publisher: &str,
addresses: U,
) -> std::io::Result<()> {
for address in addresses.to_socket_addrs()? {
// This should never fail, so it's safe to unwrap
// Failure could only be caused by the join_connections
// thread not running, which only happens after
// Subscriber has been deconstructed
self.publishers_stream
.send(address)
.expect("Connected thread died");
}
self.connected_publishers.insert(publisher.to_owned());
Ok(())
}
pub fn is_connected_to(&self, publisher: &str) -> bool {
self.connected_publishers.contains(publisher)
}
pub fn limit_publishers_to(&mut self, publishers: &BTreeSet<String>) {
let difference: Vec<String> = self
.connected_publishers
.difference(publishers)
.cloned()
.collect();
for item in difference {
self.connected_publishers.remove(&item);
}
}
pub fn get_topic(&self) -> &Topic {
&self.topic
}
}
fn handle_data<T, F, G>(
data: LossyReceiver<MessageInfo>,
connections: Receiver<HashMap<String, String>>,
on_message: F,
on_connect: G,
) where
T: Message,
F: Fn(T, &str),
G: Fn(HashMap<String, String>) + Send + 'static,
{
loop {
select! {
recv(data.kill_rx.kill_rx) -> _ => break,
recv(data.data_rx) -> msg => match msg {
Err(_) => break,
Ok(buffer) => match RosMsg::decode_slice(&buffer.data) {
Ok(value) => on_message(value, &buffer.caller_id),
Err(err) => error!("Failed to decode message: {}", err),
},
},
recv(connections) -> msg => match msg {
Err(_) => break,
Ok(conn) => on_connect(conn),
},
}
}
}
fn join_connections(
subscribers: Receiver<DataStreamConnectionChange>,
publishers: Receiver<SocketAddr>,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) {
type Sub = (LossySender<MessageInfo>, Sender<HashMap<String, String>>);
let mut subs: BTreeMap<usize, Sub> = BTreeMap::new();
let mut existing_headers: Vec<HashMap<String, String>> = Vec::new();
let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8);
// Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction
loop {
select! {
recv(data_rx) -> msg => {
match msg {
Err(_) => break,
Ok(v) => for sub in subs.values() {
if sub.0.try_send(v.clone()).is_err() {
error!("Failed to send data to subscriber");
}
}
}
}
recv(subscribers) -> msg => {
match msg {
Err(_) => break,
Ok(DataStreamConnectionChange::Connect(id, data, conn)) => {
for header in &existing_headers {
if conn.send(header.clone()).is_err() {
error!("Failed to send connection info for subscriber");
};
}
subs.insert(id, (data, conn));
}
Ok(DataStreamConnectionChange::Disconnect(id)) => {
if let Some((mut data, _)) = subs.remove(&id) {
if data.close().is_err() {
error!("Subscriber data stream to topic has already been killed");
}
}
}
}
}
recv(publishers) -> msg => {
match msg {
Err(_) => break,
Ok(publisher) => {
let result = join_connection(
&data_tx,
&publisher,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)
.chain_err(|| ErrorKind::TopicConnectionFail(topic.into()));
match result {
Ok(headers) => {
for sub in subs.values() {
if sub.1.send(headers.clone()).is_err() {
error!("Failed to send connection info for subscriber");
}
}
existing_headers.push(headers);
}
Err(err) => {
let info = err
.iter()
.map(|v| format!("{}", v))
.collect::<Vec<_>>()
.join("\nCaused by:");
error!("{}", info);
}
}
}
}
}
}
}
}
fn join_connection(
data_stream: &Sender<MessageInfo>,
publisher: &SocketAddr,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let mut stream = TcpStream::connect(publisher)?;
let headers = exchange_headers::<_>(
&mut stream,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)?;
let pub_caller_id = headers.get("callerid").cloned();
let target = data_stream.clone();
thread::spawn(move || {
let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default());
while let Ok(buffer) = package_to_vector(&mut stream) {
if let Err(TrySendError::Disconnected(_)) =
target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
{
// Data receiver has been destroyed after
// Subscriber destructor's kill signal
break;
}
}
});
Ok(headers)
}
fn write_request<U: std::io::Write>(
mut stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<()> {
let mut fields = HashMap::<String, String>::new();
fields.insert(String::from("message_definition"), msg_definition.into());
fields.insert(String::from("callerid"), caller_id.into());
fields.insert(String::from("topic"), topic.into());
fields.insert(String::from("md5sum"), md5sum.into());
fields.insert(String::from("type"), msg_type.into());
encode(&mut stream, &fields)?;
Ok(())
}
fn read_response<U: std::io::Read>(
mut stream: &mut U,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let fields = decode(&mut stream)?;
if md5sum != "*" {
match_field(&fields, "md5sum", md5sum)?;
}
if msg_type != "*" {
match_field(&fields, "type", msg_type)?;
}
Ok(fields)
}
fn exchange_headers<U>(
stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>>
where
U: std::io::Write + std::io::Read,
{
write_request::<U>(stream, caller_id, topic, msg_definition, md5sum, msg_type)?;
read_response::<U>(stream, md5sum, msg_type)
}
#[inline]
fn package_to_vector<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> {
let length = stream.read_u32::<LittleEndian>()?;
let u32_size = std::mem::size_of::<u32>();
let num_bytes = length as usize + u32_size;
// Allocate memory of the proper size for the incoming message. We
// do not initialize the memory to zero here (as would be safe)
// because it is expensive and ultimately unnecessary. We know the
// length of the message and if the length is incorrect, the
// stream reading functions will bail with an Error rather than
// leaving memory uninitialized.
let mut out = Vec::<u8>::with_capacity(num_bytes);
let out_ptr = out.as_mut_ptr();
// Read length from stream.
std::io::Cursor::new(unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, u32_size) })
.write_u32::<LittleEndian>(length)?;
// Read data from stream.
let read_buf = unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, num_bytes) };
stream.read_exact(&mut read_buf[u32_size..])?;
// Don't drop the original Vec which has size==0 and instead use
// its memory to initialize a new Vec with size == capacity == num_bytes.
std::mem::forget(out);
// Return the new, now full and "safely" initialized.
Ok(unsafe { Vec::from_raw_parts(out_ptr, num_bytes, num_bytes) })
}
#[derive(Clone)]
struct MessageInfo {
caller_id: Arc<String>,
data: Vec<u8>,
}
impl MessageInfo {
fn new(caller_id: Arc<String>, data: Vec<u8>) -> Self {
Self { caller_id, data }
}
}
#[cfg(test)]
mod tests {
use super::*;
static FAILED_TO_READ_WRITE_VECTOR: &str = "Failed to read or write from vector";
#[test]
fn package_to_vector_creates_right_buffer_from_reader() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_respects_provided_length() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_fails_if_stream_is_shorter_than_annotated() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5];
package_to_vector(&mut std::io::Cursor::new(input)).unwrap_err();
}
#[test]
fn package_to_vector_fails_leaves_cursor_at_end_of_reading() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 4, 0, 0, 0, 11, 12, 13, 14];
let mut cursor = std::io::Cursor::new(input);
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [4, 0, 0, 0, 11, 12, 13, 14]);
}
}
| publisher_uris | identifier_name |
subscriber.rs | use super::error::{ErrorKind, Result, ResultExt};
use super::header::{decode, encode, match_field};
use super::{Message, Topic};
use crate::rosmsg::RosMsg;
use crate::util::lossy_channel::{lossy_channel, LossyReceiver, LossySender};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use crossbeam::channel::{bounded, select, Receiver, Sender, TrySendError};
use log::error;
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::net::{SocketAddr, TcpStream, ToSocketAddrs};
use std::sync::Arc;
use std::thread;
enum DataStreamConnectionChange {
Connect(
usize,
LossySender<MessageInfo>,
Sender<HashMap<String, String>>,
),
Disconnect(usize),
}
pub struct SubscriberRosConnection {
next_data_stream_id: usize,
data_stream_tx: Sender<DataStreamConnectionChange>,
publishers_stream: Sender<SocketAddr>,
topic: Topic,
pub connected_ids: BTreeSet<usize>,
pub connected_publishers: BTreeSet<String>,
}
impl SubscriberRosConnection {
pub fn new(
caller_id: &str,
topic: &str,
msg_definition: String,
msg_type: String,
md5sum: String,
) -> SubscriberRosConnection {
let subscriber_connection_queue_size = 8;
let (data_stream_tx, data_stream_rx) = bounded(subscriber_connection_queue_size);
let publisher_connection_queue_size = 8;
let (pub_tx, pub_rx) = bounded(publisher_connection_queue_size);
let caller_id = String::from(caller_id);
let topic_name = String::from(topic);
thread::spawn({
let msg_type = msg_type.clone();
let md5sum = md5sum.clone();
move || {
join_connections(
data_stream_rx,
pub_rx,
&caller_id,
&topic_name,
&msg_definition,
&md5sum,
&msg_type,
)
}
});
let topic = Topic {
name: String::from(topic),
msg_type,
md5sum,
};
SubscriberRosConnection {
next_data_stream_id: 1,
data_stream_tx,
publishers_stream: pub_tx,
topic,
connected_ids: BTreeSet::new(),
connected_publishers: BTreeSet::new(),
}
}
// TODO: allow synchronous handling for subscribers
// This creates a new thread to call on_message. Next API change should
// allow subscribing with either callback or inline handler of the queue.
// The queue is lossy, so it wouldn't be blocking.
pub fn add_subscriber<T, F, G>(
&mut self,
queue_size: usize,
on_message: F,
on_connect: G,
) -> usize
where
T: Message,
F: Fn(T, &str) + Send + 'static,
G: Fn(HashMap<String, String>) + Send + 'static,
{
let data_stream_id = self.next_data_stream_id;
self.connected_ids.insert(data_stream_id);
self.next_data_stream_id += 1;
let (data_tx, data_rx) = lossy_channel(queue_size);
let (connection_tx, connection_rx) = bounded(8);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Connect(
data_stream_id,
data_tx,
connection_tx,
))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to connect to data stream");
}
thread::spawn(move || {
handle_data::<T, F, G>(data_rx, connection_rx, on_message, on_connect)
});
data_stream_id
}
pub fn remove_subscriber(&mut self, id: usize) {
self.connected_ids.remove(&id);
if self
.data_stream_tx
.send(DataStreamConnectionChange::Disconnect(id))
.is_err()
{
// TODO: we might want to panic here
error!("Subscriber failed to disconnect from data stream");
}
}
pub fn has_subscribers(&self) -> bool {
!self.connected_ids.is_empty()
}
#[inline]
pub fn publisher_count(&self) -> usize {
self.connected_publishers.len()
}
#[inline]
pub fn publisher_uris(&self) -> Vec<String> {
self.connected_publishers.iter().cloned().collect()
}
#[allow(clippy::useless_conversion)]
pub fn connect_to<U: ToSocketAddrs>(
&mut self,
publisher: &str,
addresses: U,
) -> std::io::Result<()> {
for address in addresses.to_socket_addrs()? {
// This should never fail, so it's safe to unwrap
// Failure could only be caused by the join_connections
// thread not running, which only happens after
// Subscriber has been deconstructed
self.publishers_stream
.send(address)
.expect("Connected thread died");
}
self.connected_publishers.insert(publisher.to_owned());
Ok(())
}
pub fn is_connected_to(&self, publisher: &str) -> bool {
self.connected_publishers.contains(publisher)
}
pub fn limit_publishers_to(&mut self, publishers: &BTreeSet<String>) {
let difference: Vec<String> = self
.connected_publishers
.difference(publishers)
.cloned()
.collect();
for item in difference {
self.connected_publishers.remove(&item);
}
}
pub fn get_topic(&self) -> &Topic {
&self.topic
}
}
fn handle_data<T, F, G>(
data: LossyReceiver<MessageInfo>,
connections: Receiver<HashMap<String, String>>,
on_message: F,
on_connect: G,
) where
T: Message,
F: Fn(T, &str),
G: Fn(HashMap<String, String>) + Send + 'static,
{
loop {
select! {
recv(data.kill_rx.kill_rx) -> _ => break,
recv(data.data_rx) -> msg => match msg {
Err(_) => break,
Ok(buffer) => match RosMsg::decode_slice(&buffer.data) {
Ok(value) => on_message(value, &buffer.caller_id),
Err(err) => error!("Failed to decode message: {}", err),
},
},
recv(connections) -> msg => match msg {
Err(_) => break,
Ok(conn) => on_connect(conn),
},
}
}
}
fn join_connections(
subscribers: Receiver<DataStreamConnectionChange>,
publishers: Receiver<SocketAddr>,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) {
type Sub = (LossySender<MessageInfo>, Sender<HashMap<String, String>>);
let mut subs: BTreeMap<usize, Sub> = BTreeMap::new();
let mut existing_headers: Vec<HashMap<String, String>> = Vec::new();
let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8);
// Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction
loop {
select! {
recv(data_rx) -> msg => {
match msg {
Err(_) => break,
Ok(v) => for sub in subs.values() {
if sub.0.try_send(v.clone()).is_err() {
error!("Failed to send data to subscriber");
}
}
}
}
recv(subscribers) -> msg => {
match msg {
Err(_) => break,
Ok(DataStreamConnectionChange::Connect(id, data, conn)) => {
for header in &existing_headers {
if conn.send(header.clone()).is_err() {
error!("Failed to send connection info for subscriber");
};
}
subs.insert(id, (data, conn));
}
Ok(DataStreamConnectionChange::Disconnect(id)) => {
if let Some((mut data, _)) = subs.remove(&id) {
if data.close().is_err() {
error!("Subscriber data stream to topic has already been killed");
}
}
}
}
}
recv(publishers) -> msg => {
match msg {
Err(_) => break,
Ok(publisher) => {
let result = join_connection(
&data_tx,
&publisher,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)
.chain_err(|| ErrorKind::TopicConnectionFail(topic.into()));
match result {
Ok(headers) => {
for sub in subs.values() {
if sub.1.send(headers.clone()).is_err() {
error!("Failed to send connection info for subscriber");
}
}
existing_headers.push(headers);
}
Err(err) => {
let info = err
.iter()
.map(|v| format!("{}", v))
.collect::<Vec<_>>()
.join("\nCaused by:");
error!("{}", info);
}
}
}
}
}
}
}
}
fn join_connection(
data_stream: &Sender<MessageInfo>,
publisher: &SocketAddr,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let mut stream = TcpStream::connect(publisher)?;
let headers = exchange_headers::<_>(
&mut stream,
caller_id,
topic,
msg_definition,
md5sum,
msg_type,
)?;
let pub_caller_id = headers.get("callerid").cloned();
let target = data_stream.clone();
thread::spawn(move || {
let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default());
while let Ok(buffer) = package_to_vector(&mut stream) {
if let Err(TrySendError::Disconnected(_)) =
target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
|
}
});
Ok(headers)
}
fn write_request<U: std::io::Write>(
mut stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<()> {
let mut fields = HashMap::<String, String>::new();
fields.insert(String::from("message_definition"), msg_definition.into());
fields.insert(String::from("callerid"), caller_id.into());
fields.insert(String::from("topic"), topic.into());
fields.insert(String::from("md5sum"), md5sum.into());
fields.insert(String::from("type"), msg_type.into());
encode(&mut stream, &fields)?;
Ok(())
}
fn read_response<U: std::io::Read>(
mut stream: &mut U,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>> {
let fields = decode(&mut stream)?;
if md5sum != "*" {
match_field(&fields, "md5sum", md5sum)?;
}
if msg_type != "*" {
match_field(&fields, "type", msg_type)?;
}
Ok(fields)
}
fn exchange_headers<U>(
stream: &mut U,
caller_id: &str,
topic: &str,
msg_definition: &str,
md5sum: &str,
msg_type: &str,
) -> Result<HashMap<String, String>>
where
U: std::io::Write + std::io::Read,
{
write_request::<U>(stream, caller_id, topic, msg_definition, md5sum, msg_type)?;
read_response::<U>(stream, md5sum, msg_type)
}
#[inline]
fn package_to_vector<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> {
let length = stream.read_u32::<LittleEndian>()?;
let u32_size = std::mem::size_of::<u32>();
let num_bytes = length as usize + u32_size;
// Allocate memory of the proper size for the incoming message. We
// do not initialize the memory to zero here (as would be safe)
// because it is expensive and ultimately unnecessary. We know the
// length of the message and if the length is incorrect, the
// stream reading functions will bail with an Error rather than
// leaving memory uninitialized.
let mut out = Vec::<u8>::with_capacity(num_bytes);
let out_ptr = out.as_mut_ptr();
// Read length from stream.
std::io::Cursor::new(unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, u32_size) })
.write_u32::<LittleEndian>(length)?;
// Read data from stream.
let read_buf = unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, num_bytes) };
stream.read_exact(&mut read_buf[u32_size..])?;
// Don't drop the original Vec which has size==0 and instead use
// its memory to initialize a new Vec with size == capacity == num_bytes.
std::mem::forget(out);
// Return the new, now full and "safely" initialized.
Ok(unsafe { Vec::from_raw_parts(out_ptr, num_bytes, num_bytes) })
}
#[derive(Clone)]
struct MessageInfo {
caller_id: Arc<String>,
data: Vec<u8>,
}
impl MessageInfo {
fn new(caller_id: Arc<String>, data: Vec<u8>) -> Self {
Self { caller_id, data }
}
}
#[cfg(test)]
mod tests {
use super::*;
static FAILED_TO_READ_WRITE_VECTOR: &str = "Failed to read or write from vector";
#[test]
fn package_to_vector_creates_right_buffer_from_reader() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_respects_provided_length() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let data =
package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn package_to_vector_fails_if_stream_is_shorter_than_annotated() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5];
package_to_vector(&mut std::io::Cursor::new(input)).unwrap_err();
}
#[test]
fn package_to_vector_fails_leaves_cursor_at_end_of_reading() {
let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 4, 0, 0, 0, 11, 12, 13, 14];
let mut cursor = std::io::Cursor::new(input);
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
assert_eq!(data, [4, 0, 0, 0, 11, 12, 13, 14]);
}
}
| {
// Data receiver has been destroyed after
// Subscriber destructor's kill signal
break;
} | conditional_block |
snva.py | import argparse
import asyncio
import json
import logging
from logging.handlers import QueueHandler, SocketHandler
from multiprocessing import Process, Queue
import os
import platform
from queue import Empty
import signal
import socket
from subprocess import PIPE, Popen
from threading import Thread
from time import sleep, time
from utils.io import IO
from utils.processor import process_video, process_video_signalstate
import websockets as ws
path = os.path
logger = logging.getLogger('websockets')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def main_logger_fn(log_queue):
while True:
try:
message = log_queue.get()
if message is None:
break
logger = logging.getLogger(__name__)
logger.handle(message)
except Exception as e:
logging.error(e)
break
# Logger thread: listens for updates to log queue and writes them as they arrive
# Terminates after we add None to the queue
def child_logger_fn(main_log_queue, child_log_queue):
while True:
try:
message = child_log_queue.get()
if message is None:
break
main_log_queue.put(message)
except Exception as e:
logging.error(e)
break
def stringify_command(arg_list):
command_string = arg_list[0]
for elem in arg_list[1:]:
command_string += ' ' + elem
return 'command string: {}'.format(command_string)
#TODO: accomodate unbounded number of valid process counts
def get_valid_num_processes_per_device(device_type):
# valid_n_procs = {1, 2}
# if device_type == 'cpu':
# n_cpus = os.cpu_count()
# n_procs = 4
# while n_procs <= n_cpus:
# k = (n_cpus - n_procs) / n_procs
# if k == int(k):
# valid_n_procs.add(n_procs)
# n_procs += 2
# return valid_n_procs
return list(range(1, os.cpu_count() + 1))
async def main():
logging.info('entering snva {} main process'.format(snva_version_string))
# total_num_video_to_process = None
def interrupt_handler(signal_number, _):
logging.warning('Main process received interrupt signal '
'{}.'.format(signal_number))
main_interrupt_queue.put_nowait('_')
# if total_num_video_to_process is None \
# or total_num_video_to_process == len(video_file_paths):
# Signal the logging thread to finish up
logging.debug('signaling logger thread to end service.')
log_queue.put_nowait(None)
logger_thread.join()
logging.shutdown()
signal.signal(signal.SIGINT, interrupt_handler)
try:
ffmpeg_path = os.environ['FFMPEG_HOME']
except KeyError:
logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
'to use default ffmpeg binary location.')
if platform.system() == 'Windows':
ffmpeg_path = 'ffmpeg.exe'
else:
ffmpeg_path = '/usr/local/bin/ffmpeg'
if not path.exists(ffmpeg_path):
ffmpeg_path = '/usr/bin/ffmpeg'
logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))
try:
ffprobe_path = os.environ['FFPROBE_HOME']
except KeyError:
logging.warning('Environment variable FFPROBE_HOME not set. '
'Attempting to use default ffprobe binary location.')
if platform.system() == 'Windows':
ffprobe_path = 'ffprobe.exe'
else:
ffprobe_path = '/usr/local/bin/ffprobe'
if not path.exists(ffprobe_path):
ffprobe_path = '/usr/bin/ffprobe'
logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))
# # TODO validate all video file paths in the provided text file if args.inputpath is a text file
# if path.isdir(args.inputpath):
# video_file_names = set(IO.read_video_file_names(args.inputpath))
# video_file_paths = [path.join(args.inputpath, video_file_name)
# for video_file_name in video_file_names]
# elif path.isfile(args.inputpath):
# if args.inputpath[-3:] == 'txt':
# if args.inputlistrootdirpath is None:
# raise ValueError('--inputlistrootdirpath must be specified when using a'
# ' text file as the input.')
# with open(args.inputpath, newline='') as input_file:
# video_file_paths = []
#
# for line in input_file.readlines():
# line = line.rstrip()
# video_file_path = line.lstrip(args.inputlistrootdirpath)
# video_file_path = path.join('/media/root', video_file_path)
#
# if path.isfile(video_file_path):
# video_file_paths.append(video_file_path)
# else:
# logging.warning('The video file at host path {} could not be found '
# 'at mapped path {} and will not be processed'.
# format(line, video_file_path))
# else:
# video_file_paths = [args.inputpath]
# else:
# raise ValueError('The video file/folder specified at the path {} could '
# 'not be found.'.format(args.inputpath))
models_root_dir_path = path.join(snva_home, args.modelsdirpath)
models_dir_path = path.join(models_root_dir_path, args.modelname)
logging.debug('models_dir_path set to {}'.format(models_dir_path))
# model_file_path = path.join(models_dir_path, args.protobuffilename)
#
# if not path.isfile(model_file_path):
# raise ValueError('The model specified at the path {} could not be '
# 'found.'.format(model_file_path))
#
# logging.debug('model_file_path set to {}'.format(model_file_path))
model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')
if not path.isfile(model_input_size_file_path):
raise ValueError('The model input size file specified at the path {} '
'could not be found.'.format(model_input_size_file_path))
logging.debug('model_input_size_file_path set to {}'.format(
model_input_size_file_path))
with open(model_input_size_file_path) as file:
model_input_size_string = file.readline().rstrip()
valid_size_set = ['224', '299']
if model_input_size_string not in valid_size_set:
raise ValueError('The model input size is not in the set {}.'.format(
valid_size_set))
model_input_size = int(model_input_size_string)
# if logpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.outputpath == 'reports':
output_dir_path = path.join(snva_home, args.outputpath)
else:
output_dir_path = args.outputpath
logging.info("Output path set to: {}".format(output_dir_path))
if not path.isdir(output_dir_path):
os.makedirs(output_dir_path)
if args.classnamesfilepath is None \
or not path.isfile(args.classnamesfilepath):
class_names_path = path.join(models_root_dir_path, 'class_names.txt')
else:
class_names_path = args.classnamesfilepath
logging.debug('labels path set to: {}'.format(class_names_path))
num_processes = args.numprocesses
class_name_map = IO.read_class_names(class_names_path)
return_code_queue_map = {}
child_logger_thread_map = {}
child_process_map = {}
total_num_processed_videos = 0
total_num_processed_frames = 0
total_analysis_duration = 0
def start_video_processor(video_file_path):
# Before popping the next video off of the list and creating a process to
# scan it, check to see if fewer than logical_device_count + 1 processes are
# active. If not, Wait for a child process to release its semaphore
# acquisition. If so, acquire the semaphore, pop the next video name,
# create the next child process, and pass the semaphore to it
return_code_queue = Queue()
return_code_queue_map[video_file_path] = return_code_queue
logging.debug('creating new child process.')
child_log_queue = Queue()
child_logger_thread = Thread(target=child_logger_fn,
args=(log_queue, child_log_queue))
child_logger_thread.start()
child_logger_thread_map[video_file_path] = child_logger_thread
if 'signalstate' == args.processormode:
child_process = Process(
target=process_video_signalstate,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writebbox, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
else:
child_process = Process(
target=process_video,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writeinferencereports, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
logging.debug('starting child process.')
child_process.start()
child_process_map[video_file_path] = child_process
async def close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, websocket_conn):
for video_file_path in list(return_code_queue_map.keys()):
return_code_queue = return_code_queue_map[video_file_path]
try:
return_code_map = return_code_queue.get_nowait()
return_code = return_code_map['return_code']
return_value = return_code_map['return_value']
child_process = child_process_map[video_file_path]
logging.debug(
'child process {} returned with exit code {} and exit value '
'{}'.format(child_process.pid, return_code, return_value))
if return_code == 'success':
total_num_processed_videos += 1
total_num_processed_frames += return_value
total_analysis_duration += return_code_map['analysis_duration']
logging.info('notifying control node of completion')
complete_request = json.dumps({
'action': 'COMPLETE',
'video': os.path.basename(video_file_path),
'output': return_code_map['output_locations']})
await websocket_conn.send(complete_request)
child_logger_thread = child_logger_thread_map[video_file_path]
logging.debug('joining logger thread for child process {}'.format(
child_process.pid))
child_logger_thread.join(timeout=15)
if child_logger_thread.is_alive():
logging.warning(
'logger thread for child process {} remained alive following join '
'timeout'.format(child_process.pid))
logging.debug('joining child process {}'.format(child_process.pid))
child_process.join(timeout=15)
# if the child process has not yet terminated, kill the child process at
# the risk of losing any log message not yet buffered by the main logger
try:
os.kill(child_process.pid, signal.SIGKILL)
logging.warning(
'child process {} remained alive following join timeout and had to '
'be killed'.format(child_process.pid))
except:
pass
return_code_queue.close()
return_code_queue_map.pop(video_file_path)
child_logger_thread_map.pop(video_file_path)
child_process_map.pop(video_file_path)
except Empty:
pass
return total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration
start = time()
sleep_duration = 1
breakLoop = False
connectionId = None
isIdle = False
while True:
try:
if breakLoop:
break
wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
if connectionId is not None:
wsUrl = wsUrl + '?id=' + connectionId
logging.debug("Connecting with URL {}".format(wsUrl))
async with ws.connect(wsUrl) as conn:
response = await conn.recv()
response = json.loads(response)
logging.info(response)
if response['action'] != 'CONNECTION_SUCCESS':
raise ConnectionError(
'control node connection failed with response: {}'.format(response))
if connectionId is None:
connectionId = response['id']
logging.debug("Assigned id {}".format(connectionId))
while True:
# block if num_processes child processes are active
while len(return_code_queue_map) >= num_processes:
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
sleep(sleep_duration)
try: # todo poll for termination signal from control node
_ = main_interrupt_queue.get_nowait()
logging.debug(
'breaking out of child process generation following interrupt signal')
break
except:
pass
if not isIdle:
logging.info('requesting video')
request = json.dumps({'action': 'REQUEST_VIDEO'})
await conn.send(request)
logging.info('reading response')
response = await conn.recv()
else:
# If idle, we will try to close completed processors until all are done
while len(return_code_queue_map) > 0:
# Before checking for completed processes, check for a new message
logging.info('Checking for new message')
try:
# If we get a response quickly, break our waiting loop and process the command
response = await asyncio.wait_for(conn.recv(), 1)
break
except asyncio.TimeoutError:
# Otherwise, go back to finishing our current tasks
logging.debug('No new message from control node, continuing...')
pass
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
sleep(sleep_duration)
# Once all are complete, if still idle we have no work left to do - we just wait for a new message
response = await conn.recv()
response = json.loads(response)
if response['action'] == 'STATUS_REQUEST':
logging.info('control node requested status request')
pass
elif response['action'] == 'CEASE_REQUESTS':
logging.info('control node has no more videos to process')
isIdle = True
pass
elif response['action'] == 'RESUME_REQUESTS':
logging.info('control node has instructed to resume requests')
isIdle = False
pass
elif response['action'] == 'SHUTDOWN':
logging.info('control node requested shutdown')
breakLoop = True
break
elif response['action'] == 'PROCESS':
# TODO Prepend input path
video_file_path = os.path.join(args.inputpath, response['path'])
request_received = json.dumps({'action': 'REQUEST_RECEIVED', 'video': response['path']})
await conn.send(request_received)
try:
start_video_processor(video_file_path)
except Exception as e:
logging.error('an unknown error has occured while processing {}'.format(video_file_path))
logging.error(e)
else:
raise ConnectionError(
'control node replied with unexpected response: {}'.format(response))
logging.debug('{} child processes remain enqueued'.format(len(return_code_queue_map)))
while len(return_code_queue_map) > 0:
#logging.debug('waiting for the final {} child processes to '
# 'terminate'.format(len(return_code_queue_map)))
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
#logging.debug('sleeping for {} seconds'.format(sleep_duration))
sleep(sleep_duration)
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'snva {} processed a total of {} videos and {} frames in:'.format(
snva_version_string, total_num_processed_videos,
total_num_processed_frames))
logging.info(processing_duration)
logging.info('Video analysis alone spanned a cumulative {:.02f} '
'seconds'.format(total_analysis_duration))
logging.info('exiting snva {} main process'.format(snva_version_string))
breakLoop = True | continue
except ConnectionRefusedError:
# log something else
logging.info('connection refused')
break
except ws.exceptions.ConnectionClosed:
logging.info('Connection lost. Attempting reconnect...')
continue
except Exception as e:
logging.error("Unknown Exception")
logging.error(e)
raise e
if breakLoop:
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='SHRP2 NDS Video Analytics built on TensorFlow')
parser.add_argument('--batchsize', '-bs', type=int, default=32,
help='Number of concurrent neural net inputs')
parser.add_argument('--binarizeprobs', '-b', action='store_true',
help='Round probs to zero or one. For distributions with '
' two 0.5 values, both will be rounded up to 1.0')
parser.add_argument('--classnamesfilepath', '-cnfp',
help='Path to the class ids/names text file.')
parser.add_argument('--controlnodehost', '-cnh', default='localhost:8080',
help='control node colon-separated host name or IP and '
'port')
parser.add_argument('--numprocesses', '-np', type=int, default=3,
help='Number of videos to process at one time')
parser.add_argument('--crop', '-c', action='store_true',
help='Crop video frames to [offsetheight, offsetwidth, '
'targetheight, targetwidth]')
parser.add_argument('--cropheight', '-ch', type=int, default=320,
help='y-component of bottom-right corner of crop.')
parser.add_argument('--cropwidth', '-cw', type=int, default=474,
help='x-component of bottom-right corner of crop.')
parser.add_argument('--cropx', '-cx', type=int, default=2,
help='x-component of top-left corner of crop.')
parser.add_argument('--cropy', '-cy', type=int, default=0,
help='y-component of top-left corner of crop.')
parser.add_argument('--deinterlace', '-d', action='store_true',
help='Apply de-interlacing to video frames during '
'extraction.')
parser.add_argument('--writebbox', '-bb', action='store_true',
help='Create JSON files with bounding box data for signal state')
# parser.add_argument('--excludepreviouslyprocessed', '-epp',
# action='store_true',
# help='Skip processing of videos for which reports '
# 'already exist in outputpath.')
parser.add_argument('--extracttimestamps', '-et', action='store_true',
help='Crop timestamps out of video frames and map them to'
' strings for inclusion in the output CSV.')
parser.add_argument('--gpumemoryfraction', '-gmf', type=float, default=0.9,
help='% of GPU memory available to this process.')
parser.add_argument('--inputpath', '-ip', required=True,
help='Path to a single video file, a folder containing '
'video files, or a text file that lists absolute '
'video file paths.')
parser.add_argument('--loglevel', '-ll', default='info',
help='Defaults to \'info\'. Pass \'debug\' or \'error\' '
'for verbose or minimal logging, respectively.')
parser.add_argument('--logmode', '-lm', default='verbose',
help='If verbose, log to file and console. If silent, '
'log to file only.')
parser.add_argument('--logpath', '-l', default='logs',
help='Path to the directory where log files are stored.')
parser.add_argument('--logmaxbytes', '-lmb', type=int, default=2**23,
help='File size in bytes at which the log rolls over.')
parser.add_argument('--maxanalyzerthreads', '-mat', type=int,
default=4,
help='Maximum number of threads to assign to each video '
'processor')
parser.add_argument('--modelsdirpath', '-mdp',
default='models/work_zone_scene_detection',
help='Path to the parent directory of model directories.')
parser.add_argument('--modelname', '-mn', default='mobilenet_v2',
help='The name of the model directory under modelsdirpath to use.')
parser.add_argument('--modelsignaturename', '-msn', default='serving_default',
help='Name of the signature that specifies what model is '
'being served, and that model\'s input and output '
'tensors')
parser.add_argument('--modelserverhost', '-msh', default='0.0.0.0:8500',
help='tensorflow serving colon-separated host name or IP '
'and port')
parser.add_argument('--numchannels', '-nc', type=int, default=3,
help='The fourth dimension of image batches.')
parser.add_argument('--numprocessesperdevice', '-nppd', type=int, default=1,
help='The number of instances of inference to perform on '
'each device.')
parser.add_argument('--protobuffilename', '-pbfn', default='model.pb',
help='Name of the model protobuf file.')
parser.add_argument('--outputpath', '-op', default='reports',
help='Path to the directory where reports are stored.')
parser.add_argument('--smoothprobs', '-sp', action='store_true',
help='Apply class-wise smoothing across video frame class'
' probability distributions.')
parser.add_argument('--smoothingfactor', '-sf', type=int, default=16,
help='The class-wise probability smoothing factor.')
parser.add_argument('--timestampheight', '-th', type=int, default=16,
help='The length of the y-dimension of the timestamp '
'overlay.')
parser.add_argument('--timestampmaxwidth', '-tw', type=int, default=160,
help='The length of the x-dimension of the timestamp '
'overlay.')
parser.add_argument('--timestampx', '-tx', type=int, default=25,
help='x-component of top-left corner of timestamp '
'(before cropping).')
parser.add_argument('--timestampy', '-ty', type=int, default=340,
help='y-component of top-left corner of timestamp '
'(before cropping).')
parser.add_argument('--writeeventreports', '-wer', type=bool, default=True,
help='Output a CVS file for each video containing one or '
'more feature events')
parser.add_argument('--writeinferencereports', '-wir', type=bool,
default=False,
help='For every video, output a CSV file containing a '
'probability distribution over class labels, a '
'timestamp, and a frame number for each frame')
parser.add_argument('--clocktype', '-ct', default='wall',
help='Specify whether profiling should use "gpu" or "wall" clock type')
parser.add_argument('--profformat', '-pfmt', default='pstat',
help='Specify whether profiling should save output in "pstat" or "callgrind" formats')
parser.add_argument('--processormode', '-pm', default='workzone',
help='Specify wheter processor should use "workzone", "weather", or "signalstate" pipelines')
args = parser.parse_args()
try:
snva_home = os.environ['SNVA_HOME']
except KeyError:
snva_home = '.'
snva_version_string = 'v0.1.2'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Define our log level based on arguments
if args.loglevel == 'error':
log_level = logging.ERROR
elif args.loglevel == 'debug':
log_level = logging.DEBUG
else:
log_level = logging.INFO
# if logpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.logpath == 'logs':
logs_dir_path = path.join(snva_home, args.logpath)
else:
logs_dir_path = args.logpath
# Configure our log in the main process to write to a file
if path.exists(logs_dir_path):
if path.isfile(logs_dir_path):
raise ValueError('The specified logpath {} is expected to be a '
'directory, not a file.'.format(logs_dir_path))
else:
os.makedirs(logs_dir_path)
try:
log_file_name = 'snva_' + socket.getfqdn() + '.log'
except:
log_file_name = 'snva.log'
log_file_path = path.join(logs_dir_path, log_file_name)
log_format = '%(asctime)s:%(processName)s:%(process)d:%(levelname)s:' \
'%(module)s:%(lineno)d:%(funcName)s:%(message)s'
logger_script_path = path.join(snva_home, 'utils/logger.py')
log_file_max_bytes = '{}'.format(args.logmaxbytes)
stdin = os.dup(0)
logger_subprocess = Popen(
['python', logger_script_path, log_file_path, log_format, args.loglevel,
args.logmode, log_file_max_bytes, '{}'.format(stdin)], stdout=PIPE)
# wait for logger.py to indicate readiness
_ = logger_subprocess.stdout.readline()
log_handlers = [SocketHandler(
host='localhost', port=logging.handlers.DEFAULT_TCP_LOGGING_PORT)]
valid_log_modes = ['verbose', 'silent']
if args.logmode == 'verbose':
log_handlers.append(logging.StreamHandler())
elif not args.logmode == 'silent':
raise ValueError(
'The specified logmode is not in the set {}.'.format(valid_log_modes))
logging.basicConfig(level=log_level, format=log_format, handlers=log_handlers)
log_queue = Queue()
logger_thread = Thread(target=main_logger_fn, args=(log_queue,))
logger_thread.start()
logging.debug('SNVA_HOME set to {}'.format(snva_home))
main_interrupt_queue = Queue()
try:
asyncio.get_event_loop().run_until_complete(main())
except Exception as e:
logging.error(e)
logging.debug('signaling logger thread to end service.')
log_queue.put(None)
logger_thread.join()
logging.shutdown()
logger_subprocess.terminate() | except socket.gaierror:
# log something
logging.info('gaierror') | random_line_split |
snva.py | import argparse
import asyncio
import json
import logging
from logging.handlers import QueueHandler, SocketHandler
from multiprocessing import Process, Queue
import os
import platform
from queue import Empty
import signal
import socket
from subprocess import PIPE, Popen
from threading import Thread
from time import sleep, time
from utils.io import IO
from utils.processor import process_video, process_video_signalstate
import websockets as ws
path = os.path
logger = logging.getLogger('websockets')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def main_logger_fn(log_queue):
|
# Logger thread: listens for updates to log queue and writes them as they arrive
# Terminates after we add None to the queue
def child_logger_fn(main_log_queue, child_log_queue):
    """Relay log records from a child process's queue onto the main log queue.

    Runs until a ``None`` sentinel is read from ``child_log_queue``.  Any
    unexpected queue error is logged and terminates the relay loop.
    """
    while True:
        try:
            record = child_log_queue.get()
        except Exception as error:
            logging.error(error)
            return
        if record is None:
            return
        try:
            main_log_queue.put(record)
        except Exception as error:
            logging.error(error)
            return
def stringify_command(arg_list):
    """Render a subprocess argument list as a single printable string.

    Args:
      arg_list: sequence of command-line tokens (e.g. the list handed to
        ``subprocess.Popen``).

    Returns:
      A string of the form ``'command string: <tokens joined by spaces>'``.
    """
    # str.join is linear and, unlike the previous element-by-element '+'
    # concatenation, does not raise IndexError for an empty argument list.
    return 'command string: {}'.format(' '.join(arg_list))
# TODO: accommodate an unbounded number of valid process counts
def get_valid_num_processes_per_device(device_type):
    """Return the process counts considered valid for the given device type.

    Currently every count from 1 through the host CPU count is allowed,
    regardless of ``device_type``; the retired even-divisor heuristic is
    kept below (commented out) for reference.
    """
    # valid_n_procs = {1, 2}
    # if device_type == 'cpu':
    #   n_cpus = os.cpu_count()
    #   n_procs = 4
    #   while n_procs <= n_cpus:
    #     k = (n_cpus - n_procs) / n_procs
    #     if k == int(k):
    #       valid_n_procs.add(n_procs)
    #     n_procs += 2
    # return valid_n_procs
    return [count for count in range(1, os.cpu_count() + 1)]
async def main():
logging.info('entering snva {} main process'.format(snva_version_string))
# total_num_video_to_process = None
def interrupt_handler(signal_number, _):
logging.warning('Main process received interrupt signal '
'{}.'.format(signal_number))
main_interrupt_queue.put_nowait('_')
# if total_num_video_to_process is None \
# or total_num_video_to_process == len(video_file_paths):
# Signal the logging thread to finish up
logging.debug('signaling logger thread to end service.')
log_queue.put_nowait(None)
logger_thread.join()
logging.shutdown()
signal.signal(signal.SIGINT, interrupt_handler)
try:
ffmpeg_path = os.environ['FFMPEG_HOME']
except KeyError:
logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
'to use default ffmpeg binary location.')
if platform.system() == 'Windows':
ffmpeg_path = 'ffmpeg.exe'
else:
ffmpeg_path = '/usr/local/bin/ffmpeg'
if not path.exists(ffmpeg_path):
ffmpeg_path = '/usr/bin/ffmpeg'
logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))
try:
ffprobe_path = os.environ['FFPROBE_HOME']
except KeyError:
logging.warning('Environment variable FFPROBE_HOME not set. '
'Attempting to use default ffprobe binary location.')
if platform.system() == 'Windows':
ffprobe_path = 'ffprobe.exe'
else:
ffprobe_path = '/usr/local/bin/ffprobe'
if not path.exists(ffprobe_path):
ffprobe_path = '/usr/bin/ffprobe'
logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))
# # TODO validate all video file paths in the provided text file if args.inputpath is a text file
# if path.isdir(args.inputpath):
# video_file_names = set(IO.read_video_file_names(args.inputpath))
# video_file_paths = [path.join(args.inputpath, video_file_name)
# for video_file_name in video_file_names]
# elif path.isfile(args.inputpath):
# if args.inputpath[-3:] == 'txt':
# if args.inputlistrootdirpath is None:
# raise ValueError('--inputlistrootdirpath must be specified when using a'
# ' text file as the input.')
# with open(args.inputpath, newline='') as input_file:
# video_file_paths = []
#
# for line in input_file.readlines():
# line = line.rstrip()
# video_file_path = line.lstrip(args.inputlistrootdirpath)
# video_file_path = path.join('/media/root', video_file_path)
#
# if path.isfile(video_file_path):
# video_file_paths.append(video_file_path)
# else:
# logging.warning('The video file at host path {} could not be found '
# 'at mapped path {} and will not be processed'.
# format(line, video_file_path))
# else:
# video_file_paths = [args.inputpath]
# else:
# raise ValueError('The video file/folder specified at the path {} could '
# 'not be found.'.format(args.inputpath))
models_root_dir_path = path.join(snva_home, args.modelsdirpath)
models_dir_path = path.join(models_root_dir_path, args.modelname)
logging.debug('models_dir_path set to {}'.format(models_dir_path))
# model_file_path = path.join(models_dir_path, args.protobuffilename)
#
# if not path.isfile(model_file_path):
# raise ValueError('The model specified at the path {} could not be '
# 'found.'.format(model_file_path))
#
# logging.debug('model_file_path set to {}'.format(model_file_path))
model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')
if not path.isfile(model_input_size_file_path):
raise ValueError('The model input size file specified at the path {} '
'could not be found.'.format(model_input_size_file_path))
logging.debug('model_input_size_file_path set to {}'.format(
model_input_size_file_path))
with open(model_input_size_file_path) as file:
model_input_size_string = file.readline().rstrip()
valid_size_set = ['224', '299']
if model_input_size_string not in valid_size_set:
raise ValueError('The model input size is not in the set {}.'.format(
valid_size_set))
model_input_size = int(model_input_size_string)
# if logpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.outputpath == 'reports':
output_dir_path = path.join(snva_home, args.outputpath)
else:
output_dir_path = args.outputpath
logging.info("Output path set to: {}".format(output_dir_path))
if not path.isdir(output_dir_path):
os.makedirs(output_dir_path)
if args.classnamesfilepath is None \
or not path.isfile(args.classnamesfilepath):
class_names_path = path.join(models_root_dir_path, 'class_names.txt')
else:
class_names_path = args.classnamesfilepath
logging.debug('labels path set to: {}'.format(class_names_path))
num_processes = args.numprocesses
class_name_map = IO.read_class_names(class_names_path)
return_code_queue_map = {}
child_logger_thread_map = {}
child_process_map = {}
total_num_processed_videos = 0
total_num_processed_frames = 0
total_analysis_duration = 0
def start_video_processor(video_file_path):
# Before popping the next video off of the list and creating a process to
# scan it, check to see if fewer than logical_device_count + 1 processes are
# active. If not, Wait for a child process to release its semaphore
# acquisition. If so, acquire the semaphore, pop the next video name,
# create the next child process, and pass the semaphore to it
return_code_queue = Queue()
return_code_queue_map[video_file_path] = return_code_queue
logging.debug('creating new child process.')
child_log_queue = Queue()
child_logger_thread = Thread(target=child_logger_fn,
args=(log_queue, child_log_queue))
child_logger_thread.start()
child_logger_thread_map[video_file_path] = child_logger_thread
if 'signalstate' == args.processormode:
child_process = Process(
target=process_video_signalstate,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writebbox, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
else:
child_process = Process(
target=process_video,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writeinferencereports, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
logging.debug('starting child process.')
child_process.start()
child_process_map[video_file_path] = child_process
async def close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, websocket_conn):
for video_file_path in list(return_code_queue_map.keys()):
return_code_queue = return_code_queue_map[video_file_path]
try:
return_code_map = return_code_queue.get_nowait()
return_code = return_code_map['return_code']
return_value = return_code_map['return_value']
child_process = child_process_map[video_file_path]
logging.debug(
'child process {} returned with exit code {} and exit value '
'{}'.format(child_process.pid, return_code, return_value))
if return_code == 'success':
total_num_processed_videos += 1
total_num_processed_frames += return_value
total_analysis_duration += return_code_map['analysis_duration']
logging.info('notifying control node of completion')
complete_request = json.dumps({
'action': 'COMPLETE',
'video': os.path.basename(video_file_path),
'output': return_code_map['output_locations']})
await websocket_conn.send(complete_request)
child_logger_thread = child_logger_thread_map[video_file_path]
logging.debug('joining logger thread for child process {}'.format(
child_process.pid))
child_logger_thread.join(timeout=15)
if child_logger_thread.is_alive():
logging.warning(
'logger thread for child process {} remained alive following join '
'timeout'.format(child_process.pid))
logging.debug('joining child process {}'.format(child_process.pid))
child_process.join(timeout=15)
# if the child process has not yet terminated, kill the child process at
# the risk of losing any log message not yet buffered by the main logger
try:
os.kill(child_process.pid, signal.SIGKILL)
logging.warning(
'child process {} remained alive following join timeout and had to '
'be killed'.format(child_process.pid))
except:
pass
return_code_queue.close()
return_code_queue_map.pop(video_file_path)
child_logger_thread_map.pop(video_file_path)
child_process_map.pop(video_file_path)
except Empty:
pass
return total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration
start = time()
sleep_duration = 1
breakLoop = False
connectionId = None
isIdle = False
while True:
try:
if breakLoop:
break
wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
if connectionId is not None:
wsUrl = wsUrl + '?id=' + connectionId
logging.debug("Connecting with URL {}".format(wsUrl))
async with ws.connect(wsUrl) as conn:
response = await conn.recv()
response = json.loads(response)
logging.info(response)
if response['action'] != 'CONNECTION_SUCCESS':
raise ConnectionError(
'control node connection failed with response: {}'.format(response))
if connectionId is None:
connectionId = response['id']
logging.debug("Assigned id {}".format(connectionId))
while True:
# block if num_processes child processes are active
while len(return_code_queue_map) >= num_processes:
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
sleep(sleep_duration)
try: # todo poll for termination signal from control node
_ = main_interrupt_queue.get_nowait()
logging.debug(
'breaking out of child process generation following interrupt signal')
break
except:
pass
if not isIdle:
logging.info('requesting video')
request = json.dumps({'action': 'REQUEST_VIDEO'})
await conn.send(request)
logging.info('reading response')
response = await conn.recv()
else:
# If idle, we will try to close completed processors until all are done
while len(return_code_queue_map) > 0:
# Before checking for completed processes, check for a new message
logging.info('Checking for new message')
try:
# If we get a response quickly, break our waiting loop and process the command
response = await asyncio.wait_for(conn.recv(), 1)
break
except asyncio.TimeoutError:
# Otherwise, go back to finishing our current tasks
logging.debug('No new message from control node, continuing...')
pass
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
sleep(sleep_duration)
# Once all are complete, if still idle we have no work left to do - we just wait for a new message
response = await conn.recv()
response = json.loads(response)
if response['action'] == 'STATUS_REQUEST':
logging.info('control node requested status request')
pass
elif response['action'] == 'CEASE_REQUESTS':
logging.info('control node has no more videos to process')
isIdle = True
pass
elif response['action'] == 'RESUME_REQUESTS':
logging.info('control node has instructed to resume requests')
isIdle = False
pass
elif response['action'] == 'SHUTDOWN':
logging.info('control node requested shutdown')
breakLoop = True
break
elif response['action'] == 'PROCESS':
# TODO Prepend input path
video_file_path = os.path.join(args.inputpath, response['path'])
request_received = json.dumps({'action': 'REQUEST_RECEIVED', 'video': response['path']})
await conn.send(request_received)
try:
start_video_processor(video_file_path)
except Exception as e:
logging.error('an unknown error has occured while processing {}'.format(video_file_path))
logging.error(e)
else:
raise ConnectionError(
'control node replied with unexpected response: {}'.format(response))
logging.debug('{} child processes remain enqueued'.format(len(return_code_queue_map)))
while len(return_code_queue_map) > 0:
#logging.debug('waiting for the final {} child processes to '
# 'terminate'.format(len(return_code_queue_map)))
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
#logging.debug('sleeping for {} seconds'.format(sleep_duration))
sleep(sleep_duration)
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'snva {} processed a total of {} videos and {} frames in:'.format(
snva_version_string, total_num_processed_videos,
total_num_processed_frames))
logging.info(processing_duration)
logging.info('Video analysis alone spanned a cumulative {:.02f} '
'seconds'.format(total_analysis_duration))
logging.info('exiting snva {} main process'.format(snva_version_string))
breakLoop = True
except socket.gaierror:
# log something
logging.info('gaierror')
continue
except ConnectionRefusedError:
# log something else
logging.info('connection refused')
break
except ws.exceptions.ConnectionClosed:
logging.info('Connection lost. Attempting reconnect...')
continue
except Exception as e:
logging.error("Unknown Exception")
logging.error(e)
raise e
if breakLoop:
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='SHRP2 NDS Video Analytics built on TensorFlow')
parser.add_argument('--batchsize', '-bs', type=int, default=32,
help='Number of concurrent neural net inputs')
parser.add_argument('--binarizeprobs', '-b', action='store_true',
help='Round probs to zero or one. For distributions with '
' two 0.5 values, both will be rounded up to 1.0')
parser.add_argument('--classnamesfilepath', '-cnfp',
help='Path to the class ids/names text file.')
parser.add_argument('--controlnodehost', '-cnh', default='localhost:8080',
help='control node colon-separated host name or IP and '
'port')
parser.add_argument('--numprocesses', '-np', type=int, default=3,
help='Number of videos to process at one time')
parser.add_argument('--crop', '-c', action='store_true',
help='Crop video frames to [offsetheight, offsetwidth, '
'targetheight, targetwidth]')
parser.add_argument('--cropheight', '-ch', type=int, default=320,
help='y-component of bottom-right corner of crop.')
parser.add_argument('--cropwidth', '-cw', type=int, default=474,
help='x-component of bottom-right corner of crop.')
parser.add_argument('--cropx', '-cx', type=int, default=2,
help='x-component of top-left corner of crop.')
parser.add_argument('--cropy', '-cy', type=int, default=0,
help='y-component of top-left corner of crop.')
parser.add_argument('--deinterlace', '-d', action='store_true',
help='Apply de-interlacing to video frames during '
'extraction.')
parser.add_argument('--writebbox', '-bb', action='store_true',
help='Create JSON files with bounding box data for signal state')
# parser.add_argument('--excludepreviouslyprocessed', '-epp',
# action='store_true',
# help='Skip processing of videos for which reports '
# 'already exist in outputpath.')
parser.add_argument('--extracttimestamps', '-et', action='store_true',
help='Crop timestamps out of video frames and map them to'
' strings for inclusion in the output CSV.')
parser.add_argument('--gpumemoryfraction', '-gmf', type=float, default=0.9,
help='% of GPU memory available to this process.')
parser.add_argument('--inputpath', '-ip', required=True,
help='Path to a single video file, a folder containing '
'video files, or a text file that lists absolute '
'video file paths.')
parser.add_argument('--loglevel', '-ll', default='info',
help='Defaults to \'info\'. Pass \'debug\' or \'error\' '
'for verbose or minimal logging, respectively.')
parser.add_argument('--logmode', '-lm', default='verbose',
help='If verbose, log to file and console. If silent, '
'log to file only.')
parser.add_argument('--logpath', '-l', default='logs',
help='Path to the directory where log files are stored.')
parser.add_argument('--logmaxbytes', '-lmb', type=int, default=2**23,
help='File size in bytes at which the log rolls over.')
parser.add_argument('--maxanalyzerthreads', '-mat', type=int,
default=4,
help='Maximum number of threads to assign to each video '
'processor')
parser.add_argument('--modelsdirpath', '-mdp',
default='models/work_zone_scene_detection',
help='Path to the parent directory of model directories.')
parser.add_argument('--modelname', '-mn', default='mobilenet_v2',
help='The name of the model directory under modelsdirpath to use.')
parser.add_argument('--modelsignaturename', '-msn', default='serving_default',
help='Name of the signature that specifies what model is '
'being served, and that model\'s input and output '
'tensors')
parser.add_argument('--modelserverhost', '-msh', default='0.0.0.0:8500',
help='tensorflow serving colon-separated host name or IP '
'and port')
parser.add_argument('--numchannels', '-nc', type=int, default=3,
help='The fourth dimension of image batches.')
parser.add_argument('--numprocessesperdevice', '-nppd', type=int, default=1,
help='The number of instances of inference to perform on '
'each device.')
parser.add_argument('--protobuffilename', '-pbfn', default='model.pb',
help='Name of the model protobuf file.')
parser.add_argument('--outputpath', '-op', default='reports',
help='Path to the directory where reports are stored.')
parser.add_argument('--smoothprobs', '-sp', action='store_true',
help='Apply class-wise smoothing across video frame class'
' probability distributions.')
parser.add_argument('--smoothingfactor', '-sf', type=int, default=16,
help='The class-wise probability smoothing factor.')
parser.add_argument('--timestampheight', '-th', type=int, default=16,
help='The length of the y-dimension of the timestamp '
'overlay.')
parser.add_argument('--timestampmaxwidth', '-tw', type=int, default=160,
help='The length of the x-dimension of the timestamp '
'overlay.')
parser.add_argument('--timestampx', '-tx', type=int, default=25,
help='x-component of top-left corner of timestamp '
'(before cropping).')
parser.add_argument('--timestampy', '-ty', type=int, default=340,
help='y-component of top-left corner of timestamp '
'(before cropping).')
parser.add_argument('--writeeventreports', '-wer', type=bool, default=True,
help='Output a CVS file for each video containing one or '
'more feature events')
parser.add_argument('--writeinferencereports', '-wir', type=bool,
default=False,
help='For every video, output a CSV file containing a '
'probability distribution over class labels, a '
'timestamp, and a frame number for each frame')
parser.add_argument('--clocktype', '-ct', default='wall',
help='Specify whether profiling should use "gpu" or "wall" clock type')
parser.add_argument('--profformat', '-pfmt', default='pstat',
help='Specify whether profiling should save output in "pstat" or "callgrind" formats')
parser.add_argument('--processormode', '-pm', default='workzone',
help='Specify wheter processor should use "workzone", "weather", or "signalstate" pipelines')
args = parser.parse_args()
try:
snva_home = os.environ['SNVA_HOME']
except KeyError:
snva_home = '.'
snva_version_string = 'v0.1.2'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Define our log level based on arguments
if args.loglevel == 'error':
log_level = logging.ERROR
elif args.loglevel == 'debug':
log_level = logging.DEBUG
else:
log_level = logging.INFO
# if logpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.logpath == 'logs':
logs_dir_path = path.join(snva_home, args.logpath)
else:
logs_dir_path = args.logpath
# Configure our log in the main process to write to a file
if path.exists(logs_dir_path):
if path.isfile(logs_dir_path):
raise ValueError('The specified logpath {} is expected to be a '
'directory, not a file.'.format(logs_dir_path))
else:
os.makedirs(logs_dir_path)
try:
log_file_name = 'snva_' + socket.getfqdn() + '.log'
except:
log_file_name = 'snva.log'
log_file_path = path.join(logs_dir_path, log_file_name)
log_format = '%(asctime)s:%(processName)s:%(process)d:%(levelname)s:' \
'%(module)s:%(lineno)d:%(funcName)s:%(message)s'
logger_script_path = path.join(snva_home, 'utils/logger.py')
log_file_max_bytes = '{}'.format(args.logmaxbytes)
stdin = os.dup(0)
logger_subprocess = Popen(
['python', logger_script_path, log_file_path, log_format, args.loglevel,
args.logmode, log_file_max_bytes, '{}'.format(stdin)], stdout=PIPE)
# wait for logger.py to indicate readiness
_ = logger_subprocess.stdout.readline()
log_handlers = [SocketHandler(
host='localhost', port=logging.handlers.DEFAULT_TCP_LOGGING_PORT)]
valid_log_modes = ['verbose', 'silent']
if args.logmode == 'verbose':
log_handlers.append(logging.StreamHandler())
elif not args.logmode == 'silent':
raise ValueError(
'The specified logmode is not in the set {}.'.format(valid_log_modes))
logging.basicConfig(level=log_level, format=log_format, handlers=log_handlers)
log_queue = Queue()
logger_thread = Thread(target=main_logger_fn, args=(log_queue,))
logger_thread.start()
logging.debug('SNVA_HOME set to {}'.format(snva_home))
main_interrupt_queue = Queue()
try:
asyncio.get_event_loop().run_until_complete(main())
except Exception as e:
logging.error(e)
logging.debug('signaling logger thread to end service.')
log_queue.put(None)
logger_thread.join()
logging.shutdown()
logger_subprocess.terminate() | while True:
try:
message = log_queue.get()
if message is None:
break
logger = logging.getLogger(__name__)
logger.handle(message)
except Exception as e:
logging.error(e)
break | identifier_body |
snva.py | import argparse
import asyncio
import json
import logging
from logging.handlers import QueueHandler, SocketHandler
from multiprocessing import Process, Queue
import os
import platform
from queue import Empty
import signal
import socket
from subprocess import PIPE, Popen
from threading import Thread
from time import sleep, time
from utils.io import IO
from utils.processor import process_video, process_video_signalstate
import websockets as ws
path = os.path
logger = logging.getLogger('websockets')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def main_logger_fn(log_queue):
    """Main-process logging service: drain ``log_queue`` and emit each record.

    Blocks on the queue until a ``None`` sentinel arrives; any unexpected
    error is logged and ends the service loop.
    """
    while True:
        try:
            record = log_queue.get()
        except Exception as error:
            logging.error(error)
            break
        if record is None:
            break
        try:
            logging.getLogger(__name__).handle(record)
        except Exception as error:
            logging.error(error)
            break
# Logger thread: listens for updates to log queue and writes them as they arrive
# Terminates after we add None to the queue
def child_logger_fn(main_log_queue, child_log_queue):
    """Forward log records from one child process to the main logging queue.

    Terminates when a ``None`` sentinel is received; unexpected errors are
    logged and stop the forwarding loop.
    """
    while True:
        try:
            item = child_log_queue.get()
            if item is None:
                break
            main_log_queue.put(item)
        except Exception as error:
            logging.error(error)
            break
def stringify_command(arg_list):
    """Render a subprocess argument list as a single printable string.

    Args:
      arg_list: sequence of command-line tokens.

    Returns:
      A string of the form ``'command string: <tokens joined by spaces>'``.
    """
    # ' '.join avoids the O(n^2) repeated concatenation and the IndexError
    # the original raised on an empty argument list.
    return 'command string: {}'.format(' '.join(arg_list))
# TODO: accommodate an unbounded number of valid process counts
def get_valid_num_processes_per_device(device_type):
    """Return the valid per-device process counts (currently 1..cpu_count).

    ``device_type`` is presently ignored; the old even-divisor heuristic
    is preserved below in commented-out form for future reference.
    """
    # valid_n_procs = {1, 2}
    # if device_type == 'cpu':
    #   n_cpus = os.cpu_count()
    #   n_procs = 4
    #   while n_procs <= n_cpus:
    #     k = (n_cpus - n_procs) / n_procs
    #     if k == int(k):
    #       valid_n_procs.add(n_procs)
    #     n_procs += 2
    # return valid_n_procs
    return list(range(1, os.cpu_count() + 1))
async def main():
logging.info('entering snva {} main process'.format(snva_version_string))
# total_num_video_to_process = None
def interrupt_handler(signal_number, _):
logging.warning('Main process received interrupt signal '
'{}.'.format(signal_number))
main_interrupt_queue.put_nowait('_')
# if total_num_video_to_process is None \
# or total_num_video_to_process == len(video_file_paths):
# Signal the logging thread to finish up
logging.debug('signaling logger thread to end service.')
log_queue.put_nowait(None)
logger_thread.join()
logging.shutdown()
signal.signal(signal.SIGINT, interrupt_handler)
try:
ffmpeg_path = os.environ['FFMPEG_HOME']
except KeyError:
logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
'to use default ffmpeg binary location.')
if platform.system() == 'Windows':
ffmpeg_path = 'ffmpeg.exe'
else:
ffmpeg_path = '/usr/local/bin/ffmpeg'
if not path.exists(ffmpeg_path):
ffmpeg_path = '/usr/bin/ffmpeg'
logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))
try:
ffprobe_path = os.environ['FFPROBE_HOME']
except KeyError:
logging.warning('Environment variable FFPROBE_HOME not set. '
'Attempting to use default ffprobe binary location.')
if platform.system() == 'Windows':
ffprobe_path = 'ffprobe.exe'
else:
ffprobe_path = '/usr/local/bin/ffprobe'
if not path.exists(ffprobe_path):
|
logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))
# # TODO validate all video file paths in the provided text file if args.inputpath is a text file
# if path.isdir(args.inputpath):
# video_file_names = set(IO.read_video_file_names(args.inputpath))
# video_file_paths = [path.join(args.inputpath, video_file_name)
# for video_file_name in video_file_names]
# elif path.isfile(args.inputpath):
# if args.inputpath[-3:] == 'txt':
# if args.inputlistrootdirpath is None:
# raise ValueError('--inputlistrootdirpath must be specified when using a'
# ' text file as the input.')
# with open(args.inputpath, newline='') as input_file:
# video_file_paths = []
#
# for line in input_file.readlines():
# line = line.rstrip()
# video_file_path = line.lstrip(args.inputlistrootdirpath)
# video_file_path = path.join('/media/root', video_file_path)
#
# if path.isfile(video_file_path):
# video_file_paths.append(video_file_path)
# else:
# logging.warning('The video file at host path {} could not be found '
# 'at mapped path {} and will not be processed'.
# format(line, video_file_path))
# else:
# video_file_paths = [args.inputpath]
# else:
# raise ValueError('The video file/folder specified at the path {} could '
# 'not be found.'.format(args.inputpath))
models_root_dir_path = path.join(snva_home, args.modelsdirpath)
models_dir_path = path.join(models_root_dir_path, args.modelname)
logging.debug('models_dir_path set to {}'.format(models_dir_path))
# model_file_path = path.join(models_dir_path, args.protobuffilename)
#
# if not path.isfile(model_file_path):
# raise ValueError('The model specified at the path {} could not be '
# 'found.'.format(model_file_path))
#
# logging.debug('model_file_path set to {}'.format(model_file_path))
model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')
if not path.isfile(model_input_size_file_path):
raise ValueError('The model input size file specified at the path {} '
'could not be found.'.format(model_input_size_file_path))
logging.debug('model_input_size_file_path set to {}'.format(
model_input_size_file_path))
with open(model_input_size_file_path) as file:
model_input_size_string = file.readline().rstrip()
valid_size_set = ['224', '299']
if model_input_size_string not in valid_size_set:
raise ValueError('The model input size is not in the set {}.'.format(
valid_size_set))
model_input_size = int(model_input_size_string)
# if logpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.outputpath == 'reports':
output_dir_path = path.join(snva_home, args.outputpath)
else:
output_dir_path = args.outputpath
logging.info("Output path set to: {}".format(output_dir_path))
if not path.isdir(output_dir_path):
os.makedirs(output_dir_path)
if args.classnamesfilepath is None \
or not path.isfile(args.classnamesfilepath):
class_names_path = path.join(models_root_dir_path, 'class_names.txt')
else:
class_names_path = args.classnamesfilepath
logging.debug('labels path set to: {}'.format(class_names_path))
num_processes = args.numprocesses
class_name_map = IO.read_class_names(class_names_path)
return_code_queue_map = {}
child_logger_thread_map = {}
child_process_map = {}
total_num_processed_videos = 0
total_num_processed_frames = 0
total_analysis_duration = 0
def start_video_processor(video_file_path):
# Before popping the next video off of the list and creating a process to
# scan it, check to see if fewer than logical_device_count + 1 processes are
# active. If not, Wait for a child process to release its semaphore
# acquisition. If so, acquire the semaphore, pop the next video name,
# create the next child process, and pass the semaphore to it
return_code_queue = Queue()
return_code_queue_map[video_file_path] = return_code_queue
logging.debug('creating new child process.')
child_log_queue = Queue()
child_logger_thread = Thread(target=child_logger_fn,
args=(log_queue, child_log_queue))
child_logger_thread.start()
child_logger_thread_map[video_file_path] = child_logger_thread
if 'signalstate' == args.processormode:
child_process = Process(
target=process_video_signalstate,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writebbox, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
else:
child_process = Process(
target=process_video,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writeinferencereports, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
logging.debug('starting child process.')
child_process.start()
child_process_map[video_file_path] = child_process
async def close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, websocket_conn):
for video_file_path in list(return_code_queue_map.keys()):
return_code_queue = return_code_queue_map[video_file_path]
try:
return_code_map = return_code_queue.get_nowait()
return_code = return_code_map['return_code']
return_value = return_code_map['return_value']
child_process = child_process_map[video_file_path]
logging.debug(
'child process {} returned with exit code {} and exit value '
'{}'.format(child_process.pid, return_code, return_value))
if return_code == 'success':
total_num_processed_videos += 1
total_num_processed_frames += return_value
total_analysis_duration += return_code_map['analysis_duration']
logging.info('notifying control node of completion')
complete_request = json.dumps({
'action': 'COMPLETE',
'video': os.path.basename(video_file_path),
'output': return_code_map['output_locations']})
await websocket_conn.send(complete_request)
child_logger_thread = child_logger_thread_map[video_file_path]
logging.debug('joining logger thread for child process {}'.format(
child_process.pid))
child_logger_thread.join(timeout=15)
if child_logger_thread.is_alive():
logging.warning(
'logger thread for child process {} remained alive following join '
'timeout'.format(child_process.pid))
logging.debug('joining child process {}'.format(child_process.pid))
child_process.join(timeout=15)
# if the child process has not yet terminated, kill the child process at
# the risk of losing any log message not yet buffered by the main logger
try:
os.kill(child_process.pid, signal.SIGKILL)
logging.warning(
'child process {} remained alive following join timeout and had to '
'be killed'.format(child_process.pid))
except:
pass
return_code_queue.close()
return_code_queue_map.pop(video_file_path)
child_logger_thread_map.pop(video_file_path)
child_process_map.pop(video_file_path)
except Empty:
pass
return total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration
start = time()
sleep_duration = 1
breakLoop = False
connectionId = None
isIdle = False
while True:
try:
if breakLoop:
break
wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
if connectionId is not None:
wsUrl = wsUrl + '?id=' + connectionId
logging.debug("Connecting with URL {}".format(wsUrl))
async with ws.connect(wsUrl) as conn:
response = await conn.recv()
response = json.loads(response)
logging.info(response)
if response['action'] != 'CONNECTION_SUCCESS':
raise ConnectionError(
'control node connection failed with response: {}'.format(response))
if connectionId is None:
connectionId = response['id']
logging.debug("Assigned id {}".format(connectionId))
while True:
# block if num_processes child processes are active
while len(return_code_queue_map) >= num_processes:
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
sleep(sleep_duration)
try: # todo poll for termination signal from control node
_ = main_interrupt_queue.get_nowait()
logging.debug(
'breaking out of child process generation following interrupt signal')
break
except:
pass
if not isIdle:
logging.info('requesting video')
request = json.dumps({'action': 'REQUEST_VIDEO'})
await conn.send(request)
logging.info('reading response')
response = await conn.recv()
else:
# If idle, we will try to close completed processors until all are done
while len(return_code_queue_map) > 0:
# Before checking for completed processes, check for a new message
logging.info('Checking for new message')
try:
# If we get a response quickly, break our waiting loop and process the command
response = await asyncio.wait_for(conn.recv(), 1)
break
except asyncio.TimeoutError:
# Otherwise, go back to finishing our current tasks
logging.debug('No new message from control node, continuing...')
pass
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
sleep(sleep_duration)
# Once all are complete, if still idle we have no work left to do - we just wait for a new message
response = await conn.recv()
response = json.loads(response)
if response['action'] == 'STATUS_REQUEST':
logging.info('control node requested status request')
pass
elif response['action'] == 'CEASE_REQUESTS':
logging.info('control node has no more videos to process')
isIdle = True
pass
elif response['action'] == 'RESUME_REQUESTS':
logging.info('control node has instructed to resume requests')
isIdle = False
pass
elif response['action'] == 'SHUTDOWN':
logging.info('control node requested shutdown')
breakLoop = True
break
elif response['action'] == 'PROCESS':
# TODO Prepend input path
video_file_path = os.path.join(args.inputpath, response['path'])
request_received = json.dumps({'action': 'REQUEST_RECEIVED', 'video': response['path']})
await conn.send(request_received)
try:
start_video_processor(video_file_path)
except Exception as e:
logging.error('an unknown error has occured while processing {}'.format(video_file_path))
logging.error(e)
else:
raise ConnectionError(
'control node replied with unexpected response: {}'.format(response))
logging.debug('{} child processes remain enqueued'.format(len(return_code_queue_map)))
while len(return_code_queue_map) > 0:
#logging.debug('waiting for the final {} child processes to '
# 'terminate'.format(len(return_code_queue_map)))
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
#logging.debug('sleeping for {} seconds'.format(sleep_duration))
sleep(sleep_duration)
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'snva {} processed a total of {} videos and {} frames in:'.format(
snva_version_string, total_num_processed_videos,
total_num_processed_frames))
logging.info(processing_duration)
logging.info('Video analysis alone spanned a cumulative {:.02f} '
'seconds'.format(total_analysis_duration))
logging.info('exiting snva {} main process'.format(snva_version_string))
breakLoop = True
except socket.gaierror:
# log something
logging.info('gaierror')
continue
except ConnectionRefusedError:
# log something else
logging.info('connection refused')
break
except ws.exceptions.ConnectionClosed:
logging.info('Connection lost. Attempting reconnect...')
continue
except Exception as e:
logging.error("Unknown Exception")
logging.error(e)
raise e
if breakLoop:
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='SHRP2 NDS Video Analytics built on TensorFlow')
parser.add_argument('--batchsize', '-bs', type=int, default=32,
help='Number of concurrent neural net inputs')
parser.add_argument('--binarizeprobs', '-b', action='store_true',
help='Round probs to zero or one. For distributions with '
' two 0.5 values, both will be rounded up to 1.0')
parser.add_argument('--classnamesfilepath', '-cnfp',
help='Path to the class ids/names text file.')
parser.add_argument('--controlnodehost', '-cnh', default='localhost:8080',
help='control node colon-separated host name or IP and '
'port')
parser.add_argument('--numprocesses', '-np', type=int, default=3,
help='Number of videos to process at one time')
parser.add_argument('--crop', '-c', action='store_true',
help='Crop video frames to [offsetheight, offsetwidth, '
'targetheight, targetwidth]')
parser.add_argument('--cropheight', '-ch', type=int, default=320,
help='y-component of bottom-right corner of crop.')
parser.add_argument('--cropwidth', '-cw', type=int, default=474,
help='x-component of bottom-right corner of crop.')
parser.add_argument('--cropx', '-cx', type=int, default=2,
help='x-component of top-left corner of crop.')
parser.add_argument('--cropy', '-cy', type=int, default=0,
help='y-component of top-left corner of crop.')
parser.add_argument('--deinterlace', '-d', action='store_true',
help='Apply de-interlacing to video frames during '
'extraction.')
parser.add_argument('--writebbox', '-bb', action='store_true',
help='Create JSON files with bounding box data for signal state')
# parser.add_argument('--excludepreviouslyprocessed', '-epp',
# action='store_true',
# help='Skip processing of videos for which reports '
# 'already exist in outputpath.')
parser.add_argument('--extracttimestamps', '-et', action='store_true',
help='Crop timestamps out of video frames and map them to'
' strings for inclusion in the output CSV.')
parser.add_argument('--gpumemoryfraction', '-gmf', type=float, default=0.9,
help='% of GPU memory available to this process.')
parser.add_argument('--inputpath', '-ip', required=True,
help='Path to a single video file, a folder containing '
'video files, or a text file that lists absolute '
'video file paths.')
parser.add_argument('--loglevel', '-ll', default='info',
help='Defaults to \'info\'. Pass \'debug\' or \'error\' '
'for verbose or minimal logging, respectively.')
parser.add_argument('--logmode', '-lm', default='verbose',
help='If verbose, log to file and console. If silent, '
'log to file only.')
parser.add_argument('--logpath', '-l', default='logs',
help='Path to the directory where log files are stored.')
parser.add_argument('--logmaxbytes', '-lmb', type=int, default=2**23,
help='File size in bytes at which the log rolls over.')
parser.add_argument('--maxanalyzerthreads', '-mat', type=int,
default=4,
help='Maximum number of threads to assign to each video '
'processor')
parser.add_argument('--modelsdirpath', '-mdp',
default='models/work_zone_scene_detection',
help='Path to the parent directory of model directories.')
parser.add_argument('--modelname', '-mn', default='mobilenet_v2',
help='The name of the model directory under modelsdirpath to use.')
parser.add_argument('--modelsignaturename', '-msn', default='serving_default',
help='Name of the signature that specifies what model is '
'being served, and that model\'s input and output '
'tensors')
parser.add_argument('--modelserverhost', '-msh', default='0.0.0.0:8500',
help='tensorflow serving colon-separated host name or IP '
'and port')
parser.add_argument('--numchannels', '-nc', type=int, default=3,
help='The fourth dimension of image batches.')
parser.add_argument('--numprocessesperdevice', '-nppd', type=int, default=1,
help='The number of instances of inference to perform on '
'each device.')
parser.add_argument('--protobuffilename', '-pbfn', default='model.pb',
help='Name of the model protobuf file.')
parser.add_argument('--outputpath', '-op', default='reports',
help='Path to the directory where reports are stored.')
parser.add_argument('--smoothprobs', '-sp', action='store_true',
help='Apply class-wise smoothing across video frame class'
' probability distributions.')
parser.add_argument('--smoothingfactor', '-sf', type=int, default=16,
help='The class-wise probability smoothing factor.')
parser.add_argument('--timestampheight', '-th', type=int, default=16,
help='The length of the y-dimension of the timestamp '
'overlay.')
parser.add_argument('--timestampmaxwidth', '-tw', type=int, default=160,
help='The length of the x-dimension of the timestamp '
'overlay.')
parser.add_argument('--timestampx', '-tx', type=int, default=25,
help='x-component of top-left corner of timestamp '
'(before cropping).')
parser.add_argument('--timestampy', '-ty', type=int, default=340,
help='y-component of top-left corner of timestamp '
'(before cropping).')
parser.add_argument('--writeeventreports', '-wer', type=bool, default=True,
help='Output a CVS file for each video containing one or '
'more feature events')
parser.add_argument('--writeinferencereports', '-wir', type=bool,
default=False,
help='For every video, output a CSV file containing a '
'probability distribution over class labels, a '
'timestamp, and a frame number for each frame')
parser.add_argument('--clocktype', '-ct', default='wall',
help='Specify whether profiling should use "gpu" or "wall" clock type')
parser.add_argument('--profformat', '-pfmt', default='pstat',
help='Specify whether profiling should save output in "pstat" or "callgrind" formats')
parser.add_argument('--processormode', '-pm', default='workzone',
help='Specify wheter processor should use "workzone", "weather", or "signalstate" pipelines')
args = parser.parse_args()
try:
snva_home = os.environ['SNVA_HOME']
except KeyError:
snva_home = '.'
snva_version_string = 'v0.1.2'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Define our log level based on arguments
if args.loglevel == 'error':
log_level = logging.ERROR
elif args.loglevel == 'debug':
log_level = logging.DEBUG
else:
log_level = logging.INFO
# if logpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.logpath == 'logs':
logs_dir_path = path.join(snva_home, args.logpath)
else:
logs_dir_path = args.logpath
# Configure our log in the main process to write to a file
if path.exists(logs_dir_path):
if path.isfile(logs_dir_path):
raise ValueError('The specified logpath {} is expected to be a '
'directory, not a file.'.format(logs_dir_path))
else:
os.makedirs(logs_dir_path)
try:
log_file_name = 'snva_' + socket.getfqdn() + '.log'
except:
log_file_name = 'snva.log'
log_file_path = path.join(logs_dir_path, log_file_name)
log_format = '%(asctime)s:%(processName)s:%(process)d:%(levelname)s:' \
'%(module)s:%(lineno)d:%(funcName)s:%(message)s'
logger_script_path = path.join(snva_home, 'utils/logger.py')
log_file_max_bytes = '{}'.format(args.logmaxbytes)
stdin = os.dup(0)
logger_subprocess = Popen(
['python', logger_script_path, log_file_path, log_format, args.loglevel,
args.logmode, log_file_max_bytes, '{}'.format(stdin)], stdout=PIPE)
# wait for logger.py to indicate readiness
_ = logger_subprocess.stdout.readline()
log_handlers = [SocketHandler(
host='localhost', port=logging.handlers.DEFAULT_TCP_LOGGING_PORT)]
valid_log_modes = ['verbose', 'silent']
if args.logmode == 'verbose':
log_handlers.append(logging.StreamHandler())
elif not args.logmode == 'silent':
raise ValueError(
'The specified logmode is not in the set {}.'.format(valid_log_modes))
logging.basicConfig(level=log_level, format=log_format, handlers=log_handlers)
log_queue = Queue()
logger_thread = Thread(target=main_logger_fn, args=(log_queue,))
logger_thread.start()
logging.debug('SNVA_HOME set to {}'.format(snva_home))
main_interrupt_queue = Queue()
try:
asyncio.get_event_loop().run_until_complete(main())
except Exception as e:
logging.error(e)
logging.debug('signaling logger thread to end service.')
log_queue.put(None)
logger_thread.join()
logging.shutdown()
logger_subprocess.terminate() | ffprobe_path = '/usr/bin/ffprobe' | conditional_block |
snva.py | import argparse
import asyncio
import json
import logging
from logging.handlers import QueueHandler, SocketHandler
from multiprocessing import Process, Queue
import os
import platform
from queue import Empty
import signal
import socket
from subprocess import PIPE, Popen
from threading import Thread
from time import sleep, time
from utils.io import IO
from utils.processor import process_video, process_video_signalstate
import websockets as ws
path = os.path
logger = logging.getLogger('websockets')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def main_logger_fn(log_queue):
while True:
try:
message = log_queue.get()
if message is None:
break
logger = logging.getLogger(__name__)
logger.handle(message)
except Exception as e:
logging.error(e)
break
# Logger thread: listens for updates to log queue and writes them as they arrive
# Terminates after we add None to the queue
def child_logger_fn(main_log_queue, child_log_queue):
while True:
try:
message = child_log_queue.get()
if message is None:
break
main_log_queue.put(message)
except Exception as e:
logging.error(e)
break
def stringify_command(arg_list):
command_string = arg_list[0]
for elem in arg_list[1:]:
command_string += ' ' + elem
return 'command string: {}'.format(command_string)
#TODO: accomodate unbounded number of valid process counts
def get_valid_num_processes_per_device(device_type):
# valid_n_procs = {1, 2}
# if device_type == 'cpu':
# n_cpus = os.cpu_count()
# n_procs = 4
# while n_procs <= n_cpus:
# k = (n_cpus - n_procs) / n_procs
# if k == int(k):
# valid_n_procs.add(n_procs)
# n_procs += 2
# return valid_n_procs
return list(range(1, os.cpu_count() + 1))
async def main():
logging.info('entering snva {} main process'.format(snva_version_string))
# total_num_video_to_process = None
def interrupt_handler(signal_number, _):
logging.warning('Main process received interrupt signal '
'{}.'.format(signal_number))
main_interrupt_queue.put_nowait('_')
# if total_num_video_to_process is None \
# or total_num_video_to_process == len(video_file_paths):
# Signal the logging thread to finish up
logging.debug('signaling logger thread to end service.')
log_queue.put_nowait(None)
logger_thread.join()
logging.shutdown()
signal.signal(signal.SIGINT, interrupt_handler)
try:
ffmpeg_path = os.environ['FFMPEG_HOME']
except KeyError:
logging.warning('Environment variable FFMPEG_HOME not set. Attempting '
'to use default ffmpeg binary location.')
if platform.system() == 'Windows':
ffmpeg_path = 'ffmpeg.exe'
else:
ffmpeg_path = '/usr/local/bin/ffmpeg'
if not path.exists(ffmpeg_path):
ffmpeg_path = '/usr/bin/ffmpeg'
logging.debug('FFMPEG path set to: {}'.format(ffmpeg_path))
try:
ffprobe_path = os.environ['FFPROBE_HOME']
except KeyError:
logging.warning('Environment variable FFPROBE_HOME not set. '
'Attempting to use default ffprobe binary location.')
if platform.system() == 'Windows':
ffprobe_path = 'ffprobe.exe'
else:
ffprobe_path = '/usr/local/bin/ffprobe'
if not path.exists(ffprobe_path):
ffprobe_path = '/usr/bin/ffprobe'
logging.debug('FFPROBE path set to: {}'.format(ffprobe_path))
# # TODO validate all video file paths in the provided text file if args.inputpath is a text file
# if path.isdir(args.inputpath):
# video_file_names = set(IO.read_video_file_names(args.inputpath))
# video_file_paths = [path.join(args.inputpath, video_file_name)
# for video_file_name in video_file_names]
# elif path.isfile(args.inputpath):
# if args.inputpath[-3:] == 'txt':
# if args.inputlistrootdirpath is None:
# raise ValueError('--inputlistrootdirpath must be specified when using a'
# ' text file as the input.')
# with open(args.inputpath, newline='') as input_file:
# video_file_paths = []
#
# for line in input_file.readlines():
# line = line.rstrip()
# video_file_path = line.lstrip(args.inputlistrootdirpath)
# video_file_path = path.join('/media/root', video_file_path)
#
# if path.isfile(video_file_path):
# video_file_paths.append(video_file_path)
# else:
# logging.warning('The video file at host path {} could not be found '
# 'at mapped path {} and will not be processed'.
# format(line, video_file_path))
# else:
# video_file_paths = [args.inputpath]
# else:
# raise ValueError('The video file/folder specified at the path {} could '
# 'not be found.'.format(args.inputpath))
models_root_dir_path = path.join(snva_home, args.modelsdirpath)
models_dir_path = path.join(models_root_dir_path, args.modelname)
logging.debug('models_dir_path set to {}'.format(models_dir_path))
# model_file_path = path.join(models_dir_path, args.protobuffilename)
#
# if not path.isfile(model_file_path):
# raise ValueError('The model specified at the path {} could not be '
# 'found.'.format(model_file_path))
#
# logging.debug('model_file_path set to {}'.format(model_file_path))
model_input_size_file_path = path.join(models_dir_path, 'input_size.txt')
if not path.isfile(model_input_size_file_path):
raise ValueError('The model input size file specified at the path {} '
'could not be found.'.format(model_input_size_file_path))
logging.debug('model_input_size_file_path set to {}'.format(
model_input_size_file_path))
with open(model_input_size_file_path) as file:
model_input_size_string = file.readline().rstrip()
valid_size_set = ['224', '299']
if model_input_size_string not in valid_size_set:
raise ValueError('The model input size is not in the set {}.'.format(
valid_size_set))
model_input_size = int(model_input_size_string)
# if logpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.outputpath == 'reports':
output_dir_path = path.join(snva_home, args.outputpath)
else:
output_dir_path = args.outputpath
logging.info("Output path set to: {}".format(output_dir_path))
if not path.isdir(output_dir_path):
os.makedirs(output_dir_path)
if args.classnamesfilepath is None \
or not path.isfile(args.classnamesfilepath):
class_names_path = path.join(models_root_dir_path, 'class_names.txt')
else:
class_names_path = args.classnamesfilepath
logging.debug('labels path set to: {}'.format(class_names_path))
num_processes = args.numprocesses
class_name_map = IO.read_class_names(class_names_path)
return_code_queue_map = {}
child_logger_thread_map = {}
child_process_map = {}
total_num_processed_videos = 0
total_num_processed_frames = 0
total_analysis_duration = 0
def | (video_file_path):
# Before popping the next video off of the list and creating a process to
# scan it, check to see if fewer than logical_device_count + 1 processes are
# active. If not, Wait for a child process to release its semaphore
# acquisition. If so, acquire the semaphore, pop the next video name,
# create the next child process, and pass the semaphore to it
return_code_queue = Queue()
return_code_queue_map[video_file_path] = return_code_queue
logging.debug('creating new child process.')
child_log_queue = Queue()
child_logger_thread = Thread(target=child_logger_fn,
args=(log_queue, child_log_queue))
child_logger_thread.start()
child_logger_thread_map[video_file_path] = child_logger_thread
if 'signalstate' == args.processormode:
child_process = Process(
target=process_video_signalstate,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writebbox, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
else:
child_process = Process(
target=process_video,
name=path.splitext(path.split(video_file_path)[1])[0],
args=(video_file_path, output_dir_path, class_name_map, args.modelname, args.modelsignaturename, args.modelserverhost,model_input_size,
return_code_queue, child_log_queue, log_level,
ffmpeg_path, ffprobe_path, args.crop, args.cropwidth, args.cropheight,
args.cropx, args.cropy, args.extracttimestamps,
args.timestampmaxwidth, args.timestampheight, args.timestampx,
args.timestampy, args.deinterlace, args.numchannels, args.batchsize,
args.smoothprobs, args.smoothingfactor, args.binarizeprobs,
args.writeinferencereports, args.writeeventreports, args.maxanalyzerthreads, args.processormode))
logging.debug('starting child process.')
child_process.start()
child_process_map[video_file_path] = child_process
async def close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, websocket_conn):
for video_file_path in list(return_code_queue_map.keys()):
return_code_queue = return_code_queue_map[video_file_path]
try:
return_code_map = return_code_queue.get_nowait()
return_code = return_code_map['return_code']
return_value = return_code_map['return_value']
child_process = child_process_map[video_file_path]
logging.debug(
'child process {} returned with exit code {} and exit value '
'{}'.format(child_process.pid, return_code, return_value))
if return_code == 'success':
total_num_processed_videos += 1
total_num_processed_frames += return_value
total_analysis_duration += return_code_map['analysis_duration']
logging.info('notifying control node of completion')
complete_request = json.dumps({
'action': 'COMPLETE',
'video': os.path.basename(video_file_path),
'output': return_code_map['output_locations']})
await websocket_conn.send(complete_request)
child_logger_thread = child_logger_thread_map[video_file_path]
logging.debug('joining logger thread for child process {}'.format(
child_process.pid))
child_logger_thread.join(timeout=15)
if child_logger_thread.is_alive():
logging.warning(
'logger thread for child process {} remained alive following join '
'timeout'.format(child_process.pid))
logging.debug('joining child process {}'.format(child_process.pid))
child_process.join(timeout=15)
# if the child process has not yet terminated, kill the child process at
# the risk of losing any log message not yet buffered by the main logger
try:
os.kill(child_process.pid, signal.SIGKILL)
logging.warning(
'child process {} remained alive following join timeout and had to '
'be killed'.format(child_process.pid))
except:
pass
return_code_queue.close()
return_code_queue_map.pop(video_file_path)
child_logger_thread_map.pop(video_file_path)
child_process_map.pop(video_file_path)
except Empty:
pass
return total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration
start = time()
sleep_duration = 1
breakLoop = False
connectionId = None
isIdle = False
while True:
try:
if breakLoop:
break
wsUrl = 'ws://' + args.controlnodehost + '/registerProcess'
if connectionId is not None:
wsUrl = wsUrl + '?id=' + connectionId
logging.debug("Connecting with URL {}".format(wsUrl))
async with ws.connect(wsUrl) as conn:
response = await conn.recv()
response = json.loads(response)
logging.info(response)
if response['action'] != 'CONNECTION_SUCCESS':
raise ConnectionError(
'control node connection failed with response: {}'.format(response))
if connectionId is None:
connectionId = response['id']
logging.debug("Assigned id {}".format(connectionId))
while True:
# block if num_processes child processes are active
while len(return_code_queue_map) >= num_processes:
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
sleep(sleep_duration)
try: # todo poll for termination signal from control node
_ = main_interrupt_queue.get_nowait()
logging.debug(
'breaking out of child process generation following interrupt signal')
break
except:
pass
if not isIdle:
logging.info('requesting video')
request = json.dumps({'action': 'REQUEST_VIDEO'})
await conn.send(request)
logging.info('reading response')
response = await conn.recv()
else:
# If idle, we will try to close completed processors until all are done
while len(return_code_queue_map) > 0:
# Before checking for completed processes, check for a new message
logging.info('Checking for new message')
try:
# If we get a response quickly, break our waiting loop and process the command
response = await asyncio.wait_for(conn.recv(), 1)
break
except asyncio.TimeoutError:
# Otherwise, go back to finishing our current tasks
logging.debug('No new message from control node, continuing...')
pass
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
sleep(sleep_duration)
# Once all are complete, if still idle we have no work left to do - we just wait for a new message
response = await conn.recv()
response = json.loads(response)
if response['action'] == 'STATUS_REQUEST':
logging.info('control node requested status request')
pass
elif response['action'] == 'CEASE_REQUESTS':
logging.info('control node has no more videos to process')
isIdle = True
pass
elif response['action'] == 'RESUME_REQUESTS':
logging.info('control node has instructed to resume requests')
isIdle = False
pass
elif response['action'] == 'SHUTDOWN':
logging.info('control node requested shutdown')
breakLoop = True
break
elif response['action'] == 'PROCESS':
# TODO Prepend input path
video_file_path = os.path.join(args.inputpath, response['path'])
request_received = json.dumps({'action': 'REQUEST_RECEIVED', 'video': response['path']})
await conn.send(request_received)
try:
start_video_processor(video_file_path)
except Exception as e:
logging.error('an unknown error has occured while processing {}'.format(video_file_path))
logging.error(e)
else:
raise ConnectionError(
'control node replied with unexpected response: {}'.format(response))
logging.debug('{} child processes remain enqueued'.format(len(return_code_queue_map)))
while len(return_code_queue_map) > 0:
#logging.debug('waiting for the final {} child processes to '
# 'terminate'.format(len(return_code_queue_map)))
total_num_processed_videos, total_num_processed_frames, \
total_analysis_duration = await close_completed_video_processors(
total_num_processed_videos, total_num_processed_frames,
total_analysis_duration, conn)
# by now, the last device_id_queue_len videos are being processed,
# so we can afford to poll for their completion infrequently
if len(return_code_queue_map) > 0:
#logging.debug('sleeping for {} seconds'.format(sleep_duration))
sleep(sleep_duration)
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'snva {} processed a total of {} videos and {} frames in:'.format(
snva_version_string, total_num_processed_videos,
total_num_processed_frames))
logging.info(processing_duration)
logging.info('Video analysis alone spanned a cumulative {:.02f} '
'seconds'.format(total_analysis_duration))
logging.info('exiting snva {} main process'.format(snva_version_string))
breakLoop = True
except socket.gaierror:
# log something
logging.info('gaierror')
continue
except ConnectionRefusedError:
# log something else
logging.info('connection refused')
break
except ws.exceptions.ConnectionClosed:
logging.info('Connection lost. Attempting reconnect...')
continue
except Exception as e:
logging.error("Unknown Exception")
logging.error(e)
raise e
if breakLoop:
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='SHRP2 NDS Video Analytics built on TensorFlow')
parser.add_argument('--batchsize', '-bs', type=int, default=32,
help='Number of concurrent neural net inputs')
parser.add_argument('--binarizeprobs', '-b', action='store_true',
help='Round probs to zero or one. For distributions with '
' two 0.5 values, both will be rounded up to 1.0')
parser.add_argument('--classnamesfilepath', '-cnfp',
help='Path to the class ids/names text file.')
parser.add_argument('--controlnodehost', '-cnh', default='localhost:8080',
help='control node colon-separated host name or IP and '
'port')
parser.add_argument('--numprocesses', '-np', type=int, default=3,
help='Number of videos to process at one time')
parser.add_argument('--crop', '-c', action='store_true',
help='Crop video frames to [offsetheight, offsetwidth, '
'targetheight, targetwidth]')
parser.add_argument('--cropheight', '-ch', type=int, default=320,
help='y-component of bottom-right corner of crop.')
parser.add_argument('--cropwidth', '-cw', type=int, default=474,
help='x-component of bottom-right corner of crop.')
parser.add_argument('--cropx', '-cx', type=int, default=2,
help='x-component of top-left corner of crop.')
parser.add_argument('--cropy', '-cy', type=int, default=0,
help='y-component of top-left corner of crop.')
parser.add_argument('--deinterlace', '-d', action='store_true',
help='Apply de-interlacing to video frames during '
'extraction.')
parser.add_argument('--writebbox', '-bb', action='store_true',
help='Create JSON files with bounding box data for signal state')
# parser.add_argument('--excludepreviouslyprocessed', '-epp',
# action='store_true',
# help='Skip processing of videos for which reports '
# 'already exist in outputpath.')
parser.add_argument('--extracttimestamps', '-et', action='store_true',
help='Crop timestamps out of video frames and map them to'
' strings for inclusion in the output CSV.')
parser.add_argument('--gpumemoryfraction', '-gmf', type=float, default=0.9,
help='% of GPU memory available to this process.')
parser.add_argument('--inputpath', '-ip', required=True,
help='Path to a single video file, a folder containing '
'video files, or a text file that lists absolute '
'video file paths.')
parser.add_argument('--loglevel', '-ll', default='info',
help='Defaults to \'info\'. Pass \'debug\' or \'error\' '
'for verbose or minimal logging, respectively.')
parser.add_argument('--logmode', '-lm', default='verbose',
help='If verbose, log to file and console. If silent, '
'log to file only.')
parser.add_argument('--logpath', '-l', default='logs',
help='Path to the directory where log files are stored.')
parser.add_argument('--logmaxbytes', '-lmb', type=int, default=2**23,
help='File size in bytes at which the log rolls over.')
parser.add_argument('--maxanalyzerthreads', '-mat', type=int,
default=4,
help='Maximum number of threads to assign to each video '
'processor')
parser.add_argument('--modelsdirpath', '-mdp',
default='models/work_zone_scene_detection',
help='Path to the parent directory of model directories.')
parser.add_argument('--modelname', '-mn', default='mobilenet_v2',
help='The name of the model directory under modelsdirpath to use.')
parser.add_argument('--modelsignaturename', '-msn', default='serving_default',
help='Name of the signature that specifies what model is '
'being served, and that model\'s input and output '
'tensors')
parser.add_argument('--modelserverhost', '-msh', default='0.0.0.0:8500',
help='tensorflow serving colon-separated host name or IP '
'and port')
parser.add_argument('--numchannels', '-nc', type=int, default=3,
help='The fourth dimension of image batches.')
parser.add_argument('--numprocessesperdevice', '-nppd', type=int, default=1,
help='The number of instances of inference to perform on '
'each device.')
parser.add_argument('--protobuffilename', '-pbfn', default='model.pb',
help='Name of the model protobuf file.')
parser.add_argument('--outputpath', '-op', default='reports',
help='Path to the directory where reports are stored.')
parser.add_argument('--smoothprobs', '-sp', action='store_true',
help='Apply class-wise smoothing across video frame class'
' probability distributions.')
parser.add_argument('--smoothingfactor', '-sf', type=int, default=16,
help='The class-wise probability smoothing factor.')
parser.add_argument('--timestampheight', '-th', type=int, default=16,
help='The length of the y-dimension of the timestamp '
'overlay.')
parser.add_argument('--timestampmaxwidth', '-tw', type=int, default=160,
help='The length of the x-dimension of the timestamp '
'overlay.')
parser.add_argument('--timestampx', '-tx', type=int, default=25,
help='x-component of top-left corner of timestamp '
'(before cropping).')
parser.add_argument('--timestampy', '-ty', type=int, default=340,
help='y-component of top-left corner of timestamp '
'(before cropping).')
parser.add_argument('--writeeventreports', '-wer', type=bool, default=True,
help='Output a CVS file for each video containing one or '
'more feature events')
parser.add_argument('--writeinferencereports', '-wir', type=bool,
default=False,
help='For every video, output a CSV file containing a '
'probability distribution over class labels, a '
'timestamp, and a frame number for each frame')
parser.add_argument('--clocktype', '-ct', default='wall',
help='Specify whether profiling should use "gpu" or "wall" clock type')
parser.add_argument('--profformat', '-pfmt', default='pstat',
help='Specify whether profiling should save output in "pstat" or "callgrind" formats')
parser.add_argument('--processormode', '-pm', default='workzone',
help='Specify wheter processor should use "workzone", "weather", or "signalstate" pipelines')
args = parser.parse_args()
try:
snva_home = os.environ['SNVA_HOME']
except KeyError:
snva_home = '.'
snva_version_string = 'v0.1.2'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Define our log level based on arguments
if args.loglevel == 'error':
log_level = logging.ERROR
elif args.loglevel == 'debug':
log_level = logging.DEBUG
else:
log_level = logging.INFO
# if logpath is the default value, expand it using the SNVA_HOME prefix,
# otherwise, use the value explicitly passed by the user
if args.logpath == 'logs':
logs_dir_path = path.join(snva_home, args.logpath)
else:
logs_dir_path = args.logpath
# Configure our log in the main process to write to a file
if path.exists(logs_dir_path):
if path.isfile(logs_dir_path):
raise ValueError('The specified logpath {} is expected to be a '
'directory, not a file.'.format(logs_dir_path))
else:
os.makedirs(logs_dir_path)
try:
log_file_name = 'snva_' + socket.getfqdn() + '.log'
except:
log_file_name = 'snva.log'
log_file_path = path.join(logs_dir_path, log_file_name)
log_format = '%(asctime)s:%(processName)s:%(process)d:%(levelname)s:' \
'%(module)s:%(lineno)d:%(funcName)s:%(message)s'
logger_script_path = path.join(snva_home, 'utils/logger.py')
log_file_max_bytes = '{}'.format(args.logmaxbytes)
stdin = os.dup(0)
logger_subprocess = Popen(
['python', logger_script_path, log_file_path, log_format, args.loglevel,
args.logmode, log_file_max_bytes, '{}'.format(stdin)], stdout=PIPE)
# wait for logger.py to indicate readiness
_ = logger_subprocess.stdout.readline()
log_handlers = [SocketHandler(
host='localhost', port=logging.handlers.DEFAULT_TCP_LOGGING_PORT)]
valid_log_modes = ['verbose', 'silent']
if args.logmode == 'verbose':
log_handlers.append(logging.StreamHandler())
elif not args.logmode == 'silent':
raise ValueError(
'The specified logmode is not in the set {}.'.format(valid_log_modes))
logging.basicConfig(level=log_level, format=log_format, handlers=log_handlers)
log_queue = Queue()
logger_thread = Thread(target=main_logger_fn, args=(log_queue,))
logger_thread.start()
logging.debug('SNVA_HOME set to {}'.format(snva_home))
main_interrupt_queue = Queue()
try:
asyncio.get_event_loop().run_until_complete(main())
except Exception as e:
logging.error(e)
logging.debug('signaling logger thread to end service.')
log_queue.put(None)
logger_thread.join()
logging.shutdown()
logger_subprocess.terminate() | start_video_processor | identifier_name |
selectionmenu.js | /*
SelectionMenu 1.1
http://github.com/molily/selectionmenu
by molily (molily@mailbox.org, http://molily.de/)
EN: SelectionMenu displays a context menu when the user selects some text on the page
DE: SelectionMenu blendet ein Kontextmenü beim Markieren von Text ein
EN: License: Public Domain
EN: You're allowed to copy, distribute and change the code without restrictions
DE: Lizenz: Public Domain
DE: Kopieren, Verteilen und Aendern ohne Einschraenkungen erlaubt
*/
// EN: Create a private scope using an anonymous function,
// EN: save the return value in a global variable.
// DE: Erzeuge einen privaten Scope durch eine anonyme Funktion,
// DE: speichere den Rückgabwert in einer globalen Variable
var SelectionMenu = (function (window, document) {
// EN: The menu element which is inserted when selecting text
// DE: Das Menü-Element, welche beim Markieren eingefügt wird
var span = null;
// EN: Shared private helper functions
// DE: Geteilte private Helferfunktionen
function addEvent (obj, type, fn) {
// EN: Feature dection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
if (obj.addEventListener) {
obj.addEventListener(type, fn, false);
} else if (obj.attachEvent) {
obj.attachEvent('on' + type, function () {
return fn.call(obj, window.event);
});
}
}
// EN: Publish addEvent as a static method
// EN: (attach it to the constructor object)
// DE: Mache addEvent als statische Methode öffentlich
// DE: (hefte die Methode an den Konstruktor, der zurückgegeben wird)
SelectionMenu.addEvent = addEvent;
function getSelection () {
// EN: Feature dection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
if (window.getSelection) {
return window.getSelection();
} else if (document.selection && document.selection.createRange) {
return document.selection.createRange();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return false;
}
}
function getSelectedText (selection) {
// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
return selection.toString ? selection.toString() : selection.text;
}
function contains (a, b) {
// EN: Feature detection DOM Core / Microsoft
// DE: Fähigkeitenweiche DOM Core / Microsoft
return a.compareDocumentPosition ? !!(a.compareDocumentPosition(b) & 16) : a.contains(b);
}
function mouseOnMenu (e) {
// Greife auf das Zielelement des Ereignisses zu
// EN: Feature detection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
var target = e.target || e.srcElement;
// Ist das Zielelement das Menü oder darin enthalten?
return target == span || contains(span, target);
}
// EN: Main constructor function
// DE: Konstruktorfunktion
function SelectionMenu (options) {
var instance = this;
// EN: Copy members from the options object to the instance
// DE: Kopiere Einstellungen aus dem options-Objekt herüber zur Instanz
instance.id = options.id || 'selection-menu';
instance.menuHTML = options.menuHTML;
instance.minimalSelection = options.minimalSelection || 5;
instance.container = options.container;
instance.handler = options.handler;
// EN: Initialisation
// DE: Initialisiere
instance.create();
instance.setupEvents();
}
SelectionMenu.prototype = {
create : function () {
var instance = this;
// EN: Create the menu container if necessary
// DE: Erzeuge den Menü-Container, sofern noch nicht passiert
if (span) {
return;
}
span = document.createElement('span');
span.id = instance.id;
},
setupEvents : function () {
var instance = this;
var container = instance.container;
// EN: Hide the menu on mouse down
// DE: Verstecke beim Mousedown
addEvent(container, 'mousedown', function (e) {
instance.hide(e);
});
// EN: Insert the menu on mouseup given some text is selected
// DE: Füge das Menü beim Mouseup ein, wenn Text ausgewählt wurde
addEvent(container, 'mouseup', function (e) {
instance.insert(e);
// EN: After a delay, check if the text was deselected
// DE: Prüfe nach einer Verzögerung, ob die Auswahl damit aufgehoben wurde
window.setTimeout(function () {
instance.hideIfNoSelection();
}, 0);
});
instance.setupMenuEvents();
},
setupMenuEvents : function () {
var instance = this;
// EN: Register the handler for clicks on the menu
// DE: Registiere Handlerfunktion für den Klick auf das Menü
addEvent(span, 'click', function (e) {
instance.handler.call(instance, e);
return false;
});
// EN: Prevent IE to select the text of the menu
// DE: Verhindere das Markieren des Menüs im IE
span.unselectable = true;
},
hide : function (e) {
// EN: Abort if an event object was passed and the click hit the menu itself
// Breche ab, wenn Event-Objekt übergeben wurde und der Klick beim Menü passierte
if (e && mouseOnMenu(e)) {
return;
}
// EN: Is the element attached to the DOM tree?
// DE: Ist das Element in den DOM-Baum gehängt?
var parent = span.parentNode;
if (parent) {
// EN: Remove the element from DOM (the element object remains
// EN: in memory and will be reused later)
// DE: Entferne das element aus dem DOM-Baum (Element bleibt im Speicher erhalten
// DE: und wird später wiederverwendet)
parent.removeChild(span);
}
},
hideIfNoSelection : function () {
var instance = this;
var selection = getSelection();
if (!selection) {
return;
}
var selectedText = getSelectedText(selection);
if (!selectedText.length) {
instance.hide();
}
},
insert : function (e) {
var instance = this;
// EN: Abort if the mouse event occured at the menu itself
// DE: Breche ab, wenn das Mausereignis beim Menü passierte
if (mouseOnMenu(e)) {
return;
}
// EN: Get a Selection object or a TextRange (IE)
// DE: Hole Selection bzw. TextRange (IE)
var selection = getSelection();
if (!selection) {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Get the selected text
// DE: Hole markierten Text
var selectedText = getSelectedText(selection);
instance.selectedText = selectedText;
// EN: Abort if the selected text is too short
// DE: Breche ab, wenn der markierte Text zu kurz ist
if (selectedText.length < instance.minimalSelection) {
instance.hide(e);
return;
}
// EN : Feature detection DOM Range / Microsoft
// DE: Fähigkeitenweiche DOM Range / Microsoft
if (selection.getRangeAt) {
// EN: W3C DOM Range approach
// DE: Lösungsansatz mit W3C DOM Range
// EN: Get the first Range of the current Selection
// DE: Hole Range, die zur Selection gehört
var range = selection.getRangeAt(0);
// EN: Get the start and end nodes of the selection
// DE: Hole Start- und Endknoten der Auswahl
var startNode = range.startContainer;
var endNode = range.endContainer;
if (!(startNode && endNode && startNode.compareDocumentPosition)) {
// EN: Abort if we got bogus values or we can't compare their document position
// DE: Breche ab, wenn die Knoten nicht brauchbar sind
return;
}
// EN: If the start node succeeds the end node in the DOM tree, flip them
// DE: Wenn von hinten nach vorne markiert wurde, drehe Start und Ende um
if (startNode.compareDocumentPosition(endNode) & 2) {
startNode = endNode;
endNode = range.startContainer;
}
// EN: Get the end offset
// DE: Hole End-Offset
var endOffset = range.endOffset;
// EN: If the end node is an element, use its last text node as the end offset
// DE: Falls der Endknoten ein Element ist, nehme das Ende des letzten Textknoten
if (endNode.nodeType == 1) {
endNode = endNode.lastChild;
if (!endNode || endNode.nodeType != 3) {
return;
}
endOffset = endNode.data.length;
}
// EN: Create a new empty Range
// DE: Erzeuge neue, leere Range
var newRange = document.createRange();
// EN: Move the beginning of the new Range to the end of the selection
// DE: Verschiebe Anfang der neuen Range an das Ende der Auswahl
newRange.setStart(endNode, endOffset);
// EN: Fill the menu span
// DE: Befülle das Menü-span
span.innerHTML = instance.menuHTML;
// EN: Inject the span element into the new Range | // DE: Korrigiere Auswahl, verhindere das Markieren des Menüs
if (selection.removeRange) {
selection.removeRange(range);
} else {
selection.removeAllRanges();
}
selection.addRange(range);
} else if (selection.duplicate) {
// EN: Microsoft TextRange approach
// DE: Lösungsansatz mit Microsoft TextRanges
// EN: Create a copy the the TextRange
// DE: Kopiere TextRange
var newRange = selection.duplicate();
// EN: Move the start of the new range to the end of the selection
// DE: Verschiebe den Anfang der neuen Range an das Ende der Auswahl
newRange.setEndPoint('StartToEnd', selection);
// EN: Fill the menu span
// DE: Befülle das Menü-span
span.innerHTML = instance.menuHTML;
// EN: Insert the span into the new range
// DE: Fülle die neue Range mit dem span
newRange.pasteHTML(span.outerHTML);
// EN: Restore the selection so that the original text is selected
// EN: and not the menu
// DE: Korrigiere Auswahl und setze sie auf die ursprüngliche Auswahl zurück,
// DE: sodass das Menü nicht selektiert ist
selection.select();
// EN: Since we're using outerHTML to insert the span element,
// EN: we have to restore the span reference and the event handling
// DE: Da das Befüllen nicht über das DOM, sondern über serialisierten HTML-Code erfolgt,
// DE: stelle die Referenz und das Event-Handling wieder her
span = document.getElementById(id);
instance.setupMenuEvents();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Menu positioning
// DE: Positioniere Menü
instance.position();
},
position : function () {
span.style.marginTop = -(span.offsetHeight + 5) + 'px';
}
};
// EN: Return the constructor function
// DE: Gib den Konstruktor zurück
return SelectionMenu;
})(window, document); | // DE: Füge das span-Element in die neue Range ein
newRange.insertNode(span);
// EN: Adjust the selection by removing and adding the range.
// EN: This prevents the selection of the menu text. | random_line_split |
selectionmenu.js | /*
SelectionMenu 1.1
http://github.com/molily/selectionmenu
by molily (molily@mailbox.org, http://molily.de/)
EN: SelectionMenu displays a context menu when the user selects some text on the page
DE: SelectionMenu blendet ein Kontextmenü beim Markieren von Text ein
EN: License: Public Domain
EN: You're allowed to copy, distribute and change the code without restrictions
DE: Lizenz: Public Domain
DE: Kopieren, Verteilen und Aendern ohne Einschraenkungen erlaubt
*/
// EN: Create a private scope using an anonymous function,
// EN: save the return value in a global variable.
// DE: Erzeuge einen privaten Scope durch eine anonyme Funktion,
// DE: speichere den Rückgabwert in einer globalen Variable
var SelectionMenu = (function (window, document) {
// EN: The menu element which is inserted when selecting text
// DE: Das Menü-Element, welche beim Markieren eingefügt wird
var span = null;
// EN: Shared private helper functions
// DE: Geteilte private Helferfunktionen
function addEvent (obj, type, fn) {
// EN: Feature dection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
if (obj.addEventListener) {
obj.addEventListener(type, fn, false);
} else if (obj.attachEvent) {
obj.attachEvent('on' + type, function () {
return fn.call(obj, window.event);
});
}
}
// EN: Publish addEvent as a static method
// EN: (attach it to the constructor object)
// DE: Mache addEvent als statische Methode öffentlich
// DE: (hefte die Methode an den Konstruktor, der zurückgegeben wird)
SelectionMenu.addEvent = addEvent;
function getSelection () {
// EN: Feature dection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
if (window.getSelection) {
return window.getSelection();
} else if (document.selection && document.selection.createRange) {
return document.selection.createRange();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return false;
}
}
function getSelectedText (selection) {
// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
return selection.toString ? selection.toString() : selection.text;
}
function contains (a, b) {
// EN: Feature detection DOM Core / Microsoft
// DE: Fähigkeitenweiche DOM Core / Microsoft
return a.compareDocumentPosition ? !!(a.compareDocumentPosition(b) & 16) : a.contains(b);
}
function mouseOnMenu (e) {
// Greife auf das Zielelement des Ereignisses zu
// EN: Feature detection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
var target = e.target || e.srcElement;
// Ist das Zielelement das Menü oder darin enthalten?
return target == span || contains(span, target);
}
// EN: Main constructor function
// DE: Konstruktorfunktion
function SelectionMenu (options) {
var instance = this;
// EN: Copy members from the options object to the instance
// DE: Kopiere Einstellungen aus dem options-Objekt herüber zur Instanz
instance.id = options.id || 'selection-menu';
instance.menuHTML = options.menuHTML;
instance.minimalSelection = options.minimalSelection || 5;
instance.container = options.container;
instance.handler = options.handler;
// EN: Initialisation
// DE: Initialisiere
instance.create();
instance.setupEvents();
}
SelectionMenu.prototype = {
create : function () {
var instance = this;
// EN: Create the menu container if necessary
// DE: Erzeuge den Menü-Container, sofern noch nicht passiert
if (span) {
return;
}
span = document.createElement('span');
span.id = instance.id;
},
setupEvents : function () {
var instance = this;
var container = instance.container;
// EN: Hide the menu on mouse down
// DE: Verstecke beim Mousedown
addEvent(container, 'mousedown', function (e) {
instance.hide(e);
});
// EN: Insert the menu on mouseup given some text is selected
// DE: Füge das Menü beim Mouseup ein, wenn Text ausgewählt wurde
addEvent(container, 'mouseup', function (e) {
instance.insert(e);
// EN: After a delay, check if the text was deselected
// DE: Prüfe nach einer Verzögerung, ob die Auswahl damit aufgehoben wurde
window.setTimeout(function () {
instance.hideIfNoSelection();
}, 0);
});
instance.setupMenuEvents();
},
setupMenuEvents : function () {
var instance = this;
// EN: Register the handler for clicks on the menu
// DE: Registiere Handlerfunktion für den Klick auf das Menü
addEvent(span, 'click', function (e) {
instance.handler.call(instance, e);
return false;
});
// EN: Prevent IE to select the text of the menu
// DE: Verhindere das Markieren des Menüs im IE
span.unselectable = true;
},
hide : function (e) {
// EN: Abort if an event object was passed and the click hit the menu itself
// Breche ab, wenn Event-Objekt übergeben wurde und der Klick beim Menü passierte
if (e && mouseOnMenu(e)) {
return;
}
// EN: Is the element attached to the DOM tree?
// DE: Ist das Element in den DOM-Baum gehängt?
var parent = span.parentNode;
if (parent) {
// EN: Remove the element from DOM (the element object remains
// EN: in memory and will be reused later)
// DE: Entferne das element aus dem DOM-Baum (Element bleibt im Speicher erhalten
// DE: und wird später wiederverwendet)
parent.removeChild(span);
}
},
hideIfNoSelection : function () {
var instance = this;
var selection = getSelection();
if (!selection) {
return;
}
var selectedText = getSelectedText(selection);
if (!selectedText.length) {
instance.hide();
}
},
insert : function (e) {
var instance = this;
// EN: Abort if the mouse event occured at the menu itself
// DE: Breche ab, wenn das Mausereignis beim Menü passierte
if (mouseOnMenu(e)) {
return;
}
// EN: Get a Selection object or a TextRange (IE)
// DE: Hole Selection bzw. TextRange (IE)
var selection = getSelection();
if (!selection) {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Get the selected text
// DE: Hole markierten Text
var selectedText = getSelectedText(selection);
instance.selectedText = selectedText;
// EN: Abort if the selected text is too short
// DE: Breche ab, wenn der markierte Text zu kurz ist
if (selectedText.length < instance.minimalSelection) {
instance.hide(e);
return;
}
// EN : Feature detection DOM Range / Microsoft
// DE: Fähigkeitenweiche DOM Range / Microsoft
if (selection.getRangeAt) {
// EN: W3C DOM Range approach
// DE: Lösungsansatz mit W3C DOM Range
// EN: Get the first Range of the current Selection
// DE: Hole Range, die zur Selection gehört
var range = selection.getRangeAt(0);
// EN: Get the start and end nodes of the selection
// DE: Hole Start- und Endknoten der Auswahl
var startNode = range.startContainer;
var endNode = range.endContainer;
if (!(startNode && endNode && startNode.compareDocumentPosition)) {
// EN: Abort if we got bogus values or we can't compare their document position
// DE: Breche ab, wenn die Knoten nicht brauchbar sind
return;
}
// EN: If the start node succeeds the end node in the DOM tree, flip them
// DE: Wenn von hinten nach vorne markiert wurde, drehe Start und Ende um
if (startNode.compareDocumentPosition(endNode) & 2) {
startNode = endNode;
endNode = range.startContainer;
}
// EN: Get the end offset
// DE: Hole End-Offset
var endOffset = range.endOffset;
// EN: If the end node is an element, use its last text node as the end offset
// DE: Falls der Endknoten ein Element ist, nehme das Ende des letzten Textknoten
if (endNode.nodeType == 1) {
endNode = endNode.lastChild;
if (!endNode || endNode.nodeType != 3) {
return;
}
endOffset = endNode.data.length;
}
// EN: Create a new empty Range
// DE: Erzeuge neue, leere Range
var newRange = document.createRange();
// EN: Move the beginning of the new Range to the end of the selection
// DE: Verschiebe Anfang der neuen Range an das Ende der Auswahl
newRange.setStart(endNode, endOffset);
// EN: Fill the menu span
// DE: Befülle das Menü-span
span.innerHTML = instance.menuHTML;
// EN: Inject the span element into the new Range
// DE: Füge das span-Element in die neue Range ein
newRange.insertNode(span);
// EN: Adjust the selection by removing and adding the range.
// EN: This prevents the selection of the menu text.
// DE: Korrigiere Auswahl, verhindere das Markieren des Menüs
if (selection.removeRange) {
selection.removeRange(range);
| ;
}
selection.addRange(range);
} else if (selection.duplicate) {
// EN: Microsoft TextRange approach
// DE: Lösungsansatz mit Microsoft TextRanges
// EN: Create a copy the the TextRange
// DE: Kopiere TextRange
var newRange = selection.duplicate();
// EN: Move the start of the new range to the end of the selection
// DE: Verschiebe den Anfang der neuen Range an das Ende der Auswahl
newRange.setEndPoint('StartToEnd', selection);
// EN: Fill the menu span
// DE: Befülle das Menü-span
span.innerHTML = instance.menuHTML;
// EN: Insert the span into the new range
// DE: Fülle die neue Range mit dem span
newRange.pasteHTML(span.outerHTML);
// EN: Restore the selection so that the original text is selected
// EN: and not the menu
// DE: Korrigiere Auswahl und setze sie auf die ursprüngliche Auswahl zurück,
// DE: sodass das Menü nicht selektiert ist
selection.select();
// EN: Since we're using outerHTML to insert the span element,
// EN: we have to restore the span reference and the event handling
// DE: Da das Befüllen nicht über das DOM, sondern über serialisierten HTML-Code erfolgt,
// DE: stelle die Referenz und das Event-Handling wieder her
span = document.getElementById(id);
instance.setupMenuEvents();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Menu positioning
// DE: Positioniere Menü
instance.position();
},
position : function () {
span.style.marginTop = -(span.offsetHeight + 5) + 'px';
}
};
// EN: Return the constructor function
// DE: Gib den Konstruktor zurück
return SelectionMenu;
})(window, document); | } else {
selection.removeAllRanges() | conditional_block |
selectionmenu.js | /*
SelectionMenu 1.1
http://github.com/molily/selectionmenu
by molily (molily@mailbox.org, http://molily.de/)
EN: SelectionMenu displays a context menu when the user selects some text on the page
DE: SelectionMenu blendet ein Kontextmenü beim Markieren von Text ein
EN: License: Public Domain
EN: You're allowed to copy, distribute and change the code without restrictions
DE: Lizenz: Public Domain
DE: Kopieren, Verteilen und Aendern ohne Einschraenkungen erlaubt
*/
// EN: Create a private scope using an anonymous function,
// EN: save the return value in a global variable.
// DE: Erzeuge einen privaten Scope durch eine anonyme Funktion,
// DE: speichere den Rückgabwert in einer globalen Variable
var SelectionMenu = (function (window, document) {
// EN: The menu element which is inserted when selecting text
// DE: Das Menü-Element, welche beim Markieren eingefügt wird
var span = null;
// EN: Shared private helper functions
// DE: Geteilte private Helferfunktionen
function addEvent (obj, type, fn) {
// EN: Feature dection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
if (obj.addEventListener) {
obj.addEventListener(type, fn, false);
} else if (obj.attachEvent) {
obj.attachEvent('on' + type, function () {
return fn.call(obj, window.event);
});
}
}
// EN: Publish addEvent as a static method
// EN: (attach it to the constructor object)
// DE: Mache addEvent als statische Methode öffentlich
// DE: (hefte die Methode an den Konstruktor, der zurückgegeben wird)
SelectionMenu.addEvent = addEvent;
function getSelection () {
// EN: Feature dection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
if (window.getSelection) {
return window.getSelection();
} else if (document.selection && document.selection.createRange) {
return document.selection.createRange();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return false;
}
}
function getSelectedText (selection) {
// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
return selection.toString ? selection.toString() : selection.text;
}
function contains (a, b) {
// EN: Feature detection DOM Core / Microsoft
// DE: Fähigkeitenweiche DOM Core / Microsoft
return a.compareDocumentPosition ? !!(a.compareDocumentPosition(b) & 16) : a.contains(b);
}
function mouseOnMenu (e) {
// Greife auf das Zielelement des Ereignisses zu
// EN: Feature detection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
var target = e.target || e.srcElement;
// Ist das Zielelement das Menü oder darin enthalten?
return target == span || contains(span, target);
}
// EN: Main constructor function
// DE: Konstruktorfunktion
function SelectionMenu (options) {
var instanc | .prototype = {
create : function () {
var instance = this;
// EN: Create the menu container if necessary
// DE: Erzeuge den Menü-Container, sofern noch nicht passiert
if (span) {
return;
}
span = document.createElement('span');
span.id = instance.id;
},
setupEvents : function () {
var instance = this;
var container = instance.container;
// EN: Hide the menu on mouse down
// DE: Verstecke beim Mousedown
addEvent(container, 'mousedown', function (e) {
instance.hide(e);
});
// EN: Insert the menu on mouseup given some text is selected
// DE: Füge das Menü beim Mouseup ein, wenn Text ausgewählt wurde
addEvent(container, 'mouseup', function (e) {
instance.insert(e);
// EN: After a delay, check if the text was deselected
// DE: Prüfe nach einer Verzögerung, ob die Auswahl damit aufgehoben wurde
window.setTimeout(function () {
instance.hideIfNoSelection();
}, 0);
});
instance.setupMenuEvents();
},
setupMenuEvents : function () {
var instance = this;
// EN: Register the handler for clicks on the menu
// DE: Registiere Handlerfunktion für den Klick auf das Menü
addEvent(span, 'click', function (e) {
instance.handler.call(instance, e);
return false;
});
// EN: Prevent IE to select the text of the menu
// DE: Verhindere das Markieren des Menüs im IE
span.unselectable = true;
},
hide : function (e) {
// EN: Abort if an event object was passed and the click hit the menu itself
// Breche ab, wenn Event-Objekt übergeben wurde und der Klick beim Menü passierte
if (e && mouseOnMenu(e)) {
return;
}
// EN: Is the element attached to the DOM tree?
// DE: Ist das Element in den DOM-Baum gehängt?
var parent = span.parentNode;
if (parent) {
// EN: Remove the element from DOM (the element object remains
// EN: in memory and will be reused later)
// DE: Entferne das element aus dem DOM-Baum (Element bleibt im Speicher erhalten
// DE: und wird später wiederverwendet)
parent.removeChild(span);
}
},
hideIfNoSelection : function () {
var instance = this;
var selection = getSelection();
if (!selection) {
return;
}
var selectedText = getSelectedText(selection);
if (!selectedText.length) {
instance.hide();
}
},
insert : function (e) {
var instance = this;
// EN: Abort if the mouse event occured at the menu itself
// DE: Breche ab, wenn das Mausereignis beim Menü passierte
if (mouseOnMenu(e)) {
return;
}
// EN: Get a Selection object or a TextRange (IE)
// DE: Hole Selection bzw. TextRange (IE)
var selection = getSelection();
if (!selection) {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Get the selected text
// DE: Hole markierten Text
var selectedText = getSelectedText(selection);
instance.selectedText = selectedText;
// EN: Abort if the selected text is too short
// DE: Breche ab, wenn der markierte Text zu kurz ist
if (selectedText.length < instance.minimalSelection) {
instance.hide(e);
return;
}
// EN : Feature detection DOM Range / Microsoft
// DE: Fähigkeitenweiche DOM Range / Microsoft
if (selection.getRangeAt) {
// EN: W3C DOM Range approach
// DE: Lösungsansatz mit W3C DOM Range
// EN: Get the first Range of the current Selection
// DE: Hole Range, die zur Selection gehört
var range = selection.getRangeAt(0);
// EN: Get the start and end nodes of the selection
// DE: Hole Start- und Endknoten der Auswahl
var startNode = range.startContainer;
var endNode = range.endContainer;
if (!(startNode && endNode && startNode.compareDocumentPosition)) {
// EN: Abort if we got bogus values or we can't compare their document position
// DE: Breche ab, wenn die Knoten nicht brauchbar sind
return;
}
// EN: If the start node succeeds the end node in the DOM tree, flip them
// DE: Wenn von hinten nach vorne markiert wurde, drehe Start und Ende um
if (startNode.compareDocumentPosition(endNode) & 2) {
startNode = endNode;
endNode = range.startContainer;
}
// EN: Get the end offset
// DE: Hole End-Offset
var endOffset = range.endOffset;
// EN: If the end node is an element, use its last text node as the end offset
// DE: Falls der Endknoten ein Element ist, nehme das Ende des letzten Textknoten
if (endNode.nodeType == 1) {
endNode = endNode.lastChild;
if (!endNode || endNode.nodeType != 3) {
return;
}
endOffset = endNode.data.length;
}
// EN: Create a new empty Range
// DE: Erzeuge neue, leere Range
var newRange = document.createRange();
// EN: Move the beginning of the new Range to the end of the selection
// DE: Verschiebe Anfang der neuen Range an das Ende der Auswahl
newRange.setStart(endNode, endOffset);
// EN: Fill the menu span
// DE: Befülle das Menü-span
span.innerHTML = instance.menuHTML;
// EN: Inject the span element into the new Range
// DE: Füge das span-Element in die neue Range ein
newRange.insertNode(span);
// EN: Adjust the selection by removing and adding the range.
// EN: This prevents the selection of the menu text.
// DE: Korrigiere Auswahl, verhindere das Markieren des Menüs
if (selection.removeRange) {
selection.removeRange(range);
} else {
selection.removeAllRanges();
}
selection.addRange(range);
} else if (selection.duplicate) {
// EN: Microsoft TextRange approach
// DE: Lösungsansatz mit Microsoft TextRanges
// EN: Create a copy the the TextRange
// DE: Kopiere TextRange
var newRange = selection.duplicate();
// EN: Move the start of the new range to the end of the selection
// DE: Verschiebe den Anfang der neuen Range an das Ende der Auswahl
newRange.setEndPoint('StartToEnd', selection);
// EN: Fill the menu span
// DE: Befülle das Menü-span
span.innerHTML = instance.menuHTML;
// EN: Insert the span into the new range
// DE: Fülle die neue Range mit dem span
newRange.pasteHTML(span.outerHTML);
// EN: Restore the selection so that the original text is selected
// EN: and not the menu
// DE: Korrigiere Auswahl und setze sie auf die ursprüngliche Auswahl zurück,
// DE: sodass das Menü nicht selektiert ist
selection.select();
// EN: Since we're using outerHTML to insert the span element,
// EN: we have to restore the span reference and the event handling
// DE: Da das Befüllen nicht über das DOM, sondern über serialisierten HTML-Code erfolgt,
// DE: stelle die Referenz und das Event-Handling wieder her
span = document.getElementById(id);
instance.setupMenuEvents();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Menu positioning
// DE: Positioniere Menü
instance.position();
},
position : function () {
span.style.marginTop = -(span.offsetHeight + 5) + 'px';
}
};
// EN: Return the constructor function
// DE: Gib den Konstruktor zurück
return SelectionMenu;
})(window, document); | e = this;
// EN: Copy members from the options object to the instance
// DE: Kopiere Einstellungen aus dem options-Objekt herüber zur Instanz
instance.id = options.id || 'selection-menu';
instance.menuHTML = options.menuHTML;
instance.minimalSelection = options.minimalSelection || 5;
instance.container = options.container;
instance.handler = options.handler;
// EN: Initialisation
// DE: Initialisiere
instance.create();
instance.setupEvents();
}
SelectionMenu | identifier_body |
selectionmenu.js | /*
SelectionMenu 1.1
http://github.com/molily/selectionmenu
by molily (molily@mailbox.org, http://molily.de/)
EN: SelectionMenu displays a context menu when the user selects some text on the page
DE: SelectionMenu blendet ein Kontextmenü beim Markieren von Text ein
EN: License: Public Domain
EN: You're allowed to copy, distribute and change the code without restrictions
DE: Lizenz: Public Domain
DE: Kopieren, Verteilen und Aendern ohne Einschraenkungen erlaubt
*/
// EN: Create a private scope using an anonymous function,
// EN: save the return value in a global variable.
// DE: Erzeuge einen privaten Scope durch eine anonyme Funktion,
// DE: speichere den Rückgabwert in einer globalen Variable
var SelectionMenu = (function (window, document) {
// EN: The menu element which is inserted when selecting text
// DE: Das Menü-Element, welche beim Markieren eingefügt wird
var span = null;
// EN: Shared private helper functions
// DE: Geteilte private Helferfunktionen
function addEvent (obj, type, fn) {
// EN: Feature dection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
if (obj.addEventListener) {
obj.addEventListener(type, fn, false);
} else if (obj.attachEvent) {
obj.attachEvent('on' + type, function () {
return fn.call(obj, window.event);
});
}
}
// EN: Publish addEvent as a static method
// EN: (attach it to the constructor object)
// DE: Mache addEvent als statische Methode öffentlich
// DE: (hefte die Methode an den Konstruktor, der zurückgegeben wird)
SelectionMenu.addEvent = addEvent;
function getSelection () {
// EN: Feature dection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
if (window.getSelection) {
return window.getSelection();
} else if (document.selection && document.selection.createRange) {
return document.selection.createRange();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return false;
}
}
function getSelected | ) {
// EN: Feature detection HTML5 / Microsoft
// DE: Fähigkeitenweiche HTML5 / Microsoft
return selection.toString ? selection.toString() : selection.text;
}
function contains (a, b) {
// EN: Feature detection DOM Core / Microsoft
// DE: Fähigkeitenweiche DOM Core / Microsoft
return a.compareDocumentPosition ? !!(a.compareDocumentPosition(b) & 16) : a.contains(b);
}
function mouseOnMenu (e) {
// Greife auf das Zielelement des Ereignisses zu
// EN: Feature detection DOM Events / Microsoft
// DE: Fähigkeitenweiche DOM Events / Microsoft
var target = e.target || e.srcElement;
// Ist das Zielelement das Menü oder darin enthalten?
return target == span || contains(span, target);
}
// EN: Main constructor function
// DE: Konstruktorfunktion
function SelectionMenu (options) {
var instance = this;
// EN: Copy members from the options object to the instance
// DE: Kopiere Einstellungen aus dem options-Objekt herüber zur Instanz
instance.id = options.id || 'selection-menu';
instance.menuHTML = options.menuHTML;
instance.minimalSelection = options.minimalSelection || 5;
instance.container = options.container;
instance.handler = options.handler;
// EN: Initialisation
// DE: Initialisiere
instance.create();
instance.setupEvents();
}
SelectionMenu.prototype = {
create : function () {
var instance = this;
// EN: Create the menu container if necessary
// DE: Erzeuge den Menü-Container, sofern noch nicht passiert
if (span) {
return;
}
span = document.createElement('span');
span.id = instance.id;
},
setupEvents : function () {
var instance = this;
var container = instance.container;
// EN: Hide the menu on mouse down
// DE: Verstecke beim Mousedown
addEvent(container, 'mousedown', function (e) {
instance.hide(e);
});
// EN: Insert the menu on mouseup given some text is selected
// DE: Füge das Menü beim Mouseup ein, wenn Text ausgewählt wurde
addEvent(container, 'mouseup', function (e) {
instance.insert(e);
// EN: After a delay, check if the text was deselected
// DE: Prüfe nach einer Verzögerung, ob die Auswahl damit aufgehoben wurde
window.setTimeout(function () {
instance.hideIfNoSelection();
}, 0);
});
instance.setupMenuEvents();
},
setupMenuEvents : function () {
var instance = this;
// EN: Register the handler for clicks on the menu
// DE: Registiere Handlerfunktion für den Klick auf das Menü
addEvent(span, 'click', function (e) {
instance.handler.call(instance, e);
return false;
});
// EN: Prevent IE to select the text of the menu
// DE: Verhindere das Markieren des Menüs im IE
span.unselectable = true;
},
hide : function (e) {
// EN: Abort if an event object was passed and the click hit the menu itself
// Breche ab, wenn Event-Objekt übergeben wurde und der Klick beim Menü passierte
if (e && mouseOnMenu(e)) {
return;
}
// EN: Is the element attached to the DOM tree?
// DE: Ist das Element in den DOM-Baum gehängt?
var parent = span.parentNode;
if (parent) {
// EN: Remove the element from DOM (the element object remains
// EN: in memory and will be reused later)
// DE: Entferne das element aus dem DOM-Baum (Element bleibt im Speicher erhalten
// DE: und wird später wiederverwendet)
parent.removeChild(span);
}
},
hideIfNoSelection : function () {
var instance = this;
var selection = getSelection();
if (!selection) {
return;
}
var selectedText = getSelectedText(selection);
if (!selectedText.length) {
instance.hide();
}
},
insert : function (e) {
var instance = this;
// EN: Abort if the mouse event occured at the menu itself
// DE: Breche ab, wenn das Mausereignis beim Menü passierte
if (mouseOnMenu(e)) {
return;
}
// EN: Get a Selection object or a TextRange (IE)
// DE: Hole Selection bzw. TextRange (IE)
var selection = getSelection();
if (!selection) {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Get the selected text
// DE: Hole markierten Text
var selectedText = getSelectedText(selection);
instance.selectedText = selectedText;
// EN: Abort if the selected text is too short
// DE: Breche ab, wenn der markierte Text zu kurz ist
if (selectedText.length < instance.minimalSelection) {
instance.hide(e);
return;
}
// EN : Feature detection DOM Range / Microsoft
// DE: Fähigkeitenweiche DOM Range / Microsoft
if (selection.getRangeAt) {
// EN: W3C DOM Range approach
// DE: Lösungsansatz mit W3C DOM Range
// EN: Get the first Range of the current Selection
// DE: Hole Range, die zur Selection gehört
var range = selection.getRangeAt(0);
// EN: Get the start and end nodes of the selection
// DE: Hole Start- und Endknoten der Auswahl
var startNode = range.startContainer;
var endNode = range.endContainer;
if (!(startNode && endNode && startNode.compareDocumentPosition)) {
// EN: Abort if we got bogus values or we can't compare their document position
// DE: Breche ab, wenn die Knoten nicht brauchbar sind
return;
}
// EN: If the start node succeeds the end node in the DOM tree, flip them
// DE: Wenn von hinten nach vorne markiert wurde, drehe Start und Ende um
if (startNode.compareDocumentPosition(endNode) & 2) {
startNode = endNode;
endNode = range.startContainer;
}
// EN: Get the end offset
// DE: Hole End-Offset
var endOffset = range.endOffset;
// EN: If the end node is an element, use its last text node as the end offset
// DE: Falls der Endknoten ein Element ist, nehme das Ende des letzten Textknoten
if (endNode.nodeType == 1) {
endNode = endNode.lastChild;
if (!endNode || endNode.nodeType != 3) {
return;
}
endOffset = endNode.data.length;
}
// EN: Create a new empty Range
// DE: Erzeuge neue, leere Range
var newRange = document.createRange();
// EN: Move the beginning of the new Range to the end of the selection
// DE: Verschiebe Anfang der neuen Range an das Ende der Auswahl
newRange.setStart(endNode, endOffset);
// EN: Fill the menu span
// DE: Befülle das Menü-span
span.innerHTML = instance.menuHTML;
// EN: Inject the span element into the new Range
// DE: Füge das span-Element in die neue Range ein
newRange.insertNode(span);
// EN: Adjust the selection by removing and adding the range.
// EN: This prevents the selection of the menu text.
// DE: Korrigiere Auswahl, verhindere das Markieren des Menüs
if (selection.removeRange) {
selection.removeRange(range);
} else {
selection.removeAllRanges();
}
selection.addRange(range);
} else if (selection.duplicate) {
// EN: Microsoft TextRange approach
// DE: Lösungsansatz mit Microsoft TextRanges
// EN: Create a copy the the TextRange
// DE: Kopiere TextRange
var newRange = selection.duplicate();
// EN: Move the start of the new range to the end of the selection
// DE: Verschiebe den Anfang der neuen Range an das Ende der Auswahl
newRange.setEndPoint('StartToEnd', selection);
// EN: Fill the menu span
// DE: Befülle das Menü-span
span.innerHTML = instance.menuHTML;
// EN: Insert the span into the new range
// DE: Fülle die neue Range mit dem span
newRange.pasteHTML(span.outerHTML);
// EN: Restore the selection so that the original text is selected
// EN: and not the menu
// DE: Korrigiere Auswahl und setze sie auf die ursprüngliche Auswahl zurück,
// DE: sodass das Menü nicht selektiert ist
selection.select();
// EN: Since we're using outerHTML to insert the span element,
// EN: we have to restore the span reference and the event handling
// DE: Da das Befüllen nicht über das DOM, sondern über serialisierten HTML-Code erfolgt,
// DE: stelle die Referenz und das Event-Handling wieder her
span = document.getElementById(id);
instance.setupMenuEvents();
} else {
// EN: No browser support available for the required features
// DE: Keine Browser-Unterstützung für die benötigten Features
return;
}
// EN: Menu positioning
// DE: Positioniere Menü
instance.position();
},
position : function () {
span.style.marginTop = -(span.offsetHeight + 5) + 'px';
}
};
// EN: Return the constructor function
// DE: Gib den Konstruktor zurück
return SelectionMenu;
})(window, document); | Text (selection | identifier_name |
lib.rs | //! The `io_uring` library for Rust.
//!
//! The crate only provides a summary of the parameters.
//! For more detailed documentation, see manpage.
#![cfg_attr(sgx, no_std)]
#[cfg(sgx)]
extern crate sgx_types;
#[cfg(sgx)]
#[macro_use]
extern crate sgx_tstd as std;
#[cfg(sgx)]
extern crate sgx_trts;
#[cfg(sgx)]
use std::prelude::v1::*;
#[cfg(sgx)]
pub use sgx_trts::libc;
#[macro_use]
mod util;
pub mod cqueue;
pub mod opcode;
mod register;
pub mod squeue;
mod submit;
mod sys;
#[cfg(any(feature = "concurrent", sgx))]
pub mod concurrent;
use std::convert::TryInto;
use std::mem::ManuallyDrop;
use std::os::unix::io::{AsRawFd, RawFd};
use std::{cmp, io, mem};
pub use cqueue::CompletionQueue;
pub use register::Probe;
pub use squeue::SubmissionQueue;
pub use submit::Submitter;
use util::{Fd, Mmap};
/// IoUring instance
pub struct IoUring {
fd: Fd,
params: Parameters,
memory: ManuallyDrop<MemoryMap>,
sq: SubmissionQueue,
cq: CompletionQueue,
}
#[allow(dead_code)]
struct MemoryMap {
sq_mmap: Mmap,
sqe_mmap: Mmap,
cq_mmap: Option<Mmap>,
}
/// IoUring build params
#[derive(Clone, Default)]
pub struct Builder {
dontfork: bool,
params: sys::io_uring_params,
}
#[derive(Clone)]
pub struct Parameters(sys::io_uring_params);
unsafe impl Send for IoUring {}
unsafe impl Sync for IoUring {}
impl IoUring {
/// Create a IoUring instance
///
/// The `entries` sets the size of queue,
/// and it value should be the power of two.
#[inline]
pub fn new(entries: u32) -> io::Result<IoUring> {
IoUring::with_params(entries, Default::default())
}
fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> {
// NOTE: The `SubmissionQueue` and `CompletionQueue` are references,
// and their lifetime can never exceed `MemoryMap`.
//
// The memory mapped regions of `MemoryMap` never move,
// so `SubmissionQueue` and `CompletionQueue` are `Unpin`.
//
// I really hope that Rust can safely use self-reference types.
#[inline]
unsafe fn setup_queue(
fd: &Fd,
p: &sys::io_uring_params,
) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> {
let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>();
let cq_len = p.cq_off.cqes as usize
+ p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>();
let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>();
let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?;
if p.features & sys::IORING_FEAT_SINGLE_MMAP != 0 {
let scq_mmap =
Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?;
let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&scq_mmap, p);
let mm = MemoryMap {
sq_mmap: scq_mmap,
cq_mmap: None,
sqe_mmap,
};
Ok((mm, sq, cq))
} else {
let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?;
let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?;
let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&cq_mmap, p);
let mm = MemoryMap {
cq_mmap: Some(cq_mmap),
sq_mmap,
sqe_mmap,
};
Ok((mm, sq, cq))
}
}
let fd: Fd = unsafe {
sys::io_uring_setup(entries, &mut p)
.try_into()
.map_err(|_| io::Error::last_os_error())?
};
let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? };
Ok(IoUring {
fd,
sq,
cq,
params: Parameters(p),
memory: ManuallyDrop::new(mm),
})
}
| }
#[inline]
pub fn params(&self) -> &Parameters {
&self.params
}
pub fn start_enter_syscall_thread(&self) {
sys::start_enter_syscall_thread(self.fd.as_raw_fd());
}
/// Initiate and/or complete asynchronous I/O
///
/// # Safety
///
/// This provides a raw interface so developer must ensure that parameters are correct.
#[inline]
pub unsafe fn enter(
&self,
to_submit: u32,
min_complete: u32,
flag: u32,
sig: Option<&libc::sigset_t>,
) -> io::Result<usize> {
self.submitter().enter(to_submit, min_complete, flag, sig)
}
/// Initiate asynchronous I/O.
#[inline]
pub fn submit(&self) -> io::Result<usize> {
self.submitter().submit()
}
/// Initiate and/or complete asynchronous I/O
#[inline]
pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
self.submitter().submit_and_wait(want)
}
/// Get submitter and submission queue and completion queue
pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) {
let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq);
(submit, &mut self.sq, &mut self.cq)
}
/// Get submission queue
pub fn submission(&mut self) -> &mut SubmissionQueue {
&mut self.sq
}
/// Get completion queue
pub fn completion(&mut self) -> &mut CompletionQueue {
&mut self.cq
}
/// Make a concurrent IoUring.
#[cfg(any(feature = "concurrent", sgx))]
pub fn concurrent(self) -> concurrent::IoUring {
concurrent::IoUring::new(self)
}
}
impl Drop for IoUring {
fn drop(&mut self) {
unsafe {
ManuallyDrop::drop(&mut self.memory);
}
}
}
impl Builder {
pub fn dontfork(&mut self) -> &mut Self {
self.dontfork = true;
self
}
/// Perform busy-waiting for an I/O completion,
/// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request).
pub fn setup_iopoll(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_IOPOLL;
self
}
/// When this flag is specified, a kernel thread is created to perform submission queue polling.
/// An io_uring instance configured in this way enables an application to issue I/O
/// without ever context switching into the kernel.
pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQPOLL;
self.params.sq_thread_idle = idle.into().unwrap_or(0);
self
}
/// If this flag is specified,
/// then the poll thread will be bound to the cpu set in the value.
/// This flag is only meaningful when [Builder::setup_sqpoll] is enabled.
pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQ_AFF;
self.params.sq_thread_cpu = n;
self
}
/// Create the completion queue with struct `io_uring_params.cq_entries` entries.
/// The value must be greater than entries, and may be rounded up to the next power-of-two.
pub fn setup_cqsize(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CQSIZE;
self.params.cq_entries = n;
self
}
pub fn setup_clamp(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CLAMP;
self
}
pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_ATTACH_WQ;
self.params.wq_fd = fd as _;
self
}
#[cfg(feature = "unstable")]
pub fn setup_r_disabled(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_R_DISABLED;
self
}
/// Build a [IoUring].
#[inline]
pub fn build(&self, entries: u32) -> io::Result<IoUring> {
let ring = IoUring::with_params(entries, self.params)?;
if self.dontfork {
ring.memory.sq_mmap.dontfork()?;
ring.memory.sqe_mmap.dontfork()?;
if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() {
cq_mmap.dontfork()?;
}
}
Ok(ring)
}
}
impl Parameters {
pub fn is_setup_sqpoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_SQPOLL != 0
}
pub fn is_setup_iopoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_IOPOLL != 0
}
/// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call.
/// The SQEs must still be allocated separately.
/// This brings the necessary `mmap(2)` calls down from three to two.
pub fn is_feature_single_mmap(&self) -> bool {
self.0.features & sys::IORING_FEAT_SINGLE_MMAP != 0
}
/// If this flag is set, io_uring supports never dropping completion events. If a completion
/// event occurs and the CQ ring is full, the kernel stores the event internally until such a
/// time that the CQ ring has room for more entries.
pub fn is_feature_nodrop(&self) -> bool {
self.0.features & sys::IORING_FEAT_NODROP != 0
}
/// If this flag is set, applications can be certain that any data for async offload has been consumed
/// when the kernel has consumed the SQE
pub fn is_feature_submit_stable(&self) -> bool {
self.0.features & sys::IORING_FEAT_SUBMIT_STABLE != 0
}
/// If this flag is set, applications can specify offset == -1 with
/// `IORING_OP_{READV,WRITEV}`, `IORING_OP_{READ,WRITE}_FIXED`, and `IORING_OP_{READ,WRITE}`
/// to mean current file position, which behaves like `preadv2(2)` and `pwritev2(2)` with offset == -1.
/// It’ll use (and update) the current file position.
///
/// This obviously comes with the caveat that if the application has multiple reads or writes in flight,
/// then the end result will not be as expected.
/// This is similar to threads sharing a file descriptor and doing IO using the current file position.
pub fn is_feature_rw_cur_pos(&self) -> bool {
self.0.features & sys::IORING_FEAT_RW_CUR_POS != 0
}
/// If this flag is set, then io_uring guarantees that both sync and async execution of
/// a request assumes the credentials of the task that called `io_uring_enter(2)` to queue the requests.
/// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring.
/// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same.
/// Note that this is the default behavior,
/// tasks can still register different personalities through
/// `io_uring_register(2)` with `IORING_REGISTER_PERSONALITY` and specify the personality to use in the sqe.
pub fn is_feature_cur_personality(&self) -> bool {
self.0.features & sys::IORING_FEAT_CUR_PERSONALITY != 0
}
#[cfg(feature = "unstable")]
pub fn is_feature_fast_poll(&self) -> bool {
self.0.features & sys::IORING_FEAT_FAST_POLL != 0
}
#[cfg(feature = "unstable")]
pub fn is_feature_poll_32bits(&self) -> bool {
self.0.features & sys::IORING_FEAT_POLL_32BITS != 0
}
pub fn sq_entries(&self) -> u32 {
self.0.sq_entries
}
pub fn cq_entries(&self) -> u32 {
self.0.cq_entries
}
}
impl AsRawFd for IoUring {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
} | #[inline]
pub fn submitter(&self) -> Submitter<'_> {
Submitter::new(&self.fd, self.params.0.flags, &self.sq) | random_line_split |
lib.rs | //! The `io_uring` library for Rust.
//!
//! The crate only provides a summary of the parameters.
//! For more detailed documentation, see manpage.
#![cfg_attr(sgx, no_std)]
#[cfg(sgx)]
extern crate sgx_types;
#[cfg(sgx)]
#[macro_use]
extern crate sgx_tstd as std;
#[cfg(sgx)]
extern crate sgx_trts;
#[cfg(sgx)]
use std::prelude::v1::*;
#[cfg(sgx)]
pub use sgx_trts::libc;
#[macro_use]
mod util;
pub mod cqueue;
pub mod opcode;
mod register;
pub mod squeue;
mod submit;
mod sys;
#[cfg(any(feature = "concurrent", sgx))]
pub mod concurrent;
use std::convert::TryInto;
use std::mem::ManuallyDrop;
use std::os::unix::io::{AsRawFd, RawFd};
use std::{cmp, io, mem};
pub use cqueue::CompletionQueue;
pub use register::Probe;
pub use squeue::SubmissionQueue;
pub use submit::Submitter;
use util::{Fd, Mmap};
/// IoUring instance
pub struct IoUring {
fd: Fd,
params: Parameters,
memory: ManuallyDrop<MemoryMap>,
sq: SubmissionQueue,
cq: CompletionQueue,
}
#[allow(dead_code)]
struct MemoryMap {
sq_mmap: Mmap,
sqe_mmap: Mmap,
cq_mmap: Option<Mmap>,
}
/// IoUring build params
#[derive(Clone, Default)]
pub struct Builder {
dontfork: bool,
params: sys::io_uring_params,
}
#[derive(Clone)]
pub struct Parameters(sys::io_uring_params);
unsafe impl Send for IoUring {}
unsafe impl Sync for IoUring {}
impl IoUring {
/// Create a IoUring instance
///
/// The `entries` sets the size of queue,
/// and it value should be the power of two.
#[inline]
pub fn new(entries: u32) -> io::Result<IoUring> {
IoUring::with_params(entries, Default::default())
}
fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> {
// NOTE: The `SubmissionQueue` and `CompletionQueue` are references,
// and their lifetime can never exceed `MemoryMap`.
//
// The memory mapped regions of `MemoryMap` never move,
// so `SubmissionQueue` and `CompletionQueue` are `Unpin`.
//
// I really hope that Rust can safely use self-reference types.
#[inline]
unsafe fn setup_queue(
fd: &Fd,
p: &sys::io_uring_params,
) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> {
let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>();
let cq_len = p.cq_off.cqes as usize
+ p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>();
let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>();
let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?;
if p.features & sys::IORING_FEAT_SINGLE_MMAP != 0 {
let scq_mmap =
Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?;
let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&scq_mmap, p);
let mm = MemoryMap {
sq_mmap: scq_mmap,
cq_mmap: None,
sqe_mmap,
};
Ok((mm, sq, cq))
} else {
let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?;
let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?;
let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&cq_mmap, p);
let mm = MemoryMap {
cq_mmap: Some(cq_mmap),
sq_mmap,
sqe_mmap,
};
Ok((mm, sq, cq))
}
}
let fd: Fd = unsafe {
sys::io_uring_setup(entries, &mut p)
.try_into()
.map_err(|_| io::Error::last_os_error())?
};
let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? };
Ok(IoUring {
fd,
sq,
cq,
params: Parameters(p),
memory: ManuallyDrop::new(mm),
})
}
#[inline]
pub fn submitter(&self) -> Submitter<'_> {
Submitter::new(&self.fd, self.params.0.flags, &self.sq)
}
#[inline]
pub fn params(&self) -> &Parameters {
&self.params
}
pub fn start_enter_syscall_thread(&self) {
sys::start_enter_syscall_thread(self.fd.as_raw_fd());
}
/// Initiate and/or complete asynchronous I/O
///
/// # Safety
///
/// This provides a raw interface so developer must ensure that parameters are correct.
#[inline]
pub unsafe fn enter(
&self,
to_submit: u32,
min_complete: u32,
flag: u32,
sig: Option<&libc::sigset_t>,
) -> io::Result<usize> {
self.submitter().enter(to_submit, min_complete, flag, sig)
}
/// Initiate asynchronous I/O.
#[inline]
pub fn submit(&self) -> io::Result<usize> {
self.submitter().submit()
}
/// Initiate and/or complete asynchronous I/O
#[inline]
pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
self.submitter().submit_and_wait(want)
}
/// Get submitter and submission queue and completion queue
pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) {
let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq);
(submit, &mut self.sq, &mut self.cq)
}
/// Get submission queue
pub fn submission(&mut self) -> &mut SubmissionQueue {
&mut self.sq
}
/// Get completion queue
pub fn completion(&mut self) -> &mut CompletionQueue {
&mut self.cq
}
/// Make a concurrent IoUring.
#[cfg(any(feature = "concurrent", sgx))]
pub fn concurrent(self) -> concurrent::IoUring {
concurrent::IoUring::new(self)
}
}
impl Drop for IoUring {
fn drop(&mut self) {
unsafe {
ManuallyDrop::drop(&mut self.memory);
}
}
}
impl Builder {
pub fn dontfork(&mut self) -> &mut Self {
self.dontfork = true;
self
}
/// Perform busy-waiting for an I/O completion,
/// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request).
pub fn setup_iopoll(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_IOPOLL;
self
}
/// When this flag is specified, a kernel thread is created to perform submission queue polling.
/// An io_uring instance configured in this way enables an application to issue I/O
/// without ever context switching into the kernel.
pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQPOLL;
self.params.sq_thread_idle = idle.into().unwrap_or(0);
self
}
/// If this flag is specified,
/// then the poll thread will be bound to the cpu set in the value.
/// This flag is only meaningful when [Builder::setup_sqpoll] is enabled.
pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQ_AFF;
self.params.sq_thread_cpu = n;
self
}
/// Create the completion queue with struct `io_uring_params.cq_entries` entries.
/// The value must be greater than entries, and may be rounded up to the next power-of-two.
pub fn setup_cqsize(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CQSIZE;
self.params.cq_entries = n;
self
}
pub fn setup_clamp(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CLAMP;
self
}
pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_ATTACH_WQ;
self.params.wq_fd = fd as _;
self
}
#[cfg(feature = "unstable")]
pub fn setup_r_disabled(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_R_DISABLED;
self
}
/// Build a [IoUring].
#[inline]
pub fn build(&self, entries: u32) -> io::Result<IoUring> {
let ring = IoUring::with_params(entries, self.params)?;
if self.dontfork {
ring.memory.sq_mmap.dontfork()?;
ring.memory.sqe_mmap.dontfork()?;
if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() {
cq_mmap.dontfork()?;
}
}
Ok(ring)
}
}
impl Parameters {
pub fn is_setup_sqpoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_SQPOLL != 0
}
pub fn is_setup_iopoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_IOPOLL != 0
}
/// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call.
/// The SQEs must still be allocated separately.
/// This brings the necessary `mmap(2)` calls down from three to two.
pub fn is_feature_single_mmap(&self) -> bool {
self.0.features & sys::IORING_FEAT_SINGLE_MMAP != 0
}
/// If this flag is set, io_uring supports never dropping completion events. If a completion
/// event occurs and the CQ ring is full, the kernel stores the event internally until such a
/// time that the CQ ring has room for more entries.
pub fn is_feature_nodrop(&self) -> bool {
self.0.features & sys::IORING_FEAT_NODROP != 0
}
/// If this flag is set, applications can be certain that any data for async offload has been consumed
/// when the kernel has consumed the SQE
pub fn is_feature_submit_stable(&self) -> bool {
self.0.features & sys::IORING_FEAT_SUBMIT_STABLE != 0
}
/// If this flag is set, applications can specify offset == -1 with
/// `IORING_OP_{READV,WRITEV}`, `IORING_OP_{READ,WRITE}_FIXED`, and `IORING_OP_{READ,WRITE}`
/// to mean current file position, which behaves like `preadv2(2)` and `pwritev2(2)` with offset == -1.
/// It’ll use (and update) the current file position.
///
/// This obviously comes with the caveat that if the application has multiple reads or writes in flight,
/// then the end result will not be as expected.
/// This is similar to threads sharing a file descriptor and doing IO using the current file position.
pub fn is_feature_rw_cur_pos(&self) -> bool {
self.0.features & sys::IORING_FEAT_RW_CUR_POS != 0
}
/// If this flag is set, then io_uring guarantees that both sync and async execution of
/// a request assumes the credentials of the task that called `io_uring_enter(2)` to queue the requests.
/// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring.
/// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same.
/// Note that this is the default behavior,
/// tasks can still register different personalities through
/// `io_uring_register(2)` with `IORING_REGISTER_PERSONALITY` and specify the personality to use in the sqe.
pub fn is_feature_cur_personality(&self) -> bool {
self.0.features & sys::IORING_FEAT_CUR_PERSONALITY != 0
}
#[cfg(feature = "unstable")]
pub fn is_feature_fast_poll(&self) -> bool {
| #[cfg(feature = "unstable")]
pub fn is_feature_poll_32bits(&self) -> bool {
self.0.features & sys::IORING_FEAT_POLL_32BITS != 0
}
pub fn sq_entries(&self) -> u32 {
self.0.sq_entries
}
pub fn cq_entries(&self) -> u32 {
self.0.cq_entries
}
}
impl AsRawFd for IoUring {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
| self.0.features & sys::IORING_FEAT_FAST_POLL != 0
}
| identifier_body |
lib.rs | //! The `io_uring` library for Rust.
//!
//! The crate only provides a summary of the parameters.
//! For more detailed documentation, see manpage.
#![cfg_attr(sgx, no_std)]
#[cfg(sgx)]
extern crate sgx_types;
#[cfg(sgx)]
#[macro_use]
extern crate sgx_tstd as std;
#[cfg(sgx)]
extern crate sgx_trts;
#[cfg(sgx)]
use std::prelude::v1::*;
#[cfg(sgx)]
pub use sgx_trts::libc;
#[macro_use]
mod util;
pub mod cqueue;
pub mod opcode;
mod register;
pub mod squeue;
mod submit;
mod sys;
#[cfg(any(feature = "concurrent", sgx))]
pub mod concurrent;
use std::convert::TryInto;
use std::mem::ManuallyDrop;
use std::os::unix::io::{AsRawFd, RawFd};
use std::{cmp, io, mem};
pub use cqueue::CompletionQueue;
pub use register::Probe;
pub use squeue::SubmissionQueue;
pub use submit::Submitter;
use util::{Fd, Mmap};
/// IoUring instance
pub struct IoUring {
fd: Fd,
params: Parameters,
memory: ManuallyDrop<MemoryMap>,
sq: SubmissionQueue,
cq: CompletionQueue,
}
#[allow(dead_code)]
struct MemoryMap {
sq_mmap: Mmap,
sqe_mmap: Mmap,
cq_mmap: Option<Mmap>,
}
/// IoUring build params
#[derive(Clone, Default)]
pub struct Builder {
dontfork: bool,
params: sys::io_uring_params,
}
#[derive(Clone)]
pub struct Parameters(sys::io_uring_params);
unsafe impl Send for IoUring {}
unsafe impl Sync for IoUring {}
impl IoUring {
/// Create a IoUring instance
///
/// The `entries` sets the size of queue,
/// and it value should be the power of two.
#[inline]
pub fn new(entries: u32) -> io::Result<IoUring> {
IoUring::with_params(entries, Default::default())
}
fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> {
// NOTE: The `SubmissionQueue` and `CompletionQueue` are references,
// and their lifetime can never exceed `MemoryMap`.
//
// The memory mapped regions of `MemoryMap` never move,
// so `SubmissionQueue` and `CompletionQueue` are `Unpin`.
//
// I really hope that Rust can safely use self-reference types.
#[inline]
unsafe fn setup_queue(
fd: &Fd,
p: &sys::io_uring_params,
) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> {
let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>();
let cq_len = p.cq_off.cqes as usize
+ p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>();
let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>();
let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?;
if p.features & sys::IORING_FEAT_SINGLE_MMAP != 0 {
let scq_mmap =
Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?;
let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&scq_mmap, p);
let mm = MemoryMap {
sq_mmap: scq_mmap,
cq_mmap: None,
sqe_mmap,
};
Ok((mm, sq, cq))
} else {
let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?;
let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?;
let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&cq_mmap, p);
let mm = MemoryMap {
cq_mmap: Some(cq_mmap),
sq_mmap,
sqe_mmap,
};
Ok((mm, sq, cq))
}
}
let fd: Fd = unsafe {
sys::io_uring_setup(entries, &mut p)
.try_into()
.map_err(|_| io::Error::last_os_error())?
};
let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? };
Ok(IoUring {
fd,
sq,
cq,
params: Parameters(p),
memory: ManuallyDrop::new(mm),
})
}
#[inline]
pub fn submitter(&self) -> Submitter<'_> {
Submitter::new(&self.fd, self.params.0.flags, &self.sq)
}
#[inline]
pub fn params(&self) -> &Parameters {
&self.params
}
pub fn start_enter_syscall_thread(&self) {
sys::start_enter_syscall_thread(self.fd.as_raw_fd());
}
/// Initiate and/or complete asynchronous I/O
///
/// # Safety
///
/// This provides a raw interface so developer must ensure that parameters are correct.
#[inline]
pub unsafe fn enter(
&self,
to_submit: u32,
min_complete: u32,
flag: u32,
sig: Option<&libc::sigset_t>,
) -> io::Result<usize> {
self.submitter().enter(to_submit, min_complete, flag, sig)
}
/// Initiate asynchronous I/O.
#[inline]
pub fn submit(&self) -> io::Result<usize> {
self.submitter().submit()
}
/// Initiate and/or complete asynchronous I/O
#[inline]
pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
self.submitter().submit_and_wait(want)
}
/// Get submitter and submission queue and completion queue
pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) {
let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq);
(submit, &mut self.sq, &mut self.cq)
}
/// Get submission queue
pub fn submission(&mut self) -> &mut SubmissionQueue {
&mut self.sq
}
/// Get completion queue
pub fn completion(&mut self) -> &mut CompletionQueue {
&mut self.cq
}
/// Make a concurrent IoUring.
#[cfg(any(feature = "concurrent", sgx))]
pub fn concurrent(self) -> concurrent::IoUring {
concurrent::IoUring::new(self)
}
}
impl Drop for IoUring {
fn drop(&mut self) {
unsafe {
ManuallyDrop::drop(&mut self.memory);
}
}
}
impl Builder {
pub fn dontfork(&mut self) -> &mut Self {
self.dontfork = true;
self
}
/// Perform busy-waiting for an I/O completion,
/// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request).
pub fn setup_iopoll(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_IOPOLL;
self
}
/// When this flag is specified, a kernel thread is created to perform submission queue polling.
/// An io_uring instance configured in this way enables an application to issue I/O
/// without ever context switching into the kernel.
pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQPOLL;
self.params.sq_thread_idle = idle.into().unwrap_or(0);
self
}
/// If this flag is specified,
/// then the poll thread will be bound to the cpu set in the value.
/// This flag is only meaningful when [Builder::setup_sqpoll] is enabled.
pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQ_AFF;
self.params.sq_thread_cpu = n;
self
}
/// Create the completion queue with struct `io_uring_params.cq_entries` entries.
/// The value must be greater than entries, and may be rounded up to the next power-of-two.
pub fn setup_cqsize(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CQSIZE;
self.params.cq_entries = n;
self
}
pub fn setup_clamp(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CLAMP;
self
}
pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_ATTACH_WQ;
self.params.wq_fd = fd as _;
self
}
#[cfg(feature = "unstable")]
pub fn setup_r_disabled(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_R_DISABLED;
self
}
/// Build a [IoUring].
#[inline]
pub fn build(&self, entries: u32) -> io::Result<IoUring> {
let ring = IoUring::with_params(entries, self.params)?;
if self.dontfork {
ring.memory.sq_mmap.dontfork()?;
ring.memory.sqe_mmap.dontfork()?;
if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() {
cq_mmap.dontfork()?;
}
}
Ok(ring)
}
}
impl Parameters {
pub fn is_setup_sqpoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_SQPOLL != 0
}
pub fn is_setup_iopoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_IOPOLL != 0
}
/// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call.
/// The SQEs must still be allocated separately.
/// This brings the necessary `mmap(2)` calls down from three to two.
pub fn | (&self) -> bool {
self.0.features & sys::IORING_FEAT_SINGLE_MMAP != 0
}
/// If this flag is set, io_uring supports never dropping completion events. If a completion
/// event occurs and the CQ ring is full, the kernel stores the event internally until such a
/// time that the CQ ring has room for more entries.
pub fn is_feature_nodrop(&self) -> bool {
self.0.features & sys::IORING_FEAT_NODROP != 0
}
/// If this flag is set, applications can be certain that any data for async offload has been consumed
/// when the kernel has consumed the SQE
pub fn is_feature_submit_stable(&self) -> bool {
self.0.features & sys::IORING_FEAT_SUBMIT_STABLE != 0
}
/// If this flag is set, applications can specify offset == -1 with
/// `IORING_OP_{READV,WRITEV}`, `IORING_OP_{READ,WRITE}_FIXED`, and `IORING_OP_{READ,WRITE}`
/// to mean current file position, which behaves like `preadv2(2)` and `pwritev2(2)` with offset == -1.
/// It’ll use (and update) the current file position.
///
/// This obviously comes with the caveat that if the application has multiple reads or writes in flight,
/// then the end result will not be as expected.
/// This is similar to threads sharing a file descriptor and doing IO using the current file position.
pub fn is_feature_rw_cur_pos(&self) -> bool {
self.0.features & sys::IORING_FEAT_RW_CUR_POS != 0
}
/// If this flag is set, then io_uring guarantees that both sync and async execution of
/// a request assumes the credentials of the task that called `io_uring_enter(2)` to queue the requests.
/// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring.
/// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same.
/// Note that this is the default behavior,
/// tasks can still register different personalities through
/// `io_uring_register(2)` with `IORING_REGISTER_PERSONALITY` and specify the personality to use in the sqe.
pub fn is_feature_cur_personality(&self) -> bool {
self.0.features & sys::IORING_FEAT_CUR_PERSONALITY != 0
}
#[cfg(feature = "unstable")]
pub fn is_feature_fast_poll(&self) -> bool {
self.0.features & sys::IORING_FEAT_FAST_POLL != 0
}
#[cfg(feature = "unstable")]
pub fn is_feature_poll_32bits(&self) -> bool {
self.0.features & sys::IORING_FEAT_POLL_32BITS != 0
}
pub fn sq_entries(&self) -> u32 {
self.0.sq_entries
}
pub fn cq_entries(&self) -> u32 {
self.0.cq_entries
}
}
impl AsRawFd for IoUring {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
| is_feature_single_mmap | identifier_name |
lib.rs | //! The `io_uring` library for Rust.
//!
//! The crate only provides a summary of the parameters.
//! For more detailed documentation, see manpage.
#![cfg_attr(sgx, no_std)]
#[cfg(sgx)]
extern crate sgx_types;
#[cfg(sgx)]
#[macro_use]
extern crate sgx_tstd as std;
#[cfg(sgx)]
extern crate sgx_trts;
#[cfg(sgx)]
use std::prelude::v1::*;
#[cfg(sgx)]
pub use sgx_trts::libc;
#[macro_use]
mod util;
pub mod cqueue;
pub mod opcode;
mod register;
pub mod squeue;
mod submit;
mod sys;
#[cfg(any(feature = "concurrent", sgx))]
pub mod concurrent;
use std::convert::TryInto;
use std::mem::ManuallyDrop;
use std::os::unix::io::{AsRawFd, RawFd};
use std::{cmp, io, mem};
pub use cqueue::CompletionQueue;
pub use register::Probe;
pub use squeue::SubmissionQueue;
pub use submit::Submitter;
use util::{Fd, Mmap};
/// IoUring instance
pub struct IoUring {
fd: Fd,
params: Parameters,
memory: ManuallyDrop<MemoryMap>,
sq: SubmissionQueue,
cq: CompletionQueue,
}
#[allow(dead_code)]
struct MemoryMap {
sq_mmap: Mmap,
sqe_mmap: Mmap,
cq_mmap: Option<Mmap>,
}
/// IoUring build params
#[derive(Clone, Default)]
pub struct Builder {
dontfork: bool,
params: sys::io_uring_params,
}
#[derive(Clone)]
pub struct Parameters(sys::io_uring_params);
unsafe impl Send for IoUring {}
unsafe impl Sync for IoUring {}
impl IoUring {
/// Create a IoUring instance
///
/// The `entries` sets the size of queue,
/// and it value should be the power of two.
#[inline]
pub fn new(entries: u32) -> io::Result<IoUring> {
IoUring::with_params(entries, Default::default())
}
fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> {
// NOTE: The `SubmissionQueue` and `CompletionQueue` are references,
// and their lifetime can never exceed `MemoryMap`.
//
// The memory mapped regions of `MemoryMap` never move,
// so `SubmissionQueue` and `CompletionQueue` are `Unpin`.
//
// I really hope that Rust can safely use self-reference types.
#[inline]
unsafe fn setup_queue(
fd: &Fd,
p: &sys::io_uring_params,
) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> {
let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>();
let cq_len = p.cq_off.cqes as usize
+ p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>();
let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>();
let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?;
if p.features & sys::IORING_FEAT_SINGLE_MMAP != 0 | else {
let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?;
let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?;
let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&cq_mmap, p);
let mm = MemoryMap {
cq_mmap: Some(cq_mmap),
sq_mmap,
sqe_mmap,
};
Ok((mm, sq, cq))
}
}
let fd: Fd = unsafe {
sys::io_uring_setup(entries, &mut p)
.try_into()
.map_err(|_| io::Error::last_os_error())?
};
let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? };
Ok(IoUring {
fd,
sq,
cq,
params: Parameters(p),
memory: ManuallyDrop::new(mm),
})
}
#[inline]
pub fn submitter(&self) -> Submitter<'_> {
Submitter::new(&self.fd, self.params.0.flags, &self.sq)
}
#[inline]
pub fn params(&self) -> &Parameters {
&self.params
}
pub fn start_enter_syscall_thread(&self) {
sys::start_enter_syscall_thread(self.fd.as_raw_fd());
}
/// Initiate and/or complete asynchronous I/O
///
/// # Safety
///
/// This provides a raw interface so developer must ensure that parameters are correct.
#[inline]
pub unsafe fn enter(
&self,
to_submit: u32,
min_complete: u32,
flag: u32,
sig: Option<&libc::sigset_t>,
) -> io::Result<usize> {
self.submitter().enter(to_submit, min_complete, flag, sig)
}
/// Initiate asynchronous I/O.
#[inline]
pub fn submit(&self) -> io::Result<usize> {
self.submitter().submit()
}
/// Initiate and/or complete asynchronous I/O
#[inline]
pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> {
self.submitter().submit_and_wait(want)
}
/// Get submitter and submission queue and completion queue
pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) {
let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq);
(submit, &mut self.sq, &mut self.cq)
}
/// Get submission queue
pub fn submission(&mut self) -> &mut SubmissionQueue {
&mut self.sq
}
/// Get completion queue
pub fn completion(&mut self) -> &mut CompletionQueue {
&mut self.cq
}
/// Make a concurrent IoUring.
#[cfg(any(feature = "concurrent", sgx))]
pub fn concurrent(self) -> concurrent::IoUring {
concurrent::IoUring::new(self)
}
}
impl Drop for IoUring {
fn drop(&mut self) {
unsafe {
ManuallyDrop::drop(&mut self.memory);
}
}
}
impl Builder {
pub fn dontfork(&mut self) -> &mut Self {
self.dontfork = true;
self
}
/// Perform busy-waiting for an I/O completion,
/// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request).
pub fn setup_iopoll(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_IOPOLL;
self
}
/// When this flag is specified, a kernel thread is created to perform submission queue polling.
/// An io_uring instance configured in this way enables an application to issue I/O
/// without ever context switching into the kernel.
pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQPOLL;
self.params.sq_thread_idle = idle.into().unwrap_or(0);
self
}
/// If this flag is specified,
/// then the poll thread will be bound to the cpu set in the value.
/// This flag is only meaningful when [Builder::setup_sqpoll] is enabled.
pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_SQ_AFF;
self.params.sq_thread_cpu = n;
self
}
/// Create the completion queue with struct `io_uring_params.cq_entries` entries.
/// The value must be greater than entries, and may be rounded up to the next power-of-two.
pub fn setup_cqsize(&mut self, n: u32) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CQSIZE;
self.params.cq_entries = n;
self
}
pub fn setup_clamp(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_CLAMP;
self
}
pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_ATTACH_WQ;
self.params.wq_fd = fd as _;
self
}
#[cfg(feature = "unstable")]
pub fn setup_r_disabled(&mut self) -> &mut Self {
self.params.flags |= sys::IORING_SETUP_R_DISABLED;
self
}
/// Build a [IoUring].
#[inline]
pub fn build(&self, entries: u32) -> io::Result<IoUring> {
let ring = IoUring::with_params(entries, self.params)?;
if self.dontfork {
ring.memory.sq_mmap.dontfork()?;
ring.memory.sqe_mmap.dontfork()?;
if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() {
cq_mmap.dontfork()?;
}
}
Ok(ring)
}
}
impl Parameters {
pub fn is_setup_sqpoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_SQPOLL != 0
}
pub fn is_setup_iopoll(&self) -> bool {
self.0.flags & sys::IORING_SETUP_IOPOLL != 0
}
/// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call.
/// The SQEs must still be allocated separately.
/// This brings the necessary `mmap(2)` calls down from three to two.
pub fn is_feature_single_mmap(&self) -> bool {
self.0.features & sys::IORING_FEAT_SINGLE_MMAP != 0
}
/// If this flag is set, io_uring supports never dropping completion events. If a completion
/// event occurs and the CQ ring is full, the kernel stores the event internally until such a
/// time that the CQ ring has room for more entries.
pub fn is_feature_nodrop(&self) -> bool {
self.0.features & sys::IORING_FEAT_NODROP != 0
}
/// If this flag is set, applications can be certain that any data for async offload has been consumed
/// when the kernel has consumed the SQE
pub fn is_feature_submit_stable(&self) -> bool {
self.0.features & sys::IORING_FEAT_SUBMIT_STABLE != 0
}
/// If this flag is set, applications can specify offset == -1 with
/// `IORING_OP_{READV,WRITEV}`, `IORING_OP_{READ,WRITE}_FIXED`, and `IORING_OP_{READ,WRITE}`
/// to mean current file position, which behaves like `preadv2(2)` and `pwritev2(2)` with offset == -1.
/// It’ll use (and update) the current file position.
///
/// This obviously comes with the caveat that if the application has multiple reads or writes in flight,
/// then the end result will not be as expected.
/// This is similar to threads sharing a file descriptor and doing IO using the current file position.
pub fn is_feature_rw_cur_pos(&self) -> bool {
self.0.features & sys::IORING_FEAT_RW_CUR_POS != 0
}
/// If this flag is set, then io_uring guarantees that both sync and async execution of
/// a request assumes the credentials of the task that called `io_uring_enter(2)` to queue the requests.
/// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring.
/// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same.
/// Note that this is the default behavior,
/// tasks can still register different personalities through
/// `io_uring_register(2)` with `IORING_REGISTER_PERSONALITY` and specify the personality to use in the sqe.
pub fn is_feature_cur_personality(&self) -> bool {
self.0.features & sys::IORING_FEAT_CUR_PERSONALITY != 0
}
#[cfg(feature = "unstable")]
pub fn is_feature_fast_poll(&self) -> bool {
self.0.features & sys::IORING_FEAT_FAST_POLL != 0
}
#[cfg(feature = "unstable")]
pub fn is_feature_poll_32bits(&self) -> bool {
self.0.features & sys::IORING_FEAT_POLL_32BITS != 0
}
pub fn sq_entries(&self) -> u32 {
self.0.sq_entries
}
pub fn cq_entries(&self) -> u32 {
self.0.cq_entries
}
}
impl AsRawFd for IoUring {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
| {
let scq_mmap =
Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?;
let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p);
let cq = CompletionQueue::new(&scq_mmap, p);
let mm = MemoryMap {
sq_mmap: scq_mmap,
cq_mmap: None,
sqe_mmap,
};
Ok((mm, sq, cq))
} | conditional_block |
seq2seq.py | import tensorflow as tf
from parameters import *
init_parameters()
class GRU(object):
def __init__(self, input_dimensions, hidden_size, name='', dtype=tf.float64):
# Initialize Init attributes
self.input_dimensions = input_dimensions
self.hidden_size = hidden_size
self.name = name
# Initialize Method attributes
self.x_t = None
self.h_0 = None
self.h_t = None
self.h_t_transposed = None
# Weights for input vectors of shape (input_dimensions, hidden_size)
self.Wr = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wr' + self.name)
self.Wz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wz' + self.name)
self.Wh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wh' + self.name)
# Weights for hidden vectors of shape (hidden_size, hidden_size)
self.Ur = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Ur' + self.name)
self.Uz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uz' + self.name)
self.Uh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uh' + self.name)
# Biases for hidden vectors of shape (hidden_size,)
self.br = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='br' + self.name)
self.bz = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bz' + self.name)
self.bh = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bh' + self.name)
def forward_pass(self, h_tm1, x_t): # Function though to be used by tf.scan
"""Perform a forward pass.
:param h_tm1: np.matrix. The hidden state at the previous timestep (h_{t-1}).
:param x_t: np.matrix. The input vector.
:return:
"""
# Convert vector-tensor form into matrix-tensor form
x_t = tf.reshape(x_t, shape=[1, -1])
h_tm1 = tf.reshape(h_tm1, shape=[1, -1])
# Definitions of z_t and r_t
z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)
r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)
# Definition of h~_t
h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)
# Compute the next hidden state
h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)
return tf.squeeze(h_t)
def process_sequence(self, sequence, h_0=None):
# Put the time-dimension upfront for the scan operator
self.x_t = tf.transpose(sequence, [1, 0], name='x_t') # [n_words, embedding_dim]
if h_0 is None:
# A little hack (to obtain the same shape as the input matrix) to define the initial hidden state h_0
self.h_0 = tf.zeros(dtype=tf.float64, shape=(self.hidden_size,), name='h_0')
else:
|
# Perform the scan operator (hacky as fac diud)
self.h_t_transposed = tf.scan(self.forward_pass, self.x_t, self.h_0, name='h_t_transposed')
# Transpose the result back
self.h_t = tf.transpose(self.h_t_transposed, [1, 0], name='h_t')
return self.h_t
def predict_sequence(self, h_0):
"""
Output sequence. This function iterates self.forward_pass until it gets the EOL.
:param h_0: Initial state
:return: predict_sentence
"""
# Inital values. The are required to be reshaped to rank2-tensor be concated afterwards
init_predict_sentence = tf.zeros([10, 1], dtype=tf.float64, name='whileloop_init_sentence')
init_prediction = tf.reshape(h_0, shape=[-1, 1], name='whileloop_init_prediction')
def loop_cond(prediction, predict_sentence): # predict_sentence argument is required by tf.while_loop
threshold = tf.constant(0.01, dtype=tf.float64, name='whileloop_threshold')
boolean = tf.greater((tf.reduce_sum(tf.pow(prediction, 2)) ** 0.5), threshold, name='whileloop_boolean')
return boolean
def loop_body(prev_prediction, prev_predict_sentence):
"""This function is a little bit hacky. Tensorflow's loops don't support neither fetching global scope variables
that are transformed but not returned from the loop nor modify the rank of the returned tensor in every
iteration of the loop.
This seems to be overcome defining the predict_sentence in two stages, one for the previous iter state an
another one for the next state.
:param prev_prediction:
:param prev_predict_sentence:
:return: [next_prediction, next_predict_sentence]
"""
# In the predict_model the previous state and the input state for the forward_pass are the same
next_prediction = self.forward_pass(prev_prediction, prev_prediction)
next_prediction = tf.reshape(next_prediction, shape=[-1, 1], name='whileloop_next_prediction')
# Concat the predicted word to the sentence (instead of list.append() cause tf.while_loop() doesn't support
# no-tensor arguments)
next_predict_sentence = tf.concat(axis=1, values=[prev_prediction, prev_predict_sentence],
name='whileloop_next_prediction_sentence')
return [next_prediction, next_predict_sentence]
# While loop that return the predict sentence
_, predict_sentence = tf.while_loop(cond=loop_cond,
body=loop_body,
loop_vars=[init_prediction, init_predict_sentence],
shape_invariants=[tf.TensorShape([10, 1]), tf.TensorShape([10, None])],
maximum_iterations=10,
name='whileloop_predict_sentence')
return predict_sentence
# Initialize the model
# The input has 2 dimensions: dimension 0 is reserved for the first term and dimension 1 is reserved for the second term
# Create a placeholder
input_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='input_data') # emb_dim x n_words
output_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='output_data')
# Create End Of Sentence vector
EOS = tf.zeros(dtype=tf.float64, shape=[Word2Vec_embedding_dim, 1], name='EOS')
input_sentence_ended = tf.concat([input_sentence, EOS], axis=1, name='input_data_ended')
output_sentence_ended = tf.concat([output_sentence, EOS], axis=1, name='output_data_ended')
# Create the GRU layer
gru_layer_encoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_encoder')
gru_layer_decoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_decoder')
# Training_process - ONE NN ENCODER - DECODER
input_encoded = gru_layer_encoder.process_sequence(input_sentence_ended, h_0=None) # Process the first sentence
thought_vector = input_encoded[:, -1] # Extract the last state vector (thought) from the input response
train_decoded = gru_layer_decoder.process_sequence(output_sentence_ended, h_0=thought_vector) # Train_answer
pred_decoded = gru_layer_decoder.predict_sequence(h_0=thought_vector)
# Output_data
train_predicted_output = tf.convert_to_tensor(train_decoded, dtype=tf.float64, name='train_output')
pred_predicted_output = tf.convert_to_tensor(pred_decoded, dtype=tf.float64, name='pred_output')
# Loss
loss = tf.reduce_sum(0.5 * tf.pow(train_predicted_output - output_sentence_ended, 2)) # / float(batch_size)
# loss = [sum((real_word-prediction)**2)/embedding_dim for (real_word, prediction) in zip(real_words, predictions)]
# Optimizer
train_step = tf.train.AdamOptimizer().minimize(loss)
if __name__ == "__main__":
from disintegrator import *
from Word2Vec import *
parameters.init()
# Prepare data for training the seq2seq
prepare = DataPreparation()
text = prepare.make_disintegration
sent = prepare.get_sentences(text)
dicc = prepare.get_dictionary(text, stopwords, vocab_size)
data = prepare.get_word_list(sent, stopwords, window_size=Word2Vec_window_size)
print('Propiedades del corpus: \n')
print('\tDiccionario con %d palabras' % (len(dicc['w2i'])))
word_to_vec = Word2Vec(vocab_size, Word2Vec_embedding_dim, Word2Vec_optimizer_step)
x_train, y_train = word_to_vec.training_data(data)
W1, b1 = word_to_vec.train(x_train, y_train)
vocab_vectors = W1+b1
conversations = []
for i in range(len(sent)-2):
if len(sent[i+1]) != 0 and len(sent[i+2]) != 0: # to avoid empty sentences
conversations.append([sent[i+1], sent[i+2]])
# TRAIN THE MODEL
# Initialize all the variables
session = tf.Session()
init_variables = tf.global_variables_initializer()
session.run(init_variables)
losses = []
for conversation in conversations:
# Convert text to vector
_input_sentence = word_to_vec.encoder(conversation[0])
_output_sentence = word_to_vec.encoder(conversation[1])
# Convert list-structure to array-structure
_input_sentence = np.transpose(np.array(_input_sentence))
_output_sentence = np.transpose(np.array(_output_sentence))
# Run the graph
_, _loss = session.run([train_step, loss],
feed_dict={input_sentence: _input_sentence, output_sentence: _output_sentence})
losses.append(_loss)
# Save the model
saver = tf.train.Saver()
saver.save(sess, "./model/seq2seq_model")
# Prediction
_input_sentence = 'hola que tal?'
print('yo: \t', _input_sentence)
# Convert text to vector
_input_sentence = word_to_vec.encoder(' '.split(_input_sentence))
# Convert list-structure to array-structure
_input_sentence = np.transpose(np.array(_input_sentence))
# Run the graph
prediction = session.run(pred_predicted_output, feed_dict={input_sentence: _input_sentence})
# Decode
prediction = np.transpose(np.array(prediction))
_output_sentence = word_to_vec.decoder(pred)
# Sentence
print('bot: \t', ' '.join(_output_sentence))
| self.h_0 = h_0 | conditional_block |
seq2seq.py | import tensorflow as tf
from parameters import *
init_parameters()
class GRU(object):
def __init__(self, input_dimensions, hidden_size, name='', dtype=tf.float64):
# Initialize Init attributes
self.input_dimensions = input_dimensions
self.hidden_size = hidden_size
self.name = name
# Initialize Method attributes
self.x_t = None
self.h_0 = None
self.h_t = None
self.h_t_transposed = None
# Weights for input vectors of shape (input_dimensions, hidden_size)
self.Wr = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wr' + self.name)
self.Wz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wz' + self.name)
self.Wh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wh' + self.name)
# Weights for hidden vectors of shape (hidden_size, hidden_size)
self.Ur = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Ur' + self.name)
self.Uz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uz' + self.name)
self.Uh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uh' + self.name)
# Biases for hidden vectors of shape (hidden_size,)
self.br = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='br' + self.name)
self.bz = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bz' + self.name)
self.bh = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bh' + self.name)
def forward_pass(self, h_tm1, x_t): # Function though to be used by tf.scan
"""Perform a forward pass.
:param h_tm1: np.matrix. The hidden state at the previous timestep (h_{t-1}).
:param x_t: np.matrix. The input vector. | h_tm1 = tf.reshape(h_tm1, shape=[1, -1])
# Definitions of z_t and r_t
z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)
r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)
# Definition of h~_t
h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)
# Compute the next hidden state
h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)
return tf.squeeze(h_t)
def process_sequence(self, sequence, h_0=None):
# Put the time-dimension upfront for the scan operator
self.x_t = tf.transpose(sequence, [1, 0], name='x_t') # [n_words, embedding_dim]
if h_0 is None:
# A little hack (to obtain the same shape as the input matrix) to define the initial hidden state h_0
self.h_0 = tf.zeros(dtype=tf.float64, shape=(self.hidden_size,), name='h_0')
else:
self.h_0 = h_0
# Perform the scan operator (hacky as fac diud)
self.h_t_transposed = tf.scan(self.forward_pass, self.x_t, self.h_0, name='h_t_transposed')
# Transpose the result back
self.h_t = tf.transpose(self.h_t_transposed, [1, 0], name='h_t')
return self.h_t
def predict_sequence(self, h_0):
"""
Output sequence. This function iterates self.forward_pass until it gets the EOL.
:param h_0: Initial state
:return: predict_sentence
"""
# Inital values. The are required to be reshaped to rank2-tensor be concated afterwards
init_predict_sentence = tf.zeros([10, 1], dtype=tf.float64, name='whileloop_init_sentence')
init_prediction = tf.reshape(h_0, shape=[-1, 1], name='whileloop_init_prediction')
def loop_cond(prediction, predict_sentence): # predict_sentence argument is required by tf.while_loop
threshold = tf.constant(0.01, dtype=tf.float64, name='whileloop_threshold')
boolean = tf.greater((tf.reduce_sum(tf.pow(prediction, 2)) ** 0.5), threshold, name='whileloop_boolean')
return boolean
def loop_body(prev_prediction, prev_predict_sentence):
"""This function is a little bit hacky. Tensorflow's loops don't support neither fetching global scope variables
that are transformed but not returned from the loop nor modify the rank of the returned tensor in every
iteration of the loop.
This seems to be overcome defining the predict_sentence in two stages, one for the previous iter state an
another one for the next state.
:param prev_prediction:
:param prev_predict_sentence:
:return: [next_prediction, next_predict_sentence]
"""
# In the predict_model the previous state and the input state for the forward_pass are the same
next_prediction = self.forward_pass(prev_prediction, prev_prediction)
next_prediction = tf.reshape(next_prediction, shape=[-1, 1], name='whileloop_next_prediction')
# Concat the predicted word to the sentence (instead of list.append() cause tf.while_loop() doesn't support
# no-tensor arguments)
next_predict_sentence = tf.concat(axis=1, values=[prev_prediction, prev_predict_sentence],
name='whileloop_next_prediction_sentence')
return [next_prediction, next_predict_sentence]
# While loop that return the predict sentence
_, predict_sentence = tf.while_loop(cond=loop_cond,
body=loop_body,
loop_vars=[init_prediction, init_predict_sentence],
shape_invariants=[tf.TensorShape([10, 1]), tf.TensorShape([10, None])],
maximum_iterations=10,
name='whileloop_predict_sentence')
return predict_sentence
# Initialize the model
# The input has 2 dimensions: dimension 0 is reserved for the first term and dimension 1 is reserved for the second term
# Create a placeholder
input_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='input_data') # emb_dim x n_words
output_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='output_data')
# Create End Of Sentence vector
EOS = tf.zeros(dtype=tf.float64, shape=[Word2Vec_embedding_dim, 1], name='EOS')
input_sentence_ended = tf.concat([input_sentence, EOS], axis=1, name='input_data_ended')
output_sentence_ended = tf.concat([output_sentence, EOS], axis=1, name='output_data_ended')
# Create the GRU layer
gru_layer_encoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_encoder')
gru_layer_decoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_decoder')
# Training_process - ONE NN ENCODER - DECODER
input_encoded = gru_layer_encoder.process_sequence(input_sentence_ended, h_0=None) # Process the first sentence
thought_vector = input_encoded[:, -1] # Extract the last state vector (thought) from the input response
train_decoded = gru_layer_decoder.process_sequence(output_sentence_ended, h_0=thought_vector) # Train_answer
pred_decoded = gru_layer_decoder.predict_sequence(h_0=thought_vector)
# Output_data
train_predicted_output = tf.convert_to_tensor(train_decoded, dtype=tf.float64, name='train_output')
pred_predicted_output = tf.convert_to_tensor(pred_decoded, dtype=tf.float64, name='pred_output')
# Loss
loss = tf.reduce_sum(0.5 * tf.pow(train_predicted_output - output_sentence_ended, 2)) # / float(batch_size)
# loss = [sum((real_word-prediction)**2)/embedding_dim for (real_word, prediction) in zip(real_words, predictions)]
# Optimizer
train_step = tf.train.AdamOptimizer().minimize(loss)
if __name__ == "__main__":
from disintegrator import *
from Word2Vec import *
parameters.init()
# Prepare data for training the seq2seq
prepare = DataPreparation()
text = prepare.make_disintegration
sent = prepare.get_sentences(text)
dicc = prepare.get_dictionary(text, stopwords, vocab_size)
data = prepare.get_word_list(sent, stopwords, window_size=Word2Vec_window_size)
print('Propiedades del corpus: \n')
print('\tDiccionario con %d palabras' % (len(dicc['w2i'])))
word_to_vec = Word2Vec(vocab_size, Word2Vec_embedding_dim, Word2Vec_optimizer_step)
x_train, y_train = word_to_vec.training_data(data)
W1, b1 = word_to_vec.train(x_train, y_train)
vocab_vectors = W1+b1
conversations = []
for i in range(len(sent)-2):
if len(sent[i+1]) != 0 and len(sent[i+2]) != 0: # to avoid empty sentences
conversations.append([sent[i+1], sent[i+2]])
# TRAIN THE MODEL
# Initialize all the variables
session = tf.Session()
init_variables = tf.global_variables_initializer()
session.run(init_variables)
losses = []
for conversation in conversations:
# Convert text to vector
_input_sentence = word_to_vec.encoder(conversation[0])
_output_sentence = word_to_vec.encoder(conversation[1])
# Convert list-structure to array-structure
_input_sentence = np.transpose(np.array(_input_sentence))
_output_sentence = np.transpose(np.array(_output_sentence))
# Run the graph
_, _loss = session.run([train_step, loss],
feed_dict={input_sentence: _input_sentence, output_sentence: _output_sentence})
losses.append(_loss)
# Save the model
saver = tf.train.Saver()
saver.save(sess, "./model/seq2seq_model")
# Prediction
_input_sentence = 'hola que tal?'
print('yo: \t', _input_sentence)
# Convert text to vector
_input_sentence = word_to_vec.encoder(' '.split(_input_sentence))
# Convert list-structure to array-structure
_input_sentence = np.transpose(np.array(_input_sentence))
# Run the graph
prediction = session.run(pred_predicted_output, feed_dict={input_sentence: _input_sentence})
# Decode
prediction = np.transpose(np.array(prediction))
_output_sentence = word_to_vec.decoder(pred)
# Sentence
print('bot: \t', ' '.join(_output_sentence)) | :return:
"""
# Convert vector-tensor form into matrix-tensor form
x_t = tf.reshape(x_t, shape=[1, -1]) | random_line_split |
seq2seq.py | import tensorflow as tf
from parameters import *
init_parameters()
class GRU(object):
def __init__(self, input_dimensions, hidden_size, name='', dtype=tf.float64):
# Initialize Init attributes
self.input_dimensions = input_dimensions
self.hidden_size = hidden_size
self.name = name
# Initialize Method attributes
self.x_t = None
self.h_0 = None
self.h_t = None
self.h_t_transposed = None
# Weights for input vectors of shape (input_dimensions, hidden_size)
self.Wr = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wr' + self.name)
self.Wz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wz' + self.name)
self.Wh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wh' + self.name)
# Weights for hidden vectors of shape (hidden_size, hidden_size)
self.Ur = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Ur' + self.name)
self.Uz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uz' + self.name)
self.Uh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uh' + self.name)
# Biases for hidden vectors of shape (hidden_size,)
self.br = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='br' + self.name)
self.bz = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bz' + self.name)
self.bh = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bh' + self.name)
def forward_pass(self, h_tm1, x_t): # Function though to be used by tf.scan
|
def process_sequence(self, sequence, h_0=None):
# Put the time-dimension upfront for the scan operator
self.x_t = tf.transpose(sequence, [1, 0], name='x_t') # [n_words, embedding_dim]
if h_0 is None:
# A little hack (to obtain the same shape as the input matrix) to define the initial hidden state h_0
self.h_0 = tf.zeros(dtype=tf.float64, shape=(self.hidden_size,), name='h_0')
else:
self.h_0 = h_0
# Perform the scan operator (hacky as fac diud)
self.h_t_transposed = tf.scan(self.forward_pass, self.x_t, self.h_0, name='h_t_transposed')
# Transpose the result back
self.h_t = tf.transpose(self.h_t_transposed, [1, 0], name='h_t')
return self.h_t
def predict_sequence(self, h_0):
"""
Output sequence. This function iterates self.forward_pass until it gets the EOL.
:param h_0: Initial state
:return: predict_sentence
"""
# Inital values. The are required to be reshaped to rank2-tensor be concated afterwards
init_predict_sentence = tf.zeros([10, 1], dtype=tf.float64, name='whileloop_init_sentence')
init_prediction = tf.reshape(h_0, shape=[-1, 1], name='whileloop_init_prediction')
def loop_cond(prediction, predict_sentence): # predict_sentence argument is required by tf.while_loop
threshold = tf.constant(0.01, dtype=tf.float64, name='whileloop_threshold')
boolean = tf.greater((tf.reduce_sum(tf.pow(prediction, 2)) ** 0.5), threshold, name='whileloop_boolean')
return boolean
def loop_body(prev_prediction, prev_predict_sentence):
"""This function is a little bit hacky. Tensorflow's loops don't support neither fetching global scope variables
that are transformed but not returned from the loop nor modify the rank of the returned tensor in every
iteration of the loop.
This seems to be overcome defining the predict_sentence in two stages, one for the previous iter state an
another one for the next state.
:param prev_prediction:
:param prev_predict_sentence:
:return: [next_prediction, next_predict_sentence]
"""
# In the predict_model the previous state and the input state for the forward_pass are the same
next_prediction = self.forward_pass(prev_prediction, prev_prediction)
next_prediction = tf.reshape(next_prediction, shape=[-1, 1], name='whileloop_next_prediction')
# Concat the predicted word to the sentence (instead of list.append() cause tf.while_loop() doesn't support
# no-tensor arguments)
next_predict_sentence = tf.concat(axis=1, values=[prev_prediction, prev_predict_sentence],
name='whileloop_next_prediction_sentence')
return [next_prediction, next_predict_sentence]
# While loop that return the predict sentence
_, predict_sentence = tf.while_loop(cond=loop_cond,
body=loop_body,
loop_vars=[init_prediction, init_predict_sentence],
shape_invariants=[tf.TensorShape([10, 1]), tf.TensorShape([10, None])],
maximum_iterations=10,
name='whileloop_predict_sentence')
return predict_sentence
# Initialize the model
# The input has 2 dimensions: dimension 0 is reserved for the first term and dimension 1 is reserved for the second term
# Create a placeholder
input_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='input_data') # emb_dim x n_words
output_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='output_data')
# Create End Of Sentence vector
EOS = tf.zeros(dtype=tf.float64, shape=[Word2Vec_embedding_dim, 1], name='EOS')
input_sentence_ended = tf.concat([input_sentence, EOS], axis=1, name='input_data_ended')
output_sentence_ended = tf.concat([output_sentence, EOS], axis=1, name='output_data_ended')
# Create the GRU layer
gru_layer_encoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_encoder')
gru_layer_decoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_decoder')
# Training_process - ONE NN ENCODER - DECODER
input_encoded = gru_layer_encoder.process_sequence(input_sentence_ended, h_0=None) # Process the first sentence
thought_vector = input_encoded[:, -1] # Extract the last state vector (thought) from the input response
train_decoded = gru_layer_decoder.process_sequence(output_sentence_ended, h_0=thought_vector) # Train_answer
pred_decoded = gru_layer_decoder.predict_sequence(h_0=thought_vector)
# Output_data
train_predicted_output = tf.convert_to_tensor(train_decoded, dtype=tf.float64, name='train_output')
pred_predicted_output = tf.convert_to_tensor(pred_decoded, dtype=tf.float64, name='pred_output')
# Loss
loss = tf.reduce_sum(0.5 * tf.pow(train_predicted_output - output_sentence_ended, 2)) # / float(batch_size)
# loss = [sum((real_word-prediction)**2)/embedding_dim for (real_word, prediction) in zip(real_words, predictions)]
# Optimizer
train_step = tf.train.AdamOptimizer().minimize(loss)
if __name__ == "__main__":
from disintegrator import *
from Word2Vec import *
parameters.init()
# Prepare data for training the seq2seq
prepare = DataPreparation()
text = prepare.make_disintegration
sent = prepare.get_sentences(text)
dicc = prepare.get_dictionary(text, stopwords, vocab_size)
data = prepare.get_word_list(sent, stopwords, window_size=Word2Vec_window_size)
print('Propiedades del corpus: \n')
print('\tDiccionario con %d palabras' % (len(dicc['w2i'])))
word_to_vec = Word2Vec(vocab_size, Word2Vec_embedding_dim, Word2Vec_optimizer_step)
x_train, y_train = word_to_vec.training_data(data)
W1, b1 = word_to_vec.train(x_train, y_train)
vocab_vectors = W1+b1
conversations = []
for i in range(len(sent)-2):
if len(sent[i+1]) != 0 and len(sent[i+2]) != 0: # to avoid empty sentences
conversations.append([sent[i+1], sent[i+2]])
# TRAIN THE MODEL
# Initialize all the variables
session = tf.Session()
init_variables = tf.global_variables_initializer()
session.run(init_variables)
losses = []
for conversation in conversations:
# Convert text to vector
_input_sentence = word_to_vec.encoder(conversation[0])
_output_sentence = word_to_vec.encoder(conversation[1])
# Convert list-structure to array-structure
_input_sentence = np.transpose(np.array(_input_sentence))
_output_sentence = np.transpose(np.array(_output_sentence))
# Run the graph
_, _loss = session.run([train_step, loss],
feed_dict={input_sentence: _input_sentence, output_sentence: _output_sentence})
losses.append(_loss)
# Save the model
saver = tf.train.Saver()
saver.save(sess, "./model/seq2seq_model")
# Prediction
_input_sentence = 'hola que tal?'
print('yo: \t', _input_sentence)
# Convert text to vector
_input_sentence = word_to_vec.encoder(' '.split(_input_sentence))
# Convert list-structure to array-structure
_input_sentence = np.transpose(np.array(_input_sentence))
# Run the graph
prediction = session.run(pred_predicted_output, feed_dict={input_sentence: _input_sentence})
# Decode
prediction = np.transpose(np.array(prediction))
_output_sentence = word_to_vec.decoder(pred)
# Sentence
print('bot: \t', ' '.join(_output_sentence))
| """Perform a forward pass.
:param h_tm1: np.matrix. The hidden state at the previous timestep (h_{t-1}).
:param x_t: np.matrix. The input vector.
:return:
"""
# Convert vector-tensor form into matrix-tensor form
x_t = tf.reshape(x_t, shape=[1, -1])
h_tm1 = tf.reshape(h_tm1, shape=[1, -1])
# Definitions of z_t and r_t
z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)
r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)
# Definition of h~_t
h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)
# Compute the next hidden state
h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)
return tf.squeeze(h_t) | identifier_body |
seq2seq.py | import tensorflow as tf
from parameters import *
init_parameters()
class GRU(object):
def __init__(self, input_dimensions, hidden_size, name='', dtype=tf.float64):
# Initialize Init attributes
self.input_dimensions = input_dimensions
self.hidden_size = hidden_size
self.name = name
# Initialize Method attributes
self.x_t = None
self.h_0 = None
self.h_t = None
self.h_t_transposed = None
# Weights for input vectors of shape (input_dimensions, hidden_size)
self.Wr = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wr' + self.name)
self.Wz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wz' + self.name)
self.Wh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.input_dimensions, self.hidden_size), mean=0, stddev=0.01),
name='Wh' + self.name)
# Weights for hidden vectors of shape (hidden_size, hidden_size)
self.Ur = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Ur' + self.name)
self.Uz = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uz' + self.name)
self.Uh = tf.Variable(
tf.truncated_normal(dtype=dtype, shape=(self.hidden_size, self.hidden_size), mean=0, stddev=0.01),
name='Uh' + self.name)
# Biases for hidden vectors of shape (hidden_size,)
self.br = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='br' + self.name)
self.bz = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bz' + self.name)
self.bh = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(self.hidden_size,), mean=0, stddev=0.01),
name='bh' + self.name)
def | (self, h_tm1, x_t): # Function though to be used by tf.scan
"""Perform a forward pass.
:param h_tm1: np.matrix. The hidden state at the previous timestep (h_{t-1}).
:param x_t: np.matrix. The input vector.
:return:
"""
# Convert vector-tensor form into matrix-tensor form
x_t = tf.reshape(x_t, shape=[1, -1])
h_tm1 = tf.reshape(h_tm1, shape=[1, -1])
# Definitions of z_t and r_t
z_t = tf.sigmoid(tf.matmul(x_t, self.Wz) + tf.matmul(h_tm1, self.Uz) + self.bz)
r_t = tf.sigmoid(tf.matmul(x_t, self.Wr) + tf.matmul(h_tm1, self.Ur) + self.br)
# Definition of h~_t
h_proposal = tf.tanh(tf.matmul(x_t, self.Wh) + tf.matmul(tf.multiply(r_t, h_tm1), self.Uh) + self.bh)
# Compute the next hidden state
h_t = tf.multiply(1 - z_t, h_tm1) + tf.multiply(z_t, h_proposal)
return tf.squeeze(h_t)
def process_sequence(self, sequence, h_0=None):
# Put the time-dimension upfront for the scan operator
self.x_t = tf.transpose(sequence, [1, 0], name='x_t') # [n_words, embedding_dim]
if h_0 is None:
# A little hack (to obtain the same shape as the input matrix) to define the initial hidden state h_0
self.h_0 = tf.zeros(dtype=tf.float64, shape=(self.hidden_size,), name='h_0')
else:
self.h_0 = h_0
# Perform the scan operator (hacky as fac diud)
self.h_t_transposed = tf.scan(self.forward_pass, self.x_t, self.h_0, name='h_t_transposed')
# Transpose the result back
self.h_t = tf.transpose(self.h_t_transposed, [1, 0], name='h_t')
return self.h_t
def predict_sequence(self, h_0):
"""
Output sequence. This function iterates self.forward_pass until it gets the EOL.
:param h_0: Initial state
:return: predict_sentence
"""
# Inital values. The are required to be reshaped to rank2-tensor be concated afterwards
init_predict_sentence = tf.zeros([10, 1], dtype=tf.float64, name='whileloop_init_sentence')
init_prediction = tf.reshape(h_0, shape=[-1, 1], name='whileloop_init_prediction')
def loop_cond(prediction, predict_sentence): # predict_sentence argument is required by tf.while_loop
threshold = tf.constant(0.01, dtype=tf.float64, name='whileloop_threshold')
boolean = tf.greater((tf.reduce_sum(tf.pow(prediction, 2)) ** 0.5), threshold, name='whileloop_boolean')
return boolean
def loop_body(prev_prediction, prev_predict_sentence):
"""This function is a little bit hacky. Tensorflow's loops don't support neither fetching global scope variables
that are transformed but not returned from the loop nor modify the rank of the returned tensor in every
iteration of the loop.
This seems to be overcome defining the predict_sentence in two stages, one for the previous iter state an
another one for the next state.
:param prev_prediction:
:param prev_predict_sentence:
:return: [next_prediction, next_predict_sentence]
"""
# In the predict_model the previous state and the input state for the forward_pass are the same
next_prediction = self.forward_pass(prev_prediction, prev_prediction)
next_prediction = tf.reshape(next_prediction, shape=[-1, 1], name='whileloop_next_prediction')
# Concat the predicted word to the sentence (instead of list.append() cause tf.while_loop() doesn't support
# no-tensor arguments)
next_predict_sentence = tf.concat(axis=1, values=[prev_prediction, prev_predict_sentence],
name='whileloop_next_prediction_sentence')
return [next_prediction, next_predict_sentence]
# While loop that return the predict sentence
_, predict_sentence = tf.while_loop(cond=loop_cond,
body=loop_body,
loop_vars=[init_prediction, init_predict_sentence],
shape_invariants=[tf.TensorShape([10, 1]), tf.TensorShape([10, None])],
maximum_iterations=10,
name='whileloop_predict_sentence')
return predict_sentence
# Initialize the model
# The input has 2 dimensions: dimension 0 is reserved for the first term and dimension 1 is reserved for the second term
# Create a placeholder
input_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='input_data') # emb_dim x n_words
output_sentence = tf.placeholder(dtype=tf.float64, shape=[Word2Vec_embedding_dim, None], name='output_data')
# Create End Of Sentence vector
EOS = tf.zeros(dtype=tf.float64, shape=[Word2Vec_embedding_dim, 1], name='EOS')
input_sentence_ended = tf.concat([input_sentence, EOS], axis=1, name='input_data_ended')
output_sentence_ended = tf.concat([output_sentence, EOS], axis=1, name='output_data_ended')
# Create the GRU layer
gru_layer_encoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_encoder')
gru_layer_decoder = GRU(Word2Vec_embedding_dim, hidden_dim, name='_decoder')
# Training_process - ONE NN ENCODER - DECODER
input_encoded = gru_layer_encoder.process_sequence(input_sentence_ended, h_0=None) # Process the first sentence
thought_vector = input_encoded[:, -1] # Extract the last state vector (thought) from the input response
train_decoded = gru_layer_decoder.process_sequence(output_sentence_ended, h_0=thought_vector) # Train_answer
pred_decoded = gru_layer_decoder.predict_sequence(h_0=thought_vector)
# Output_data
train_predicted_output = tf.convert_to_tensor(train_decoded, dtype=tf.float64, name='train_output')
pred_predicted_output = tf.convert_to_tensor(pred_decoded, dtype=tf.float64, name='pred_output')
# Loss
loss = tf.reduce_sum(0.5 * tf.pow(train_predicted_output - output_sentence_ended, 2)) # / float(batch_size)
# loss = [sum((real_word-prediction)**2)/embedding_dim for (real_word, prediction) in zip(real_words, predictions)]
# Optimizer
train_step = tf.train.AdamOptimizer().minimize(loss)
if __name__ == "__main__":
from disintegrator import *
from Word2Vec import *
parameters.init()
# Prepare data for training the seq2seq
prepare = DataPreparation()
text = prepare.make_disintegration
sent = prepare.get_sentences(text)
dicc = prepare.get_dictionary(text, stopwords, vocab_size)
data = prepare.get_word_list(sent, stopwords, window_size=Word2Vec_window_size)
print('Propiedades del corpus: \n')
print('\tDiccionario con %d palabras' % (len(dicc['w2i'])))
word_to_vec = Word2Vec(vocab_size, Word2Vec_embedding_dim, Word2Vec_optimizer_step)
x_train, y_train = word_to_vec.training_data(data)
W1, b1 = word_to_vec.train(x_train, y_train)
vocab_vectors = W1+b1
conversations = []
for i in range(len(sent)-2):
if len(sent[i+1]) != 0 and len(sent[i+2]) != 0: # to avoid empty sentences
conversations.append([sent[i+1], sent[i+2]])
# TRAIN THE MODEL
# Initialize all the variables
session = tf.Session()
init_variables = tf.global_variables_initializer()
session.run(init_variables)
losses = []
for conversation in conversations:
# Convert text to vector
_input_sentence = word_to_vec.encoder(conversation[0])
_output_sentence = word_to_vec.encoder(conversation[1])
# Convert list-structure to array-structure
_input_sentence = np.transpose(np.array(_input_sentence))
_output_sentence = np.transpose(np.array(_output_sentence))
# Run the graph
_, _loss = session.run([train_step, loss],
feed_dict={input_sentence: _input_sentence, output_sentence: _output_sentence})
losses.append(_loss)
# Save the model
saver = tf.train.Saver()
saver.save(sess, "./model/seq2seq_model")
# Prediction
_input_sentence = 'hola que tal?'
print('yo: \t', _input_sentence)
# Convert text to vector
_input_sentence = word_to_vec.encoder(' '.split(_input_sentence))
# Convert list-structure to array-structure
_input_sentence = np.transpose(np.array(_input_sentence))
# Run the graph
prediction = session.run(pred_predicted_output, feed_dict={input_sentence: _input_sentence})
# Decode
prediction = np.transpose(np.array(prediction))
_output_sentence = word_to_vec.decoder(pred)
# Sentence
print('bot: \t', ' '.join(_output_sentence))
| forward_pass | identifier_name |
error.rs | /*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::binder::AsNative;
use crate::sys;
use std::error;
use std::ffi::CStr;
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
use std::result;
pub use sys::binder_status_t as status_t;
/// Low-level status codes from Android `libutils`.
// All error codes are negative integer values. Derived from the anonymous enum
// in utils/Errors.h
pub use sys::android_c_interface_StatusCode as StatusCode;
/// A specialized [`Result`](result::Result) for binder operations.
pub type Result<T> = result::Result<T, StatusCode>;
/// Convert a low-level status code into an empty result.
///
/// An OK status is converted into an `Ok` result, any other status is converted
/// into an `Err` result holding the status code.
pub fn status_result(status: status_t) -> Result<()> {
match parse_status_code(status) {
StatusCode::OK => Ok(()),
e => Err(e),
}
}
fn parse_status_code(code: i32) -> StatusCode {
match code {
e if e == StatusCode::OK as i32 => StatusCode::OK,
e if e == StatusCode::NO_MEMORY as i32 => StatusCode::NO_MEMORY,
e if e == StatusCode::INVALID_OPERATION as i32 => StatusCode::INVALID_OPERATION,
e if e == StatusCode::BAD_VALUE as i32 => StatusCode::BAD_VALUE,
e if e == StatusCode::BAD_TYPE as i32 => StatusCode::BAD_TYPE,
e if e == StatusCode::NAME_NOT_FOUND as i32 => StatusCode::NAME_NOT_FOUND,
e if e == StatusCode::PERMISSION_DENIED as i32 => StatusCode::PERMISSION_DENIED,
e if e == StatusCode::NO_INIT as i32 => StatusCode::NO_INIT,
e if e == StatusCode::ALREADY_EXISTS as i32 => StatusCode::ALREADY_EXISTS,
e if e == StatusCode::DEAD_OBJECT as i32 => StatusCode::DEAD_OBJECT,
e if e == StatusCode::FAILED_TRANSACTION as i32 => StatusCode::FAILED_TRANSACTION,
e if e == StatusCode::BAD_INDEX as i32 => StatusCode::BAD_INDEX,
e if e == StatusCode::NOT_ENOUGH_DATA as i32 => StatusCode::NOT_ENOUGH_DATA,
e if e == StatusCode::WOULD_BLOCK as i32 => StatusCode::WOULD_BLOCK,
e if e == StatusCode::TIMED_OUT as i32 => StatusCode::TIMED_OUT,
e if e == StatusCode::UNKNOWN_TRANSACTION as i32 => StatusCode::UNKNOWN_TRANSACTION,
e if e == StatusCode::FDS_NOT_ALLOWED as i32 => StatusCode::FDS_NOT_ALLOWED,
e if e == StatusCode::UNEXPECTED_NULL as i32 => StatusCode::UNEXPECTED_NULL,
_ => StatusCode::UNKNOWN_ERROR,
}
}
pub use sys::android_c_interface_ExceptionCode as ExceptionCode;
fn parse_exception_code(code: i32) -> ExceptionCode {
match code {
e if e == ExceptionCode::NONE as i32 => ExceptionCode::NONE,
e if e == ExceptionCode::SECURITY as i32 => ExceptionCode::SECURITY,
e if e == ExceptionCode::BAD_PARCELABLE as i32 => ExceptionCode::BAD_PARCELABLE,
e if e == ExceptionCode::ILLEGAL_ARGUMENT as i32 => ExceptionCode::ILLEGAL_ARGUMENT,
e if e == ExceptionCode::NULL_POINTER as i32 => ExceptionCode::NULL_POINTER,
e if e == ExceptionCode::ILLEGAL_STATE as i32 => ExceptionCode::ILLEGAL_STATE,
e if e == ExceptionCode::NETWORK_MAIN_THREAD as i32 => ExceptionCode::NETWORK_MAIN_THREAD,
e if e == ExceptionCode::UNSUPPORTED_OPERATION as i32 => {
ExceptionCode::UNSUPPORTED_OPERATION
}
e if e == ExceptionCode::SERVICE_SPECIFIC as i32 => ExceptionCode::SERVICE_SPECIFIC,
_ => ExceptionCode::TRANSACTION_FAILED,
}
}
// Safety: `Status` always contains a owning pointer to a valid `AStatus`. The
// lifetime of the contained pointer is the same as the `Status` object.
/// High-level binder status object that encapsulates a standard way to keep
/// track of and chain binder errors along with service specific errors.
///
/// Used in AIDL transactions to represent failed transactions.
pub struct Status(*mut sys::AStatus);
// Safety: The `AStatus` that the `Status` points to must have an entirely thread-safe API for the
// duration of the `Status` object's lifetime. We ensure this by not allowing mutation of a `Status`
// in Rust, and the NDK API says we're the owner of our `AStatus` objects so outside code should not
// be mutating them underneath us.
unsafe impl Sync for Status {}
// Safety: `Status` always contains an owning pointer to a global, immutable, interned `AStatus`.
// A thread-local `AStatus` would not be valid.
unsafe impl Send for Status {}
impl Status {
/// Create a status object representing a successful transaction.
pub fn ok() -> Self {
let ptr = unsafe {
// Safety: `AStatus_newOk` always returns a new, heap allocated
// pointer to an `ASTatus` object, so we know this pointer will be
// valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_newOk()
};
Self(ptr)
}
/// Create a status object from a service specific error
pub fn new_service_specific_error(err: i32, message: Option<&CStr>) -> Status {
let ptr = if let Some(message) = message {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. We construct a valid, null-terminated
// `CString` from the message, which must be a valid C-style
// string to pass as the message. This function always returns a
// new, heap allocated pointer to an `AStatus` object, so we
// know the returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificErrorWithMessage(err, message.as_ptr())
}
} else {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. This function always returns a new,
// heap allocated pointer to an `AStatus` object, so we know the
// returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificError(err)
}
};
Self(ptr)
}
/// Create a status object from an exception code
pub fn new_exception(exception: ExceptionCode, message: Option<&CStr>) -> Status {
if let Some(message) = message {
let ptr = unsafe {
sys::AStatus_fromExceptionCodeWithMessage(exception as i32, message.as_ptr())
};
Self(ptr)
} else {
exception.into()
}
}
/// Create a status object from a raw `AStatus` pointer.
///
/// # Safety
///
/// This constructor is safe iff `ptr` is a valid pointer to an `AStatus`.
pub(crate) unsafe fn from_ptr(ptr: *mut sys::AStatus) -> Self {
Self(ptr)
}
/// Returns `true` if this status represents a successful transaction.
pub fn is_ok(&self) -> bool {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_isOk` here.
sys::AStatus_isOk(self.as_native())
}
}
/// Returns a description of the status.
pub fn get_description(&self) -> String {
let description_ptr = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getDescription`
// here.
//
// `AStatus_getDescription` always returns a valid pointer to a null
// terminated C string. Rust is responsible for freeing this pointer
// via `AStatus_deleteDescription`.
sys::AStatus_getDescription(self.as_native())
};
let description = unsafe {
// Safety: `AStatus_getDescription` always returns a valid C string,
// which can be safely converted to a `CStr`.
CStr::from_ptr(description_ptr)
};
let description = description.to_string_lossy().to_string();
unsafe {
// Safety: `description_ptr` was returned from
// `AStatus_getDescription` above, and must be freed via
// `AStatus_deleteDescription`. We must not access the pointer after
// this call, so we copy it into an owned string above and return
// that string.
sys::AStatus_deleteDescription(description_ptr);
}
description
}
/// Returns the exception code of the status.
pub fn exception_code(&self) -> ExceptionCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getExceptionCode`
// here.
sys::AStatus_getExceptionCode(self.as_native())
};
parse_exception_code(code)
}
/// Return a status code representing a transaction failure, or
/// `StatusCode::OK` if there was no transaction failure.
///
/// If this method returns `OK`, the status may still represent a different
/// exception or a service specific error. To find out if this transaction
/// as a whole is okay, use [`is_ok`](Self::is_ok) instead.
pub fn transaction_error(&self) -> StatusCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getStatus` here.
sys::AStatus_getStatus(self.as_native())
};
parse_status_code(code)
}
/// Return a service specific error if this status represents one.
///
/// This function will only ever return a non-zero result if
/// [`exception_code`](Self::exception_code) returns
/// `ExceptionCode::SERVICE_SPECIFIC`. If this function returns 0, the
/// status object may still represent a different exception or status. To
/// find out if this transaction as a whole is okay, use
/// [`is_ok`](Self::is_ok) instead.
pub fn service_specific_error(&self) -> i32 {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to
// `AStatus_getServiceSpecificError` here.
sys::AStatus_getServiceSpecificError(self.as_native())
}
}
/// Calls `op` if the status was ok, otherwise returns an `Err` value of
/// `self`.
pub fn and_then<T, F>(self, op: F) -> result::Result<T, Status>
where
F: FnOnce() -> result::Result<T, Status>,
{
<result::Result<(), Status>>::from(self)?;
op()
}
}
impl error::Error for Status {}
impl Display for Status {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_str(&self.get_description())
}
}
impl Debug for Status {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_str(&self.get_description())
}
}
impl PartialEq for Status {
fn eq(&self, other: &Status) -> bool {
let self_code = self.exception_code();
let other_code = other.exception_code();
match (self_code, other_code) {
(ExceptionCode::NONE, ExceptionCode::NONE) => true,
(ExceptionCode::TRANSACTION_FAILED, ExceptionCode::TRANSACTION_FAILED) => {
self.transaction_error() == other.transaction_error()
&& self.get_description() == other.get_description()
}
(ExceptionCode::SERVICE_SPECIFIC, ExceptionCode::SERVICE_SPECIFIC) => {
self.service_specific_error() == other.service_specific_error()
&& self.get_description() == other.get_description()
}
(e1, e2) => e1 == e2 && self.get_description() == other.get_description(),
}
}
}
impl Eq for Status {}
impl From<StatusCode> for Status {
fn from(status: StatusCode) -> Status {
(status as status_t).into()
}
}
impl From<status_t> for Status {
fn from(status: status_t) -> Status {
let ptr = unsafe {
// Safety: `AStatus_fromStatus` expects any `status_t` integer, so
// this is a safe FFI call. Unknown values will be coerced into
// UNKNOWN_ERROR.
sys::AStatus_fromStatus(status)
};
Self(ptr)
}
}
impl From<ExceptionCode> for Status {
fn from(code: ExceptionCode) -> Status {
let ptr = unsafe {
// Safety: `AStatus_fromExceptionCode` expects any
// `binder_exception_t` (i32) integer, so this is a safe FFI call.
// Unknown values will be coerced into EX_TRANSACTION_FAILED.
sys::AStatus_fromExceptionCode(code as i32)
};
Self(ptr)
}
}
// TODO: impl Try for Status when try_trait is stabilized
// https://github.com/rust-lang/rust/issues/42327
impl From<Status> for result::Result<(), Status> {
fn from(status: Status) -> result::Result<(), Status> {
if status.is_ok() {
Ok(())
} else {
Err(status)
}
}
}
impl From<Status> for status_t {
fn from(status: Status) -> status_t {
status.transaction_error() as status_t
}
}
impl Drop for Status {
fn drop(&mut self) {
unsafe {
// Safety: `Status` manages the lifetime of its inner `AStatus`
// pointee, so we need to delete it here. We know that the pointer
// will be valid here since `Status` always contains a valid pointer
// while it is alive.
sys::AStatus_delete(self.0);
}
}
}
/// # Safety
///
/// `Status` always contains a valid pointer to an `AStatus` object, so we can
/// trivially convert it to a correctly-typed raw pointer.
///
/// Care must be taken that the returned pointer is only dereferenced while the
/// `Status` object is still alive.
unsafe impl AsNative<sys::AStatus> for Status {
fn as_native(&self) -> *const sys::AStatus |
fn as_native_mut(&mut self) -> *mut sys::AStatus {
self.0
}
}
| {
self.0
} | identifier_body |
error.rs | /*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::binder::AsNative;
use crate::sys;
use std::error;
use std::ffi::CStr;
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
use std::result;
pub use sys::binder_status_t as status_t;
/// Low-level status codes from Android `libutils`.
// All error codes are negative integer values. Derived from the anonymous enum
// in utils/Errors.h
pub use sys::android_c_interface_StatusCode as StatusCode;
/// A specialized [`Result`](result::Result) for binder operations.
pub type Result<T> = result::Result<T, StatusCode>;
/// Convert a low-level status code into an empty result.
///
/// An OK status is converted into an `Ok` result, any other status is converted
/// into an `Err` result holding the status code.
pub fn status_result(status: status_t) -> Result<()> {
match parse_status_code(status) {
StatusCode::OK => Ok(()),
e => Err(e),
}
}
fn parse_status_code(code: i32) -> StatusCode {
match code {
e if e == StatusCode::OK as i32 => StatusCode::OK,
e if e == StatusCode::NO_MEMORY as i32 => StatusCode::NO_MEMORY,
e if e == StatusCode::INVALID_OPERATION as i32 => StatusCode::INVALID_OPERATION,
e if e == StatusCode::BAD_VALUE as i32 => StatusCode::BAD_VALUE,
e if e == StatusCode::BAD_TYPE as i32 => StatusCode::BAD_TYPE,
e if e == StatusCode::NAME_NOT_FOUND as i32 => StatusCode::NAME_NOT_FOUND,
e if e == StatusCode::PERMISSION_DENIED as i32 => StatusCode::PERMISSION_DENIED,
e if e == StatusCode::NO_INIT as i32 => StatusCode::NO_INIT,
e if e == StatusCode::ALREADY_EXISTS as i32 => StatusCode::ALREADY_EXISTS,
e if e == StatusCode::DEAD_OBJECT as i32 => StatusCode::DEAD_OBJECT,
e if e == StatusCode::FAILED_TRANSACTION as i32 => StatusCode::FAILED_TRANSACTION,
e if e == StatusCode::BAD_INDEX as i32 => StatusCode::BAD_INDEX,
e if e == StatusCode::NOT_ENOUGH_DATA as i32 => StatusCode::NOT_ENOUGH_DATA,
e if e == StatusCode::WOULD_BLOCK as i32 => StatusCode::WOULD_BLOCK,
e if e == StatusCode::TIMED_OUT as i32 => StatusCode::TIMED_OUT,
e if e == StatusCode::UNKNOWN_TRANSACTION as i32 => StatusCode::UNKNOWN_TRANSACTION,
e if e == StatusCode::FDS_NOT_ALLOWED as i32 => StatusCode::FDS_NOT_ALLOWED,
e if e == StatusCode::UNEXPECTED_NULL as i32 => StatusCode::UNEXPECTED_NULL,
_ => StatusCode::UNKNOWN_ERROR,
}
}
pub use sys::android_c_interface_ExceptionCode as ExceptionCode;
fn parse_exception_code(code: i32) -> ExceptionCode {
match code {
e if e == ExceptionCode::NONE as i32 => ExceptionCode::NONE,
e if e == ExceptionCode::SECURITY as i32 => ExceptionCode::SECURITY,
e if e == ExceptionCode::BAD_PARCELABLE as i32 => ExceptionCode::BAD_PARCELABLE,
e if e == ExceptionCode::ILLEGAL_ARGUMENT as i32 => ExceptionCode::ILLEGAL_ARGUMENT,
e if e == ExceptionCode::NULL_POINTER as i32 => ExceptionCode::NULL_POINTER,
e if e == ExceptionCode::ILLEGAL_STATE as i32 => ExceptionCode::ILLEGAL_STATE,
e if e == ExceptionCode::NETWORK_MAIN_THREAD as i32 => ExceptionCode::NETWORK_MAIN_THREAD,
e if e == ExceptionCode::UNSUPPORTED_OPERATION as i32 => {
ExceptionCode::UNSUPPORTED_OPERATION
}
e if e == ExceptionCode::SERVICE_SPECIFIC as i32 => ExceptionCode::SERVICE_SPECIFIC,
_ => ExceptionCode::TRANSACTION_FAILED,
}
}
// Safety: `Status` always contains a owning pointer to a valid `AStatus`. The
// lifetime of the contained pointer is the same as the `Status` object.
/// High-level binder status object that encapsulates a standard way to keep
/// track of and chain binder errors along with service specific errors.
///
/// Used in AIDL transactions to represent failed transactions.
pub struct Status(*mut sys::AStatus);
// Safety: The `AStatus` that the `Status` points to must have an entirely thread-safe API for the
// duration of the `Status` object's lifetime. We ensure this by not allowing mutation of a `Status`
// in Rust, and the NDK API says we're the owner of our `AStatus` objects so outside code should not
// be mutating them underneath us.
unsafe impl Sync for Status {}
// Safety: `Status` always contains an owning pointer to a global, immutable, interned `AStatus`.
// A thread-local `AStatus` would not be valid.
unsafe impl Send for Status {}
impl Status {
/// Create a status object representing a successful transaction.
pub fn ok() -> Self {
let ptr = unsafe {
// Safety: `AStatus_newOk` always returns a new, heap allocated
// pointer to an `ASTatus` object, so we know this pointer will be
// valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_newOk()
};
Self(ptr)
}
/// Create a status object from a service specific error
pub fn new_service_specific_error(err: i32, message: Option<&CStr>) -> Status {
let ptr = if let Some(message) = message {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. We construct a valid, null-terminated
// `CString` from the message, which must be a valid C-style
// string to pass as the message. This function always returns a
// new, heap allocated pointer to an `AStatus` object, so we
// know the returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificErrorWithMessage(err, message.as_ptr())
}
} else {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. This function always returns a new,
// heap allocated pointer to an `AStatus` object, so we know the
// returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificError(err)
}
};
Self(ptr)
}
/// Create a status object from an exception code
pub fn new_exception(exception: ExceptionCode, message: Option<&CStr>) -> Status {
if let Some(message) = message {
let ptr = unsafe {
sys::AStatus_fromExceptionCodeWithMessage(exception as i32, message.as_ptr())
};
Self(ptr)
} else {
exception.into()
}
}
/// Create a status object from a raw `AStatus` pointer.
///
/// # Safety
///
/// This constructor is safe iff `ptr` is a valid pointer to an `AStatus`.
pub(crate) unsafe fn from_ptr(ptr: *mut sys::AStatus) -> Self {
Self(ptr)
}
/// Returns `true` if this status represents a successful transaction.
pub fn is_ok(&self) -> bool {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_isOk` here.
sys::AStatus_isOk(self.as_native())
}
}
/// Returns a description of the status.
pub fn get_description(&self) -> String {
let description_ptr = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getDescription`
// here.
//
// `AStatus_getDescription` always returns a valid pointer to a null
// terminated C string. Rust is responsible for freeing this pointer
// via `AStatus_deleteDescription`.
sys::AStatus_getDescription(self.as_native())
};
let description = unsafe {
// Safety: `AStatus_getDescription` always returns a valid C string,
// which can be safely converted to a `CStr`.
CStr::from_ptr(description_ptr)
};
let description = description.to_string_lossy().to_string();
unsafe {
// Safety: `description_ptr` was returned from
// `AStatus_getDescription` above, and must be freed via
// `AStatus_deleteDescription`. We must not access the pointer after
// this call, so we copy it into an owned string above and return
// that string.
sys::AStatus_deleteDescription(description_ptr);
}
description
}
/// Returns the exception code of the status.
pub fn exception_code(&self) -> ExceptionCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getExceptionCode`
// here.
sys::AStatus_getExceptionCode(self.as_native())
};
parse_exception_code(code)
}
/// Return a status code representing a transaction failure, or
/// `StatusCode::OK` if there was no transaction failure.
///
/// If this method returns `OK`, the status may still represent a different
/// exception or a service specific error. To find out if this transaction
/// as a whole is okay, use [`is_ok`](Self::is_ok) instead.
pub fn transaction_error(&self) -> StatusCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getStatus` here.
sys::AStatus_getStatus(self.as_native())
};
parse_status_code(code)
}
/// Return a service specific error if this status represents one.
///
/// This function will only ever return a non-zero result if
/// [`exception_code`](Self::exception_code) returns
/// `ExceptionCode::SERVICE_SPECIFIC`. If this function returns 0, the
/// status object may still represent a different exception or status. To
/// find out if this transaction as a whole is okay, use
/// [`is_ok`](Self::is_ok) instead.
pub fn service_specific_error(&self) -> i32 {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to
// `AStatus_getServiceSpecificError` here.
sys::AStatus_getServiceSpecificError(self.as_native())
}
}
/// Calls `op` if the status was ok, otherwise returns an `Err` value of
/// `self`.
pub fn and_then<T, F>(self, op: F) -> result::Result<T, Status>
where
F: FnOnce() -> result::Result<T, Status>,
{
<result::Result<(), Status>>::from(self)?;
op()
}
}
impl error::Error for Status {}
impl Display for Status {
fn | (&self, f: &mut Formatter) -> FmtResult {
f.write_str(&self.get_description())
}
}
impl Debug for Status {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_str(&self.get_description())
}
}
impl PartialEq for Status {
fn eq(&self, other: &Status) -> bool {
let self_code = self.exception_code();
let other_code = other.exception_code();
match (self_code, other_code) {
(ExceptionCode::NONE, ExceptionCode::NONE) => true,
(ExceptionCode::TRANSACTION_FAILED, ExceptionCode::TRANSACTION_FAILED) => {
self.transaction_error() == other.transaction_error()
&& self.get_description() == other.get_description()
}
(ExceptionCode::SERVICE_SPECIFIC, ExceptionCode::SERVICE_SPECIFIC) => {
self.service_specific_error() == other.service_specific_error()
&& self.get_description() == other.get_description()
}
(e1, e2) => e1 == e2 && self.get_description() == other.get_description(),
}
}
}
impl Eq for Status {}
impl From<StatusCode> for Status {
fn from(status: StatusCode) -> Status {
(status as status_t).into()
}
}
impl From<status_t> for Status {
fn from(status: status_t) -> Status {
let ptr = unsafe {
// Safety: `AStatus_fromStatus` expects any `status_t` integer, so
// this is a safe FFI call. Unknown values will be coerced into
// UNKNOWN_ERROR.
sys::AStatus_fromStatus(status)
};
Self(ptr)
}
}
impl From<ExceptionCode> for Status {
fn from(code: ExceptionCode) -> Status {
let ptr = unsafe {
// Safety: `AStatus_fromExceptionCode` expects any
// `binder_exception_t` (i32) integer, so this is a safe FFI call.
// Unknown values will be coerced into EX_TRANSACTION_FAILED.
sys::AStatus_fromExceptionCode(code as i32)
};
Self(ptr)
}
}
// TODO: impl Try for Status when try_trait is stabilized
// https://github.com/rust-lang/rust/issues/42327
impl From<Status> for result::Result<(), Status> {
fn from(status: Status) -> result::Result<(), Status> {
if status.is_ok() {
Ok(())
} else {
Err(status)
}
}
}
impl From<Status> for status_t {
fn from(status: Status) -> status_t {
status.transaction_error() as status_t
}
}
impl Drop for Status {
fn drop(&mut self) {
unsafe {
// Safety: `Status` manages the lifetime of its inner `AStatus`
// pointee, so we need to delete it here. We know that the pointer
// will be valid here since `Status` always contains a valid pointer
// while it is alive.
sys::AStatus_delete(self.0);
}
}
}
/// # Safety
///
/// `Status` always contains a valid pointer to an `AStatus` object, so we can
/// trivially convert it to a correctly-typed raw pointer.
///
/// Care must be taken that the returned pointer is only dereferenced while the
/// `Status` object is still alive.
unsafe impl AsNative<sys::AStatus> for Status {
fn as_native(&self) -> *const sys::AStatus {
self.0
}
fn as_native_mut(&mut self) -> *mut sys::AStatus {
self.0
}
}
| fmt | identifier_name |
error.rs | /*
* Copyright (C) 2020 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::binder::AsNative;
use crate::sys;
use std::error;
use std::ffi::CStr;
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
use std::result;
pub use sys::binder_status_t as status_t;
/// Low-level status codes from Android `libutils`.
// All error codes are negative integer values. Derived from the anonymous enum
// in utils/Errors.h
pub use sys::android_c_interface_StatusCode as StatusCode;
/// A specialized [`Result`](result::Result) for binder operations.
pub type Result<T> = result::Result<T, StatusCode>;
/// Convert a low-level status code into an empty result.
///
/// An OK status is converted into an `Ok` result, any other status is converted
/// into an `Err` result holding the status code.
pub fn status_result(status: status_t) -> Result<()> {
match parse_status_code(status) {
StatusCode::OK => Ok(()),
e => Err(e),
}
}
fn parse_status_code(code: i32) -> StatusCode {
match code {
e if e == StatusCode::OK as i32 => StatusCode::OK,
e if e == StatusCode::NO_MEMORY as i32 => StatusCode::NO_MEMORY,
e if e == StatusCode::INVALID_OPERATION as i32 => StatusCode::INVALID_OPERATION,
e if e == StatusCode::BAD_VALUE as i32 => StatusCode::BAD_VALUE,
e if e == StatusCode::BAD_TYPE as i32 => StatusCode::BAD_TYPE,
e if e == StatusCode::NAME_NOT_FOUND as i32 => StatusCode::NAME_NOT_FOUND,
e if e == StatusCode::PERMISSION_DENIED as i32 => StatusCode::PERMISSION_DENIED,
e if e == StatusCode::NO_INIT as i32 => StatusCode::NO_INIT,
e if e == StatusCode::ALREADY_EXISTS as i32 => StatusCode::ALREADY_EXISTS,
e if e == StatusCode::DEAD_OBJECT as i32 => StatusCode::DEAD_OBJECT,
e if e == StatusCode::FAILED_TRANSACTION as i32 => StatusCode::FAILED_TRANSACTION,
e if e == StatusCode::BAD_INDEX as i32 => StatusCode::BAD_INDEX,
e if e == StatusCode::NOT_ENOUGH_DATA as i32 => StatusCode::NOT_ENOUGH_DATA,
e if e == StatusCode::WOULD_BLOCK as i32 => StatusCode::WOULD_BLOCK,
e if e == StatusCode::TIMED_OUT as i32 => StatusCode::TIMED_OUT,
e if e == StatusCode::UNKNOWN_TRANSACTION as i32 => StatusCode::UNKNOWN_TRANSACTION,
e if e == StatusCode::FDS_NOT_ALLOWED as i32 => StatusCode::FDS_NOT_ALLOWED,
e if e == StatusCode::UNEXPECTED_NULL as i32 => StatusCode::UNEXPECTED_NULL,
_ => StatusCode::UNKNOWN_ERROR,
}
}
pub use sys::android_c_interface_ExceptionCode as ExceptionCode;
fn parse_exception_code(code: i32) -> ExceptionCode {
match code {
e if e == ExceptionCode::NONE as i32 => ExceptionCode::NONE,
e if e == ExceptionCode::SECURITY as i32 => ExceptionCode::SECURITY,
e if e == ExceptionCode::BAD_PARCELABLE as i32 => ExceptionCode::BAD_PARCELABLE,
e if e == ExceptionCode::ILLEGAL_ARGUMENT as i32 => ExceptionCode::ILLEGAL_ARGUMENT,
e if e == ExceptionCode::NULL_POINTER as i32 => ExceptionCode::NULL_POINTER,
e if e == ExceptionCode::ILLEGAL_STATE as i32 => ExceptionCode::ILLEGAL_STATE,
e if e == ExceptionCode::NETWORK_MAIN_THREAD as i32 => ExceptionCode::NETWORK_MAIN_THREAD,
e if e == ExceptionCode::UNSUPPORTED_OPERATION as i32 => {
ExceptionCode::UNSUPPORTED_OPERATION
}
e if e == ExceptionCode::SERVICE_SPECIFIC as i32 => ExceptionCode::SERVICE_SPECIFIC,
_ => ExceptionCode::TRANSACTION_FAILED,
}
}
// Safety: `Status` always contains a owning pointer to a valid `AStatus`. The
// lifetime of the contained pointer is the same as the `Status` object.
/// High-level binder status object that encapsulates a standard way to keep
/// track of and chain binder errors along with service specific errors.
///
/// Used in AIDL transactions to represent failed transactions.
pub struct Status(*mut sys::AStatus);
// Safety: The `AStatus` that the `Status` points to must have an entirely thread-safe API for the
// duration of the `Status` object's lifetime. We ensure this by not allowing mutation of a `Status`
// in Rust, and the NDK API says we're the owner of our `AStatus` objects so outside code should not
// be mutating them underneath us.
unsafe impl Sync for Status {}
// Safety: `Status` always contains an owning pointer to a global, immutable, interned `AStatus`.
// A thread-local `AStatus` would not be valid.
unsafe impl Send for Status {}
impl Status {
/// Create a status object representing a successful transaction.
pub fn ok() -> Self {
let ptr = unsafe {
// Safety: `AStatus_newOk` always returns a new, heap allocated
// pointer to an `ASTatus` object, so we know this pointer will be
// valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_newOk()
};
Self(ptr)
}
/// Create a status object from a service specific error
pub fn new_service_specific_error(err: i32, message: Option<&CStr>) -> Status {
let ptr = if let Some(message) = message {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. We construct a valid, null-terminated
// `CString` from the message, which must be a valid C-style
// string to pass as the message. This function always returns a
// new, heap allocated pointer to an `AStatus` object, so we
// know the returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificErrorWithMessage(err, message.as_ptr())
}
} else {
unsafe {
// Safety: Any i32 is a valid service specific error for the
// error code parameter. This function always returns a new,
// heap allocated pointer to an `AStatus` object, so we know the
// returned pointer will be valid.
//
// Rust takes ownership of the returned pointer.
sys::AStatus_fromServiceSpecificError(err)
}
};
Self(ptr)
}
/// Create a status object from an exception code
pub fn new_exception(exception: ExceptionCode, message: Option<&CStr>) -> Status {
if let Some(message) = message {
let ptr = unsafe {
sys::AStatus_fromExceptionCodeWithMessage(exception as i32, message.as_ptr())
};
Self(ptr)
} else {
exception.into()
}
}
/// Create a status object from a raw `AStatus` pointer.
///
/// # Safety
///
/// This constructor is safe iff `ptr` is a valid pointer to an `AStatus`.
pub(crate) unsafe fn from_ptr(ptr: *mut sys::AStatus) -> Self {
Self(ptr)
}
/// Returns `true` if this status represents a successful transaction.
pub fn is_ok(&self) -> bool {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_isOk` here.
sys::AStatus_isOk(self.as_native())
}
}
/// Returns a description of the status.
pub fn get_description(&self) -> String {
let description_ptr = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getDescription`
// here.
//
// `AStatus_getDescription` always returns a valid pointer to a null
// terminated C string. Rust is responsible for freeing this pointer
// via `AStatus_deleteDescription`.
sys::AStatus_getDescription(self.as_native())
};
let description = unsafe {
// Safety: `AStatus_getDescription` always returns a valid C string,
// which can be safely converted to a `CStr`.
CStr::from_ptr(description_ptr)
};
let description = description.to_string_lossy().to_string();
unsafe {
// Safety: `description_ptr` was returned from
// `AStatus_getDescription` above, and must be freed via
// `AStatus_deleteDescription`. We must not access the pointer after
// this call, so we copy it into an owned string above and return
// that string.
sys::AStatus_deleteDescription(description_ptr);
}
description
}
/// Returns the exception code of the status.
pub fn exception_code(&self) -> ExceptionCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getExceptionCode`
// here.
sys::AStatus_getExceptionCode(self.as_native())
};
parse_exception_code(code)
}
/// Return a status code representing a transaction failure, or
/// `StatusCode::OK` if there was no transaction failure.
///
/// If this method returns `OK`, the status may still represent a different
/// exception or a service specific error. To find out if this transaction
/// as a whole is okay, use [`is_ok`](Self::is_ok) instead.
pub fn transaction_error(&self) -> StatusCode {
let code = unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to `AStatus_getStatus` here.
sys::AStatus_getStatus(self.as_native())
};
parse_status_code(code)
}
/// Return a service specific error if this status represents one.
///
/// This function will only ever return a non-zero result if
/// [`exception_code`](Self::exception_code) returns
/// `ExceptionCode::SERVICE_SPECIFIC`. If this function returns 0, the
/// status object may still represent a different exception or status. To
/// find out if this transaction as a whole is okay, use
/// [`is_ok`](Self::is_ok) instead.
pub fn service_specific_error(&self) -> i32 {
unsafe {
// Safety: `Status` always contains a valid `AStatus` pointer, so we
// are always passing a valid pointer to
// `AStatus_getServiceSpecificError` here.
sys::AStatus_getServiceSpecificError(self.as_native())
}
}
/// Calls `op` if the status was ok, otherwise returns an `Err` value of
/// `self`.
pub fn and_then<T, F>(self, op: F) -> result::Result<T, Status>
where
F: FnOnce() -> result::Result<T, Status>,
{
<result::Result<(), Status>>::from(self)?;
op()
}
}
impl error::Error for Status {}
| }
}
impl Debug for Status {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_str(&self.get_description())
}
}
impl PartialEq for Status {
fn eq(&self, other: &Status) -> bool {
let self_code = self.exception_code();
let other_code = other.exception_code();
match (self_code, other_code) {
(ExceptionCode::NONE, ExceptionCode::NONE) => true,
(ExceptionCode::TRANSACTION_FAILED, ExceptionCode::TRANSACTION_FAILED) => {
self.transaction_error() == other.transaction_error()
&& self.get_description() == other.get_description()
}
(ExceptionCode::SERVICE_SPECIFIC, ExceptionCode::SERVICE_SPECIFIC) => {
self.service_specific_error() == other.service_specific_error()
&& self.get_description() == other.get_description()
}
(e1, e2) => e1 == e2 && self.get_description() == other.get_description(),
}
}
}
impl Eq for Status {}
impl From<StatusCode> for Status {
fn from(status: StatusCode) -> Status {
(status as status_t).into()
}
}
impl From<status_t> for Status {
fn from(status: status_t) -> Status {
let ptr = unsafe {
// Safety: `AStatus_fromStatus` expects any `status_t` integer, so
// this is a safe FFI call. Unknown values will be coerced into
// UNKNOWN_ERROR.
sys::AStatus_fromStatus(status)
};
Self(ptr)
}
}
impl From<ExceptionCode> for Status {
fn from(code: ExceptionCode) -> Status {
let ptr = unsafe {
// Safety: `AStatus_fromExceptionCode` expects any
// `binder_exception_t` (i32) integer, so this is a safe FFI call.
// Unknown values will be coerced into EX_TRANSACTION_FAILED.
sys::AStatus_fromExceptionCode(code as i32)
};
Self(ptr)
}
}
// TODO: impl Try for Status when try_trait is stabilized
// https://github.com/rust-lang/rust/issues/42327
impl From<Status> for result::Result<(), Status> {
fn from(status: Status) -> result::Result<(), Status> {
if status.is_ok() {
Ok(())
} else {
Err(status)
}
}
}
impl From<Status> for status_t {
fn from(status: Status) -> status_t {
status.transaction_error() as status_t
}
}
impl Drop for Status {
fn drop(&mut self) {
unsafe {
// Safety: `Status` manages the lifetime of its inner `AStatus`
// pointee, so we need to delete it here. We know that the pointer
// will be valid here since `Status` always contains a valid pointer
// while it is alive.
sys::AStatus_delete(self.0);
}
}
}
/// # Safety
///
/// `Status` always contains a valid pointer to an `AStatus` object, so we can
/// trivially convert it to a correctly-typed raw pointer.
///
/// Care must be taken that the returned pointer is only dereferenced while the
/// `Status` object is still alive.
unsafe impl AsNative<sys::AStatus> for Status {
fn as_native(&self) -> *const sys::AStatus {
self.0
}
fn as_native_mut(&mut self) -> *mut sys::AStatus {
self.0
}
} | impl Display for Status {
fn fmt(&self, f: &mut Formatter) -> FmtResult {
f.write_str(&self.get_description()) | random_line_split |
setup.rs | // Copyright 2020 Zachary Stewart
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implements the setup phase of the board.
use std::collections::{hash_map::Entry, HashMap};
use crate::{
board::{AddShipError, Board, CannotPlaceReason, Dimensions, Grid, PlaceError},
ships::{ProjectIter, ShapeProjection, ShipId, ShipShape},
};
/// Reference to a particular ship's placement info as well as the grid, providing access
/// to the methods necessary to check it's placement status.
pub struct ShipEntry<'a, I, D: Dimensions, S> {
/// ID of this ship.
id: I,
/// Grid that the ship may occupy.
grid: &'a Grid<I, D>,
/// Placement info for the ship.
ship: &'a ShipPlacementInfo<S, D::Coordinate>,
}
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntry<'a, I, D, S> {
/// If the ship is placed, get the placement. Otherwise return `None`.
// Has to be specialized for mut and non-mut because mut variants can't return a
// projection that lives as long as 'a, since that would potentially alias the &mut
// ref. With a const ref, we can give back a ref that lives as long as self rather
// than just as long as this method call.
pub fn placement(&self) -> Option<&'a ShapeProjection<D::Coordinate>> {
self.ship.placement.as_ref()
}
}
/// Reference to a particular ship's placement info as well as the grid, providing access
/// to the methods necessary to check it's placement status and place or unplace it.
pub struct ShipEntryMut<'a, I, D: Dimensions, S> {
/// ID of this ship
id: I,
/// Grid that ships are being placed into.
grid: &'a mut Grid<I, D>,
/// Back ref to the ship.
ship: &'a mut ShipPlacementInfo<S, D::Coordinate>,
}
/// Implementation of the shared parts of ShipEntry.
macro_rules! ship_entry_shared {
($t:ident) => {
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> $t<'a, I, D, S> {
/// Get the ID of this ship.
pub fn id(&self) -> &I {
&self.id
}
/// Returns true if this ship has been placed.
pub fn placed(&self) -> bool {
self.ship.placement.is_some()
}
/// Get an interator over possible projections of the shape for this ship that
/// start from the given [`Coordinate`]. If there are no possible placements
/// from the given coordinate, including if the coordinate is out of bounds,
/// the resulting iterator will be empty.
pub fn get_placements(
&self,
coord: D::Coordinate,
) -> ProjectIter<D, S::ProjectIterState> {
self.ship.shape.project(coord, &self.grid.dim)
}
/// Check if the specified placement is valid for this ship.
pub fn check_placement(
&self,
placement: &ShapeProjection<D::Coordinate>,
) -> Result<(), CannotPlaceReason> {
if self.placed() {
Err(CannotPlaceReason::AlreadyPlaced)
} else if !self
.ship
.shape
.is_valid_placement(placement, &self.grid.dim)
{
Err(CannotPlaceReason::InvalidProjection)
} else {
for coord in placement.iter() {
match self.grid.get(coord) {
None => return Err(CannotPlaceReason::InvalidProjection),
Some(cell) if cell.ship.is_some() => {
return Err(CannotPlaceReason::AlreadyOccupied)
}
_ => {}
}
}
Ok(())
}
}
}
};
}
ship_entry_shared!(ShipEntry);
ship_entry_shared!(ShipEntryMut);
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntryMut<'a, I, D, S> {
/// If the ship is placed, get the placement. Otherwise return `None`.
// Has to be specialized for mut and non-mut because mut variants can't return a
// projection that lives as long as 'a, since that would potentially alias the &mut
// ref.
pub fn placement(&self) -> Option<&ShapeProjection<D::Coordinate>> {
self.ship.placement.as_ref()
}
/// Attempts to place the ship with onto the given coordinates. If the ship is already
/// placed, returns `Err` with the attempted placement and reason placement failed,
/// otherwise returns `Ok(())`
pub fn place(
&mut self,
placement: ShapeProjection<D::Coordinate>,
) -> Result<(), PlaceError<ShapeProjection<D::Coordinate>>> {
if self.placed() {
Err(PlaceError::new(CannotPlaceReason::AlreadyPlaced, placement))
} else if !self
.ship
.shape
.is_valid_placement(&placement, &self.grid.dim)
{
Err(PlaceError::new(
CannotPlaceReason::InvalidProjection,
placement,
))
} else {
for coord in placement.iter() {
match self.grid.get(coord) {
None => {
// ShipShape should ensure that all coordinates are valid, but don't
// trust it.
return Err(PlaceError::new(
CannotPlaceReason::InvalidProjection,
placement,
));
}
Some(cell) if cell.ship.is_some() => {
return Err(PlaceError::new(
CannotPlaceReason::AlreadyOccupied,
placement,
));
}
_ => {}
}
}
// Already ensured that every position is valid and not occupied.
for coord in placement.iter() {
self.grid[coord].ship = Some(self.id.to_owned());
}
self.ship.placement = Some(placement);
Ok(())
}
}
/// Attempt to clear the placement of the ship. Returns the previous placement of the
/// ship if any. Returns `None` if the ship has not been placed.
pub fn unplace(&mut self) -> Option<ShapeProjection<D::Coordinate>> {
self.ship.placement.take().map(|placement| {
for coord in placement.iter() {
// We should only allow placement on valid cells, so unwrap is fine.
self.grid[coord].ship = None;
}
placement
})
}
}
/// Contains a ship's shape and current placement status in the grid.
struct ShipPlacementInfo<S, C> {
/// Shape being placed.
shape: S,
/// Placement of this ship, if it has been placed.
placement: Option<ShapeProjection<C>>,
}
/// Setup phase for a [`Board`]. Allows placing ships and does not allow shooting.
pub struct BoardSetup<I: ShipId, D: Dimensions, S: ShipShape<D>> {
/// Grid for placement of ships.
grid: Grid<I, D>,
/// Mapping of added ShipIds to coresponding placement info.
ships: HashMap<I, ShipPlacementInfo<S, D::Coordinate>>,
}
impl<I: ShipId, D: Dimensions, S: ShipShape<D>> BoardSetup<I, D, S> {
/// Begin game setup by constructing a new board with the given [`Dimensions`].
pub fn new(dim: D) -> Self {
Self {
grid: Grid::new(dim),
ships: HashMap::new(),
}
}
/// Get the [`Dimesnsions`] of this [`Board`].
pub fn dimensions(&self) -> &D {
&self.grid.dim
}
/// Tries to start the game. If all ships are placed, returns a [`Board`] with the
/// current placements. If no ships have been added or any ship has not been placed,
/// returns self.
pub fn start(self) -> Result<Board<I, D>, Self> {
if !self.ready() {
Err(self)
} else {
Ok(Board {
grid: self.grid,
ships: self
.ships
.into_iter()
.map(|(id, info)| match info.placement {
Some(placement) => (id, placement),
None => unreachable!(),
})
.collect(),
})
}
}
/// Checks if this board is ready to start. Returns `true` if at least one ship has
/// been added and all ships are placed.
pub fn ready(&self) -> bool {
!self.ships.is_empty() && self.ships.values().all(|ship| ship.placement.is_some())
}
/// Get an iterator over the ships configured on this board.
pub fn iter_ships(&self) -> impl Iterator<Item = ShipEntry<I, D, S>> {
let grid = &self.grid;
self.ships.iter().map(move |(id, ship)| ShipEntry {
id: id.clone(),
grid,
ship,
})
}
/// Attempts to add a ship with the given ID. If the given ShipID is already used,
/// returns the shape passed to this function. Otherwise adds the shape and returns
/// the ShipEntryMut for it to allow placement.
pub fn add_ship(
&mut self,
id: I,
shape: S,
) -> Result<ShipEntryMut<I, D, S>, AddShipError<I, S>> {
match self.ships.entry(id.clone()) {
Entry::Occupied(_) => Err(AddShipError::new(id, shape)),
Entry::Vacant(entry) => {
let ship = entry.insert(ShipPlacementInfo {
shape,
placement: None,
});
Ok(ShipEntryMut {
id,
grid: &mut self.grid,
ship,
})
}
}
}
/// Get the [`ShipEntry`] for the ship with the specified ID if such a ship exists.
pub fn | (&self, id: I) -> Option<ShipEntry<I, D, S>> {
let grid = &self.grid;
self.ships
.get(&id)
.map(move |ship| ShipEntry { id, grid, ship })
}
/// Get the [`ShipEntryMut`] for the ship with the specified ID if such a ship exists.
pub fn get_ship_mut(&mut self, id: I) -> Option<ShipEntryMut<I, D, S>> {
let grid = &mut self.grid;
self.ships
.get_mut(&id)
.map(move |ship| ShipEntryMut { id, grid, ship })
}
/// Get the ID of the ship placed at the specified coordinate if any. Returns None if
/// the coordinate is out of bounds or no ship was placed on the specified point.
pub fn get_coord(&self, coord: &D::Coordinate) -> Option<&I> {
self.grid.get(coord).and_then(|cell| cell.ship.as_ref())
}
}
| get_ship | identifier_name |
setup.rs | // Copyright 2020 Zachary Stewart
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implements the setup phase of the board.
use std::collections::{hash_map::Entry, HashMap};
use crate::{
board::{AddShipError, Board, CannotPlaceReason, Dimensions, Grid, PlaceError},
ships::{ProjectIter, ShapeProjection, ShipId, ShipShape},
};
/// Reference to a particular ship's placement info as well as the grid, providing access
/// to the methods necessary to check it's placement status.
pub struct ShipEntry<'a, I, D: Dimensions, S> {
/// ID of this ship.
id: I,
/// Grid that the ship may occupy.
grid: &'a Grid<I, D>,
/// Placement info for the ship.
ship: &'a ShipPlacementInfo<S, D::Coordinate>,
}
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntry<'a, I, D, S> {
/// If the ship is placed, get the placement. Otherwise return `None`.
// Has to be specialized for mut and non-mut because mut variants can't return a
// projection that lives as long as 'a, since that would potentially alias the &mut
// ref. With a const ref, we can give back a ref that lives as long as self rather
// than just as long as this method call.
pub fn placement(&self) -> Option<&'a ShapeProjection<D::Coordinate>> {
self.ship.placement.as_ref()
}
}
/// Reference to a particular ship's placement info as well as the grid, providing access
/// to the methods necessary to check it's placement status and place or unplace it.
pub struct ShipEntryMut<'a, I, D: Dimensions, S> {
/// ID of this ship
id: I,
/// Grid that ships are being placed into.
grid: &'a mut Grid<I, D>,
/// Back ref to the ship.
ship: &'a mut ShipPlacementInfo<S, D::Coordinate>,
}
/// Implementation of the shared parts of ShipEntry.
macro_rules! ship_entry_shared {
($t:ident) => {
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> $t<'a, I, D, S> {
/// Get the ID of this ship.
pub fn id(&self) -> &I {
&self.id
}
/// Returns true if this ship has been placed.
pub fn placed(&self) -> bool {
self.ship.placement.is_some()
}
/// Get an interator over possible projections of the shape for this ship that
/// start from the given [`Coordinate`]. If there are no possible placements
/// from the given coordinate, including if the coordinate is out of bounds,
/// the resulting iterator will be empty.
pub fn get_placements(
&self,
coord: D::Coordinate,
) -> ProjectIter<D, S::ProjectIterState> {
self.ship.shape.project(coord, &self.grid.dim)
}
/// Check if the specified placement is valid for this ship.
pub fn check_placement(
&self,
placement: &ShapeProjection<D::Coordinate>,
) -> Result<(), CannotPlaceReason> {
if self.placed() {
Err(CannotPlaceReason::AlreadyPlaced)
} else if !self
.ship
.shape
.is_valid_placement(placement, &self.grid.dim)
{
Err(CannotPlaceReason::InvalidProjection)
} else {
for coord in placement.iter() {
match self.grid.get(coord) {
None => return Err(CannotPlaceReason::InvalidProjection),
Some(cell) if cell.ship.is_some() => {
return Err(CannotPlaceReason::AlreadyOccupied)
}
_ => {}
}
}
Ok(())
}
}
}
};
}
ship_entry_shared!(ShipEntry);
ship_entry_shared!(ShipEntryMut);
impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntryMut<'a, I, D, S> {
/// If the ship is placed, get the placement. Otherwise return `None`.
// Has to be specialized for mut and non-mut because mut variants can't return a
// projection that lives as long as 'a, since that would potentially alias the &mut
// ref.
pub fn placement(&self) -> Option<&ShapeProjection<D::Coordinate>> {
self.ship.placement.as_ref()
}
/// Attempts to place the ship with onto the given coordinates. If the ship is already
/// placed, returns `Err` with the attempted placement and reason placement failed,
/// otherwise returns `Ok(())`
pub fn place(
&mut self,
placement: ShapeProjection<D::Coordinate>,
) -> Result<(), PlaceError<ShapeProjection<D::Coordinate>>> {
if self.placed() {
Err(PlaceError::new(CannotPlaceReason::AlreadyPlaced, placement))
} else if !self
.ship
.shape
.is_valid_placement(&placement, &self.grid.dim)
{
Err(PlaceError::new(
CannotPlaceReason::InvalidProjection,
placement,
))
} else {
for coord in placement.iter() {
match self.grid.get(coord) {
None => {
// ShipShape should ensure that all coordinates are valid, but don't
// trust it.
return Err(PlaceError::new(
CannotPlaceReason::InvalidProjection,
placement, | placement,
));
}
_ => {}
}
}
// Already ensured that every position is valid and not occupied.
for coord in placement.iter() {
self.grid[coord].ship = Some(self.id.to_owned());
}
self.ship.placement = Some(placement);
Ok(())
}
}
/// Attempt to clear the placement of the ship. Returns the previous placement of the
/// ship if any. Returns `None` if the ship has not been placed.
pub fn unplace(&mut self) -> Option<ShapeProjection<D::Coordinate>> {
self.ship.placement.take().map(|placement| {
for coord in placement.iter() {
// We should only allow placement on valid cells, so unwrap is fine.
self.grid[coord].ship = None;
}
placement
})
}
}
/// Contains a ship's shape and current placement status in the grid.
struct ShipPlacementInfo<S, C> {
/// Shape being placed.
shape: S,
/// Placement of this ship, if it has been placed.
placement: Option<ShapeProjection<C>>,
}
/// Setup phase for a [`Board`]. Allows placing ships and does not allow shooting.
pub struct BoardSetup<I: ShipId, D: Dimensions, S: ShipShape<D>> {
/// Grid for placement of ships.
grid: Grid<I, D>,
/// Mapping of added ShipIds to coresponding placement info.
ships: HashMap<I, ShipPlacementInfo<S, D::Coordinate>>,
}
impl<I: ShipId, D: Dimensions, S: ShipShape<D>> BoardSetup<I, D, S> {
/// Begin game setup by constructing a new board with the given [`Dimensions`].
pub fn new(dim: D) -> Self {
Self {
grid: Grid::new(dim),
ships: HashMap::new(),
}
}
/// Get the [`Dimesnsions`] of this [`Board`].
pub fn dimensions(&self) -> &D {
&self.grid.dim
}
/// Tries to start the game. If all ships are placed, returns a [`Board`] with the
/// current placements. If no ships have been added or any ship has not been placed,
/// returns self.
pub fn start(self) -> Result<Board<I, D>, Self> {
if !self.ready() {
Err(self)
} else {
Ok(Board {
grid: self.grid,
ships: self
.ships
.into_iter()
.map(|(id, info)| match info.placement {
Some(placement) => (id, placement),
None => unreachable!(),
})
.collect(),
})
}
}
/// Checks if this board is ready to start. Returns `true` if at least one ship has
/// been added and all ships are placed.
pub fn ready(&self) -> bool {
!self.ships.is_empty() && self.ships.values().all(|ship| ship.placement.is_some())
}
/// Get an iterator over the ships configured on this board.
pub fn iter_ships(&self) -> impl Iterator<Item = ShipEntry<I, D, S>> {
let grid = &self.grid;
self.ships.iter().map(move |(id, ship)| ShipEntry {
id: id.clone(),
grid,
ship,
})
}
/// Attempts to add a ship with the given ID. If the given ShipID is already used,
/// returns the shape passed to this function. Otherwise adds the shape and returns
/// the ShipEntryMut for it to allow placement.
pub fn add_ship(
&mut self,
id: I,
shape: S,
) -> Result<ShipEntryMut<I, D, S>, AddShipError<I, S>> {
match self.ships.entry(id.clone()) {
Entry::Occupied(_) => Err(AddShipError::new(id, shape)),
Entry::Vacant(entry) => {
let ship = entry.insert(ShipPlacementInfo {
shape,
placement: None,
});
Ok(ShipEntryMut {
id,
grid: &mut self.grid,
ship,
})
}
}
}
/// Get the [`ShipEntry`] for the ship with the specified ID if such a ship exists.
pub fn get_ship(&self, id: I) -> Option<ShipEntry<I, D, S>> {
let grid = &self.grid;
self.ships
.get(&id)
.map(move |ship| ShipEntry { id, grid, ship })
}
/// Get the [`ShipEntryMut`] for the ship with the specified ID if such a ship exists.
pub fn get_ship_mut(&mut self, id: I) -> Option<ShipEntryMut<I, D, S>> {
let grid = &mut self.grid;
self.ships
.get_mut(&id)
.map(move |ship| ShipEntryMut { id, grid, ship })
}
/// Get the ID of the ship placed at the specified coordinate if any. Returns None if
/// the coordinate is out of bounds or no ship was placed on the specified point.
pub fn get_coord(&self, coord: &D::Coordinate) -> Option<&I> {
self.grid.get(coord).and_then(|cell| cell.ship.as_ref())
}
} | ));
}
Some(cell) if cell.ship.is_some() => {
return Err(PlaceError::new(
CannotPlaceReason::AlreadyOccupied, | random_line_split |
ingredientsScraper.py | from bs4 import BeautifulSoup as bs
import urllib2
import pickle
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
import random as rnd
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
from labels import *
import sys
def | (fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
R[food][recipename] = {}
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][ingid] = amt
#normalize values
m = sum(R[food][recipename].values())
R[food][recipename]={ingid: R[food][recipename][ingid]/m for ingid in R[food][recipename].keys()}
#Recipes = {}
#ingsorted = sorted(I.keys())
#for food in R.keys():
##m = sum(R[food].values())
##normalize values
##R[food] = {ingid: R[food][ingid]/m for ingid in R[food].keys()}
#Recipes[food] = [0]*len(ingsorted)
#for i in range(len(ingsorted)):
###if ingredient is in dish R[food]
#if ingsorted[i] in R[food]:
#Recipes[food][i] = R[food][ingsorted[i]]
#m = sum(Recipes[food])
#Recipes[food] = [x/m for x in Recipes[food]]
pickle.dump((I,R),file('AllRecipesIngImageNet.npy','w'))
#return I,R
#=================================================================================
# Ingredient Scraper with cooking terms and nutritional info
def IngredientScraper(fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
#list containing ingredients, cookingterms, nutritionrating
R[food][recipename] = [{},[],[0]*7]
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][0][ingid] = amt
#normalize values
m = sum(R[food][recipename][0].values())
R[food][recipename][0]={ingid: R[food][recipename][0][ingid]/m for ingid in R[food][recipename][0].keys()}
#get cooking terms
directions = [step.text.lower() for step in recipe.find_all('span', class_='plaincharacterwrap break')]
R[food][recipename][1] = directions
#get nutrition
nutritionrating = recipe.find_all('ul', id='ulNutrient')
n = 0
for nutrient in nutritionrating:
#category = nutrient.find('li',class_='categories').text
R[food][recipename][2][n]=float(nutrient.find('li',id='divNutrientGradient').attrs['style'][6:-1])/100
n += 1
pickle.dump((I,R),file('AllRecipesIng50FoodExtra.npy','w'))
#================================================================================
#X = np.zeros((len(trainlabels),len(I.keys())-1),dtype=np.float32)
#ingsorted = sorted(I.keys())[1:]
#for i in xrange(len(trainlabels)):
##thresh = np.random.uniform(0,RecipeMax[trainlabels[i]],n)
#dish = fooddish[trainlabels[i]]
#X[i,:] = [1 if x != 0 else 0 for x in Recipes[dish][1:]]
##if len(R[dish].keys()) != 0:
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
##for j in xrange(len(ingsorted)):
##if ingsorted[j] in R[dish][recipe]:
###X[i,j] = R[dish][recipe][ingsorted[j]]
##X[i,j] = 1
###Recipes[food] = [0]*len(ingsorted)
###for i in range(len(ingsorted)):
####if ingredient is in dish R[food]
###if ingsorted[i] in R[food]:
###Recipes[food][i] = R[food][ingsorted[i]]
###X[i,:] = [1 if x>t else 0 for x,t in zip(Recipes[dish],thresh)]
###X[i,:] = Recipes[dish]
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,i]==1)
#print i, len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) == traindata.shape[0]:
#attr_labels[range(0,800,100)] = 0
#attributeclassifiers[i] = svm.SVC(kernel='linear',C=0.001)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())-1),dtype=np.float32)
#for i in xrange(len(testlabels)):
#print 'test case:', i
#Xtest[i,:] = [x.predict(testdata[i,:])[0] for x in attributeclassifiers]
#pickle.dump((X,Xtest),file('vlg_extractor_1.1.2/ImageNetSurveyMC/IngredientAttributes.npy','w'))
###fill out correlation matrix
#m = traindata.shape[1] #number of visual word
#n = len(I.keys()) #number of ingredients
#corr_mat = np.zeros((m,n))
#for i in xrange(len(trainlabels)):
#for visualword in xrange(m):
#if traindata[i,visualword] != 0:
##count co-occurrence of ingredient and visual word
##binaryIng = [1 if x!=0 else 0 for x in Recipes[fooddish[trainlabels[i]]]]
#corr_mat[visualword,:] = corr_mat[visualword,:] + X[i,:]
#pickle.dump(corr_mat,file('corr_mat50Food.npy','w'))
###traindata = np.concatenate((traindata,X),1)
##corr_mat = pickle.load(file('corr_mat.npy','r'))
###normalize corr_mat
#row_sums = corr_mat.sum(axis=1)
#row_sums = np.array([1 if x==0 else x for x in row_sums])
#corr_mat = corr_mat/row_sums[:,np.newaxis]
##avg = corr_mat.mean(axis=0)
#logcormat = np.log(corr_mat+1)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#x = np.dot(testdata[i,:],logcormat)
#Xtest[i,:] = x/sum(x)
##dish = fooddish[testlabels[i]]
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
#for j in xrange(len(ingsorted)):
#if attributeclassifiers[j] is not None:
#Xtest[i,j]=attributeclassifiers[j].predict(testdata[i,:])
##if ingsorted[j] in R[dish][recipe]:
##Xtest[i,j] = 1
##Xtest[i,:] = [1 if xt>t else 0 for xt,t in zip(x,avg)]
#fig = plt.figure()
#ax = fig.add_subplot(5,2,10)
#count = [0]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find negative examples of attribute i
#pos_idx = np.where(X[np.where(trainlabels==9)[0],i]==1)
#count[i] = len(pos_idx[0])
#r = plt.bar(range(589),count)
#ax.set_xticks([])
#plt.xlabel(fooddish[9])
##ax = fig.add_subplot(522)
##r = plt.bar(range(440),Recipes['casserole'])
##ax.set_xticks([])
##plt.xlabel('casserole')
##ax = fig.add_subplot(523)
##r = plt.bar(range(440),Recipes['deviled%eggs'])
##ax.set_xticks([])
##plt.xlabel('deviledegg')
##ax = fig.add_subplot(524)
##r = plt.bar(range(440),Recipes['fried%rice'])
##ax.set_xticks([])
##plt.xlabel('friedrice')
##ax = fig.add_subplot(525)
##r = plt.bar(range(440),Recipes['kebab'])
##ax.set_xticks([])
##plt.xlabel('kebab')
##ax = fig.add_subplot(526)
##r = plt.bar(range(440),Recipes['samosa'])
##ax.set_xticks([])
##plt.xlabel('samosa')
##ax = fig.add_subplot(527)
##r = plt.bar(range(440),Recipes['pasta%salad'])
##ax.set_xticks([])
##plt.xlabel('pastasalad')
##ax = fig.add_subplot(528)
##r = plt.bar(range(440),Recipes['paella'])
##ax.set_xticks([])
##plt.xlabel('Paella')
##ax = fig.add_subplot(529)
##r = plt.bar(range(440),Recipes['spaghetti'])
##ax.set_xticks([])
##plt.xlabel('spaghetti')
##ax = fig.add_subplot(5,2,10)
##r = plt.bar(range(440),Recipes['roulade'])
##ax.set_xticks([])
##plt.xlabel('roulade')
#============== script to get top features ============================
#from sklearn.multiclass import OneVsRestClassifier
#import random as rnd
#recipedict='AllRecipesIng.npy'
#fooddish = fooddish[0]
#dataset = 'vlg_extractor/ImageNetSurveyMC/ImageNetSurveyMC'
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float32)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float32)
#testlabels = var['testlabels'].flatten()
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.uint8)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#k=5
##split training data into k-folds
#kfold = cross_validation.StratifiedKFold(trainlabels,k)
#param_grid = [
#{'estimator__C': [0.001, 0.01, 1, 10, 100], 'estimator__kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#svc = OneVsRestClassifier(svm.SVC(kernel='linear',C=1))
#svc.fit(X,trainlabels)
##clf = GridSearchCV(estimator=svc, param_grid=param_grid, cv=kfold, n_jobs=-1)
##clf.fit(np.concatenate((traindata,X),1),trainlabels)
#svm_weights = svc.coef_
#topfeatures = [None]*svm_weights.shape[0] #topfeatures for each class
#for i in xrange(svm_weights.shape[0]):
#featureIdx=np.argsort(abs(svm_weights[i,:]))
#topfeatures[i] = featureIdx[::-1][0:30] #get top 30
##allfeatures = sorted(list(set().union(*topfeatures)))
###print top features for each class
#for f in xrange(len(fooddish)):
#xlabels = [None]*30
#for ingIdx in xrange(30):
#print fooddish[f], I[ingsorted[topfeatures[f][ingIdx]]], svm_weights[f,topfeatures[f][ingIdx]]
#xlabels[ingIdx] = I[ingsorted[topfeatures[f][ingIdx]]]
#fig=plt.figure()
#ax = fig.add_subplot(111)
#r = plt.bar(range(30),svm_weights[f,topfeatures[f]],color='b')
#ax.set_xticks(np.arange(30)+0.5)
#ax.set_xticklabels(xlabels,rotation=90,fontsize=8)
#ax.set_title(fooddish[f])
#ax.set_ylabel('Feature Weights')
#plt.show()
#=============================END ==================================
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(allfeatures)
#param_grid = [
#{'C': [0.001, 0.01, 1, 10, 100], 'kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#for i in xrange(len(allfeatures)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,allfeatures[i]]==1)
#print I[ingsorted[allfeatures[i]]], len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) != 0:
#attributeclassifiers[i] = GridSearchCV(estimator=svm.SVC(), param_grid=param_grid, cv=kfold, n_jobs=-1)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#for j in xrange(len(allfeatures)):
#Xtest[i,allfeatures[j]]=attributeclassifiers[j].predict(testdata[i,:])[0]
#fig = plt.figure()
#ax = fig.add_subplot(111)
#res = ax.imshow(X,cmap=plt.cm.bone,interpolation='nearest',aspect='auto')
#cb = fig.colorbar(res)
#plt.show()
##testdata = np.concatenate((testdata,Xtest),1)
#==============script to output data for use with cygwin MKL ============
#dataset = "vlg_extractor/ImageNetSurveyPicodes2048/ImageNetSurveyPicodes2048"
#dataset = "BoW2/ImageNet/ImageNetBoW2"
#recipedict = recipeDict[0] #change this
#fooddish = fooddish[0] #change this
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float)
#testlabels = var['testlabels'].flatten()
#images = var['testimages'][0]
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.int)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#np.savez_compressed(dataset+"-MKL",traindata=traindata, testdata=testdata, X=X,
# Xtest=Xtest, trainlabels=trainlabels,testlabels=testlabels)
#pred=np.load(dataset+"-MKL_predictions.npz")
#y_true = pred['y_true']
#y_pred = pred['y_pred']
#from sklearn.metrics import classification_report
#print classification_report(y_true,y_pred)
#================= SCRIPT TO FIND POPULAR INGREDIENTS ====================
#ingredient histogram
#IngHist = {}
#for food in fooddish:
#IngHist[food] = {}
#for recipe in R[food].keys():
#for ingredient in R[food][recipe].keys():
#if ingredient not in IngHist[food]:
#IngHist[food][ingredient] = 1
#else:
#IngHist[food][ingredient] += 1
#commonIngredients = [None]*len(fooddish)
#commonIngredientsIdx = []
#for f in xrange(len(fooddish)):
#commonIngredientsIdx.extend([ingsorted.index(x) for x in IngHist[fooddish[f]].keys() if IngHist[fooddish[f]][x] >= 2 and x != '0'])
#commonIngredientsIdx = sorted(set(commonIngredientsIdx))
#pickle.dump(commonIngredientsIdx,file('CommonIngredientsImageNet.npy','w'))
#fig = plt.figure()
#i=9
#ax = fig.add_subplot(1,1,1)
#r = plt.bar(np.arange(len(IngHist[fooddish[i]].keys())),IngHist[fooddish[i]].values())
#ax.set_xticks(np.arange(len(IngHist[fooddish[i]].keys()))+0.5)
#ax.set_xticklabels([I[x] for x in IngHist[fooddish[i]]],rotation=90,fontsize=8)
#ax.set_title(fooddish[i])
#ax.set_ylabel('Ingredient Count')
#plt.show()
if __name__=="__main__":
IngredientScraper(fooddish[int(sys.argv[1])])
| IngredientScraper2 | identifier_name |
ingredientsScraper.py | from bs4 import BeautifulSoup as bs
import urllib2
import pickle
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
import random as rnd
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
from labels import *
import sys
def IngredientScraper2(fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
R[food][recipename] = {}
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][ingid] = amt
#normalize values
m = sum(R[food][recipename].values())
R[food][recipename]={ingid: R[food][recipename][ingid]/m for ingid in R[food][recipename].keys()}
#Recipes = {}
#ingsorted = sorted(I.keys())
#for food in R.keys():
##m = sum(R[food].values())
##normalize values
##R[food] = {ingid: R[food][ingid]/m for ingid in R[food].keys()}
#Recipes[food] = [0]*len(ingsorted)
#for i in range(len(ingsorted)):
###if ingredient is in dish R[food]
#if ingsorted[i] in R[food]:
#Recipes[food][i] = R[food][ingsorted[i]]
#m = sum(Recipes[food])
#Recipes[food] = [x/m for x in Recipes[food]]
pickle.dump((I,R),file('AllRecipesIngImageNet.npy','w'))
#return I,R
#=================================================================================
# Ingredient Scraper with cooking terms and nutritional info
def IngredientScraper(fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
#list containing ingredients, cookingterms, nutritionrating
R[food][recipename] = [{},[],[0]*7]
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][0][ingid] = amt
#normalize values
m = sum(R[food][recipename][0].values())
R[food][recipename][0]={ingid: R[food][recipename][0][ingid]/m for ingid in R[food][recipename][0].keys()}
#get cooking terms
directions = [step.text.lower() for step in recipe.find_all('span', class_='plaincharacterwrap break')]
R[food][recipename][1] = directions
#get nutrition
nutritionrating = recipe.find_all('ul', id='ulNutrient')
n = 0
for nutrient in nutritionrating:
#category = nutrient.find('li',class_='categories').text
R[food][recipename][2][n]=float(nutrient.find('li',id='divNutrientGradient').attrs['style'][6:-1])/100
n += 1
pickle.dump((I,R),file('AllRecipesIng50FoodExtra.npy','w'))
#================================================================================
#X = np.zeros((len(trainlabels),len(I.keys())-1),dtype=np.float32)
#ingsorted = sorted(I.keys())[1:]
#for i in xrange(len(trainlabels)):
##thresh = np.random.uniform(0,RecipeMax[trainlabels[i]],n)
#dish = fooddish[trainlabels[i]]
#X[i,:] = [1 if x != 0 else 0 for x in Recipes[dish][1:]]
##if len(R[dish].keys()) != 0:
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
##for j in xrange(len(ingsorted)):
##if ingsorted[j] in R[dish][recipe]:
###X[i,j] = R[dish][recipe][ingsorted[j]]
##X[i,j] = 1
###Recipes[food] = [0]*len(ingsorted)
###for i in range(len(ingsorted)):
####if ingredient is in dish R[food]
###if ingsorted[i] in R[food]:
###Recipes[food][i] = R[food][ingsorted[i]]
###X[i,:] = [1 if x>t else 0 for x,t in zip(Recipes[dish],thresh)]
###X[i,:] = Recipes[dish]
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,i]==1)
#print i, len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) == traindata.shape[0]:
#attr_labels[range(0,800,100)] = 0
#attributeclassifiers[i] = svm.SVC(kernel='linear',C=0.001)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())-1),dtype=np.float32)
#for i in xrange(len(testlabels)):
#print 'test case:', i
#Xtest[i,:] = [x.predict(testdata[i,:])[0] for x in attributeclassifiers]
#pickle.dump((X,Xtest),file('vlg_extractor_1.1.2/ImageNetSurveyMC/IngredientAttributes.npy','w'))
###fill out correlation matrix
#m = traindata.shape[1] #number of visual word
#n = len(I.keys()) #number of ingredients
#corr_mat = np.zeros((m,n))
#for i in xrange(len(trainlabels)):
#for visualword in xrange(m):
#if traindata[i,visualword] != 0:
##count co-occurrence of ingredient and visual word
##binaryIng = [1 if x!=0 else 0 for x in Recipes[fooddish[trainlabels[i]]]]
#corr_mat[visualword,:] = corr_mat[visualword,:] + X[i,:]
#pickle.dump(corr_mat,file('corr_mat50Food.npy','w'))
###traindata = np.concatenate((traindata,X),1)
##corr_mat = pickle.load(file('corr_mat.npy','r'))
###normalize corr_mat
#row_sums = corr_mat.sum(axis=1)
#row_sums = np.array([1 if x==0 else x for x in row_sums])
#corr_mat = corr_mat/row_sums[:,np.newaxis]
##avg = corr_mat.mean(axis=0)
#logcormat = np.log(corr_mat+1)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#x = np.dot(testdata[i,:],logcormat)
#Xtest[i,:] = x/sum(x)
##dish = fooddish[testlabels[i]]
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
#for j in xrange(len(ingsorted)):
#if attributeclassifiers[j] is not None:
#Xtest[i,j]=attributeclassifiers[j].predict(testdata[i,:])
##if ingsorted[j] in R[dish][recipe]:
##Xtest[i,j] = 1
##Xtest[i,:] = [1 if xt>t else 0 for xt,t in zip(x,avg)]
#fig = plt.figure()
#ax = fig.add_subplot(5,2,10)
#count = [0]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find negative examples of attribute i
#pos_idx = np.where(X[np.where(trainlabels==9)[0],i]==1)
#count[i] = len(pos_idx[0])
#r = plt.bar(range(589),count)
#ax.set_xticks([])
#plt.xlabel(fooddish[9])
##ax = fig.add_subplot(522)
##r = plt.bar(range(440),Recipes['casserole'])
##ax.set_xticks([])
##plt.xlabel('casserole')
##ax = fig.add_subplot(523)
##r = plt.bar(range(440),Recipes['deviled%eggs'])
##ax.set_xticks([])
##plt.xlabel('deviledegg')
##ax = fig.add_subplot(524)
##r = plt.bar(range(440),Recipes['fried%rice'])
##ax.set_xticks([])
##plt.xlabel('friedrice')
##ax = fig.add_subplot(525)
##r = plt.bar(range(440),Recipes['kebab'])
##ax.set_xticks([])
##plt.xlabel('kebab')
##ax = fig.add_subplot(526)
##r = plt.bar(range(440),Recipes['samosa'])
##ax.set_xticks([])
##plt.xlabel('samosa')
##ax = fig.add_subplot(527)
##r = plt.bar(range(440),Recipes['pasta%salad'])
##ax.set_xticks([])
##plt.xlabel('pastasalad')
##ax = fig.add_subplot(528)
##r = plt.bar(range(440),Recipes['paella'])
##ax.set_xticks([])
##plt.xlabel('Paella')
##ax = fig.add_subplot(529)
##r = plt.bar(range(440),Recipes['spaghetti'])
##ax.set_xticks([])
##plt.xlabel('spaghetti')
##ax = fig.add_subplot(5,2,10)
##r = plt.bar(range(440),Recipes['roulade'])
##ax.set_xticks([])
##plt.xlabel('roulade')
#============== script to get top features ============================
#from sklearn.multiclass import OneVsRestClassifier
#import random as rnd
#recipedict='AllRecipesIng.npy'
#fooddish = fooddish[0]
#dataset = 'vlg_extractor/ImageNetSurveyMC/ImageNetSurveyMC'
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float32)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float32)
#testlabels = var['testlabels'].flatten()
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.uint8)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#k=5
##split training data into k-folds
#kfold = cross_validation.StratifiedKFold(trainlabels,k)
#param_grid = [
#{'estimator__C': [0.001, 0.01, 1, 10, 100], 'estimator__kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#svc = OneVsRestClassifier(svm.SVC(kernel='linear',C=1))
#svc.fit(X,trainlabels)
##clf = GridSearchCV(estimator=svc, param_grid=param_grid, cv=kfold, n_jobs=-1)
##clf.fit(np.concatenate((traindata,X),1),trainlabels)
#svm_weights = svc.coef_ |
##allfeatures = sorted(list(set().union(*topfeatures)))
###print top features for each class
#for f in xrange(len(fooddish)):
#xlabels = [None]*30
#for ingIdx in xrange(30):
#print fooddish[f], I[ingsorted[topfeatures[f][ingIdx]]], svm_weights[f,topfeatures[f][ingIdx]]
#xlabels[ingIdx] = I[ingsorted[topfeatures[f][ingIdx]]]
#fig=plt.figure()
#ax = fig.add_subplot(111)
#r = plt.bar(range(30),svm_weights[f,topfeatures[f]],color='b')
#ax.set_xticks(np.arange(30)+0.5)
#ax.set_xticklabels(xlabels,rotation=90,fontsize=8)
#ax.set_title(fooddish[f])
#ax.set_ylabel('Feature Weights')
#plt.show()
#=============================END ==================================
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(allfeatures)
#param_grid = [
#{'C': [0.001, 0.01, 1, 10, 100], 'kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#for i in xrange(len(allfeatures)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,allfeatures[i]]==1)
#print I[ingsorted[allfeatures[i]]], len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) != 0:
#attributeclassifiers[i] = GridSearchCV(estimator=svm.SVC(), param_grid=param_grid, cv=kfold, n_jobs=-1)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#for j in xrange(len(allfeatures)):
#Xtest[i,allfeatures[j]]=attributeclassifiers[j].predict(testdata[i,:])[0]
#fig = plt.figure()
#ax = fig.add_subplot(111)
#res = ax.imshow(X,cmap=plt.cm.bone,interpolation='nearest',aspect='auto')
#cb = fig.colorbar(res)
#plt.show()
##testdata = np.concatenate((testdata,Xtest),1)
#==============script to output data for use with cygwin MKL ============
#dataset = "vlg_extractor/ImageNetSurveyPicodes2048/ImageNetSurveyPicodes2048"
#dataset = "BoW2/ImageNet/ImageNetBoW2"
#recipedict = recipeDict[0] #change this
#fooddish = fooddish[0] #change this
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float)
#testlabels = var['testlabels'].flatten()
#images = var['testimages'][0]
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.int)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#np.savez_compressed(dataset+"-MKL",traindata=traindata, testdata=testdata, X=X,
# Xtest=Xtest, trainlabels=trainlabels,testlabels=testlabels)
#pred=np.load(dataset+"-MKL_predictions.npz")
#y_true = pred['y_true']
#y_pred = pred['y_pred']
#from sklearn.metrics import classification_report
#print classification_report(y_true,y_pred)
#================= SCRIPT TO FIND POPULAR INGREDIENTS ====================
#ingredient histogram
#IngHist = {}
#for food in fooddish:
#IngHist[food] = {}
#for recipe in R[food].keys():
#for ingredient in R[food][recipe].keys():
#if ingredient not in IngHist[food]:
#IngHist[food][ingredient] = 1
#else:
#IngHist[food][ingredient] += 1
#commonIngredients = [None]*len(fooddish)
#commonIngredientsIdx = []
#for f in xrange(len(fooddish)):
#commonIngredientsIdx.extend([ingsorted.index(x) for x in IngHist[fooddish[f]].keys() if IngHist[fooddish[f]][x] >= 2 and x != '0'])
#commonIngredientsIdx = sorted(set(commonIngredientsIdx))
#pickle.dump(commonIngredientsIdx,file('CommonIngredientsImageNet.npy','w'))
#fig = plt.figure()
#i=9
#ax = fig.add_subplot(1,1,1)
#r = plt.bar(np.arange(len(IngHist[fooddish[i]].keys())),IngHist[fooddish[i]].values())
#ax.set_xticks(np.arange(len(IngHist[fooddish[i]].keys()))+0.5)
#ax.set_xticklabels([I[x] for x in IngHist[fooddish[i]]],rotation=90,fontsize=8)
#ax.set_title(fooddish[i])
#ax.set_ylabel('Ingredient Count')
#plt.show()
if __name__=="__main__":
IngredientScraper(fooddish[int(sys.argv[1])]) | #topfeatures = [None]*svm_weights.shape[0] #topfeatures for each class
#for i in xrange(svm_weights.shape[0]):
#featureIdx=np.argsort(abs(svm_weights[i,:]))
#topfeatures[i] = featureIdx[::-1][0:30] #get top 30 | random_line_split |
ingredientsScraper.py | from bs4 import BeautifulSoup as bs
import urllib2
import pickle
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
import random as rnd
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
from labels import *
import sys
def IngredientScraper2(fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
R[food][recipename] = {}
for ing in ingredients:
|
#normalize values
m = sum(R[food][recipename].values())
R[food][recipename]={ingid: R[food][recipename][ingid]/m for ingid in R[food][recipename].keys()}
#Recipes = {}
#ingsorted = sorted(I.keys())
#for food in R.keys():
##m = sum(R[food].values())
##normalize values
##R[food] = {ingid: R[food][ingid]/m for ingid in R[food].keys()}
#Recipes[food] = [0]*len(ingsorted)
#for i in range(len(ingsorted)):
###if ingredient is in dish R[food]
#if ingsorted[i] in R[food]:
#Recipes[food][i] = R[food][ingsorted[i]]
#m = sum(Recipes[food])
#Recipes[food] = [x/m for x in Recipes[food]]
pickle.dump((I,R),file('AllRecipesIngImageNet.npy','w'))
#return I,R
#=================================================================================
# Ingredient Scraper with cooking terms and nutritional info
def IngredientScraper(fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
#list containing ingredients, cookingterms, nutritionrating
R[food][recipename] = [{},[],[0]*7]
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][0][ingid] = amt
#normalize values
m = sum(R[food][recipename][0].values())
R[food][recipename][0]={ingid: R[food][recipename][0][ingid]/m for ingid in R[food][recipename][0].keys()}
#get cooking terms
directions = [step.text.lower() for step in recipe.find_all('span', class_='plaincharacterwrap break')]
R[food][recipename][1] = directions
#get nutrition
nutritionrating = recipe.find_all('ul', id='ulNutrient')
n = 0
for nutrient in nutritionrating:
#category = nutrient.find('li',class_='categories').text
R[food][recipename][2][n]=float(nutrient.find('li',id='divNutrientGradient').attrs['style'][6:-1])/100
n += 1
pickle.dump((I,R),file('AllRecipesIng50FoodExtra.npy','w'))
#================================================================================
#X = np.zeros((len(trainlabels),len(I.keys())-1),dtype=np.float32)
#ingsorted = sorted(I.keys())[1:]
#for i in xrange(len(trainlabels)):
##thresh = np.random.uniform(0,RecipeMax[trainlabels[i]],n)
#dish = fooddish[trainlabels[i]]
#X[i,:] = [1 if x != 0 else 0 for x in Recipes[dish][1:]]
##if len(R[dish].keys()) != 0:
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
##for j in xrange(len(ingsorted)):
##if ingsorted[j] in R[dish][recipe]:
###X[i,j] = R[dish][recipe][ingsorted[j]]
##X[i,j] = 1
###Recipes[food] = [0]*len(ingsorted)
###for i in range(len(ingsorted)):
####if ingredient is in dish R[food]
###if ingsorted[i] in R[food]:
###Recipes[food][i] = R[food][ingsorted[i]]
###X[i,:] = [1 if x>t else 0 for x,t in zip(Recipes[dish],thresh)]
###X[i,:] = Recipes[dish]
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,i]==1)
#print i, len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) == traindata.shape[0]:
#attr_labels[range(0,800,100)] = 0
#attributeclassifiers[i] = svm.SVC(kernel='linear',C=0.001)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())-1),dtype=np.float32)
#for i in xrange(len(testlabels)):
#print 'test case:', i
#Xtest[i,:] = [x.predict(testdata[i,:])[0] for x in attributeclassifiers]
#pickle.dump((X,Xtest),file('vlg_extractor_1.1.2/ImageNetSurveyMC/IngredientAttributes.npy','w'))
###fill out correlation matrix
#m = traindata.shape[1] #number of visual word
#n = len(I.keys()) #number of ingredients
#corr_mat = np.zeros((m,n))
#for i in xrange(len(trainlabels)):
#for visualword in xrange(m):
#if traindata[i,visualword] != 0:
##count co-occurrence of ingredient and visual word
##binaryIng = [1 if x!=0 else 0 for x in Recipes[fooddish[trainlabels[i]]]]
#corr_mat[visualword,:] = corr_mat[visualword,:] + X[i,:]
#pickle.dump(corr_mat,file('corr_mat50Food.npy','w'))
###traindata = np.concatenate((traindata,X),1)
##corr_mat = pickle.load(file('corr_mat.npy','r'))
###normalize corr_mat
#row_sums = corr_mat.sum(axis=1)
#row_sums = np.array([1 if x==0 else x for x in row_sums])
#corr_mat = corr_mat/row_sums[:,np.newaxis]
##avg = corr_mat.mean(axis=0)
#logcormat = np.log(corr_mat+1)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#x = np.dot(testdata[i,:],logcormat)
#Xtest[i,:] = x/sum(x)
##dish = fooddish[testlabels[i]]
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
#for j in xrange(len(ingsorted)):
#if attributeclassifiers[j] is not None:
#Xtest[i,j]=attributeclassifiers[j].predict(testdata[i,:])
##if ingsorted[j] in R[dish][recipe]:
##Xtest[i,j] = 1
##Xtest[i,:] = [1 if xt>t else 0 for xt,t in zip(x,avg)]
#fig = plt.figure()
#ax = fig.add_subplot(5,2,10)
#count = [0]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find negative examples of attribute i
#pos_idx = np.where(X[np.where(trainlabels==9)[0],i]==1)
#count[i] = len(pos_idx[0])
#r = plt.bar(range(589),count)
#ax.set_xticks([])
#plt.xlabel(fooddish[9])
##ax = fig.add_subplot(522)
##r = plt.bar(range(440),Recipes['casserole'])
##ax.set_xticks([])
##plt.xlabel('casserole')
##ax = fig.add_subplot(523)
##r = plt.bar(range(440),Recipes['deviled%eggs'])
##ax.set_xticks([])
##plt.xlabel('deviledegg')
##ax = fig.add_subplot(524)
##r = plt.bar(range(440),Recipes['fried%rice'])
##ax.set_xticks([])
##plt.xlabel('friedrice')
##ax = fig.add_subplot(525)
##r = plt.bar(range(440),Recipes['kebab'])
##ax.set_xticks([])
##plt.xlabel('kebab')
##ax = fig.add_subplot(526)
##r = plt.bar(range(440),Recipes['samosa'])
##ax.set_xticks([])
##plt.xlabel('samosa')
##ax = fig.add_subplot(527)
##r = plt.bar(range(440),Recipes['pasta%salad'])
##ax.set_xticks([])
##plt.xlabel('pastasalad')
##ax = fig.add_subplot(528)
##r = plt.bar(range(440),Recipes['paella'])
##ax.set_xticks([])
##plt.xlabel('Paella')
##ax = fig.add_subplot(529)
##r = plt.bar(range(440),Recipes['spaghetti'])
##ax.set_xticks([])
##plt.xlabel('spaghetti')
##ax = fig.add_subplot(5,2,10)
##r = plt.bar(range(440),Recipes['roulade'])
##ax.set_xticks([])
##plt.xlabel('roulade')
#============== script to get top features ============================
#from sklearn.multiclass import OneVsRestClassifier
#import random as rnd
#recipedict='AllRecipesIng.npy'
#fooddish = fooddish[0]
#dataset = 'vlg_extractor/ImageNetSurveyMC/ImageNetSurveyMC'
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float32)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float32)
#testlabels = var['testlabels'].flatten()
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.uint8)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#k=5
##split training data into k-folds
#kfold = cross_validation.StratifiedKFold(trainlabels,k)
#param_grid = [
#{'estimator__C': [0.001, 0.01, 1, 10, 100], 'estimator__kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#svc = OneVsRestClassifier(svm.SVC(kernel='linear',C=1))
#svc.fit(X,trainlabels)
##clf = GridSearchCV(estimator=svc, param_grid=param_grid, cv=kfold, n_jobs=-1)
##clf.fit(np.concatenate((traindata,X),1),trainlabels)
#svm_weights = svc.coef_
#topfeatures = [None]*svm_weights.shape[0] #topfeatures for each class
#for i in xrange(svm_weights.shape[0]):
#featureIdx=np.argsort(abs(svm_weights[i,:]))
#topfeatures[i] = featureIdx[::-1][0:30] #get top 30
##allfeatures = sorted(list(set().union(*topfeatures)))
###print top features for each class
#for f in xrange(len(fooddish)):
#xlabels = [None]*30
#for ingIdx in xrange(30):
#print fooddish[f], I[ingsorted[topfeatures[f][ingIdx]]], svm_weights[f,topfeatures[f][ingIdx]]
#xlabels[ingIdx] = I[ingsorted[topfeatures[f][ingIdx]]]
#fig=plt.figure()
#ax = fig.add_subplot(111)
#r = plt.bar(range(30),svm_weights[f,topfeatures[f]],color='b')
#ax.set_xticks(np.arange(30)+0.5)
#ax.set_xticklabels(xlabels,rotation=90,fontsize=8)
#ax.set_title(fooddish[f])
#ax.set_ylabel('Feature Weights')
#plt.show()
#=============================END ==================================
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(allfeatures)
#param_grid = [
#{'C': [0.001, 0.01, 1, 10, 100], 'kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#for i in xrange(len(allfeatures)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,allfeatures[i]]==1)
#print I[ingsorted[allfeatures[i]]], len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) != 0:
#attributeclassifiers[i] = GridSearchCV(estimator=svm.SVC(), param_grid=param_grid, cv=kfold, n_jobs=-1)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#for j in xrange(len(allfeatures)):
#Xtest[i,allfeatures[j]]=attributeclassifiers[j].predict(testdata[i,:])[0]
#fig = plt.figure()
#ax = fig.add_subplot(111)
#res = ax.imshow(X,cmap=plt.cm.bone,interpolation='nearest',aspect='auto')
#cb = fig.colorbar(res)
#plt.show()
##testdata = np.concatenate((testdata,Xtest),1)
#==============script to output data for use with cygwin MKL ============
#dataset = "vlg_extractor/ImageNetSurveyPicodes2048/ImageNetSurveyPicodes2048"
#dataset = "BoW2/ImageNet/ImageNetBoW2"
#recipedict = recipeDict[0] #change this
#fooddish = fooddish[0] #change this
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float)
#testlabels = var['testlabels'].flatten()
#images = var['testimages'][0]
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.int)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#np.savez_compressed(dataset+"-MKL",traindata=traindata, testdata=testdata, X=X,
# Xtest=Xtest, trainlabels=trainlabels,testlabels=testlabels)
#pred=np.load(dataset+"-MKL_predictions.npz")
#y_true = pred['y_true']
#y_pred = pred['y_pred']
#from sklearn.metrics import classification_report
#print classification_report(y_true,y_pred)
#================= SCRIPT TO FIND POPULAR INGREDIENTS ====================
#ingredient histogram
#IngHist = {}
#for food in fooddish:
#IngHist[food] = {}
#for recipe in R[food].keys():
#for ingredient in R[food][recipe].keys():
#if ingredient not in IngHist[food]:
#IngHist[food][ingredient] = 1
#else:
#IngHist[food][ingredient] += 1
#commonIngredients = [None]*len(fooddish)
#commonIngredientsIdx = []
#for f in xrange(len(fooddish)):
#commonIngredientsIdx.extend([ingsorted.index(x) for x in IngHist[fooddish[f]].keys() if IngHist[fooddish[f]][x] >= 2 and x != '0'])
#commonIngredientsIdx = sorted(set(commonIngredientsIdx))
#pickle.dump(commonIngredientsIdx,file('CommonIngredientsImageNet.npy','w'))
#fig = plt.figure()
#i=9
#ax = fig.add_subplot(1,1,1)
#r = plt.bar(np.arange(len(IngHist[fooddish[i]].keys())),IngHist[fooddish[i]].values())
#ax.set_xticks(np.arange(len(IngHist[fooddish[i]].keys()))+0.5)
#ax.set_xticklabels([I[x] for x in IngHist[fooddish[i]]],rotation=90,fontsize=8)
#ax.set_title(fooddish[i])
#ax.set_ylabel('Ingredient Count')
#plt.show()
if __name__=="__main__":
IngredientScraper(fooddish[int(sys.argv[1])])
| ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][ingid] = amt | conditional_block |
ingredientsScraper.py | from bs4 import BeautifulSoup as bs
import urllib2
import pickle
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
import random as rnd
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
from labels import *
import sys
def IngredientScraper2(fooddish):
#dictionary for ingredients
|
#=================================================================================
# Ingredient Scraper with cooking terms and nutritional info
def IngredientScraper(fooddish):
#dictionary for ingredients
I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
#list containing ingredients, cookingterms, nutritionrating
R[food][recipename] = [{},[],[0]*7]
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][0][ingid] = amt
#normalize values
m = sum(R[food][recipename][0].values())
R[food][recipename][0]={ingid: R[food][recipename][0][ingid]/m for ingid in R[food][recipename][0].keys()}
#get cooking terms
directions = [step.text.lower() for step in recipe.find_all('span', class_='plaincharacterwrap break')]
R[food][recipename][1] = directions
#get nutrition
nutritionrating = recipe.find_all('ul', id='ulNutrient')
n = 0
for nutrient in nutritionrating:
#category = nutrient.find('li',class_='categories').text
R[food][recipename][2][n]=float(nutrient.find('li',id='divNutrientGradient').attrs['style'][6:-1])/100
n += 1
pickle.dump((I,R),file('AllRecipesIng50FoodExtra.npy','w'))
#================================================================================
#X = np.zeros((len(trainlabels),len(I.keys())-1),dtype=np.float32)
#ingsorted = sorted(I.keys())[1:]
#for i in xrange(len(trainlabels)):
##thresh = np.random.uniform(0,RecipeMax[trainlabels[i]],n)
#dish = fooddish[trainlabels[i]]
#X[i,:] = [1 if x != 0 else 0 for x in Recipes[dish][1:]]
##if len(R[dish].keys()) != 0:
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
##for j in xrange(len(ingsorted)):
##if ingsorted[j] in R[dish][recipe]:
###X[i,j] = R[dish][recipe][ingsorted[j]]
##X[i,j] = 1
###Recipes[food] = [0]*len(ingsorted)
###for i in range(len(ingsorted)):
####if ingredient is in dish R[food]
###if ingsorted[i] in R[food]:
###Recipes[food][i] = R[food][ingsorted[i]]
###X[i,:] = [1 if x>t else 0 for x,t in zip(Recipes[dish],thresh)]
###X[i,:] = Recipes[dish]
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,i]==1)
#print i, len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) == traindata.shape[0]:
#attr_labels[range(0,800,100)] = 0
#attributeclassifiers[i] = svm.SVC(kernel='linear',C=0.001)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())-1),dtype=np.float32)
#for i in xrange(len(testlabels)):
#print 'test case:', i
#Xtest[i,:] = [x.predict(testdata[i,:])[0] for x in attributeclassifiers]
#pickle.dump((X,Xtest),file('vlg_extractor_1.1.2/ImageNetSurveyMC/IngredientAttributes.npy','w'))
###fill out correlation matrix
#m = traindata.shape[1] #number of visual word
#n = len(I.keys()) #number of ingredients
#corr_mat = np.zeros((m,n))
#for i in xrange(len(trainlabels)):
#for visualword in xrange(m):
#if traindata[i,visualword] != 0:
##count co-occurrence of ingredient and visual word
##binaryIng = [1 if x!=0 else 0 for x in Recipes[fooddish[trainlabels[i]]]]
#corr_mat[visualword,:] = corr_mat[visualword,:] + X[i,:]
#pickle.dump(corr_mat,file('corr_mat50Food.npy','w'))
###traindata = np.concatenate((traindata,X),1)
##corr_mat = pickle.load(file('corr_mat.npy','r'))
###normalize corr_mat
#row_sums = corr_mat.sum(axis=1)
#row_sums = np.array([1 if x==0 else x for x in row_sums])
#corr_mat = corr_mat/row_sums[:,np.newaxis]
##avg = corr_mat.mean(axis=0)
#logcormat = np.log(corr_mat+1)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#x = np.dot(testdata[i,:],logcormat)
#Xtest[i,:] = x/sum(x)
##dish = fooddish[testlabels[i]]
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
#for j in xrange(len(ingsorted)):
#if attributeclassifiers[j] is not None:
#Xtest[i,j]=attributeclassifiers[j].predict(testdata[i,:])
##if ingsorted[j] in R[dish][recipe]:
##Xtest[i,j] = 1
##Xtest[i,:] = [1 if xt>t else 0 for xt,t in zip(x,avg)]
#fig = plt.figure()
#ax = fig.add_subplot(5,2,10)
#count = [0]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find negative examples of attribute i
#pos_idx = np.where(X[np.where(trainlabels==9)[0],i]==1)
#count[i] = len(pos_idx[0])
#r = plt.bar(range(589),count)
#ax.set_xticks([])
#plt.xlabel(fooddish[9])
##ax = fig.add_subplot(522)
##r = plt.bar(range(440),Recipes['casserole'])
##ax.set_xticks([])
##plt.xlabel('casserole')
##ax = fig.add_subplot(523)
##r = plt.bar(range(440),Recipes['deviled%eggs'])
##ax.set_xticks([])
##plt.xlabel('deviledegg')
##ax = fig.add_subplot(524)
##r = plt.bar(range(440),Recipes['fried%rice'])
##ax.set_xticks([])
##plt.xlabel('friedrice')
##ax = fig.add_subplot(525)
##r = plt.bar(range(440),Recipes['kebab'])
##ax.set_xticks([])
##plt.xlabel('kebab')
##ax = fig.add_subplot(526)
##r = plt.bar(range(440),Recipes['samosa'])
##ax.set_xticks([])
##plt.xlabel('samosa')
##ax = fig.add_subplot(527)
##r = plt.bar(range(440),Recipes['pasta%salad'])
##ax.set_xticks([])
##plt.xlabel('pastasalad')
##ax = fig.add_subplot(528)
##r = plt.bar(range(440),Recipes['paella'])
##ax.set_xticks([])
##plt.xlabel('Paella')
##ax = fig.add_subplot(529)
##r = plt.bar(range(440),Recipes['spaghetti'])
##ax.set_xticks([])
##plt.xlabel('spaghetti')
##ax = fig.add_subplot(5,2,10)
##r = plt.bar(range(440),Recipes['roulade'])
##ax.set_xticks([])
##plt.xlabel('roulade')
#============== script to get top features ============================
#from sklearn.multiclass import OneVsRestClassifier
#import random as rnd
#recipedict='AllRecipesIng.npy'
#fooddish = fooddish[0]
#dataset = 'vlg_extractor/ImageNetSurveyMC/ImageNetSurveyMC'
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float32)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float32)
#testlabels = var['testlabels'].flatten()
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.uint8)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#k=5
##split training data into k-folds
#kfold = cross_validation.StratifiedKFold(trainlabels,k)
#param_grid = [
#{'estimator__C': [0.001, 0.01, 1, 10, 100], 'estimator__kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#svc = OneVsRestClassifier(svm.SVC(kernel='linear',C=1))
#svc.fit(X,trainlabels)
##clf = GridSearchCV(estimator=svc, param_grid=param_grid, cv=kfold, n_jobs=-1)
##clf.fit(np.concatenate((traindata,X),1),trainlabels)
#svm_weights = svc.coef_
#topfeatures = [None]*svm_weights.shape[0] #topfeatures for each class
#for i in xrange(svm_weights.shape[0]):
#featureIdx=np.argsort(abs(svm_weights[i,:]))
#topfeatures[i] = featureIdx[::-1][0:30] #get top 30
##allfeatures = sorted(list(set().union(*topfeatures)))
###print top features for each class
#for f in xrange(len(fooddish)):
#xlabels = [None]*30
#for ingIdx in xrange(30):
#print fooddish[f], I[ingsorted[topfeatures[f][ingIdx]]], svm_weights[f,topfeatures[f][ingIdx]]
#xlabels[ingIdx] = I[ingsorted[topfeatures[f][ingIdx]]]
#fig=plt.figure()
#ax = fig.add_subplot(111)
#r = plt.bar(range(30),svm_weights[f,topfeatures[f]],color='b')
#ax.set_xticks(np.arange(30)+0.5)
#ax.set_xticklabels(xlabels,rotation=90,fontsize=8)
#ax.set_title(fooddish[f])
#ax.set_ylabel('Feature Weights')
#plt.show()
#=============================END ==================================
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(allfeatures)
#param_grid = [
#{'C': [0.001, 0.01, 1, 10, 100], 'kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#for i in xrange(len(allfeatures)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,allfeatures[i]]==1)
#print I[ingsorted[allfeatures[i]]], len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) != 0:
#attributeclassifiers[i] = GridSearchCV(estimator=svm.SVC(), param_grid=param_grid, cv=kfold, n_jobs=-1)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#for j in xrange(len(allfeatures)):
#Xtest[i,allfeatures[j]]=attributeclassifiers[j].predict(testdata[i,:])[0]
#fig = plt.figure()
#ax = fig.add_subplot(111)
#res = ax.imshow(X,cmap=plt.cm.bone,interpolation='nearest',aspect='auto')
#cb = fig.colorbar(res)
#plt.show()
##testdata = np.concatenate((testdata,Xtest),1)
#==============script to output data for use with cygwin MKL ============
#dataset = "vlg_extractor/ImageNetSurveyPicodes2048/ImageNetSurveyPicodes2048"
#dataset = "BoW2/ImageNet/ImageNetBoW2"
#recipedict = recipeDict[0] #change this
#fooddish = fooddish[0] #change this
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float)
#testlabels = var['testlabels'].flatten()
#images = var['testimages'][0]
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.int)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#np.savez_compressed(dataset+"-MKL",traindata=traindata, testdata=testdata, X=X,
# Xtest=Xtest, trainlabels=trainlabels,testlabels=testlabels)
#pred=np.load(dataset+"-MKL_predictions.npz")
#y_true = pred['y_true']
#y_pred = pred['y_pred']
#from sklearn.metrics import classification_report
#print classification_report(y_true,y_pred)
#================= SCRIPT TO FIND POPULAR INGREDIENTS ====================
#ingredient histogram
#IngHist = {}
#for food in fooddish:
#IngHist[food] = {}
#for recipe in R[food].keys():
#for ingredient in R[food][recipe].keys():
#if ingredient not in IngHist[food]:
#IngHist[food][ingredient] = 1
#else:
#IngHist[food][ingredient] += 1
#commonIngredients = [None]*len(fooddish)
#commonIngredientsIdx = []
#for f in xrange(len(fooddish)):
#commonIngredientsIdx.extend([ingsorted.index(x) for x in IngHist[fooddish[f]].keys() if IngHist[fooddish[f]][x] >= 2 and x != '0'])
#commonIngredientsIdx = sorted(set(commonIngredientsIdx))
#pickle.dump(commonIngredientsIdx,file('CommonIngredientsImageNet.npy','w'))
#fig = plt.figure()
#i=9
#ax = fig.add_subplot(1,1,1)
#r = plt.bar(np.arange(len(IngHist[fooddish[i]].keys())),IngHist[fooddish[i]].values())
#ax.set_xticks(np.arange(len(IngHist[fooddish[i]].keys()))+0.5)
#ax.set_xticklabels([I[x] for x in IngHist[fooddish[i]]],rotation=90,fontsize=8)
#ax.set_title(fooddish[i])
#ax.set_ylabel('Ingredient Count')
#plt.show()
if __name__=="__main__":
IngredientScraper(fooddish[int(sys.argv[1])])
| I = {}
#dictionary for food recipes
R = {}
website = 'http://allrecipes.com'
for food in fooddish:
R[food] = {}
#search for food
print food
#for page in xrange(2):
resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
results = bs(resultspage)
for recipelinks in results.find_all('a',class_='title'):
recipelink = recipelinks.get('href')
#go to recipe page
recipepage = urllib2.urlopen(website+recipelink)
recipe = bs(recipepage)
recipename = recipe.find('h1',id='itemTitle').text
if recipename not in R[food]:
#print "Recipe: ", recipename
#ingredients for this recipe
ingredients = recipe.find_all('li', id='liIngredient')
R[food][recipename] = {}
for ing in ingredients:
ingid = ing.attrs['data-ingredientid']
ingname = ing.find(id='lblIngName').text
if ingid not in I:
I[ingid] = ingname
amt=float(ing.attrs['data-grams'])
R[food][recipename][ingid] = amt
#normalize values
m = sum(R[food][recipename].values())
R[food][recipename]={ingid: R[food][recipename][ingid]/m for ingid in R[food][recipename].keys()}
#Recipes = {}
#ingsorted = sorted(I.keys())
#for food in R.keys():
##m = sum(R[food].values())
##normalize values
##R[food] = {ingid: R[food][ingid]/m for ingid in R[food].keys()}
#Recipes[food] = [0]*len(ingsorted)
#for i in range(len(ingsorted)):
###if ingredient is in dish R[food]
#if ingsorted[i] in R[food]:
#Recipes[food][i] = R[food][ingsorted[i]]
#m = sum(Recipes[food])
#Recipes[food] = [x/m for x in Recipes[food]]
pickle.dump((I,R),file('AllRecipesIngImageNet.npy','w'))
#return I,R | identifier_body |
buffered.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
//! Buffering wrappers for I/O traits
use crate::io::prelude::*;
use crate::error;
use crate::io::{
self, Error, ErrorKind, Initializer, IoSlice, IoSliceMut, SeekFrom, DEFAULT_BUF_SIZE,
};
use crate::memchr;
use core::cmp;
use core::fmt;
/// The `BufReader` struct adds buffering to any reader.
///
/// It can be excessively inefficient to work directly with a [`Read`] instance.
/// For example, every call to [`read`][`TcpStream::read`] on [`TcpStream`]
/// results in a system call. A `BufReader` performs large, infrequent reads on
/// the underlying [`Read`] and maintains an in-memory buffer of the results.
///
/// `BufReader` can improve the speed of programs that make *small* and
/// *repeated* read calls to the same file or network socket. It does not
/// help when reading very large amounts at once, or reading just one or a few
/// times. It also provides no advantage when reading from a source that is
/// already in memory, like a `Vec<u8>`.
///
/// When the `BufReader<R>` is dropped, the contents of its buffer will be
/// discarded. Creating multiple instances of a `BufReader<R>` on the same
/// stream can cause data loss. Reading from the underlying reader after
/// unwrapping the `BufReader<R>` with `BufReader::into_inner` can also cause
/// data loss.
///
/// [`Read`]: ../../std/io/trait.Read.html
/// [`TcpStream::read`]: ../../std/net/struct.TcpStream.html#method.read
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
///
pub struct BufReader<R> {
inner: R,
buf: Box<[u8]>,
pos: usize,
cap: usize,
}
impl<R: Read> BufReader<R> {
/// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: R) -> BufReader<R> {
BufReader::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufReader<R>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> {
unsafe {
let mut buffer = Vec::with_capacity(capacity);
buffer.set_len(capacity);
inner.initializer().initialize(&mut buffer);
BufReader { inner, buf: buffer.into_boxed_slice(), pos: 0, cap: 0 }
}
}
}
impl<R> BufReader<R> {
/// Gets a reference to the underlying reader.
///
/// It is inadvisable to directly read from the underlying reader.
///
pub fn get_ref(&self) -> &R {
&self.inner
}
/// Gets a mutable reference to the underlying reader.
///
/// It is inadvisable to directly read from the underlying reader.
///
pub fn get_mut(&mut self) -> &mut R {
&mut self.inner
}
/// Returns a reference to the internally buffered data.
///
/// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty.
///
pub fn buffer(&self) -> &[u8] {
&self.buf[self.pos..self.cap]
}
/// Returns the number of bytes the internal buffer can hold at once.
///
pub fn capacity(&self) -> usize {
self.buf.len()
}
/// Unwraps this `BufReader<R>`, returning the underlying reader.
///
/// Note that any leftover data in the internal buffer is lost. Therefore,
/// a following read from the underlying reader may lead to data loss.
///
pub fn into_inner(self) -> R {
self.inner
}
/// Invalidates all data in the internal buffer.
#[inline]
fn discard_buffer(&mut self) {
self.pos = 0;
self.cap = 0;
}
}
impl<R: Seek> BufReader<R> {
/// Seeks relative to the current position. If the new position lies within the buffer,
/// the buffer will not be flushed, allowing for more efficient seeks.
/// This method does not return the location of the underlying reader, so the caller
/// must track this information themselves if it is required.
pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
let pos = self.pos as u64;
if offset < 0 {
if let Some(new_pos) = pos.checked_sub((-offset) as u64) {
self.pos = new_pos as usize;
return Ok(());
}
} else {
if let Some(new_pos) = pos.checked_add(offset as u64) {
if new_pos <= self.cap as u64 {
self.pos = new_pos as usize;
return Ok(());
}
}
}
self.seek(SeekFrom::Current(offset)).map(drop)
}
}
impl<R: Read> Read for BufReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
// If we don't have any buffered data and we're doing a massive read
// (larger than our internal buffer), bypass our internal buffer
// entirely.
if self.pos == self.cap && buf.len() >= self.buf.len() {
self.discard_buffer();
return self.inner.read(buf);
}
let nread = {
let mut rem = self.fill_buf()?;
rem.read(buf)?
};
self.consume(nread);
Ok(nread)
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.pos == self.cap && total_len >= self.buf.len() {
self.discard_buffer();
return self.inner.read_vectored(bufs);
}
let nread = {
let mut rem = self.fill_buf()?;
rem.read_vectored(bufs)?
};
self.consume(nread);
Ok(nread)
}
// we can't skip unconditionally because of the large buffer case in read.
unsafe fn initializer(&self) -> Initializer {
self.inner.initializer()
}
}
impl<R: Read> BufRead for BufReader<R> {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
// If we've reached the end of our internal buffer then we need to fetch
// some more data from the underlying reader.
// Branch using `>=` instead of the more correct `==`
// to tell the compiler that the pos..cap slice is always valid.
if self.pos >= self.cap {
debug_assert!(self.pos == self.cap);
self.cap = self.inner.read(&mut self.buf)?;
self.pos = 0;
}
Ok(&self.buf[self.pos..self.cap])
}
fn consume(&mut self, amt: usize) {
self.pos = cmp::min(self.pos + amt, self.cap);
}
}
impl<R> fmt::Debug for BufReader<R>
where
R: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufReader")
.field("reader", &self.inner)
.field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len()))
.finish()
}
}
impl<R: Seek> Seek for BufReader<R> {
/// Seek to an offset, in bytes, in the underlying reader.
///
/// The position used for seeking with `SeekFrom::Current(_)` is the
/// position the underlying reader would be at if the `BufReader<R>` had no
/// internal buffer.
///
/// Seeking always discards the internal buffer, even if the seek position
/// would otherwise fall within it. This guarantees that calling
/// `.into_inner()` immediately after a seek yields the underlying reader
/// at the same position.
///
/// To seek without discarding the internal buffer, use [`BufReader::seek_relative`].
///
/// See [`std::io::Seek`] for more details.
///
/// Note: In the edge case where you're seeking with `SeekFrom::Current(n)`
/// where `n` minus the internal buffer length overflows an `i64`, two
/// seeks will be performed instead of one. If the second seek returns
/// `Err`, the underlying reader will be left at the same position it would
/// have if you called `seek` with `SeekFrom::Current(0)`.
///
/// [`BufReader::seek_relative`]: struct.BufReader.html#method.seek_relative
/// [`std::io::Seek`]: trait.Seek.html
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let result: u64;
if let SeekFrom::Current(n) = pos {
let remainder = (self.cap - self.pos) as i64;
// it should be safe to assume that remainder fits within an i64 as the alternative
// means we managed to allocate 8 exbibytes and that's absurd.
// But it's not out of the realm of possibility for some weird underlying reader to
// support seeking by i64::min_value() so we need to handle underflow when subtracting
// remainder.
if let Some(offset) = n.checked_sub(remainder) {
result = self.inner.seek(SeekFrom::Current(offset))?;
} else {
// seek backwards by our remainder, and then by the offset
self.inner.seek(SeekFrom::Current(-remainder))?;
self.discard_buffer();
result = self.inner.seek(SeekFrom::Current(n))?;
}
} else {
// Seeking with Start/End doesn't care about our buffer length.
result = self.inner.seek(pos)?;
}
self.discard_buffer();
Ok(result)
}
}
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`Write`]. For example, every call to
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
/// writer in large, infrequent batches.
///
/// `BufWriter<W>` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
/// together by the buffer and will all be written out in one system call when
/// the `stream` is flushed.
///
/// [`Write`]: ../../std/io/trait.Write.html
/// [`TcpStream::write`]: ../../std/net/struct.TcpStream.html#method.write
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
/// [`flush`]: #method.flush
pub struct BufWriter<W: Write> {
inner: Option<W>,
buf: Vec<u8>,
// #30888: If the inner writer panics in a call to write, we don't want to
// write the buffered data a second time in BufWriter's destructor. This
// flag tells the Drop impl if it should skip the flush.
panicked: bool,
}
/// An error returned by `into_inner` which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
///
#[derive(Debug)]
pub struct IntoInnerError<W>(W, Error);
impl<W: Write> BufWriter<W> {
/// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: W) -> BufWriter<W> {
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter<W>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false }
}
fn flush_buf(&mut self) -> io::Result<()> {
let mut written = 0;
let len = self.buf.len();
let mut ret = Ok(());
while written < len {
self.panicked = true;
let r = self.inner.as_mut().unwrap().write(&self.buf[written..]);
self.panicked = false;
match r {
Ok(0) => {
ret =
Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data"));
break;
}
Ok(n) => written += n,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => {
ret = Err(e);
break;
}
}
}
if written > 0 {
self.buf.drain(..written);
}
ret
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.as_ref().unwrap()
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.as_mut().unwrap()
}
/// Returns a reference to the internally buffered data.
///
pub fn buffer(&self) -> &[u8] {
&self.buf
}
/// Returns the number of bytes the internal buffer can hold without flushing.
///
pub fn capacity(&self) -> usize {
self.buf.capacity()
}
/// Unwraps this `BufWriter<W>`, returning the underlying writer.
///
/// The buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
match self.flush_buf() {
Err(e) => Err(IntoInnerError(self, e)),
Ok(()) => Ok(self.inner.take().unwrap()),
}
}
}
impl<W: Write> Write for BufWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.buf.len() + buf.len() > self.buf.capacity() {
self.flush_buf()?;
}
if buf.len() >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write(buf);
self.panicked = false;
r
} else {
self.buf.write(buf)
}
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
self.flush_buf()?;
}
if total_len >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write_vectored(bufs);
self.panicked = false;
r
} else {
self.buf.write_vectored(bufs)
}
}
fn flush(&mut self) -> io::Result<()> {
self.flush_buf().and_then(|()| self.get_mut().flush())
}
}
impl<W: Write> fmt::Debug for BufWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufWriter")
.field("writer", &self.inner.as_ref().unwrap())
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.finish()
}
}
impl<W: Write + Seek> Seek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.flush_buf().and_then(|_| self.get_mut().seek(pos))
}
}
impl<W: Write> Drop for BufWriter<W> {
fn drop(&mut self) {
if self.inner.is_some() && !self.panicked {
// dtors should not panic, so we ignore a failed flush
let _r = self.flush_buf();
}
}
}
impl<W> IntoInnerError<W> {
/// Returns the error which caused the call to `into_inner()` to fail.
///
/// This error was returned when attempting to write the internal buffer.
///
pub fn error(&self) -> &Error {
&self.1
}
/// Returns the buffered writer instance which generated the error.
///
/// The returned object can be used for error recovery, such as
/// re-inspecting the buffer.
///
pub fn into_inner(self) -> W {
self.0
}
}
impl<W> From<IntoInnerError<W>> for Error {
fn from(iie: IntoInnerError<W>) -> Error {
iie.1
}
}
impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {
fn description(&self) -> &str {
error::Error::description(self.error())
}
}
impl<W> fmt::Display for IntoInnerError<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.error().fmt(f)
}
}
/// Wraps a writer and buffers output to it, flushing whenever a newline
/// (`0x0a`, `'\n'`) is detected.
///
/// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output.
/// But it only does this batched write when it goes out of scope, or when the
/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
/// does exactly that.
///
/// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the
/// `LineWriter` goes out of scope or when its internal buffer is full.
///
/// [bufwriter]: struct.BufWriter.html
///
/// If there's still a partial line in the buffer when the `LineWriter` is
/// dropped, it will flush those contents.
///
pub struct LineWriter<W: Write> {
inner: BufWriter<W>,
need_flush: bool,
}
impl<W: Write> LineWriter<W> {
/// Creates a new `LineWriter`.
///
pub fn new(inner: W) -> LineWriter<W> {
// Lines typically aren't that long, don't use a giant buffer
LineWriter::with_capacity(1024, inner)
}
/// Creates a new `LineWriter` with a specified capacity for the internal
/// buffer.
///
pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
LineWriter { inner: BufWriter::with_capacity(capacity, inner), need_flush: false }
}
/// Gets a reference to the underlying writer.
///
pub fn ge | self) -> &W {
self.inner.get_ref()
}
/// Gets a mutable reference to the underlying writer.
///
/// Caution must be taken when calling methods on the mutable reference
/// returned as extra writes could corrupt the output stream.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.get_mut()
}
/// Unwraps this `LineWriter`, returning the underlying writer.
///
/// The internal buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
self.inner.into_inner().map_err(|IntoInnerError(buf, e)| {
IntoInnerError(LineWriter { inner: buf, need_flush: false }, e)
})
}
}
impl<W: Write> Write for LineWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.need_flush {
self.flush()?;
}
// Find the last newline character in the buffer provided. If found then
// we're going to write all the data up to that point and then flush,
// otherwise we just write the whole block to the underlying writer.
let i = match memchr::memrchr(b'\n', buf) {
Some(i) => i,
None => return self.inner.write(buf),
};
// Ok, we're going to write a partial amount of the data given first
// followed by flushing the newline. After we've successfully written
// some data then we *must* report that we wrote that data, so future
// errors are ignored. We set our internal `need_flush` flag, though, in
// case flushing fails and we need to try it first next time.
let n = self.inner.write(&buf[..=i])?;
self.need_flush = true;
if self.flush().is_err() || n != i + 1 {
return Ok(n);
}
// At this point we successfully wrote `i + 1` bytes and flushed it out,
// meaning that the entire line is now flushed out on the screen. While
// we can attempt to finish writing the rest of the data provided.
// Remember though that we ignore errors here as we've successfully
// written data, so we need to report that.
match self.inner.write(&buf[i + 1..]) {
Ok(i) => Ok(n + i),
Err(_) => Ok(n),
}
}
// Vectored writes are very similar to the writes above, but adjusted for
// the list of buffers that we have to write.
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
if self.need_flush {
self.flush()?;
}
// Find the last newline, and failing that write the whole buffer
let last_newline = bufs
.iter()
.enumerate()
.rev()
.filter_map(|(i, buf)| {
let pos = memchr::memrchr(b'\n', buf)?;
Some((i, pos))
})
.next();
let (i, j) = match last_newline {
Some(pair) => pair,
None => return self.inner.write_vectored(bufs),
};
let (prefix, suffix) = bufs.split_at(i);
let (buf, suffix) = suffix.split_at(1);
let buf = &buf[0];
// Write everything up to the last newline, flushing afterwards. Note
// that only if we finished our entire `write_vectored` do we try the
// subsequent
// `write`
let mut n = 0;
let prefix_amt = prefix.iter().map(|i| i.len()).sum();
if prefix_amt > 0 {
n += self.inner.write_vectored(prefix)?;
self.need_flush = true;
}
if n == prefix_amt {
match self.inner.write(&buf[..=j]) {
Ok(m) => n += m,
Err(e) if n == 0 => return Err(e),
Err(_) => return Ok(n),
}
self.need_flush = true;
}
if self.flush().is_err() || n != j + 1 + prefix_amt {
return Ok(n);
}
// ... and now write out everything remaining
match self.inner.write(&buf[j + 1..]) {
Ok(i) => n += i,
Err(_) => return Ok(n),
}
if suffix.iter().map(|s| s.len()).sum::<usize>() == 0 {
return Ok(n);
}
match self.inner.write_vectored(suffix) {
Ok(i) => Ok(n + i),
Err(_) => Ok(n),
}
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()?;
self.need_flush = false;
Ok(())
}
}
impl<W: Write> fmt::Debug for LineWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("LineWriter")
.field("writer", &self.inner.inner)
.field(
"buffer",
&format_args!("{}/{}", self.inner.buf.len(), self.inner.buf.capacity()),
)
.finish()
}
}
| t_ref(& | identifier_name |
buffered.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
//! Buffering wrappers for I/O traits
use crate::io::prelude::*;
use crate::error;
use crate::io::{
self, Error, ErrorKind, Initializer, IoSlice, IoSliceMut, SeekFrom, DEFAULT_BUF_SIZE,
};
use crate::memchr;
use core::cmp;
use core::fmt;
/// The `BufReader` struct adds buffering to any reader.
///
/// It can be excessively inefficient to work directly with a [`Read`] instance.
/// For example, every call to [`read`][`TcpStream::read`] on [`TcpStream`]
/// results in a system call. A `BufReader` performs large, infrequent reads on
/// the underlying [`Read`] and maintains an in-memory buffer of the results.
///
/// `BufReader` can improve the speed of programs that make *small* and
/// *repeated* read calls to the same file or network socket. It does not
/// help when reading very large amounts at once, or reading just one or a few
/// times. It also provides no advantage when reading from a source that is
/// already in memory, like a `Vec<u8>`.
///
/// When the `BufReader<R>` is dropped, the contents of its buffer will be
/// discarded. Creating multiple instances of a `BufReader<R>` on the same
/// stream can cause data loss. Reading from the underlying reader after
/// unwrapping the `BufReader<R>` with `BufReader::into_inner` can also cause
/// data loss.
///
/// [`Read`]: ../../std/io/trait.Read.html
/// [`TcpStream::read`]: ../../std/net/struct.TcpStream.html#method.read
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
///
pub struct BufReader<R> {
inner: R,
buf: Box<[u8]>,
pos: usize,
cap: usize,
}
impl<R: Read> BufReader<R> {
/// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: R) -> BufReader<R> {
BufReader::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufReader<R>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> {
unsafe {
let mut buffer = Vec::with_capacity(capacity);
buffer.set_len(capacity);
inner.initializer().initialize(&mut buffer);
BufReader { inner, buf: buffer.into_boxed_slice(), pos: 0, cap: 0 }
}
}
}
impl<R> BufReader<R> {
/// Gets a reference to the underlying reader.
///
/// It is inadvisable to directly read from the underlying reader.
///
pub fn get_ref(&self) -> &R {
&self.inner
}
/// Gets a mutable reference to the underlying reader.
///
/// It is inadvisable to directly read from the underlying reader.
///
pub fn get_mut(&mut self) -> &mut R {
&mut self.inner
}
/// Returns a reference to the internally buffered data.
///
/// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty.
///
pub fn buffer(&self) -> &[u8] {
&self.buf[self.pos..self.cap]
}
/// Returns the number of bytes the internal buffer can hold at once.
///
pub fn capacity(&self) -> usize {
self.buf.len()
}
/// Unwraps this `BufReader<R>`, returning the underlying reader.
///
/// Note that any leftover data in the internal buffer is lost. Therefore,
/// a following read from the underlying reader may lead to data loss.
///
pub fn into_inner(self) -> R {
self.inner
}
/// Invalidates all data in the internal buffer.
#[inline]
fn discard_buffer(&mut self) {
self.pos = 0;
self.cap = 0;
}
}
impl<R: Seek> BufReader<R> {
/// Seeks relative to the current position. If the new position lies within the buffer,
/// the buffer will not be flushed, allowing for more efficient seeks.
/// This method does not return the location of the underlying reader, so the caller
/// must track this information themselves if it is required.
pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
let pos = self.pos as u64;
if offset < 0 {
if let Some(new_pos) = pos.checked_sub((-offset) as u64) {
self.pos = new_pos as usize;
return Ok(());
}
} else {
if let Some(new_pos) = pos.checked_add(offset as u64) {
if new_pos <= self.cap as u64 {
self.pos = new_pos as usize;
return Ok(());
}
}
}
self.seek(SeekFrom::Current(offset)).map(drop)
}
}
impl<R: Read> Read for BufReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
// If we don't have any buffered data and we're doing a massive read
// (larger than our internal buffer), bypass our internal buffer
// entirely.
if self.pos == self.cap && buf.len() >= self.buf.len() {
self.discard_buffer();
return self.inner.read(buf);
}
let nread = {
let mut rem = self.fill_buf()?;
rem.read(buf)?
};
self.consume(nread);
Ok(nread)
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.pos == self.cap && total_len >= self.buf.len() {
self.discard_buffer();
return self.inner.read_vectored(bufs);
}
let nread = {
let mut rem = self.fill_buf()?;
rem.read_vectored(bufs)?
};
self.consume(nread);
Ok(nread)
}
// we can't skip unconditionally because of the large buffer case in read.
unsafe fn initializer(&self) -> Initializer {
self.inner.initializer()
}
}
impl<R: Read> BufRead for BufReader<R> {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
// If we've reached the end of our internal buffer then we need to fetch
// some more data from the underlying reader.
// Branch using `>=` instead of the more correct `==`
// to tell the compiler that the pos..cap slice is always valid.
if self.pos >= self.cap {
debug_assert!(self.pos == self.cap);
self.cap = self.inner.read(&mut self.buf)?;
self.pos = 0;
}
Ok(&self.buf[self.pos..self.cap])
}
fn consume(&mut self, amt: usize) {
self.pos = cmp::min(self.pos + amt, self.cap);
}
}
impl<R> fmt::Debug for BufReader<R>
where
R: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufReader")
.field("reader", &self.inner)
.field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len()))
.finish()
}
}
impl<R: Seek> Seek for BufReader<R> {
/// Seek to an offset, in bytes, in the underlying reader.
///
/// The position used for seeking with `SeekFrom::Current(_)` is the
/// position the underlying reader would be at if the `BufReader<R>` had no
/// internal buffer.
///
/// Seeking always discards the internal buffer, even if the seek position
/// would otherwise fall within it. This guarantees that calling
/// `.into_inner()` immediately after a seek yields the underlying reader
/// at the same position.
///
/// To seek without discarding the internal buffer, use [`BufReader::seek_relative`].
///
/// See [`std::io::Seek`] for more details.
///
/// Note: In the edge case where you're seeking with `SeekFrom::Current(n)`
/// where `n` minus the internal buffer length overflows an `i64`, two
/// seeks will be performed instead of one. If the second seek returns
/// `Err`, the underlying reader will be left at the same position it would
/// have if you called `seek` with `SeekFrom::Current(0)`.
///
/// [`BufReader::seek_relative`]: struct.BufReader.html#method.seek_relative
/// [`std::io::Seek`]: trait.Seek.html
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let result: u64;
if let SeekFrom::Current(n) = pos {
let remainder = (self.cap - self.pos) as i64;
// it should be safe to assume that remainder fits within an i64 as the alternative
// means we managed to allocate 8 exbibytes and that's absurd.
// But it's not out of the realm of possibility for some weird underlying reader to
// support seeking by i64::min_value() so we need to handle underflow when subtracting
// remainder.
if let Some(offset) = n.checked_sub(remainder) {
result = self.inner.seek(SeekFrom::Current(offset))?;
} else {
// seek backwards by our remainder, and then by the offset
self.inner.seek(SeekFrom::Current(-remainder))?;
self.discard_buffer();
result = self.inner.seek(SeekFrom::Current(n))?;
}
} else {
// Seeking with Start/End doesn't care about our buffer length.
result = self.inner.seek(pos)?;
}
self.discard_buffer();
Ok(result)
}
}
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`Write`]. For example, every call to
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
/// writer in large, infrequent batches.
///
/// `BufWriter<W>` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
/// together by the buffer and will all be written out in one system call when
/// the `stream` is flushed.
///
/// [`Write`]: ../../std/io/trait.Write.html
/// [`TcpStream::write`]: ../../std/net/struct.TcpStream.html#method.write
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
/// [`flush`]: #method.flush
pub struct BufWriter<W: Write> {
inner: Option<W>,
buf: Vec<u8>,
// #30888: If the inner writer panics in a call to write, we don't want to
// write the buffered data a second time in BufWriter's destructor. This
// flag tells the Drop impl if it should skip the flush.
panicked: bool,
}
/// An error returned by `into_inner` which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
///
#[derive(Debug)]
pub struct IntoInnerError<W>(W, Error);
impl<W: Write> BufWriter<W> {
/// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: W) -> BufWriter<W> {
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter<W>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false }
}
fn flush_buf(&mut self) -> io::Result<()> {
let mut written = 0;
let len = self.buf.len();
let mut ret = Ok(());
while written < len {
self.panicked = true;
let r = self.inner.as_mut().unwrap().write(&self.buf[written..]);
self.panicked = false;
match r {
Ok(0) => {
ret =
Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data"));
break;
}
Ok(n) => written += n,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => {
ret = Err(e);
break;
}
}
}
if written > 0 {
self.buf.drain(..written);
}
ret
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.as_ref().unwrap()
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.as_mut().unwrap()
}
/// Returns a reference to the internally buffered data.
///
pub fn buffer(&self) -> &[u8] {
&self.buf
}
/// Returns the number of bytes the internal buffer can hold without flushing.
///
pub fn capacity(&self) -> usize {
self.buf.capacity()
}
/// Unwraps this `BufWriter<W>`, returning the underlying writer.
///
/// The buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
match self.flush_buf() {
Err(e) => Err(IntoInnerError(self, e)),
Ok(()) => Ok(self.inner.take().unwrap()),
}
}
}
impl<W: Write> Write for BufWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.buf.len() + buf.len() > self.buf.capacity() {
self.flush_buf()?;
}
if buf.len() >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write(buf);
self.panicked = false;
r
} else {
self.buf.write(buf)
}
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() |
if total_len >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write_vectored(bufs);
self.panicked = false;
r
} else {
self.buf.write_vectored(bufs)
}
}
fn flush(&mut self) -> io::Result<()> {
self.flush_buf().and_then(|()| self.get_mut().flush())
}
}
impl<W: Write> fmt::Debug for BufWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufWriter")
.field("writer", &self.inner.as_ref().unwrap())
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.finish()
}
}
impl<W: Write + Seek> Seek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.flush_buf().and_then(|_| self.get_mut().seek(pos))
}
}
impl<W: Write> Drop for BufWriter<W> {
fn drop(&mut self) {
if self.inner.is_some() && !self.panicked {
// dtors should not panic, so we ignore a failed flush
let _r = self.flush_buf();
}
}
}
impl<W> IntoInnerError<W> {
/// Returns the error which caused the call to `into_inner()` to fail.
///
/// This error was returned when attempting to write the internal buffer.
///
pub fn error(&self) -> &Error {
&self.1
}
/// Returns the buffered writer instance which generated the error.
///
/// The returned object can be used for error recovery, such as
/// re-inspecting the buffer.
///
pub fn into_inner(self) -> W {
self.0
}
}
impl<W> From<IntoInnerError<W>> for Error {
fn from(iie: IntoInnerError<W>) -> Error {
iie.1
}
}
impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {
fn description(&self) -> &str {
error::Error::description(self.error())
}
}
impl<W> fmt::Display for IntoInnerError<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.error().fmt(f)
}
}
/// Wraps a writer and buffers output to it, flushing whenever a newline
/// (`0x0a`, `'\n'`) is detected.
///
/// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output.
/// But it only does this batched write when it goes out of scope, or when the
/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
/// does exactly that.
///
/// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the
/// `LineWriter` goes out of scope or when its internal buffer is full.
///
/// [bufwriter]: struct.BufWriter.html
///
/// If there's still a partial line in the buffer when the `LineWriter` is
/// dropped, it will flush those contents.
///
pub struct LineWriter<W: Write> {
inner: BufWriter<W>,
need_flush: bool,
}
impl<W: Write> LineWriter<W> {
/// Creates a new `LineWriter`.
///
pub fn new(inner: W) -> LineWriter<W> {
// Lines typically aren't that long, don't use a giant buffer
LineWriter::with_capacity(1024, inner)
}
/// Creates a new `LineWriter` with a specified capacity for the internal
/// buffer.
///
pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
LineWriter { inner: BufWriter::with_capacity(capacity, inner), need_flush: false }
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.get_ref()
}
/// Gets a mutable reference to the underlying writer.
///
/// Caution must be taken when calling methods on the mutable reference
/// returned as extra writes could corrupt the output stream.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.get_mut()
}
/// Unwraps this `LineWriter`, returning the underlying writer.
///
/// The internal buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
self.inner.into_inner().map_err(|IntoInnerError(buf, e)| {
IntoInnerError(LineWriter { inner: buf, need_flush: false }, e)
})
}
}
impl<W: Write> Write for LineWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.need_flush {
self.flush()?;
}
// Find the last newline character in the buffer provided. If found then
// we're going to write all the data up to that point and then flush,
// otherwise we just write the whole block to the underlying writer.
let i = match memchr::memrchr(b'\n', buf) {
Some(i) => i,
None => return self.inner.write(buf),
};
// Ok, we're going to write a partial amount of the data given first
// followed by flushing the newline. After we've successfully written
// some data then we *must* report that we wrote that data, so future
// errors are ignored. We set our internal `need_flush` flag, though, in
// case flushing fails and we need to try it first next time.
let n = self.inner.write(&buf[..=i])?;
self.need_flush = true;
if self.flush().is_err() || n != i + 1 {
return Ok(n);
}
// At this point we successfully wrote `i + 1` bytes and flushed it out,
// meaning that the entire line is now flushed out on the screen. While
// we can attempt to finish writing the rest of the data provided.
// Remember though that we ignore errors here as we've successfully
// written data, so we need to report that.
match self.inner.write(&buf[i + 1..]) {
Ok(i) => Ok(n + i),
Err(_) => Ok(n),
}
}
// Vectored writes are very similar to the writes above, but adjusted for
// the list of buffers that we have to write.
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
if self.need_flush {
self.flush()?;
}
// Find the last newline, and failing that write the whole buffer
let last_newline = bufs
.iter()
.enumerate()
.rev()
.filter_map(|(i, buf)| {
let pos = memchr::memrchr(b'\n', buf)?;
Some((i, pos))
})
.next();
let (i, j) = match last_newline {
Some(pair) => pair,
None => return self.inner.write_vectored(bufs),
};
let (prefix, suffix) = bufs.split_at(i);
let (buf, suffix) = suffix.split_at(1);
let buf = &buf[0];
// Write everything up to the last newline, flushing afterwards. Note
// that only if we finished our entire `write_vectored` do we try the
// subsequent
// `write`
let mut n = 0;
let prefix_amt = prefix.iter().map(|i| i.len()).sum();
if prefix_amt > 0 {
n += self.inner.write_vectored(prefix)?;
self.need_flush = true;
}
if n == prefix_amt {
match self.inner.write(&buf[..=j]) {
Ok(m) => n += m,
Err(e) if n == 0 => return Err(e),
Err(_) => return Ok(n),
}
self.need_flush = true;
}
if self.flush().is_err() || n != j + 1 + prefix_amt {
return Ok(n);
}
// ... and now write out everything remaining
match self.inner.write(&buf[j + 1..]) {
Ok(i) => n += i,
Err(_) => return Ok(n),
}
if suffix.iter().map(|s| s.len()).sum::<usize>() == 0 {
return Ok(n);
}
match self.inner.write_vectored(suffix) {
Ok(i) => Ok(n + i),
Err(_) => Ok(n),
}
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()?;
self.need_flush = false;
Ok(())
}
}
impl<W: Write> fmt::Debug for LineWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("LineWriter")
.field("writer", &self.inner.inner)
.field(
"buffer",
&format_args!("{}/{}", self.inner.buf.len(), self.inner.buf.capacity()),
)
.finish()
}
}
| {
self.flush_buf()?;
} | conditional_block |
buffered.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
//! Buffering wrappers for I/O traits
use crate::io::prelude::*;
use crate::error;
use crate::io::{
self, Error, ErrorKind, Initializer, IoSlice, IoSliceMut, SeekFrom, DEFAULT_BUF_SIZE,
};
use crate::memchr;
use core::cmp;
use core::fmt;
/// The `BufReader` struct adds buffering to any reader.
///
/// It can be excessively inefficient to work directly with a [`Read`] instance.
/// For example, every call to [`read`][`TcpStream::read`] on [`TcpStream`]
/// results in a system call. A `BufReader` performs large, infrequent reads on
/// the underlying [`Read`] and maintains an in-memory buffer of the results.
///
/// `BufReader` can improve the speed of programs that make *small* and
/// *repeated* read calls to the same file or network socket. It does not
/// help when reading very large amounts at once, or reading just one or a few
/// times. It also provides no advantage when reading from a source that is
/// already in memory, like a `Vec<u8>`.
///
/// When the `BufReader<R>` is dropped, the contents of its buffer will be
/// discarded. Creating multiple instances of a `BufReader<R>` on the same
/// stream can cause data loss. Reading from the underlying reader after
/// unwrapping the `BufReader<R>` with `BufReader::into_inner` can also cause
/// data loss.
///
/// [`Read`]: ../../std/io/trait.Read.html
/// [`TcpStream::read`]: ../../std/net/struct.TcpStream.html#method.read
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
///
pub struct BufReader<R> {
inner: R,
buf: Box<[u8]>,
pos: usize,
cap: usize,
}
impl<R: Read> BufReader<R> {
/// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: R) -> BufReader<R> {
BufReader::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufReader<R>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> {
unsafe {
let mut buffer = Vec::with_capacity(capacity);
buffer.set_len(capacity);
inner.initializer().initialize(&mut buffer);
BufReader { inner, buf: buffer.into_boxed_slice(), pos: 0, cap: 0 }
}
}
}
impl<R> BufReader<R> {
/// Gets a reference to the underlying reader.
///
/// It is inadvisable to directly read from the underlying reader.
///
pub fn get_ref(&self) -> &R {
&self.inner
}
/// Gets a mutable reference to the underlying reader.
///
/// It is inadvisable to directly read from the underlying reader.
///
pub fn get_mut(&mut self) -> &mut R {
&mut self.inner
}
/// Returns a reference to the internally buffered data.
///
/// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty.
///
pub fn buffer(&self) -> &[u8] {
&self.buf[self.pos..self.cap]
}
/// Returns the number of bytes the internal buffer can hold at once.
///
pub fn capacity(&self) -> usize {
self.buf.len()
}
/// Unwraps this `BufReader<R>`, returning the underlying reader.
///
/// Note that any leftover data in the internal buffer is lost. Therefore,
/// a following read from the underlying reader may lead to data loss.
///
pub fn into_inner(self) -> R {
self.inner
}
/// Invalidates all data in the internal buffer.
#[inline]
fn discard_buffer(&mut self) {
self.pos = 0;
self.cap = 0;
}
}
impl<R: Seek> BufReader<R> {
/// Seeks relative to the current position. If the new position lies within the buffer,
/// the buffer will not be flushed, allowing for more efficient seeks.
/// This method does not return the location of the underlying reader, so the caller
/// must track this information themselves if it is required.
pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
let pos = self.pos as u64;
if offset < 0 {
if let Some(new_pos) = pos.checked_sub((-offset) as u64) {
self.pos = new_pos as usize;
return Ok(());
}
} else {
if let Some(new_pos) = pos.checked_add(offset as u64) {
if new_pos <= self.cap as u64 {
self.pos = new_pos as usize;
return Ok(());
}
}
}
self.seek(SeekFrom::Current(offset)).map(drop)
}
}
impl<R: Read> Read for BufReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
// If we don't have any buffered data and we're doing a massive read
// (larger than our internal buffer), bypass our internal buffer
// entirely.
if self.pos == self.cap && buf.len() >= self.buf.len() {
self.discard_buffer();
return self.inner.read(buf);
}
let nread = {
let mut rem = self.fill_buf()?;
rem.read(buf)?
};
self.consume(nread);
Ok(nread)
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.pos == self.cap && total_len >= self.buf.len() {
self.discard_buffer();
return self.inner.read_vectored(bufs);
}
let nread = {
let mut rem = self.fill_buf()?;
rem.read_vectored(bufs)?
};
self.consume(nread);
Ok(nread)
}
// we can't skip unconditionally because of the large buffer case in read.
unsafe fn initializer(&self) -> Initializer {
self.inner.initializer()
}
}
impl<R: Read> BufRead for BufReader<R> {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
// If we've reached the end of our internal buffer then we need to fetch
// some more data from the underlying reader.
// Branch using `>=` instead of the more correct `==`
// to tell the compiler that the pos..cap slice is always valid.
if self.pos >= self.cap {
debug_assert!(self.pos == self.cap);
self.cap = self.inner.read(&mut self.buf)?;
self.pos = 0;
}
Ok(&self.buf[self.pos..self.cap])
}
fn consume(&mut self, amt: usize) {
self.pos = cmp::min(self.pos + amt, self.cap);
}
}
impl<R> fmt::Debug for BufReader<R>
where
R: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufReader")
.field("reader", &self.inner)
.field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len()))
.finish()
}
}
impl<R: Seek> Seek for BufReader<R> {
/// Seek to an offset, in bytes, in the underlying reader.
///
/// The position used for seeking with `SeekFrom::Current(_)` is the
/// position the underlying reader would be at if the `BufReader<R>` had no
/// internal buffer.
///
/// Seeking always discards the internal buffer, even if the seek position
/// would otherwise fall within it. This guarantees that calling
/// `.into_inner()` immediately after a seek yields the underlying reader
/// at the same position.
///
/// To seek without discarding the internal buffer, use [`BufReader::seek_relative`].
///
/// See [`std::io::Seek`] for more details.
///
/// Note: In the edge case where you're seeking with `SeekFrom::Current(n)`
/// where `n` minus the internal buffer length overflows an `i64`, two
/// seeks will be performed instead of one. If the second seek returns
/// `Err`, the underlying reader will be left at the same position it would
/// have if you called `seek` with `SeekFrom::Current(0)`.
///
/// [`BufReader::seek_relative`]: struct.BufReader.html#method.seek_relative
/// [`std::io::Seek`]: trait.Seek.html
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let result: u64;
if let SeekFrom::Current(n) = pos {
let remainder = (self.cap - self.pos) as i64;
// it should be safe to assume that remainder fits within an i64 as the alternative
// means we managed to allocate 8 exbibytes and that's absurd.
// But it's not out of the realm of possibility for some weird underlying reader to
// support seeking by i64::min_value() so we need to handle underflow when subtracting
// remainder.
if let Some(offset) = n.checked_sub(remainder) {
result = self.inner.seek(SeekFrom::Current(offset))?;
} else {
// seek backwards by our remainder, and then by the offset
self.inner.seek(SeekFrom::Current(-remainder))?;
self.discard_buffer();
result = self.inner.seek(SeekFrom::Current(n))?;
}
} else {
// Seeking with Start/End doesn't care about our buffer length.
result = self.inner.seek(pos)?;
}
self.discard_buffer();
Ok(result)
}
}
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`Write`]. For example, every call to
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
/// writer in large, infrequent batches.
///
/// `BufWriter<W>` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
/// together by the buffer and will all be written out in one system call when
/// the `stream` is flushed.
///
/// [`Write`]: ../../std/io/trait.Write.html
/// [`TcpStream::write`]: ../../std/net/struct.TcpStream.html#method.write
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
/// [`flush`]: #method.flush
pub struct BufWriter<W: Write> {
inner: Option<W>,
buf: Vec<u8>,
// #30888: If the inner writer panics in a call to write, we don't want to
// write the buffered data a second time in BufWriter's destructor. This
// flag tells the Drop impl if it should skip the flush.
panicked: bool,
}
/// An error returned by `into_inner` which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
///
#[derive(Debug)]
pub struct IntoInnerError<W>(W, Error);
impl<W: Write> BufWriter<W> {
/// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: W) -> BufWriter<W> {
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter<W>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false }
}
fn flush_buf(&mut self) -> io::Result<()> {
let mut written = 0;
let len = self.buf.len();
let mut ret = Ok(());
while written < len {
self.panicked = true;
let r = self.inner.as_mut().unwrap().write(&self.buf[written..]);
self.panicked = false;
match r {
Ok(0) => {
ret =
Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data"));
break;
}
Ok(n) => written += n,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => {
ret = Err(e);
break;
}
}
}
if written > 0 {
self.buf.drain(..written);
}
ret
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.as_ref().unwrap()
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.as_mut().unwrap()
}
/// Returns a reference to the internally buffered data.
///
pub fn buffer(&self) -> &[u8] {
&self.buf
}
/// Returns the number of bytes the internal buffer can hold without flushing.
///
pub fn capacity(&self) -> usize {
self.buf.capacity()
}
/// Unwraps this `BufWriter<W>`, returning the underlying writer.
///
/// The buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
match self.flush_buf() {
Err(e) => Err(IntoInnerError(self, e)),
Ok(()) => Ok(self.inner.take().unwrap()),
}
}
}
impl<W: Write> Write for BufWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.buf.len() + buf.len() > self.buf.capacity() {
self.flush_buf()?;
}
if buf.len() >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write(buf);
self.panicked = false;
r
} else {
self.buf.write(buf)
}
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
self.flush_buf()?;
}
if total_len >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write_vectored(bufs);
self.panicked = false;
r
} else {
self.buf.write_vectored(bufs)
}
}
fn flush(&mut self) -> io::Result<()> {
self.flush_buf().and_then(|()| self.get_mut().flush())
}
}
impl<W: Write> fmt::Debug for BufWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufWriter")
.field("writer", &self.inner.as_ref().unwrap())
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.finish()
}
}
impl<W: Write + Seek> Seek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.flush_buf().and_then(|_| self.get_mut().seek(pos))
}
}
impl<W: Write> Drop for BufWriter<W> {
fn drop(&mut self) {
if self.inner.is_some() && !self.panicked {
// dtors should not panic, so we ignore a failed flush
let _r = self.flush_buf();
}
}
}
impl<W> IntoInnerError<W> {
/// Returns the error which caused the call to `into_inner()` to fail.
///
/// This error was returned when attempting to write the internal buffer.
///
pub fn error(&self) -> &Error {
&self.1
}
/// Returns the buffered writer instance which generated the error.
///
/// The returned object can be used for error recovery, such as
/// re-inspecting the buffer.
///
pub fn into_inner(self) -> W {
self.0
}
}
impl<W> From<IntoInnerError<W>> for Error {
fn from(iie: IntoInnerError<W>) -> Error {
iie.1
}
}
impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {
fn description(&self) -> &str {
error::Error::description(self.error())
}
}
impl<W> fmt::Display for IntoInnerError<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.error().fmt(f)
}
}
/// Wraps a writer and buffers output to it, flushing whenever a newline
/// (`0x0a`, `'\n'`) is detected.
///
/// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output.
/// But it only does this batched write when it goes out of scope, or when the
/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
/// does exactly that.
///
/// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the
/// `LineWriter` goes out of scope or when its internal buffer is full.
///
/// [bufwriter]: struct.BufWriter.html
///
/// If there's still a partial line in the buffer when the `LineWriter` is
/// dropped, it will flush those contents.
///
pub struct LineWriter<W: Write> {
inner: BufWriter<W>,
need_flush: bool,
}
impl<W: Write> LineWriter<W> {
/// Creates a new `LineWriter`.
///
pub fn new(inner: W) -> LineWriter<W> {
// Lines typically aren't that long, don't use a giant buffer
LineWriter::with_capacity(1024, inner)
}
/// Creates a new `LineWriter` with a specified capacity for the internal
/// buffer.
///
pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
LineWriter { inner: BufWriter::with_capacity(capacity, inner), need_flush: false }
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.get_ref()
}
/// Gets a mutable reference to the underlying writer.
///
/// Caution must be taken when calling methods on the mutable reference
/// returned as extra writes could corrupt the output stream.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.get_mut()
}
/// Unwraps this `LineWriter`, returning the underlying writer.
///
/// The internal buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
|
impl<W: Write> Write for LineWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.need_flush {
self.flush()?;
}
// Find the last newline character in the buffer provided. If found then
// we're going to write all the data up to that point and then flush,
// otherwise we just write the whole block to the underlying writer.
let i = match memchr::memrchr(b'\n', buf) {
Some(i) => i,
None => return self.inner.write(buf),
};
// Ok, we're going to write a partial amount of the data given first
// followed by flushing the newline. After we've successfully written
// some data then we *must* report that we wrote that data, so future
// errors are ignored. We set our internal `need_flush` flag, though, in
// case flushing fails and we need to try it first next time.
let n = self.inner.write(&buf[..=i])?;
self.need_flush = true;
if self.flush().is_err() || n != i + 1 {
return Ok(n);
}
// At this point we successfully wrote `i + 1` bytes and flushed it out,
// meaning that the entire line is now flushed out on the screen. While
// we can attempt to finish writing the rest of the data provided.
// Remember though that we ignore errors here as we've successfully
// written data, so we need to report that.
match self.inner.write(&buf[i + 1..]) {
Ok(i) => Ok(n + i),
Err(_) => Ok(n),
}
}
// Vectored writes are very similar to the writes above, but adjusted for
// the list of buffers that we have to write.
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
if self.need_flush {
self.flush()?;
}
// Find the last newline, and failing that write the whole buffer
let last_newline = bufs
.iter()
.enumerate()
.rev()
.filter_map(|(i, buf)| {
let pos = memchr::memrchr(b'\n', buf)?;
Some((i, pos))
})
.next();
let (i, j) = match last_newline {
Some(pair) => pair,
None => return self.inner.write_vectored(bufs),
};
let (prefix, suffix) = bufs.split_at(i);
let (buf, suffix) = suffix.split_at(1);
let buf = &buf[0];
// Write everything up to the last newline, flushing afterwards. Note
// that only if we finished our entire `write_vectored` do we try the
// subsequent
// `write`
let mut n = 0;
let prefix_amt = prefix.iter().map(|i| i.len()).sum();
if prefix_amt > 0 {
n += self.inner.write_vectored(prefix)?;
self.need_flush = true;
}
if n == prefix_amt {
match self.inner.write(&buf[..=j]) {
Ok(m) => n += m,
Err(e) if n == 0 => return Err(e),
Err(_) => return Ok(n),
}
self.need_flush = true;
}
if self.flush().is_err() || n != j + 1 + prefix_amt {
return Ok(n);
}
// ... and now write out everything remaining
match self.inner.write(&buf[j + 1..]) {
Ok(i) => n += i,
Err(_) => return Ok(n),
}
if suffix.iter().map(|s| s.len()).sum::<usize>() == 0 {
return Ok(n);
}
match self.inner.write_vectored(suffix) {
Ok(i) => Ok(n + i),
Err(_) => Ok(n),
}
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()?;
self.need_flush = false;
Ok(())
}
}
impl<W: Write> fmt::Debug for LineWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("LineWriter")
.field("writer", &self.inner.inner)
.field(
"buffer",
&format_args!("{}/{}", self.inner.buf.len(), self.inner.buf.capacity()),
)
.finish()
}
}
| self.inner.into_inner().map_err(|IntoInnerError(buf, e)| {
IntoInnerError(LineWriter { inner: buf, need_flush: false }, e)
})
}
} | identifier_body |
buffered.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
//! Buffering wrappers for I/O traits
use crate::io::prelude::*;
use crate::error;
use crate::io::{
self, Error, ErrorKind, Initializer, IoSlice, IoSliceMut, SeekFrom, DEFAULT_BUF_SIZE,
};
use crate::memchr;
use core::cmp;
use core::fmt;
/// The `BufReader` struct adds buffering to any reader.
///
/// It can be excessively inefficient to work directly with a [`Read`] instance.
/// For example, every call to [`read`][`TcpStream::read`] on [`TcpStream`]
/// results in a system call. A `BufReader` performs large, infrequent reads on
/// the underlying [`Read`] and maintains an in-memory buffer of the results.
///
/// `BufReader` can improve the speed of programs that make *small* and
/// *repeated* read calls to the same file or network socket. It does not
/// help when reading very large amounts at once, or reading just one or a few
/// times. It also provides no advantage when reading from a source that is
/// already in memory, like a `Vec<u8>`.
///
/// When the `BufReader<R>` is dropped, the contents of its buffer will be
/// discarded. Creating multiple instances of a `BufReader<R>` on the same
/// stream can cause data loss. Reading from the underlying reader after
/// unwrapping the `BufReader<R>` with `BufReader::into_inner` can also cause
/// data loss.
///
/// [`Read`]: ../../std/io/trait.Read.html
/// [`TcpStream::read`]: ../../std/net/struct.TcpStream.html#method.read
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
///
pub struct BufReader<R> {
inner: R,
buf: Box<[u8]>,
pos: usize,
cap: usize,
}
impl<R: Read> BufReader<R> {
/// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: R) -> BufReader<R> {
BufReader::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufReader<R>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> {
unsafe {
let mut buffer = Vec::with_capacity(capacity);
buffer.set_len(capacity);
inner.initializer().initialize(&mut buffer);
BufReader { inner, buf: buffer.into_boxed_slice(), pos: 0, cap: 0 }
}
}
}
impl<R> BufReader<R> {
/// Gets a reference to the underlying reader.
///
/// It is inadvisable to directly read from the underlying reader.
///
pub fn get_ref(&self) -> &R {
&self.inner
}
/// Gets a mutable reference to the underlying reader.
///
/// It is inadvisable to directly read from the underlying reader.
///
pub fn get_mut(&mut self) -> &mut R {
&mut self.inner
}
/// Returns a reference to the internally buffered data.
///
/// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty.
///
pub fn buffer(&self) -> &[u8] {
&self.buf[self.pos..self.cap]
}
/// Returns the number of bytes the internal buffer can hold at once.
///
pub fn capacity(&self) -> usize {
self.buf.len()
}
/// Unwraps this `BufReader<R>`, returning the underlying reader.
///
/// Note that any leftover data in the internal buffer is lost. Therefore,
/// a following read from the underlying reader may lead to data loss.
///
pub fn into_inner(self) -> R {
self.inner
}
/// Invalidates all data in the internal buffer.
#[inline]
fn discard_buffer(&mut self) {
self.pos = 0;
self.cap = 0;
}
}
impl<R: Seek> BufReader<R> {
/// Seeks relative to the current position. If the new position lies within the buffer,
/// the buffer will not be flushed, allowing for more efficient seeks.
/// This method does not return the location of the underlying reader, so the caller
/// must track this information themselves if it is required.
pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> {
let pos = self.pos as u64;
if offset < 0 {
if let Some(new_pos) = pos.checked_sub((-offset) as u64) {
self.pos = new_pos as usize;
return Ok(());
}
} else {
if let Some(new_pos) = pos.checked_add(offset as u64) {
if new_pos <= self.cap as u64 {
self.pos = new_pos as usize;
return Ok(());
}
}
}
self.seek(SeekFrom::Current(offset)).map(drop)
}
}
impl<R: Read> Read for BufReader<R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
// If we don't have any buffered data and we're doing a massive read
// (larger than our internal buffer), bypass our internal buffer
// entirely.
if self.pos == self.cap && buf.len() >= self.buf.len() {
self.discard_buffer();
return self.inner.read(buf);
}
let nread = {
let mut rem = self.fill_buf()?;
rem.read(buf)?
};
self.consume(nread);
Ok(nread)
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.pos == self.cap && total_len >= self.buf.len() {
self.discard_buffer();
return self.inner.read_vectored(bufs);
}
let nread = {
let mut rem = self.fill_buf()?;
rem.read_vectored(bufs)?
};
self.consume(nread);
Ok(nread)
}
// we can't skip unconditionally because of the large buffer case in read.
unsafe fn initializer(&self) -> Initializer {
self.inner.initializer()
}
}
impl<R: Read> BufRead for BufReader<R> {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
// If we've reached the end of our internal buffer then we need to fetch
// some more data from the underlying reader.
// Branch using `>=` instead of the more correct `==`
// to tell the compiler that the pos..cap slice is always valid.
if self.pos >= self.cap {
debug_assert!(self.pos == self.cap);
self.cap = self.inner.read(&mut self.buf)?;
self.pos = 0;
}
Ok(&self.buf[self.pos..self.cap])
}
fn consume(&mut self, amt: usize) {
self.pos = cmp::min(self.pos + amt, self.cap);
}
}
impl<R> fmt::Debug for BufReader<R>
where
R: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufReader")
.field("reader", &self.inner)
.field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len()))
.finish()
}
}
impl<R: Seek> Seek for BufReader<R> {
/// Seek to an offset, in bytes, in the underlying reader.
///
/// The position used for seeking with `SeekFrom::Current(_)` is the
/// position the underlying reader would be at if the `BufReader<R>` had no
/// internal buffer.
///
/// Seeking always discards the internal buffer, even if the seek position
/// would otherwise fall within it. This guarantees that calling
/// `.into_inner()` immediately after a seek yields the underlying reader
/// at the same position.
///
/// To seek without discarding the internal buffer, use [`BufReader::seek_relative`].
///
/// See [`std::io::Seek`] for more details.
///
/// Note: In the edge case where you're seeking with `SeekFrom::Current(n)`
/// where `n` minus the internal buffer length overflows an `i64`, two
/// seeks will be performed instead of one. If the second seek returns
/// `Err`, the underlying reader will be left at the same position it would
/// have if you called `seek` with `SeekFrom::Current(0)`.
///
/// [`BufReader::seek_relative`]: struct.BufReader.html#method.seek_relative
/// [`std::io::Seek`]: trait.Seek.html
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
let result: u64;
if let SeekFrom::Current(n) = pos {
let remainder = (self.cap - self.pos) as i64;
// it should be safe to assume that remainder fits within an i64 as the alternative
// means we managed to allocate 8 exbibytes and that's absurd.
// But it's not out of the realm of possibility for some weird underlying reader to
// support seeking by i64::min_value() so we need to handle underflow when subtracting
// remainder.
if let Some(offset) = n.checked_sub(remainder) {
result = self.inner.seek(SeekFrom::Current(offset))?;
} else {
// seek backwards by our remainder, and then by the offset
self.inner.seek(SeekFrom::Current(-remainder))?;
self.discard_buffer();
result = self.inner.seek(SeekFrom::Current(n))?;
}
} else {
// Seeking with Start/End doesn't care about our buffer length.
result = self.inner.seek(pos)?;
}
self.discard_buffer();
Ok(result)
}
}
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`Write`]. For example, every call to
/// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A
/// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying
/// writer in large, infrequent batches.
///
/// `BufWriter<W>` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though
/// dropping will attempt to flush the the contents of the buffer, any errors
/// that happen in the process of dropping will be ignored. Calling [`flush`]
/// ensures that the buffer is empty and thus dropping will not even attempt
/// file operations.
///
/// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped
/// together by the buffer and will all be written out in one system call when
/// the `stream` is flushed.
///
/// [`Write`]: ../../std/io/trait.Write.html
/// [`TcpStream::write`]: ../../std/net/struct.TcpStream.html#method.write
/// [`TcpStream`]: ../../std/net/struct.TcpStream.html
/// [`flush`]: #method.flush
pub struct BufWriter<W: Write> {
inner: Option<W>,
buf: Vec<u8>,
// #30888: If the inner writer panics in a call to write, we don't want to
// write the buffered data a second time in BufWriter's destructor. This
// flag tells the Drop impl if it should skip the flush.
panicked: bool,
}
/// An error returned by `into_inner` which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
///
#[derive(Debug)]
pub struct IntoInnerError<W>(W, Error);
impl<W: Write> BufWriter<W> {
/// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
///
pub fn new(inner: W) -> BufWriter<W> {
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter<W>` with the specified buffer capacity.
///
pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> {
BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false }
}
fn flush_buf(&mut self) -> io::Result<()> {
let mut written = 0;
let len = self.buf.len();
let mut ret = Ok(());
while written < len {
self.panicked = true;
let r = self.inner.as_mut().unwrap().write(&self.buf[written..]);
self.panicked = false;
match r {
Ok(0) => {
ret =
Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data"));
break;
}
Ok(n) => written += n,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(e) => {
ret = Err(e);
break;
}
}
}
if written > 0 {
self.buf.drain(..written);
}
ret
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.as_ref().unwrap()
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.as_mut().unwrap()
}
/// Returns a reference to the internally buffered data.
///
pub fn buffer(&self) -> &[u8] {
&self.buf
}
/// Returns the number of bytes the internal buffer can hold without flushing.
///
pub fn capacity(&self) -> usize {
self.buf.capacity()
}
/// Unwraps this `BufWriter<W>`, returning the underlying writer.
///
/// The buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> {
match self.flush_buf() {
Err(e) => Err(IntoInnerError(self, e)),
Ok(()) => Ok(self.inner.take().unwrap()),
}
}
}
impl<W: Write> Write for BufWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.buf.len() + buf.len() > self.buf.capacity() {
self.flush_buf()?;
}
if buf.len() >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write(buf);
self.panicked = false;
r
} else {
self.buf.write(buf)
}
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
self.flush_buf()?;
}
if total_len >= self.buf.capacity() {
self.panicked = true;
let r = self.get_mut().write_vectored(bufs);
self.panicked = false;
r
} else {
self.buf.write_vectored(bufs)
}
}
fn flush(&mut self) -> io::Result<()> {
self.flush_buf().and_then(|()| self.get_mut().flush())
}
}
impl<W: Write> fmt::Debug for BufWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("BufWriter")
.field("writer", &self.inner.as_ref().unwrap())
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.finish()
}
}
impl<W: Write + Seek> Seek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
self.flush_buf().and_then(|_| self.get_mut().seek(pos))
}
}
impl<W: Write> Drop for BufWriter<W> {
fn drop(&mut self) {
if self.inner.is_some() && !self.panicked {
// dtors should not panic, so we ignore a failed flush
let _r = self.flush_buf();
}
}
}
impl<W> IntoInnerError<W> {
/// Returns the error which caused the call to `into_inner()` to fail.
///
/// This error was returned when attempting to write the internal buffer.
///
pub fn error(&self) -> &Error {
&self.1
}
/// Returns the buffered writer instance which generated the error.
///
/// The returned object can be used for error recovery, such as
/// re-inspecting the buffer.
///
pub fn into_inner(self) -> W {
self.0
}
}
impl<W> From<IntoInnerError<W>> for Error {
fn from(iie: IntoInnerError<W>) -> Error {
iie.1
}
}
impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> {
fn description(&self) -> &str {
error::Error::description(self.error())
}
}
impl<W> fmt::Display for IntoInnerError<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.error().fmt(f)
}
}
/// Wraps a writer and buffers output to it, flushing whenever a newline
/// (`0x0a`, `'\n'`) is detected.
///
/// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output.
/// But it only does this batched write when it goes out of scope, or when the
/// internal buffer is full. Sometimes, you'd prefer to write each line as it's
/// completed, rather than the entire buffer at once. Enter `LineWriter`. It
/// does exactly that.
///
/// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the
/// `LineWriter` goes out of scope or when its internal buffer is full.
///
/// [bufwriter]: struct.BufWriter.html
///
/// If there's still a partial line in the buffer when the `LineWriter` is
/// dropped, it will flush those contents.
/// | }
impl<W: Write> LineWriter<W> {
/// Creates a new `LineWriter`.
///
pub fn new(inner: W) -> LineWriter<W> {
// Lines typically aren't that long, don't use a giant buffer
LineWriter::with_capacity(1024, inner)
}
/// Creates a new `LineWriter` with a specified capacity for the internal
/// buffer.
///
pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> {
LineWriter { inner: BufWriter::with_capacity(capacity, inner), need_flush: false }
}
/// Gets a reference to the underlying writer.
///
pub fn get_ref(&self) -> &W {
self.inner.get_ref()
}
/// Gets a mutable reference to the underlying writer.
///
/// Caution must be taken when calling methods on the mutable reference
/// returned as extra writes could corrupt the output stream.
///
pub fn get_mut(&mut self) -> &mut W {
self.inner.get_mut()
}
/// Unwraps this `LineWriter`, returning the underlying writer.
///
/// The internal buffer is written out before returning the writer.
///
/// # Errors
///
/// An `Err` will be returned if an error occurs while flushing the buffer.
///
pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
self.inner.into_inner().map_err(|IntoInnerError(buf, e)| {
IntoInnerError(LineWriter { inner: buf, need_flush: false }, e)
})
}
}
impl<W: Write> Write for LineWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.need_flush {
self.flush()?;
}
// Find the last newline character in the buffer provided. If found then
// we're going to write all the data up to that point and then flush,
// otherwise we just write the whole block to the underlying writer.
let i = match memchr::memrchr(b'\n', buf) {
Some(i) => i,
None => return self.inner.write(buf),
};
// Ok, we're going to write a partial amount of the data given first
// followed by flushing the newline. After we've successfully written
// some data then we *must* report that we wrote that data, so future
// errors are ignored. We set our internal `need_flush` flag, though, in
// case flushing fails and we need to try it first next time.
let n = self.inner.write(&buf[..=i])?;
self.need_flush = true;
if self.flush().is_err() || n != i + 1 {
return Ok(n);
}
// At this point we successfully wrote `i + 1` bytes and flushed it out,
// meaning that the entire line is now flushed out on the screen. While
// we can attempt to finish writing the rest of the data provided.
// Remember though that we ignore errors here as we've successfully
// written data, so we need to report that.
match self.inner.write(&buf[i + 1..]) {
Ok(i) => Ok(n + i),
Err(_) => Ok(n),
}
}
// Vectored writes are very similar to the writes above, but adjusted for
// the list of buffers that we have to write.
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
if self.need_flush {
self.flush()?;
}
// Find the last newline, and failing that write the whole buffer
let last_newline = bufs
.iter()
.enumerate()
.rev()
.filter_map(|(i, buf)| {
let pos = memchr::memrchr(b'\n', buf)?;
Some((i, pos))
})
.next();
let (i, j) = match last_newline {
Some(pair) => pair,
None => return self.inner.write_vectored(bufs),
};
let (prefix, suffix) = bufs.split_at(i);
let (buf, suffix) = suffix.split_at(1);
let buf = &buf[0];
// Write everything up to the last newline, flushing afterwards. Note
// that only if we finished our entire `write_vectored` do we try the
// subsequent
// `write`
let mut n = 0;
let prefix_amt = prefix.iter().map(|i| i.len()).sum();
if prefix_amt > 0 {
n += self.inner.write_vectored(prefix)?;
self.need_flush = true;
}
if n == prefix_amt {
match self.inner.write(&buf[..=j]) {
Ok(m) => n += m,
Err(e) if n == 0 => return Err(e),
Err(_) => return Ok(n),
}
self.need_flush = true;
}
if self.flush().is_err() || n != j + 1 + prefix_amt {
return Ok(n);
}
// ... and now write out everything remaining
match self.inner.write(&buf[j + 1..]) {
Ok(i) => n += i,
Err(_) => return Ok(n),
}
if suffix.iter().map(|s| s.len()).sum::<usize>() == 0 {
return Ok(n);
}
match self.inner.write_vectored(suffix) {
Ok(i) => Ok(n + i),
Err(_) => Ok(n),
}
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()?;
self.need_flush = false;
Ok(())
}
}
impl<W: Write> fmt::Debug for LineWriter<W>
where
W: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("LineWriter")
.field("writer", &self.inner.inner)
.field(
"buffer",
&format_args!("{}/{}", self.inner.buf.len(), self.inner.buf.capacity()),
)
.finish()
}
} | pub struct LineWriter<W: Write> {
inner: BufWriter<W>,
need_flush: bool, | random_line_split |
util.ts | import { Player } from "@/model/Api";
import { parseISO } from "date-fns";
import { MapMetaMap, ModeMetaMap } from "~/model/MetaEntry";
export const camelToSnakeCase = (str: string) => str.replace(/[A-Z]/g, letter => `_${letter.toLowerCase()}`);
export const camelToKebab = (s: string) =>
s.replace(/([a-z0-9]|(?=[A-Z]))([A-Z])/g, '$1-$2').toLowerCase();
export const kebabToCamel = (s: string) =>
s.replace(/([-_][a-z])/ig, ($1) => $1.toUpperCase()
.replace('-', '')
.replace('_', ''))
export const capitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toUpperCase());
export const decapitalizeFirstLetter = (str: string) => str.charAt(0).toLowerCase() + str.slice(1)
export const capitalizeWords = (str: string) => str.replace(/(?:^|\s|["'([{])+\S/g, match => match.toUpperCase())
export const slugify = (str: string) => str.replace(/-/g, '--').replace(/ /g, '-')
export const deslugify = (str: string) => str.replace(/-/g, ' ').replace(/ /g, '-')
export function scaleMinMax(values: number[]) {
const min = Math.min.apply(Math, values)
const max = Math.max.apply(Math, values)
if (min === max) {
return values.map(value => 0.5)
}
return values.map(value => (value - min) / (max - min))
}
export function zip<T>(arr1: T[], arr2: T[]) {
return arr1.map((value, index) => [value, arr2[index]])
}
export function hoursSinceDate(date: string) {
const then = Date.parse(date)
const now = (new Date()).getTime()
return Math.floor((now - then) / 1000 / 3600)
}
export const brawlerId = (entry: { name: string }) =>
entry.name.replace(/\.| /g, '_').toLowerCase()
export function formatMode(mode: string) {
return camelToSnakeCase(mode)
.split('_')
.map(w => capitalize(w))
.join(' ')
}
export function unformatMode(mode: string) {
const uncapitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toLowerCase())
return uncapitalize(mode.replace(/^Showdown$/, 'Solo Showdown').split(' ').join(''))
}
export const formatList = (l: string[], joiner = 'or') => l.slice(0, l.length - 1).join(', ') + ' ' + joiner + ' ' + l[l.length - 1]
export const clamp = (min: number, max: number, n: number) => Math.min(max, Math.max(min, n))
export const minMaxScale = (fromMin: number, fromMax: number, n: number) => (n - fromMin) / (fromMax - fromMin)
export const scaleInto = (fromMin: number, fromMax: number, toMax: number, n: number) => clamp(0, toMax, Math.floor(minMaxScale(fromMin, fromMax, n) * toMax))
export function xpToHours(xp: number) {
return xp / 220; // 145h for 30300 XP as measured by @schneefux
}
/**
* Suffix num with SI unit
* @param num number
* @param digits digits after comma
*/
export function formatSI(num: number, digits: number) {
const si = [
{ value: 1, symbol: '' },
{ value: 1E3, symbol: 'k' },
{ value: 1E6, symbol: 'M' },
]
const rx = /\.0+$|(\.[0-9]*[1-9])0+$/
let i
for (i = si.length - 1; i > 0; i--) {
if (num >= si[i].value) {
break
}
}
return Math.round(num / si[i].value)
.toFixed(digits)
.replace(rx, '$1') + si[i].symbol
}
const propPriority = ['winRateAdj', 'winRate', 'wins', 'rank1', 'duration', 'useRate', 'pickRate']
/**
* Get brawlers by event: {
* [eventId]: [
* brawler id,
* brawler name,
* brawler stats,
* sort prop
* ] }
* sorted by the preferred prop according to propPriority
*/
export function getBest(meta: MapMetaMap|ModeMetaMap): { [key: string]: unknown[] } {
return [...Object.entries(meta)]
.reduce((top, [key, entry]) => ({
...top,
[key]: [...Object.entries(entry.brawlers)]
.map(([brawlerId, brawler]) => ({
id: brawlerId,
title: brawler.name,
brawler: brawlerId,
sampleSize: brawler.sampleSize,
stats: brawler.stats,
sortProp: <string>propPriority.find(prop => prop in brawler.stats),
}))
.sort((brawler1, brawler2) => brawler2.stats[brawler2.sortProp] - brawler1.stats[brawler1.sortProp])
}), {})
}
export function getBestBrawlers(brawlers: any[]): any[] {
const sampleSizeThreshold = 300
brawlers = brawlers.filter(brawler => brawler.sampleSize >= sampleSizeThreshold)
if (brawlers.length == 0) {
return []
}
const sortProp = <string>propPriority.find(prop => prop in brawlers[0].stats)
brawlers.sort((brawler1, brawler2) => brawler2.stats[sortProp] - brawler1.stats[sortProp])
return brawlers
}
interface EventMetadata {
id: string
map: string
mode: string
start?: string
end?: string
}
export function formatAsJsonLd(event: EventMetadata, mediaUrl: string) {
const url = `/tier-list/mode/${slugify(event.mode.toLowerCase())}/map/${slugify(event.map)}`
return {
'@context': 'https://schema.org',
'@type': 'Event',
'name': `${event.mode} - ${event.map}`,
...(event.start != undefined ? {
'startDate': event.start,
} : {}),
...(event.end != undefined ? {
'endDate': event.end!,
} : {}),
'eventAttendanceMode': 'https://schema.org/OnlineEventAttendanceMode',
'eventStatus': 'https://schema.org/EventScheduled',
'url': url,
'image': [`${mediaUrl}/map/${event.id}.png`],
'location': {
'@type': 'VirtualLocation',
'url': url,
},
'description': `${event.map} is a Brawl Stars ${event.mode} map.`,
}
}
export function sloppyParseFloat(number: string) {
return Math.floor(parseFloat(number) * 10000) / 10000
}
/**
* Throw if a tag is invalid.
* Make sure tag starts with a hash.
*/
export function validateTag(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Invalid tag ' + tag)
}
if (!tag.startsWith('#')) {
return '#' + tag
}
return tag
}
// in clickhouse SQL (tag has to start with '#'):
/* | */
/**
* Encode tag string into 64bit unsigned integer string.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function tagToId(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Cannot encode tag ' + tag)
}
if (tag.startsWith('#')) {
tag = tag.substring(1)
}
const result = tag.split('').reduce((sum, c) => sum*14 + '0289PYLQGRJCUV'.indexOf(c), 0)
return result.toString()
}
/**
* Decode 64bit unsigned integer string into tag string with hash.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function idToTag(idString: string) {
let id = Number(idString)
let tag = ''
while (id != 0) {
const i = id % 14
tag = '0289PYLQGRJCUV'[i] + tag
id = Math.floor(id / 14)
}
return '#' + tag
}
/*
in SQL:
date_add(from_days(ceil(to_days(date_sub(date_sub(timestamp, interval 8 hour), interval 1 day)) / 14) * 14 + 2), interval 8 hour)
in clickhouse SQL:
addHours(addDays(toStartOfInterval(subtractDays(subtractHours(timestamp, 8), 4), interval 336 hour, 'UTC'), 14+4), 8)
*/
/**
* Round timestamp up to next legacy trophy season interval.
* Seasons used to be 2 weeks, this is what the database uses.
* @param timestamp
*/
export function getSeasonEnd(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/2)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*2)
return trophySeasonEnd
}
/**
* Round timestamp up to next new trophy season interval.
* Seasons are now 4 weeks.
* @param timestamp
*/
export function getSeasonEndNew(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/4)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*4)
return trophySeasonEnd
}
/*
* Round timestamp down to start of day.
* @param timestamp
*/
export function getCompetitionMapDayStart(timestamp: Date) {
const dayStart = new Date(Date.parse('2020-07-13T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.ceil(diff/1000/60/60/24)
dayStart.setUTCDate(dayStart.getUTCDate() + daysSince - 1)
return dayStart
}
export function getCompetitionWinnerMode(timestamp: Date) {
const order = ['duoShowdown', 'siege', 'hotZone', 'soloShowdown', 'brawlBall', 'bounty', 'heist', 'gemGrab']
const dayStart = new Date(Date.parse('2021-04-24T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.floor(diff/1000/60/60/24)
return order[daysSince % order.length]
}
/**
* Get the end date of the current and the last database-season
*/
export function getMonthSeasonEnd() {
const twoWeeksAgo = new Date()
twoWeeksAgo.setDate(twoWeeksAgo.getDate() - 14)
return getSeasonEnd(twoWeeksAgo)
}
/**
* Get the end date of the current database-season
*/
export function getTodaySeasonEnd() {
return getSeasonEnd(new Date())
}
export function parseClickhouse(timestamp: string) {
return parseISO(timestamp + 'Z')
}
export function formatClickhouse(timestamp: Date) {
return timestamp.toISOString()
.slice(0, 19) // remove fractions and time zone
.replace('T', ' ')
}
export function formatClickhouseDate(timestamp: Date) {
return timestamp.toISOString()
.slice(0, 10) // remove fractions, day and time zone
}
/** Parse API time format */
const parseTime = (time: string) => new Date(Date.parse(time))
export const parseApiTime = (time: string) => {
return parseTime(`${time.slice(0, 4)}-${time.slice(4, 6)}-${time.slice(6, 8)}T${time.slice(9, 11)}:${time.slice(11, 13)}:${time.slice(13)}`)
}
export function encodeQuery(data: { [key: string]: number|string }) {
const ret = [] as string[]
for (let d in data) {
ret.push(encodeURIComponent(d) + '=' + encodeURIComponent(data[d]))
}
return ret.join('&')
}
/*
* @returns true if a mode is a weekend mode
*/
export function isSpecialEvent(mode: string) {
return ['roboRumble', 'bigGame', 'superCity'].includes(mode)
}
export const getDotProp = (o: any, k: string) => k.split('.').reduce((a, b) => a[b], o)
// measured on 2020-11-01 with data from 2020-10-01
// select quantile(0.25)(player_trophies/player_brawlers_length), quantile(0.375)(player_trophies/player_brawlers_length), quantile(0.5)(player_trophies/player_brawlers_length), quantile(0.90)(player_trophies/player_brawlers_length), quantile(0.95)(player_trophies/player_brawlers_length), quantile(0.99)(player_trophies/player_brawlers_length) from battle where trophy_season_end>=now()-interval 28 day and timestamp>now()-interval 28 day and timestamp<now()-interval 27 day and battle_event_powerplay=0
export const ratingPercentiles = {
// key: percentile, trophy boundary
'?': [0, 480],
'D': [0.25, 500],
'C': [0.375, 520],
'B': [0.5, 590],
'A': [0.9, 630],
'S': [0.95, 730],
'S+': [0.99, Infinity],
}
export const tagPattern = new RegExp('^#?[0289PYLQGRJCUV]{3,}$')
export function calculateAccountRating(player: Player, totalBrawlers: number) {
const brawlersUnlocked = Object.keys(player.brawlers).length
const brawlerTrophies = [...Object.values(player.brawlers)]
.map(({ trophies }) => trophies)
.sort()
const medBrawlerTrophies = brawlerTrophies[Math.floor(brawlerTrophies.length / 2)]
const trophiesGoal = medBrawlerTrophies * totalBrawlers
let rating = '?'
const medTrophies = trophiesGoal / totalBrawlers
// measured on 2020-11-01 with data from 2020-10-01
// select quantile(0.25)(player_trophies/player_brawlers_length), quantile(0.375)(player_trophies/player_brawlers_length), quantile(0.5)(player_trophies/player_brawlers_length), quantile(0.90)(player_trophies/player_brawlers_length), quantile(0.95)(player_trophies/player_brawlers_length), quantile(0.99)(player_trophies/player_brawlers_length) from battle where trophy_season_end>=now()-interval 28 day and timestamp>now()-interval 28 day and timestamp<now()-interval 27 day and battle_event_powerplay=0
for (const key in ratingPercentiles) {
if (medTrophies <= ratingPercentiles[key as keyof typeof ratingPercentiles][1]) {
rating = key
break
}
}
return {
rating,
brawlersUnlocked,
trophiesGoal,
}
}
export const totalBrawlers = 64 // TODO get from an API | arraySum((c, i) -> (position('0289PYLQGRJCUV', c)-1)*pow(14, length(player_club_tag)-i-1-1), arraySlice(splitByString('', player_club_tag), 2), range(if(player_club_tag <> '', toUInt64(length(player_club_tag)-1), 0))) as player_club_id, | random_line_split |
util.ts | import { Player } from "@/model/Api";
import { parseISO } from "date-fns";
import { MapMetaMap, ModeMetaMap } from "~/model/MetaEntry";
export const camelToSnakeCase = (str: string) => str.replace(/[A-Z]/g, letter => `_${letter.toLowerCase()}`);
export const camelToKebab = (s: string) =>
s.replace(/([a-z0-9]|(?=[A-Z]))([A-Z])/g, '$1-$2').toLowerCase();
export const kebabToCamel = (s: string) =>
s.replace(/([-_][a-z])/ig, ($1) => $1.toUpperCase()
.replace('-', '')
.replace('_', ''))
export const capitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toUpperCase());
export const decapitalizeFirstLetter = (str: string) => str.charAt(0).toLowerCase() + str.slice(1)
export const capitalizeWords = (str: string) => str.replace(/(?:^|\s|["'([{])+\S/g, match => match.toUpperCase())
export const slugify = (str: string) => str.replace(/-/g, '--').replace(/ /g, '-')
export const deslugify = (str: string) => str.replace(/-/g, ' ').replace(/ /g, '-')
export function scaleMinMax(values: number[]) {
const min = Math.min.apply(Math, values)
const max = Math.max.apply(Math, values)
if (min === max) {
return values.map(value => 0.5)
}
return values.map(value => (value - min) / (max - min))
}
export function zip<T>(arr1: T[], arr2: T[]) {
return arr1.map((value, index) => [value, arr2[index]])
}
export function hoursSinceDate(date: string) {
const then = Date.parse(date)
const now = (new Date()).getTime()
return Math.floor((now - then) / 1000 / 3600)
}
export const brawlerId = (entry: { name: string }) =>
entry.name.replace(/\.| /g, '_').toLowerCase()
export function formatMode(mode: string) {
return camelToSnakeCase(mode)
.split('_')
.map(w => capitalize(w))
.join(' ')
}
export function unformatMode(mode: string) |
export const formatList = (l: string[], joiner = 'or') => l.slice(0, l.length - 1).join(', ') + ' ' + joiner + ' ' + l[l.length - 1]
export const clamp = (min: number, max: number, n: number) => Math.min(max, Math.max(min, n))
export const minMaxScale = (fromMin: number, fromMax: number, n: number) => (n - fromMin) / (fromMax - fromMin)
export const scaleInto = (fromMin: number, fromMax: number, toMax: number, n: number) => clamp(0, toMax, Math.floor(minMaxScale(fromMin, fromMax, n) * toMax))
export function xpToHours(xp: number) {
return xp / 220; // 145h for 30300 XP as measured by @schneefux
}
/**
* Suffix num with SI unit
* @param num number
* @param digits digits after comma
*/
export function formatSI(num: number, digits: number) {
const si = [
{ value: 1, symbol: '' },
{ value: 1E3, symbol: 'k' },
{ value: 1E6, symbol: 'M' },
]
const rx = /\.0+$|(\.[0-9]*[1-9])0+$/
let i
for (i = si.length - 1; i > 0; i--) {
if (num >= si[i].value) {
break
}
}
return Math.round(num / si[i].value)
.toFixed(digits)
.replace(rx, '$1') + si[i].symbol
}
const propPriority = ['winRateAdj', 'winRate', 'wins', 'rank1', 'duration', 'useRate', 'pickRate']
/**
* Get brawlers by event: {
* [eventId]: [
* brawler id,
* brawler name,
* brawler stats,
* sort prop
* ] }
* sorted by the preferred prop according to propPriority
*/
export function getBest(meta: MapMetaMap|ModeMetaMap): { [key: string]: unknown[] } {
return [...Object.entries(meta)]
.reduce((top, [key, entry]) => ({
...top,
[key]: [...Object.entries(entry.brawlers)]
.map(([brawlerId, brawler]) => ({
id: brawlerId,
title: brawler.name,
brawler: brawlerId,
sampleSize: brawler.sampleSize,
stats: brawler.stats,
sortProp: <string>propPriority.find(prop => prop in brawler.stats),
}))
.sort((brawler1, brawler2) => brawler2.stats[brawler2.sortProp] - brawler1.stats[brawler1.sortProp])
}), {})
}
export function getBestBrawlers(brawlers: any[]): any[] {
const sampleSizeThreshold = 300
brawlers = brawlers.filter(brawler => brawler.sampleSize >= sampleSizeThreshold)
if (brawlers.length == 0) {
return []
}
const sortProp = <string>propPriority.find(prop => prop in brawlers[0].stats)
brawlers.sort((brawler1, brawler2) => brawler2.stats[sortProp] - brawler1.stats[sortProp])
return brawlers
}
interface EventMetadata {
id: string
map: string
mode: string
start?: string
end?: string
}
export function formatAsJsonLd(event: EventMetadata, mediaUrl: string) {
const url = `/tier-list/mode/${slugify(event.mode.toLowerCase())}/map/${slugify(event.map)}`
return {
'@context': 'https://schema.org',
'@type': 'Event',
'name': `${event.mode} - ${event.map}`,
...(event.start != undefined ? {
'startDate': event.start,
} : {}),
...(event.end != undefined ? {
'endDate': event.end!,
} : {}),
'eventAttendanceMode': 'https://schema.org/OnlineEventAttendanceMode',
'eventStatus': 'https://schema.org/EventScheduled',
'url': url,
'image': [`${mediaUrl}/map/${event.id}.png`],
'location': {
'@type': 'VirtualLocation',
'url': url,
},
'description': `${event.map} is a Brawl Stars ${event.mode} map.`,
}
}
export function sloppyParseFloat(number: string) {
return Math.floor(parseFloat(number) * 10000) / 10000
}
/**
* Throw if a tag is invalid.
* Make sure tag starts with a hash.
*/
export function validateTag(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Invalid tag ' + tag)
}
if (!tag.startsWith('#')) {
return '#' + tag
}
return tag
}
// in clickhouse SQL (tag has to start with '#'):
/*
arraySum((c, i) -> (position('0289PYLQGRJCUV', c)-1)*pow(14, length(player_club_tag)-i-1-1), arraySlice(splitByString('', player_club_tag), 2), range(if(player_club_tag <> '', toUInt64(length(player_club_tag)-1), 0))) as player_club_id,
*/
/**
* Encode tag string into 64bit unsigned integer string.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function tagToId(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Cannot encode tag ' + tag)
}
if (tag.startsWith('#')) {
tag = tag.substring(1)
}
const result = tag.split('').reduce((sum, c) => sum*14 + '0289PYLQGRJCUV'.indexOf(c), 0)
return result.toString()
}
/**
* Decode 64bit unsigned integer string into tag string with hash.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function idToTag(idString: string) {
let id = Number(idString)
let tag = ''
while (id != 0) {
const i = id % 14
tag = '0289PYLQGRJCUV'[i] + tag
id = Math.floor(id / 14)
}
return '#' + tag
}
/*
in SQL:
date_add(from_days(ceil(to_days(date_sub(date_sub(timestamp, interval 8 hour), interval 1 day)) / 14) * 14 + 2), interval 8 hour)
in clickhouse SQL:
addHours(addDays(toStartOfInterval(subtractDays(subtractHours(timestamp, 8), 4), interval 336 hour, 'UTC'), 14+4), 8)
*/
/**
* Round timestamp up to next legacy trophy season interval.
* Seasons used to be 2 weeks, this is what the database uses.
* @param timestamp
*/
export function getSeasonEnd(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/2)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*2)
return trophySeasonEnd
}
/**
* Round timestamp up to next new trophy season interval.
* Seasons are now 4 weeks.
* @param timestamp
*/
export function getSeasonEndNew(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/4)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*4)
return trophySeasonEnd
}
/*
* Round timestamp down to start of day.
* @param timestamp
*/
export function getCompetitionMapDayStart(timestamp: Date) {
const dayStart = new Date(Date.parse('2020-07-13T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.ceil(diff/1000/60/60/24)
dayStart.setUTCDate(dayStart.getUTCDate() + daysSince - 1)
return dayStart
}
export function getCompetitionWinnerMode(timestamp: Date) {
const order = ['duoShowdown', 'siege', 'hotZone', 'soloShowdown', 'brawlBall', 'bounty', 'heist', 'gemGrab']
const dayStart = new Date(Date.parse('2021-04-24T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.floor(diff/1000/60/60/24)
return order[daysSince % order.length]
}
/**
* Get the end date of the current and the last database-season
*/
export function getMonthSeasonEnd() {
const twoWeeksAgo = new Date()
twoWeeksAgo.setDate(twoWeeksAgo.getDate() - 14)
return getSeasonEnd(twoWeeksAgo)
}
/**
* Get the end date of the current database-season
*/
export function getTodaySeasonEnd() {
return getSeasonEnd(new Date())
}
export function parseClickhouse(timestamp: string) {
return parseISO(timestamp + 'Z')
}
export function formatClickhouse(timestamp: Date) {
return timestamp.toISOString()
.slice(0, 19) // remove fractions and time zone
.replace('T', ' ')
}
export function formatClickhouseDate(timestamp: Date) {
return timestamp.toISOString()
.slice(0, 10) // remove fractions, day and time zone
}
/** Parse API time format */
const parseTime = (time: string) => new Date(Date.parse(time))
export const parseApiTime = (time: string) => {
return parseTime(`${time.slice(0, 4)}-${time.slice(4, 6)}-${time.slice(6, 8)}T${time.slice(9, 11)}:${time.slice(11, 13)}:${time.slice(13)}`)
}
export function encodeQuery(data: { [key: string]: number|string }) {
const ret = [] as string[]
for (let d in data) {
ret.push(encodeURIComponent(d) + '=' + encodeURIComponent(data[d]))
}
return ret.join('&')
}
/*
* @returns true if a mode is a weekend mode
*/
export function isSpecialEvent(mode: string) {
return ['roboRumble', 'bigGame', 'superCity'].includes(mode)
}
export const getDotProp = (o: any, k: string) => k.split('.').reduce((a, b) => a[b], o)
// measured on 2020-11-01 with data from 2020-10-01
// select quantile(0.25)(player_trophies/player_brawlers_length), quantile(0.375)(player_trophies/player_brawlers_length), quantile(0.5)(player_trophies/player_brawlers_length), quantile(0.90)(player_trophies/player_brawlers_length), quantile(0.95)(player_trophies/player_brawlers_length), quantile(0.99)(player_trophies/player_brawlers_length) from battle where trophy_season_end>=now()-interval 28 day and timestamp>now()-interval 28 day and timestamp<now()-interval 27 day and battle_event_powerplay=0
export const ratingPercentiles = {
// key: percentile, trophy boundary
'?': [0, 480],
'D': [0.25, 500],
'C': [0.375, 520],
'B': [0.5, 590],
'A': [0.9, 630],
'S': [0.95, 730],
'S+': [0.99, Infinity],
}
export const tagPattern = new RegExp('^#?[0289PYLQGRJCUV]{3,}$')
export function calculateAccountRating(player: Player, totalBrawlers: number) {
const brawlersUnlocked = Object.keys(player.brawlers).length
const brawlerTrophies = [...Object.values(player.brawlers)]
.map(({ trophies }) => trophies)
.sort()
const medBrawlerTrophies = brawlerTrophies[Math.floor(brawlerTrophies.length / 2)]
const trophiesGoal = medBrawlerTrophies * totalBrawlers
let rating = '?'
const medTrophies = trophiesGoal / totalBrawlers
// measured on 2020-11-01 with data from 2020-10-01
// select quantile(0.25)(player_trophies/player_brawlers_length), quantile(0.375)(player_trophies/player_brawlers_length), quantile(0.5)(player_trophies/player_brawlers_length), quantile(0.90)(player_trophies/player_brawlers_length), quantile(0.95)(player_trophies/player_brawlers_length), quantile(0.99)(player_trophies/player_brawlers_length) from battle where trophy_season_end>=now()-interval 28 day and timestamp>now()-interval 28 day and timestamp<now()-interval 27 day and battle_event_powerplay=0
for (const key in ratingPercentiles) {
if (medTrophies <= ratingPercentiles[key as keyof typeof ratingPercentiles][1]) {
rating = key
break
}
}
return {
rating,
brawlersUnlocked,
trophiesGoal,
}
}
export const totalBrawlers = 64 // TODO get from an API
| {
const uncapitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toLowerCase())
return uncapitalize(mode.replace(/^Showdown$/, 'Solo Showdown').split(' ').join(''))
} | identifier_body |
util.ts | import { Player } from "@/model/Api";
import { parseISO } from "date-fns";
import { MapMetaMap, ModeMetaMap } from "~/model/MetaEntry";
export const camelToSnakeCase = (str: string) => str.replace(/[A-Z]/g, letter => `_${letter.toLowerCase()}`);
export const camelToKebab = (s: string) =>
s.replace(/([a-z0-9]|(?=[A-Z]))([A-Z])/g, '$1-$2').toLowerCase();
export const kebabToCamel = (s: string) =>
s.replace(/([-_][a-z])/ig, ($1) => $1.toUpperCase()
.replace('-', '')
.replace('_', ''))
export const capitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toUpperCase());
export const decapitalizeFirstLetter = (str: string) => str.charAt(0).toLowerCase() + str.slice(1)
export const capitalizeWords = (str: string) => str.replace(/(?:^|\s|["'([{])+\S/g, match => match.toUpperCase())
export const slugify = (str: string) => str.replace(/-/g, '--').replace(/ /g, '-')
export const deslugify = (str: string) => str.replace(/-/g, ' ').replace(/ /g, '-')
export function scaleMinMax(values: number[]) {
const min = Math.min.apply(Math, values)
const max = Math.max.apply(Math, values)
if (min === max) {
return values.map(value => 0.5)
}
return values.map(value => (value - min) / (max - min))
}
export function zip<T>(arr1: T[], arr2: T[]) {
return arr1.map((value, index) => [value, arr2[index]])
}
export function hoursSinceDate(date: string) {
const then = Date.parse(date)
const now = (new Date()).getTime()
return Math.floor((now - then) / 1000 / 3600)
}
export const brawlerId = (entry: { name: string }) =>
entry.name.replace(/\.| /g, '_').toLowerCase()
export function formatMode(mode: string) {
return camelToSnakeCase(mode)
.split('_')
.map(w => capitalize(w))
.join(' ')
}
export function unformatMode(mode: string) {
const uncapitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toLowerCase())
return uncapitalize(mode.replace(/^Showdown$/, 'Solo Showdown').split(' ').join(''))
}
export const formatList = (l: string[], joiner = 'or') => l.slice(0, l.length - 1).join(', ') + ' ' + joiner + ' ' + l[l.length - 1]
export const clamp = (min: number, max: number, n: number) => Math.min(max, Math.max(min, n))
export const minMaxScale = (fromMin: number, fromMax: number, n: number) => (n - fromMin) / (fromMax - fromMin)
export const scaleInto = (fromMin: number, fromMax: number, toMax: number, n: number) => clamp(0, toMax, Math.floor(minMaxScale(fromMin, fromMax, n) * toMax))
export function xpToHours(xp: number) {
return xp / 220; // 145h for 30300 XP as measured by @schneefux
}
/**
* Suffix num with SI unit
* @param num number
* @param digits digits after comma
*/
export function formatSI(num: number, digits: number) {
const si = [
{ value: 1, symbol: '' },
{ value: 1E3, symbol: 'k' },
{ value: 1E6, symbol: 'M' },
]
const rx = /\.0+$|(\.[0-9]*[1-9])0+$/
let i
for (i = si.length - 1; i > 0; i--) {
if (num >= si[i].value) {
break
}
}
return Math.round(num / si[i].value)
.toFixed(digits)
.replace(rx, '$1') + si[i].symbol
}
const propPriority = ['winRateAdj', 'winRate', 'wins', 'rank1', 'duration', 'useRate', 'pickRate']
/**
* Get brawlers by event: {
* [eventId]: [
* brawler id,
* brawler name,
* brawler stats,
* sort prop
* ] }
* sorted by the preferred prop according to propPriority
*/
export function getBest(meta: MapMetaMap|ModeMetaMap): { [key: string]: unknown[] } {
return [...Object.entries(meta)]
.reduce((top, [key, entry]) => ({
...top,
[key]: [...Object.entries(entry.brawlers)]
.map(([brawlerId, brawler]) => ({
id: brawlerId,
title: brawler.name,
brawler: brawlerId,
sampleSize: brawler.sampleSize,
stats: brawler.stats,
sortProp: <string>propPriority.find(prop => prop in brawler.stats),
}))
.sort((brawler1, brawler2) => brawler2.stats[brawler2.sortProp] - brawler1.stats[brawler1.sortProp])
}), {})
}
export function getBestBrawlers(brawlers: any[]): any[] {
const sampleSizeThreshold = 300
brawlers = brawlers.filter(brawler => brawler.sampleSize >= sampleSizeThreshold)
if (brawlers.length == 0) {
return []
}
const sortProp = <string>propPriority.find(prop => prop in brawlers[0].stats)
brawlers.sort((brawler1, brawler2) => brawler2.stats[sortProp] - brawler1.stats[sortProp])
return brawlers
}
interface EventMetadata {
id: string
map: string
mode: string
start?: string
end?: string
}
export function formatAsJsonLd(event: EventMetadata, mediaUrl: string) {
const url = `/tier-list/mode/${slugify(event.mode.toLowerCase())}/map/${slugify(event.map)}`
return {
'@context': 'https://schema.org',
'@type': 'Event',
'name': `${event.mode} - ${event.map}`,
...(event.start != undefined ? {
'startDate': event.start,
} : {}),
...(event.end != undefined ? {
'endDate': event.end!,
} : {}),
'eventAttendanceMode': 'https://schema.org/OnlineEventAttendanceMode',
'eventStatus': 'https://schema.org/EventScheduled',
'url': url,
'image': [`${mediaUrl}/map/${event.id}.png`],
'location': {
'@type': 'VirtualLocation',
'url': url,
},
'description': `${event.map} is a Brawl Stars ${event.mode} map.`,
}
}
export function sloppyParseFloat(number: string) {
return Math.floor(parseFloat(number) * 10000) / 10000
}
/**
* Throw if a tag is invalid.
* Make sure tag starts with a hash.
*/
export function validateTag(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Invalid tag ' + tag)
}
if (!tag.startsWith('#')) {
return '#' + tag
}
return tag
}
// in clickhouse SQL (tag has to start with '#'):
/*
arraySum((c, i) -> (position('0289PYLQGRJCUV', c)-1)*pow(14, length(player_club_tag)-i-1-1), arraySlice(splitByString('', player_club_tag), 2), range(if(player_club_tag <> '', toUInt64(length(player_club_tag)-1), 0))) as player_club_id,
*/
/**
* Encode tag string into 64bit unsigned integer string.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function tagToId(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Cannot encode tag ' + tag)
}
if (tag.startsWith('#')) {
tag = tag.substring(1)
}
const result = tag.split('').reduce((sum, c) => sum*14 + '0289PYLQGRJCUV'.indexOf(c), 0)
return result.toString()
}
/**
* Decode 64bit unsigned integer string into tag string with hash.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function idToTag(idString: string) {
let id = Number(idString)
let tag = ''
while (id != 0) {
const i = id % 14
tag = '0289PYLQGRJCUV'[i] + tag
id = Math.floor(id / 14)
}
return '#' + tag
}
/*
in SQL:
date_add(from_days(ceil(to_days(date_sub(date_sub(timestamp, interval 8 hour), interval 1 day)) / 14) * 14 + 2), interval 8 hour)
in clickhouse SQL:
addHours(addDays(toStartOfInterval(subtractDays(subtractHours(timestamp, 8), 4), interval 336 hour, 'UTC'), 14+4), 8)
*/
/**
* Round timestamp up to next legacy trophy season interval.
* Seasons used to be 2 weeks, this is what the database uses.
* @param timestamp
*/
export function getSeasonEnd(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/2)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*2)
return trophySeasonEnd
}
/**
* Round timestamp up to next new trophy season interval.
* Seasons are now 4 weeks.
* @param timestamp
*/
export function getSeasonEndNew(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/4)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*4)
return trophySeasonEnd
}
/*
* Round timestamp down to start of day.
* @param timestamp
*/
export function getCompetitionMapDayStart(timestamp: Date) {
const dayStart = new Date(Date.parse('2020-07-13T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.ceil(diff/1000/60/60/24)
dayStart.setUTCDate(dayStart.getUTCDate() + daysSince - 1)
return dayStart
}
export function getCompetitionWinnerMode(timestamp: Date) {
const order = ['duoShowdown', 'siege', 'hotZone', 'soloShowdown', 'brawlBall', 'bounty', 'heist', 'gemGrab']
const dayStart = new Date(Date.parse('2021-04-24T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.floor(diff/1000/60/60/24)
return order[daysSince % order.length]
}
/**
* Get the end date of the current and the last database-season
*/
export function | () {
const twoWeeksAgo = new Date()
twoWeeksAgo.setDate(twoWeeksAgo.getDate() - 14)
return getSeasonEnd(twoWeeksAgo)
}
/**
* Get the end date of the current database-season
*/
export function getTodaySeasonEnd() {
return getSeasonEnd(new Date())
}
export function parseClickhouse(timestamp: string) {
return parseISO(timestamp + 'Z')
}
export function formatClickhouse(timestamp: Date) {
return timestamp.toISOString()
.slice(0, 19) // remove fractions and time zone
.replace('T', ' ')
}
export function formatClickhouseDate(timestamp: Date) {
return timestamp.toISOString()
.slice(0, 10) // remove fractions, day and time zone
}
/** Parse API time format */
const parseTime = (time: string) => new Date(Date.parse(time))
export const parseApiTime = (time: string) => {
return parseTime(`${time.slice(0, 4)}-${time.slice(4, 6)}-${time.slice(6, 8)}T${time.slice(9, 11)}:${time.slice(11, 13)}:${time.slice(13)}`)
}
export function encodeQuery(data: { [key: string]: number|string }) {
const ret = [] as string[]
for (let d in data) {
ret.push(encodeURIComponent(d) + '=' + encodeURIComponent(data[d]))
}
return ret.join('&')
}
/*
* @returns true if a mode is a weekend mode
*/
export function isSpecialEvent(mode: string) {
return ['roboRumble', 'bigGame', 'superCity'].includes(mode)
}
export const getDotProp = (o: any, k: string) => k.split('.').reduce((a, b) => a[b], o)
// measured on 2020-11-01 with data from 2020-10-01
// select quantile(0.25)(player_trophies/player_brawlers_length), quantile(0.375)(player_trophies/player_brawlers_length), quantile(0.5)(player_trophies/player_brawlers_length), quantile(0.90)(player_trophies/player_brawlers_length), quantile(0.95)(player_trophies/player_brawlers_length), quantile(0.99)(player_trophies/player_brawlers_length) from battle where trophy_season_end>=now()-interval 28 day and timestamp>now()-interval 28 day and timestamp<now()-interval 27 day and battle_event_powerplay=0
export const ratingPercentiles = {
// key: percentile, trophy boundary
'?': [0, 480],
'D': [0.25, 500],
'C': [0.375, 520],
'B': [0.5, 590],
'A': [0.9, 630],
'S': [0.95, 730],
'S+': [0.99, Infinity],
}
export const tagPattern = new RegExp('^#?[0289PYLQGRJCUV]{3,}$')
export function calculateAccountRating(player: Player, totalBrawlers: number) {
const brawlersUnlocked = Object.keys(player.brawlers).length
const brawlerTrophies = [...Object.values(player.brawlers)]
.map(({ trophies }) => trophies)
.sort()
const medBrawlerTrophies = brawlerTrophies[Math.floor(brawlerTrophies.length / 2)]
const trophiesGoal = medBrawlerTrophies * totalBrawlers
let rating = '?'
const medTrophies = trophiesGoal / totalBrawlers
// measured on 2020-11-01 with data from 2020-10-01
// select quantile(0.25)(player_trophies/player_brawlers_length), quantile(0.375)(player_trophies/player_brawlers_length), quantile(0.5)(player_trophies/player_brawlers_length), quantile(0.90)(player_trophies/player_brawlers_length), quantile(0.95)(player_trophies/player_brawlers_length), quantile(0.99)(player_trophies/player_brawlers_length) from battle where trophy_season_end>=now()-interval 28 day and timestamp>now()-interval 28 day and timestamp<now()-interval 27 day and battle_event_powerplay=0
for (const key in ratingPercentiles) {
if (medTrophies <= ratingPercentiles[key as keyof typeof ratingPercentiles][1]) {
rating = key
break
}
}
return {
rating,
brawlersUnlocked,
trophiesGoal,
}
}
export const totalBrawlers = 64 // TODO get from an API
| getMonthSeasonEnd | identifier_name |
util.ts | import { Player } from "@/model/Api";
import { parseISO } from "date-fns";
import { MapMetaMap, ModeMetaMap } from "~/model/MetaEntry";
export const camelToSnakeCase = (str: string) => str.replace(/[A-Z]/g, letter => `_${letter.toLowerCase()}`);
export const camelToKebab = (s: string) =>
s.replace(/([a-z0-9]|(?=[A-Z]))([A-Z])/g, '$1-$2').toLowerCase();
export const kebabToCamel = (s: string) =>
s.replace(/([-_][a-z])/ig, ($1) => $1.toUpperCase()
.replace('-', '')
.replace('_', ''))
export const capitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toUpperCase());
export const decapitalizeFirstLetter = (str: string) => str.charAt(0).toLowerCase() + str.slice(1)
export const capitalizeWords = (str: string) => str.replace(/(?:^|\s|["'([{])+\S/g, match => match.toUpperCase())
export const slugify = (str: string) => str.replace(/-/g, '--').replace(/ /g, '-')
export const deslugify = (str: string) => str.replace(/-/g, ' ').replace(/ /g, '-')
export function scaleMinMax(values: number[]) {
const min = Math.min.apply(Math, values)
const max = Math.max.apply(Math, values)
if (min === max) {
return values.map(value => 0.5)
}
return values.map(value => (value - min) / (max - min))
}
export function zip<T>(arr1: T[], arr2: T[]) {
return arr1.map((value, index) => [value, arr2[index]])
}
export function hoursSinceDate(date: string) {
const then = Date.parse(date)
const now = (new Date()).getTime()
return Math.floor((now - then) / 1000 / 3600)
}
export const brawlerId = (entry: { name: string }) =>
entry.name.replace(/\.| /g, '_').toLowerCase()
export function formatMode(mode: string) {
return camelToSnakeCase(mode)
.split('_')
.map(w => capitalize(w))
.join(' ')
}
export function unformatMode(mode: string) {
const uncapitalize = (str: string) => str.replace(/(?:^|\s)\S/g, (a) => a.toLowerCase())
return uncapitalize(mode.replace(/^Showdown$/, 'Solo Showdown').split(' ').join(''))
}
export const formatList = (l: string[], joiner = 'or') => l.slice(0, l.length - 1).join(', ') + ' ' + joiner + ' ' + l[l.length - 1]
export const clamp = (min: number, max: number, n: number) => Math.min(max, Math.max(min, n))
export const minMaxScale = (fromMin: number, fromMax: number, n: number) => (n - fromMin) / (fromMax - fromMin)
export const scaleInto = (fromMin: number, fromMax: number, toMax: number, n: number) => clamp(0, toMax, Math.floor(minMaxScale(fromMin, fromMax, n) * toMax))
export function xpToHours(xp: number) {
return xp / 220; // 145h for 30300 XP as measured by @schneefux
}
/**
* Suffix num with SI unit
* @param num number
* @param digits digits after comma
*/
export function formatSI(num: number, digits: number) {
const si = [
{ value: 1, symbol: '' },
{ value: 1E3, symbol: 'k' },
{ value: 1E6, symbol: 'M' },
]
const rx = /\.0+$|(\.[0-9]*[1-9])0+$/
let i
for (i = si.length - 1; i > 0; i--) {
if (num >= si[i].value) |
}
return Math.round(num / si[i].value)
.toFixed(digits)
.replace(rx, '$1') + si[i].symbol
}
const propPriority = ['winRateAdj', 'winRate', 'wins', 'rank1', 'duration', 'useRate', 'pickRate']
/**
* Get brawlers by event: {
* [eventId]: [
* brawler id,
* brawler name,
* brawler stats,
* sort prop
* ] }
* sorted by the preferred prop according to propPriority
*/
export function getBest(meta: MapMetaMap|ModeMetaMap): { [key: string]: unknown[] } {
return [...Object.entries(meta)]
.reduce((top, [key, entry]) => ({
...top,
[key]: [...Object.entries(entry.brawlers)]
.map(([brawlerId, brawler]) => ({
id: brawlerId,
title: brawler.name,
brawler: brawlerId,
sampleSize: brawler.sampleSize,
stats: brawler.stats,
sortProp: <string>propPriority.find(prop => prop in brawler.stats),
}))
.sort((brawler1, brawler2) => brawler2.stats[brawler2.sortProp] - brawler1.stats[brawler1.sortProp])
}), {})
}
export function getBestBrawlers(brawlers: any[]): any[] {
const sampleSizeThreshold = 300
brawlers = brawlers.filter(brawler => brawler.sampleSize >= sampleSizeThreshold)
if (brawlers.length == 0) {
return []
}
const sortProp = <string>propPriority.find(prop => prop in brawlers[0].stats)
brawlers.sort((brawler1, brawler2) => brawler2.stats[sortProp] - brawler1.stats[sortProp])
return brawlers
}
interface EventMetadata {
id: string
map: string
mode: string
start?: string
end?: string
}
export function formatAsJsonLd(event: EventMetadata, mediaUrl: string) {
const url = `/tier-list/mode/${slugify(event.mode.toLowerCase())}/map/${slugify(event.map)}`
return {
'@context': 'https://schema.org',
'@type': 'Event',
'name': `${event.mode} - ${event.map}`,
...(event.start != undefined ? {
'startDate': event.start,
} : {}),
...(event.end != undefined ? {
'endDate': event.end!,
} : {}),
'eventAttendanceMode': 'https://schema.org/OnlineEventAttendanceMode',
'eventStatus': 'https://schema.org/EventScheduled',
'url': url,
'image': [`${mediaUrl}/map/${event.id}.png`],
'location': {
'@type': 'VirtualLocation',
'url': url,
},
'description': `${event.map} is a Brawl Stars ${event.mode} map.`,
}
}
export function sloppyParseFloat(number: string) {
return Math.floor(parseFloat(number) * 10000) / 10000
}
/**
* Throw if a tag is invalid.
* Make sure tag starts with a hash.
*/
export function validateTag(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Invalid tag ' + tag)
}
if (!tag.startsWith('#')) {
return '#' + tag
}
return tag
}
// in clickhouse SQL (tag has to start with '#'):
/*
arraySum((c, i) -> (position('0289PYLQGRJCUV', c)-1)*pow(14, length(player_club_tag)-i-1-1), arraySlice(splitByString('', player_club_tag), 2), range(if(player_club_tag <> '', toUInt64(length(player_club_tag)-1), 0))) as player_club_id,
*/
/**
* Encode tag string into 64bit unsigned integer string.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function tagToId(tag: string) {
if (!tagPattern.test(tag)) {
throw new Error('Cannot encode tag ' + tag)
}
if (tag.startsWith('#')) {
tag = tag.substring(1)
}
const result = tag.split('').reduce((sum, c) => sum*14 + '0289PYLQGRJCUV'.indexOf(c), 0)
return result.toString()
}
/**
* Decode 64bit unsigned integer string into tag string with hash.
* TODO: Use BigInt if tags are >2^53 at some point.
*/
export function idToTag(idString: string) {
let id = Number(idString)
let tag = ''
while (id != 0) {
const i = id % 14
tag = '0289PYLQGRJCUV'[i] + tag
id = Math.floor(id / 14)
}
return '#' + tag
}
/*
in SQL:
date_add(from_days(ceil(to_days(date_sub(date_sub(timestamp, interval 8 hour), interval 1 day)) / 14) * 14 + 2), interval 8 hour)
in clickhouse SQL:
addHours(addDays(toStartOfInterval(subtractDays(subtractHours(timestamp, 8), 4), interval 336 hour, 'UTC'), 14+4), 8)
*/
/**
* Round timestamp up to next legacy trophy season interval.
* Seasons used to be 2 weeks, this is what the database uses.
* @param timestamp
*/
export function getSeasonEnd(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/2)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*2)
return trophySeasonEnd
}
/**
* Round timestamp up to next new trophy season interval.
* Seasons are now 4 weeks.
* @param timestamp
*/
export function getSeasonEndNew(timestamp: Date) {
const trophySeasonEnd = new Date(Date.parse('2020-07-13T08:00:00Z'))
const diff = timestamp.getTime() - trophySeasonEnd.getTime()
const seasonsSince = Math.ceil(diff/1000/60/60/24/7/4)
trophySeasonEnd.setUTCDate(trophySeasonEnd.getUTCDate() + seasonsSince*7*4)
return trophySeasonEnd
}
/*
* Round timestamp down to start of day.
* @param timestamp
*/
export function getCompetitionMapDayStart(timestamp: Date) {
const dayStart = new Date(Date.parse('2020-07-13T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.ceil(diff/1000/60/60/24)
dayStart.setUTCDate(dayStart.getUTCDate() + daysSince - 1)
return dayStart
}
export function getCompetitionWinnerMode(timestamp: Date) {
const order = ['duoShowdown', 'siege', 'hotZone', 'soloShowdown', 'brawlBall', 'bounty', 'heist', 'gemGrab']
const dayStart = new Date(Date.parse('2021-04-24T09:30:00Z'))
const diff = timestamp.getTime() - dayStart.getTime()
const daysSince = Math.floor(diff/1000/60/60/24)
return order[daysSince % order.length]
}
/**
* Get the end date of the current and the last database-season
*/
export function getMonthSeasonEnd() {
const twoWeeksAgo = new Date()
twoWeeksAgo.setDate(twoWeeksAgo.getDate() - 14)
return getSeasonEnd(twoWeeksAgo)
}
/**
* Get the end date of the current database-season
*/
export function getTodaySeasonEnd() {
return getSeasonEnd(new Date())
}
export function parseClickhouse(timestamp: string) {
return parseISO(timestamp + 'Z')
}
export function formatClickhouse(timestamp: Date) {
return timestamp.toISOString()
.slice(0, 19) // remove fractions and time zone
.replace('T', ' ')
}
export function formatClickhouseDate(timestamp: Date) {
return timestamp.toISOString()
.slice(0, 10) // remove fractions, day and time zone
}
/** Parse API time format */
const parseTime = (time: string) => new Date(Date.parse(time))
export const parseApiTime = (time: string) => {
return parseTime(`${time.slice(0, 4)}-${time.slice(4, 6)}-${time.slice(6, 8)}T${time.slice(9, 11)}:${time.slice(11, 13)}:${time.slice(13)}`)
}
export function encodeQuery(data: { [key: string]: number|string }) {
const ret = [] as string[]
for (let d in data) {
ret.push(encodeURIComponent(d) + '=' + encodeURIComponent(data[d]))
}
return ret.join('&')
}
/*
* @returns true if a mode is a weekend mode
*/
export function isSpecialEvent(mode: string) {
return ['roboRumble', 'bigGame', 'superCity'].includes(mode)
}
export const getDotProp = (o: any, k: string) => k.split('.').reduce((a, b) => a[b], o)
// measured on 2020-11-01 with data from 2020-10-01
// select quantile(0.25)(player_trophies/player_brawlers_length), quantile(0.375)(player_trophies/player_brawlers_length), quantile(0.5)(player_trophies/player_brawlers_length), quantile(0.90)(player_trophies/player_brawlers_length), quantile(0.95)(player_trophies/player_brawlers_length), quantile(0.99)(player_trophies/player_brawlers_length) from battle where trophy_season_end>=now()-interval 28 day and timestamp>now()-interval 28 day and timestamp<now()-interval 27 day and battle_event_powerplay=0
export const ratingPercentiles = {
// key: percentile, trophy boundary
'?': [0, 480],
'D': [0.25, 500],
'C': [0.375, 520],
'B': [0.5, 590],
'A': [0.9, 630],
'S': [0.95, 730],
'S+': [0.99, Infinity],
}
export const tagPattern = new RegExp('^#?[0289PYLQGRJCUV]{3,}$')
export function calculateAccountRating(player: Player, totalBrawlers: number) {
const brawlersUnlocked = Object.keys(player.brawlers).length
const brawlerTrophies = [...Object.values(player.brawlers)]
.map(({ trophies }) => trophies)
.sort()
const medBrawlerTrophies = brawlerTrophies[Math.floor(brawlerTrophies.length / 2)]
const trophiesGoal = medBrawlerTrophies * totalBrawlers
let rating = '?'
const medTrophies = trophiesGoal / totalBrawlers
// measured on 2020-11-01 with data from 2020-10-01
// select quantile(0.25)(player_trophies/player_brawlers_length), quantile(0.375)(player_trophies/player_brawlers_length), quantile(0.5)(player_trophies/player_brawlers_length), quantile(0.90)(player_trophies/player_brawlers_length), quantile(0.95)(player_trophies/player_brawlers_length), quantile(0.99)(player_trophies/player_brawlers_length) from battle where trophy_season_end>=now()-interval 28 day and timestamp>now()-interval 28 day and timestamp<now()-interval 27 day and battle_event_powerplay=0
for (const key in ratingPercentiles) {
if (medTrophies <= ratingPercentiles[key as keyof typeof ratingPercentiles][1]) {
rating = key
break
}
}
return {
rating,
brawlersUnlocked,
trophiesGoal,
}
}
export const totalBrawlers = 64 // TODO get from an API
| {
break
} | conditional_block |
parameter_noise.py | from gymnasium.spaces import Box, Discrete
import numpy as np
from typing import Optional, TYPE_CHECKING, Union
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic
from ray.rllib.models.torch.torch_action_dist import (
TorchCategorical,
TorchDeterministic,
)
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import get_variable, try_import_tf, try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.numpy import softmax, SMALL_NUMBER
from ray.rllib.utils.typing import TensorType
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
@PublicAPI
class ParameterNoise(Exploration):
"""An exploration that changes a Model's parameters.
Implemented based on:
[1] https://openai.com/research/better-exploration-with-parameter-noise
[2] https://arxiv.org/pdf/1706.01905.pdf
At the beginning of an episode, Gaussian noise is added to all weights
of the model. At the end of the episode, the noise is undone and an action
diff (pi-delta) is calculated, from which we determine the changes in the
noise's stddev for the next episode.
"""
def __init__(
self,
action_space,
*,
framework: str,
policy_config: dict,
model: ModelV2,
initial_stddev: float = 1.0,
random_timesteps: int = 10000,
sub_exploration: Optional[dict] = None,
**kwargs
):
"""Initializes a ParameterNoise Exploration object.
Args:
initial_stddev: The initial stddev to use for the noise.
random_timesteps: The number of timesteps to act completely
randomly (see [1]).
sub_exploration: Optional sub-exploration config.
None for auto-detection/setup.
"""
assert framework is not None
super().__init__(
action_space,
policy_config=policy_config,
model=model,
framework=framework,
**kwargs
)
self.stddev = get_variable(
initial_stddev, framework=self.framework, tf_name="stddev"
)
self.stddev_val = initial_stddev # Out-of-graph tf value holder.
# The weight variables of the Model where noise should be applied to.
# This excludes any variable, whose name contains "LayerNorm" (those
# are BatchNormalization layers, which should not be perturbed).
self.model_variables = [
v
for k, v in self.model.trainable_variables(as_dict=True).items()
if "LayerNorm" not in k
]
# Our noise to be added to the weights. Each item in `self.noise`
# corresponds to one Model variable and holding the Gaussian noise to
# be added to that variable (weight).
self.noise = []
for var in self.model_variables:
name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
self.noise.append(
get_variable(
np.zeros(var.shape, dtype=np.float32),
framework=self.framework,
tf_name=name_,
torch_tensor=True,
device=self.device,
)
)
# tf-specific ops to sample, assign and remove noise.
if self.framework == "tf" and not tf.executing_eagerly():
self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
# Create convenience sample+add op for tf.
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
# Whether the Model's weights currently have noise added or not.
self.weights_are_currently_noisy = False
# Auto-detection of underlying exploration functionality.
if sub_exploration is None:
# For discrete action spaces, use an underlying EpsilonGreedy with
# a special schedule.
if isinstance(self.action_space, Discrete):
sub_exploration = {
"type": "EpsilonGreedy",
"epsilon_schedule": {
"type": "PiecewiseSchedule",
# Step function (see [2]).
"endpoints": [
(0, 1.0),
(random_timesteps + 1, 1.0),
(random_timesteps + 2, 0.01),
],
"outside_value": 0.01,
},
}
elif isinstance(self.action_space, Box):
sub_exploration = {
"type": "OrnsteinUhlenbeckNoise",
"random_timesteps": random_timesteps,
}
# TODO(sven): Implement for any action space.
else:
raise NotImplementedError
self.sub_exploration = from_config(
Exploration,
sub_exploration,
framework=self.framework,
action_space=self.action_space,
policy_config=self.policy_config,
model=self.model,
**kwargs
)
# Whether we need to call `self._delayed_on_episode_start` before
# the forward pass.
self.episode_started = False
@override(Exploration)
def before_compute_actions(
self,
*,
timestep: Optional[int] = None,
explore: Optional[bool] = None,
tf_sess: Optional["tf.Session"] = None
):
explore = explore if explore is not None else self.policy_config["explore"]
# Is this the first forward pass in the new episode? If yes, do the
# noise re-sampling and add to weights.
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
# Add noise if necessary.
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
# Remove noise if necessary.
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[TensorType, int],
explore: Union[TensorType, bool]
):
# Use our sub-exploration object to handle the final exploration
# action (depends on the algo-type/action-space/etc..).
return self.sub_exploration.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def on_episode_start(
self,
policy: "Policy",
*,
environment: BaseEnv = None,
episode: int = None,
tf_sess: Optional["tf.Session"] = None
):
# We have to delay the noise-adding step by one forward call.
# This is due to the fact that the optimizer does it's step right
# after the episode was reset (and hence the noise was already added!).
# We don't want to update into a noisy net.
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
# Sample fresh noise and add to weights.
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
# Only sample, don't apply anything to the weights.
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
# Remove stored noise from weights (only if currently noisy).
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(
self,
policy: "Policy",
sample_batch: SampleBatch,
tf_sess: Optional["tf.Session"] = None,
):
noisy_action_dist = noise_free_action_dist = None
# Adjust the stddev depending on the action (pi)-distance.
# Also see [1] for details.
# TODO(sven): Find out whether this can be scrapped by simply using
# the `sample_batch` to get the noisy/noise-free action dist.
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError # TODO(sven): Other action-dist cases.
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=not self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
# Calculate KL-divergence (DKL(clean||noisy)) according to [2].
# TODO(sven): Allow KL-divergence to be calculated by our
# Distribution classes (don't support off-graph/numpy yet).
distance = np.nanmean(
np.sum(
noise_free_action_dist
* np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
),
1,
)
)
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
"cur_epsilon"
]
delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
# Calculate MSE between noisy and non-noisy output (see [2]).
distance = np.sqrt(
np.mean(np.square(noise_free_action_dist - noisy_action_dist))
)
current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
# Adjust stddev according to the calculated action-distance.
if distance <= delta:
self.stddev_val *= 1.01
else:
self.stddev_val /= 1.01
# Update our state (self.stddev and self.stddev_val).
self.set_state(self.get_state(), sess=tf_sess)
return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == "tf":
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == "tf2":
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(
mean=torch.zeros(self.noise[i].size()), std=self.stddev
).to(self.device)
def _tf_sample_new_noise_op(self):
added_noises = []
for noise in self.noise:
added_noises.append(
tf1.assign(
noise,
tf.random.normal(
shape=noise.shape, stddev=self.stddev, dtype=tf.float32
),
)
)
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == "tf":
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def _add_stored_noise(self, *, tf_sess=None):
"""Adds the stored `self.noise` to the model's parameters.
Note: No new sampling of noise here.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to add the
stored noise to the (currently noise-free) weights.
override: If True, undo any currently applied noise first,
then add the currently stored noise.
"""
# Make sure we only add noise to currently noise-free weights.
assert self.weights_are_currently_noisy is False
# Add stored noise to the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_add_stored_noise_op)
elif self.framework == "tf2":
self._tf_add_stored_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Add noise to weights in-place.
var.requires_grad = False
var.add_(noise)
var.requires_grad = True
self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
"""Generates tf-op that assigns the stored noise to weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to apply the already stored noise to the NN.
"""
add_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
add_noise_ops.append(tf1.assign_add(var, noise))
ret = tf.group(*tuple(add_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
"""
Removes the current action noise from the model parameters.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to remove
the noise from the (currently noisy) weights.
"""
# Make sure we only remove noise iff currently noisy.
assert self.weights_are_currently_noisy is True
# Removes the stored noise from the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_remove_noise_op)
elif self.framework == "tf2":
self._tf_remove_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Remove noise from weights in-place.
var.requires_grad = False
var.add_(-noise)
var.requires_grad = True
self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
"""Generates a tf-op for removing noise from the model's weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to remve the currently stored noise from the NN.
"""
remove_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
remove_noise_ops.append(tf1.assign_add(var, -noise))
ret = tf.group(*tuple(remove_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
@override(Exploration)
def get_state(self, sess=None):
|
@override(Exploration)
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
self.stddev_val = state["cur_stddev"]
# Set self.stddev to calculated value.
if self.framework == "tf":
self.stddev.load(self.stddev_val, session=sess)
elif isinstance(self.stddev, float):
self.stddev = self.stddev_val
else:
self.stddev.assign(self.stddev_val)
| return {"cur_stddev": self.stddev_val} | identifier_body |
parameter_noise.py | from gymnasium.spaces import Box, Discrete
import numpy as np
from typing import Optional, TYPE_CHECKING, Union
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic
from ray.rllib.models.torch.torch_action_dist import (
TorchCategorical,
TorchDeterministic,
)
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import get_variable, try_import_tf, try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.numpy import softmax, SMALL_NUMBER
from ray.rllib.utils.typing import TensorType
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
@PublicAPI
class ParameterNoise(Exploration):
"""An exploration that changes a Model's parameters.
Implemented based on:
[1] https://openai.com/research/better-exploration-with-parameter-noise
[2] https://arxiv.org/pdf/1706.01905.pdf
At the beginning of an episode, Gaussian noise is added to all weights
of the model. At the end of the episode, the noise is undone and an action
diff (pi-delta) is calculated, from which we determine the changes in the
noise's stddev for the next episode.
"""
def __init__(
self,
action_space,
*,
framework: str,
policy_config: dict,
model: ModelV2,
initial_stddev: float = 1.0,
random_timesteps: int = 10000,
sub_exploration: Optional[dict] = None,
**kwargs
):
"""Initializes a ParameterNoise Exploration object.
Args:
initial_stddev: The initial stddev to use for the noise.
random_timesteps: The number of timesteps to act completely
randomly (see [1]).
sub_exploration: Optional sub-exploration config.
None for auto-detection/setup.
"""
assert framework is not None
super().__init__(
action_space,
policy_config=policy_config,
model=model,
framework=framework,
**kwargs
)
self.stddev = get_variable(
initial_stddev, framework=self.framework, tf_name="stddev"
)
self.stddev_val = initial_stddev # Out-of-graph tf value holder.
# The weight variables of the Model where noise should be applied to.
# This excludes any variable, whose name contains "LayerNorm" (those
# are BatchNormalization layers, which should not be perturbed).
self.model_variables = [
v
for k, v in self.model.trainable_variables(as_dict=True).items()
if "LayerNorm" not in k
]
# Our noise to be added to the weights. Each item in `self.noise`
# corresponds to one Model variable and holding the Gaussian noise to
# be added to that variable (weight).
self.noise = []
for var in self.model_variables:
name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
self.noise.append(
get_variable(
np.zeros(var.shape, dtype=np.float32),
framework=self.framework,
tf_name=name_,
torch_tensor=True,
device=self.device,
)
)
# tf-specific ops to sample, assign and remove noise.
if self.framework == "tf" and not tf.executing_eagerly():
self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
# Create convenience sample+add op for tf.
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
# Whether the Model's weights currently have noise added or not.
self.weights_are_currently_noisy = False
# Auto-detection of underlying exploration functionality.
if sub_exploration is None:
# For discrete action spaces, use an underlying EpsilonGreedy with
# a special schedule.
if isinstance(self.action_space, Discrete):
sub_exploration = {
"type": "EpsilonGreedy",
"epsilon_schedule": {
"type": "PiecewiseSchedule",
# Step function (see [2]).
"endpoints": [
(0, 1.0),
(random_timesteps + 1, 1.0),
(random_timesteps + 2, 0.01),
],
"outside_value": 0.01,
},
}
elif isinstance(self.action_space, Box):
sub_exploration = {
"type": "OrnsteinUhlenbeckNoise",
"random_timesteps": random_timesteps,
}
# TODO(sven): Implement for any action space.
else:
raise NotImplementedError
self.sub_exploration = from_config(
Exploration,
sub_exploration,
framework=self.framework,
action_space=self.action_space,
policy_config=self.policy_config,
model=self.model,
**kwargs
)
# Whether we need to call `self._delayed_on_episode_start` before
# the forward pass.
self.episode_started = False
@override(Exploration)
def before_compute_actions(
self,
*,
timestep: Optional[int] = None,
explore: Optional[bool] = None,
tf_sess: Optional["tf.Session"] = None
):
explore = explore if explore is not None else self.policy_config["explore"]
# Is this the first forward pass in the new episode? If yes, do the
# noise re-sampling and add to weights.
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
# Add noise if necessary.
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
# Remove noise if necessary.
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[TensorType, int],
explore: Union[TensorType, bool]
):
# Use our sub-exploration object to handle the final exploration
# action (depends on the algo-type/action-space/etc..).
return self.sub_exploration.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def on_episode_start(
self,
policy: "Policy",
*,
environment: BaseEnv = None,
episode: int = None,
tf_sess: Optional["tf.Session"] = None
):
# We have to delay the noise-adding step by one forward call.
# This is due to the fact that the optimizer does it's step right
# after the episode was reset (and hence the noise was already added!).
# We don't want to update into a noisy net.
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
# Sample fresh noise and add to weights.
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
# Only sample, don't apply anything to the weights.
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
# Remove stored noise from weights (only if currently noisy).
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(
self,
policy: "Policy",
sample_batch: SampleBatch,
tf_sess: Optional["tf.Session"] = None,
):
noisy_action_dist = noise_free_action_dist = None
# Adjust the stddev depending on the action (pi)-distance.
# Also see [1] for details.
# TODO(sven): Find out whether this can be scrapped by simply using
# the `sample_batch` to get the noisy/noise-free action dist.
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError # TODO(sven): Other action-dist cases.
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=not self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
# Calculate KL-divergence (DKL(clean||noisy)) according to [2].
# TODO(sven): Allow KL-divergence to be calculated by our
# Distribution classes (don't support off-graph/numpy yet).
distance = np.nanmean(
np.sum(
noise_free_action_dist
* np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
),
1,
)
)
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
"cur_epsilon"
]
delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
# Calculate MSE between noisy and non-noisy output (see [2]).
distance = np.sqrt(
np.mean(np.square(noise_free_action_dist - noisy_action_dist))
)
current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
# Adjust stddev according to the calculated action-distance.
if distance <= delta:
self.stddev_val *= 1.01
else:
self.stddev_val /= 1.01
# Update our state (self.stddev and self.stddev_val).
self.set_state(self.get_state(), sess=tf_sess)
return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == "tf":
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == "tf2":
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(
mean=torch.zeros(self.noise[i].size()), std=self.stddev
).to(self.device)
def _tf_sample_new_noise_op(self):
added_noises = []
for noise in self.noise:
added_noises.append(
tf1.assign(
noise,
tf.random.normal(
shape=noise.shape, stddev=self.stddev, dtype=tf.float32
),
)
)
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == "tf":
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def | (self, *, tf_sess=None):
"""Adds the stored `self.noise` to the model's parameters.
Note: No new sampling of noise here.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to add the
stored noise to the (currently noise-free) weights.
override: If True, undo any currently applied noise first,
then add the currently stored noise.
"""
# Make sure we only add noise to currently noise-free weights.
assert self.weights_are_currently_noisy is False
# Add stored noise to the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_add_stored_noise_op)
elif self.framework == "tf2":
self._tf_add_stored_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Add noise to weights in-place.
var.requires_grad = False
var.add_(noise)
var.requires_grad = True
self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
"""Generates tf-op that assigns the stored noise to weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to apply the already stored noise to the NN.
"""
add_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
add_noise_ops.append(tf1.assign_add(var, noise))
ret = tf.group(*tuple(add_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
"""
Removes the current action noise from the model parameters.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to remove
the noise from the (currently noisy) weights.
"""
# Make sure we only remove noise iff currently noisy.
assert self.weights_are_currently_noisy is True
# Removes the stored noise from the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_remove_noise_op)
elif self.framework == "tf2":
self._tf_remove_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Remove noise from weights in-place.
var.requires_grad = False
var.add_(-noise)
var.requires_grad = True
self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
"""Generates a tf-op for removing noise from the model's weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to remve the currently stored noise from the NN.
"""
remove_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
remove_noise_ops.append(tf1.assign_add(var, -noise))
ret = tf.group(*tuple(remove_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
@override(Exploration)
def get_state(self, sess=None):
return {"cur_stddev": self.stddev_val}
@override(Exploration)
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
self.stddev_val = state["cur_stddev"]
# Set self.stddev to calculated value.
if self.framework == "tf":
self.stddev.load(self.stddev_val, session=sess)
elif isinstance(self.stddev, float):
self.stddev = self.stddev_val
else:
self.stddev.assign(self.stddev_val)
| _add_stored_noise | identifier_name |
parameter_noise.py | from gymnasium.spaces import Box, Discrete
import numpy as np
from typing import Optional, TYPE_CHECKING, Union
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic
from ray.rllib.models.torch.torch_action_dist import (
TorchCategorical,
TorchDeterministic,
)
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import get_variable, try_import_tf, try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.numpy import softmax, SMALL_NUMBER
from ray.rllib.utils.typing import TensorType
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
@PublicAPI
class ParameterNoise(Exploration):
"""An exploration that changes a Model's parameters.
Implemented based on:
[1] https://openai.com/research/better-exploration-with-parameter-noise
[2] https://arxiv.org/pdf/1706.01905.pdf
At the beginning of an episode, Gaussian noise is added to all weights
of the model. At the end of the episode, the noise is undone and an action
diff (pi-delta) is calculated, from which we determine the changes in the
noise's stddev for the next episode.
"""
def __init__(
self,
action_space,
*,
framework: str,
policy_config: dict,
model: ModelV2,
initial_stddev: float = 1.0,
random_timesteps: int = 10000,
sub_exploration: Optional[dict] = None,
**kwargs
):
"""Initializes a ParameterNoise Exploration object.
Args:
initial_stddev: The initial stddev to use for the noise.
random_timesteps: The number of timesteps to act completely
randomly (see [1]).
sub_exploration: Optional sub-exploration config.
None for auto-detection/setup.
"""
assert framework is not None
super().__init__(
action_space,
policy_config=policy_config,
model=model,
framework=framework,
**kwargs
)
self.stddev = get_variable(
initial_stddev, framework=self.framework, tf_name="stddev"
)
self.stddev_val = initial_stddev # Out-of-graph tf value holder.
# The weight variables of the Model where noise should be applied to.
# This excludes any variable, whose name contains "LayerNorm" (those
# are BatchNormalization layers, which should not be perturbed).
self.model_variables = [
v
for k, v in self.model.trainable_variables(as_dict=True).items()
if "LayerNorm" not in k
]
# Our noise to be added to the weights. Each item in `self.noise`
# corresponds to one Model variable and holding the Gaussian noise to
# be added to that variable (weight).
self.noise = []
for var in self.model_variables:
name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
self.noise.append(
get_variable(
np.zeros(var.shape, dtype=np.float32),
framework=self.framework,
tf_name=name_,
torch_tensor=True,
device=self.device,
)
)
# tf-specific ops to sample, assign and remove noise.
if self.framework == "tf" and not tf.executing_eagerly():
self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
# Create convenience sample+add op for tf.
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
# Whether the Model's weights currently have noise added or not.
self.weights_are_currently_noisy = False
# Auto-detection of underlying exploration functionality.
if sub_exploration is None:
# For discrete action spaces, use an underlying EpsilonGreedy with
# a special schedule.
if isinstance(self.action_space, Discrete):
|
elif isinstance(self.action_space, Box):
sub_exploration = {
"type": "OrnsteinUhlenbeckNoise",
"random_timesteps": random_timesteps,
}
# TODO(sven): Implement for any action space.
else:
raise NotImplementedError
self.sub_exploration = from_config(
Exploration,
sub_exploration,
framework=self.framework,
action_space=self.action_space,
policy_config=self.policy_config,
model=self.model,
**kwargs
)
# Whether we need to call `self._delayed_on_episode_start` before
# the forward pass.
self.episode_started = False
@override(Exploration)
def before_compute_actions(
self,
*,
timestep: Optional[int] = None,
explore: Optional[bool] = None,
tf_sess: Optional["tf.Session"] = None
):
explore = explore if explore is not None else self.policy_config["explore"]
# Is this the first forward pass in the new episode? If yes, do the
# noise re-sampling and add to weights.
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
# Add noise if necessary.
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
# Remove noise if necessary.
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[TensorType, int],
explore: Union[TensorType, bool]
):
# Use our sub-exploration object to handle the final exploration
# action (depends on the algo-type/action-space/etc..).
return self.sub_exploration.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def on_episode_start(
self,
policy: "Policy",
*,
environment: BaseEnv = None,
episode: int = None,
tf_sess: Optional["tf.Session"] = None
):
# We have to delay the noise-adding step by one forward call.
# This is due to the fact that the optimizer does it's step right
# after the episode was reset (and hence the noise was already added!).
# We don't want to update into a noisy net.
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
# Sample fresh noise and add to weights.
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
# Only sample, don't apply anything to the weights.
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
# Remove stored noise from weights (only if currently noisy).
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(
self,
policy: "Policy",
sample_batch: SampleBatch,
tf_sess: Optional["tf.Session"] = None,
):
noisy_action_dist = noise_free_action_dist = None
# Adjust the stddev depending on the action (pi)-distance.
# Also see [1] for details.
# TODO(sven): Find out whether this can be scrapped by simply using
# the `sample_batch` to get the noisy/noise-free action dist.
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError # TODO(sven): Other action-dist cases.
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=not self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
# Calculate KL-divergence (DKL(clean||noisy)) according to [2].
# TODO(sven): Allow KL-divergence to be calculated by our
# Distribution classes (don't support off-graph/numpy yet).
distance = np.nanmean(
np.sum(
noise_free_action_dist
* np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
),
1,
)
)
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
"cur_epsilon"
]
delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
# Calculate MSE between noisy and non-noisy output (see [2]).
distance = np.sqrt(
np.mean(np.square(noise_free_action_dist - noisy_action_dist))
)
current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
# Adjust stddev according to the calculated action-distance.
if distance <= delta:
self.stddev_val *= 1.01
else:
self.stddev_val /= 1.01
# Update our state (self.stddev and self.stddev_val).
self.set_state(self.get_state(), sess=tf_sess)
return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == "tf":
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == "tf2":
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(
mean=torch.zeros(self.noise[i].size()), std=self.stddev
).to(self.device)
def _tf_sample_new_noise_op(self):
added_noises = []
for noise in self.noise:
added_noises.append(
tf1.assign(
noise,
tf.random.normal(
shape=noise.shape, stddev=self.stddev, dtype=tf.float32
),
)
)
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == "tf":
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def _add_stored_noise(self, *, tf_sess=None):
"""Adds the stored `self.noise` to the model's parameters.
Note: No new sampling of noise here.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to add the
stored noise to the (currently noise-free) weights.
override: If True, undo any currently applied noise first,
then add the currently stored noise.
"""
# Make sure we only add noise to currently noise-free weights.
assert self.weights_are_currently_noisy is False
# Add stored noise to the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_add_stored_noise_op)
elif self.framework == "tf2":
self._tf_add_stored_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Add noise to weights in-place.
var.requires_grad = False
var.add_(noise)
var.requires_grad = True
self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
"""Generates tf-op that assigns the stored noise to weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to apply the already stored noise to the NN.
"""
add_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
add_noise_ops.append(tf1.assign_add(var, noise))
ret = tf.group(*tuple(add_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
"""
Removes the current action noise from the model parameters.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to remove
the noise from the (currently noisy) weights.
"""
# Make sure we only remove noise iff currently noisy.
assert self.weights_are_currently_noisy is True
# Removes the stored noise from the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_remove_noise_op)
elif self.framework == "tf2":
self._tf_remove_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Remove noise from weights in-place.
var.requires_grad = False
var.add_(-noise)
var.requires_grad = True
self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
"""Generates a tf-op for removing noise from the model's weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to remve the currently stored noise from the NN.
"""
remove_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
remove_noise_ops.append(tf1.assign_add(var, -noise))
ret = tf.group(*tuple(remove_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
@override(Exploration)
def get_state(self, sess=None):
return {"cur_stddev": self.stddev_val}
@override(Exploration)
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
self.stddev_val = state["cur_stddev"]
# Set self.stddev to calculated value.
if self.framework == "tf":
self.stddev.load(self.stddev_val, session=sess)
elif isinstance(self.stddev, float):
self.stddev = self.stddev_val
else:
self.stddev.assign(self.stddev_val)
| sub_exploration = {
"type": "EpsilonGreedy",
"epsilon_schedule": {
"type": "PiecewiseSchedule",
# Step function (see [2]).
"endpoints": [
(0, 1.0),
(random_timesteps + 1, 1.0),
(random_timesteps + 2, 0.01),
],
"outside_value": 0.01,
},
} | conditional_block |
parameter_noise.py | from gymnasium.spaces import Box, Discrete
import numpy as np
from typing import Optional, TYPE_CHECKING, Union
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Categorical, Deterministic
from ray.rllib.models.torch.torch_action_dist import (
TorchCategorical,
TorchDeterministic,
)
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import get_variable, try_import_tf, try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.numpy import softmax, SMALL_NUMBER
from ray.rllib.utils.typing import TensorType
if TYPE_CHECKING:
from ray.rllib.policy.policy import Policy
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
@PublicAPI
class ParameterNoise(Exploration):
"""An exploration that changes a Model's parameters.
Implemented based on:
[1] https://openai.com/research/better-exploration-with-parameter-noise
[2] https://arxiv.org/pdf/1706.01905.pdf
At the beginning of an episode, Gaussian noise is added to all weights
of the model. At the end of the episode, the noise is undone and an action
diff (pi-delta) is calculated, from which we determine the changes in the
noise's stddev for the next episode.
"""
def __init__(
self,
action_space,
*,
framework: str,
policy_config: dict,
model: ModelV2,
initial_stddev: float = 1.0,
random_timesteps: int = 10000,
sub_exploration: Optional[dict] = None,
**kwargs
):
"""Initializes a ParameterNoise Exploration object.
Args:
initial_stddev: The initial stddev to use for the noise.
random_timesteps: The number of timesteps to act completely
randomly (see [1]).
sub_exploration: Optional sub-exploration config.
None for auto-detection/setup.
"""
assert framework is not None
super().__init__(
action_space,
policy_config=policy_config,
model=model,
framework=framework,
**kwargs
)
self.stddev = get_variable(
initial_stddev, framework=self.framework, tf_name="stddev"
)
self.stddev_val = initial_stddev # Out-of-graph tf value holder.
# The weight variables of the Model where noise should be applied to.
# This excludes any variable, whose name contains "LayerNorm" (those
# are BatchNormalization layers, which should not be perturbed).
self.model_variables = [
v
for k, v in self.model.trainable_variables(as_dict=True).items()
if "LayerNorm" not in k
]
# Our noise to be added to the weights. Each item in `self.noise`
# corresponds to one Model variable and holding the Gaussian noise to
# be added to that variable (weight).
self.noise = []
for var in self.model_variables:
name_ = var.name.split(":")[0] + "_noisy" if var.name else ""
self.noise.append(
get_variable(
np.zeros(var.shape, dtype=np.float32),
framework=self.framework,
tf_name=name_,
torch_tensor=True,
device=self.device,
)
)
# tf-specific ops to sample, assign and remove noise.
if self.framework == "tf" and not tf.executing_eagerly():
self.tf_sample_new_noise_op = self._tf_sample_new_noise_op()
self.tf_add_stored_noise_op = self._tf_add_stored_noise_op()
self.tf_remove_noise_op = self._tf_remove_noise_op()
# Create convenience sample+add op for tf.
with tf1.control_dependencies([self.tf_sample_new_noise_op]):
add_op = self._tf_add_stored_noise_op()
with tf1.control_dependencies([add_op]):
self.tf_sample_new_noise_and_add_op = tf.no_op()
# Whether the Model's weights currently have noise added or not.
self.weights_are_currently_noisy = False
# Auto-detection of underlying exploration functionality.
if sub_exploration is None:
# For discrete action spaces, use an underlying EpsilonGreedy with | # a special schedule.
if isinstance(self.action_space, Discrete):
sub_exploration = {
"type": "EpsilonGreedy",
"epsilon_schedule": {
"type": "PiecewiseSchedule",
# Step function (see [2]).
"endpoints": [
(0, 1.0),
(random_timesteps + 1, 1.0),
(random_timesteps + 2, 0.01),
],
"outside_value": 0.01,
},
}
elif isinstance(self.action_space, Box):
sub_exploration = {
"type": "OrnsteinUhlenbeckNoise",
"random_timesteps": random_timesteps,
}
# TODO(sven): Implement for any action space.
else:
raise NotImplementedError
self.sub_exploration = from_config(
Exploration,
sub_exploration,
framework=self.framework,
action_space=self.action_space,
policy_config=self.policy_config,
model=self.model,
**kwargs
)
# Whether we need to call `self._delayed_on_episode_start` before
# the forward pass.
self.episode_started = False
@override(Exploration)
def before_compute_actions(
self,
*,
timestep: Optional[int] = None,
explore: Optional[bool] = None,
tf_sess: Optional["tf.Session"] = None
):
explore = explore if explore is not None else self.policy_config["explore"]
# Is this the first forward pass in the new episode? If yes, do the
# noise re-sampling and add to weights.
if self.episode_started:
self._delayed_on_episode_start(explore, tf_sess)
# Add noise if necessary.
if explore and not self.weights_are_currently_noisy:
self._add_stored_noise(tf_sess=tf_sess)
# Remove noise if necessary.
elif not explore and self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[TensorType, int],
explore: Union[TensorType, bool]
):
# Use our sub-exploration object to handle the final exploration
# action (depends on the algo-type/action-space/etc..).
return self.sub_exploration.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def on_episode_start(
self,
policy: "Policy",
*,
environment: BaseEnv = None,
episode: int = None,
tf_sess: Optional["tf.Session"] = None
):
# We have to delay the noise-adding step by one forward call.
# This is due to the fact that the optimizer does it's step right
# after the episode was reset (and hence the noise was already added!).
# We don't want to update into a noisy net.
self.episode_started = True
def _delayed_on_episode_start(self, explore, tf_sess):
# Sample fresh noise and add to weights.
if explore:
self._sample_new_noise_and_add(tf_sess=tf_sess, override=True)
# Only sample, don't apply anything to the weights.
else:
self._sample_new_noise(tf_sess=tf_sess)
self.episode_started = False
@override(Exploration)
def on_episode_end(self, policy, *, environment=None, episode=None, tf_sess=None):
# Remove stored noise from weights (only if currently noisy).
if self.weights_are_currently_noisy:
self._remove_noise(tf_sess=tf_sess)
@override(Exploration)
def postprocess_trajectory(
self,
policy: "Policy",
sample_batch: SampleBatch,
tf_sess: Optional["tf.Session"] = None,
):
noisy_action_dist = noise_free_action_dist = None
# Adjust the stddev depending on the action (pi)-distance.
# Also see [1] for details.
# TODO(sven): Find out whether this can be scrapped by simply using
# the `sample_batch` to get the noisy/noise-free action dist.
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
else:
raise NotImplementedError # TODO(sven): Other action-dist cases.
if self.weights_are_currently_noisy:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
_, _, fetches = policy.compute_actions_from_input_dict(
input_dict=sample_batch, explore=not self.weights_are_currently_noisy
)
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
action_dist = softmax(fetches[SampleBatch.ACTION_DIST_INPUTS])
# Deterministic (Gaussian actions, e.g. DDPG).
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
action_dist = fetches[SampleBatch.ACTION_DIST_INPUTS]
if noisy_action_dist is None:
noisy_action_dist = action_dist
else:
noise_free_action_dist = action_dist
delta = distance = None
# Categorical case (e.g. DQN).
if issubclass(policy.dist_class, (Categorical, TorchCategorical)):
# Calculate KL-divergence (DKL(clean||noisy)) according to [2].
# TODO(sven): Allow KL-divergence to be calculated by our
# Distribution classes (don't support off-graph/numpy yet).
distance = np.nanmean(
np.sum(
noise_free_action_dist
* np.log(
noise_free_action_dist / (noisy_action_dist + SMALL_NUMBER)
),
1,
)
)
current_epsilon = self.sub_exploration.get_state(sess=tf_sess)[
"cur_epsilon"
]
delta = -np.log(1 - current_epsilon + current_epsilon / self.action_space.n)
elif issubclass(policy.dist_class, (Deterministic, TorchDeterministic)):
# Calculate MSE between noisy and non-noisy output (see [2]).
distance = np.sqrt(
np.mean(np.square(noise_free_action_dist - noisy_action_dist))
)
current_scale = self.sub_exploration.get_state(sess=tf_sess)["cur_scale"]
delta = getattr(self.sub_exploration, "ou_sigma", 0.2) * current_scale
# Adjust stddev according to the calculated action-distance.
if distance <= delta:
self.stddev_val *= 1.01
else:
self.stddev_val /= 1.01
# Update our state (self.stddev and self.stddev_val).
self.set_state(self.get_state(), sess=tf_sess)
return sample_batch
def _sample_new_noise(self, *, tf_sess=None):
"""Samples new noise and stores it in `self.noise`."""
if self.framework == "tf":
tf_sess.run(self.tf_sample_new_noise_op)
elif self.framework == "tf2":
self._tf_sample_new_noise_op()
else:
for i in range(len(self.noise)):
self.noise[i] = torch.normal(
mean=torch.zeros(self.noise[i].size()), std=self.stddev
).to(self.device)
def _tf_sample_new_noise_op(self):
added_noises = []
for noise in self.noise:
added_noises.append(
tf1.assign(
noise,
tf.random.normal(
shape=noise.shape, stddev=self.stddev, dtype=tf.float32
),
)
)
return tf.group(*added_noises)
def _sample_new_noise_and_add(self, *, tf_sess=None, override=False):
if self.framework == "tf":
if override and self.weights_are_currently_noisy:
tf_sess.run(self.tf_remove_noise_op)
tf_sess.run(self.tf_sample_new_noise_and_add_op)
else:
if override and self.weights_are_currently_noisy:
self._remove_noise()
self._sample_new_noise()
self._add_stored_noise()
self.weights_are_currently_noisy = True
def _add_stored_noise(self, *, tf_sess=None):
"""Adds the stored `self.noise` to the model's parameters.
Note: No new sampling of noise here.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to add the
stored noise to the (currently noise-free) weights.
override: If True, undo any currently applied noise first,
then add the currently stored noise.
"""
# Make sure we only add noise to currently noise-free weights.
assert self.weights_are_currently_noisy is False
# Add stored noise to the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_add_stored_noise_op)
elif self.framework == "tf2":
self._tf_add_stored_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Add noise to weights in-place.
var.requires_grad = False
var.add_(noise)
var.requires_grad = True
self.weights_are_currently_noisy = True
def _tf_add_stored_noise_op(self):
"""Generates tf-op that assigns the stored noise to weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to apply the already stored noise to the NN.
"""
add_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
add_noise_ops.append(tf1.assign_add(var, noise))
ret = tf.group(*tuple(add_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
def _remove_noise(self, *, tf_sess=None):
"""
Removes the current action noise from the model parameters.
Args:
tf_sess (Optional[tf.Session]): The tf-session to use to remove
the noise from the (currently noisy) weights.
"""
# Make sure we only remove noise iff currently noisy.
assert self.weights_are_currently_noisy is True
# Removes the stored noise from the model's parameters.
if self.framework == "tf":
tf_sess.run(self.tf_remove_noise_op)
elif self.framework == "tf2":
self._tf_remove_noise_op()
else:
for var, noise in zip(self.model_variables, self.noise):
# Remove noise from weights in-place.
var.requires_grad = False
var.add_(-noise)
var.requires_grad = True
self.weights_are_currently_noisy = False
def _tf_remove_noise_op(self):
"""Generates a tf-op for removing noise from the model's weights.
Also used by tf-eager.
Returns:
tf.op: The tf op to remve the currently stored noise from the NN.
"""
remove_noise_ops = list()
for var, noise in zip(self.model_variables, self.noise):
remove_noise_ops.append(tf1.assign_add(var, -noise))
ret = tf.group(*tuple(remove_noise_ops))
with tf1.control_dependencies([ret]):
return tf.no_op()
@override(Exploration)
def get_state(self, sess=None):
return {"cur_stddev": self.stddev_val}
@override(Exploration)
def set_state(self, state: dict, sess: Optional["tf.Session"] = None) -> None:
self.stddev_val = state["cur_stddev"]
# Set self.stddev to calculated value.
if self.framework == "tf":
self.stddev.load(self.stddev_val, session=sess)
elif isinstance(self.stddev, float):
self.stddev = self.stddev_val
else:
self.stddev.assign(self.stddev_val) | random_line_split | |
hazard2.py | from nmigen import *
from nmigen.asserts import *
from enum import IntEnum
XLEN = 32
class ALUOp(IntEnum):
ADD = 0x0
SUB = 0x1
LT = 0x2
LTU = 0x4
AND = 0x6
OR = 0x7
XOR = 0x8
SRL = 0x9
SRA = 0xa
SLL = 0xb
class RVOpc(IntEnum):
LOAD = 0b00_000
MISC_MEM = 0b00_011
OP_IMM = 0b00_100
AUIPC = 0b00_101
STORE = 0b01_000
OP = 0b01_100
LUI = 0b01_101
BRANCH = 0b11_000
JALR = 0b11_001
JAL = 0b11_011
SYSTEM = 0b11_100
def imm_i(instr):
return Cat(instr[20:], Repl(instr[-1], 20))
def imm_s(instr):
return Cat(instr[7:12], instr[25:], Repl(instr[-1], 20))
def imm_b(instr):
return Cat(C(0, 1), instr[8:12], instr[25:31], instr[7], Repl(instr[-1], 20))
def | (instr):
return Cat(C(0, 12), instr[12:])
def imm_j(instr):
return Cat(C(0, 1), instr[21:31], instr[20], instr[12:20], Repl(instr[-1], 12))
class Hazard2Shifter(Elaboratable):
def __init__(self):
self.i = Signal(XLEN)
self.shamt = Signal(range(XLEN))
self.right = Signal()
self.arith = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
accum = Signal(XLEN, name="shift_pre_reverse")
m.d.comb += accum.eq(Mux(self.right, self.i, self.i[::-1]))
for i in range(self.shamt.width):
accum_next = Signal(XLEN, name=f"shift_accum{i}")
m.d.comb += accum_next.eq(Mux(self.shamt[i],
Cat(accum[1 << i:], Repl(accum[-1] & self.arith, 1 << i)),
accum
))
accum = accum_next
m.d.comb += self.o.eq(Mux(self.right, accum, accum[::-1]))
return m
class Hazard2ALU(Elaboratable):
def __init__(self):
self.i0 = Signal(XLEN)
self.i1 = Signal(XLEN)
self.op = Signal(Shape.cast(ALUOp))
self.take4 = Signal()
self.cmp = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
m.submodules.shifter = shifter = Hazard2Shifter()
# Add/subtract i0 and i1, then subtract 4 if take4 is true. Use of 3-input adder
# encourages tools to implement as carry-save.
adder = sum((
self.i0,
self.i1 ^ Repl(self.op != ALUOp.ADD, XLEN),
Cat(self.op != ALUOp.ADD, C(0, 1), Repl(self.take4, XLEN - 2))
))[:XLEN]
less_than = Mux(self.i0[-1] == self.i1[-1], adder[-1],
Mux(self.op == ALUOp.LTU, self.i1[-1], self.i0[-1])
)
m.d.comb += self.cmp.eq(Mux(self.op == ALUOp.SUB, self.i0 == self.i1, less_than))
# Bitwise ops can be implemented as a single rank of LUT4s. Try to encourage this.
bitwise = Signal(XLEN)
with m.Switch(self.op[0:2]):
with m.Case(ALUOp.AND & 0x3):
m.d.comb += bitwise.eq(self.i0 & self.i1)
with m.Case(ALUOp.OR & 0x3):
m.d.comb += bitwise.eq(self.i0 | self.i1)
with m.Case():
m.d.comb += bitwise.eq(self.i0 ^ self.i1)
m.d.comb += [
shifter.i.eq(self.i0),
shifter.shamt.eq(self.i1),
shifter.right.eq(self.op != ALUOp.SLL),
shifter.arith.eq(self.op == ALUOp.SRA)
]
with m.Switch(self.op):
with m.Case(ALUOp.ADD):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.SUB):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.LT):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.LTU):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.SRL):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SRA):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SLL):
m.d.comb += self.o.eq(shifter.o)
with m.Case():
m.d.comb += self.o.eq(bitwise)
return m
class Hazard2Regfile(Elaboratable):
def __init__(self):
self.raddr1 = Signal(5)
self.raddr2 = Signal(5)
self.ren = Signal()
self.rdata1 = Signal(XLEN)
self.rdata2 = Signal(XLEN)
self.waddr = Signal(5)
self.wdata = Signal(XLEN)
self.wen = Signal()
self.mem = Memory(width=XLEN, depth=32, init=[0] * 32)
def elaborate(self, platform):
m = Module()
m.submodules.wport = wport = self.mem.write_port()
m.submodules.rport1 = rport1 = self.mem.read_port(transparent=False)
m.submodules.rport2 = rport2 = self.mem.read_port(transparent=False)
# nMigen/Yosys do not support read enable on read ports with transparency
# enabled, so need to perform write-to-read bypass manually.
prev_wdata = Signal(XLEN)
forward_wdata_to_r1 = Signal()
forward_wdata_to_r2 = Signal()
next_is_forwarded = self.wen & self.ren & (self.waddr != 0)
with m.If(next_is_forwarded):
m.d.sync += prev_wdata.eq(self.wdata)
with m.If(self.ren):
m.d.sync += [
forward_wdata_to_r1.eq(next_is_forwarded & (self.waddr == self.raddr1)),
forward_wdata_to_r2.eq(next_is_forwarded & (self.waddr == self.raddr2))
]
m.d.comb += [
rport1.addr.eq(self.raddr1),
rport1.en.eq(self.ren),
self.rdata1.eq(Mux(forward_wdata_to_r1, prev_wdata, rport1.data)),
rport2.addr.eq(self.raddr2),
rport2.en.eq(self.ren),
self.rdata2.eq(Mux(forward_wdata_to_r2, prev_wdata, rport2.data)),
wport.addr.eq(self.waddr),
wport.data.eq(self.wdata),
wport.en.eq(self.wen & (self.waddr != 0))
]
return m
class Hazard2CPU(Elaboratable):
def __init__(self, reset_vector=0x0):
self.reset_vector = reset_vector
self.htrans = Signal(2)
self.hwrite = Signal()
self.hsize = Signal(3)
self.haddr = Signal(XLEN)
self.hwdata = Signal(XLEN)
self.hrdata = Signal(XLEN)
self.hready = Signal()
def elaborate(self, platform):
m = Module()
stall = ~self.hready
### Stage F ###
i_dph_active = Signal()
d_dph_active = Signal()
d_dph_write = Signal()
d_dph_addr = Signal(2)
d_dph_size = Signal(2)
d_dph_signed = Signal()
cir = Signal(32)
cir_valid = Signal()
load_rdata = Signal(XLEN)
with m.If(i_dph_active & ~stall):
m.d.sync += cir.eq(self.hrdata)
with m.Switch(d_dph_size):
with m.Case(2):
m.d.comb += load_rdata.eq(self.hrdata)
with m.Case(1):
hword_rdata = self.hrdata.word_select(d_dph_addr[1:], 16)
m.d.comb += load_rdata.eq(Cat(hword_rdata, Repl(hword_rdata[-1] & d_dph_signed, XLEN - 16)))
with m.Case():
byte_rdata = self.hrdata.word_select(d_dph_addr, 8)
m.d.comb += load_rdata.eq(Cat(byte_rdata, Repl(byte_rdata[-1] & d_dph_signed, XLEN - 8)))
### Stage D/X ###
opc = cir[2 :7 ]
cir_rd = cir[7 :12]
funct3 = cir[12:15]
cir_rs1 = cir[15:20]
cir_rs2 = cir[20:25]
funct7 = cir[25:32]
rs1 = Signal(XLEN)
rs2 = Signal(XLEN)
pc = Signal(XLEN, reset=self.reset_vector - 4)
# ALU, and operand/operation selection
m.submodules.alu = alu = Hazard2ALU()
aluop_r_i = Signal(alu.op.shape())
with m.Switch(funct3):
with m.Case(0b000):
# Mask funct7 for I-format (!cir[5]), as it's part of the immediate
m.d.comb += aluop_r_i.eq(Mux(funct7[5] & cir[5], ALUOp.SUB, ALUOp.ADD))
with m.Case(0b001):
m.d.comb += aluop_r_i.eq(ALUOp.SLL)
with m.Case(0b010):
m.d.comb += aluop_r_i.eq(ALUOp.LT)
with m.Case(0b011):
m.d.comb += aluop_r_i.eq(ALUOp.LTU)
with m.Case(0b100):
m.d.comb += aluop_r_i.eq(ALUOp.XOR)
with m.Case(0b101):
m.d.comb += aluop_r_i.eq(Mux(funct7[5], ALUOp.SRA, ALUOp.SRL))
with m.Case(0b110):
m.d.comb += aluop_r_i.eq(ALUOp.OR)
with m.Case(0b111):
m.d.comb += aluop_r_i.eq(ALUOp.AND)
with m.Switch(opc):
with m.Case(RVOpc.OP):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(rs2),
alu.op.eq(aluop_r_i),
]
with m.Case(RVOpc.OP_IMM):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(imm_i(cir)),
alu.op.eq(aluop_r_i),
]
with m.Case(RVOpc.JAL):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(0),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.JALR):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(0),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.BRANCH):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(rs2),
alu.op.eq(Mux(funct3 & 0x6 == 0x0, ALUOp.SUB,
Mux(funct3 & 0x6 == 0x4, ALUOp.LT, ALUOp.LTU)))
]
with m.Case(RVOpc.LUI):
m.d.comb += [
alu.i0.eq(0),
alu.i1.eq(imm_u(cir)),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.AUIPC):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(imm_u(cir)),
alu.op.eq(ALUOp.ADD),
alu.take4.eq(True)
]
# AGU
# Don't assert bus request during reset, it's a rude thing to do. Other than
# that we have the pedal to the metal all the time.
bus_available = Signal()
m.d.sync += bus_available.eq(1)
m.d.comb += self.htrans.eq(bus_available << 1)
agu_next_addr = Signal(XLEN)
access_is_load = cir_valid & ~d_dph_active & (opc == RVOpc.LOAD)
access_is_store = cir_valid & ~d_dph_active & (opc == RVOpc.STORE)
access_is_loadstore = access_is_load | access_is_store
take_branch = cir_valid & ~d_dph_active & (opc == RVOpc.BRANCH) & (alu.cmp != funct3[0])
take_jal = cir_valid & ~d_dph_active & (opc == RVOpc.JAL)
take_jalr = cir_valid & ~d_dph_active & (opc == RVOpc.JALR)
agu_op0 = Signal(XLEN)
agu_op1 = Signal(XLEN)
agu_offs = Signal(XLEN)
with m.If(access_is_load):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_i(cir))]
with m.Elif(access_is_store):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_s(cir))]
with m.Elif(take_branch):
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(imm_b(cir))]
with m.Elif(take_jal):
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(imm_j(cir))]
with m.Elif(take_jalr):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_i(cir))]
with m.Else():
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(0)]
# Offset of +/-4 applied via third adder input (which tools will likely implement as carry-save)
m.d.comb += agu_offs.eq(Cat(
C(0, 2),
((take_branch | take_jal) & ~access_is_loadstore) | ~(access_is_loadstore | take_jalr),
Repl((take_branch | take_jal) & ~access_is_loadstore, 29)
))
m.d.comb += agu_next_addr.eq(agu_op0 + agu_op1 + agu_offs)
# Generate address-phase request
m.d.comb += self.haddr.eq(agu_next_addr)
with m.If(access_is_loadstore):
m.d.comb += [
self.hwrite.eq(access_is_store),
self.hsize.eq(funct3[:2])
]
with m.Else():
m.d.comb += [
self.hsize.eq(2)
]
# Update PC and track bus transfer status
with m.If(bus_available & self.hready):
with m.If(~access_is_loadstore):
# Force PC alignment, since we don't support traps
m.d.sync += pc.eq(agu_next_addr & -4)
m.d.sync += [
i_dph_active.eq(self.htrans[1] & ~access_is_loadstore),
# Note d_dph_active term stops the CIR from being marked as invalid on
# second cycle of a load/store dphase, since it's not consumed during this
# time (it is the CIR of the *next* instruction)
cir_valid.eq((i_dph_active | d_dph_active) & ~(take_branch | take_jal | take_jalr))
]
m.d.sync += [
d_dph_active.eq(self.htrans[1] & access_is_loadstore),
d_dph_addr.eq(self.haddr[:2]),
d_dph_size.eq(self.hsize[:2]),
d_dph_write.eq(self.hwrite),
d_dph_signed.eq(~funct3[2])
]
# Store data shifter
# Unaligned stores behave correctly as long as you don't do them
with m.Switch(d_dph_addr):
with m.Case(0):
m.d.comb += self.hwdata.eq(rs2)
with m.Case(1):
m.d.comb += self.hwdata.eq(Cat(rs2[:8], rs2[:8], rs2[16:]))
with m.Case(2):
m.d.comb += self.hwdata.eq(Cat(rs2[:16], rs2[:16]))
with m.Case(3):
m.d.comb += self.hwdata.eq(Cat(rs2[:24], rs2[:8]))
# Register file
m.submodules.regfile = regfile = Hazard2Regfile()
m.d.comb += [
# During load/store, the CIR is updated during cycle n, and the register
# file is read for next instruction on cycle n + 1, so delay addr using CIR.
regfile.raddr1.eq(Mux(d_dph_active, cir_rs1, self.hrdata[15:20])),
regfile.raddr2.eq(Mux(d_dph_active, cir_rs2, self.hrdata[20:25])),
regfile.ren.eq(~(access_is_loadstore | stall)),
rs1.eq(regfile.rdata1),
rs2.eq(regfile.rdata2)
]
reg_write_alu = cir_valid & ~d_dph_active & ~stall & (
(opc == RVOpc.OP) | (opc == RVOpc.OP_IMM) | (opc == RVOpc.JAL) |
(opc == RVOpc.JALR) | (opc == RVOpc.LUI) | (opc == RVOpc.AUIPC))
reg_write_load = d_dph_active & ~(stall | d_dph_write)
load_rd = Signal(cir_rd.shape())
with m.If(~stall):
m.d.sync += load_rd.eq(cir_rd)
m.d.comb += [
regfile.waddr.eq(Mux(reg_write_load, load_rd, cir_rd)),
regfile.wdata.eq(Mux(reg_write_load, load_rdata, alu.o)),
regfile.wen.eq(reg_write_load | reg_write_alu)
]
return m
| imm_u | identifier_name |
hazard2.py | from nmigen import *
from nmigen.asserts import *
from enum import IntEnum
XLEN = 32
class ALUOp(IntEnum):
ADD = 0x0
SUB = 0x1
LT = 0x2
LTU = 0x4
AND = 0x6
OR = 0x7
XOR = 0x8
SRL = 0x9
SRA = 0xa
SLL = 0xb
class RVOpc(IntEnum):
LOAD = 0b00_000
MISC_MEM = 0b00_011
OP_IMM = 0b00_100
AUIPC = 0b00_101
STORE = 0b01_000
OP = 0b01_100
LUI = 0b01_101
BRANCH = 0b11_000
JALR = 0b11_001
JAL = 0b11_011
SYSTEM = 0b11_100
def imm_i(instr):
return Cat(instr[20:], Repl(instr[-1], 20))
def imm_s(instr):
return Cat(instr[7:12], instr[25:], Repl(instr[-1], 20))
def imm_b(instr):
return Cat(C(0, 1), instr[8:12], instr[25:31], instr[7], Repl(instr[-1], 20))
def imm_u(instr):
return Cat(C(0, 12), instr[12:])
def imm_j(instr):
return Cat(C(0, 1), instr[21:31], instr[20], instr[12:20], Repl(instr[-1], 12))
class Hazard2Shifter(Elaboratable):
def __init__(self):
self.i = Signal(XLEN)
self.shamt = Signal(range(XLEN))
self.right = Signal()
self.arith = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
accum = Signal(XLEN, name="shift_pre_reverse")
m.d.comb += accum.eq(Mux(self.right, self.i, self.i[::-1]))
for i in range(self.shamt.width):
accum_next = Signal(XLEN, name=f"shift_accum{i}")
m.d.comb += accum_next.eq(Mux(self.shamt[i],
Cat(accum[1 << i:], Repl(accum[-1] & self.arith, 1 << i)),
accum
))
accum = accum_next
m.d.comb += self.o.eq(Mux(self.right, accum, accum[::-1]))
return m
class Hazard2ALU(Elaboratable):
def __init__(self):
self.i0 = Signal(XLEN)
self.i1 = Signal(XLEN)
self.op = Signal(Shape.cast(ALUOp))
self.take4 = Signal()
self.cmp = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
m.submodules.shifter = shifter = Hazard2Shifter()
# Add/subtract i0 and i1, then subtract 4 if take4 is true. Use of 3-input adder
# encourages tools to implement as carry-save.
adder = sum((
self.i0,
self.i1 ^ Repl(self.op != ALUOp.ADD, XLEN),
Cat(self.op != ALUOp.ADD, C(0, 1), Repl(self.take4, XLEN - 2))
))[:XLEN]
less_than = Mux(self.i0[-1] == self.i1[-1], adder[-1],
Mux(self.op == ALUOp.LTU, self.i1[-1], self.i0[-1])
)
m.d.comb += self.cmp.eq(Mux(self.op == ALUOp.SUB, self.i0 == self.i1, less_than))
# Bitwise ops can be implemented as a single rank of LUT4s. Try to encourage this.
bitwise = Signal(XLEN)
with m.Switch(self.op[0:2]):
with m.Case(ALUOp.AND & 0x3):
m.d.comb += bitwise.eq(self.i0 & self.i1)
with m.Case(ALUOp.OR & 0x3):
m.d.comb += bitwise.eq(self.i0 | self.i1)
with m.Case():
m.d.comb += bitwise.eq(self.i0 ^ self.i1)
m.d.comb += [
shifter.i.eq(self.i0),
shifter.shamt.eq(self.i1),
shifter.right.eq(self.op != ALUOp.SLL),
shifter.arith.eq(self.op == ALUOp.SRA)
]
with m.Switch(self.op):
with m.Case(ALUOp.ADD):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.SUB):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.LT):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.LTU):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.SRL):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SRA):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SLL):
m.d.comb += self.o.eq(shifter.o)
with m.Case():
m.d.comb += self.o.eq(bitwise)
return m
class Hazard2Regfile(Elaboratable):
def __init__(self):
self.raddr1 = Signal(5)
self.raddr2 = Signal(5)
self.ren = Signal()
self.rdata1 = Signal(XLEN)
self.rdata2 = Signal(XLEN)
self.waddr = Signal(5)
self.wdata = Signal(XLEN)
self.wen = Signal()
self.mem = Memory(width=XLEN, depth=32, init=[0] * 32)
def elaborate(self, platform):
m = Module()
m.submodules.wport = wport = self.mem.write_port()
m.submodules.rport1 = rport1 = self.mem.read_port(transparent=False)
m.submodules.rport2 = rport2 = self.mem.read_port(transparent=False)
# nMigen/Yosys do not support read enable on read ports with transparency
# enabled, so need to perform write-to-read bypass manually.
prev_wdata = Signal(XLEN)
forward_wdata_to_r1 = Signal()
forward_wdata_to_r2 = Signal()
next_is_forwarded = self.wen & self.ren & (self.waddr != 0)
with m.If(next_is_forwarded):
m.d.sync += prev_wdata.eq(self.wdata)
with m.If(self.ren):
m.d.sync += [
forward_wdata_to_r1.eq(next_is_forwarded & (self.waddr == self.raddr1)),
forward_wdata_to_r2.eq(next_is_forwarded & (self.waddr == self.raddr2))
]
m.d.comb += [
rport1.addr.eq(self.raddr1),
rport1.en.eq(self.ren),
self.rdata1.eq(Mux(forward_wdata_to_r1, prev_wdata, rport1.data)),
rport2.addr.eq(self.raddr2),
rport2.en.eq(self.ren),
self.rdata2.eq(Mux(forward_wdata_to_r2, prev_wdata, rport2.data)),
wport.addr.eq(self.waddr),
wport.data.eq(self.wdata),
wport.en.eq(self.wen & (self.waddr != 0))
]
return m
class Hazard2CPU(Elaboratable):
def __init__(self, reset_vector=0x0):
self.reset_vector = reset_vector
self.htrans = Signal(2)
self.hwrite = Signal()
self.hsize = Signal(3)
self.haddr = Signal(XLEN)
self.hwdata = Signal(XLEN)
self.hrdata = Signal(XLEN)
self.hready = Signal()
def elaborate(self, platform): | ### Stage F ###
i_dph_active = Signal()
d_dph_active = Signal()
d_dph_write = Signal()
d_dph_addr = Signal(2)
d_dph_size = Signal(2)
d_dph_signed = Signal()
cir = Signal(32)
cir_valid = Signal()
load_rdata = Signal(XLEN)
with m.If(i_dph_active & ~stall):
m.d.sync += cir.eq(self.hrdata)
with m.Switch(d_dph_size):
with m.Case(2):
m.d.comb += load_rdata.eq(self.hrdata)
with m.Case(1):
hword_rdata = self.hrdata.word_select(d_dph_addr[1:], 16)
m.d.comb += load_rdata.eq(Cat(hword_rdata, Repl(hword_rdata[-1] & d_dph_signed, XLEN - 16)))
with m.Case():
byte_rdata = self.hrdata.word_select(d_dph_addr, 8)
m.d.comb += load_rdata.eq(Cat(byte_rdata, Repl(byte_rdata[-1] & d_dph_signed, XLEN - 8)))
### Stage D/X ###
opc = cir[2 :7 ]
cir_rd = cir[7 :12]
funct3 = cir[12:15]
cir_rs1 = cir[15:20]
cir_rs2 = cir[20:25]
funct7 = cir[25:32]
rs1 = Signal(XLEN)
rs2 = Signal(XLEN)
pc = Signal(XLEN, reset=self.reset_vector - 4)
# ALU, and operand/operation selection
m.submodules.alu = alu = Hazard2ALU()
aluop_r_i = Signal(alu.op.shape())
with m.Switch(funct3):
with m.Case(0b000):
# Mask funct7 for I-format (!cir[5]), as it's part of the immediate
m.d.comb += aluop_r_i.eq(Mux(funct7[5] & cir[5], ALUOp.SUB, ALUOp.ADD))
with m.Case(0b001):
m.d.comb += aluop_r_i.eq(ALUOp.SLL)
with m.Case(0b010):
m.d.comb += aluop_r_i.eq(ALUOp.LT)
with m.Case(0b011):
m.d.comb += aluop_r_i.eq(ALUOp.LTU)
with m.Case(0b100):
m.d.comb += aluop_r_i.eq(ALUOp.XOR)
with m.Case(0b101):
m.d.comb += aluop_r_i.eq(Mux(funct7[5], ALUOp.SRA, ALUOp.SRL))
with m.Case(0b110):
m.d.comb += aluop_r_i.eq(ALUOp.OR)
with m.Case(0b111):
m.d.comb += aluop_r_i.eq(ALUOp.AND)
with m.Switch(opc):
with m.Case(RVOpc.OP):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(rs2),
alu.op.eq(aluop_r_i),
]
with m.Case(RVOpc.OP_IMM):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(imm_i(cir)),
alu.op.eq(aluop_r_i),
]
with m.Case(RVOpc.JAL):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(0),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.JALR):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(0),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.BRANCH):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(rs2),
alu.op.eq(Mux(funct3 & 0x6 == 0x0, ALUOp.SUB,
Mux(funct3 & 0x6 == 0x4, ALUOp.LT, ALUOp.LTU)))
]
with m.Case(RVOpc.LUI):
m.d.comb += [
alu.i0.eq(0),
alu.i1.eq(imm_u(cir)),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.AUIPC):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(imm_u(cir)),
alu.op.eq(ALUOp.ADD),
alu.take4.eq(True)
]
# AGU
# Don't assert bus request during reset, it's a rude thing to do. Other than
# that we have the pedal to the metal all the time.
bus_available = Signal()
m.d.sync += bus_available.eq(1)
m.d.comb += self.htrans.eq(bus_available << 1)
agu_next_addr = Signal(XLEN)
access_is_load = cir_valid & ~d_dph_active & (opc == RVOpc.LOAD)
access_is_store = cir_valid & ~d_dph_active & (opc == RVOpc.STORE)
access_is_loadstore = access_is_load | access_is_store
take_branch = cir_valid & ~d_dph_active & (opc == RVOpc.BRANCH) & (alu.cmp != funct3[0])
take_jal = cir_valid & ~d_dph_active & (opc == RVOpc.JAL)
take_jalr = cir_valid & ~d_dph_active & (opc == RVOpc.JALR)
agu_op0 = Signal(XLEN)
agu_op1 = Signal(XLEN)
agu_offs = Signal(XLEN)
with m.If(access_is_load):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_i(cir))]
with m.Elif(access_is_store):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_s(cir))]
with m.Elif(take_branch):
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(imm_b(cir))]
with m.Elif(take_jal):
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(imm_j(cir))]
with m.Elif(take_jalr):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_i(cir))]
with m.Else():
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(0)]
# Offset of +/-4 applied via third adder input (which tools will likely implement as carry-save)
m.d.comb += agu_offs.eq(Cat(
C(0, 2),
((take_branch | take_jal) & ~access_is_loadstore) | ~(access_is_loadstore | take_jalr),
Repl((take_branch | take_jal) & ~access_is_loadstore, 29)
))
m.d.comb += agu_next_addr.eq(agu_op0 + agu_op1 + agu_offs)
# Generate address-phase request
m.d.comb += self.haddr.eq(agu_next_addr)
with m.If(access_is_loadstore):
m.d.comb += [
self.hwrite.eq(access_is_store),
self.hsize.eq(funct3[:2])
]
with m.Else():
m.d.comb += [
self.hsize.eq(2)
]
# Update PC and track bus transfer status
with m.If(bus_available & self.hready):
with m.If(~access_is_loadstore):
# Force PC alignment, since we don't support traps
m.d.sync += pc.eq(agu_next_addr & -4)
m.d.sync += [
i_dph_active.eq(self.htrans[1] & ~access_is_loadstore),
# Note d_dph_active term stops the CIR from being marked as invalid on
# second cycle of a load/store dphase, since it's not consumed during this
# time (it is the CIR of the *next* instruction)
cir_valid.eq((i_dph_active | d_dph_active) & ~(take_branch | take_jal | take_jalr))
]
m.d.sync += [
d_dph_active.eq(self.htrans[1] & access_is_loadstore),
d_dph_addr.eq(self.haddr[:2]),
d_dph_size.eq(self.hsize[:2]),
d_dph_write.eq(self.hwrite),
d_dph_signed.eq(~funct3[2])
]
# Store data shifter
# Unaligned stores behave correctly as long as you don't do them
with m.Switch(d_dph_addr):
with m.Case(0):
m.d.comb += self.hwdata.eq(rs2)
with m.Case(1):
m.d.comb += self.hwdata.eq(Cat(rs2[:8], rs2[:8], rs2[16:]))
with m.Case(2):
m.d.comb += self.hwdata.eq(Cat(rs2[:16], rs2[:16]))
with m.Case(3):
m.d.comb += self.hwdata.eq(Cat(rs2[:24], rs2[:8]))
# Register file
m.submodules.regfile = regfile = Hazard2Regfile()
m.d.comb += [
# During load/store, the CIR is updated during cycle n, and the register
# file is read for next instruction on cycle n + 1, so delay addr using CIR.
regfile.raddr1.eq(Mux(d_dph_active, cir_rs1, self.hrdata[15:20])),
regfile.raddr2.eq(Mux(d_dph_active, cir_rs2, self.hrdata[20:25])),
regfile.ren.eq(~(access_is_loadstore | stall)),
rs1.eq(regfile.rdata1),
rs2.eq(regfile.rdata2)
]
reg_write_alu = cir_valid & ~d_dph_active & ~stall & (
(opc == RVOpc.OP) | (opc == RVOpc.OP_IMM) | (opc == RVOpc.JAL) |
(opc == RVOpc.JALR) | (opc == RVOpc.LUI) | (opc == RVOpc.AUIPC))
reg_write_load = d_dph_active & ~(stall | d_dph_write)
load_rd = Signal(cir_rd.shape())
with m.If(~stall):
m.d.sync += load_rd.eq(cir_rd)
m.d.comb += [
regfile.waddr.eq(Mux(reg_write_load, load_rd, cir_rd)),
regfile.wdata.eq(Mux(reg_write_load, load_rdata, alu.o)),
regfile.wen.eq(reg_write_load | reg_write_alu)
]
return m | m = Module()
stall = ~self.hready
| random_line_split |
hazard2.py | from nmigen import *
from nmigen.asserts import *
from enum import IntEnum
XLEN = 32
class ALUOp(IntEnum):
ADD = 0x0
SUB = 0x1
LT = 0x2
LTU = 0x4
AND = 0x6
OR = 0x7
XOR = 0x8
SRL = 0x9
SRA = 0xa
SLL = 0xb
class RVOpc(IntEnum):
LOAD = 0b00_000
MISC_MEM = 0b00_011
OP_IMM = 0b00_100
AUIPC = 0b00_101
STORE = 0b01_000
OP = 0b01_100
LUI = 0b01_101
BRANCH = 0b11_000
JALR = 0b11_001
JAL = 0b11_011
SYSTEM = 0b11_100
def imm_i(instr):
return Cat(instr[20:], Repl(instr[-1], 20))
def imm_s(instr):
return Cat(instr[7:12], instr[25:], Repl(instr[-1], 20))
def imm_b(instr):
return Cat(C(0, 1), instr[8:12], instr[25:31], instr[7], Repl(instr[-1], 20))
def imm_u(instr):
return Cat(C(0, 12), instr[12:])
def imm_j(instr):
return Cat(C(0, 1), instr[21:31], instr[20], instr[12:20], Repl(instr[-1], 12))
class Hazard2Shifter(Elaboratable):
def __init__(self):
self.i = Signal(XLEN)
self.shamt = Signal(range(XLEN))
self.right = Signal()
self.arith = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
accum = Signal(XLEN, name="shift_pre_reverse")
m.d.comb += accum.eq(Mux(self.right, self.i, self.i[::-1]))
for i in range(self.shamt.width):
|
m.d.comb += self.o.eq(Mux(self.right, accum, accum[::-1]))
return m
class Hazard2ALU(Elaboratable):
def __init__(self):
self.i0 = Signal(XLEN)
self.i1 = Signal(XLEN)
self.op = Signal(Shape.cast(ALUOp))
self.take4 = Signal()
self.cmp = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
m.submodules.shifter = shifter = Hazard2Shifter()
# Add/subtract i0 and i1, then subtract 4 if take4 is true. Use of 3-input adder
# encourages tools to implement as carry-save.
adder = sum((
self.i0,
self.i1 ^ Repl(self.op != ALUOp.ADD, XLEN),
Cat(self.op != ALUOp.ADD, C(0, 1), Repl(self.take4, XLEN - 2))
))[:XLEN]
less_than = Mux(self.i0[-1] == self.i1[-1], adder[-1],
Mux(self.op == ALUOp.LTU, self.i1[-1], self.i0[-1])
)
m.d.comb += self.cmp.eq(Mux(self.op == ALUOp.SUB, self.i0 == self.i1, less_than))
# Bitwise ops can be implemented as a single rank of LUT4s. Try to encourage this.
bitwise = Signal(XLEN)
with m.Switch(self.op[0:2]):
with m.Case(ALUOp.AND & 0x3):
m.d.comb += bitwise.eq(self.i0 & self.i1)
with m.Case(ALUOp.OR & 0x3):
m.d.comb += bitwise.eq(self.i0 | self.i1)
with m.Case():
m.d.comb += bitwise.eq(self.i0 ^ self.i1)
m.d.comb += [
shifter.i.eq(self.i0),
shifter.shamt.eq(self.i1),
shifter.right.eq(self.op != ALUOp.SLL),
shifter.arith.eq(self.op == ALUOp.SRA)
]
with m.Switch(self.op):
with m.Case(ALUOp.ADD):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.SUB):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.LT):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.LTU):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.SRL):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SRA):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SLL):
m.d.comb += self.o.eq(shifter.o)
with m.Case():
m.d.comb += self.o.eq(bitwise)
return m
class Hazard2Regfile(Elaboratable):
def __init__(self):
self.raddr1 = Signal(5)
self.raddr2 = Signal(5)
self.ren = Signal()
self.rdata1 = Signal(XLEN)
self.rdata2 = Signal(XLEN)
self.waddr = Signal(5)
self.wdata = Signal(XLEN)
self.wen = Signal()
self.mem = Memory(width=XLEN, depth=32, init=[0] * 32)
def elaborate(self, platform):
m = Module()
m.submodules.wport = wport = self.mem.write_port()
m.submodules.rport1 = rport1 = self.mem.read_port(transparent=False)
m.submodules.rport2 = rport2 = self.mem.read_port(transparent=False)
# nMigen/Yosys do not support read enable on read ports with transparency
# enabled, so need to perform write-to-read bypass manually.
prev_wdata = Signal(XLEN)
forward_wdata_to_r1 = Signal()
forward_wdata_to_r2 = Signal()
next_is_forwarded = self.wen & self.ren & (self.waddr != 0)
with m.If(next_is_forwarded):
m.d.sync += prev_wdata.eq(self.wdata)
with m.If(self.ren):
m.d.sync += [
forward_wdata_to_r1.eq(next_is_forwarded & (self.waddr == self.raddr1)),
forward_wdata_to_r2.eq(next_is_forwarded & (self.waddr == self.raddr2))
]
m.d.comb += [
rport1.addr.eq(self.raddr1),
rport1.en.eq(self.ren),
self.rdata1.eq(Mux(forward_wdata_to_r1, prev_wdata, rport1.data)),
rport2.addr.eq(self.raddr2),
rport2.en.eq(self.ren),
self.rdata2.eq(Mux(forward_wdata_to_r2, prev_wdata, rport2.data)),
wport.addr.eq(self.waddr),
wport.data.eq(self.wdata),
wport.en.eq(self.wen & (self.waddr != 0))
]
return m
class Hazard2CPU(Elaboratable):
def __init__(self, reset_vector=0x0):
self.reset_vector = reset_vector
self.htrans = Signal(2)
self.hwrite = Signal()
self.hsize = Signal(3)
self.haddr = Signal(XLEN)
self.hwdata = Signal(XLEN)
self.hrdata = Signal(XLEN)
self.hready = Signal()
def elaborate(self, platform):
m = Module()
stall = ~self.hready
### Stage F ###
i_dph_active = Signal()
d_dph_active = Signal()
d_dph_write = Signal()
d_dph_addr = Signal(2)
d_dph_size = Signal(2)
d_dph_signed = Signal()
cir = Signal(32)
cir_valid = Signal()
load_rdata = Signal(XLEN)
with m.If(i_dph_active & ~stall):
m.d.sync += cir.eq(self.hrdata)
with m.Switch(d_dph_size):
with m.Case(2):
m.d.comb += load_rdata.eq(self.hrdata)
with m.Case(1):
hword_rdata = self.hrdata.word_select(d_dph_addr[1:], 16)
m.d.comb += load_rdata.eq(Cat(hword_rdata, Repl(hword_rdata[-1] & d_dph_signed, XLEN - 16)))
with m.Case():
byte_rdata = self.hrdata.word_select(d_dph_addr, 8)
m.d.comb += load_rdata.eq(Cat(byte_rdata, Repl(byte_rdata[-1] & d_dph_signed, XLEN - 8)))
### Stage D/X ###
opc = cir[2 :7 ]
cir_rd = cir[7 :12]
funct3 = cir[12:15]
cir_rs1 = cir[15:20]
cir_rs2 = cir[20:25]
funct7 = cir[25:32]
rs1 = Signal(XLEN)
rs2 = Signal(XLEN)
pc = Signal(XLEN, reset=self.reset_vector - 4)
# ALU, and operand/operation selection
m.submodules.alu = alu = Hazard2ALU()
aluop_r_i = Signal(alu.op.shape())
with m.Switch(funct3):
with m.Case(0b000):
# Mask funct7 for I-format (!cir[5]), as it's part of the immediate
m.d.comb += aluop_r_i.eq(Mux(funct7[5] & cir[5], ALUOp.SUB, ALUOp.ADD))
with m.Case(0b001):
m.d.comb += aluop_r_i.eq(ALUOp.SLL)
with m.Case(0b010):
m.d.comb += aluop_r_i.eq(ALUOp.LT)
with m.Case(0b011):
m.d.comb += aluop_r_i.eq(ALUOp.LTU)
with m.Case(0b100):
m.d.comb += aluop_r_i.eq(ALUOp.XOR)
with m.Case(0b101):
m.d.comb += aluop_r_i.eq(Mux(funct7[5], ALUOp.SRA, ALUOp.SRL))
with m.Case(0b110):
m.d.comb += aluop_r_i.eq(ALUOp.OR)
with m.Case(0b111):
m.d.comb += aluop_r_i.eq(ALUOp.AND)
with m.Switch(opc):
with m.Case(RVOpc.OP):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(rs2),
alu.op.eq(aluop_r_i),
]
with m.Case(RVOpc.OP_IMM):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(imm_i(cir)),
alu.op.eq(aluop_r_i),
]
with m.Case(RVOpc.JAL):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(0),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.JALR):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(0),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.BRANCH):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(rs2),
alu.op.eq(Mux(funct3 & 0x6 == 0x0, ALUOp.SUB,
Mux(funct3 & 0x6 == 0x4, ALUOp.LT, ALUOp.LTU)))
]
with m.Case(RVOpc.LUI):
m.d.comb += [
alu.i0.eq(0),
alu.i1.eq(imm_u(cir)),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.AUIPC):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(imm_u(cir)),
alu.op.eq(ALUOp.ADD),
alu.take4.eq(True)
]
# AGU
# Don't assert bus request during reset, it's a rude thing to do. Other than
# that we have the pedal to the metal all the time.
bus_available = Signal()
m.d.sync += bus_available.eq(1)
m.d.comb += self.htrans.eq(bus_available << 1)
agu_next_addr = Signal(XLEN)
access_is_load = cir_valid & ~d_dph_active & (opc == RVOpc.LOAD)
access_is_store = cir_valid & ~d_dph_active & (opc == RVOpc.STORE)
access_is_loadstore = access_is_load | access_is_store
take_branch = cir_valid & ~d_dph_active & (opc == RVOpc.BRANCH) & (alu.cmp != funct3[0])
take_jal = cir_valid & ~d_dph_active & (opc == RVOpc.JAL)
take_jalr = cir_valid & ~d_dph_active & (opc == RVOpc.JALR)
agu_op0 = Signal(XLEN)
agu_op1 = Signal(XLEN)
agu_offs = Signal(XLEN)
with m.If(access_is_load):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_i(cir))]
with m.Elif(access_is_store):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_s(cir))]
with m.Elif(take_branch):
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(imm_b(cir))]
with m.Elif(take_jal):
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(imm_j(cir))]
with m.Elif(take_jalr):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_i(cir))]
with m.Else():
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(0)]
# Offset of +/-4 applied via third adder input (which tools will likely implement as carry-save)
m.d.comb += agu_offs.eq(Cat(
C(0, 2),
((take_branch | take_jal) & ~access_is_loadstore) | ~(access_is_loadstore | take_jalr),
Repl((take_branch | take_jal) & ~access_is_loadstore, 29)
))
m.d.comb += agu_next_addr.eq(agu_op0 + agu_op1 + agu_offs)
# Generate address-phase request
m.d.comb += self.haddr.eq(agu_next_addr)
with m.If(access_is_loadstore):
m.d.comb += [
self.hwrite.eq(access_is_store),
self.hsize.eq(funct3[:2])
]
with m.Else():
m.d.comb += [
self.hsize.eq(2)
]
# Update PC and track bus transfer status
with m.If(bus_available & self.hready):
with m.If(~access_is_loadstore):
# Force PC alignment, since we don't support traps
m.d.sync += pc.eq(agu_next_addr & -4)
m.d.sync += [
i_dph_active.eq(self.htrans[1] & ~access_is_loadstore),
# Note d_dph_active term stops the CIR from being marked as invalid on
# second cycle of a load/store dphase, since it's not consumed during this
# time (it is the CIR of the *next* instruction)
cir_valid.eq((i_dph_active | d_dph_active) & ~(take_branch | take_jal | take_jalr))
]
m.d.sync += [
d_dph_active.eq(self.htrans[1] & access_is_loadstore),
d_dph_addr.eq(self.haddr[:2]),
d_dph_size.eq(self.hsize[:2]),
d_dph_write.eq(self.hwrite),
d_dph_signed.eq(~funct3[2])
]
# Store data shifter
# Unaligned stores behave correctly as long as you don't do them
with m.Switch(d_dph_addr):
with m.Case(0):
m.d.comb += self.hwdata.eq(rs2)
with m.Case(1):
m.d.comb += self.hwdata.eq(Cat(rs2[:8], rs2[:8], rs2[16:]))
with m.Case(2):
m.d.comb += self.hwdata.eq(Cat(rs2[:16], rs2[:16]))
with m.Case(3):
m.d.comb += self.hwdata.eq(Cat(rs2[:24], rs2[:8]))
# Register file
m.submodules.regfile = regfile = Hazard2Regfile()
m.d.comb += [
# During load/store, the CIR is updated during cycle n, and the register
# file is read for next instruction on cycle n + 1, so delay addr using CIR.
regfile.raddr1.eq(Mux(d_dph_active, cir_rs1, self.hrdata[15:20])),
regfile.raddr2.eq(Mux(d_dph_active, cir_rs2, self.hrdata[20:25])),
regfile.ren.eq(~(access_is_loadstore | stall)),
rs1.eq(regfile.rdata1),
rs2.eq(regfile.rdata2)
]
reg_write_alu = cir_valid & ~d_dph_active & ~stall & (
(opc == RVOpc.OP) | (opc == RVOpc.OP_IMM) | (opc == RVOpc.JAL) |
(opc == RVOpc.JALR) | (opc == RVOpc.LUI) | (opc == RVOpc.AUIPC))
reg_write_load = d_dph_active & ~(stall | d_dph_write)
load_rd = Signal(cir_rd.shape())
with m.If(~stall):
m.d.sync += load_rd.eq(cir_rd)
m.d.comb += [
regfile.waddr.eq(Mux(reg_write_load, load_rd, cir_rd)),
regfile.wdata.eq(Mux(reg_write_load, load_rdata, alu.o)),
regfile.wen.eq(reg_write_load | reg_write_alu)
]
return m
| accum_next = Signal(XLEN, name=f"shift_accum{i}")
m.d.comb += accum_next.eq(Mux(self.shamt[i],
Cat(accum[1 << i:], Repl(accum[-1] & self.arith, 1 << i)),
accum
))
accum = accum_next | conditional_block |
hazard2.py | from nmigen import *
from nmigen.asserts import *
from enum import IntEnum
XLEN = 32
class ALUOp(IntEnum):
ADD = 0x0
SUB = 0x1
LT = 0x2
LTU = 0x4
AND = 0x6
OR = 0x7
XOR = 0x8
SRL = 0x9
SRA = 0xa
SLL = 0xb
class RVOpc(IntEnum):
LOAD = 0b00_000
MISC_MEM = 0b00_011
OP_IMM = 0b00_100
AUIPC = 0b00_101
STORE = 0b01_000
OP = 0b01_100
LUI = 0b01_101
BRANCH = 0b11_000
JALR = 0b11_001
JAL = 0b11_011
SYSTEM = 0b11_100
def imm_i(instr):
return Cat(instr[20:], Repl(instr[-1], 20))
def imm_s(instr):
return Cat(instr[7:12], instr[25:], Repl(instr[-1], 20))
def imm_b(instr):
return Cat(C(0, 1), instr[8:12], instr[25:31], instr[7], Repl(instr[-1], 20))
def imm_u(instr):
return Cat(C(0, 12), instr[12:])
def imm_j(instr):
return Cat(C(0, 1), instr[21:31], instr[20], instr[12:20], Repl(instr[-1], 12))
class Hazard2Shifter(Elaboratable):
def __init__(self):
self.i = Signal(XLEN)
self.shamt = Signal(range(XLEN))
self.right = Signal()
self.arith = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
|
class Hazard2ALU(Elaboratable):
def __init__(self):
self.i0 = Signal(XLEN)
self.i1 = Signal(XLEN)
self.op = Signal(Shape.cast(ALUOp))
self.take4 = Signal()
self.cmp = Signal()
self.o = Signal(XLEN)
def elaborate(self, platform):
m = Module()
m.submodules.shifter = shifter = Hazard2Shifter()
# Add/subtract i0 and i1, then subtract 4 if take4 is true. Use of 3-input adder
# encourages tools to implement as carry-save.
adder = sum((
self.i0,
self.i1 ^ Repl(self.op != ALUOp.ADD, XLEN),
Cat(self.op != ALUOp.ADD, C(0, 1), Repl(self.take4, XLEN - 2))
))[:XLEN]
less_than = Mux(self.i0[-1] == self.i1[-1], adder[-1],
Mux(self.op == ALUOp.LTU, self.i1[-1], self.i0[-1])
)
m.d.comb += self.cmp.eq(Mux(self.op == ALUOp.SUB, self.i0 == self.i1, less_than))
# Bitwise ops can be implemented as a single rank of LUT4s. Try to encourage this.
bitwise = Signal(XLEN)
with m.Switch(self.op[0:2]):
with m.Case(ALUOp.AND & 0x3):
m.d.comb += bitwise.eq(self.i0 & self.i1)
with m.Case(ALUOp.OR & 0x3):
m.d.comb += bitwise.eq(self.i0 | self.i1)
with m.Case():
m.d.comb += bitwise.eq(self.i0 ^ self.i1)
m.d.comb += [
shifter.i.eq(self.i0),
shifter.shamt.eq(self.i1),
shifter.right.eq(self.op != ALUOp.SLL),
shifter.arith.eq(self.op == ALUOp.SRA)
]
with m.Switch(self.op):
with m.Case(ALUOp.ADD):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.SUB):
m.d.comb += self.o.eq(adder)
with m.Case(ALUOp.LT):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.LTU):
m.d.comb += self.o.eq(less_than)
with m.Case(ALUOp.SRL):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SRA):
m.d.comb += self.o.eq(shifter.o)
with m.Case(ALUOp.SLL):
m.d.comb += self.o.eq(shifter.o)
with m.Case():
m.d.comb += self.o.eq(bitwise)
return m
class Hazard2Regfile(Elaboratable):
def __init__(self):
self.raddr1 = Signal(5)
self.raddr2 = Signal(5)
self.ren = Signal()
self.rdata1 = Signal(XLEN)
self.rdata2 = Signal(XLEN)
self.waddr = Signal(5)
self.wdata = Signal(XLEN)
self.wen = Signal()
self.mem = Memory(width=XLEN, depth=32, init=[0] * 32)
def elaborate(self, platform):
m = Module()
m.submodules.wport = wport = self.mem.write_port()
m.submodules.rport1 = rport1 = self.mem.read_port(transparent=False)
m.submodules.rport2 = rport2 = self.mem.read_port(transparent=False)
# nMigen/Yosys do not support read enable on read ports with transparency
# enabled, so need to perform write-to-read bypass manually.
prev_wdata = Signal(XLEN)
forward_wdata_to_r1 = Signal()
forward_wdata_to_r2 = Signal()
next_is_forwarded = self.wen & self.ren & (self.waddr != 0)
with m.If(next_is_forwarded):
m.d.sync += prev_wdata.eq(self.wdata)
with m.If(self.ren):
m.d.sync += [
forward_wdata_to_r1.eq(next_is_forwarded & (self.waddr == self.raddr1)),
forward_wdata_to_r2.eq(next_is_forwarded & (self.waddr == self.raddr2))
]
m.d.comb += [
rport1.addr.eq(self.raddr1),
rport1.en.eq(self.ren),
self.rdata1.eq(Mux(forward_wdata_to_r1, prev_wdata, rport1.data)),
rport2.addr.eq(self.raddr2),
rport2.en.eq(self.ren),
self.rdata2.eq(Mux(forward_wdata_to_r2, prev_wdata, rport2.data)),
wport.addr.eq(self.waddr),
wport.data.eq(self.wdata),
wport.en.eq(self.wen & (self.waddr != 0))
]
return m
class Hazard2CPU(Elaboratable):
def __init__(self, reset_vector=0x0):
self.reset_vector = reset_vector
self.htrans = Signal(2)
self.hwrite = Signal()
self.hsize = Signal(3)
self.haddr = Signal(XLEN)
self.hwdata = Signal(XLEN)
self.hrdata = Signal(XLEN)
self.hready = Signal()
def elaborate(self, platform):
m = Module()
stall = ~self.hready
### Stage F ###
i_dph_active = Signal()
d_dph_active = Signal()
d_dph_write = Signal()
d_dph_addr = Signal(2)
d_dph_size = Signal(2)
d_dph_signed = Signal()
cir = Signal(32)
cir_valid = Signal()
load_rdata = Signal(XLEN)
with m.If(i_dph_active & ~stall):
m.d.sync += cir.eq(self.hrdata)
with m.Switch(d_dph_size):
with m.Case(2):
m.d.comb += load_rdata.eq(self.hrdata)
with m.Case(1):
hword_rdata = self.hrdata.word_select(d_dph_addr[1:], 16)
m.d.comb += load_rdata.eq(Cat(hword_rdata, Repl(hword_rdata[-1] & d_dph_signed, XLEN - 16)))
with m.Case():
byte_rdata = self.hrdata.word_select(d_dph_addr, 8)
m.d.comb += load_rdata.eq(Cat(byte_rdata, Repl(byte_rdata[-1] & d_dph_signed, XLEN - 8)))
### Stage D/X ###
opc = cir[2 :7 ]
cir_rd = cir[7 :12]
funct3 = cir[12:15]
cir_rs1 = cir[15:20]
cir_rs2 = cir[20:25]
funct7 = cir[25:32]
rs1 = Signal(XLEN)
rs2 = Signal(XLEN)
pc = Signal(XLEN, reset=self.reset_vector - 4)
# ALU, and operand/operation selection
m.submodules.alu = alu = Hazard2ALU()
aluop_r_i = Signal(alu.op.shape())
with m.Switch(funct3):
with m.Case(0b000):
# Mask funct7 for I-format (!cir[5]), as it's part of the immediate
m.d.comb += aluop_r_i.eq(Mux(funct7[5] & cir[5], ALUOp.SUB, ALUOp.ADD))
with m.Case(0b001):
m.d.comb += aluop_r_i.eq(ALUOp.SLL)
with m.Case(0b010):
m.d.comb += aluop_r_i.eq(ALUOp.LT)
with m.Case(0b011):
m.d.comb += aluop_r_i.eq(ALUOp.LTU)
with m.Case(0b100):
m.d.comb += aluop_r_i.eq(ALUOp.XOR)
with m.Case(0b101):
m.d.comb += aluop_r_i.eq(Mux(funct7[5], ALUOp.SRA, ALUOp.SRL))
with m.Case(0b110):
m.d.comb += aluop_r_i.eq(ALUOp.OR)
with m.Case(0b111):
m.d.comb += aluop_r_i.eq(ALUOp.AND)
with m.Switch(opc):
with m.Case(RVOpc.OP):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(rs2),
alu.op.eq(aluop_r_i),
]
with m.Case(RVOpc.OP_IMM):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(imm_i(cir)),
alu.op.eq(aluop_r_i),
]
with m.Case(RVOpc.JAL):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(0),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.JALR):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(0),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.BRANCH):
m.d.comb += [
alu.i0.eq(rs1),
alu.i1.eq(rs2),
alu.op.eq(Mux(funct3 & 0x6 == 0x0, ALUOp.SUB,
Mux(funct3 & 0x6 == 0x4, ALUOp.LT, ALUOp.LTU)))
]
with m.Case(RVOpc.LUI):
m.d.comb += [
alu.i0.eq(0),
alu.i1.eq(imm_u(cir)),
alu.op.eq(ALUOp.ADD)
]
with m.Case(RVOpc.AUIPC):
m.d.comb += [
alu.i0.eq(pc),
alu.i1.eq(imm_u(cir)),
alu.op.eq(ALUOp.ADD),
alu.take4.eq(True)
]
# AGU
# Don't assert bus request during reset, it's a rude thing to do. Other than
# that we have the pedal to the metal all the time.
bus_available = Signal()
m.d.sync += bus_available.eq(1)
m.d.comb += self.htrans.eq(bus_available << 1)
agu_next_addr = Signal(XLEN)
access_is_load = cir_valid & ~d_dph_active & (opc == RVOpc.LOAD)
access_is_store = cir_valid & ~d_dph_active & (opc == RVOpc.STORE)
access_is_loadstore = access_is_load | access_is_store
take_branch = cir_valid & ~d_dph_active & (opc == RVOpc.BRANCH) & (alu.cmp != funct3[0])
take_jal = cir_valid & ~d_dph_active & (opc == RVOpc.JAL)
take_jalr = cir_valid & ~d_dph_active & (opc == RVOpc.JALR)
agu_op0 = Signal(XLEN)
agu_op1 = Signal(XLEN)
agu_offs = Signal(XLEN)
with m.If(access_is_load):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_i(cir))]
with m.Elif(access_is_store):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_s(cir))]
with m.Elif(take_branch):
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(imm_b(cir))]
with m.Elif(take_jal):
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(imm_j(cir))]
with m.Elif(take_jalr):
m.d.comb += [agu_op0.eq(rs1), agu_op1.eq(imm_i(cir))]
with m.Else():
m.d.comb += [agu_op0.eq(pc), agu_op1.eq(0)]
# Offset of +/-4 applied via third adder input (which tools will likely implement as carry-save)
m.d.comb += agu_offs.eq(Cat(
C(0, 2),
((take_branch | take_jal) & ~access_is_loadstore) | ~(access_is_loadstore | take_jalr),
Repl((take_branch | take_jal) & ~access_is_loadstore, 29)
))
m.d.comb += agu_next_addr.eq(agu_op0 + agu_op1 + agu_offs)
# Generate address-phase request
m.d.comb += self.haddr.eq(agu_next_addr)
with m.If(access_is_loadstore):
m.d.comb += [
self.hwrite.eq(access_is_store),
self.hsize.eq(funct3[:2])
]
with m.Else():
m.d.comb += [
self.hsize.eq(2)
]
# Update PC and track bus transfer status
with m.If(bus_available & self.hready):
with m.If(~access_is_loadstore):
# Force PC alignment, since we don't support traps
m.d.sync += pc.eq(agu_next_addr & -4)
m.d.sync += [
i_dph_active.eq(self.htrans[1] & ~access_is_loadstore),
# Note d_dph_active term stops the CIR from being marked as invalid on
# second cycle of a load/store dphase, since it's not consumed during this
# time (it is the CIR of the *next* instruction)
cir_valid.eq((i_dph_active | d_dph_active) & ~(take_branch | take_jal | take_jalr))
]
m.d.sync += [
d_dph_active.eq(self.htrans[1] & access_is_loadstore),
d_dph_addr.eq(self.haddr[:2]),
d_dph_size.eq(self.hsize[:2]),
d_dph_write.eq(self.hwrite),
d_dph_signed.eq(~funct3[2])
]
# Store data shifter
# Unaligned stores behave correctly as long as you don't do them
with m.Switch(d_dph_addr):
with m.Case(0):
m.d.comb += self.hwdata.eq(rs2)
with m.Case(1):
m.d.comb += self.hwdata.eq(Cat(rs2[:8], rs2[:8], rs2[16:]))
with m.Case(2):
m.d.comb += self.hwdata.eq(Cat(rs2[:16], rs2[:16]))
with m.Case(3):
m.d.comb += self.hwdata.eq(Cat(rs2[:24], rs2[:8]))
# Register file
m.submodules.regfile = regfile = Hazard2Regfile()
m.d.comb += [
# During load/store, the CIR is updated during cycle n, and the register
# file is read for next instruction on cycle n + 1, so delay addr using CIR.
regfile.raddr1.eq(Mux(d_dph_active, cir_rs1, self.hrdata[15:20])),
regfile.raddr2.eq(Mux(d_dph_active, cir_rs2, self.hrdata[20:25])),
regfile.ren.eq(~(access_is_loadstore | stall)),
rs1.eq(regfile.rdata1),
rs2.eq(regfile.rdata2)
]
reg_write_alu = cir_valid & ~d_dph_active & ~stall & (
(opc == RVOpc.OP) | (opc == RVOpc.OP_IMM) | (opc == RVOpc.JAL) |
(opc == RVOpc.JALR) | (opc == RVOpc.LUI) | (opc == RVOpc.AUIPC))
reg_write_load = d_dph_active & ~(stall | d_dph_write)
load_rd = Signal(cir_rd.shape())
with m.If(~stall):
m.d.sync += load_rd.eq(cir_rd)
m.d.comb += [
regfile.waddr.eq(Mux(reg_write_load, load_rd, cir_rd)),
regfile.wdata.eq(Mux(reg_write_load, load_rdata, alu.o)),
regfile.wen.eq(reg_write_load | reg_write_alu)
]
return m
| m = Module()
accum = Signal(XLEN, name="shift_pre_reverse")
m.d.comb += accum.eq(Mux(self.right, self.i, self.i[::-1]))
for i in range(self.shamt.width):
accum_next = Signal(XLEN, name=f"shift_accum{i}")
m.d.comb += accum_next.eq(Mux(self.shamt[i],
Cat(accum[1 << i:], Repl(accum[-1] & self.arith, 1 << i)),
accum
))
accum = accum_next
m.d.comb += self.o.eq(Mux(self.right, accum, accum[::-1]))
return m | identifier_body |
server.go | /*
*
* Copyright 2018 huayuan-iot
*
* Author: lynn
* Date: 2018/07/03
* Despcription: bus server implement
*
*/
package module
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
pb "clc.hmu/app/busmanager/buspb"
"clc.hmu/app/busmanager/module/web"
"clc.hmu/app/extend"
"clc.hmu/app/public"
"clc.hmu/app/public/log"
"clc.hmu/app/public/log/bootflag"
"clc.hmu/app/public/log/buslog"
"clc.hmu/app/public/store/etc"
"clc.hmu/app/public/sys"
"github.com/gwaylib/errors"
)
// BusServer is used to implement busmanager.BusServer.
type BusServer struct {
MqttClient MQTTClient
enableCache bool
cache []string
}
// LEDSetInterval set led interval
var LEDSetInterval = 50
// status
const (
Offline = -1 // mqtt确认了下线
Online = 0 // mqtt确认了上线
WaitingOnline = 1 // 检测到网络正常,等待mqtt上线
)
// NetworkStatus network status, 0 for connect, -1 for disconnect
var (
networkStatus = -1
networkStatusSync = sync.Mutex{}
)
func GetNetworkStatus() int {
networkStatusSync.Lock()
defer networkStatusSync.Unlock()
return networkStatus
}
func SetNetworkStatus(status int, mqtt bool) {
networkStatusSync.Lock()
defer networkStatusSync.Unlock()
if networkStatus == Online && !mqtt {
// 当mqtt在线时,只能由mqtt处理
return
}
networkStatus = status
}
// Init do some init operation
func (s *BusServer) Init() {
cfg := sys.GetBusManagerCfg()
// download element library
downloadDependentDeviceLibrary()
if err := web.ReadVideoConfig(); err != nil {
log.Printf("open video config file failed, errmsg {%v}", err)
}
s.enableCache = true
// check directory exist or not
_, err := os.Stat(sys.GetBusManagerCfg().Cache.Directory)
if err != nil {
if os.IsNotExist(err) {
// do not exist, create
if err := os.Mkdir(cfg.Cache.Directory, os.ModeDir); err != nil {
log.Printf("create directory failed: %s", err)
s.enableCache = false
}
} else {
s.enableCache = false
}
}
log.Printf("enable cache: %v", s.enableCache)
muid := getUUID()
willtopic := "sample-values/" + muid + "/_/_state"
conntopic := willtopic
payload := public.MessagePayload{
MonitoringUnitID: muid,
SampleUnitID: "_",
ChannelID: "_state",
Name: "采集器连接状态",
Value: -1,
// Timestamp: public.UTCTimeStamp(),// 此值因与服务器发生了冲突,估不上报
Cov: true,
State: 0,
}
willpayload, _ := json.Marshal(payload)
payload.Value = 0
connpayload, _ := json.Marshal(payload)
s.MqttClient = NewMQTTClient(SubMessageHandler, willtopic, string(willpayload), conntopic, string(connpayload))
if err := s.MqttClient.ConnectServer(); err != nil {
log.Printf("connect mqtt server failed, errmsg {%v}, start reconnect...", err)
// start to reconnect
go s.MqttClient.ReconnectServer()
}
// check network status
go checkNetworkStatus()
s.MqttClient.Subscribe("sample-values/+/_/upgrade")
s.MqttClient.Subscribe("command/" + muid + "/#")
// init status
web.DeviceStatus = make(map[string]int)
mu := sys.GetMonitoringUnitCfg()
for _, sp := range mu.SamplePorts {
for _, su := range sp.SampleUnits {
web.DeviceStatus[su.ID] = 0
}
}
// set led status
go controlAppLEDStatus()
// check start log
go func() {
topic := "sample-values/" + getUUID() + "/_/restart"
payload := public.MessagePayload{
MonitoringUnitID: cfg.MQTT.ClientID,
SampleUnitID: "_",
ChannelID: "restart",
Name: "",
Value: 0,
Timestamp: public.UTCTimeStamp(),
Cov: true,
State: 0,
}
flag, err := bootflag.GetFlag()
if err != nil {
log.Warning(errors.As(err))
flag = "-1"
}
switch flag {
case "0":
payload.Value = 1
case "1":
payload.Value = 2
default:
// using 0
}
bp, _ := json.Marshal(payload)
for {
if GetNetworkStatus() == Online {
s.MqttClient.PublishSampleValues(topic, string(bp))
break
}
time.Sleep(time.Second)
}
if err := bootflag.CleanFlag(); err != nil {
log.Warning(errors.As(err))
}
}()
}
// Cleanup cleanup
func (s *BusServer) Cleanup() {
s.MqttClient.DisconnectServer()
}
func (s *BusServer) publishCacheFile() error {
files, err := filepath.Glob(filepath.Join(sys.GetBusManagerCfg().Cache.Directory, "*"))
if err != nil {
return err
}
if len(files) == 0 {
return nil
}
topic := "sample-block/" + getUUID()
for _, filename := range files {
f, err := os.Open(filename)
if err != nil {
continue
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
continue
}
sd := string(data)
ms := strings.Split(sd, "\n")
sp := []string{}
for _, m := range ms {
ps := strings.Split(m, "&")
if len(ps) < 2 {
continue
}
p := ps[1]
sp = append(sp, p)
}
d := strings.Join(sp, ",")
d = "[" + d + "]"
log.Printf("publish file: %s", filename)
// publish data
if err := s.MqttClient.PublishSampleValues(topic, d); err != nil {
return err
}
// remove cache file
os.Remove(filename)
}
return nil
}
func saveCacheToFile(cache []string) error {
cfg := sys.GetBusManagerCfg()
files, err := filepath.Glob(filepath.Join(cfg.Cache.Directory, "*"))
if err != nil {
return err
}
var ifl []int
for _, f := range files {
fn := filepath.Base(f)
i, _ := strconv.Atoi(fn)
ifl = append(ifl, i)
}
sort.Sort(sort.Reverse(sort.IntSlice(ifl)))
log.Println(ifl)
l := len(ifl)
if l > cfg.Cache.MaxFile {
rfs := ifl[cfg.Cache.MaxFile:]
// remove files
for _, f := range rfs {
os.Remove(filepath.Join(cfg.Cache.Directory, strconv.Itoa(f)))
}
}
var nf int
if l == 0 {
nf = 0
} else {
nf = ifl[0] + 1
}
filepath := filepath.Join(cfg.Cache.Directory, strconv.Itoa(nf))
f, err := os.Create(filepath)
if err != nil {
return err
}
defer f.Close()
data := strings.Join(cache, "\n")
if _, err = f.Write([]byte(data)); err != nil {
return fmt.Errorf("write file [%s] failed: %s", filepath, err)
}
return nil
}
// Publish publish implement
func (s *BusServer) Publish(ctx context.Context, in *pb.PublishRequest) (*pb.PublishReply, error) {
// check message and boradcast to clients when necessary
checkMessage(in.Topic, in.Payload)
go func(a string, b []byte) {
web.PayloadMap.Store(a, b)
web.PayloadChan <- b
}(in.Topic, []byte(in.Payload))
// check whether should capture or not
buscfg := sys.GetBusManagerCfg()
for _, cap := range buscfg.CaptureOption {
match := false
for _, signal := range cap.Signals {
if strings.Contains(in.Topic, signal.Topic) {
// topic coincident, check value
var p public.MessagePayload
if err := json.Unmarshal([]byte(in.Payload), &p); err != nil {
continue
}
val := ""
switch p.Value.(type) {
case int:
val = strconv.Itoa(p.Value.(int))
case float64:
val = strconv.Itoa(int(p.Value.(float64)))
case string:
val = p.Value.(string)
}
if signal.Value == val {
match = true
break
}
}
}
// match, capture
if match {
var p public.CommandPayload
var para public.CommandParameter
muid := getUUID()
chid := "capture"
para.Channel = chid
p.MonitoringUnit = muid
p.SampleUnit = cap.SUID
p.Channel = chid
p.StartTime = public.UTCTimeStamp()
p.Phase = public.PhaseExcuting
p.Parameters = para
topic := "command/" + muid + "/" + cap.SUID + "/" + chid
msg, err := json.Marshal(p)
if err != nil {
continue
}
// publish
s.MqttClient.PublishSampleValues(topic, string(msg))
}
}
// enable cache
if s.enableCache {
// online or not
if GetNetworkStatus() == Online {
// check cache files exist or not, send cache files first
if err := s.publishCacheFile(); err != nil {
log.Printf("publish failed: %s", err)
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// check cache exist, publish
if len(s.cache) > 0 {
for _, m := range s.cache {
ms := strings.Split(m, "&")
if len(ms) == 2 {
topic := ms[0]
payload := ms[1]
log.Printf("publish cache: %s", m)
if err := s.MqttClient.PublishSampleValues(topic, payload); err != nil {
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
}
}
s.cache = []string{}
}
// then publish current message
s.MqttClient.PublishSampleValues(in.Topic, in.Payload)
} else {
// offline, save data to cache, check cache quantity
if len(s.cache) < sys.GetBusManagerCfg().Cache.MaxMessage {
log.Printf("save to cache, current number: %d", len(s.cache))
s.cache = append(s.cache, in.Topic+"&"+in.Payload)
} else {
log.Printf("save to file")
// save to file
if err := saveCacheToFile(s.cache); err != nil {
log.Printf("save cache faield: %s", err)
}
s.cache = []string{}
}
}
} else {
if err := s.MqttClient.PublishSampleValues(in.Topic, in.Payload); err != nil {
return &pb.PublishReply{Status: public.StatusErr, Message: public.MessageErrUnknown}, nil
}
}
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// Subscribe subscribe implement
func (s *BusServer) Subscribe(ctx context.Context, in *pb.SubscribeRequest) (*pb.SubscribeReply, error) {
if err := s.MqttClient.Subscribe(in.Topic); err != nil {
return &pb.SubscribeReply{Status: public.StatusErr, Message: err.Error()}, nil
}
return &pb.SubscribeReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// get uuid
func getUUID() string {
// address := config.Configuration.SystemServer.Host + ":" + config.Configuration.SystemServer.Port
// // get info from hmu
// var client public.SystemClient
// if err := client.ConnectSystemDaemon(address); err != nil {
// log.Fatalf("connect system server failed, errmsg {%v}", err)
// }
// resp, err := client.UUID()
// if err != nil {
// log.Fatalf("get uuid failed, errmsg {%v}", err)
// }
// defer client.Disconnect()
// return resp.UUID
// read id from config file
return sys.GetMonitoringUnitCfg().ID
}
// contorl app led status
func controlAppLEDStatus() {
var appled extend.AppLED
if err := appled.Prepare(sys.GetBusManagerCfg().Model); err != nil {
buslog.LOG.Warningf("prepare app led failed, errmsg: %v", err)
return
}
defer appled.CleanUp()
status := 0
// loop
for {
// toggle status
status = status ^ 1
// sleep for a moment, interval set by mqtt connect/disconnect handler
time.Sleep(time.Millisecond * time.Duration(LEDSetInterval))
if err := appled.SetLEDStatus(status); err != nil {
// log.Printf("set appled %v", err)
}
}
}
func checkMessage(topic, payload string) {
s := strings.Split(topic, "/")
if len(s) != 4 {
return
}
suid := s[2]
channelid := s[3]
if channelid != "_state" {
return
}
// parse payload, get value
var p public.MessagePayload
if err := json.Unmarshal([]byte(payload), &p); err != nil {
log.Printf("parse payload fail, payload: %s, errmsg: %v", payload, err)
return
}
v := int(p.Value.(float64))
if v == -1 {
v = 0
} else {
v = 1
}
// set status
lastvalue, ok := web.DeviceStatus[suid]
if !ok {
log.Printf("channel id `%s` do not exist", suid)
return
}
if v != lastvalue {
// update status, broadcast
web.DeviceStatus[suid] = v
bs, _ := web.DeviceStatusToBytes()
web.WSHub.BroadcastMessage(bs)
}
}
func checkNetworkStatus() {
timer := time.NewTicker(5 * time.Second)
lastRestartTime := time.Now()
cfg := sys.GetBusManagerCfg()
netCheckList := []string{cfg.MQTT.Host + ":" + cfg.MQTT.Port}
netCheckList = append(netCheckList, cfg.Web.NetChecking.Hosts...)
doTimeout := cfg.Web.NetChecking.Timeout
if doTimeout == 0 {
doTimeout = 5
}
doTimes := cfg.Web.NetChecking.DoTimes
for {
select {
case <-timer.C:
status := GetNetworkStatus()
// 大部分网络是正常的,优化走这个
if status == Online {
lastRestartTime = time.Now()
continue
}
if status == WaitingOnline && len(cfg.Web.NetChecking.Hosts) > 0 {
// 处理检测到网络正常时的ticker事件
sysd := sys.ConnectSystemDaemon(cfg.Model, &cfg.SystemServer)
if _, err := sysd.AutoCheckNetworking(netCheckList, time.Duration(doTimeout)*1e9); err == nil {
SetNetworkStatus(WaitingOnline, false)
} else {
// 在等待mqtt上线的过程中发如果检查到网络又下线了,恢复到网络不可用的状态。
SetNetworkStatus(Offline, false)
}
sysd.Disconnect()
continue
}
if status == Offline {
if doTimes > 0 && len(cfg.Web.NetChecking.Hosts) > 0 {
doTimes--
// 先尝试网络
sysd := sys.ConnectSystemDaemon(cfg.Model, &cfg.SystemServer)
if _, err := sysd.AutoCheckNetworking(netCheckList, time.Duration(doTimeout)*1e9); err == nil {
// 检测到网络正常了, 执行等待mqtt上线的逻辑。
sysd.Disconnect()
SetNetworkStatus(WaitingOnline, false)
continue
}
sysd.Disconnect()
// 网络失败,走失败的逻辑
}
now := time.Now()
d := now.Sub(lastRestartTime)
rd := time.Duration(cfg.Web.Restart.Duration) * time.Second
if d >= rd {
lastRestartTime = now
rt := cfg.Web.Restart.Times
if rt < cfg.Web.Restart.Max {
// add retart times
cfg.Web.Restart.Times++ | buslog.LOG.Infof("software restart: %d times", cfg.Web.Restart.Times)
// software rstart
if err := public.RestartApp(cfg.Model, errors.New(public.RestartByCommunicationInterrupt)); err != nil {
buslog.LOG.Warning(errors.As(err))
}
} else {
// clear times
cfg.Web.Restart.Times = 0
if err := sys.SaveBusManagerCfg(cfg); err != nil {
buslog.LOG.Warningf("save bus config failed, errmsg {%v}", err)
}
buslog.LOG.Info("hardware restart")
// hardware restart
if err := public.Reboot(errors.New(public.RebootByCommunicationInterrupt)); err != nil {
buslog.LOG.Warning(errors.As(err))
}
}
}
}
}
}
}
func downloadDependentDeviceLibrary() error {
cfg := sys.GetBusManagerCfg()
mu := sys.GetMonitoringUnitCfg()
elementPath := os.ExpandEnv(etc.Etc.String("public", "element-dir"))
for _, sp := range mu.SamplePorts {
for _, su := range sp.SampleUnits {
// check device library exist or not
ep := filepath.Join(elementPath, su.Element)
_, err := os.Stat(ep)
if err == nil {
// do not update when exist
log.Debugf("element library [%s] exist", ep)
continue
}
// other error occured
if !os.IsNotExist(err) {
log.Debugf("check element library [%s] existence fail", errors.As(err, ep))
continue
}
// do not exist, download and save
np := cfg.Web.ElementLib.Server + su.Element
if err := public.HTTPDownloadFile(np, ep); err != nil {
fmt.Printf("download or save element library [%s] failed: %s\n", su.Element, errors.As(err))
continue
}
fmt.Printf("download or save element library [%s] success\n", su.Element)
}
}
return nil
} | if err := sys.SaveBusManagerCfg(cfg); err != nil {
buslog.LOG.Warningf("save bus config failed, errmsg {%v}", err)
}
| random_line_split |
server.go | /*
*
* Copyright 2018 huayuan-iot
*
* Author: lynn
* Date: 2018/07/03
* Despcription: bus server implement
*
*/
package module
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
pb "clc.hmu/app/busmanager/buspb"
"clc.hmu/app/busmanager/module/web"
"clc.hmu/app/extend"
"clc.hmu/app/public"
"clc.hmu/app/public/log"
"clc.hmu/app/public/log/bootflag"
"clc.hmu/app/public/log/buslog"
"clc.hmu/app/public/store/etc"
"clc.hmu/app/public/sys"
"github.com/gwaylib/errors"
)
// BusServer is used to implement busmanager.BusServer.
type BusServer struct {
MqttClient MQTTClient
enableCache bool
cache []string
}
// LEDSetInterval set led interval
var LEDSetInterval = 50
// status
const (
Offline = -1 // mqtt确认了下线
Online = 0 // mqtt确认了上线
WaitingOnline = 1 // 检测到网络正常,等待mqtt上线
)
// NetworkStatus network status, 0 for connect, -1 for disconnect
var (
networkStatus = -1
networkStatusSync = sync.Mutex{}
)
func GetNetworkStatus() int {
networkStatusSync.Lock()
defer networkStatusSync.Unlock()
return networkStatus
}
func SetNetworkStatus(status int, mqtt bool) {
networkStatusSync.Lock()
defer networkStatusSync.Unlock()
if networkStatus == Online && !mqtt {
// 当mqtt在线时,只能由mqtt处理
return
}
networkStatus = status
}
// Init do some init operation
func (s *BusServer) Init() {
cfg := sys.GetBusManagerCfg()
// download element library
downloadDependentDeviceLibrary()
if err := web.ReadVideoConfig(); err != nil {
log.Printf("open video config file failed, errmsg {%v}", err)
}
s.enableCache = true
// check directory exist or not
_, err := os.Stat(sys.GetBusManagerCfg().Cache.Directory)
if err != nil {
if os.IsNotExist(err) {
// do not exist, create
if err := os.Mkdir(cfg.Cache.Directory, os.ModeDir); err != nil {
log.Printf("create directory failed: %s", err)
s.enableCache = false
}
} else {
s.enableCache = false
}
}
log.Printf("enable cache: %v", s.enableCache)
muid := getUUID()
willtopic := "sample-values/" + muid + "/_/_state"
conntopic := willtopic
payload := public.MessagePayload{
MonitoringUnitID: muid,
SampleUnitID: "_",
ChannelID: "_state",
Name: "采集器连接状态",
Value: -1,
// Timestamp: public.UTCTimeStamp(),// 此值因与服务器发生了冲突,估不上报
Cov: true,
State: 0,
}
willpayload, _ := json.Marshal(payload)
payload.Value = 0
connpayload, _ := json.Marshal(payload)
s.MqttClient = NewMQTTClient(SubMessageHandler, willtopic, string(willpayload), conntopic, string(connpayload))
if err := s.MqttClient.ConnectServer(); err != nil {
log.Printf("connect mqtt server failed, errmsg {%v}, start reconnect...", err)
// start to reconnect
go s.MqttClient.ReconnectServer()
}
// check network status
go checkNetworkStatus()
s.MqttClient.Subscribe("sample-values/+/_/upgrade")
s.MqttClient.Subscribe("command/" + muid + "/#")
// init status
web.DeviceStatus = make(map[string]int)
mu := sys.GetMonitoringUnitCfg()
for _, sp := range mu.SamplePorts {
for _, su := range sp.SampleUnits {
web.DeviceStatus[su.ID] = 0
}
}
// set led status
go controlAppLEDStatus()
// check start log
go func() {
topic := "sample-values/" + getUUID() + "/_/restart"
payload := public.MessagePayload{
MonitoringUnitID: cfg.MQTT.ClientID,
SampleUnitID: "_",
ChannelID: "restart",
Name: "",
Value: 0,
Timestamp: public.UTCTimeStamp(),
Cov: true,
State: 0,
}
flag, err := bootflag.GetFlag()
if err != nil {
log.Warning(errors.As(err))
flag = "-1"
}
switch flag {
case "0":
payload.Value = 1
case "1":
payload.Value = 2
default:
// using 0
}
bp, _ := json.Marshal(payload)
for {
if GetNetworkStatus() == Online {
s.MqttClient.PublishSampleValues(topic, string(bp))
break
}
time.Sleep(time.Second)
}
if err := bootflag.CleanFlag(); err != nil {
log.Warning(errors.As(err))
}
}()
}
// Cleanup cleanup
func (s *BusServer) Cleanup() {
s.MqttClient.DisconnectServer()
}
func (s *BusServer) publishCacheFile() error {
files, err := filepath.Glob(filepath.Join(sys.GetBusManagerCfg().Cache.Directory, "*"))
if err != nil {
return err
}
if len(files) == 0 {
return nil
}
topic := "sample-block/" + getUUID()
for _, filename := range files {
f, err := os.Open(filename)
if err != nil {
continue
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
continue
}
sd := string(data)
ms := strings.Split(sd, "\n")
sp := []string{}
for _, m := range ms {
ps := strings.Split(m, "&")
if len(ps) < 2 {
continue
}
p := ps[1]
sp = append(sp, p)
}
d := strings.Join(sp, ",")
d = "[" + d + "]"
log.Printf("publish file: %s", filename)
// publish data
if err := s.MqttClient.PublishSampleValues(topic, d); err != nil {
return err
}
// remove cache file
os.Remove(filename)
}
return nil
}
func saveCacheToFile(cache []string) error {
cfg := sys.GetBusManagerCfg()
files, err := filepath.Glob(filepath.Join(cfg.Cache.Directory, "*"))
if err != nil {
return err
}
var ifl []int
for _, f := range files {
fn := filepath.Base(f)
i, _ := strconv.Atoi(fn)
ifl = append(ifl, i)
}
sort.Sort(sort.Reverse(sort.In | fs := ifl[cfg.Cache.MaxFile:]
// remove files
for _, f := range rfs {
os.Remove(filepath.Join(cfg.Cache.Directory, strconv.Itoa(f)))
}
}
var nf int
if l == 0 {
nf = 0
} else {
nf = ifl[0] + 1
}
filepath := filepath.Join(cfg.Cache.Directory, strconv.Itoa(nf))
f, err := os.Create(filepath)
if err != nil {
return err
}
defer f.Close()
data := strings.Join(cache, "\n")
if _, err = f.Write([]byte(data)); err != nil {
return fmt.Errorf("write file [%s] failed: %s", filepath, err)
}
return nil
}
// Publish publish implement
func (s *BusServer) Publish(ctx context.Context, in *pb.PublishRequest) (*pb.PublishReply, error) {
// check message and boradcast to clients when necessary
checkMessage(in.Topic, in.Payload)
go func(a string, b []byte) {
web.PayloadMap.Store(a, b)
web.PayloadChan <- b
}(in.Topic, []byte(in.Payload))
// check whether should capture or not
buscfg := sys.GetBusManagerCfg()
for _, cap := range buscfg.CaptureOption {
match := false
for _, signal := range cap.Signals {
if strings.Contains(in.Topic, signal.Topic) {
// topic coincident, check value
var p public.MessagePayload
if err := json.Unmarshal([]byte(in.Payload), &p); err != nil {
continue
}
val := ""
switch p.Value.(type) {
case int:
val = strconv.Itoa(p.Value.(int))
case float64:
val = strconv.Itoa(int(p.Value.(float64)))
case string:
val = p.Value.(string)
}
if signal.Value == val {
match = true
break
}
}
}
// match, capture
if match {
var p public.CommandPayload
var para public.CommandParameter
muid := getUUID()
chid := "capture"
para.Channel = chid
p.MonitoringUnit = muid
p.SampleUnit = cap.SUID
p.Channel = chid
p.StartTime = public.UTCTimeStamp()
p.Phase = public.PhaseExcuting
p.Parameters = para
topic := "command/" + muid + "/" + cap.SUID + "/" + chid
msg, err := json.Marshal(p)
if err != nil {
continue
}
// publish
s.MqttClient.PublishSampleValues(topic, string(msg))
}
}
// enable cache
if s.enableCache {
// online or not
if GetNetworkStatus() == Online {
// check cache files exist or not, send cache files first
if err := s.publishCacheFile(); err != nil {
log.Printf("publish failed: %s", err)
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// check cache exist, publish
if len(s.cache) > 0 {
for _, m := range s.cache {
ms := strings.Split(m, "&")
if len(ms) == 2 {
topic := ms[0]
payload := ms[1]
log.Printf("publish cache: %s", m)
if err := s.MqttClient.PublishSampleValues(topic, payload); err != nil {
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
}
}
s.cache = []string{}
}
// then publish current message
s.MqttClient.PublishSampleValues(in.Topic, in.Payload)
} else {
// offline, save data to cache, check cache quantity
if len(s.cache) < sys.GetBusManagerCfg().Cache.MaxMessage {
log.Printf("save to cache, current number: %d", len(s.cache))
s.cache = append(s.cache, in.Topic+"&"+in.Payload)
} else {
log.Printf("save to file")
// save to file
if err := saveCacheToFile(s.cache); err != nil {
log.Printf("save cache faield: %s", err)
}
s.cache = []string{}
}
}
} else {
if err := s.MqttClient.PublishSampleValues(in.Topic, in.Payload); err != nil {
return &pb.PublishReply{Status: public.StatusErr, Message: public.MessageErrUnknown}, nil
}
}
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// Subscribe subscribe implement
func (s *BusServer) Subscribe(ctx context.Context, in *pb.SubscribeRequest) (*pb.SubscribeReply, error) {
if err := s.MqttClient.Subscribe(in.Topic); err != nil {
return &pb.SubscribeReply{Status: public.StatusErr, Message: err.Error()}, nil
}
return &pb.SubscribeReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// get uuid
func getUUID() string {
// address := config.Configuration.SystemServer.Host + ":" + config.Configuration.SystemServer.Port
// // get info from hmu
// var client public.SystemClient
// if err := client.ConnectSystemDaemon(address); err != nil {
// log.Fatalf("connect system server failed, errmsg {%v}", err)
// }
// resp, err := client.UUID()
// if err != nil {
// log.Fatalf("get uuid failed, errmsg {%v}", err)
// }
// defer client.Disconnect()
// return resp.UUID
// read id from config file
return sys.GetMonitoringUnitCfg().ID
}
// contorl app led status
func controlAppLEDStatus() {
var appled extend.AppLED
if err := appled.Prepare(sys.GetBusManagerCfg().Model); err != nil {
buslog.LOG.Warningf("prepare app led failed, errmsg: %v", err)
return
}
defer appled.CleanUp()
status := 0
// loop
for {
// toggle status
status = status ^ 1
// sleep for a moment, interval set by mqtt connect/disconnect handler
time.Sleep(time.Millisecond * time.Duration(LEDSetInterval))
if err := appled.SetLEDStatus(status); err != nil {
// log.Printf("set appled %v", err)
}
}
}
func checkMessage(topic, payload string) {
s := strings.Split(topic, "/")
if len(s) != 4 {
return
}
suid := s[2]
channelid := s[3]
if channelid != "_state" {
return
}
// parse payload, get value
var p public.MessagePayload
if err := json.Unmarshal([]byte(payload), &p); err != nil {
log.Printf("parse payload fail, payload: %s, errmsg: %v", payload, err)
return
}
v := int(p.Value.(float64))
if v == -1 {
v = 0
} else {
v = 1
}
// set status
lastvalue, ok := web.DeviceStatus[suid]
if !ok {
log.Printf("channel id `%s` do not exist", suid)
return
}
if v != lastvalue {
// update status, broadcast
web.DeviceStatus[suid] = v
bs, _ := web.DeviceStatusToBytes()
web.WSHub.BroadcastMessage(bs)
}
}
func checkNetworkStatus() {
timer := time.NewTicker(5 * time.Second)
lastRestartTime := time.Now()
cfg := sys.GetBusManagerCfg()
netCheckList := []string{cfg.MQTT.Host + ":" + cfg.MQTT.Port}
netCheckList = append(netCheckList, cfg.Web.NetChecking.Hosts...)
doTimeout := cfg.Web.NetChecking.Timeout
if doTimeout == 0 {
doTimeout = 5
}
doTimes := cfg.Web.NetChecking.DoTimes
for {
select {
case <-timer.C:
status := GetNetworkStatus()
// 大部分网络是正常的,优化走这个
if status == Online {
lastRestartTime = time.Now()
continue
}
if status == WaitingOnline && len(cfg.Web.NetChecking.Hosts) > 0 {
// 处理检测到网络正常时的ticker事件
sysd := sys.ConnectSystemDaemon(cfg.Model, &cfg.SystemServer)
if _, err := sysd.AutoCheckNetworking(netCheckList, time.Duration(doTimeout)*1e9); err == nil {
SetNetworkStatus(WaitingOnline, false)
} else {
// 在等待mqtt上线的过程中发如果检查到网络又下线了,恢复到网络不可用的状态。
SetNetworkStatus(Offline, false)
}
sysd.Disconnect()
continue
}
if status == Offline {
if doTimes > 0 && len(cfg.Web.NetChecking.Hosts) > 0 {
doTimes--
// 先尝试网络
sysd := sys.ConnectSystemDaemon(cfg.Model, &cfg.SystemServer)
if _, err := sysd.AutoCheckNetworking(netCheckList, time.Duration(doTimeout)*1e9); err == nil {
// 检测到网络正常了, 执行等待mqtt上线的逻辑。
sysd.Disconnect()
SetNetworkStatus(WaitingOnline, false)
continue
}
sysd.Disconnect()
// 网络失败,走失败的逻辑
}
now := time.Now()
d := now.Sub(lastRestartTime)
rd := time.Duration(cfg.Web.Restart.Duration) * time.Second
if d >= rd {
lastRestartTime = now
rt := cfg.Web.Restart.Times
if rt < cfg.Web.Restart.Max {
// add retart times
cfg.Web.Restart.Times++
if err := sys.SaveBusManagerCfg(cfg); err != nil {
buslog.LOG.Warningf("save bus config failed, errmsg {%v}", err)
}
buslog.LOG.Infof("software restart: %d times", cfg.Web.Restart.Times)
// software rstart
if err := public.RestartApp(cfg.Model, errors.New(public.RestartByCommunicationInterrupt)); err != nil {
buslog.LOG.Warning(errors.As(err))
}
} else {
// clear times
cfg.Web.Restart.Times = 0
if err := sys.SaveBusManagerCfg(cfg); err != nil {
buslog.LOG.Warningf("save bus config failed, errmsg {%v}", err)
}
buslog.LOG.Info("hardware restart")
// hardware restart
if err := public.Reboot(errors.New(public.RebootByCommunicationInterrupt)); err != nil {
buslog.LOG.Warning(errors.As(err))
}
}
}
}
}
}
}
func downloadDependentDeviceLibrary() error {
cfg := sys.GetBusManagerCfg()
mu := sys.GetMonitoringUnitCfg()
elementPath := os.ExpandEnv(etc.Etc.String("public", "element-dir"))
for _, sp := range mu.SamplePorts {
for _, su := range sp.SampleUnits {
// check device library exist or not
ep := filepath.Join(elementPath, su.Element)
_, err := os.Stat(ep)
if err == nil {
// do not update when exist
log.Debugf("element library [%s] exist", ep)
continue
}
// other error occured
if !os.IsNotExist(err) {
log.Debugf("check element library [%s] existence fail", errors.As(err, ep))
continue
}
// do not exist, download and save
np := cfg.Web.ElementLib.Server + su.Element
if err := public.HTTPDownloadFile(np, ep); err != nil {
fmt.Printf("download or save element library [%s] failed: %s\n", su.Element, errors.As(err))
continue
}
fmt.Printf("download or save element library [%s] success\n", su.Element)
}
}
return nil
}
| tSlice(ifl)))
log.Println(ifl)
l := len(ifl)
if l > cfg.Cache.MaxFile {
r | conditional_block |
server.go | /*
*
* Copyright 2018 huayuan-iot
*
* Author: lynn
* Date: 2018/07/03
* Despcription: bus server implement
*
*/
package module
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
pb "clc.hmu/app/busmanager/buspb"
"clc.hmu/app/busmanager/module/web"
"clc.hmu/app/extend"
"clc.hmu/app/public"
"clc.hmu/app/public/log"
"clc.hmu/app/public/log/bootflag"
"clc.hmu/app/public/log/buslog"
"clc.hmu/app/public/store/etc"
"clc.hmu/app/public/sys"
"github.com/gwaylib/errors"
)
// BusServer is used to implement busmanager.BusServer.
type BusServer struct {
MqttClient MQTTClient
enableCache bool
cache []string
}
// LEDSetInterval set led interval
var LEDSetInterval = 50
// status
const (
Offline = -1 // mqtt确认了下线
Online = 0 // mqtt确认了上线
WaitingOnline = 1 // 检测到网络正常,等待mqtt上线
)
// NetworkStatus network status, 0 for connect, -1 for disconnect
var (
networkStatus = -1
networkStatusSync = sync.Mutex{}
)
func GetNetworkStatus() int {
networkStatusSync.Lock()
defer networkStatusSync.Unlock()
return networkStatus
}
func SetNetworkStatus(status int, mqtt bool) {
networkStatusSync.Lock()
defer networkStatusSync.Unlock()
if networkStatus == Online && !mqtt {
// 当mqtt在线时,只能由mqtt处理
return
}
networkStatus = status
}
// Init do some init operation
func (s *BusServer) Init() {
cfg := sys.GetBusManagerCfg()
// download element li | y
downloadDependentDeviceLibrary()
if err := web.ReadVideoConfig(); err != nil {
log.Printf("open video config file failed, errmsg {%v}", err)
}
s.enableCache = true
// check directory exist or not
_, err := os.Stat(sys.GetBusManagerCfg().Cache.Directory)
if err != nil {
if os.IsNotExist(err) {
// do not exist, create
if err := os.Mkdir(cfg.Cache.Directory, os.ModeDir); err != nil {
log.Printf("create directory failed: %s", err)
s.enableCache = false
}
} else {
s.enableCache = false
}
}
log.Printf("enable cache: %v", s.enableCache)
muid := getUUID()
willtopic := "sample-values/" + muid + "/_/_state"
conntopic := willtopic
payload := public.MessagePayload{
MonitoringUnitID: muid,
SampleUnitID: "_",
ChannelID: "_state",
Name: "采集器连接状态",
Value: -1,
// Timestamp: public.UTCTimeStamp(),// 此值因与服务器发生了冲突,估不上报
Cov: true,
State: 0,
}
willpayload, _ := json.Marshal(payload)
payload.Value = 0
connpayload, _ := json.Marshal(payload)
s.MqttClient = NewMQTTClient(SubMessageHandler, willtopic, string(willpayload), conntopic, string(connpayload))
if err := s.MqttClient.ConnectServer(); err != nil {
log.Printf("connect mqtt server failed, errmsg {%v}, start reconnect...", err)
// start to reconnect
go s.MqttClient.ReconnectServer()
}
// check network status
go checkNetworkStatus()
s.MqttClient.Subscribe("sample-values/+/_/upgrade")
s.MqttClient.Subscribe("command/" + muid + "/#")
// init status
web.DeviceStatus = make(map[string]int)
mu := sys.GetMonitoringUnitCfg()
for _, sp := range mu.SamplePorts {
for _, su := range sp.SampleUnits {
web.DeviceStatus[su.ID] = 0
}
}
// set led status
go controlAppLEDStatus()
// check start log
go func() {
topic := "sample-values/" + getUUID() + "/_/restart"
payload := public.MessagePayload{
MonitoringUnitID: cfg.MQTT.ClientID,
SampleUnitID: "_",
ChannelID: "restart",
Name: "",
Value: 0,
Timestamp: public.UTCTimeStamp(),
Cov: true,
State: 0,
}
flag, err := bootflag.GetFlag()
if err != nil {
log.Warning(errors.As(err))
flag = "-1"
}
switch flag {
case "0":
payload.Value = 1
case "1":
payload.Value = 2
default:
// using 0
}
bp, _ := json.Marshal(payload)
for {
if GetNetworkStatus() == Online {
s.MqttClient.PublishSampleValues(topic, string(bp))
break
}
time.Sleep(time.Second)
}
if err := bootflag.CleanFlag(); err != nil {
log.Warning(errors.As(err))
}
}()
}
// Cleanup cleanup
func (s *BusServer) Cleanup() {
s.MqttClient.DisconnectServer()
}
func (s *BusServer) publishCacheFile() error {
files, err := filepath.Glob(filepath.Join(sys.GetBusManagerCfg().Cache.Directory, "*"))
if err != nil {
return err
}
if len(files) == 0 {
return nil
}
topic := "sample-block/" + getUUID()
for _, filename := range files {
f, err := os.Open(filename)
if err != nil {
continue
}
defer f.Close()
data, err := ioutil.ReadAll(f)
if err != nil {
continue
}
sd := string(data)
ms := strings.Split(sd, "\n")
sp := []string{}
for _, m := range ms {
ps := strings.Split(m, "&")
if len(ps) < 2 {
continue
}
p := ps[1]
sp = append(sp, p)
}
d := strings.Join(sp, ",")
d = "[" + d + "]"
log.Printf("publish file: %s", filename)
// publish data
if err := s.MqttClient.PublishSampleValues(topic, d); err != nil {
return err
}
// remove cache file
os.Remove(filename)
}
return nil
}
func saveCacheToFile(cache []string) error {
cfg := sys.GetBusManagerCfg()
files, err := filepath.Glob(filepath.Join(cfg.Cache.Directory, "*"))
if err != nil {
return err
}
var ifl []int
for _, f := range files {
fn := filepath.Base(f)
i, _ := strconv.Atoi(fn)
ifl = append(ifl, i)
}
sort.Sort(sort.Reverse(sort.IntSlice(ifl)))
log.Println(ifl)
l := len(ifl)
if l > cfg.Cache.MaxFile {
rfs := ifl[cfg.Cache.MaxFile:]
// remove files
for _, f := range rfs {
os.Remove(filepath.Join(cfg.Cache.Directory, strconv.Itoa(f)))
}
}
var nf int
if l == 0 {
nf = 0
} else {
nf = ifl[0] + 1
}
filepath := filepath.Join(cfg.Cache.Directory, strconv.Itoa(nf))
f, err := os.Create(filepath)
if err != nil {
return err
}
defer f.Close()
data := strings.Join(cache, "\n")
if _, err = f.Write([]byte(data)); err != nil {
return fmt.Errorf("write file [%s] failed: %s", filepath, err)
}
return nil
}
// Publish publish implement
func (s *BusServer) Publish(ctx context.Context, in *pb.PublishRequest) (*pb.PublishReply, error) {
// check message and boradcast to clients when necessary
checkMessage(in.Topic, in.Payload)
go func(a string, b []byte) {
web.PayloadMap.Store(a, b)
web.PayloadChan <- b
}(in.Topic, []byte(in.Payload))
// check whether should capture or not
buscfg := sys.GetBusManagerCfg()
for _, cap := range buscfg.CaptureOption {
match := false
for _, signal := range cap.Signals {
if strings.Contains(in.Topic, signal.Topic) {
// topic coincident, check value
var p public.MessagePayload
if err := json.Unmarshal([]byte(in.Payload), &p); err != nil {
continue
}
val := ""
switch p.Value.(type) {
case int:
val = strconv.Itoa(p.Value.(int))
case float64:
val = strconv.Itoa(int(p.Value.(float64)))
case string:
val = p.Value.(string)
}
if signal.Value == val {
match = true
break
}
}
}
// match, capture
if match {
var p public.CommandPayload
var para public.CommandParameter
muid := getUUID()
chid := "capture"
para.Channel = chid
p.MonitoringUnit = muid
p.SampleUnit = cap.SUID
p.Channel = chid
p.StartTime = public.UTCTimeStamp()
p.Phase = public.PhaseExcuting
p.Parameters = para
topic := "command/" + muid + "/" + cap.SUID + "/" + chid
msg, err := json.Marshal(p)
if err != nil {
continue
}
// publish
s.MqttClient.PublishSampleValues(topic, string(msg))
}
}
// enable cache
if s.enableCache {
// online or not
if GetNetworkStatus() == Online {
// check cache files exist or not, send cache files first
if err := s.publishCacheFile(); err != nil {
log.Printf("publish failed: %s", err)
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// check cache exist, publish
if len(s.cache) > 0 {
for _, m := range s.cache {
ms := strings.Split(m, "&")
if len(ms) == 2 {
topic := ms[0]
payload := ms[1]
log.Printf("publish cache: %s", m)
if err := s.MqttClient.PublishSampleValues(topic, payload); err != nil {
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
}
}
s.cache = []string{}
}
// then publish current message
s.MqttClient.PublishSampleValues(in.Topic, in.Payload)
} else {
// offline, save data to cache, check cache quantity
if len(s.cache) < sys.GetBusManagerCfg().Cache.MaxMessage {
log.Printf("save to cache, current number: %d", len(s.cache))
s.cache = append(s.cache, in.Topic+"&"+in.Payload)
} else {
log.Printf("save to file")
// save to file
if err := saveCacheToFile(s.cache); err != nil {
log.Printf("save cache faield: %s", err)
}
s.cache = []string{}
}
}
} else {
if err := s.MqttClient.PublishSampleValues(in.Topic, in.Payload); err != nil {
return &pb.PublishReply{Status: public.StatusErr, Message: public.MessageErrUnknown}, nil
}
}
return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// Subscribe subscribe implement
func (s *BusServer) Subscribe(ctx context.Context, in *pb.SubscribeRequest) (*pb.SubscribeReply, error) {
if err := s.MqttClient.Subscribe(in.Topic); err != nil {
return &pb.SubscribeReply{Status: public.StatusErr, Message: err.Error()}, nil
}
return &pb.SubscribeReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// get uuid
func getUUID() string {
// address := config.Configuration.SystemServer.Host + ":" + config.Configuration.SystemServer.Port
// // get info from hmu
// var client public.SystemClient
// if err := client.ConnectSystemDaemon(address); err != nil {
// log.Fatalf("connect system server failed, errmsg {%v}", err)
// }
// resp, err := client.UUID()
// if err != nil {
// log.Fatalf("get uuid failed, errmsg {%v}", err)
// }
// defer client.Disconnect()
// return resp.UUID
// read id from config file
return sys.GetMonitoringUnitCfg().ID
}
// contorl app led status
func controlAppLEDStatus() {
var appled extend.AppLED
if err := appled.Prepare(sys.GetBusManagerCfg().Model); err != nil {
buslog.LOG.Warningf("prepare app led failed, errmsg: %v", err)
return
}
defer appled.CleanUp()
status := 0
// loop
for {
// toggle status
status = status ^ 1
// sleep for a moment, interval set by mqtt connect/disconnect handler
time.Sleep(time.Millisecond * time.Duration(LEDSetInterval))
if err := appled.SetLEDStatus(status); err != nil {
// log.Printf("set appled %v", err)
}
}
}
func checkMessage(topic, payload string) {
s := strings.Split(topic, "/")
if len(s) != 4 {
return
}
suid := s[2]
channelid := s[3]
if channelid != "_state" {
return
}
// parse payload, get value
var p public.MessagePayload
if err := json.Unmarshal([]byte(payload), &p); err != nil {
log.Printf("parse payload fail, payload: %s, errmsg: %v", payload, err)
return
}
v := int(p.Value.(float64))
if v == -1 {
v = 0
} else {
v = 1
}
// set status
lastvalue, ok := web.DeviceStatus[suid]
if !ok {
log.Printf("channel id `%s` do not exist", suid)
return
}
if v != lastvalue {
// update status, broadcast
web.DeviceStatus[suid] = v
bs, _ := web.DeviceStatusToBytes()
web.WSHub.BroadcastMessage(bs)
}
}
func checkNetworkStatus() {
timer := time.NewTicker(5 * time.Second)
lastRestartTime := time.Now()
cfg := sys.GetBusManagerCfg()
netCheckList := []string{cfg.MQTT.Host + ":" + cfg.MQTT.Port}
netCheckList = append(netCheckList, cfg.Web.NetChecking.Hosts...)
doTimeout := cfg.Web.NetChecking.Timeout
if doTimeout == 0 {
doTimeout = 5
}
doTimes := cfg.Web.NetChecking.DoTimes
for {
select {
case <-timer.C:
status := GetNetworkStatus()
// 大部分网络是正常的,优化走这个
if status == Online {
lastRestartTime = time.Now()
continue
}
if status == WaitingOnline && len(cfg.Web.NetChecking.Hosts) > 0 {
// 处理检测到网络正常时的ticker事件
sysd := sys.ConnectSystemDaemon(cfg.Model, &cfg.SystemServer)
if _, err := sysd.AutoCheckNetworking(netCheckList, time.Duration(doTimeout)*1e9); err == nil {
SetNetworkStatus(WaitingOnline, false)
} else {
// 在等待mqtt上线的过程中发如果检查到网络又下线了,恢复到网络不可用的状态。
SetNetworkStatus(Offline, false)
}
sysd.Disconnect()
continue
}
if status == Offline {
if doTimes > 0 && len(cfg.Web.NetChecking.Hosts) > 0 {
doTimes--
// 先尝试网络
sysd := sys.ConnectSystemDaemon(cfg.Model, &cfg.SystemServer)
if _, err := sysd.AutoCheckNetworking(netCheckList, time.Duration(doTimeout)*1e9); err == nil {
// 检测到网络正常了, 执行等待mqtt上线的逻辑。
sysd.Disconnect()
SetNetworkStatus(WaitingOnline, false)
continue
}
sysd.Disconnect()
// 网络失败,走失败的逻辑
}
now := time.Now()
d := now.Sub(lastRestartTime)
rd := time.Duration(cfg.Web.Restart.Duration) * time.Second
if d >= rd {
lastRestartTime = now
rt := cfg.Web.Restart.Times
if rt < cfg.Web.Restart.Max {
// add retart times
cfg.Web.Restart.Times++
if err := sys.SaveBusManagerCfg(cfg); err != nil {
buslog.LOG.Warningf("save bus config failed, errmsg {%v}", err)
}
buslog.LOG.Infof("software restart: %d times", cfg.Web.Restart.Times)
// software rstart
if err := public.RestartApp(cfg.Model, errors.New(public.RestartByCommunicationInterrupt)); err != nil {
buslog.LOG.Warning(errors.As(err))
}
} else {
// clear times
cfg.Web.Restart.Times = 0
if err := sys.SaveBusManagerCfg(cfg); err != nil {
buslog.LOG.Warningf("save bus config failed, errmsg {%v}", err)
}
buslog.LOG.Info("hardware restart")
// hardware restart
if err := public.Reboot(errors.New(public.RebootByCommunicationInterrupt)); err != nil {
buslog.LOG.Warning(errors.As(err))
}
}
}
}
}
}
}
func downloadDependentDeviceLibrary() error {
cfg := sys.GetBusManagerCfg()
mu := sys.GetMonitoringUnitCfg()
elementPath := os.ExpandEnv(etc.Etc.String("public", "element-dir"))
for _, sp := range mu.SamplePorts {
for _, su := range sp.SampleUnits {
// check device library exist or not
ep := filepath.Join(elementPath, su.Element)
_, err := os.Stat(ep)
if err == nil {
// do not update when exist
log.Debugf("element library [%s] exist", ep)
continue
}
// other error occured
if !os.IsNotExist(err) {
log.Debugf("check element library [%s] existence fail", errors.As(err, ep))
continue
}
// do not exist, download and save
np := cfg.Web.ElementLib.Server + su.Element
if err := public.HTTPDownloadFile(np, ep); err != nil {
fmt.Printf("download or save element library [%s] failed: %s\n", su.Element, errors.As(err))
continue
}
fmt.Printf("download or save element library [%s] success\n", su.Element)
}
}
return nil
}
| brar | identifier_name |
server.go | /*
*
* Copyright 2018 huayuan-iot
*
* Author: lynn
* Date: 2018/07/03
* Despcription: bus server implement
*
*/
package module
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
pb "clc.hmu/app/busmanager/buspb"
"clc.hmu/app/busmanager/module/web"
"clc.hmu/app/extend"
"clc.hmu/app/public"
"clc.hmu/app/public/log"
"clc.hmu/app/public/log/bootflag"
"clc.hmu/app/public/log/buslog"
"clc.hmu/app/public/store/etc"
"clc.hmu/app/public/sys"
"github.com/gwaylib/errors"
)
// BusServer is used to implement busmanager.BusServer.
type BusServer struct {
MqttClient MQTTClient
enableCache bool
cache []string
}
// LEDSetInterval set led interval
var LEDSetInterval = 50
// status
const (
Offline = -1 // mqtt确认了下线
Online = 0 // mqtt确认了上线
WaitingOnline = 1 // 检测到网络正常,等待mqtt上线
)
// NetworkStatus network status, 0 for connect, -1 for disconnect
var (
networkStatus = -1
networkStatusSync = sync.Mutex{}
)
func GetNetworkStatus() int {
networkStatusSync.Lock()
defer networkStatusSync.Unlock()
return networkStatus
}
func SetNetworkStatus(status int, mqtt bool) {
networkStatusSync.Lock()
defer networkSt | fg := sys.GetBusManagerCfg()
// download element library
downloadDependentDeviceLibrary()
if err := web.ReadVideoConfig(); err != nil {
log.Printf("open video config file failed, errmsg {%v}", err)
}
s.enableCache = true
// check directory exist or not
_, err := os.Stat(sys.GetBusManagerCfg().Cache.Directory)
if err != nil {
if os.IsNotExist(err) {
// do not exist, create
if err := os.Mkdir(cfg.Cache.Directory, os.ModeDir); err != nil {
log.Printf("create directory failed: %s", err)
s.enableCache = false
}
} else {
s.enableCache = false
}
}
log.Printf("enable cache: %v", s.enableCache)
muid := getUUID()
willtopic := "sample-values/" + muid + "/_/_state"
conntopic := willtopic
payload := public.MessagePayload{
MonitoringUnitID: muid,
SampleUnitID: "_",
ChannelID: "_state",
Name: "采集器连接状态",
Value: -1,
// Timestamp: public.UTCTimeStamp(),// 此值因与服务器发生了冲突,估不上报
Cov: true,
State: 0,
}
willpayload, _ := json.Marshal(payload)
payload.Value = 0
connpayload, _ := json.Marshal(payload)
s.MqttClient = NewMQTTClient(SubMessageHandler, willtopic, string(willpayload), conntopic, string(connpayload))
if err := s.MqttClient.ConnectServer(); err != nil {
log.Printf("connect mqtt server failed, errmsg {%v}, start reconnect...", err)
// start to reconnect
go s.MqttClient.ReconnectServer()
}
// check network status
go checkNetworkStatus()
s.MqttClient.Subscribe("sample-values/+/_/upgrade")
s.MqttClient.Subscribe("command/" + muid + "/#")
// init status
web.DeviceStatus = make(map[string]int)
mu := sys.GetMonitoringUnitCfg()
for _, sp := range mu.SamplePorts {
for _, su := range sp.SampleUnits {
web.DeviceStatus[su.ID] = 0
}
}
// set led status
go controlAppLEDStatus()
// check start log
go func() {
topic := "sample-values/" + getUUID() + "/_/restart"
payload := public.MessagePayload{
MonitoringUnitID: cfg.MQTT.ClientID,
SampleUnitID: "_",
ChannelID: "restart",
Name: "",
Value: 0,
Timestamp: public.UTCTimeStamp(),
Cov: true,
State: 0,
}
flag, err := bootflag.GetFlag()
if err != nil {
log.Warning(errors.As(err))
flag = "-1"
}
switch flag {
case "0":
payload.Value = 1
case "1":
payload.Value = 2
default:
// using 0
}
bp, _ := json.Marshal(payload)
for {
if GetNetworkStatus() == Online {
s.MqttClient.PublishSampleValues(topic, string(bp))
break
}
time.Sleep(time.Second)
}
if err := bootflag.CleanFlag(); err != nil {
log.Warning(errors.As(err))
}
}()
}
// Cleanup cleanup
func (s *BusServer) Cleanup() {
s.MqttClient.DisconnectServer()
}
// publishCacheFile drains the on-disk message cache: every file in the
// configured cache directory is read, its "topic&payload" lines are parsed,
// and the payloads are batched into one JSON array published on the
// "sample-block/<uuid>" topic. A file is removed only after a successful
// publish; on the first publish error the function returns and the
// remaining files are kept for the next attempt. Unreadable or malformed
// entries are skipped silently (best-effort drain).
func (s *BusServer) publishCacheFile() error {
	files, err := filepath.Glob(filepath.Join(sys.GetBusManagerCfg().Cache.Directory, "*"))
	if err != nil {
		return err
	}
	if len(files) == 0 {
		return nil
	}
	topic := "sample-block/" + getUUID()
	for _, filename := range files {
		// Read the file in one call. This also fixes the original
		// defer-in-loop leak that kept every file handle open until the
		// function returned.
		data, err := ioutil.ReadFile(filename)
		if err != nil {
			continue // unreadable file: skip it, try the rest
		}
		// Each cached line is "topic&payload"; only the payload part goes
		// into the batched JSON array.
		sp := []string{}
		for _, m := range strings.Split(string(data), "\n") {
			ps := strings.Split(m, "&")
			if len(ps) < 2 {
				continue // malformed line, ignore
			}
			sp = append(sp, ps[1])
		}
		d := "[" + strings.Join(sp, ",") + "]"
		log.Printf("publish file: %s", filename)
		// publish data
		if err := s.MqttClient.PublishSampleValues(topic, d); err != nil {
			return err
		}
		// remove cache file only after it was published successfully
		os.Remove(filename)
	}
	return nil
}
// saveCacheToFile persists the in-memory message cache to a new file in the
// cache directory. Cache files are named with monotonically increasing
// integers; when more than cfg.Cache.MaxFile files exist the oldest
// (lowest-numbered) surplus files are deleted. Messages are written one per
// line, joined with '\n'.
func saveCacheToFile(cache []string) error {
	cfg := sys.GetBusManagerCfg()
	files, err := filepath.Glob(filepath.Join(cfg.Cache.Directory, "*"))
	if err != nil {
		return err
	}
	var ifl []int
	for _, f := range files {
		fn := filepath.Base(f)
		i, err := strconv.Atoi(fn)
		if err != nil {
			// Foreign (non-numeric) file in the cache dir: not one of
			// ours. The original ignored this error and treated such
			// files as file "0".
			continue
		}
		ifl = append(ifl, i)
	}
	// Sort newest (largest number) first.
	sort.Sort(sort.Reverse(sort.IntSlice(ifl)))
	log.Println(ifl)
	l := len(ifl)
	if l > cfg.Cache.MaxFile {
		// Everything past MaxFile is the oldest surplus: remove it.
		for _, f := range ifl[cfg.Cache.MaxFile:] {
			os.Remove(filepath.Join(cfg.Cache.Directory, strconv.Itoa(f)))
		}
	}
	// Next file number is one past the current maximum (0 when empty).
	var nf int
	if l > 0 {
		nf = ifl[0] + 1
	}
	// Renamed from "filepath": the original shadowed the path/filepath
	// package with a local variable.
	path := filepath.Join(cfg.Cache.Directory, strconv.Itoa(nf))
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	data := strings.Join(cache, "\n")
	if _, err = f.Write([]byte(data)); err != nil {
		return fmt.Errorf("write file [%s] failed: %s", path, err)
	}
	return nil
}
// Publish publish implement — gRPC entry point used by samplers to push a
// message onto the MQTT bus. Besides forwarding the message it:
//   - runs checkMessage to keep the web device-status table current,
//   - pushes the payload to the web layer's map/stream asynchronously,
//   - fires configured "capture" commands when a trigger signal matches,
//   - when caching is enabled, buffers messages while offline and flushes
//     disk-cached files first, then the in-memory cache, once back online.
// While caching is enabled the reply is always StatusOK: a failed publish
// just leaves the data cached for a later attempt.
func (s *BusServer) Publish(ctx context.Context, in *pb.PublishRequest) (*pb.PublishReply, error) {
	// check message and boradcast to clients when necessary
	checkMessage(in.Topic, in.Payload)
	go func(a string, b []byte) {
		web.PayloadMap.Store(a, b)
		web.PayloadChan <- b
	}(in.Topic, []byte(in.Payload))
	// check whether should capture or not
	buscfg := sys.GetBusManagerCfg()
	for _, cap := range buscfg.CaptureOption {
		match := false
		for _, signal := range cap.Signals {
			if strings.Contains(in.Topic, signal.Topic) {
				// topic coincident, check value
				var p public.MessagePayload
				if err := json.Unmarshal([]byte(in.Payload), &p); err != nil {
					continue
				}
				val := ""
				// NOTE(review): encoding/json unmarshals numbers into
				// interface{} as float64, so the int case below looks
				// unreachable — confirm before relying on it.
				switch p.Value.(type) {
				case int:
					val = strconv.Itoa(p.Value.(int))
				case float64:
					val = strconv.Itoa(int(p.Value.(float64)))
				case string:
					val = p.Value.(string)
				}
				if signal.Value == val {
					match = true
					break
				}
			}
		}
		// match, capture: publish a capture command for the sample unit
		if match {
			var p public.CommandPayload
			var para public.CommandParameter
			muid := getUUID()
			chid := "capture"
			para.Channel = chid
			p.MonitoringUnit = muid
			p.SampleUnit = cap.SUID
			p.Channel = chid
			p.StartTime = public.UTCTimeStamp()
			p.Phase = public.PhaseExcuting
			p.Parameters = para
			topic := "command/" + muid + "/" + cap.SUID + "/" + chid
			msg, err := json.Marshal(p)
			if err != nil {
				continue
			}
			// publish
			s.MqttClient.PublishSampleValues(topic, string(msg))
		}
	}
	// enable cache
	if s.enableCache {
		// online or not
		if GetNetworkStatus() == Online {
			// check cache files exist or not, send cache files first
			if err := s.publishCacheFile(); err != nil {
				log.Printf("publish failed: %s", err)
				// NOTE(review): returning here drops the current message
				// (it is neither published nor cached) — confirm intended.
				return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
			}
			// check cache exist, publish the in-memory backlog
			if len(s.cache) > 0 {
				for _, m := range s.cache {
					ms := strings.Split(m, "&")
					if len(ms) == 2 {
						topic := ms[0]
						payload := ms[1]
						log.Printf("publish cache: %s", m)
						if err := s.MqttClient.PublishSampleValues(topic, payload); err != nil {
							return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
						}
					}
				}
				s.cache = []string{}
			}
			// then publish current message
			s.MqttClient.PublishSampleValues(in.Topic, in.Payload)
		} else {
			// offline, save data to cache, check cache quantity
			if len(s.cache) < sys.GetBusManagerCfg().Cache.MaxMessage {
				log.Printf("save to cache, current number: %d", len(s.cache))
				s.cache = append(s.cache, in.Topic+"&"+in.Payload)
			} else {
				log.Printf("save to file")
				// in-memory cache full: spill it to disk and start fresh
				if err := saveCacheToFile(s.cache); err != nil {
					log.Printf("save cache faield: %s", err)
				}
				s.cache = []string{}
			}
		}
	} else {
		// caching disabled: publish directly and surface failures
		if err := s.MqttClient.PublishSampleValues(in.Topic, in.Payload); err != nil {
			return &pb.PublishReply{Status: public.StatusErr, Message: public.MessageErrUnknown}, nil
		}
	}
	return &pb.PublishReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// Subscribe subscribe implement — gRPC entry point that subscribes the
// shared MQTT client to the requested topic. MQTT errors are reported in
// the reply status/message rather than as a transport error.
func (s *BusServer) Subscribe(ctx context.Context, in *pb.SubscribeRequest) (*pb.SubscribeReply, error) {
	if err := s.MqttClient.Subscribe(in.Topic); err != nil {
		return &pb.SubscribeReply{Status: public.StatusErr, Message: err.Error()}, nil
	}
	return &pb.SubscribeReply{Status: public.StatusOK, Message: public.MessageOK}, nil
}
// getUUID returns the monitoring-unit identifier, read from the local
// monitoring-unit config file. The commented-out code below is the previous
// implementation that fetched the UUID from the system daemon over gRPC.
func getUUID() string {
	// address := config.Configuration.SystemServer.Host + ":" + config.Configuration.SystemServer.Port
	// // get info from hmu
	// var client public.SystemClient
	// if err := client.ConnectSystemDaemon(address); err != nil {
	// log.Fatalf("connect system server failed, errmsg {%v}", err)
	// }
	// resp, err := client.UUID()
	// if err != nil {
	// log.Fatalf("get uuid failed, errmsg {%v}", err)
	// }
	// defer client.Disconnect()
	// return resp.UUID
	// read id from config file
	return sys.GetMonitoringUnitCfg().ID
}
// controlAppLEDStatus blinks the application status LED forever. The blink
// interval is LEDSetInterval (milliseconds), which the MQTT
// connect/disconnect handlers adjust, so the blink rate doubles as a visual
// connection indicator. Runs in its own goroutine; returns only if the LED
// hardware cannot be prepared.
func controlAppLEDStatus() {
	var appled extend.AppLED
	if err := appled.Prepare(sys.GetBusManagerCfg().Model); err != nil {
		buslog.LOG.Warningf("prepare app led failed, errmsg: %v", err)
		return
	}
	defer appled.CleanUp()
	status := 0
	// loop forever, toggling the LED
	for {
		// toggle between 0 and 1
		status ^= 1
		// sleep for a moment, interval set by mqtt connect/disconnect handler
		time.Sleep(time.Millisecond * time.Duration(LEDSetInterval))
		// Errors are deliberately ignored: this runs several times a second
		// and logging every failure would flood the log (the original had an
		// empty error branch with the log call commented out).
		_ = appled.SetLEDStatus(status)
	}
}
// checkMessage inspects a published message and, for "_state" channel
// topics of the form "<root>/<mu>/<su>/_state", maintains web.DeviceStatus
// (0 = offline, 1 = online) and broadcasts the updated status table over
// the websocket hub whenever a device's state changes.
func checkMessage(topic, payload string) {
	s := strings.Split(topic, "/")
	if len(s) != 4 {
		return
	}
	suid := s[2]
	channelid := s[3]
	if channelid != "_state" {
		return
	}
	// parse payload, get value
	var p public.MessagePayload
	if err := json.Unmarshal([]byte(payload), &p); err != nil {
		log.Printf("parse payload fail, payload: %s, errmsg: %v", payload, err)
		return
	}
	// JSON numbers decode into interface{} as float64. The checked
	// assertion avoids the panic the original hit on a non-numeric or
	// missing Value.
	f, ok := p.Value.(float64)
	if !ok {
		log.Printf("unexpected value type in payload: %s", payload)
		return
	}
	// -1 means disconnected; any other value counts as online.
	v := 0
	if int(f) != -1 {
		v = 1
	}
	// set status
	lastvalue, ok := web.DeviceStatus[suid]
	if !ok {
		log.Printf("channel id `%s` do not exist", suid)
		return
	}
	if v != lastvalue {
		// update status, broadcast to web clients
		web.DeviceStatus[suid] = v
		bs, _ := web.DeviceStatusToBytes()
		web.WSHub.BroadcastMessage(bs)
	}
}
// checkNetworkStatus polls the connection state every 5 seconds and drives
// the recovery state machine: Online -> nothing to do; WaitingOnline ->
// re-probe the network while waiting for MQTT; Offline -> probe the
// network, and if it stays down past the configured restart window,
// escalate from software restarts to a hardware reboot. Runs forever in
// its own goroutine.
func checkNetworkStatus() {
	timer := time.NewTicker(5 * time.Second)
	lastRestartTime := time.Now()
	cfg := sys.GetBusManagerCfg()
	// Hosts to probe: the MQTT broker plus any configured extra hosts.
	netCheckList := []string{cfg.MQTT.Host + ":" + cfg.MQTT.Port}
	netCheckList = append(netCheckList, cfg.Web.NetChecking.Hosts...)
	doTimeout := cfg.Web.NetChecking.Timeout
	if doTimeout == 0 {
		doTimeout = 5
	}
	doTimes := cfg.Web.NetChecking.DoTimes
	for {
		select {
		case <-timer.C:
			status := GetNetworkStatus()
			// Most of the time the network is fine; take this fast path.
			if status == Online {
				lastRestartTime = time.Now()
				continue
			}
			if status == WaitingOnline && len(cfg.Web.NetChecking.Hosts) > 0 {
				// Handle the ticker while waiting for MQTT to come back up.
				sysd := sys.ConnectSystemDaemon(cfg.Model, &cfg.SystemServer)
				if _, err := sysd.AutoCheckNetworking(netCheckList, time.Duration(doTimeout)*1e9); err == nil {
					SetNetworkStatus(WaitingOnline, false)
				} else {
					// The network went down again while waiting for MQTT:
					// fall back to the offline state.
					SetNetworkStatus(Offline, false)
				}
				sysd.Disconnect()
				continue
			}
			if status == Offline {
				if doTimes > 0 && len(cfg.Web.NetChecking.Hosts) > 0 {
					doTimes--
					// Probe the network first.
					sysd := sys.ConnectSystemDaemon(cfg.Model, &cfg.SystemServer)
					if _, err := sysd.AutoCheckNetworking(netCheckList, time.Duration(doTimeout)*1e9); err == nil {
						// Network is reachable again: wait for MQTT to come online.
						sysd.Disconnect()
						SetNetworkStatus(WaitingOnline, false)
						continue
					}
					sysd.Disconnect()
					// Probe failed: fall through to the failure handling below.
				}
				now := time.Now()
				d := now.Sub(lastRestartTime)
				rd := time.Duration(cfg.Web.Restart.Duration) * time.Second
				if d >= rd {
					lastRestartTime = now
					rt := cfg.Web.Restart.Times
					if rt < cfg.Web.Restart.Max {
						// add restart times (persisted so escalation survives restarts)
						cfg.Web.Restart.Times++
						if err := sys.SaveBusManagerCfg(cfg); err != nil {
							buslog.LOG.Warningf("save bus config failed, errmsg {%v}", err)
						}
						buslog.LOG.Infof("software restart: %d times", cfg.Web.Restart.Times)
						// software restart
						if err := public.RestartApp(cfg.Model, errors.New(public.RestartByCommunicationInterrupt)); err != nil {
							buslog.LOG.Warning(errors.As(err))
						}
					} else {
						// software restarts exhausted: clear the counter
						cfg.Web.Restart.Times = 0
						if err := sys.SaveBusManagerCfg(cfg); err != nil {
							buslog.LOG.Warningf("save bus config failed, errmsg {%v}", err)
						}
						buslog.LOG.Info("hardware restart")
						// hardware restart
						if err := public.Reboot(errors.New(public.RebootByCommunicationInterrupt)); err != nil {
							buslog.LOG.Warning(errors.As(err))
						}
					}
				}
			}
		}
	}
}
// downloadDependentDeviceLibrary ensures that every sample unit's element
// library exists locally, downloading any missing ones from the configured
// element-library server. Individual failures are reported and skipped;
// the function always returns nil.
func downloadDependentDeviceLibrary() error {
	cfg := sys.GetBusManagerCfg()
	mu := sys.GetMonitoringUnitCfg()
	elementPath := os.ExpandEnv(etc.Etc.String("public", "element-dir"))
	// fetchOne checks a single element library and downloads it if absent.
	fetchOne := func(element string) {
		ep := filepath.Join(elementPath, element)
		_, err := os.Stat(ep)
		switch {
		case err == nil:
			// already present: nothing to update
			log.Debugf("element library [%s] exist", ep)
		case !os.IsNotExist(err):
			// stat failed for some reason other than absence
			log.Debugf("check element library [%s] existence fail", errors.As(err, ep))
		default:
			// missing: download from the element-library server and save
			np := cfg.Web.ElementLib.Server + element
			if err := public.HTTPDownloadFile(np, ep); err != nil {
				fmt.Printf("download or save element library [%s] failed: %s\n", element, errors.As(err))
				return
			}
			fmt.Printf("download or save element library [%s] success\n", element)
		}
	}
	for _, sp := range mu.SamplePorts {
		for _, su := range sp.SampleUnits {
			fetchOne(su.Element)
		}
	}
	return nil
}
| atusSync.Unlock()
if networkStatus == Online && !mqtt {
// 当mqtt在线时,只能由mqtt处理
return
}
networkStatus = status
}
// Init do some init operation
func (s *BusServer) Init() {
c | identifier_body |
TempTaker_old.py | import serial #library for interfacing with the serial port
import time #library for pausing the script
import math
import sys
import os #for file operations
import pickle #for easy exporting/importing of datatypes
import types #for recognizing types of objects, functions on multiple data types
from numpy import *
import Logger
import email_notifier
# --- Runtime configuration and tuning constants for the AC control loop ---
directory = 'Log/'; os.chdir(directory);
ColdWaterTempBase = 7 # average ColdwaterTemp estimate
SetPoint = [22,22,22,22,22,22,22,22,22,22,22,22,70,22,ColdWaterTempBase,22] #channels 13 and 15 (or 12,14 counting from 0) are hot/cold water
SignalDelay = 2 #how many seconds to wait before changing applied voltages and how often a response is calculated, seems to be a least 5s of minimum delay, probably an offset.
WriteDelay = 10 #how many seconds to wait before logging the data
RunningAvgNum = 12 #how many iterations is the window for the running average of temperature measurements
DataFreq = 0.95 #how often to take data
ComDelay = 0.10 #how long to wait before sending consecutive communcations in seconds
Ch = 16; # written for 16 incoming chaneels
ControlCh = 4 #number of controlled rooms
ControlledValves = 8 # number of valves
a = 3.3540154E-03; b = 2.5627725E-04; c = 2.0829210E-06 ; d = 7.3003206E-08 # coeffs for voltage to T conversion info from thermistor datasheet
V0 = 6.95 #volts on the voltage reference
# Sensor-name -> 1-based channel number mapping (several sensors share channels 13/15)
Table1=8;Table2=5;Table3=1;Table4=6;Table5=9;SupplyBigRoom=14;SupplyLaserRoom=16;SupplySmallRoom=11;ColdWaterSmallRoom=15;ColdWaterBigRoom=15;ColdWaterLaserRoom=15;HotWaterSmallRoom=13;HotWaterBigRoom=13;HotWaterLaserRoom=13;
HardwareG = [15,15,15,15,15,15,15,15,15,15,15,15,5,15,5,15] #channels 13 and 15 (or 12,14 counting from 0) have less gain for expanded range
# Control-signal indices, one per controlled room
bigroomctrl = 0 #144 big room
smlroomctrl = 1 #140
laserroomctrl = 2 #144B laserroom
officectrl = 3 #144A office
#Coldoffset = [63,25,45,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
#Hotoffset = [70,80,75,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
#ValveMin = [15,62,52,65,35,60,0,0] # making sure that the valves never close completly [smlroom-cold,hot, big room-cold, hot, laser room-cold, hot]
Coldoffset = [65,35,45,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
Hotoffset = [67,70,70,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
ValveMin = [15,45,55,55,35,30,0,0] # making sure that the valves never close completly [smlroom-cold,hot, big room-cold, hot, laser room-cold, hot]
ColdWaterTempCorrection = [0,0,0,0] # should be zero, [big,sml, laser,office]
ColdWaterDiffGain = [-0,-0,-0] # should be zero
#ColdWaterDiffGain = [-10000,-10000,-10000] # should be zero
ColdWaterValveGain = [0.1, 0.2, 0.2, 0] # should be zero, adds some value (ColdWaterValveGain * (ActualColdWater-ColdWaterTempBase)) to the cold valve depending on the cold water tempererature deviation from ColdWaterTempBase, [big,sml, laser]
ValveMax = 255
GuessHotWaterTemp = 70
oldvalvesignal=zeros(ControlledValves)  # last commanded valve positions (for hysteresis tracking)
direction=zeros(ControlledValves)       # last direction of travel per valve (-1/0/+1)
hysteresis = [ 22, 12, 2, 7, 22, 12, 0, 0] # smlroom (cold, hot), bigroom (cold, hot), laserroom (cold, hot)
#hysteresis = [ 14, 12, 2, 7, 17, 12, 0, 0] # working well
#hysteresis = [ 12, 20, 3, 13, 35, 25, 0, 0]
IntegrationMin = -500*5; #limit on how small integration response can get, modified later with I-gain and thus more than the max control signal makes not much sense, the 20 comes from the estimated cooling power rescaling (SetPoint-2)
IntegrationMax = 700*5; #limit on how big integration response can get, modified later with the I-gain, the 60 comes from the estimated heating power rescaling
DiffMax = 20000000
PropActionThreshold = 0.0; # Set proportional feedback to 0 if correction would smaller than this value, units are in temperature deviation [K]; prevents some noise on control when having high P-gain.
DiffActionThreshold = 0.08; # Set differential feedback to 0 if correction would smaller than this value, units are in temperature deviation [K]; prevents some noise on control when having high D-gain
#ControlActionThreshold = [1,1,1,1,1,1,1,1]; # idea is to reduce wear on control elements by doing adjustments only when large changes occur, SRC,SRH,BRC,BRH,LRC,LRH
ControlActionThreshold = [2,2,4,3,2,2,2,2]; # idea is to reduce wear on control elements by doing adjustments only when large changes occur, SRC,SRH,BRC,BRH,LRC,LRH
PTab=[0,0,0,1] # contribution from table temperature to P gain
PSup=[1,1,1,0] # contribution from supply air (=incoming air) to P gain
PCoolingWater=[0,0,0,0] # contribution from cooling water (=cooling water) to P gain
ColdValveGain=[1,1,1,0] # correction for cold valve gain
HotValveGain=[1,1,1,0] # correction for hot valve gain, should be 1, however, better results with higher gain?
integfile = 'lastInteg.txt'#name of the file where last integrator array is kept.
pifile = 'PIparams.txt'#where PI parameters are kept and can be modified on the fly
mancontrolfile = 'manual_valve.txt' #used for manual control of valve positions
ser = serial.Serial('/dev/ttyUSB1') # open USB-serial port
if(not ser.isOpen()):
    print 'Error: Serial Port Not Open'
ser.flushInput()
ser.flushOutput()
ser.baudrate = 9600;
ser.timeout = 0.1; #sets timeout of the serial port
counter = SignalDelay+1 #so that data is outputted on the first loop
WriteCounter = SignalDelay+1 #so that data is written on the first loop
errors_count = 0 #initial number of errors
# Alarm e-mail targets: `notifier` for internal alerts, `officialnotifier`
# additionally reaches facilities support.
notifier = email_notifier.notifier()
notifier.set_recepients(['micramm@gmail.com','hhaeffner@berkeley.edu','haeffnerlab@gmail.com'])
officialnotifier = email_notifier.notifier()
officialnotifier.set_recepients(['micramm@gmail.com','hhaeffner@berkeley.edu','haeffnerlab@gmail.com','physics-support@lists.berkeley.edu'])
class Valves():
def __init__(self):
self.previousSignal = zeros(ControlledValves)
self.newSignal = zeros(ControlledValves)
def sign(x):
if(x > 0.01):
return 1
if(x < -0.01):
return -1
else:
return 0
def ApplyValveSignal(self,incoming_signal):
self.newSignal = self.testResponseChange(incoming_signal)
for i in range(ControlledValves): # taking care of the hysteresis ....
newdirection = sign(self.newSignal[i] - oldvalvesignal[i])
if((newdirection != direction[i]) and (newdirection)): # valve turns around
direction[i] = newdirection
print str(time.strftime("%H:%M:%S", time.localtime())) + ': Direction change: Valve ' + str(i) + ' ' + str(direction[i])
oldvalvesignal[i] = self.newSignal[i]
self.newSignal[i] = clip(self.newSignal[i] + direction[i] * hysteresis[i]/2,ValveMin[i],ValveMax)
self.communicateSend()
return self.newSignal
#for for test in response to minimize valve motion and reduce wear and tear
def testResponseChange(self,signal):
for i in range(len(signal)):
if abs(signal[i]-self.previousSignal[i]) >= ControlActionThreshold[i]:
signal[i] = int(round(signal[i]))
self.previousSignal[i] = signal[i]
print str(time.strftime("%H:%M:%S", time.localtime())) + ': Changing Valve ' + str(i) + ' to ' + str(signal[i])
else:
signal[i] = int(round(self.previousSignal[i]))
return signal
def communicateSend(self):
signal = self.newSignal
for i in range(ControlledValves):
ser.write("d")
time.sleep(ComDelay)
ser.write(str(i))
time.sleep(ComDelay)
vsig = self.dec2hex(signal[i])
ser.write(vsig)
time.sleep(ComDelay)
ser.flushInput()
time.sleep(ComDelay)
def dec2hex(self, n):#"""return the hexadecimal string representation of integer n as a two digits representation in lowercase"""
string = "%x" % n
string = string.zfill(2)
return string
class ResponseCalculator():
    """PID controller for the room/valve control loop.

    Persists the integrator to `integfile` (so restarts do not lose the
    integral term) and hot-reloads P/I/D gains from `pifile` whenever that
    file's mtime changes.
    """
    def __init__(self):
        self.lastErrSigArr = zeros(Ch) #initial vale of lastErrorSignal, used to disable Diff gain for the first time
        self.loadExternalParams()
    def loadExternalParams(self):
        # Restore the integrator and PID gains from disk, creating the
        # files with defaults on first run.
        if(os.path.isfile(integfile)):#if integ file exists (with information about last integration), open it in read/write mode and read in last integrator setting
            self.INTEGFILE = open(integfile,"r+");
            self.integralerrorSigArr = array(pickle.load(self.INTEGFILE))
        else: #if file does not exist, create it and specify initial integrator parameters.
            self.INTEGFILE = open(integfile,"w");
            self.integralerrorSigArr = zeros(Ch)
        if(os.path.isfile(pifile)): #if file exists, load the PI parameters
            self.PIFILE = open(pifile,"r+")
            self.P = array(pickle.load(self.PIFILE))
            self.I = array(pickle.load(self.PIFILE))
            self.D = array(pickle.load(self.PIFILE))
            self.PIFILE.close()
        else:
            self.PIFILE = open(pifile,"w") #if file doesn't not exist, create it
            #proportionality constant for PID in the format [#144 big room / #140 small room / #144B Laser Room / #144A office]
            self.P = array([-15,-15,-15,-0])
            self.I = array([-.1,-.1,-.1,-0])
            self.D = array([-40,-40,-40,0])
            pickle.dump(self.P.tolist(),self.PIFILE)
            pickle.dump(self.I.tolist(),self.PIFILE)
            pickle.dump(self.D.tolist(),self.PIFILE)
            self.PIFILE.close()
        self.PImodtime = os.path.getmtime(pifile) #time when pifile is last modified
    def updateExternalPIDParams(self):
        # Hot-reload gains when pifile was modified externally.
        if(os.path.getmtime(pifile) != self.PImodtime): #if PI parmeters have been modified externally, update them
            self.PIFILE = open(pifile, 'r')
            self.P = array(pickle.load(self.PIFILE))
            self.I = array(pickle.load(self.PIFILE))
            self.D = array(pickle.load(self.PIFILE))
            self.PIFILE.close()
            self.PImodtime = os.path.getmtime(pifile)
            print("new P,I,D parameters are")
            print self.P
            print self.I
            print self.D
    def getResponse(self):
        # Returns the last [PID response, valve signal] pair computed by
        # calculateResponse().
        return [self.PIDresponseArr,self.valvesignalArr]
    def calculateResponse(self, curTempArr):
        # One controller iteration: error -> integrator (persisted) ->
        # PID response -> valve positions.
        self.errorSigArr = self.finderrorSig(curTempArr)
        self.integralerrorSigArr = self.calcintegrator(self.integralerrorSigArr, self.errorSigArr)
        self.saveIntegralError(self.integralerrorSigArr)
        self.PIDresponseArr = self.findPIDresponse(self.errorSigArr, self.integralerrorSigArr,self.lastErrSigArr)
        self.lastErrSigArr= self.errorSigArr
        self.valvesignalArr = self.CalcValveSignal(self.PIDresponseArr, curTempArr)
    def saveIntegralError(self,integError):
        #print integError
        self.INTEGFILE.seek(0) #moves position to the beginning of the file
        pickle.dump(integError, self.INTEGFILE)
        self.INTEGFILE.truncate()
    def finderrorSig(self, CurTemp): #takes array with current temperatures and finds the error signal array
        error = CurTemp - SetPoint
        return error
    def calcintegrator(self,oldArr, newArr):
        # Accumulate the error, clamped to anti-windup limits.
        TotalArr = oldArr + newArr
        # Normalize maximum by the mean of the integration constants
        minim = IntegrationMin/(-sum(self.I)/len(self.I))
        maxim = IntegrationMax/(-sum(self.I)/len(self.I))
        TotalArr=clip(TotalArr,minim,maxim)
        return TotalArr
    def findPIDresponse(self,curErrArr, IntErrArr, lastErrArr): #produces array containg signal to be sent to valves in format [Control1, Control2..] where each one is measured from -255 to 255 positive to hotter, negative for colder
        P = self.P
        I = self.I
        D = self.D
        # Proportional term: driven by the supply-air sensors (PSup weights;
        # the table/cooling-water contributions are currently disabled).
        propArr = zeros(ControlCh)
        propArr[bigroomctrl] = PSup[bigroomctrl]*curErrArr[SupplyBigRoom-1]#0 + PTab[bigroomctrl]*curErrArr[Table1-1]#0 + PCoolingWater[bigroomctrl]*curErrArr[ColdWaterBigRoom]
        propArr[smlroomctrl] = PSup[smlroomctrl]*curErrArr[SupplySmallRoom-1]#0 + PTab[smlroomctrl]*curErrArr[Table3-1]#0 + PCoolingWater[smlroomctrl]*curErrArr[ColdWaterSmallRoom]
        propArr[laserroomctrl] = PSup[laserroomctrl]*curErrArr[SupplyLaserRoom-1]#0 + PTab[laserroomctrl]*curErrArr[Table4-1]#0 + PCoolingWater[laserroomctrl]*curErrArr[ColdWaterLaserRoom]
        propArr[officectrl] = 0 #no control in office
        # dead band: zero out corrections smaller than the threshold
        propArr = propArr - clip(propArr, -PropActionThreshold,PropActionThreshold)
        proprespArr = (P * propArr) # when used with arrays, * is component by component multiplcation or dot product for 1D arrays
        # Integral term: driven by the table sensors.
        integArr = zeros(ControlCh)
        integArr[bigroomctrl] = IntErrArr[Table1-1]
        integArr[smlroomctrl] = IntErrArr[Table3-1]
        integArr[laserroomctrl] = IntErrArr[Table4-1]
        integArr[officectrl] = 0 #no control in office
        integrespArr = (I * integArr) # when used with arrays, * is component by component multiplcation or dot product for 1D arrays
        #print integArr
        # Differential term, disabled on the very first iteration.
        # NOTE(review): `.any()` skips the D-term whenever ANY channel's last
        # error is exactly zero — `.all()` may have been intended; confirm.
        if((lastErrArr == zeros(Ch)).any()): #when the lastErrArr is the zero array, then don't do any diff because it's the first run
            diffrespArr = zeros(ControlCh)
        else:
            diffArr = zeros(ControlCh)
            DiffErrArr = curErrArr - lastErrArr
            diffArr[bigroomctrl] = DiffErrArr[SupplyBigRoom-1] + ColdWaterDiffGain[bigroomctrl] * DiffErrArr[ColdWaterBigRoom-1] / D[bigroomctrl]
            diffArr[smlroomctrl] = DiffErrArr[SupplySmallRoom-1] + ColdWaterDiffGain[smlroomctrl] * DiffErrArr[ColdWaterSmallRoom-1] / D[smlroomctrl]
            diffArr[laserroomctrl] = DiffErrArr[SupplyLaserRoom-1] + ColdWaterDiffGain[laserroomctrl] * DiffErrArr[ColdWaterLaserRoom-1] / D[laserroomctrl]
            diffArr[officectrl] = 0 # no control in office
            # dead band: suppress D-term noise below the threshold
            diffArr = diffArr - clip(diffArr, -DiffActionThreshold,DiffActionThreshold)
            diffrespArr = (D * diffArr)
            diffrespArr = clip(diffrespArr, -DiffMax, DiffMax)
        print 'P', proprespArr
        print 'I', integrespArr
        print 'D', diffrespArr
        responseArr = proprespArr + integrespArr + diffrespArr
        return responseArr
    def CalcValveSignal(self,responseArr,curTempArr):#hard codes which control channel correspond to which output number
        # Convert the per-room PID response into per-valve positions,
        # scaling by the estimated heating/cooling power of the water loops.
        valvesignalArr = zeros(ControlledValves)
        #ColdWater = array([curTempArr[ColdWaterBigRoom-1], curTempArr[ColdWaterSmallRoom-1], curTempArr[ColdWaterLaserRoom-1],0 ])
        #ColdWater = clip(ColdWater,0,20)
        ColdWater = array([13.0,13.0,13.0,0.0]); # set cold water temp to 13 degrees because the sensor is not working atm
        HotWater = array([curTempArr[HotWaterBigRoom-1], curTempArr[HotWaterSmallRoom-1], curTempArr[HotWaterLaserRoom-1], 0])
        SetPointAux = array([SetPoint[Table1-1], SetPoint[Table3-1], SetPoint[Table4-1], 0])
        CoolingPower = clip(SetPointAux - ColdWater - ColdWaterTempCorrection,1.0,100.0) # estimate cooling power for valve settings, always assume some cooling power
        HeatingPower = clip(HotWater - SetPointAux,20.0,200.0) # minum heating power corresponds to 20 degrees temp-difference
        ColdValveSignal = - responseArr/CoolingPower*ColdValveGain + Coldoffset# + ColdWaterValveGain * (ColdWater-ColdWaterTempBase)
        HotValveSignal = Hotoffset + responseArr/HeatingPower*HotValveGain
        # Valve layout: [0,1]=small room (cold,hot), [2,3]=big room,
        # [4,5]=laser room, [6,7]=unused.
        valvesignalArr[0] = ColdValveSignal[smlroomctrl]
        valvesignalArr[1] = HotValveSignal[smlroomctrl]
        valvesignalArr[2] = ColdValveSignal[bigroomctrl]
        valvesignalArr[3] = HotValveSignal[bigroomctrl]
        valvesignalArr[4] = ColdValveSignal[laserroomctrl]
        valvesignalArr[5] = HotValveSignal[laserroomctrl]
        valvesignalArr[6] = 0
        valvesignalArr[7] = 0
        # valvesignalArr[0] = clip(ColdValveSignal[smlroomctrl],ValveMin[0],ValveMax)
        # valvesignalArr[1] = clip(HotValveSignal[smlroomctrl],ValveMin[1],ValveMax)
        # valvesignalArr[2] = clip(ColdValveSignal[bigroomctrl],ValveMin[2],ValveMax)
        # valvesignalArr[3] = clip(HotValveSignal[bigroomctrl],ValveMin[3],ValveMax)
        # valvesignalArr[4] = clip(ColdValveSignal[laserroomctrl],ValveMin[4],ValveMax)
        # valvesignalArr[5] = clip(HotValveSignal[laserroomctrl],ValveMin[5],ValveMax)
        # valvesignalArr[6] = 0
        # valvesignalArr[7] = 0
        valvesignal = valvesignalArr.tolist()  # NOTE(review): unused local; kept as-is
        return valvesignalArr
    def __del__(self):
        # Release the persisted-integrator file handle held since __init__.
        self.INTEGFILE.close()
class DataAcquisition():
    """Reads raw ADC values from the serial front-end and converts them to
    temperatures in degrees Celsius."""
    def binarytoTempC(self,bin, ch): #converts binary output to a physical temperature in C
        Vin = 2.56*(float(bin)+1)/1024 #voltage that is read in 1023 is 2.56 0 is 0
        # NOTE(review): 15/HardwareG[ch] is integer division (Python 2);
        # it yields exactly 1 or 3 for the gains used (15 and 5), so it
        # works, but would silently truncate for other gain values.
        dV = (15/HardwareG[ch])*(Vin/1.2 - 1) #when G = 15 (most channels) dV of 2.4 corresponds to bridge voltage of 1 and dV of 0 is bridge voltage of -1
        #G = 5 for low res channels for cold water, hot water supply
        #G is determines by INA114 gain resistor
        R = (dV/V0 +.5) / (- dV/V0 + .5) * 10 #convert bridge voltage to R in kohms
        T = 1/(a + b*math.log(R/10.) + c * pow(math.log(R/10.),2) + d * pow(math.log(R/10.),3)) #consult datasheet for this
        TempC = round(T - 273.15,2) #Kelvin to C
        return TempC
    def readTemp(self,ser):#processing the input in the format 03:1023<space>... where 03 is the number of the detector, 1023 is the voltage representation
        #returns array with data; retries (recursively) on a short read and
        #emails + exits after more than 20 accumulated read errors
        global errors_count
        curTempArr = zeros(Ch)
        ser.write('t') # command to output readings
        curLine = ser.read(Ch*8) # reads 128 bytes, 16 channels 7 bytes each and 16 spaces
        if(len(curLine)==128): # read everything correctly
            for i in range(Ch):
                # left and right ranges for number of voltages
                lnum = 8*i + 0
                rnum = 8*i + 2
                lvol = 8*i + 3
                rvol = 8*i + 7
                num = curLine[lnum:rnum] #number of the detector is the first
                vol = int(curLine[lvol:rvol]) #voltage readout
                TempC = self.binarytoTempC(vol, i)
                curTempArr[i] = TempC
        else:
            if(errors_count > 20):
                notifier.set_content('AC ALARM','The program quit because there were too many errors with data acquisition')
                notifier.send()
                sys.exit()
            errors_count = errors_count + 1
            print "Error: Data not collected"
            print curLine
            time.sleep(DataFreq)
            # retry after a pause (note: recursion, bounded by errors_count)
            curTempArr = self.readTemp(ser)
        return curTempArr
class RunningAverage():
def __init__(self):
self.RunningAvgNum = RunningAvgNum
self.historyArr = zeros([self.RunningAvgNum,Ch])
self.binfull = 0
self.historyCounter = 0
self.printintro()
def printintro(self):
print '\n' + 'Filling up history for ' + str(self.RunningAvgNum) +' seconds \n'
def printbinfull(self):
print 'Running Average Operational'
def addNumber(self,newnumber):
self.historyArr[self.historyCounter,:] = newnumber #updates history by cycling through rows of historyArr and replacing old data with readTemp
self.historyCounter = (self.historyCounter + 1) % self.RunningAvgNum
if(self.historyCounter == 0):
if(self.binfull == 0):
self.printbinfull()
self.binfull = 1
def getAverage(self):
if(self.binfull): #if bin is full, take the mean
average = mean(self.historyArr,axis=0) #current temperature is the average of the columns of the history array
else: #if bin is not filled, return mean of existing elements
average = sum(self.historyArr[0:(self.historyCounter+1),:],axis=0)/(self.historyCounter)
return average
class ManualController():
    """Reads manual valve-control requests from a pickle file.

    The file holds two pickled objects: a flag (0 = automatic control,
    1 = manual) followed by the requested valve positions. Editing the
    file while the loop runs takes effect immediately, because the file
    is re-read on every isControlManual() call.
    """
    def __init__(self):
        # Seed the control file with "automatic" defaults when missing.
        if not os.path.isfile(mancontrolfile):
            self.FILE = open(mancontrolfile,"w");
            pickle.dump(0,self.FILE)  # 0 indicates automatic control, 1 is manual
            pickle.dump(zeros(ControlledValves).tolist(),self.FILE)
            self.FILE.close()
        self.modtime = os.path.getmtime(mancontrolfile)
        self.valves = zeros(ControlledValves)
    def isControlManual(self):
        # Re-read flag and positions so on-disk edits apply on the fly.
        self.FILE = open(mancontrolfile,"r");
        self.mancontrol = pickle.load(self.FILE)
        self.valves = array(pickle.load(self.FILE))
        self.FILE.close()
        return self.mancontrol
    def ManualValvePos(self):
        # Positions cached by the most recent isControlManual() call.
        return self.valves
class AlarmChecker():
def __init__(self):
self.messagesent = 0
self.callstoReset = 900*12 # set time for next alarm to 12h
self.callsCount = 0
self.messageMax = 1 #maximum number of allowed emails per the number of callstoReset
def updateCallsCount(self):
if(self.callsCount >= self.callstoReset):
self.messagesent = 0
self.callsCount = 0
print 'alarm armed again'
else:
self.callsCount = self.callsCount + 1;
def checkForAlarm(self,curTempArr):
self.updateCallsCount()
if(abs(curTempArr[Table1 - 1] - SetPoint[Table1 -1]) > 2):
notifier.set_content('AC ALARM','The differential between Table1 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
if(abs(curTempArr[Table3 - 1] - SetPoint[Table3 - 1]) > 2):
notifier.set_content('AC ALARM','The differential between Table3 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
if(abs(curTempArr[Table4 - 1] - SetPoint[Table4 - 1]) > 2):
notifier.set_content('AC ALARM','The differential between Table4 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
#if(abs(curTempArr[ColdWaterBigRoom - 1] - 7) > 8):
#notifier.set_content('AC ALARM','The ColdWaterBigRoom temperature is too far from norm')
#officialnotifier.set_content('Haeffner Lab: Possible Chiller Issue','The cold water supply temperature is currently ' + str(curTempArr[ColdWaterBigRoom - 1])+ ', too far from norm of 7 degrees \n This is an automatically generated email.')
#if(self.messagesent < self.messageMax):
#notifier.send()
#officialnotifier.send()
#self.messagesent = self.messagesent + 1
if(abs(curTempArr[HotWaterBigRoom - 1] - 50) > 40): # Hot water varies really a lot
notifier.set_content('AC ALARM','The HotWaterBigRoom temperature is too far from norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
# --- Main control loop: sample -> average -> alarm check -> PID -> valves ---
alarmchecker = AlarmChecker()
runaverage = RunningAverage()
acquire = DataAcquisition()
log = Logger.Logger()
valves = Valves()
responsecalculator = ResponseCalculator()
manualcontrol = ManualController()
try:
    while('true'):
        counter = counter + DataFreq
        WriteCounter = WriteCounter + DataFreq
        time.sleep(DataFreq)
        runaverage.addNumber(acquire.readTemp(ser))
        curTempArr = runaverage.getAverage()
        alarmchecker.checkForAlarm(curTempArr)
        responsecalculator.calculateResponse(curTempArr)
        if counter > SignalDelay: #apply the output singal every SignalDelay seconds
            counter = 0
            if(manualcontrol.isControlManual()):
                valvesignalArr = manualcontrol.ManualValvePos()
                PIDresponseArr = zeros(ControlCh) #neded for logging 0s
                print 'manual control' + str(valvesignalArr)
            else:
                [PIDresponseArr,valvesignalArr] = responsecalculator.getResponse()
            sentValveSignalArr = valves.ApplyValveSignal(valvesignalArr)
            #print sentValveSignalArr
        if WriteCounter > WriteDelay: #write data to log file
            WriteCounter = 0
            log.MakeLog(curTempArr, PIDresponseArr,sentValveSignalArr)
        # Check for externally edited PID gains once per iteration.
        # NOTE(review): original indentation was lost in extraction — this
        # could also have lived inside the WriteCounter block; confirm.
        responsecalculator.updateExternalPIDParams()
except KeyboardInterrupt:
    # Graceful shutdown on Ctrl-C: drain and release the serial port.
    time.sleep(DataFreq)
    ser.flushInput()
    ser.flushOutput()
    ser.close() # closes the serial port
print 'Graceful exit' | random_line_split | |
TempTaker_old.py | import serial #library for interfacing with the serial port
import time #library for pausing the script
import math
import sys
import os #for file operations
import pickle #for easy exporting/importing of datatypes
import types #for recognizing types of objects, functions on multiple data types
from numpy import *
import Logger
import email_notifier
directory = 'Log/'; os.chdir(directory);
ColdWaterTempBase = 7 # average ColdwaterTemp estimate
SetPoint = [22,22,22,22,22,22,22,22,22,22,22,22,70,22,ColdWaterTempBase,22] #channels 13 and 15 (or 12,14 counting from 0) are hot/cold water
SignalDelay = 2 #how many seconds to wait before changing applied voltages and how often a response is calculated, seems to be a least 5s of minimum delay, probably an offset.
WriteDelay = 10 #how many seconds to wait before logging the data
RunningAvgNum = 12 #how many iterations is the window for the running average of temperature measurements
DataFreq = 0.95 #how often to take data
ComDelay = 0.10 #how long to wait before sending consecutive communcations in seconds
Ch = 16; # written for 16 incoming chaneels
ControlCh = 4 #number of controlled rooms
ControlledValves = 8 # number of valves
a = 3.3540154E-03; b = 2.5627725E-04; c = 2.0829210E-06 ; d = 7.3003206E-08 # coeffs for voltage to T conversion info from thermistor datasheet
V0 = 6.95 #volts on the voltage reference
#corresponding channels
Table1=8;Table2=5;Table3=1;Table4=6;Table5=9;SupplyBigRoom=14;SupplyLaserRoom=16;SupplySmallRoom=11;ColdWaterSmallRoom=15;ColdWaterBigRoom=15;ColdWaterLaserRoom=15;HotWaterSmallRoom=13;HotWaterBigRoom=13;HotWaterLaserRoom=13;
HardwareG = [15,15,15,15,15,15,15,15,15,15,15,15,5,15,5,15] #channels 13 and 15 (or 12,14 counting from 0) have less gain for expanded range
#Control Signal
bigroomctrl = 0 #144 big room
smlroomctrl = 1 #140
laserroomctrl = 2 #144B laserroom
officectrl = 3 #144A office
#Coldoffset = [63,25,45,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
#Hotoffset = [70,80,75,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
#ValveMin = [15,62,52,65,35,60,0,0] # making sure that the valves never close completly [smlroom-cold,hot, big room-cold, hot, laser room-cold, hot]
Coldoffset = [65,35,45,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
Hotoffset = [67,70,70,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
ValveMin = [15,45,55,55,35,30,0,0] # making sure that the valves never close completly [smlroom-cold,hot, big room-cold, hot, laser room-cold, hot]
ColdWaterTempCorrection = [0,0,0,0] # should be zero, [big,sml, laser,office]
ColdWaterDiffGain = [-0,-0,-0] # should be zero
#ColdWaterDiffGain = [-10000,-10000,-10000] # should be zero
ColdWaterValveGain = [0.1, 0.2, 0.2, 0] # should be zero, adds some value (ColdWaterValveGain * (ActualColdWater-ColdWaterTempBase)) to the cold valve depending on the cold water tempererature deviation from ColdWaterTempBase, [big,sml, laser]
ValveMax = 255
GuessHotWaterTemp = 70
oldvalvesignal=zeros(ControlledValves)
direction=zeros(ControlledValves)
hysteresis = [ 22, 12, 2, 7, 22, 12, 0, 0] # smlroom (cold, hot), bigroom (cold, hot), laserroom (cold, hot)
#hysteresis = [ 14, 12, 2, 7, 17, 12, 0, 0] # working well
#hysteresis = [ 12, 20, 3, 13, 35, 25, 0, 0]
IntegrationMin = -500*5; #limit on how small integration response can get, modified later with I-gain and thus more than the max control signal makes not much sense, the 20 comes from the estimated cooling power rescaling (SetPoint-2)
IntegrationMax = 700*5; #limit on how big integration response can get, modified later with the I-gain, the 60 comes from the estimated heating power rescaling
DiffMax = 20000000
PropActionThreshold = 0.0; # Set proportional feedback to 0 if correction would smaller than this value, units are in temperature deviation [K]; prevents some noise on control when having high P-gain.
DiffActionThreshold = 0.08; # Set differential feedback to 0 if correction would smaller than this value, units are in temperature deviation [K]; prevents some noise on control when having high D-gain
#ControlActionThreshold = [1,1,1,1,1,1,1,1]; # idea is to reduce wear on control elements by doing adjustments only when large changes occur, SRC,SRH,BRC,BRH,LRC,LRH
ControlActionThreshold = [2,2,4,3,2,2,2,2]; # idea is to reduce wear on control elements by doing adjustments only when large changes occur, SRC,SRH,BRC,BRH,LRC,LRH
PTab=[0,0,0,1] # contribution from table temperature to P gain
PSup=[1,1,1,0] # contribution from supply air (=incoming air) to P gain
PCoolingWater=[0,0,0,0] # contribution from cooling water (=cooling water) to P gain
ColdValveGain=[1,1,1,0] # correction for cold valve gain
HotValveGain=[1,1,1,0] # correction for hot valve gain, should be 1, however, better results with higher gain?
integfile = 'lastInteg.txt'#name of the file where last integrator array is kept.
pifile = 'PIparams.txt'#where PI parameters are kept and can be modified on the fly
mancontrolfile = 'manual_valve.txt' #used for manual control of valve positions
ser = serial.Serial('/dev/ttyUSB1') # open USB-serial port
if(not ser.isOpen()):
print 'Error: Serial Port Not Open'
ser.flushInput()
ser.flushOutput()
ser.baudrate = 9600;
ser.timeout = 0.1; #sets timeout of the serial port
counter = SignalDelay+1 #so that data is outputted on the first loop
WriteCounter = SignalDelay+1 #so that data is written on the first loop
errors_count = 0 #initial number of errors
notifier = email_notifier.notifier()
notifier.set_recepients(['micramm@gmail.com','hhaeffner@berkeley.edu','haeffnerlab@gmail.com'])
officialnotifier = email_notifier.notifier()
officialnotifier.set_recepients(['micramm@gmail.com','hhaeffner@berkeley.edu','haeffnerlab@gmail.com','physics-support@lists.berkeley.edu'])
class Valves():
def __init__(self):
self.previousSignal = zeros(ControlledValves)
self.newSignal = zeros(ControlledValves)
def sign(x):
if(x > 0.01):
return 1
if(x < -0.01):
return -1
else:
return 0
def ApplyValveSignal(self,incoming_signal):
self.newSignal = self.testResponseChange(incoming_signal)
for i in range(ControlledValves): # taking care of the hysteresis ....
newdirection = sign(self.newSignal[i] - oldvalvesignal[i])
if((newdirection != direction[i]) and (newdirection)): # valve turns around
direction[i] = newdirection
print str(time.strftime("%H:%M:%S", time.localtime())) + ': Direction change: Valve ' + str(i) + ' ' + str(direction[i])
oldvalvesignal[i] = self.newSignal[i]
self.newSignal[i] = clip(self.newSignal[i] + direction[i] * hysteresis[i]/2,ValveMin[i],ValveMax)
self.communicateSend()
return self.newSignal
#for for test in response to minimize valve motion and reduce wear and tear
def testResponseChange(self,signal):
for i in range(len(signal)):
if abs(signal[i]-self.previousSignal[i]) >= ControlActionThreshold[i]:
signal[i] = int(round(signal[i]))
self.previousSignal[i] = signal[i]
print str(time.strftime("%H:%M:%S", time.localtime())) + ': Changing Valve ' + str(i) + ' to ' + str(signal[i])
else:
signal[i] = int(round(self.previousSignal[i]))
return signal
def communicateSend(self):
signal = self.newSignal
for i in range(ControlledValves):
ser.write("d")
time.sleep(ComDelay)
ser.write(str(i))
time.sleep(ComDelay)
vsig = self.dec2hex(signal[i])
ser.write(vsig)
time.sleep(ComDelay)
ser.flushInput()
time.sleep(ComDelay)
def dec2hex(self, n):#"""return the hexadecimal string representation of integer n as a two digits representation in lowercase"""
string = "%x" % n
string = string.zfill(2)
return string
class ResponseCalculator():
def __init__(self):
self.lastErrSigArr = zeros(Ch) #initial vale of lastErrorSignal, used to disable Diff gain for the first time
self.loadExternalParams()
def loadExternalParams(self):
if(os.path.isfile(integfile)):#if integ file exists (with information about last integration), open it in read/write mode and read in last integrator setting
self.INTEGFILE = open(integfile,"r+");
self.integralerrorSigArr = array(pickle.load(self.INTEGFILE))
else: #if file does not exist, create it and specify initial integrator parameters.
self.INTEGFILE = open(integfile,"w");
self.integralerrorSigArr = zeros(Ch)
if(os.path.isfile(pifile)): #if file exists, load the PI parameters
self.PIFILE = open(pifile,"r+")
self.P = array(pickle.load(self.PIFILE))
self.I = array(pickle.load(self.PIFILE))
self.D = array(pickle.load(self.PIFILE))
self.PIFILE.close()
else:
self.PIFILE = open(pifile,"w") #if file doesn't not exist, create it
#proportionality constant for PID in the format [#144 big room / #140 small room / #144B Laser Room / #144A office]
self.P = array([-15,-15,-15,-0])
self.I = array([-.1,-.1,-.1,-0])
self.D = array([-40,-40,-40,0])
pickle.dump(self.P.tolist(),self.PIFILE)
pickle.dump(self.I.tolist(),self.PIFILE)
pickle.dump(self.D.tolist(),self.PIFILE)
self.PIFILE.close()
self.PImodtime = os.path.getmtime(pifile) #time when pifile is last modified
def updateExternalPIDParams(self):
if(os.path.getmtime(pifile) != self.PImodtime): #if PI parmeters have been modified externally, update them
self.PIFILE = open(pifile, 'r')
self.P = array(pickle.load(self.PIFILE))
self.I = array(pickle.load(self.PIFILE))
self.D = array(pickle.load(self.PIFILE))
self.PIFILE.close()
self.PImodtime = os.path.getmtime(pifile)
print("new P,I,D parameters are")
print self.P
print self.I
print self.D
def getResponse(self):
return [self.PIDresponseArr,self.valvesignalArr]
def calculateResponse(self, curTempArr):
self.errorSigArr = self.finderrorSig(curTempArr)
self.integralerrorSigArr = self.calcintegrator(self.integralerrorSigArr, self.errorSigArr)
self.saveIntegralError(self.integralerrorSigArr)
self.PIDresponseArr = self.findPIDresponse(self.errorSigArr, self.integralerrorSigArr,self.lastErrSigArr)
self.lastErrSigArr= self.errorSigArr
self.valvesignalArr = self.CalcValveSignal(self.PIDresponseArr, curTempArr)
def saveIntegralError(self,integError):
#print integError
self.INTEGFILE.seek(0) #moves position to the beginning of the file
pickle.dump(integError, self.INTEGFILE)
self.INTEGFILE.truncate()
def finderrorSig(self, CurTemp): #takes array with current temperatures and finds the error signal array
error = CurTemp - SetPoint
return error
def calcintegrator(self,oldArr, newArr):
TotalArr = oldArr + newArr
# Normalize maximum by the mean of the integration constants
minim = IntegrationMin/(-sum(self.I)/len(self.I))
maxim = IntegrationMax/(-sum(self.I)/len(self.I))
TotalArr=clip(TotalArr,minim,maxim)
return TotalArr
def findPIDresponse(self,curErrArr, IntErrArr, lastErrArr): #produces array containg signal to be sent to valves in format [Control1, Control2..] where each one is measured from -255 to 255 positive to hotter, negative for colder
P = self.P
I = self.I
D = self.D
propArr = zeros(ControlCh)
propArr[bigroomctrl] = PSup[bigroomctrl]*curErrArr[SupplyBigRoom-1]#0 + PTab[bigroomctrl]*curErrArr[Table1-1]#0 + PCoolingWater[bigroomctrl]*curErrArr[ColdWaterBigRoom]
propArr[smlroomctrl] = PSup[smlroomctrl]*curErrArr[SupplySmallRoom-1]#0 + PTab[smlroomctrl]*curErrArr[Table3-1]#0 + PCoolingWater[smlroomctrl]*curErrArr[ColdWaterSmallRoom]
propArr[laserroomctrl] = PSup[laserroomctrl]*curErrArr[SupplyLaserRoom-1]#0 + PTab[laserroomctrl]*curErrArr[Table4-1]#0 + PCoolingWater[laserroomctrl]*curErrArr[ColdWaterLaserRoom]
propArr[officectrl] = 0 #no control in office
propArr = propArr - clip(propArr, -PropActionThreshold,PropActionThreshold)
proprespArr = (P * propArr) # when used with arrays, * is component by component multiplcation or dot product for 1D arrays
integArr = zeros(ControlCh)
integArr[bigroomctrl] = IntErrArr[Table1-1]
integArr[smlroomctrl] = IntErrArr[Table3-1]
integArr[laserroomctrl] = IntErrArr[Table4-1]
integArr[officectrl] = 0 #no control in office
integrespArr = (I * integArr) # when used with arrays, * is component by component multiplcation or dot product for 1D arrays
#print integArr
if((lastErrArr == zeros(Ch)).any()): #when the lastErrArr is the zero array, then don't do any diff because it's the first run
diffrespArr = zeros(ControlCh)
else:
diffArr = zeros(ControlCh)
DiffErrArr = curErrArr - lastErrArr
diffArr[bigroomctrl] = DiffErrArr[SupplyBigRoom-1] + ColdWaterDiffGain[bigroomctrl] * DiffErrArr[ColdWaterBigRoom-1] / D[bigroomctrl]
diffArr[smlroomctrl] = DiffErrArr[SupplySmallRoom-1] + ColdWaterDiffGain[smlroomctrl] * DiffErrArr[ColdWaterSmallRoom-1] / D[smlroomctrl]
diffArr[laserroomctrl] = DiffErrArr[SupplyLaserRoom-1] + ColdWaterDiffGain[laserroomctrl] * DiffErrArr[ColdWaterLaserRoom-1] / D[laserroomctrl]
diffArr[officectrl] = 0 # no control in office
diffArr = diffArr - clip(diffArr, -DiffActionThreshold,DiffActionThreshold)
diffrespArr = (D * diffArr)
diffrespArr = clip(diffrespArr, -DiffMax, DiffMax)
print 'P', proprespArr
print 'I', integrespArr
print 'D', diffrespArr
responseArr = proprespArr + integrespArr + diffrespArr
return responseArr
def CalcValveSignal(self,responseArr,curTempArr):#hard codes which control channel correspond to which output number
valvesignalArr = zeros(ControlledValves)
#ColdWater = array([curTempArr[ColdWaterBigRoom-1], curTempArr[ColdWaterSmallRoom-1], curTempArr[ColdWaterLaserRoom-1],0 ])
#ColdWater = clip(ColdWater,0,20)
ColdWater = array([13.0,13.0,13.0,0.0]); # set cold water temp to 13 degrees because the sensor is not working atm
HotWater = array([curTempArr[HotWaterBigRoom-1], curTempArr[HotWaterSmallRoom-1], curTempArr[HotWaterLaserRoom-1], 0])
SetPointAux = array([SetPoint[Table1-1], SetPoint[Table3-1], SetPoint[Table4-1], 0])
CoolingPower = clip(SetPointAux - ColdWater - ColdWaterTempCorrection,1.0,100.0) # estimate cooling power for valve settings, always assume some cooling power
HeatingPower = clip(HotWater - SetPointAux,20.0,200.0) # minum heating power corresponds to 20 degrees temp-difference
ColdValveSignal = - responseArr/CoolingPower*ColdValveGain + Coldoffset# + ColdWaterValveGain * (ColdWater-ColdWaterTempBase)
HotValveSignal = Hotoffset + responseArr/HeatingPower*HotValveGain
valvesignalArr[0] = ColdValveSignal[smlroomctrl]
valvesignalArr[1] = HotValveSignal[smlroomctrl]
valvesignalArr[2] = ColdValveSignal[bigroomctrl]
valvesignalArr[3] = HotValveSignal[bigroomctrl]
valvesignalArr[4] = ColdValveSignal[laserroomctrl]
valvesignalArr[5] = HotValveSignal[laserroomctrl]
valvesignalArr[6] = 0
valvesignalArr[7] = 0
# valvesignalArr[0] = clip(ColdValveSignal[smlroomctrl],ValveMin[0],ValveMax)
# valvesignalArr[1] = clip(HotValveSignal[smlroomctrl],ValveMin[1],ValveMax)
# valvesignalArr[2] = clip(ColdValveSignal[bigroomctrl],ValveMin[2],ValveMax)
# valvesignalArr[3] = clip(HotValveSignal[bigroomctrl],ValveMin[3],ValveMax)
# valvesignalArr[4] = clip(ColdValveSignal[laserroomctrl],ValveMin[4],ValveMax)
# valvesignalArr[5] = clip(HotValveSignal[laserroomctrl],ValveMin[5],ValveMax)
# valvesignalArr[6] = 0
# valvesignalArr[7] = 0
valvesignal = valvesignalArr.tolist()
return valvesignalArr
def | (self):
self.INTEGFILE.close()
class DataAcquisition():
def binarytoTempC(self,bin, ch): #converts binary output to a physical temperature in C
Vin = 2.56*(float(bin)+1)/1024 #voltage that is read in 1023 is 2.56 0 is 0
dV = (15/HardwareG[ch])*(Vin/1.2 - 1) #when G = 15 (most channels) dV of 2.4 corresponds to bridge voltage of 1 and dV of 0 is bridge voltage of -1
#G = 5 for low res channels for cold water, hot water supply
#G is determines by INA114 gain resistor
R = (dV/V0 +.5) / (- dV/V0 + .5) * 10 #convert bridge voltage to R in kohms
T = 1/(a + b*math.log(R/10.) + c * pow(math.log(R/10.),2) + d * pow(math.log(R/10.),3)) #consult datasheet for this
TempC = round(T - 273.15,2) #Kelvin to C
return TempC
def readTemp(self,ser):#processing the input in the format 03:1023<space>... where 03 is the number of the detector, 1023 is the voltage representation
#returns array with data
global errors_count
curTempArr = zeros(Ch)
ser.write('t') # command to output readings
curLine = ser.read(Ch*8) # reads 128 bytes, 16 channels 7 bytes each and 16 spaces
if(len(curLine)==128): # read everything correctly
for i in range(Ch):
# left and right ranges for number of voltages
lnum = 8*i + 0
rnum = 8*i + 2
lvol = 8*i + 3
rvol = 8*i + 7
num = curLine[lnum:rnum] #number of the detector is the first
vol = int(curLine[lvol:rvol]) #voltage readout
TempC = self.binarytoTempC(vol, i)
curTempArr[i] = TempC
else:
if(errors_count > 20):
notifier.set_content('AC ALARM','The program quit because there were too many errors with data acquisition')
notifier.send()
sys.exit()
errors_count = errors_count + 1
print "Error: Data not collected"
print curLine
time.sleep(DataFreq)
curTempArr = self.readTemp(ser)
return curTempArr
class RunningAverage():
def __init__(self):
self.RunningAvgNum = RunningAvgNum
self.historyArr = zeros([self.RunningAvgNum,Ch])
self.binfull = 0
self.historyCounter = 0
self.printintro()
def printintro(self):
print '\n' + 'Filling up history for ' + str(self.RunningAvgNum) +' seconds \n'
def printbinfull(self):
print 'Running Average Operational'
def addNumber(self,newnumber):
self.historyArr[self.historyCounter,:] = newnumber #updates history by cycling through rows of historyArr and replacing old data with readTemp
self.historyCounter = (self.historyCounter + 1) % self.RunningAvgNum
if(self.historyCounter == 0):
if(self.binfull == 0):
self.printbinfull()
self.binfull = 1
def getAverage(self):
if(self.binfull): #if bin is full, take the mean
average = mean(self.historyArr,axis=0) #current temperature is the average of the columns of the history array
else: #if bin is not filled, return mean of existing elements
average = sum(self.historyArr[0:(self.historyCounter+1),:],axis=0)/(self.historyCounter)
return average
class ManualController():
def __init__(self):
if(os.path.isfile(mancontrolfile)):#if file exists open it in read mode
pass
else: #if file doesn't exist, create it
self.FILE = open(mancontrolfile,"w");
pickle.dump(0,self.FILE) #indicates automatic control, 1 is manual
pickle.dump(zeros(ControlledValves).tolist(),self.FILE)
self.FILE.close()
self.modtime = os.path.getmtime(mancontrolfile)
self.valves = zeros(ControlledValves)
def isControlManual(self):
self.FILE = open(mancontrolfile,"r");
self.mancontrol = pickle.load(self.FILE)
self.valves = array(pickle.load(self.FILE))
self.FILE.close()
return self.mancontrol
def ManualValvePos(self):
return self.valves
class AlarmChecker():
def __init__(self):
self.messagesent = 0
self.callstoReset = 900*12 # set time for next alarm to 12h
self.callsCount = 0
self.messageMax = 1 #maximum number of allowed emails per the number of callstoReset
def updateCallsCount(self):
if(self.callsCount >= self.callstoReset):
self.messagesent = 0
self.callsCount = 0
print 'alarm armed again'
else:
self.callsCount = self.callsCount + 1;
def checkForAlarm(self,curTempArr):
self.updateCallsCount()
if(abs(curTempArr[Table1 - 1] - SetPoint[Table1 -1]) > 2):
notifier.set_content('AC ALARM','The differential between Table1 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
if(abs(curTempArr[Table3 - 1] - SetPoint[Table3 - 1]) > 2):
notifier.set_content('AC ALARM','The differential between Table3 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
if(abs(curTempArr[Table4 - 1] - SetPoint[Table4 - 1]) > 2):
notifier.set_content('AC ALARM','The differential between Table4 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
#if(abs(curTempArr[ColdWaterBigRoom - 1] - 7) > 8):
#notifier.set_content('AC ALARM','The ColdWaterBigRoom temperature is too far from norm')
#officialnotifier.set_content('Haeffner Lab: Possible Chiller Issue','The cold water supply temperature is currently ' + str(curTempArr[ColdWaterBigRoom - 1])+ ', too far from norm of 7 degrees \n This is an automatically generated email.')
#if(self.messagesent < self.messageMax):
#notifier.send()
#officialnotifier.send()
#self.messagesent = self.messagesent + 1
if(abs(curTempArr[HotWaterBigRoom - 1] - 50) > 40): # Hot water varies really a lot
notifier.set_content('AC ALARM','The HotWaterBigRoom temperature is too far from norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
alarmchecker = AlarmChecker()
runaverage = RunningAverage()
acquire = DataAcquisition()
log = Logger.Logger()
valves = Valves()
responsecalculator = ResponseCalculator()
manualcontrol = ManualController()
try:
while('true'):
counter = counter + DataFreq
WriteCounter = WriteCounter + DataFreq
time.sleep(DataFreq)
runaverage.addNumber(acquire.readTemp(ser))
curTempArr = runaverage.getAverage()
alarmchecker.checkForAlarm(curTempArr)
responsecalculator.calculateResponse(curTempArr)
if counter > SignalDelay: #apply the output singal every SignalDelay seconds
counter = 0
if(manualcontrol.isControlManual()):
valvesignalArr = manualcontrol.ManualValvePos()
PIDresponseArr = zeros(ControlCh) #neded for logging 0s
print 'manual control' + str(valvesignalArr)
else:
[PIDresponseArr,valvesignalArr] = responsecalculator.getResponse()
sentValveSignalArr = valves.ApplyValveSignal(valvesignalArr)
#print sentValveSignalArr
if WriteCounter > WriteDelay: #write data to log file
WriteCounter = 0
log.MakeLog(curTempArr, PIDresponseArr,sentValveSignalArr)
responsecalculator.updateExternalPIDParams()
except KeyboardInterrupt:
time.sleep(DataFreq)
ser.flushInput()
ser.flushOutput()
ser.close() # closes the serial port
print 'Graceful exit'
| __del__ | identifier_name |
TempTaker_old.py | import serial #library for interfacing with the serial port
import time #library for pausing the script
import math
import sys
import os #for file operations
import pickle #for easy exporting/importing of datatypes
import types #for recognizing types of objects, functions on multiple data types
from numpy import *
import Logger
import email_notifier
directory = 'Log/'; os.chdir(directory);
ColdWaterTempBase = 7 # average ColdwaterTemp estimate
SetPoint = [22,22,22,22,22,22,22,22,22,22,22,22,70,22,ColdWaterTempBase,22] #channels 13 and 15 (or 12,14 counting from 0) are hot/cold water
SignalDelay = 2 #how many seconds to wait before changing applied voltages and how often a response is calculated, seems to be a least 5s of minimum delay, probably an offset.
WriteDelay = 10 #how many seconds to wait before logging the data
RunningAvgNum = 12 #how many iterations is the window for the running average of temperature measurements
DataFreq = 0.95 #how often to take data
ComDelay = 0.10 #how long to wait before sending consecutive communcations in seconds
Ch = 16; # written for 16 incoming chaneels
ControlCh = 4 #number of controlled rooms
ControlledValves = 8 # number of valves
a = 3.3540154E-03; b = 2.5627725E-04; c = 2.0829210E-06 ; d = 7.3003206E-08 # coeffs for voltage to T conversion info from thermistor datasheet
V0 = 6.95 #volts on the voltage reference
#corresponding channels
Table1=8;Table2=5;Table3=1;Table4=6;Table5=9;SupplyBigRoom=14;SupplyLaserRoom=16;SupplySmallRoom=11;ColdWaterSmallRoom=15;ColdWaterBigRoom=15;ColdWaterLaserRoom=15;HotWaterSmallRoom=13;HotWaterBigRoom=13;HotWaterLaserRoom=13;
HardwareG = [15,15,15,15,15,15,15,15,15,15,15,15,5,15,5,15] #channels 13 and 15 (or 12,14 counting from 0) have less gain for expanded range
#Control Signal
bigroomctrl = 0 #144 big room
smlroomctrl = 1 #140
laserroomctrl = 2 #144B laserroom
officectrl = 3 #144A office
#Coldoffset = [63,25,45,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
#Hotoffset = [70,80,75,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
#ValveMin = [15,62,52,65,35,60,0,0] # making sure that the valves never close completly [smlroom-cold,hot, big room-cold, hot, laser room-cold, hot]
Coldoffset = [65,35,45,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
Hotoffset = [67,70,70,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
ValveMin = [15,45,55,55,35,30,0,0] # making sure that the valves never close completly [smlroom-cold,hot, big room-cold, hot, laser room-cold, hot]
ColdWaterTempCorrection = [0,0,0,0] # should be zero, [big,sml, laser,office]
ColdWaterDiffGain = [-0,-0,-0] # should be zero
#ColdWaterDiffGain = [-10000,-10000,-10000] # should be zero
ColdWaterValveGain = [0.1, 0.2, 0.2, 0] # should be zero, adds some value (ColdWaterValveGain * (ActualColdWater-ColdWaterTempBase)) to the cold valve depending on the cold water tempererature deviation from ColdWaterTempBase, [big,sml, laser]
ValveMax = 255
GuessHotWaterTemp = 70
oldvalvesignal=zeros(ControlledValves)
direction=zeros(ControlledValves)
hysteresis = [ 22, 12, 2, 7, 22, 12, 0, 0] # smlroom (cold, hot), bigroom (cold, hot), laserroom (cold, hot)
#hysteresis = [ 14, 12, 2, 7, 17, 12, 0, 0] # working well
#hysteresis = [ 12, 20, 3, 13, 35, 25, 0, 0]
IntegrationMin = -500*5; #limit on how small integration response can get, modified later with I-gain and thus more than the max control signal makes not much sense, the 20 comes from the estimated cooling power rescaling (SetPoint-2)
IntegrationMax = 700*5; #limit on how big integration response can get, modified later with the I-gain, the 60 comes from the estimated heating power rescaling
DiffMax = 20000000
PropActionThreshold = 0.0; # Set proportional feedback to 0 if correction would smaller than this value, units are in temperature deviation [K]; prevents some noise on control when having high P-gain.
DiffActionThreshold = 0.08; # Set differential feedback to 0 if correction would smaller than this value, units are in temperature deviation [K]; prevents some noise on control when having high D-gain
#ControlActionThreshold = [1,1,1,1,1,1,1,1]; # idea is to reduce wear on control elements by doing adjustments only when large changes occur, SRC,SRH,BRC,BRH,LRC,LRH
ControlActionThreshold = [2,2,4,3,2,2,2,2]; # idea is to reduce wear on control elements by doing adjustments only when large changes occur, SRC,SRH,BRC,BRH,LRC,LRH
PTab=[0,0,0,1] # contribution from table temperature to P gain
PSup=[1,1,1,0] # contribution from supply air (=incoming air) to P gain
PCoolingWater=[0,0,0,0] # contribution from cooling water (=cooling water) to P gain
ColdValveGain=[1,1,1,0] # correction for cold valve gain
HotValveGain=[1,1,1,0] # correction for hot valve gain, should be 1, however, better results with higher gain?
integfile = 'lastInteg.txt'#name of the file where last integrator array is kept.
pifile = 'PIparams.txt'#where PI parameters are kept and can be modified on the fly
mancontrolfile = 'manual_valve.txt' #used for manual control of valve positions
ser = serial.Serial('/dev/ttyUSB1') # open USB-serial port
if(not ser.isOpen()):
print 'Error: Serial Port Not Open'
ser.flushInput()
ser.flushOutput()
ser.baudrate = 9600;
ser.timeout = 0.1; #sets timeout of the serial port
counter = SignalDelay+1 #so that data is outputted on the first loop
WriteCounter = SignalDelay+1 #so that data is written on the first loop
errors_count = 0 #initial number of errors
notifier = email_notifier.notifier()
notifier.set_recepients(['micramm@gmail.com','hhaeffner@berkeley.edu','haeffnerlab@gmail.com'])
officialnotifier = email_notifier.notifier()
officialnotifier.set_recepients(['micramm@gmail.com','hhaeffner@berkeley.edu','haeffnerlab@gmail.com','physics-support@lists.berkeley.edu'])
class Valves():
def __init__(self):
self.previousSignal = zeros(ControlledValves)
self.newSignal = zeros(ControlledValves)
def sign(x):
if(x > 0.01):
return 1
if(x < -0.01):
return -1
else:
return 0
def ApplyValveSignal(self,incoming_signal):
self.newSignal = self.testResponseChange(incoming_signal)
for i in range(ControlledValves): # taking care of the hysteresis ....
newdirection = sign(self.newSignal[i] - oldvalvesignal[i])
if((newdirection != direction[i]) and (newdirection)): # valve turns around
direction[i] = newdirection
print str(time.strftime("%H:%M:%S", time.localtime())) + ': Direction change: Valve ' + str(i) + ' ' + str(direction[i])
oldvalvesignal[i] = self.newSignal[i]
self.newSignal[i] = clip(self.newSignal[i] + direction[i] * hysteresis[i]/2,ValveMin[i],ValveMax)
self.communicateSend()
return self.newSignal
#for for test in response to minimize valve motion and reduce wear and tear
def testResponseChange(self,signal):
for i in range(len(signal)):
if abs(signal[i]-self.previousSignal[i]) >= ControlActionThreshold[i]:
signal[i] = int(round(signal[i]))
self.previousSignal[i] = signal[i]
print str(time.strftime("%H:%M:%S", time.localtime())) + ': Changing Valve ' + str(i) + ' to ' + str(signal[i])
else:
signal[i] = int(round(self.previousSignal[i]))
return signal
def communicateSend(self):
signal = self.newSignal
for i in range(ControlledValves):
ser.write("d")
time.sleep(ComDelay)
ser.write(str(i))
time.sleep(ComDelay)
vsig = self.dec2hex(signal[i])
ser.write(vsig)
time.sleep(ComDelay)
ser.flushInput()
time.sleep(ComDelay)
def dec2hex(self, n):#"""return the hexadecimal string representation of integer n as a two digits representation in lowercase"""
string = "%x" % n
string = string.zfill(2)
return string
class ResponseCalculator():
def __init__(self):
self.lastErrSigArr = zeros(Ch) #initial vale of lastErrorSignal, used to disable Diff gain for the first time
self.loadExternalParams()
def loadExternalParams(self):
if(os.path.isfile(integfile)):#if integ file exists (with information about last integration), open it in read/write mode and read in last integrator setting
self.INTEGFILE = open(integfile,"r+");
self.integralerrorSigArr = array(pickle.load(self.INTEGFILE))
else: #if file does not exist, create it and specify initial integrator parameters.
self.INTEGFILE = open(integfile,"w");
self.integralerrorSigArr = zeros(Ch)
if(os.path.isfile(pifile)): #if file exists, load the PI parameters
self.PIFILE = open(pifile,"r+")
self.P = array(pickle.load(self.PIFILE))
self.I = array(pickle.load(self.PIFILE))
self.D = array(pickle.load(self.PIFILE))
self.PIFILE.close()
else:
self.PIFILE = open(pifile,"w") #if file doesn't not exist, create it
#proportionality constant for PID in the format [#144 big room / #140 small room / #144B Laser Room / #144A office]
self.P = array([-15,-15,-15,-0])
self.I = array([-.1,-.1,-.1,-0])
self.D = array([-40,-40,-40,0])
pickle.dump(self.P.tolist(),self.PIFILE)
pickle.dump(self.I.tolist(),self.PIFILE)
pickle.dump(self.D.tolist(),self.PIFILE)
self.PIFILE.close()
self.PImodtime = os.path.getmtime(pifile) #time when pifile is last modified
def updateExternalPIDParams(self):
if(os.path.getmtime(pifile) != self.PImodtime): #if PI parmeters have been modified externally, update them
self.PIFILE = open(pifile, 'r')
self.P = array(pickle.load(self.PIFILE))
self.I = array(pickle.load(self.PIFILE))
self.D = array(pickle.load(self.PIFILE))
self.PIFILE.close()
self.PImodtime = os.path.getmtime(pifile)
print("new P,I,D parameters are")
print self.P
print self.I
print self.D
def getResponse(self):
return [self.PIDresponseArr,self.valvesignalArr]
def calculateResponse(self, curTempArr):
self.errorSigArr = self.finderrorSig(curTempArr)
self.integralerrorSigArr = self.calcintegrator(self.integralerrorSigArr, self.errorSigArr)
self.saveIntegralError(self.integralerrorSigArr)
self.PIDresponseArr = self.findPIDresponse(self.errorSigArr, self.integralerrorSigArr,self.lastErrSigArr)
self.lastErrSigArr= self.errorSigArr
self.valvesignalArr = self.CalcValveSignal(self.PIDresponseArr, curTempArr)
def saveIntegralError(self,integError):
#print integError
self.INTEGFILE.seek(0) #moves position to the beginning of the file
pickle.dump(integError, self.INTEGFILE)
self.INTEGFILE.truncate()
def finderrorSig(self, CurTemp): #takes array with current temperatures and finds the error signal array
error = CurTemp - SetPoint
return error
def calcintegrator(self,oldArr, newArr):
TotalArr = oldArr + newArr
# Normalize maximum by the mean of the integration constants
minim = IntegrationMin/(-sum(self.I)/len(self.I))
maxim = IntegrationMax/(-sum(self.I)/len(self.I))
TotalArr=clip(TotalArr,minim,maxim)
return TotalArr
def findPIDresponse(self,curErrArr, IntErrArr, lastErrArr): #produces array containg signal to be sent to valves in format [Control1, Control2..] where each one is measured from -255 to 255 positive to hotter, negative for colder
P = self.P
I = self.I
D = self.D
propArr = zeros(ControlCh)
propArr[bigroomctrl] = PSup[bigroomctrl]*curErrArr[SupplyBigRoom-1]#0 + PTab[bigroomctrl]*curErrArr[Table1-1]#0 + PCoolingWater[bigroomctrl]*curErrArr[ColdWaterBigRoom]
propArr[smlroomctrl] = PSup[smlroomctrl]*curErrArr[SupplySmallRoom-1]#0 + PTab[smlroomctrl]*curErrArr[Table3-1]#0 + PCoolingWater[smlroomctrl]*curErrArr[ColdWaterSmallRoom]
propArr[laserroomctrl] = PSup[laserroomctrl]*curErrArr[SupplyLaserRoom-1]#0 + PTab[laserroomctrl]*curErrArr[Table4-1]#0 + PCoolingWater[laserroomctrl]*curErrArr[ColdWaterLaserRoom]
propArr[officectrl] = 0 #no control in office
propArr = propArr - clip(propArr, -PropActionThreshold,PropActionThreshold)
proprespArr = (P * propArr) # when used with arrays, * is component by component multiplcation or dot product for 1D arrays
integArr = zeros(ControlCh)
integArr[bigroomctrl] = IntErrArr[Table1-1]
integArr[smlroomctrl] = IntErrArr[Table3-1]
integArr[laserroomctrl] = IntErrArr[Table4-1]
integArr[officectrl] = 0 #no control in office
integrespArr = (I * integArr) # when used with arrays, * is component by component multiplcation or dot product for 1D arrays
#print integArr
if((lastErrArr == zeros(Ch)).any()): #when the lastErrArr is the zero array, then don't do any diff because it's the first run
diffrespArr = zeros(ControlCh)
else:
diffArr = zeros(ControlCh)
DiffErrArr = curErrArr - lastErrArr
diffArr[bigroomctrl] = DiffErrArr[SupplyBigRoom-1] + ColdWaterDiffGain[bigroomctrl] * DiffErrArr[ColdWaterBigRoom-1] / D[bigroomctrl]
diffArr[smlroomctrl] = DiffErrArr[SupplySmallRoom-1] + ColdWaterDiffGain[smlroomctrl] * DiffErrArr[ColdWaterSmallRoom-1] / D[smlroomctrl]
diffArr[laserroomctrl] = DiffErrArr[SupplyLaserRoom-1] + ColdWaterDiffGain[laserroomctrl] * DiffErrArr[ColdWaterLaserRoom-1] / D[laserroomctrl]
diffArr[officectrl] = 0 # no control in office
diffArr = diffArr - clip(diffArr, -DiffActionThreshold,DiffActionThreshold)
diffrespArr = (D * diffArr)
diffrespArr = clip(diffrespArr, -DiffMax, DiffMax)
print 'P', proprespArr
print 'I', integrespArr
print 'D', diffrespArr
responseArr = proprespArr + integrespArr + diffrespArr
return responseArr
def CalcValveSignal(self,responseArr,curTempArr):#hard codes which control channel correspond to which output number
valvesignalArr = zeros(ControlledValves)
#ColdWater = array([curTempArr[ColdWaterBigRoom-1], curTempArr[ColdWaterSmallRoom-1], curTempArr[ColdWaterLaserRoom-1],0 ])
#ColdWater = clip(ColdWater,0,20)
ColdWater = array([13.0,13.0,13.0,0.0]); # set cold water temp to 13 degrees because the sensor is not working atm
HotWater = array([curTempArr[HotWaterBigRoom-1], curTempArr[HotWaterSmallRoom-1], curTempArr[HotWaterLaserRoom-1], 0])
SetPointAux = array([SetPoint[Table1-1], SetPoint[Table3-1], SetPoint[Table4-1], 0])
CoolingPower = clip(SetPointAux - ColdWater - ColdWaterTempCorrection,1.0,100.0) # estimate cooling power for valve settings, always assume some cooling power
HeatingPower = clip(HotWater - SetPointAux,20.0,200.0) # minum heating power corresponds to 20 degrees temp-difference
ColdValveSignal = - responseArr/CoolingPower*ColdValveGain + Coldoffset# + ColdWaterValveGain * (ColdWater-ColdWaterTempBase)
HotValveSignal = Hotoffset + responseArr/HeatingPower*HotValveGain
valvesignalArr[0] = ColdValveSignal[smlroomctrl]
valvesignalArr[1] = HotValveSignal[smlroomctrl]
valvesignalArr[2] = ColdValveSignal[bigroomctrl]
valvesignalArr[3] = HotValveSignal[bigroomctrl]
valvesignalArr[4] = ColdValveSignal[laserroomctrl]
valvesignalArr[5] = HotValveSignal[laserroomctrl]
valvesignalArr[6] = 0
valvesignalArr[7] = 0
# valvesignalArr[0] = clip(ColdValveSignal[smlroomctrl],ValveMin[0],ValveMax)
# valvesignalArr[1] = clip(HotValveSignal[smlroomctrl],ValveMin[1],ValveMax)
# valvesignalArr[2] = clip(ColdValveSignal[bigroomctrl],ValveMin[2],ValveMax)
# valvesignalArr[3] = clip(HotValveSignal[bigroomctrl],ValveMin[3],ValveMax)
# valvesignalArr[4] = clip(ColdValveSignal[laserroomctrl],ValveMin[4],ValveMax)
# valvesignalArr[5] = clip(HotValveSignal[laserroomctrl],ValveMin[5],ValveMax)
# valvesignalArr[6] = 0
# valvesignalArr[7] = 0
valvesignal = valvesignalArr.tolist()
return valvesignalArr
def __del__(self):
self.INTEGFILE.close()
class DataAcquisition():
def binarytoTempC(self,bin, ch): #converts binary output to a physical temperature in C
Vin = 2.56*(float(bin)+1)/1024 #voltage that is read in 1023 is 2.56 0 is 0
dV = (15/HardwareG[ch])*(Vin/1.2 - 1) #when G = 15 (most channels) dV of 2.4 corresponds to bridge voltage of 1 and dV of 0 is bridge voltage of -1
#G = 5 for low res channels for cold water, hot water supply
#G is determines by INA114 gain resistor
R = (dV/V0 +.5) / (- dV/V0 + .5) * 10 #convert bridge voltage to R in kohms
T = 1/(a + b*math.log(R/10.) + c * pow(math.log(R/10.),2) + d * pow(math.log(R/10.),3)) #consult datasheet for this
TempC = round(T - 273.15,2) #Kelvin to C
return TempC
def readTemp(self,ser):#processing the input in the format 03:1023<space>... where 03 is the number of the detector, 1023 is the voltage representation
#returns array with data
global errors_count
curTempArr = zeros(Ch)
ser.write('t') # command to output readings
curLine = ser.read(Ch*8) # reads 128 bytes, 16 channels 7 bytes each and 16 spaces
if(len(curLine)==128): # read everything correctly
for i in range(Ch):
# left and right ranges for number of voltages
lnum = 8*i + 0
rnum = 8*i + 2
lvol = 8*i + 3
rvol = 8*i + 7
num = curLine[lnum:rnum] #number of the detector is the first
vol = int(curLine[lvol:rvol]) #voltage readout
TempC = self.binarytoTempC(vol, i)
curTempArr[i] = TempC
else:
if(errors_count > 20):
notifier.set_content('AC ALARM','The program quit because there were too many errors with data acquisition')
notifier.send()
sys.exit()
errors_count = errors_count + 1
print "Error: Data not collected"
print curLine
time.sleep(DataFreq)
curTempArr = self.readTemp(ser)
return curTempArr
class RunningAverage():
def __init__(self):
self.RunningAvgNum = RunningAvgNum
self.historyArr = zeros([self.RunningAvgNum,Ch])
self.binfull = 0
self.historyCounter = 0
self.printintro()
def printintro(self):
|
def printbinfull(self):
print 'Running Average Operational'
def addNumber(self,newnumber):
self.historyArr[self.historyCounter,:] = newnumber #updates history by cycling through rows of historyArr and replacing old data with readTemp
self.historyCounter = (self.historyCounter + 1) % self.RunningAvgNum
if(self.historyCounter == 0):
if(self.binfull == 0):
self.printbinfull()
self.binfull = 1
def getAverage(self):
if(self.binfull): #if bin is full, take the mean
average = mean(self.historyArr,axis=0) #current temperature is the average of the columns of the history array
else: #if bin is not filled, return mean of existing elements
average = sum(self.historyArr[0:(self.historyCounter+1),:],axis=0)/(self.historyCounter)
return average
class ManualController():
def __init__(self):
if(os.path.isfile(mancontrolfile)):#if file exists open it in read mode
pass
else: #if file doesn't exist, create it
self.FILE = open(mancontrolfile,"w");
pickle.dump(0,self.FILE) #indicates automatic control, 1 is manual
pickle.dump(zeros(ControlledValves).tolist(),self.FILE)
self.FILE.close()
self.modtime = os.path.getmtime(mancontrolfile)
self.valves = zeros(ControlledValves)
def isControlManual(self):
self.FILE = open(mancontrolfile,"r");
self.mancontrol = pickle.load(self.FILE)
self.valves = array(pickle.load(self.FILE))
self.FILE.close()
return self.mancontrol
def ManualValvePos(self):
return self.valves
class AlarmChecker():
def __init__(self):
self.messagesent = 0
self.callstoReset = 900*12 # set time for next alarm to 12h
self.callsCount = 0
self.messageMax = 1 #maximum number of allowed emails per the number of callstoReset
def updateCallsCount(self):
if(self.callsCount >= self.callstoReset):
self.messagesent = 0
self.callsCount = 0
print 'alarm armed again'
else:
self.callsCount = self.callsCount + 1;
def checkForAlarm(self,curTempArr):
self.updateCallsCount()
if(abs(curTempArr[Table1 - 1] - SetPoint[Table1 -1]) > 2):
notifier.set_content('AC ALARM','The differential between Table1 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
if(abs(curTempArr[Table3 - 1] - SetPoint[Table3 - 1]) > 2):
notifier.set_content('AC ALARM','The differential between Table3 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
if(abs(curTempArr[Table4 - 1] - SetPoint[Table4 - 1]) > 2):
notifier.set_content('AC ALARM','The differential between Table4 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
#if(abs(curTempArr[ColdWaterBigRoom - 1] - 7) > 8):
#notifier.set_content('AC ALARM','The ColdWaterBigRoom temperature is too far from norm')
#officialnotifier.set_content('Haeffner Lab: Possible Chiller Issue','The cold water supply temperature is currently ' + str(curTempArr[ColdWaterBigRoom - 1])+ ', too far from norm of 7 degrees \n This is an automatically generated email.')
#if(self.messagesent < self.messageMax):
#notifier.send()
#officialnotifier.send()
#self.messagesent = self.messagesent + 1
if(abs(curTempArr[HotWaterBigRoom - 1] - 50) > 40): # Hot water varies really a lot
notifier.set_content('AC ALARM','The HotWaterBigRoom temperature is too far from norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
alarmchecker = AlarmChecker()
runaverage = RunningAverage()
acquire = DataAcquisition()
log = Logger.Logger()
valves = Valves()
responsecalculator = ResponseCalculator()
manualcontrol = ManualController()
try:
while('true'):
counter = counter + DataFreq
WriteCounter = WriteCounter + DataFreq
time.sleep(DataFreq)
runaverage.addNumber(acquire.readTemp(ser))
curTempArr = runaverage.getAverage()
alarmchecker.checkForAlarm(curTempArr)
responsecalculator.calculateResponse(curTempArr)
if counter > SignalDelay: #apply the output singal every SignalDelay seconds
counter = 0
if(manualcontrol.isControlManual()):
valvesignalArr = manualcontrol.ManualValvePos()
PIDresponseArr = zeros(ControlCh) #neded for logging 0s
print 'manual control' + str(valvesignalArr)
else:
[PIDresponseArr,valvesignalArr] = responsecalculator.getResponse()
sentValveSignalArr = valves.ApplyValveSignal(valvesignalArr)
#print sentValveSignalArr
if WriteCounter > WriteDelay: #write data to log file
WriteCounter = 0
log.MakeLog(curTempArr, PIDresponseArr,sentValveSignalArr)
responsecalculator.updateExternalPIDParams()
except KeyboardInterrupt:
time.sleep(DataFreq)
ser.flushInput()
ser.flushOutput()
ser.close() # closes the serial port
print 'Graceful exit'
| print '\n' + 'Filling up history for ' + str(self.RunningAvgNum) +' seconds \n' | identifier_body |
TempTaker_old.py | import serial #library for interfacing with the serial port
import time #library for pausing the script
import math
import sys
import os #for file operations
import pickle #for easy exporting/importing of datatypes
import types #for recognizing types of objects, functions on multiple data types
from numpy import *
import Logger
import email_notifier
directory = 'Log/'; os.chdir(directory);
ColdWaterTempBase = 7 # average ColdwaterTemp estimate
SetPoint = [22,22,22,22,22,22,22,22,22,22,22,22,70,22,ColdWaterTempBase,22] #channels 13 and 15 (or 12,14 counting from 0) are hot/cold water
SignalDelay = 2 #how many seconds to wait before changing applied voltages and how often a response is calculated, seems to be a least 5s of minimum delay, probably an offset.
WriteDelay = 10 #how many seconds to wait before logging the data
RunningAvgNum = 12 #how many iterations is the window for the running average of temperature measurements
DataFreq = 0.95 #how often to take data
ComDelay = 0.10 #how long to wait before sending consecutive communcations in seconds
Ch = 16; # written for 16 incoming chaneels
ControlCh = 4 #number of controlled rooms
ControlledValves = 8 # number of valves
a = 3.3540154E-03; b = 2.5627725E-04; c = 2.0829210E-06 ; d = 7.3003206E-08 # coeffs for voltage to T conversion info from thermistor datasheet
V0 = 6.95 #volts on the voltage reference
#corresponding channels
Table1=8;Table2=5;Table3=1;Table4=6;Table5=9;SupplyBigRoom=14;SupplyLaserRoom=16;SupplySmallRoom=11;ColdWaterSmallRoom=15;ColdWaterBigRoom=15;ColdWaterLaserRoom=15;HotWaterSmallRoom=13;HotWaterBigRoom=13;HotWaterLaserRoom=13;
HardwareG = [15,15,15,15,15,15,15,15,15,15,15,15,5,15,5,15] #channels 13 and 15 (or 12,14 counting from 0) have less gain for expanded range
#Control Signal
bigroomctrl = 0 #144 big room
smlroomctrl = 1 #140
laserroomctrl = 2 #144B laserroom
officectrl = 3 #144A office
#Coldoffset = [63,25,45,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
#Hotoffset = [70,80,75,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
#ValveMin = [15,62,52,65,35,60,0,0] # making sure that the valves never close completly [smlroom-cold,hot, big room-cold, hot, laser room-cold, hot]
Coldoffset = [65,35,45,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
Hotoffset = [67,70,70,0] #adding an offset because the valve seems to open only at around 50 [big,sml,laser]
ValveMin = [15,45,55,55,35,30,0,0] # making sure that the valves never close completly [smlroom-cold,hot, big room-cold, hot, laser room-cold, hot]
ColdWaterTempCorrection = [0,0,0,0] # should be zero, [big,sml, laser,office]
ColdWaterDiffGain = [-0,-0,-0] # should be zero
#ColdWaterDiffGain = [-10000,-10000,-10000] # should be zero
ColdWaterValveGain = [0.1, 0.2, 0.2, 0] # should be zero, adds some value (ColdWaterValveGain * (ActualColdWater-ColdWaterTempBase)) to the cold valve depending on the cold water tempererature deviation from ColdWaterTempBase, [big,sml, laser]
ValveMax = 255
GuessHotWaterTemp = 70
oldvalvesignal=zeros(ControlledValves)
direction=zeros(ControlledValves)
hysteresis = [ 22, 12, 2, 7, 22, 12, 0, 0] # smlroom (cold, hot), bigroom (cold, hot), laserroom (cold, hot)
#hysteresis = [ 14, 12, 2, 7, 17, 12, 0, 0] # working well
#hysteresis = [ 12, 20, 3, 13, 35, 25, 0, 0]
IntegrationMin = -500*5; #limit on how small integration response can get, modified later with I-gain and thus more than the max control signal makes not much sense, the 20 comes from the estimated cooling power rescaling (SetPoint-2)
IntegrationMax = 700*5; #limit on how big integration response can get, modified later with the I-gain, the 60 comes from the estimated heating power rescaling
DiffMax = 20000000
PropActionThreshold = 0.0; # Set proportional feedback to 0 if correction would smaller than this value, units are in temperature deviation [K]; prevents some noise on control when having high P-gain.
DiffActionThreshold = 0.08; # Set differential feedback to 0 if correction would smaller than this value, units are in temperature deviation [K]; prevents some noise on control when having high D-gain
#ControlActionThreshold = [1,1,1,1,1,1,1,1]; # idea is to reduce wear on control elements by doing adjustments only when large changes occur, SRC,SRH,BRC,BRH,LRC,LRH
ControlActionThreshold = [2,2,4,3,2,2,2,2]; # idea is to reduce wear on control elements by doing adjustments only when large changes occur, SRC,SRH,BRC,BRH,LRC,LRH
PTab=[0,0,0,1] # contribution from table temperature to P gain
PSup=[1,1,1,0] # contribution from supply air (=incoming air) to P gain
PCoolingWater=[0,0,0,0] # contribution from cooling water (=cooling water) to P gain
ColdValveGain=[1,1,1,0] # correction for cold valve gain
HotValveGain=[1,1,1,0] # correction for hot valve gain, should be 1, however, better results with higher gain?
integfile = 'lastInteg.txt'#name of the file where last integrator array is kept.
pifile = 'PIparams.txt'#where PI parameters are kept and can be modified on the fly
mancontrolfile = 'manual_valve.txt' #used for manual control of valve positions
ser = serial.Serial('/dev/ttyUSB1') # open USB-serial port
if(not ser.isOpen()):
print 'Error: Serial Port Not Open'
ser.flushInput()
ser.flushOutput()
ser.baudrate = 9600;
ser.timeout = 0.1; #sets timeout of the serial port
counter = SignalDelay+1 #so that data is outputted on the first loop
WriteCounter = SignalDelay+1 #so that data is written on the first loop
errors_count = 0 #initial number of errors
notifier = email_notifier.notifier()
notifier.set_recepients(['micramm@gmail.com','hhaeffner@berkeley.edu','haeffnerlab@gmail.com'])
officialnotifier = email_notifier.notifier()
officialnotifier.set_recepients(['micramm@gmail.com','hhaeffner@berkeley.edu','haeffnerlab@gmail.com','physics-support@lists.berkeley.edu'])
class Valves():
def __init__(self):
self.previousSignal = zeros(ControlledValves)
self.newSignal = zeros(ControlledValves)
def sign(x):
if(x > 0.01):
return 1
if(x < -0.01):
return -1
else:
return 0
def ApplyValveSignal(self,incoming_signal):
self.newSignal = self.testResponseChange(incoming_signal)
for i in range(ControlledValves): # taking care of the hysteresis ....
newdirection = sign(self.newSignal[i] - oldvalvesignal[i])
if((newdirection != direction[i]) and (newdirection)): # valve turns around
direction[i] = newdirection
print str(time.strftime("%H:%M:%S", time.localtime())) + ': Direction change: Valve ' + str(i) + ' ' + str(direction[i])
oldvalvesignal[i] = self.newSignal[i]
self.newSignal[i] = clip(self.newSignal[i] + direction[i] * hysteresis[i]/2,ValveMin[i],ValveMax)
self.communicateSend()
return self.newSignal
#for for test in response to minimize valve motion and reduce wear and tear
def testResponseChange(self,signal):
for i in range(len(signal)):
if abs(signal[i]-self.previousSignal[i]) >= ControlActionThreshold[i]:
signal[i] = int(round(signal[i]))
self.previousSignal[i] = signal[i]
print str(time.strftime("%H:%M:%S", time.localtime())) + ': Changing Valve ' + str(i) + ' to ' + str(signal[i])
else:
signal[i] = int(round(self.previousSignal[i]))
return signal
def communicateSend(self):
signal = self.newSignal
for i in range(ControlledValves):
ser.write("d")
time.sleep(ComDelay)
ser.write(str(i))
time.sleep(ComDelay)
vsig = self.dec2hex(signal[i])
ser.write(vsig)
time.sleep(ComDelay)
ser.flushInput()
time.sleep(ComDelay)
def dec2hex(self, n):#"""return the hexadecimal string representation of integer n as a two digits representation in lowercase"""
string = "%x" % n
string = string.zfill(2)
return string
class ResponseCalculator():
def __init__(self):
self.lastErrSigArr = zeros(Ch) #initial vale of lastErrorSignal, used to disable Diff gain for the first time
self.loadExternalParams()
def loadExternalParams(self):
if(os.path.isfile(integfile)):#if integ file exists (with information about last integration), open it in read/write mode and read in last integrator setting
self.INTEGFILE = open(integfile,"r+");
self.integralerrorSigArr = array(pickle.load(self.INTEGFILE))
else: #if file does not exist, create it and specify initial integrator parameters.
self.INTEGFILE = open(integfile,"w");
self.integralerrorSigArr = zeros(Ch)
if(os.path.isfile(pifile)): #if file exists, load the PI parameters
self.PIFILE = open(pifile,"r+")
self.P = array(pickle.load(self.PIFILE))
self.I = array(pickle.load(self.PIFILE))
self.D = array(pickle.load(self.PIFILE))
self.PIFILE.close()
else:
self.PIFILE = open(pifile,"w") #if file doesn't not exist, create it
#proportionality constant for PID in the format [#144 big room / #140 small room / #144B Laser Room / #144A office]
self.P = array([-15,-15,-15,-0])
self.I = array([-.1,-.1,-.1,-0])
self.D = array([-40,-40,-40,0])
pickle.dump(self.P.tolist(),self.PIFILE)
pickle.dump(self.I.tolist(),self.PIFILE)
pickle.dump(self.D.tolist(),self.PIFILE)
self.PIFILE.close()
self.PImodtime = os.path.getmtime(pifile) #time when pifile is last modified
def updateExternalPIDParams(self):
if(os.path.getmtime(pifile) != self.PImodtime): #if PI parmeters have been modified externally, update them
self.PIFILE = open(pifile, 'r')
self.P = array(pickle.load(self.PIFILE))
self.I = array(pickle.load(self.PIFILE))
self.D = array(pickle.load(self.PIFILE))
self.PIFILE.close()
self.PImodtime = os.path.getmtime(pifile)
print("new P,I,D parameters are")
print self.P
print self.I
print self.D
def getResponse(self):
return [self.PIDresponseArr,self.valvesignalArr]
def calculateResponse(self, curTempArr):
self.errorSigArr = self.finderrorSig(curTempArr)
self.integralerrorSigArr = self.calcintegrator(self.integralerrorSigArr, self.errorSigArr)
self.saveIntegralError(self.integralerrorSigArr)
self.PIDresponseArr = self.findPIDresponse(self.errorSigArr, self.integralerrorSigArr,self.lastErrSigArr)
self.lastErrSigArr= self.errorSigArr
self.valvesignalArr = self.CalcValveSignal(self.PIDresponseArr, curTempArr)
def saveIntegralError(self,integError):
#print integError
self.INTEGFILE.seek(0) #moves position to the beginning of the file
pickle.dump(integError, self.INTEGFILE)
self.INTEGFILE.truncate()
def finderrorSig(self, CurTemp): #takes array with current temperatures and finds the error signal array
error = CurTemp - SetPoint
return error
def calcintegrator(self,oldArr, newArr):
TotalArr = oldArr + newArr
# Normalize maximum by the mean of the integration constants
minim = IntegrationMin/(-sum(self.I)/len(self.I))
maxim = IntegrationMax/(-sum(self.I)/len(self.I))
TotalArr=clip(TotalArr,minim,maxim)
return TotalArr
def findPIDresponse(self,curErrArr, IntErrArr, lastErrArr): #produces array containg signal to be sent to valves in format [Control1, Control2..] where each one is measured from -255 to 255 positive to hotter, negative for colder
P = self.P
I = self.I
D = self.D
propArr = zeros(ControlCh)
propArr[bigroomctrl] = PSup[bigroomctrl]*curErrArr[SupplyBigRoom-1]#0 + PTab[bigroomctrl]*curErrArr[Table1-1]#0 + PCoolingWater[bigroomctrl]*curErrArr[ColdWaterBigRoom]
propArr[smlroomctrl] = PSup[smlroomctrl]*curErrArr[SupplySmallRoom-1]#0 + PTab[smlroomctrl]*curErrArr[Table3-1]#0 + PCoolingWater[smlroomctrl]*curErrArr[ColdWaterSmallRoom]
propArr[laserroomctrl] = PSup[laserroomctrl]*curErrArr[SupplyLaserRoom-1]#0 + PTab[laserroomctrl]*curErrArr[Table4-1]#0 + PCoolingWater[laserroomctrl]*curErrArr[ColdWaterLaserRoom]
propArr[officectrl] = 0 #no control in office
propArr = propArr - clip(propArr, -PropActionThreshold,PropActionThreshold)
proprespArr = (P * propArr) # when used with arrays, * is component by component multiplcation or dot product for 1D arrays
integArr = zeros(ControlCh)
integArr[bigroomctrl] = IntErrArr[Table1-1]
integArr[smlroomctrl] = IntErrArr[Table3-1]
integArr[laserroomctrl] = IntErrArr[Table4-1]
integArr[officectrl] = 0 #no control in office
integrespArr = (I * integArr) # when used with arrays, * is component by component multiplcation or dot product for 1D arrays
#print integArr
if((lastErrArr == zeros(Ch)).any()): #when the lastErrArr is the zero array, then don't do any diff because it's the first run
diffrespArr = zeros(ControlCh)
else:
diffArr = zeros(ControlCh)
DiffErrArr = curErrArr - lastErrArr
diffArr[bigroomctrl] = DiffErrArr[SupplyBigRoom-1] + ColdWaterDiffGain[bigroomctrl] * DiffErrArr[ColdWaterBigRoom-1] / D[bigroomctrl]
diffArr[smlroomctrl] = DiffErrArr[SupplySmallRoom-1] + ColdWaterDiffGain[smlroomctrl] * DiffErrArr[ColdWaterSmallRoom-1] / D[smlroomctrl]
diffArr[laserroomctrl] = DiffErrArr[SupplyLaserRoom-1] + ColdWaterDiffGain[laserroomctrl] * DiffErrArr[ColdWaterLaserRoom-1] / D[laserroomctrl]
diffArr[officectrl] = 0 # no control in office
diffArr = diffArr - clip(diffArr, -DiffActionThreshold,DiffActionThreshold)
diffrespArr = (D * diffArr)
diffrespArr = clip(diffrespArr, -DiffMax, DiffMax)
print 'P', proprespArr
print 'I', integrespArr
print 'D', diffrespArr
responseArr = proprespArr + integrespArr + diffrespArr
return responseArr
def CalcValveSignal(self,responseArr,curTempArr):#hard codes which control channel correspond to which output number
valvesignalArr = zeros(ControlledValves)
#ColdWater = array([curTempArr[ColdWaterBigRoom-1], curTempArr[ColdWaterSmallRoom-1], curTempArr[ColdWaterLaserRoom-1],0 ])
#ColdWater = clip(ColdWater,0,20)
ColdWater = array([13.0,13.0,13.0,0.0]); # set cold water temp to 13 degrees because the sensor is not working atm
HotWater = array([curTempArr[HotWaterBigRoom-1], curTempArr[HotWaterSmallRoom-1], curTempArr[HotWaterLaserRoom-1], 0])
SetPointAux = array([SetPoint[Table1-1], SetPoint[Table3-1], SetPoint[Table4-1], 0])
CoolingPower = clip(SetPointAux - ColdWater - ColdWaterTempCorrection,1.0,100.0) # estimate cooling power for valve settings, always assume some cooling power
HeatingPower = clip(HotWater - SetPointAux,20.0,200.0) # minum heating power corresponds to 20 degrees temp-difference
ColdValveSignal = - responseArr/CoolingPower*ColdValveGain + Coldoffset# + ColdWaterValveGain * (ColdWater-ColdWaterTempBase)
HotValveSignal = Hotoffset + responseArr/HeatingPower*HotValveGain
valvesignalArr[0] = ColdValveSignal[smlroomctrl]
valvesignalArr[1] = HotValveSignal[smlroomctrl]
valvesignalArr[2] = ColdValveSignal[bigroomctrl]
valvesignalArr[3] = HotValveSignal[bigroomctrl]
valvesignalArr[4] = ColdValveSignal[laserroomctrl]
valvesignalArr[5] = HotValveSignal[laserroomctrl]
valvesignalArr[6] = 0
valvesignalArr[7] = 0
# valvesignalArr[0] = clip(ColdValveSignal[smlroomctrl],ValveMin[0],ValveMax)
# valvesignalArr[1] = clip(HotValveSignal[smlroomctrl],ValveMin[1],ValveMax)
# valvesignalArr[2] = clip(ColdValveSignal[bigroomctrl],ValveMin[2],ValveMax)
# valvesignalArr[3] = clip(HotValveSignal[bigroomctrl],ValveMin[3],ValveMax)
# valvesignalArr[4] = clip(ColdValveSignal[laserroomctrl],ValveMin[4],ValveMax)
# valvesignalArr[5] = clip(HotValveSignal[laserroomctrl],ValveMin[5],ValveMax)
# valvesignalArr[6] = 0
# valvesignalArr[7] = 0
valvesignal = valvesignalArr.tolist()
return valvesignalArr
def __del__(self):
self.INTEGFILE.close()
class DataAcquisition():
def binarytoTempC(self,bin, ch): #converts binary output to a physical temperature in C
Vin = 2.56*(float(bin)+1)/1024 #voltage that is read in 1023 is 2.56 0 is 0
dV = (15/HardwareG[ch])*(Vin/1.2 - 1) #when G = 15 (most channels) dV of 2.4 corresponds to bridge voltage of 1 and dV of 0 is bridge voltage of -1
#G = 5 for low res channels for cold water, hot water supply
#G is determines by INA114 gain resistor
R = (dV/V0 +.5) / (- dV/V0 + .5) * 10 #convert bridge voltage to R in kohms
T = 1/(a + b*math.log(R/10.) + c * pow(math.log(R/10.),2) + d * pow(math.log(R/10.),3)) #consult datasheet for this
TempC = round(T - 273.15,2) #Kelvin to C
return TempC
def readTemp(self,ser):#processing the input in the format 03:1023<space>... where 03 is the number of the detector, 1023 is the voltage representation
#returns array with data
global errors_count
curTempArr = zeros(Ch)
ser.write('t') # command to output readings
curLine = ser.read(Ch*8) # reads 128 bytes, 16 channels 7 bytes each and 16 spaces
if(len(curLine)==128): # read everything correctly
for i in range(Ch):
# left and right ranges for number of voltages
lnum = 8*i + 0
rnum = 8*i + 2
lvol = 8*i + 3
rvol = 8*i + 7
num = curLine[lnum:rnum] #number of the detector is the first
vol = int(curLine[lvol:rvol]) #voltage readout
TempC = self.binarytoTempC(vol, i)
curTempArr[i] = TempC
else:
if(errors_count > 20):
notifier.set_content('AC ALARM','The program quit because there were too many errors with data acquisition')
notifier.send()
sys.exit()
errors_count = errors_count + 1
print "Error: Data not collected"
print curLine
time.sleep(DataFreq)
curTempArr = self.readTemp(ser)
return curTempArr
class RunningAverage():
def __init__(self):
self.RunningAvgNum = RunningAvgNum
self.historyArr = zeros([self.RunningAvgNum,Ch])
self.binfull = 0
self.historyCounter = 0
self.printintro()
def printintro(self):
print '\n' + 'Filling up history for ' + str(self.RunningAvgNum) +' seconds \n'
def printbinfull(self):
print 'Running Average Operational'
def addNumber(self,newnumber):
self.historyArr[self.historyCounter,:] = newnumber #updates history by cycling through rows of historyArr and replacing old data with readTemp
self.historyCounter = (self.historyCounter + 1) % self.RunningAvgNum
if(self.historyCounter == 0):
if(self.binfull == 0):
self.printbinfull()
self.binfull = 1
def getAverage(self):
if(self.binfull): #if bin is full, take the mean
average = mean(self.historyArr,axis=0) #current temperature is the average of the columns of the history array
else: #if bin is not filled, return mean of existing elements
|
return average
class ManualController():
def __init__(self):
if(os.path.isfile(mancontrolfile)):#if file exists open it in read mode
pass
else: #if file doesn't exist, create it
self.FILE = open(mancontrolfile,"w");
pickle.dump(0,self.FILE) #indicates automatic control, 1 is manual
pickle.dump(zeros(ControlledValves).tolist(),self.FILE)
self.FILE.close()
self.modtime = os.path.getmtime(mancontrolfile)
self.valves = zeros(ControlledValves)
def isControlManual(self):
self.FILE = open(mancontrolfile,"r");
self.mancontrol = pickle.load(self.FILE)
self.valves = array(pickle.load(self.FILE))
self.FILE.close()
return self.mancontrol
def ManualValvePos(self):
return self.valves
class AlarmChecker():
def __init__(self):
self.messagesent = 0
self.callstoReset = 900*12 # set time for next alarm to 12h
self.callsCount = 0
self.messageMax = 1 #maximum number of allowed emails per the number of callstoReset
def updateCallsCount(self):
if(self.callsCount >= self.callstoReset):
self.messagesent = 0
self.callsCount = 0
print 'alarm armed again'
else:
self.callsCount = self.callsCount + 1;
def checkForAlarm(self,curTempArr):
self.updateCallsCount()
if(abs(curTempArr[Table1 - 1] - SetPoint[Table1 -1]) > 2):
notifier.set_content('AC ALARM','The differential between Table1 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
if(abs(curTempArr[Table3 - 1] - SetPoint[Table3 - 1]) > 2):
notifier.set_content('AC ALARM','The differential between Table3 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
if(abs(curTempArr[Table4 - 1] - SetPoint[Table4 - 1]) > 2):
notifier.set_content('AC ALARM','The differential between Table4 temperature and setpoint exceeds norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
#if(abs(curTempArr[ColdWaterBigRoom - 1] - 7) > 8):
#notifier.set_content('AC ALARM','The ColdWaterBigRoom temperature is too far from norm')
#officialnotifier.set_content('Haeffner Lab: Possible Chiller Issue','The cold water supply temperature is currently ' + str(curTempArr[ColdWaterBigRoom - 1])+ ', too far from norm of 7 degrees \n This is an automatically generated email.')
#if(self.messagesent < self.messageMax):
#notifier.send()
#officialnotifier.send()
#self.messagesent = self.messagesent + 1
if(abs(curTempArr[HotWaterBigRoom - 1] - 50) > 40): # Hot water varies really a lot
notifier.set_content('AC ALARM','The HotWaterBigRoom temperature is too far from norm')
if(self.messagesent < self.messageMax):
notifier.send()
self.messagesent = self.messagesent + 1
alarmchecker = AlarmChecker()
runaverage = RunningAverage()
acquire = DataAcquisition()
log = Logger.Logger()
valves = Valves()
responsecalculator = ResponseCalculator()
manualcontrol = ManualController()
try:
while('true'):
counter = counter + DataFreq
WriteCounter = WriteCounter + DataFreq
time.sleep(DataFreq)
runaverage.addNumber(acquire.readTemp(ser))
curTempArr = runaverage.getAverage()
alarmchecker.checkForAlarm(curTempArr)
responsecalculator.calculateResponse(curTempArr)
if counter > SignalDelay: #apply the output singal every SignalDelay seconds
counter = 0
if(manualcontrol.isControlManual()):
valvesignalArr = manualcontrol.ManualValvePos()
PIDresponseArr = zeros(ControlCh) #neded for logging 0s
print 'manual control' + str(valvesignalArr)
else:
[PIDresponseArr,valvesignalArr] = responsecalculator.getResponse()
sentValveSignalArr = valves.ApplyValveSignal(valvesignalArr)
#print sentValveSignalArr
if WriteCounter > WriteDelay: #write data to log file
WriteCounter = 0
log.MakeLog(curTempArr, PIDresponseArr,sentValveSignalArr)
responsecalculator.updateExternalPIDParams()
except KeyboardInterrupt:
time.sleep(DataFreq)
ser.flushInput()
ser.flushOutput()
ser.close() # closes the serial port
print 'Graceful exit'
| average = sum(self.historyArr[0:(self.historyCounter+1),:],axis=0)/(self.historyCounter) | conditional_block |
Decryption.py | import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import padding, asymmetric
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
import json
CONST_RSA_KEY_SIZE = 2048
CONST_INDENT_SIZE = 4
CONST_PADDING_BITS = 128
CONST_KEY_BYTES= 32
KEY_LENGTH = 32
RSA_KEY_SIZE = 2048
IV_LENGTH = 16
PUBLIC_EXPONENT = 65537
RSA_PUBLIC_KEY_PATH = ".\RSApublickey.pem"
RSA_PRIVATE_KEY_PATH = ".\RSAprivatekey.pem"
def MyEncrypt(message, key):
#checking key length
if(len(key) < KEY_LENGTH):
raise ValueError("Key less than 32 Bytes!")
#assigning values
IV = os.urandom(IV_LENGTH)
backend = default_backend()
#initialize padder
padder = padding.PKCS7(CONST_PADDING_BITS).padder()
#pad data to fit block size
message = padder.update(message) + padder.finalize()
#create cipher object
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make encryptor
encryptor = cipher.encryptor()
#encrypt message
C = encryptor.update(message) + encryptor.finalize()
return C, IV
def MyFileEncrypt(filepath):
#generating key
key = os.urandom(KEY_LENGTH)
#Exclude private key, public key, and executable from encrypt
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
# loop throughh all files:
#for file in files:
#Retrieve full filepath
#filepath = pathTofile + "\\" + file
#reading file
file = open(filepath, "rb")
m = file.read()
file.close()
#calling encryption method
C, IV = MyEncrypt(m, key)
file = open(filepath, "wb")
file.write(C)
file.close()
return C, IV, key, ext
def MyEncryptMAC(message, EncKey, HMACKey):
#get ciphertext and IV
C, IV = MyEncrypt(message, EncKey)
#create HMAC object to make tag
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
tag = h.finalize()
return C, IV, tag
def MyFileEncryptMAC(filepath):
#create Keys
KeyLength = 32
HMACKey = os.urandom(KEY_LENGTH)
EncKey = os.urandom(KEY_LENGTH)
if len(EncKey) < KeyLength:
raise Exception("EncKey less than 32 bytes!")
if len(HMACKey) < KeyLength:
raise Exception("HMACKey less than 32 bytes!")
#open and read file to encrypt
file = open(filepath, "rb")
m = file.read()
file.close()
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
#encrypt & MAC
C, IV, tag = MyEncryptMAC(m, EncKey, HMACKey)
'''Not used for RSA
#storing values
encData = {"RSACipher": RSACipher.decode('cp437'),"C": C.decode('cp437'), "IV": IV.decode('cp437'), "ext": ext, "tag": tag.decode('cp437')}
#create and write to json
filenameJSON = filename + ".json"
#write json data to file
with open(filenameJSON, "w") as outfile:
json.dump(encData, outfile)
outfile.close()
#delete original file
os.remove(filepath)
'''
return C, IV, tag, EncKey, HMACKey, ext
def CheckRSAKeys(): # check if pem file exist
publicExists = os.path.isfile(RSA_PUBLIC_KEY_PATH)
privateExists = os.path.isfile(RSA_PRIVATE_KEY_PATH)
if not publicExists or not privateExists:
#generate and store private key
privateKey = rsa.generate_private_key(
public_exponent = PUBLIC_EXPONENT,
key_size = RSA_KEY_SIZE,
backend=default_backend()
)
privatepem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
with open(RSA_PRIVATE_KEY_PATH, "wb") as privateKeyFile:
privateKeyFile.write(privatepem)
#generate and store public key
publicKey = privateKey.public_key()
publicpem = publicKey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with open(RSA_PUBLIC_KEY_PATH, "wb") as publicKeyFile:
publicKeyFile.write(publicpem)
# RSA Encrypt using AES CBC 256 Encryption with HMAC
def MyRSAEncrypt(filepath, RSA_Publickey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
# AES requires plain text and ciphertext to be a multiple of 16
# We pad it so that the message is a multiple of the IV, 16
def addPadding(encoded):
# We pad it with 128 bits or 16 bytes
padder = padding.PKCS7(CONST_KEY_BYTES.CONST_PADDING_BITS).padder()
# update() pads the encoded message
padded_encoded = padder.update(encoded)
# .finalize () Returns the remainder of the data.
padded_encoded += padder.finalize()
return padded_encoded
def DirectoryEncrypt(directory):
|
def MyDecrypt(C, IV, key):
#make cipher
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make decryptor
decryptor = cipher.decryptor()
#decrypt ciphertext
plaintext_padded = decryptor.update(C) + decryptor.finalize()
#unpad message
unpadder = padding.PKCS7(128).unpadder()
plaintext = unpadder.update(plaintext_padded) + unpadder.finalize()
return plaintext
def MyFileDecrypt(filepath, IV, key, ext):
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
file = open(filepath, "rb")
C = file.read()
file.close()
message = MyDecrypt(C, IV, key)
writefile = open(filepath, "wb")
writefile.write(message)
writefile.close()
return message, IV, key
def MyDecryptMAC(C, IV, tag, HMACKey, EncKey):
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
verifyTag = h.finalize()
if verifyTag != tag:
raise Exception("Tags do not align")
message = MyDecrypt(C, IV, EncKey)
return message
def MyFileDecryptMAC(originalfilepath, HMACKey):
#getting file name and extension
filename_ext = os.path.basename(originalfilepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
jsonFile = filename + ".json"
#open file to decrypt
with open(jsonFile) as decryptFile:
data = json.load(decryptFile)
decryptFile.close()
#getting data from dictionary
C = (data['C']).encode('cp437')
IV = (data['IV']).encode('cp437')
tag = (data['tag']).encode('cp437')
EncKey = (data['EncKey']).encode('cp437')
message = MyDecryptMAC(C, IV, tag, HMACKey, EncKey)
#write recovered data to file
recoveredFile = open(originalfilepath, "wb")
recoveredFile.write(message)
recoveredFile.close()
#remove json file
os.remove(jsonFile)
return message
# RSA Decrypt # using AES CBC 256 Decryption with HMAC
'''
def MyRSADecrypt(RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
'''
def MyRSADecrypt(filepath, RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath):
with open(RSA_Privatekey_filepath, 'rb') as key_file:
private_key = serialization.load_pem_private_key(
key_file.read(),
password=None,
backend = default_backend()
)
key = private_key.decrypt(RSACipher,asymmetric,padding.OAEP(
mgf = asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
EncKey_start=0
EncKey_end = int((len(key)/2))
HMACKey_start=EncKey_end
HMACKey_end = int(len(key))
EncKey= key[EncKey_start:HMACKey_end]
key_file.close()
HMACKey= ""
MyFileDecryptMAC(filepath,EncKey, HMACKey, IV,tag)
def main():
#testFile = "test.txt"
testFile = "test_photo.jpg"
'''Regular'''
C, IV, key, ext = MyFileEncrypt(testFile)
input("File encrypted! Press enter to decrypt.")
MyFileDecrypt(testFile, IV, key, ext)
print("\nFile decrypted!")
'''HMAC'''
#C, IV, tag, EncKey, HMACKey, ext = MyFileEncryptMAC(testFile)
#input("File encrypted! Press enter to decrypt.")
#MyFileDecryptMAC(testFile, HMACKey)
#print("\nFile decrypted!")
'''RSA'''
CheckRSAKeys()
#RSACipher, C, IV, tag, ext = MyRSAEncrypt(testFile, RSA_PUBLIC_KEY_PATH)
#input("File encrypted! Press enter to decrypt.")
#MyRSADecrypt(RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath)
#print("\nFile decrypted!")
main()
| try:
key = CheckRSAKeys()
except:
print("Error: keys has issue")
return
try:
for root, dirs, filres in os.walk(directory):
for file in filres:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root,file),key[0])
except:
print("Error: MyRSAEncrypt failed")
return
#create JSon file
try:
data= {'encrypted': [{'RSACipher':RSACipher, 'C': C, 'IV': IV, 'tag': tag, 'ext': ext}]}
except:
print("Error: Not able to create Json file")
return
try:
encryptedFilepath = os.path.splittext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w')as jsonfile:
json.dump(data, jsonfile, indent=3)
except:
print("Error:Json file didnt create")
return
for file in dirs:
try:
RSACipher,C, IV, tag = MyRSAEncrypt(os.ath.json(root,file),key[0])
except:
("Error: MyRSAEncryptfailes:")
return
#create JSON file
try:
data = {'encrypted': [{'RSACipher':RSACipher,'C':C,'IV':IV,'tag':tag,'ext':ext}]}
except:
print("Error: Json file didnt create")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w') as jsonFile:
json.dump(data,jsonFile, indent=3)
except:
print("Error: Unable to create JSON file.")
return
except:
print("Directory doent excist")
return | identifier_body |
Decryption.py | import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import padding, asymmetric
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
import json
CONST_RSA_KEY_SIZE = 2048
CONST_INDENT_SIZE = 4
CONST_PADDING_BITS = 128
CONST_KEY_BYTES= 32
KEY_LENGTH = 32
RSA_KEY_SIZE = 2048
IV_LENGTH = 16
PUBLIC_EXPONENT = 65537
RSA_PUBLIC_KEY_PATH = ".\RSApublickey.pem"
RSA_PRIVATE_KEY_PATH = ".\RSAprivatekey.pem"
def MyEncrypt(message, key):
#checking key length
if(len(key) < KEY_LENGTH):
raise ValueError("Key less than 32 Bytes!")
#assigning values
IV = os.urandom(IV_LENGTH)
backend = default_backend()
#initialize padder
padder = padding.PKCS7(CONST_PADDING_BITS).padder()
#pad data to fit block size
message = padder.update(message) + padder.finalize()
#create cipher object
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make encryptor
encryptor = cipher.encryptor()
#encrypt message
C = encryptor.update(message) + encryptor.finalize()
return C, IV
def MyFileEncrypt(filepath):
#generating key
key = os.urandom(KEY_LENGTH)
#Exclude private key, public key, and executable from encrypt
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
# loop throughh all files:
#for file in files:
#Retrieve full filepath
#filepath = pathTofile + "\\" + file
#reading file
file = open(filepath, "rb")
m = file.read()
file.close()
#calling encryption method
C, IV = MyEncrypt(m, key)
file = open(filepath, "wb")
file.write(C)
file.close()
return C, IV, key, ext
def MyEncryptMAC(message, EncKey, HMACKey):
#get ciphertext and IV
C, IV = MyEncrypt(message, EncKey)
#create HMAC object to make tag
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
tag = h.finalize()
return C, IV, tag
def MyFileEncryptMAC(filepath):
#create Keys
KeyLength = 32
HMACKey = os.urandom(KEY_LENGTH)
EncKey = os.urandom(KEY_LENGTH)
if len(EncKey) < KeyLength:
raise Exception("EncKey less than 32 bytes!")
if len(HMACKey) < KeyLength:
raise Exception("HMACKey less than 32 bytes!")
#open and read file to encrypt
file = open(filepath, "rb")
m = file.read()
file.close()
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
#encrypt & MAC
C, IV, tag = MyEncryptMAC(m, EncKey, HMACKey)
'''Not used for RSA
#storing values
encData = {"RSACipher": RSACipher.decode('cp437'),"C": C.decode('cp437'), "IV": IV.decode('cp437'), "ext": ext, "tag": tag.decode('cp437')}
#create and write to json
filenameJSON = filename + ".json"
#write json data to file
with open(filenameJSON, "w") as outfile:
json.dump(encData, outfile)
outfile.close()
#delete original file
os.remove(filepath)
'''
return C, IV, tag, EncKey, HMACKey, ext
def CheckRSAKeys(): # check if pem file exist
publicExists = os.path.isfile(RSA_PUBLIC_KEY_PATH)
privateExists = os.path.isfile(RSA_PRIVATE_KEY_PATH)
if not publicExists or not privateExists:
#generate and store private key
privateKey = rsa.generate_private_key(
public_exponent = PUBLIC_EXPONENT,
key_size = RSA_KEY_SIZE,
backend=default_backend()
)
privatepem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
with open(RSA_PRIVATE_KEY_PATH, "wb") as privateKeyFile:
privateKeyFile.write(privatepem)
#generate and store public key
publicKey = privateKey.public_key()
publicpem = publicKey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with open(RSA_PUBLIC_KEY_PATH, "wb") as publicKeyFile:
publicKeyFile.write(publicpem)
# RSA Encrypt using AES CBC 256 Encryption with HMAC
def MyRSAEncrypt(filepath, RSA_Publickey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
# AES requires plain text and ciphertext to be a multiple of 16
# We pad it so that the message is a multiple of the IV, 16
def addPadding(encoded):
# We pad it with 128 bits or 16 bytes
padder = padding.PKCS7(CONST_KEY_BYTES.CONST_PADDING_BITS).padder()
# update() pads the encoded message
padded_encoded = padder.update(encoded)
# .finalize () Returns the remainder of the data.
padded_encoded += padder.finalize()
return padded_encoded
def DirectoryEncrypt(directory):
try:
key = CheckRSAKeys()
except:
print("Error: keys has issue")
return
try:
for root, dirs, filres in os.walk(directory):
for file in filres:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root,file),key[0])
except:
print("Error: MyRSAEncrypt failed")
return
#create JSon file
try:
data= {'encrypted': [{'RSACipher':RSACipher, 'C': C, 'IV': IV, 'tag': tag, 'ext': ext}]}
except:
print("Error: Not able to create Json file")
return
try:
encryptedFilepath = os.path.splittext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w')as jsonfile:
json.dump(data, jsonfile, indent=3)
except:
print("Error:Json file didnt create")
return
for file in dirs:
try:
RSACipher,C, IV, tag = MyRSAEncrypt(os.ath.json(root,file),key[0])
except:
("Error: MyRSAEncryptfailes:")
return
#create JSON file
try:
data = {'encrypted': [{'RSACipher':RSACipher,'C':C,'IV':IV,'tag':tag,'ext':ext}]}
except:
print("Error: Json file didnt create")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w') as jsonFile:
json.dump(data,jsonFile, indent=3)
except:
print("Error: Unable to create JSON file.")
return
except:
print("Directory doent excist")
return
def MyDecrypt(C, IV, key):
#make cipher
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make decryptor
decryptor = cipher.decryptor()
#decrypt ciphertext
plaintext_padded = decryptor.update(C) + decryptor.finalize()
#unpad message
unpadder = padding.PKCS7(128).unpadder()
plaintext = unpadder.update(plaintext_padded) + unpadder.finalize()
return plaintext
def MyFileDecrypt(filepath, IV, key, ext):
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
file = open(filepath, "rb")
C = file.read()
file.close()
message = MyDecrypt(C, IV, key)
writefile = open(filepath, "wb")
writefile.write(message)
writefile.close()
return message, IV, key
def MyDecryptMAC(C, IV, tag, HMACKey, EncKey):
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
verifyTag = h.finalize()
if verifyTag != tag:
raise Exception("Tags do not align")
message = MyDecrypt(C, IV, EncKey)
return message
def MyFileDecryptMAC(originalfilepath, HMACKey):
#getting file name and extension
filename_ext = os.path.basename(originalfilepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension | with open(jsonFile) as decryptFile:
data = json.load(decryptFile)
decryptFile.close()
#getting data from dictionary
C = (data['C']).encode('cp437')
IV = (data['IV']).encode('cp437')
tag = (data['tag']).encode('cp437')
EncKey = (data['EncKey']).encode('cp437')
message = MyDecryptMAC(C, IV, tag, HMACKey, EncKey)
#write recovered data to file
recoveredFile = open(originalfilepath, "wb")
recoveredFile.write(message)
recoveredFile.close()
#remove json file
os.remove(jsonFile)
return message
# RSA Decrypt # using AES CBC 256 Decryption with HMAC
'''
def MyRSADecrypt(RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
'''
def MyRSADecrypt(filepath, RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath):
with open(RSA_Privatekey_filepath, 'rb') as key_file:
private_key = serialization.load_pem_private_key(
key_file.read(),
password=None,
backend = default_backend()
)
key = private_key.decrypt(RSACipher,asymmetric,padding.OAEP(
mgf = asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
EncKey_start=0
EncKey_end = int((len(key)/2))
HMACKey_start=EncKey_end
HMACKey_end = int(len(key))
EncKey= key[EncKey_start:HMACKey_end]
key_file.close()
HMACKey= ""
MyFileDecryptMAC(filepath,EncKey, HMACKey, IV,tag)
def main():
#testFile = "test.txt"
testFile = "test_photo.jpg"
'''Regular'''
C, IV, key, ext = MyFileEncrypt(testFile)
input("File encrypted! Press enter to decrypt.")
MyFileDecrypt(testFile, IV, key, ext)
print("\nFile decrypted!")
'''HMAC'''
#C, IV, tag, EncKey, HMACKey, ext = MyFileEncryptMAC(testFile)
#input("File encrypted! Press enter to decrypt.")
#MyFileDecryptMAC(testFile, HMACKey)
#print("\nFile decrypted!")
'''RSA'''
CheckRSAKeys()
#RSACipher, C, IV, tag, ext = MyRSAEncrypt(testFile, RSA_PUBLIC_KEY_PATH)
#input("File encrypted! Press enter to decrypt.")
#MyRSADecrypt(RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath)
#print("\nFile decrypted!")
main() |
jsonFile = filename + ".json"
#open file to decrypt | random_line_split |
Decryption.py | import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import padding, asymmetric
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
import json
CONST_RSA_KEY_SIZE = 2048
CONST_INDENT_SIZE = 4
CONST_PADDING_BITS = 128
CONST_KEY_BYTES= 32
KEY_LENGTH = 32
RSA_KEY_SIZE = 2048
IV_LENGTH = 16
PUBLIC_EXPONENT = 65537
RSA_PUBLIC_KEY_PATH = ".\RSApublickey.pem"
RSA_PRIVATE_KEY_PATH = ".\RSAprivatekey.pem"
def MyEncrypt(message, key):
#checking key length
if(len(key) < KEY_LENGTH):
raise ValueError("Key less than 32 Bytes!")
#assigning values
IV = os.urandom(IV_LENGTH)
backend = default_backend()
#initialize padder
padder = padding.PKCS7(CONST_PADDING_BITS).padder()
#pad data to fit block size
message = padder.update(message) + padder.finalize()
#create cipher object
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make encryptor
encryptor = cipher.encryptor()
#encrypt message
C = encryptor.update(message) + encryptor.finalize()
return C, IV
def MyFileEncrypt(filepath):
#generating key
key = os.urandom(KEY_LENGTH)
#Exclude private key, public key, and executable from encrypt
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
# loop throughh all files:
#for file in files:
#Retrieve full filepath
#filepath = pathTofile + "\\" + file
#reading file
file = open(filepath, "rb")
m = file.read()
file.close()
#calling encryption method
C, IV = MyEncrypt(m, key)
file = open(filepath, "wb")
file.write(C)
file.close()
return C, IV, key, ext
def MyEncryptMAC(message, EncKey, HMACKey):
#get ciphertext and IV
C, IV = MyEncrypt(message, EncKey)
#create HMAC object to make tag
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
tag = h.finalize()
return C, IV, tag
def MyFileEncryptMAC(filepath):
#create Keys
KeyLength = 32
HMACKey = os.urandom(KEY_LENGTH)
EncKey = os.urandom(KEY_LENGTH)
if len(EncKey) < KeyLength:
raise Exception("EncKey less than 32 bytes!")
if len(HMACKey) < KeyLength:
raise Exception("HMACKey less than 32 bytes!")
#open and read file to encrypt
file = open(filepath, "rb")
m = file.read()
file.close()
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
#encrypt & MAC
C, IV, tag = MyEncryptMAC(m, EncKey, HMACKey)
'''Not used for RSA
#storing values
encData = {"RSACipher": RSACipher.decode('cp437'),"C": C.decode('cp437'), "IV": IV.decode('cp437'), "ext": ext, "tag": tag.decode('cp437')}
#create and write to json
filenameJSON = filename + ".json"
#write json data to file
with open(filenameJSON, "w") as outfile:
json.dump(encData, outfile)
outfile.close()
#delete original file
os.remove(filepath)
'''
return C, IV, tag, EncKey, HMACKey, ext
def CheckRSAKeys(): # check if pem file exist
publicExists = os.path.isfile(RSA_PUBLIC_KEY_PATH)
privateExists = os.path.isfile(RSA_PRIVATE_KEY_PATH)
if not publicExists or not privateExists:
#generate and store private key
privateKey = rsa.generate_private_key(
public_exponent = PUBLIC_EXPONENT,
key_size = RSA_KEY_SIZE,
backend=default_backend()
)
privatepem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
with open(RSA_PRIVATE_KEY_PATH, "wb") as privateKeyFile:
privateKeyFile.write(privatepem)
#generate and store public key
publicKey = privateKey.public_key()
publicpem = publicKey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with open(RSA_PUBLIC_KEY_PATH, "wb") as publicKeyFile:
publicKeyFile.write(publicpem)
# RSA Encrypt using AES CBC 256 Encryption with HMAC
def MyRSAEncrypt(filepath, RSA_Publickey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
# AES requires plain text and ciphertext to be a multiple of 16
# We pad it so that the message is a multiple of the IV, 16
def addPadding(encoded):
# We pad it with 128 bits or 16 bytes
padder = padding.PKCS7(CONST_KEY_BYTES.CONST_PADDING_BITS).padder()
# update() pads the encoded message
padded_encoded = padder.update(encoded)
# .finalize () Returns the remainder of the data.
padded_encoded += padder.finalize()
return padded_encoded
def DirectoryEncrypt(directory):
try:
key = CheckRSAKeys()
except:
print("Error: keys has issue")
return
try:
for root, dirs, filres in os.walk(directory):
for file in filres:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root,file),key[0])
except:
print("Error: MyRSAEncrypt failed")
return
#create JSon file
try:
data= {'encrypted': [{'RSACipher':RSACipher, 'C': C, 'IV': IV, 'tag': tag, 'ext': ext}]}
except:
print("Error: Not able to create Json file")
return
try:
encryptedFilepath = os.path.splittext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w')as jsonfile:
json.dump(data, jsonfile, indent=3)
except:
print("Error:Json file didnt create")
return
for file in dirs:
try:
RSACipher,C, IV, tag = MyRSAEncrypt(os.ath.json(root,file),key[0])
except:
("Error: MyRSAEncryptfailes:")
return
#create JSON file
try:
data = {'encrypted': [{'RSACipher':RSACipher,'C':C,'IV':IV,'tag':tag,'ext':ext}]}
except:
print("Error: Json file didnt create")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w') as jsonFile:
json.dump(data,jsonFile, indent=3)
except:
print("Error: Unable to create JSON file.")
return
except:
print("Directory doent excist")
return
def MyDecrypt(C, IV, key):
#make cipher
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make decryptor
decryptor = cipher.decryptor()
#decrypt ciphertext
plaintext_padded = decryptor.update(C) + decryptor.finalize()
#unpad message
unpadder = padding.PKCS7(128).unpadder()
plaintext = unpadder.update(plaintext_padded) + unpadder.finalize()
return plaintext
def MyFileDecrypt(filepath, IV, key, ext):
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
file = open(filepath, "rb")
C = file.read()
file.close()
message = MyDecrypt(C, IV, key)
writefile = open(filepath, "wb")
writefile.write(message)
writefile.close()
return message, IV, key
def MyDecryptMAC(C, IV, tag, HMACKey, EncKey):
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
verifyTag = h.finalize()
if verifyTag != tag:
raise Exception("Tags do not align")
message = MyDecrypt(C, IV, EncKey)
return message
def MyFileDecryptMAC(originalfilepath, HMACKey):
#getting file name and extension
filename_ext = os.path.basename(originalfilepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
jsonFile = filename + ".json"
#open file to decrypt
with open(jsonFile) as decryptFile:
data = json.load(decryptFile)
decryptFile.close()
#getting data from dictionary
C = (data['C']).encode('cp437')
IV = (data['IV']).encode('cp437')
tag = (data['tag']).encode('cp437')
EncKey = (data['EncKey']).encode('cp437')
message = MyDecryptMAC(C, IV, tag, HMACKey, EncKey)
#write recovered data to file
recoveredFile = open(originalfilepath, "wb")
recoveredFile.write(message)
recoveredFile.close()
#remove json file
os.remove(jsonFile)
return message
# RSA Decrypt # using AES CBC 256 Decryption with HMAC
'''
def MyRSADecrypt(RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
'''
def MyRSADecrypt(filepath, RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath):
with open(RSA_Privatekey_filepath, 'rb') as key_file:
private_key = serialization.load_pem_private_key(
key_file.read(),
password=None,
backend = default_backend()
)
key = private_key.decrypt(RSACipher,asymmetric,padding.OAEP(
mgf = asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
EncKey_start=0
EncKey_end = int((len(key)/2))
HMACKey_start=EncKey_end
HMACKey_end = int(len(key))
EncKey= key[EncKey_start:HMACKey_end]
key_file.close()
HMACKey= ""
MyFileDecryptMAC(filepath,EncKey, HMACKey, IV,tag)
def | ():
#testFile = "test.txt"
testFile = "test_photo.jpg"
'''Regular'''
C, IV, key, ext = MyFileEncrypt(testFile)
input("File encrypted! Press enter to decrypt.")
MyFileDecrypt(testFile, IV, key, ext)
print("\nFile decrypted!")
'''HMAC'''
#C, IV, tag, EncKey, HMACKey, ext = MyFileEncryptMAC(testFile)
#input("File encrypted! Press enter to decrypt.")
#MyFileDecryptMAC(testFile, HMACKey)
#print("\nFile decrypted!")
'''RSA'''
CheckRSAKeys()
#RSACipher, C, IV, tag, ext = MyRSAEncrypt(testFile, RSA_PUBLIC_KEY_PATH)
#input("File encrypted! Press enter to decrypt.")
#MyRSADecrypt(RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath)
#print("\nFile decrypted!")
main()
| main | identifier_name |
Decryption.py | import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import padding, asymmetric
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
import json
CONST_RSA_KEY_SIZE = 2048
CONST_INDENT_SIZE = 4
CONST_PADDING_BITS = 128
CONST_KEY_BYTES= 32
KEY_LENGTH = 32
RSA_KEY_SIZE = 2048
IV_LENGTH = 16
PUBLIC_EXPONENT = 65537
RSA_PUBLIC_KEY_PATH = ".\RSApublickey.pem"
RSA_PRIVATE_KEY_PATH = ".\RSAprivatekey.pem"
def MyEncrypt(message, key):
#checking key length
if(len(key) < KEY_LENGTH):
raise ValueError("Key less than 32 Bytes!")
#assigning values
IV = os.urandom(IV_LENGTH)
backend = default_backend()
#initialize padder
padder = padding.PKCS7(CONST_PADDING_BITS).padder()
#pad data to fit block size
message = padder.update(message) + padder.finalize()
#create cipher object
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make encryptor
encryptor = cipher.encryptor()
#encrypt message
C = encryptor.update(message) + encryptor.finalize()
return C, IV
def MyFileEncrypt(filepath):
#generating key
key = os.urandom(KEY_LENGTH)
#Exclude private key, public key, and executable from encrypt
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
# loop throughh all files:
#for file in files:
#Retrieve full filepath
#filepath = pathTofile + "\\" + file
#reading file
file = open(filepath, "rb")
m = file.read()
file.close()
#calling encryption method
C, IV = MyEncrypt(m, key)
file = open(filepath, "wb")
file.write(C)
file.close()
return C, IV, key, ext
def MyEncryptMAC(message, EncKey, HMACKey):
#get ciphertext and IV
C, IV = MyEncrypt(message, EncKey)
#create HMAC object to make tag
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
tag = h.finalize()
return C, IV, tag
def MyFileEncryptMAC(filepath):
#create Keys
KeyLength = 32
HMACKey = os.urandom(KEY_LENGTH)
EncKey = os.urandom(KEY_LENGTH)
if len(EncKey) < KeyLength:
raise Exception("EncKey less than 32 bytes!")
if len(HMACKey) < KeyLength:
|
#open and read file to encrypt
file = open(filepath, "rb")
m = file.read()
file.close()
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
#encrypt & MAC
C, IV, tag = MyEncryptMAC(m, EncKey, HMACKey)
'''Not used for RSA
#storing values
encData = {"RSACipher": RSACipher.decode('cp437'),"C": C.decode('cp437'), "IV": IV.decode('cp437'), "ext": ext, "tag": tag.decode('cp437')}
#create and write to json
filenameJSON = filename + ".json"
#write json data to file
with open(filenameJSON, "w") as outfile:
json.dump(encData, outfile)
outfile.close()
#delete original file
os.remove(filepath)
'''
return C, IV, tag, EncKey, HMACKey, ext
def CheckRSAKeys(): # check if pem file exist
publicExists = os.path.isfile(RSA_PUBLIC_KEY_PATH)
privateExists = os.path.isfile(RSA_PRIVATE_KEY_PATH)
if not publicExists or not privateExists:
#generate and store private key
privateKey = rsa.generate_private_key(
public_exponent = PUBLIC_EXPONENT,
key_size = RSA_KEY_SIZE,
backend=default_backend()
)
privatepem = privateKey.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()
)
with open(RSA_PRIVATE_KEY_PATH, "wb") as privateKeyFile:
privateKeyFile.write(privatepem)
#generate and store public key
publicKey = privateKey.public_key()
publicpem = publicKey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
with open(RSA_PUBLIC_KEY_PATH, "wb") as publicKeyFile:
publicKeyFile.write(publicpem)
# RSA Encrypt using AES CBC 256 Encryption with HMAC
def MyRSAEncrypt(filepath, RSA_Publickey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
key_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
# AES requires plain text and ciphertext to be a multiple of 16
# We pad it so that the message is a multiple of the IV, 16
def addPadding(encoded):
# We pad it with 128 bits or 16 bytes
padder = padding.PKCS7(CONST_KEY_BYTES.CONST_PADDING_BITS).padder()
# update() pads the encoded message
padded_encoded = padder.update(encoded)
# .finalize () Returns the remainder of the data.
padded_encoded += padder.finalize()
return padded_encoded
def DirectoryEncrypt(directory):
try:
key = CheckRSAKeys()
except:
print("Error: keys has issue")
return
try:
for root, dirs, filres in os.walk(directory):
for file in filres:
try:
RSACipher, C, IV, tag, ext = MyRSAEncrypt(os.path.join(root,file),key[0])
except:
print("Error: MyRSAEncrypt failed")
return
#create JSon file
try:
data= {'encrypted': [{'RSACipher':RSACipher, 'C': C, 'IV': IV, 'tag': tag, 'ext': ext}]}
except:
print("Error: Not able to create Json file")
return
try:
encryptedFilepath = os.path.splittext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w')as jsonfile:
json.dump(data, jsonfile, indent=3)
except:
print("Error:Json file didnt create")
return
for file in dirs:
try:
RSACipher,C, IV, tag = MyRSAEncrypt(os.ath.json(root,file),key[0])
except:
("Error: MyRSAEncryptfailes:")
return
#create JSON file
try:
data = {'encrypted': [{'RSACipher':RSACipher,'C':C,'IV':IV,'tag':tag,'ext':ext}]}
except:
print("Error: Json file didnt create")
return
try:
encryptedFilepath = os.path.splitext(file)[0] + ".encrypted" + ".json"
with open( encryptedFilepath, 'w') as jsonFile:
json.dump(data,jsonFile, indent=3)
except:
print("Error: Unable to create JSON file.")
return
except:
print("Directory doent excist")
return
def MyDecrypt(C, IV, key):
#make cipher
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(IV), backend=backend)
#make decryptor
decryptor = cipher.decryptor()
#decrypt ciphertext
plaintext_padded = decryptor.update(C) + decryptor.finalize()
#unpad message
unpadder = padding.PKCS7(128).unpadder()
plaintext = unpadder.update(plaintext_padded) + unpadder.finalize()
return plaintext
def MyFileDecrypt(filepath, IV, key, ext):
#getting file name and extension
filename_ext = os.path.basename(filepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
file = open(filepath, "rb")
C = file.read()
file.close()
message = MyDecrypt(C, IV, key)
writefile = open(filepath, "wb")
writefile.write(message)
writefile.close()
return message, IV, key
def MyDecryptMAC(C, IV, tag, HMACKey, EncKey):
h = hmac.HMAC(HMACKey, hashes.SHA256(), backend=default_backend())
h.update(C)
verifyTag = h.finalize()
if verifyTag != tag:
raise Exception("Tags do not align")
message = MyDecrypt(C, IV, EncKey)
return message
def MyFileDecryptMAC(originalfilepath, HMACKey):
#getting file name and extension
filename_ext = os.path.basename(originalfilepath) #gets file name with extension from path
filename, ext = os.path.splitext(filename_ext) #separates file name and extension
jsonFile = filename + ".json"
#open file to decrypt
with open(jsonFile) as decryptFile:
data = json.load(decryptFile)
decryptFile.close()
#getting data from dictionary
C = (data['C']).encode('cp437')
IV = (data['IV']).encode('cp437')
tag = (data['tag']).encode('cp437')
EncKey = (data['EncKey']).encode('cp437')
message = MyDecryptMAC(C, IV, tag, HMACKey, EncKey)
#write recovered data to file
recoveredFile = open(originalfilepath, "wb")
recoveredFile.write(message)
recoveredFile.close()
#remove json file
os.remove(jsonFile)
return message
# RSA Decrypt # using AES CBC 256 Decryption with HMAC
'''
def MyRSADecrypt(RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath):
(C, IV, tag, EncKey, HMACKey, ext) = MyFileEncryptMAC(filepath)
key = EncKey + HMACKey
with open(RSA_Publickey_filepath, 'rb') as key_file:
public_key = serialization.load_pem_public_key(
_file.read(),
backend = default_backend()
)
RSACipher = public_key.encrypt(
key,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=None
)
)
key_file.close()
return (RSACipher, C, IV, tag, ext)
'''
def MyRSADecrypt(filepath, RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath):
with open(RSA_Privatekey_filepath, 'rb') as key_file:
private_key = serialization.load_pem_private_key(
key_file.read(),
password=None,
backend = default_backend()
)
key = private_key.decrypt(RSACipher,asymmetric,padding.OAEP(
mgf = asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
algorithm= hashes.SHA256(),
label=None
)
)
EncKey_start=0
EncKey_end = int((len(key)/2))
HMACKey_start=EncKey_end
HMACKey_end = int(len(key))
EncKey= key[EncKey_start:HMACKey_end]
key_file.close()
HMACKey= ""
MyFileDecryptMAC(filepath,EncKey, HMACKey, IV,tag)
def main():
#testFile = "test.txt"
testFile = "test_photo.jpg"
'''Regular'''
C, IV, key, ext = MyFileEncrypt(testFile)
input("File encrypted! Press enter to decrypt.")
MyFileDecrypt(testFile, IV, key, ext)
print("\nFile decrypted!")
'''HMAC'''
#C, IV, tag, EncKey, HMACKey, ext = MyFileEncryptMAC(testFile)
#input("File encrypted! Press enter to decrypt.")
#MyFileDecryptMAC(testFile, HMACKey)
#print("\nFile decrypted!")
'''RSA'''
CheckRSAKeys()
#RSACipher, C, IV, tag, ext = MyRSAEncrypt(testFile, RSA_PUBLIC_KEY_PATH)
#input("File encrypted! Press enter to decrypt.")
#MyRSADecrypt(RSACipher, C, IV, tag, ext, RSA_Privatekey_filepath)
#print("\nFile decrypted!")
main()
| raise Exception("HMACKey less than 32 bytes!") | conditional_block |
skim.rs | ///! The fuzzy matching algorithm used by skim
///! It focus more on path matching
///
///! # Example:
///! ```edition2018
///! use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices};
///!
///! assert_eq!(None, fuzzy_match("abc", "abx"));
///! assert!(fuzzy_match("axbycz", "abc").is_some());
///! assert!(fuzzy_match("axbycz", "xyz").is_some());
///!
///! let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap();
///! assert_eq!(indices, [0, 2, 4]);
///!
///! ```
///!
///! It is modeled after <https://github.com/felipesere/icepick.git>
use std::cmp::max;
use crate::util::*;
const BONUS_MATCHED: i64 = 4;
const BONUS_CASE_MATCH: i64 = 4;
const BONUS_UPPER_MATCH: i64 = 6;
const BONUS_ADJACENCY: i64 = 10;
const BONUS_SEPARATOR: i64 = 8;
const BONUS_CAMEL: i64 = 8;
const PENALTY_CASE_UNMATCHED: i64 = -1;
const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match
const PENALTY_MAX_LEADING: i64 = -18; // maxing penalty for leading letters
const PENALTY_UNMATCHED: i64 = -2;
pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> |
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> {
if pattern.is_empty() {
return Some((0, Vec::new()));
}
let mut picked = vec![];
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (mut next_col, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
let mut pat_idx = scores.len() as i64 - 1;
while pat_idx >= 0 {
let status = scores[pat_idx as usize][next_col];
next_col = status.back_ref;
picked.push(status.idx);
pat_idx -= 1;
}
picked.reverse();
Some((final_score, picked))
}
#[derive(Clone, Copy, Debug)]
struct MatchingStatus {
pub idx: usize,
pub score: i64,
pub final_score: i64,
pub adj_num: usize,
pub back_ref: usize,
}
impl Default for MatchingStatus {
fn default() -> Self {
MatchingStatus {
idx: 0,
score: 0,
final_score: 0,
adj_num: 1,
back_ref: 0,
}
}
}
fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> {
let mut scores = vec![];
let mut match_start_idx = 0; // to ensure that the pushed char are able to match the pattern
let mut pat_prev_ch = '\0';
// initialize the match positions and inline scores
for (pat_idx, pat_ch) in pattern.chars().enumerate() {
let mut vec = vec![];
let mut choice_prev_ch = '\0';
for (idx, ch) in choice.chars().enumerate() {
if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx {
let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch);
vec.push(MatchingStatus {
idx,
score,
final_score: score,
adj_num: 1,
back_ref: 0,
});
}
choice_prev_ch = ch;
}
if vec.is_empty() {
// not matched
return None;
}
match_start_idx = vec[0].idx + 1;
scores.push(vec);
pat_prev_ch = pat_ch;
}
// calculate max scores considering adjacent characters
for pat_idx in 1..scores.len() {
let (first_half, last_half) = scores.split_at_mut(pat_idx);
let prev_row = &first_half[first_half.len() - 1];
let cur_row = &mut last_half[0];
for idx in 0..cur_row.len() {
let next = cur_row[idx];
let prev = if idx > 0 {
cur_row[idx - 1]
} else {
MatchingStatus::default()
};
let mut score_before_idx = prev.final_score - prev.score + next.score;
score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64);
score_before_idx -= if prev.adj_num == 0 {
BONUS_ADJACENCY
} else {
0
};
let (back_ref, score, adj_num) = prev_row
.iter()
.enumerate()
.take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx)
.skip_while(|&(_, &MatchingStatus { idx, .. })| idx < prev.idx)
.map(|(back_ref, cur)| {
let adj_num = next.idx - cur.idx - 1;
let mut final_score = cur.final_score + next.score;
final_score += if adj_num == 0 {
BONUS_ADJACENCY
} else {
PENALTY_UNMATCHED * adj_num as i64
};
(back_ref, final_score, adj_num)
})
.max_by_key(|&(_, x, _)| x)
.unwrap_or((prev.back_ref, score_before_idx, prev.adj_num));
cur_row[idx] = if idx > 0 && score < score_before_idx {
MatchingStatus {
final_score: score_before_idx,
back_ref: prev.back_ref,
adj_num,
..next
}
} else {
MatchingStatus {
final_score: score,
back_ref,
adj_num,
..next
}
};
}
}
Some(scores)
}
// judge how many scores the current index should get
fn fuzzy_score(
choice_ch: char,
choice_idx: usize,
choice_prev_ch: char,
pat_ch: char,
pat_idx: usize,
_pat_prev_ch: char,
) -> i64 {
let mut score = BONUS_MATCHED;
let choice_prev_ch_type = char_type_of(choice_prev_ch);
let choice_role = char_role(choice_prev_ch, choice_ch);
if pat_ch == choice_ch {
if pat_ch.is_uppercase() {
score += BONUS_UPPER_MATCH;
} else {
score += BONUS_CASE_MATCH;
}
} else {
score += PENALTY_CASE_UNMATCHED;
}
// apply bonus for camelCases
if choice_role == CharRole::Head {
score += BONUS_CAMEL;
}
// apply bonus for matches after a separator
if choice_prev_ch_type == CharType::Separ {
score += BONUS_SEPARATOR;
}
if pat_idx == 0 {
score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING);
}
score
}
#[cfg(test)]
mod tests {
use super::*;
fn wrap_matches(line: &str, indices: &[usize]) -> String {
let mut ret = String::new();
let mut peekable = indices.iter().peekable();
for (idx, ch) in line.chars().enumerate() {
let next_id = **peekable.peek().unwrap_or(&&line.len());
if next_id == idx {
ret.push_str(format!("[{}]", ch).as_str());
peekable.next();
} else {
ret.push(ch);
}
}
ret
}
fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> {
let mut lines_with_score: Vec<(i64, &'static str)> = lines
.into_iter()
.map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s))
.collect();
lines_with_score.sort_by_key(|(score, _)| -score);
lines_with_score
.into_iter()
.map(|(_, string)| string)
.collect()
}
fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> {
let (_score, indices) = fuzzy_indices(line, pattern)?;
Some(wrap_matches(line, &indices))
}
fn assert_order(pattern: &str, choices: &[&'static str]) {
let result = filter_and_sort(pattern, choices);
if result != choices {
// debug print
println!("pattern: {}", pattern);
for &choice in choices.iter() {
if let Some((score, indices)) = fuzzy_indices(choice, pattern) {
println!("{}: {:?}", score, wrap_matches(choice, &indices));
} else {
println!("NO MATCH for {}", choice);
}
}
}
assert_eq!(result, choices);
}
#[test]
fn test_match_or_not() {
assert_eq!(Some(0), fuzzy_match("", ""));
assert_eq!(Some(0), fuzzy_match("abcdefaghi", ""));
assert_eq!(None, fuzzy_match("", "a"));
assert_eq!(None, fuzzy_match("abcdefaghi", "中"));
assert_eq!(None, fuzzy_match("abc", "abx"));
assert!(fuzzy_match("axbycz", "abc").is_some());
assert!(fuzzy_match("axbycz", "xyz").is_some());
assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap());
assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap());
assert_eq!(
"[H]ello, [世]界",
&wrap_fuzzy_match("Hello, 世界", "H世").unwrap()
);
}
#[test]
fn test_match_quality() {
// case
// assert_order("monad", &["monad", "Monad", "mONAD"]);
// initials
assert_order("ab", &["ab", "aoo_boo", "acb"]);
assert_order("CC", &["CamelCase", "camelCase", "camelcase"]);
assert_order("cC", &["camelCase", "CamelCase", "camelcase"]);
assert_order(
"cc",
&[
"camel case",
"camelCase",
"camelcase",
"CamelCase",
"camel ace",
],
);
assert_order(
"Da.Te",
&["Data.Text", "Data.Text.Lazy", "Data.Aeson.Encoding.text"],
);
// prefix
assert_order("is", &["isIEEE", "inSuf"]);
// shorter
assert_order("ma", &["map", "many", "maximum"]);
assert_order("print", &["printf", "sprintf"]);
// score(PRINT) = kMinScore
assert_order("ast", &["ast", "AST", "INT_FAST16_MAX"]);
// score(PRINT) > kMinScore
assert_order("Int", &["int", "INT", "PRINT"]);
}
}
| {
if pattern.is_empty() {
return Some(0);
}
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (_, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
Some(final_score)
} | identifier_body |
skim.rs | ///! The fuzzy matching algorithm used by skim
///! It focus more on path matching
///
///! # Example:
///! ```edition2018
///! use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices};
///!
///! assert_eq!(None, fuzzy_match("abc", "abx"));
///! assert!(fuzzy_match("axbycz", "abc").is_some());
///! assert!(fuzzy_match("axbycz", "xyz").is_some());
///!
///! let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap();
///! assert_eq!(indices, [0, 2, 4]);
///!
///! ```
///!
///! It is modeled after <https://github.com/felipesere/icepick.git>
use std::cmp::max;
use crate::util::*;
const BONUS_MATCHED: i64 = 4;
const BONUS_CASE_MATCH: i64 = 4;
const BONUS_UPPER_MATCH: i64 = 6;
const BONUS_ADJACENCY: i64 = 10;
const BONUS_SEPARATOR: i64 = 8;
const BONUS_CAMEL: i64 = 8;
const PENALTY_CASE_UNMATCHED: i64 = -1;
const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match
const PENALTY_MAX_LEADING: i64 = -18; // maxing penalty for leading letters
const PENALTY_UNMATCHED: i64 = -2;
pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> {
if pattern.is_empty() {
return Some(0);
}
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (_, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
Some(final_score)
}
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> {
if pattern.is_empty() {
return Some((0, Vec::new()));
}
let mut picked = vec![];
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (mut next_col, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
let mut pat_idx = scores.len() as i64 - 1;
while pat_idx >= 0 {
let status = scores[pat_idx as usize][next_col];
next_col = status.back_ref;
picked.push(status.idx);
pat_idx -= 1;
}
picked.reverse();
Some((final_score, picked))
}
#[derive(Clone, Copy, Debug)]
struct MatchingStatus {
pub idx: usize,
pub score: i64,
pub final_score: i64,
pub adj_num: usize,
pub back_ref: usize,
}
impl Default for MatchingStatus {
fn default() -> Self {
MatchingStatus {
idx: 0,
score: 0,
final_score: 0,
adj_num: 1,
back_ref: 0,
}
}
}
fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> {
let mut scores = vec![];
let mut match_start_idx = 0; // to ensure that the pushed char are able to match the pattern
let mut pat_prev_ch = '\0';
// initialize the match positions and inline scores
for (pat_idx, pat_ch) in pattern.chars().enumerate() {
let mut vec = vec![];
let mut choice_prev_ch = '\0';
for (idx, ch) in choice.chars().enumerate() {
if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx {
let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch);
vec.push(MatchingStatus {
idx,
score,
final_score: score,
adj_num: 1,
back_ref: 0,
});
}
choice_prev_ch = ch;
}
if vec.is_empty() {
// not matched
return None;
}
match_start_idx = vec[0].idx + 1;
scores.push(vec);
pat_prev_ch = pat_ch;
}
// calculate max scores considering adjacent characters
for pat_idx in 1..scores.len() {
let (first_half, last_half) = scores.split_at_mut(pat_idx);
let prev_row = &first_half[first_half.len() - 1];
let cur_row = &mut last_half[0];
for idx in 0..cur_row.len() {
let next = cur_row[idx];
let prev = if idx > 0 {
cur_row[idx - 1]
} else {
MatchingStatus::default()
};
let mut score_before_idx = prev.final_score - prev.score + next.score;
score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64);
score_before_idx -= if prev.adj_num == 0 {
BONUS_ADJACENCY
} else {
0
};
let (back_ref, score, adj_num) = prev_row
.iter()
.enumerate()
.take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx)
.skip_while(|&(_, &MatchingStatus { idx, .. })| idx < prev.idx)
.map(|(back_ref, cur)| {
let adj_num = next.idx - cur.idx - 1;
let mut final_score = cur.final_score + next.score;
final_score += if adj_num == 0 {
BONUS_ADJACENCY
} else {
PENALTY_UNMATCHED * adj_num as i64
};
(back_ref, final_score, adj_num)
})
.max_by_key(|&(_, x, _)| x)
.unwrap_or((prev.back_ref, score_before_idx, prev.adj_num));
cur_row[idx] = if idx > 0 && score < score_before_idx {
MatchingStatus {
final_score: score_before_idx,
back_ref: prev.back_ref,
adj_num,
..next
}
} else {
MatchingStatus {
final_score: score,
back_ref,
adj_num,
..next
}
};
}
}
Some(scores)
}
// judge how many scores the current index should get
fn fuzzy_score(
choice_ch: char,
choice_idx: usize,
choice_prev_ch: char,
pat_ch: char,
pat_idx: usize,
_pat_prev_ch: char,
) -> i64 {
let mut score = BONUS_MATCHED;
let choice_prev_ch_type = char_type_of(choice_prev_ch);
let choice_role = char_role(choice_prev_ch, choice_ch);
if pat_ch == choice_ch {
if pat_ch.is_uppercase() {
score += BONUS_UPPER_MATCH;
} else {
score += BONUS_CASE_MATCH;
}
} else {
score += PENALTY_CASE_UNMATCHED;
}
// apply bonus for camelCases
if choice_role == CharRole::Head {
score += BONUS_CAMEL;
}
// apply bonus for matches after a separator
if choice_prev_ch_type == CharType::Separ {
score += BONUS_SEPARATOR;
}
if pat_idx == 0 {
score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING);
}
score
}
#[cfg(test)]
mod tests {
use super::*;
fn wrap_matches(line: &str, indices: &[usize]) -> String {
let mut ret = String::new();
let mut peekable = indices.iter().peekable();
for (idx, ch) in line.chars().enumerate() {
let next_id = **peekable.peek().unwrap_or(&&line.len());
if next_id == idx {
ret.push_str(format!("[{}]", ch).as_str());
peekable.next();
} else {
ret.push(ch);
}
}
ret
}
fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> {
let mut lines_with_score: Vec<(i64, &'static str)> = lines
.into_iter()
.map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s))
.collect();
lines_with_score.sort_by_key(|(score, _)| -score);
lines_with_score
.into_iter()
.map(|(_, string)| string)
.collect()
}
fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> {
let (_score, indices) = fuzzy_indices(line, pattern)?;
Some(wrap_matches(line, &indices))
}
fn assert_order(pattern: &str, choices: &[&'static str]) {
let result = filter_and_sort(pattern, choices);
if result != choices {
// debug print
println!("pattern: {}", pattern);
for &choice in choices.iter() {
if let Some((score, indices)) = fuzzy_indices(choice, pattern) {
println!("{}: {:?}", score, wrap_matches(choice, &indices));
} else {
println!("NO MATCH for {}", choice);
}
}
}
assert_eq!(result, choices);
}
#[test]
fn test_match_or_not() {
assert_eq!(Some(0), fuzzy_match("", ""));
assert_eq!(Some(0), fuzzy_match("abcdefaghi", ""));
assert_eq!(None, fuzzy_match("", "a"));
assert_eq!(None, fuzzy_match("abcdefaghi", "中"));
assert_eq!(None, fuzzy_match("abc", "abx"));
assert!(fuzzy_match("axbycz", "abc").is_some());
assert!(fuzzy_match("axbycz", "xyz").is_some());
assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap());
assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap());
assert_eq!(
"[H]ello, [世]界",
&wrap_fuzzy_match("Hello, 世界", "H世").unwrap()
);
}
#[test]
fn test_match_quality() {
// case
// assert_order("monad", &["monad", "Monad", "mONAD"]);
// initials
assert_order("ab", &["ab", "aoo_boo", "acb"]);
assert_order("CC", &["CamelCase", "camelCase", "camelcase"]);
assert_order("cC", &["camelCase", "CamelCase", "camelcase"]);
assert_order(
"cc", | &[
"camel case",
"camelCase",
"camelcase",
"CamelCase",
"camel ace",
],
);
assert_order(
"Da.Te",
&["Data.Text", "Data.Text.Lazy", "Data.Aeson.Encoding.text"],
);
// prefix
assert_order("is", &["isIEEE", "inSuf"]);
// shorter
assert_order("ma", &["map", "many", "maximum"]);
assert_order("print", &["printf", "sprintf"]);
// score(PRINT) = kMinScore
assert_order("ast", &["ast", "AST", "INT_FAST16_MAX"]);
// score(PRINT) > kMinScore
assert_order("Int", &["int", "INT", "PRINT"]);
}
} | random_line_split | |
skim.rs | ///! The fuzzy matching algorithm used by skim
///! It focus more on path matching
///
///! # Example:
///! ```edition2018
///! use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices};
///!
///! assert_eq!(None, fuzzy_match("abc", "abx"));
///! assert!(fuzzy_match("axbycz", "abc").is_some());
///! assert!(fuzzy_match("axbycz", "xyz").is_some());
///!
///! let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap();
///! assert_eq!(indices, [0, 2, 4]);
///!
///! ```
///!
///! It is modeled after <https://github.com/felipesere/icepick.git>
use std::cmp::max;
use crate::util::*;
const BONUS_MATCHED: i64 = 4;
const BONUS_CASE_MATCH: i64 = 4;
const BONUS_UPPER_MATCH: i64 = 6;
const BONUS_ADJACENCY: i64 = 10;
const BONUS_SEPARATOR: i64 = 8;
const BONUS_CAMEL: i64 = 8;
const PENALTY_CASE_UNMATCHED: i64 = -1;
const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match
const PENALTY_MAX_LEADING: i64 = -18; // maxing penalty for leading letters
const PENALTY_UNMATCHED: i64 = -2;
pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> {
if pattern.is_empty() {
return Some(0);
}
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (_, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
Some(final_score)
}
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> {
if pattern.is_empty() {
return Some((0, Vec::new()));
}
let mut picked = vec![];
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (mut next_col, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
let mut pat_idx = scores.len() as i64 - 1;
while pat_idx >= 0 {
let status = scores[pat_idx as usize][next_col];
next_col = status.back_ref;
picked.push(status.idx);
pat_idx -= 1;
}
picked.reverse();
Some((final_score, picked))
}
#[derive(Clone, Copy, Debug)]
struct MatchingStatus {
pub idx: usize,
pub score: i64,
pub final_score: i64,
pub adj_num: usize,
pub back_ref: usize,
}
impl Default for MatchingStatus {
fn default() -> Self {
MatchingStatus {
idx: 0,
score: 0,
final_score: 0,
adj_num: 1,
back_ref: 0,
}
}
}
fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> {
let mut scores = vec![];
let mut match_start_idx = 0; // to ensure that the pushed char are able to match the pattern
let mut pat_prev_ch = '\0';
// initialize the match positions and inline scores
for (pat_idx, pat_ch) in pattern.chars().enumerate() {
let mut vec = vec![];
let mut choice_prev_ch = '\0';
for (idx, ch) in choice.chars().enumerate() {
if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx {
let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch);
vec.push(MatchingStatus {
idx,
score,
final_score: score,
adj_num: 1,
back_ref: 0,
});
}
choice_prev_ch = ch;
}
if vec.is_empty() {
// not matched
return None;
}
match_start_idx = vec[0].idx + 1;
scores.push(vec);
pat_prev_ch = pat_ch;
}
// calculate max scores considering adjacent characters
for pat_idx in 1..scores.len() {
let (first_half, last_half) = scores.split_at_mut(pat_idx);
let prev_row = &first_half[first_half.len() - 1];
let cur_row = &mut last_half[0];
for idx in 0..cur_row.len() {
let next = cur_row[idx];
let prev = if idx > 0 {
cur_row[idx - 1]
} else {
MatchingStatus::default()
};
let mut score_before_idx = prev.final_score - prev.score + next.score;
score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64);
score_before_idx -= if prev.adj_num == 0 {
BONUS_ADJACENCY
} else {
0
};
let (back_ref, score, adj_num) = prev_row
.iter()
.enumerate()
.take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx)
.skip_while(|&(_, &MatchingStatus { idx, .. })| idx < prev.idx)
.map(|(back_ref, cur)| {
let adj_num = next.idx - cur.idx - 1;
let mut final_score = cur.final_score + next.score;
final_score += if adj_num == 0 {
BONUS_ADJACENCY
} else {
PENALTY_UNMATCHED * adj_num as i64
};
(back_ref, final_score, adj_num)
})
.max_by_key(|&(_, x, _)| x)
.unwrap_or((prev.back_ref, score_before_idx, prev.adj_num));
cur_row[idx] = if idx > 0 && score < score_before_idx {
MatchingStatus {
final_score: score_before_idx,
back_ref: prev.back_ref,
adj_num,
..next
}
} else {
MatchingStatus {
final_score: score,
back_ref,
adj_num,
..next
}
};
}
}
Some(scores)
}
// judge how many scores the current index should get
fn fuzzy_score(
choice_ch: char,
choice_idx: usize,
choice_prev_ch: char,
pat_ch: char,
pat_idx: usize,
_pat_prev_ch: char,
) -> i64 {
let mut score = BONUS_MATCHED;
let choice_prev_ch_type = char_type_of(choice_prev_ch);
let choice_role = char_role(choice_prev_ch, choice_ch);
if pat_ch == choice_ch | else {
score += PENALTY_CASE_UNMATCHED;
}
// apply bonus for camelCases
if choice_role == CharRole::Head {
score += BONUS_CAMEL;
}
// apply bonus for matches after a separator
if choice_prev_ch_type == CharType::Separ {
score += BONUS_SEPARATOR;
}
if pat_idx == 0 {
score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING);
}
score
}
#[cfg(test)]
mod tests {
use super::*;
fn wrap_matches(line: &str, indices: &[usize]) -> String {
let mut ret = String::new();
let mut peekable = indices.iter().peekable();
for (idx, ch) in line.chars().enumerate() {
let next_id = **peekable.peek().unwrap_or(&&line.len());
if next_id == idx {
ret.push_str(format!("[{}]", ch).as_str());
peekable.next();
} else {
ret.push(ch);
}
}
ret
}
fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> {
let mut lines_with_score: Vec<(i64, &'static str)> = lines
.into_iter()
.map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s))
.collect();
lines_with_score.sort_by_key(|(score, _)| -score);
lines_with_score
.into_iter()
.map(|(_, string)| string)
.collect()
}
fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> {
let (_score, indices) = fuzzy_indices(line, pattern)?;
Some(wrap_matches(line, &indices))
}
fn assert_order(pattern: &str, choices: &[&'static str]) {
let result = filter_and_sort(pattern, choices);
if result != choices {
// debug print
println!("pattern: {}", pattern);
for &choice in choices.iter() {
if let Some((score, indices)) = fuzzy_indices(choice, pattern) {
println!("{}: {:?}", score, wrap_matches(choice, &indices));
} else {
println!("NO MATCH for {}", choice);
}
}
}
assert_eq!(result, choices);
}
#[test]
fn test_match_or_not() {
assert_eq!(Some(0), fuzzy_match("", ""));
assert_eq!(Some(0), fuzzy_match("abcdefaghi", ""));
assert_eq!(None, fuzzy_match("", "a"));
assert_eq!(None, fuzzy_match("abcdefaghi", "中"));
assert_eq!(None, fuzzy_match("abc", "abx"));
assert!(fuzzy_match("axbycz", "abc").is_some());
assert!(fuzzy_match("axbycz", "xyz").is_some());
assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap());
assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap());
assert_eq!(
"[H]ello, [世]界",
&wrap_fuzzy_match("Hello, 世界", "H世").unwrap()
);
}
#[test]
fn test_match_quality() {
// case
// assert_order("monad", &["monad", "Monad", "mONAD"]);
// initials
assert_order("ab", &["ab", "aoo_boo", "acb"]);
assert_order("CC", &["CamelCase", "camelCase", "camelcase"]);
assert_order("cC", &["camelCase", "CamelCase", "camelcase"]);
assert_order(
"cc",
&[
"camel case",
"camelCase",
"camelcase",
"CamelCase",
"camel ace",
],
);
assert_order(
"Da.Te",
&["Data.Text", "Data.Text.Lazy", "Data.Aeson.Encoding.text"],
);
// prefix
assert_order("is", &["isIEEE", "inSuf"]);
// shorter
assert_order("ma", &["map", "many", "maximum"]);
assert_order("print", &["printf", "sprintf"]);
// score(PRINT) = kMinScore
assert_order("ast", &["ast", "AST", "INT_FAST16_MAX"]);
// score(PRINT) > kMinScore
assert_order("Int", &["int", "INT", "PRINT"]);
}
}
| {
if pat_ch.is_uppercase() {
score += BONUS_UPPER_MATCH;
} else {
score += BONUS_CASE_MATCH;
}
} | conditional_block |
skim.rs | ///! The fuzzy matching algorithm used by skim
///! It focus more on path matching
///
///! # Example:
///! ```edition2018
///! use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices};
///!
///! assert_eq!(None, fuzzy_match("abc", "abx"));
///! assert!(fuzzy_match("axbycz", "abc").is_some());
///! assert!(fuzzy_match("axbycz", "xyz").is_some());
///!
///! let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap();
///! assert_eq!(indices, [0, 2, 4]);
///!
///! ```
///!
///! It is modeled after <https://github.com/felipesere/icepick.git>
use std::cmp::max;
use crate::util::*;
const BONUS_MATCHED: i64 = 4;
const BONUS_CASE_MATCH: i64 = 4;
const BONUS_UPPER_MATCH: i64 = 6;
const BONUS_ADJACENCY: i64 = 10;
const BONUS_SEPARATOR: i64 = 8;
const BONUS_CAMEL: i64 = 8;
const PENALTY_CASE_UNMATCHED: i64 = -1;
const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match
const PENALTY_MAX_LEADING: i64 = -18; // maxing penalty for leading letters
const PENALTY_UNMATCHED: i64 = -2;
pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> {
if pattern.is_empty() {
return Some(0);
}
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (_, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
Some(final_score)
}
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> {
if pattern.is_empty() {
return Some((0, Vec::new()));
}
let mut picked = vec![];
let scores = build_graph(choice, pattern)?;
let last_row = &scores[scores.len() - 1];
let (mut next_col, &MatchingStatus { final_score, .. }) = last_row
.iter()
.enumerate()
.max_by_key(|&(_, x)| x.final_score)
.expect("fuzzy_indices failed to iterate over last_row");
let mut pat_idx = scores.len() as i64 - 1;
while pat_idx >= 0 {
let status = scores[pat_idx as usize][next_col];
next_col = status.back_ref;
picked.push(status.idx);
pat_idx -= 1;
}
picked.reverse();
Some((final_score, picked))
}
#[derive(Clone, Copy, Debug)]
struct MatchingStatus {
pub idx: usize,
pub score: i64,
pub final_score: i64,
pub adj_num: usize,
pub back_ref: usize,
}
impl Default for MatchingStatus {
fn default() -> Self {
MatchingStatus {
idx: 0,
score: 0,
final_score: 0,
adj_num: 1,
back_ref: 0,
}
}
}
fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> {
let mut scores = vec![];
let mut match_start_idx = 0; // to ensure that the pushed char are able to match the pattern
let mut pat_prev_ch = '\0';
// initialize the match positions and inline scores
for (pat_idx, pat_ch) in pattern.chars().enumerate() {
let mut vec = vec![];
let mut choice_prev_ch = '\0';
for (idx, ch) in choice.chars().enumerate() {
if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx {
let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch);
vec.push(MatchingStatus {
idx,
score,
final_score: score,
adj_num: 1,
back_ref: 0,
});
}
choice_prev_ch = ch;
}
if vec.is_empty() {
// not matched
return None;
}
match_start_idx = vec[0].idx + 1;
scores.push(vec);
pat_prev_ch = pat_ch;
}
// calculate max scores considering adjacent characters
for pat_idx in 1..scores.len() {
let (first_half, last_half) = scores.split_at_mut(pat_idx);
let prev_row = &first_half[first_half.len() - 1];
let cur_row = &mut last_half[0];
for idx in 0..cur_row.len() {
let next = cur_row[idx];
let prev = if idx > 0 {
cur_row[idx - 1]
} else {
MatchingStatus::default()
};
let mut score_before_idx = prev.final_score - prev.score + next.score;
score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64);
score_before_idx -= if prev.adj_num == 0 {
BONUS_ADJACENCY
} else {
0
};
let (back_ref, score, adj_num) = prev_row
.iter()
.enumerate()
.take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx)
.skip_while(|&(_, &MatchingStatus { idx, .. })| idx < prev.idx)
.map(|(back_ref, cur)| {
let adj_num = next.idx - cur.idx - 1;
let mut final_score = cur.final_score + next.score;
final_score += if adj_num == 0 {
BONUS_ADJACENCY
} else {
PENALTY_UNMATCHED * adj_num as i64
};
(back_ref, final_score, adj_num)
})
.max_by_key(|&(_, x, _)| x)
.unwrap_or((prev.back_ref, score_before_idx, prev.adj_num));
cur_row[idx] = if idx > 0 && score < score_before_idx {
MatchingStatus {
final_score: score_before_idx,
back_ref: prev.back_ref,
adj_num,
..next
}
} else {
MatchingStatus {
final_score: score,
back_ref,
adj_num,
..next
}
};
}
}
Some(scores)
}
// judge how many scores the current index should get
fn fuzzy_score(
choice_ch: char,
choice_idx: usize,
choice_prev_ch: char,
pat_ch: char,
pat_idx: usize,
_pat_prev_ch: char,
) -> i64 {
let mut score = BONUS_MATCHED;
let choice_prev_ch_type = char_type_of(choice_prev_ch);
let choice_role = char_role(choice_prev_ch, choice_ch);
if pat_ch == choice_ch {
if pat_ch.is_uppercase() {
score += BONUS_UPPER_MATCH;
} else {
score += BONUS_CASE_MATCH;
}
} else {
score += PENALTY_CASE_UNMATCHED;
}
// apply bonus for camelCases
if choice_role == CharRole::Head {
score += BONUS_CAMEL;
}
// apply bonus for matches after a separator
if choice_prev_ch_type == CharType::Separ {
score += BONUS_SEPARATOR;
}
if pat_idx == 0 {
score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING);
}
score
}
#[cfg(test)]
mod tests {
use super::*;
fn wrap_matches(line: &str, indices: &[usize]) -> String {
let mut ret = String::new();
let mut peekable = indices.iter().peekable();
for (idx, ch) in line.chars().enumerate() {
let next_id = **peekable.peek().unwrap_or(&&line.len());
if next_id == idx {
ret.push_str(format!("[{}]", ch).as_str());
peekable.next();
} else {
ret.push(ch);
}
}
ret
}
fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> {
let mut lines_with_score: Vec<(i64, &'static str)> = lines
.into_iter()
.map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s))
.collect();
lines_with_score.sort_by_key(|(score, _)| -score);
lines_with_score
.into_iter()
.map(|(_, string)| string)
.collect()
}
fn | (line: &str, pattern: &str) -> Option<String> {
let (_score, indices) = fuzzy_indices(line, pattern)?;
Some(wrap_matches(line, &indices))
}
fn assert_order(pattern: &str, choices: &[&'static str]) {
let result = filter_and_sort(pattern, choices);
if result != choices {
// debug print
println!("pattern: {}", pattern);
for &choice in choices.iter() {
if let Some((score, indices)) = fuzzy_indices(choice, pattern) {
println!("{}: {:?}", score, wrap_matches(choice, &indices));
} else {
println!("NO MATCH for {}", choice);
}
}
}
assert_eq!(result, choices);
}
#[test]
fn test_match_or_not() {
assert_eq!(Some(0), fuzzy_match("", ""));
assert_eq!(Some(0), fuzzy_match("abcdefaghi", ""));
assert_eq!(None, fuzzy_match("", "a"));
assert_eq!(None, fuzzy_match("abcdefaghi", "中"));
assert_eq!(None, fuzzy_match("abc", "abx"));
assert!(fuzzy_match("axbycz", "abc").is_some());
assert!(fuzzy_match("axbycz", "xyz").is_some());
assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap());
assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap());
assert_eq!(
"[H]ello, [世]界",
&wrap_fuzzy_match("Hello, 世界", "H世").unwrap()
);
}
#[test]
fn test_match_quality() {
// case
// assert_order("monad", &["monad", "Monad", "mONAD"]);
// initials
assert_order("ab", &["ab", "aoo_boo", "acb"]);
assert_order("CC", &["CamelCase", "camelCase", "camelcase"]);
assert_order("cC", &["camelCase", "CamelCase", "camelcase"]);
assert_order(
"cc",
&[
"camel case",
"camelCase",
"camelcase",
"CamelCase",
"camel ace",
],
);
assert_order(
"Da.Te",
&["Data.Text", "Data.Text.Lazy", "Data.Aeson.Encoding.text"],
);
// prefix
assert_order("is", &["isIEEE", "inSuf"]);
// shorter
assert_order("ma", &["map", "many", "maximum"]);
assert_order("print", &["printf", "sprintf"]);
// score(PRINT) = kMinScore
assert_order("ast", &["ast", "AST", "INT_FAST16_MAX"]);
// score(PRINT) > kMinScore
assert_order("Int", &["int", "INT", "PRINT"]);
}
}
| wrap_fuzzy_match | identifier_name |
ex1a_sim_vanillaSEIR_model_old.py | import numpy as np
from scipy.integrate import odeint
from scipy.integrate import solve_ivp
from scipy.optimize import fsolve
from scipy import stats
import matplotlib.pyplot as plt
from matplotlib import rc
# Importing models and plotting functions
from epimodels.seir import *
rc('font',**{'family':'serif','serif':['Times']})
rc('text', usetex=True)
# Select simulation number
sim_num = 0
############################################
######## Parameters for Simulation ########
############################################
# Simulation for a toy problem for implementation comparison
if sim_num < 2:
# Simulation parameters
S0 = 9990
I0 = 1
R0 = 0
E0 = 9
N = S0 + I0 + R0 + E0
days = 100
# Model parameters
contact_rate = 10 # number of contacts per day
transmission_probability = 0.07 # transmission probability
gamma_inv = 5 # infectious period
sigma_inv = 2 # latent period
# Derived Model parameters and Control variable
beta = contact_rate * transmission_probability
gamma = 1 / gamma_inv
sigma = 1 / sigma_inv
r0 = beta / gamma
if sim_num == 1:
D0 = 0
m = 0.0043 # mortality rate
N = S0 + I0 + R0 + E0 + D0
# Values used for the indian armed forces model
# Initial values from March 21st for India test-case
if sim_num == 2:
# Values used for the indian armed forces model
# Initial values from March 21st for India test-case
N = 1375987036 # Number from report
days = 356
gamma_inv = 7
sigma_inv = 5.1
m = 0.0043
r0 = 2.28
tau_q_inv = 14
# Initial values from March 21st "indian armed forces predictions"
R0 = 23
D0 = 5
Q0 = 249 # Q0 is 1% of total infectious; i.e. I0 + Q0 (as described in report)
# In the report table 1, they write number of Quarantined as SO rather than Q0
# Q0, is this a typo?
T0 = 334 # This is the total number of comfirmed cases for March 21st, not used it seems?
I0 = (1.01/0.01) * Q0 # Number of Infectuos as described in report
# The initial number of exposed E(0) is not defined in report, how are they computed?
contact_rate = 10 # number of contacts an individual has per day
E0 = (contact_rate - 1)*I0 # Estimated exposed based on contact rate and inital infected
# Derived Model parameters and
beta = r0 / gamma_inv
sigma = 1.0 / sigma_inv
gamma = 1.0 / gamma_inv
tau_q = 1.0 /tau_q_inv
# Control variable: percentage quarantined
q = 0.01
print('***** Hyper-parameters *****')
print('N=',N,'days=',days, 'r0=',r0, 'gamma_inv (days) = ',gamma_inv)
print('***** Model-parameters *****')
print('beta=',beta,'gamma=', gamma, 'sigma', sigma)
######################################
######## Simulation Functions ########
######################################
# Equation to estimate final epidemic size (infected)
def | (x):
return np.log(x) + r0_test*(1-x)
###################################################
######## SEIR Model simulation Simulation ########
###################################################
if sim_num == 0:
''' Compartment structure of armed forces SEIR model
N = S + E + I + R
'''
# Initial conditions vector
S0 = N - E0 - I0 - R0
y0 = S0, E0, I0, R0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0)
# Simulation Options
seir_type = 0 # SEIR no deaths
solver_type = 1 # ivp - LSODA
else:
''' Compartment structure of armed forces SEIR model with deaths
N = S + E + I + R + D
'''
S0 = N - E0 - I0 - R0 - D0
y0 = S0, E0, I0, R0, D0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0, "D0", D0)
# Simulation Options
seir_type = 1 # SEIR with deaths
solver_type = 1 # ivp - LSODA
# Simulate ODE equations
SEIRparams = N, beta, gamma, sigma
sol_ode_timeseries = simulate_seirModel(seir_type, SEIRparams, solver_type, y0, N, days, 1)
# Unpack timseries
if sim_num == 0:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
R = sol_ode_timeseries[4]
else:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
Re = sol_ode_timeseries[4]
D = sol_ode_timeseries[5]
R = Re + D
# Accumulated Total Cases
T = I + R
print("t=", t[-1])
print("ST=", S[-1])
print("ET=", E[-1])
print("IT=", I[-1])
print("RT=", R[-1])
print("TT=", T[-1])
if sim_num > 0:
print("DT=", D[-1])
print("ReT=",Re[-1])
# Estimated Final epidemic size (analytic) not-dependent on simulation
init_guess = 0.0001
r0_test = r0
SinfN = fsolve(epi_size, init_guess)
One_SinfN = 1 - SinfN
print('***** Final Epidemic Size *****')
print('r0 = ', r0_test, '1 - Sinf/S0 = ', One_SinfN[0])
print('***** Results *****')
peak_inf_idx = np.argmax(I)
peak_inf = I[peak_inf_idx]
print('Peak Instant. Infected = ', peak_inf,'by day=', peak_inf_idx)
peak_total_inf = T[peak_inf_idx]
print('Total Cases when Peak = ', peak_total_inf,'by day=', peak_inf_idx)
#####################################################################
######## Plots Simulation with point estimates of parameters ########
#####################################################################
# Plot the data on three separate curves for S(t), I(t) and R(t)
fig, ax1 = plt.subplots()
if sim_num > 0:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f}, m={m:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Variable evolution
ax1.plot(t, S/N, 'k', lw=2, label='Susceptible')
ax1.plot(t, E/N, 'm', lw=2, label='Exposed')
ax1.plot(t, I/N, 'r', lw=2, label='Infected')
ax1.plot(t, T/N, 'y', lw=2, label='Total Cases')
if sim_num == 0:
ax1.plot(t, R/N, 'g--', lw=1, label='Recovered')
else:
ax1.plot(t, Re/N, 'g--', lw=1, label='Recovered')
ax1.plot(t, D/N, 'b--', lw=1, label='Dead')
# Plot Final Epidemic Size
ax1.plot(t, One_SinfN*np.ones(len(t)), 'm--')
txt1 = "{per:2.2f} infected"
ax1.text(t[0], One_SinfN - 0.05, txt1.format(per=One_SinfN[0]), fontsize=12, color='m')
# Plot peak points
ax1.plot(peak_inf_idx, peak_inf/N,'ro', markersize=8)
ax1.plot(peak_inf_idx, peak_total_inf/N,'ro', markersize=8)
if sim_num < 2:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f}"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f}"
else:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f} from March 21"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f} from March 21"
ax1.text(peak_inf_idx+10, peak_inf/N, txt_title.format(peak_inf=peak_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
ax1.text(peak_inf_idx+10, peak_total_inf/N, txt_title2.format(peak_total=peak_total_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
# Making things beautiful
ax1.set_xlabel('Time /days', fontsize=12)
ax1.set_ylabel('Percentage of Population', fontsize=12)
ax1.yaxis.set_tick_params(length=0)
ax1.xaxis.set_tick_params(length=0)
ax1.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax1.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax1.spines[spine].set_visible(True)
fig.subplots_adjust(left=.12, bottom=.14, right=.93, top=0.93)
fig.set_size_inches(20.5/2, 14.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.pdf'%sim_num, bbox_inches='tight')
#################################################################
######## Plots Simulation with reproductive/growth rates ########
#################################################################
do_growth = 1
if do_growth:
# Analytic growth rate
effective_Rt = r0 * (S/N)
growth_rates = gamma * (effective_Rt - 1)
####### Plots for Growth Rates #######
fig, (ax1, ax2) = plt.subplots(1,2)
if sim_num > 0:
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Plot of Reproductive rate (number)
ax1.plot(t, effective_Rt, 'k', lw=2, label='Rt (Effective Reproductive Rate)')
ax1.text(t[0] + 0.02, effective_Rt[0] - 0.15,r'${\cal R}_t$', fontsize=10)
ax1.plot(t, 1*np.ones(len(t)), 'r-')
txt1 = "Critical (Rt={per:2.2f})"
ax1.text(t[-1]-50, 1 + 0.01, txt1.format(per=1), fontsize=12, color='r')
ax1.text(t[-1]-50,2.5, r"${\cal R}_t \equiv \left( \frac{S (t) }{N (t) } \right) {\cal R}_0$", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
# Estimations of End of Epidemic
effRT_diff = effective_Rt - 1
ids_less_1 = np.nonzero(effRT_diff < 0)
effRT_crossing = ids_less_1[0][1]
ax1.plot(effRT_crossing, 1,'ro', markersize=12)
ax1.text(effRT_crossing-10, 1-0.2,str(effRT_crossing), fontsize=10, color="r")
ax1.set_ylabel('Rt (Effective Reproductive Rate)', fontsize=12)
ax1.set_xlabel('Time[days]', fontsize=12)
ax1.set_ylim(0,4)
# Plot of temporal growth rate
ax2.plot(t, growth_rates, 'k', lw=2, label='rI (temporal growth rate)')
ax2.text(t[0] + 0.02, growth_rates[0] - 0.02,r'${r}_I(t)$', fontsize=10)
ax2.plot(t, 0*np.ones(len(t)), 'r-')
txt1 = r"Critical ($r_I$={per:2.2f})"
ax2.text(t[-1]-50, 0 + 0.01, txt1.format(per=0), fontsize=12, color='r')
ax2.text(t[-1]-50, 0.2, r"$r_I \equiv \gamma \left[ {\cal R}_t - 1 \right]$", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
ax2.text(t[-1]-50, 0.1, r"$\frac{ dI}{dt} = r_I \, I $", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
ax2.set_ylabel('rI (temporal growth rate)', fontsize=12)
ax2.set_xlabel('Time[days]',fontsize=12)
ax2.set_ylim(-0.2,0.5)
# Estimations of End of Epidemic
rI_diff = growth_rates
ids_less_0 = np.nonzero(rI_diff < 0)
rI_crossing = ids_less_1[0][1]
ax2.plot(rI_crossing, 0,'ro', markersize=12)
ax2.text(rI_crossing-10, 0-0.04,str(rI_crossing), fontsize=10, color="r")
fig.set_size_inches(27.5/2, 12.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSIR_growthRates_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSIR_growthRates_%i.pdf'%sim_num, bbox_inches='tight')
#############################################################
######## Dependence of R0 on Final Epidemic Behavior ########
#############################################################
# Final epidemic size (analytic)
# r0_vals = np.linspace(1,5,100)
# init_guess = 0.0001
# Sinf_N = []
# Sinf_S0 = []
# for ii in range(len(r0_vals)):
# r0_test = r0_vals[ii]
# Sinf_N.append(fsolve(epi_size, init_guess))
# Sinf_S0.append(1 - Sinf_N[ii])
# # Plots
# fig0, ax0 = plt.subplots()
# ax0.plot(r0_vals, Sinf_S0, 'r', lw=2, label='Susceptible')
# ax0.set_ylabel('$1 - S_{\infty}/N$ (percentage of population infected)', fontsize=12)
# ax0.set_xlabel('$R_0$', fontsize=12)
# # Current estimate of Covid R0
# plt.title('Final Size of Epidemic Dependence on $R_0$ estimate',fontsize=15)
# ax0.plot(r0, One_SinfN, 'ko', markersize=5, lw=2)
# # Plot mean
# txt = 'Covid R0({r0:3.3f})'
# ax0.text(r0 - 0.45, One_SinfN + 0.05,txt.format(r0=r0_test), fontsize=10)
# plt.plot([r0]*10,np.linspace(0,One_SinfN,10), color='black')
# txt = "{Sinf:3.3f} Infected"
# ax0.text(1.1, One_SinfN - 0.025,txt.format(Sinf=One_SinfN[0]), fontsize=8)
# plt.plot(np.linspace(1,[r0],10), [One_SinfN]*10, color='black')
# ax0.text(4, 0.75, r"${\cal R}_0 \equiv \frac{ \beta } {\gamma}$", fontsize=15, bbox=dict(facecolor='red', alpha=0.15))
# fig0.set_size_inches(18.5/2, 12.5/2, forward=True)
# plt.savefig('./figures/vanilla/armedSIR_finalSize_%i.png'%sim_num, bbox_inches='tight')
# plt.savefig('./figures/vanilla/armedSIR_finalSize_%i.pdf'%sim_num, bbox_inches='tight')
plt.show()
| epi_size | identifier_name |
ex1a_sim_vanillaSEIR_model_old.py | import numpy as np
from scipy.integrate import odeint
from scipy.integrate import solve_ivp
from scipy.optimize import fsolve
from scipy import stats
import matplotlib.pyplot as plt
from matplotlib import rc
# Importing models and plotting functions
from epimodels.seir import *
rc('font',**{'family':'serif','serif':['Times']})
rc('text', usetex=True)
# Select simulation number
sim_num = 0
############################################
######## Parameters for Simulation ########
############################################
# Simulation for a toy problem for implementation comparison
if sim_num < 2:
# Simulation parameters
S0 = 9990
I0 = 1
R0 = 0
E0 = 9
N = S0 + I0 + R0 + E0
days = 100
# Model parameters
contact_rate = 10 # number of contacts per day
transmission_probability = 0.07 # transmission probability
gamma_inv = 5 # infectious period
sigma_inv = 2 # latent period
# Derived Model parameters and Control variable
beta = contact_rate * transmission_probability
gamma = 1 / gamma_inv
sigma = 1 / sigma_inv
r0 = beta / gamma
if sim_num == 1:
D0 = 0
m = 0.0043 # mortality rate
N = S0 + I0 + R0 + E0 + D0
# Values used for the indian armed forces model
# Initial values from March 21st for India test-case
if sim_num == 2:
# Values used for the indian armed forces model
# Initial values from March 21st for India test-case
N = 1375987036 # Number from report
days = 356
gamma_inv = 7
sigma_inv = 5.1
m = 0.0043
r0 = 2.28
tau_q_inv = 14
# Initial values from March 21st "indian armed forces predictions"
R0 = 23
D0 = 5
Q0 = 249 # Q0 is 1% of total infectious; i.e. I0 + Q0 (as described in report)
# In the report table 1, they write number of Quarantined as SO rather than Q0
# Q0, is this a typo?
T0 = 334 # This is the total number of comfirmed cases for March 21st, not used it seems?
I0 = (1.01/0.01) * Q0 # Number of Infectuos as described in report
# The initial number of exposed E(0) is not defined in report, how are they computed?
contact_rate = 10 # number of contacts an individual has per day
E0 = (contact_rate - 1)*I0 # Estimated exposed based on contact rate and inital infected
# Derived Model parameters and
beta = r0 / gamma_inv
sigma = 1.0 / sigma_inv
gamma = 1.0 / gamma_inv
tau_q = 1.0 /tau_q_inv
# Control variable: percentage quarantined
q = 0.01
print('***** Hyper-parameters *****')
print('N=',N,'days=',days, 'r0=',r0, 'gamma_inv (days) = ',gamma_inv)
print('***** Model-parameters *****')
print('beta=',beta,'gamma=', gamma, 'sigma', sigma)
######################################
######## Simulation Functions ########
######################################
# Equation to estimate final epidemic size (infected)
def epi_size(x):
return np.log(x) + r0_test*(1-x)
###################################################
######## SEIR Model simulation Simulation ########
###################################################
if sim_num == 0:
''' Compartment structure of armed forces SEIR model
N = S + E + I + R
'''
# Initial conditions vector
S0 = N - E0 - I0 - R0
y0 = S0, E0, I0, R0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0)
# Simulation Options
seir_type = 0 # SEIR no deaths
solver_type = 1 # ivp - LSODA
else:
''' Compartment structure of armed forces SEIR model with deaths
N = S + E + I + R + D
'''
S0 = N - E0 - I0 - R0 - D0
y0 = S0, E0, I0, R0, D0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0, "D0", D0)
# Simulation Options
seir_type = 1 # SEIR with deaths
solver_type = 1 # ivp - LSODA
# Simulate ODE equations | # Unpack timseries
if sim_num == 0:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
R = sol_ode_timeseries[4]
else:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
Re = sol_ode_timeseries[4]
D = sol_ode_timeseries[5]
R = Re + D
# Accumulated Total Cases
T = I + R
print("t=", t[-1])
print("ST=", S[-1])
print("ET=", E[-1])
print("IT=", I[-1])
print("RT=", R[-1])
print("TT=", T[-1])
if sim_num > 0:
print("DT=", D[-1])
print("ReT=",Re[-1])
# Estimated Final epidemic size (analytic) not-dependent on simulation
init_guess = 0.0001
r0_test = r0
SinfN = fsolve(epi_size, init_guess)
One_SinfN = 1 - SinfN
print('***** Final Epidemic Size *****')
print('r0 = ', r0_test, '1 - Sinf/S0 = ', One_SinfN[0])
print('***** Results *****')
peak_inf_idx = np.argmax(I)
peak_inf = I[peak_inf_idx]
print('Peak Instant. Infected = ', peak_inf,'by day=', peak_inf_idx)
peak_total_inf = T[peak_inf_idx]
print('Total Cases when Peak = ', peak_total_inf,'by day=', peak_inf_idx)
#####################################################################
######## Plots Simulation with point estimates of parameters ########
#####################################################################
# Plot the data on three separate curves for S(t), I(t) and R(t)
fig, ax1 = plt.subplots()
if sim_num > 0:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f}, m={m:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Variable evolution
ax1.plot(t, S/N, 'k', lw=2, label='Susceptible')
ax1.plot(t, E/N, 'm', lw=2, label='Exposed')
ax1.plot(t, I/N, 'r', lw=2, label='Infected')
ax1.plot(t, T/N, 'y', lw=2, label='Total Cases')
if sim_num == 0:
ax1.plot(t, R/N, 'g--', lw=1, label='Recovered')
else:
ax1.plot(t, Re/N, 'g--', lw=1, label='Recovered')
ax1.plot(t, D/N, 'b--', lw=1, label='Dead')
# Plot Final Epidemic Size
ax1.plot(t, One_SinfN*np.ones(len(t)), 'm--')
txt1 = "{per:2.2f} infected"
ax1.text(t[0], One_SinfN - 0.05, txt1.format(per=One_SinfN[0]), fontsize=12, color='m')
# Plot peak points
ax1.plot(peak_inf_idx, peak_inf/N,'ro', markersize=8)
ax1.plot(peak_inf_idx, peak_total_inf/N,'ro', markersize=8)
if sim_num < 2:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f}"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f}"
else:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f} from March 21"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f} from March 21"
ax1.text(peak_inf_idx+10, peak_inf/N, txt_title.format(peak_inf=peak_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
ax1.text(peak_inf_idx+10, peak_total_inf/N, txt_title2.format(peak_total=peak_total_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
# Making things beautiful
ax1.set_xlabel('Time /days', fontsize=12)
ax1.set_ylabel('Percentage of Population', fontsize=12)
ax1.yaxis.set_tick_params(length=0)
ax1.xaxis.set_tick_params(length=0)
ax1.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax1.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax1.spines[spine].set_visible(True)
fig.subplots_adjust(left=.12, bottom=.14, right=.93, top=0.93)
fig.set_size_inches(20.5/2, 14.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.pdf'%sim_num, bbox_inches='tight')
#################################################################
######## Plots Simulation with reproductive/growth rates ########
#################################################################
do_growth = 1
if do_growth:
# Analytic growth rate
effective_Rt = r0 * (S/N)
growth_rates = gamma * (effective_Rt - 1)
####### Plots for Growth Rates #######
fig, (ax1, ax2) = plt.subplots(1,2)
if sim_num > 0:
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Plot of Reproductive rate (number)
ax1.plot(t, effective_Rt, 'k', lw=2, label='Rt (Effective Reproductive Rate)')
ax1.text(t[0] + 0.02, effective_Rt[0] - 0.15,r'${\cal R}_t$', fontsize=10)
ax1.plot(t, 1*np.ones(len(t)), 'r-')
txt1 = "Critical (Rt={per:2.2f})"
ax1.text(t[-1]-50, 1 + 0.01, txt1.format(per=1), fontsize=12, color='r')
ax1.text(t[-1]-50,2.5, r"${\cal R}_t \equiv \left( \frac{S (t) }{N (t) } \right) {\cal R}_0$", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
# Estimations of End of Epidemic
effRT_diff = effective_Rt - 1
ids_less_1 = np.nonzero(effRT_diff < 0)
effRT_crossing = ids_less_1[0][1]
ax1.plot(effRT_crossing, 1,'ro', markersize=12)
ax1.text(effRT_crossing-10, 1-0.2,str(effRT_crossing), fontsize=10, color="r")
ax1.set_ylabel('Rt (Effective Reproductive Rate)', fontsize=12)
ax1.set_xlabel('Time[days]', fontsize=12)
ax1.set_ylim(0,4)
# Plot of temporal growth rate
ax2.plot(t, growth_rates, 'k', lw=2, label='rI (temporal growth rate)')
ax2.text(t[0] + 0.02, growth_rates[0] - 0.02,r'${r}_I(t)$', fontsize=10)
ax2.plot(t, 0*np.ones(len(t)), 'r-')
txt1 = r"Critical ($r_I$={per:2.2f})"
ax2.text(t[-1]-50, 0 + 0.01, txt1.format(per=0), fontsize=12, color='r')
ax2.text(t[-1]-50, 0.2, r"$r_I \equiv \gamma \left[ {\cal R}_t - 1 \right]$", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
ax2.text(t[-1]-50, 0.1, r"$\frac{ dI}{dt} = r_I \, I $", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
ax2.set_ylabel('rI (temporal growth rate)', fontsize=12)
ax2.set_xlabel('Time[days]',fontsize=12)
ax2.set_ylim(-0.2,0.5)
# Estimations of End of Epidemic
rI_diff = growth_rates
ids_less_0 = np.nonzero(rI_diff < 0)
rI_crossing = ids_less_1[0][1]
ax2.plot(rI_crossing, 0,'ro', markersize=12)
ax2.text(rI_crossing-10, 0-0.04,str(rI_crossing), fontsize=10, color="r")
fig.set_size_inches(27.5/2, 12.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSIR_growthRates_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSIR_growthRates_%i.pdf'%sim_num, bbox_inches='tight')
#############################################################
######## Dependence of R0 on Final Epidemic Behavior ########
#############################################################
# Final epidemic size (analytic)
# r0_vals = np.linspace(1,5,100)
# init_guess = 0.0001
# Sinf_N = []
# Sinf_S0 = []
# for ii in range(len(r0_vals)):
# r0_test = r0_vals[ii]
# Sinf_N.append(fsolve(epi_size, init_guess))
# Sinf_S0.append(1 - Sinf_N[ii])
# # Plots
# fig0, ax0 = plt.subplots()
# ax0.plot(r0_vals, Sinf_S0, 'r', lw=2, label='Susceptible')
# ax0.set_ylabel('$1 - S_{\infty}/N$ (percentage of population infected)', fontsize=12)
# ax0.set_xlabel('$R_0$', fontsize=12)
# # Current estimate of Covid R0
# plt.title('Final Size of Epidemic Dependence on $R_0$ estimate',fontsize=15)
# ax0.plot(r0, One_SinfN, 'ko', markersize=5, lw=2)
# # Plot mean
# txt = 'Covid R0({r0:3.3f})'
# ax0.text(r0 - 0.45, One_SinfN + 0.05,txt.format(r0=r0_test), fontsize=10)
# plt.plot([r0]*10,np.linspace(0,One_SinfN,10), color='black')
# txt = "{Sinf:3.3f} Infected"
# ax0.text(1.1, One_SinfN - 0.025,txt.format(Sinf=One_SinfN[0]), fontsize=8)
# plt.plot(np.linspace(1,[r0],10), [One_SinfN]*10, color='black')
# ax0.text(4, 0.75, r"${\cal R}_0 \equiv \frac{ \beta } {\gamma}$", fontsize=15, bbox=dict(facecolor='red', alpha=0.15))
# fig0.set_size_inches(18.5/2, 12.5/2, forward=True)
# plt.savefig('./figures/vanilla/armedSIR_finalSize_%i.png'%sim_num, bbox_inches='tight')
# plt.savefig('./figures/vanilla/armedSIR_finalSize_%i.pdf'%sim_num, bbox_inches='tight')
plt.show() | SEIRparams = N, beta, gamma, sigma
sol_ode_timeseries = simulate_seirModel(seir_type, SEIRparams, solver_type, y0, N, days, 1)
| random_line_split |
ex1a_sim_vanillaSEIR_model_old.py | import numpy as np
from scipy.integrate import odeint
from scipy.integrate import solve_ivp
from scipy.optimize import fsolve
from scipy import stats
import matplotlib.pyplot as plt
from matplotlib import rc
# Importing models and plotting functions
from epimodels.seir import *
rc('font',**{'family':'serif','serif':['Times']})
rc('text', usetex=True)
# Select simulation number
sim_num = 0
############################################
######## Parameters for Simulation ########
############################################
# Simulation for a toy problem for implementation comparison
if sim_num < 2:
# Simulation parameters
S0 = 9990
I0 = 1
R0 = 0
E0 = 9
N = S0 + I0 + R0 + E0
days = 100
# Model parameters
contact_rate = 10 # number of contacts per day
transmission_probability = 0.07 # transmission probability
gamma_inv = 5 # infectious period
sigma_inv = 2 # latent period
# Derived Model parameters and Control variable
beta = contact_rate * transmission_probability
gamma = 1 / gamma_inv
sigma = 1 / sigma_inv
r0 = beta / gamma
if sim_num == 1:
D0 = 0
m = 0.0043 # mortality rate
N = S0 + I0 + R0 + E0 + D0
# Values used for the indian armed forces model
# Initial values from March 21st for India test-case
if sim_num == 2:
# Values used for the indian armed forces model
# Initial values from March 21st for India test-case
N = 1375987036 # Number from report
days = 356
gamma_inv = 7
sigma_inv = 5.1
m = 0.0043
r0 = 2.28
tau_q_inv = 14
# Initial values from March 21st "indian armed forces predictions"
R0 = 23
D0 = 5
Q0 = 249 # Q0 is 1% of total infectious; i.e. I0 + Q0 (as described in report)
# In the report table 1, they write number of Quarantined as SO rather than Q0
# Q0, is this a typo?
T0 = 334 # This is the total number of comfirmed cases for March 21st, not used it seems?
I0 = (1.01/0.01) * Q0 # Number of Infectuos as described in report
# The initial number of exposed E(0) is not defined in report, how are they computed?
contact_rate = 10 # number of contacts an individual has per day
E0 = (contact_rate - 1)*I0 # Estimated exposed based on contact rate and inital infected
# Derived Model parameters and
beta = r0 / gamma_inv
sigma = 1.0 / sigma_inv
gamma = 1.0 / gamma_inv
tau_q = 1.0 /tau_q_inv
# Control variable: percentage quarantined
q = 0.01
print('***** Hyper-parameters *****')
print('N=',N,'days=',days, 'r0=',r0, 'gamma_inv (days) = ',gamma_inv)
print('***** Model-parameters *****')
print('beta=',beta,'gamma=', gamma, 'sigma', sigma)
######################################
######## Simulation Functions ########
######################################
# Equation to estimate final epidemic size (infected)
def epi_size(x):
return np.log(x) + r0_test*(1-x)
###################################################
######## SEIR Model simulation Simulation ########
###################################################
if sim_num == 0:
''' Compartment structure of armed forces SEIR model
N = S + E + I + R
'''
# Initial conditions vector
S0 = N - E0 - I0 - R0
y0 = S0, E0, I0, R0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0)
# Simulation Options
seir_type = 0 # SEIR no deaths
solver_type = 1 # ivp - LSODA
else:
''' Compartment structure of armed forces SEIR model with deaths
N = S + E + I + R + D
'''
S0 = N - E0 - I0 - R0 - D0
y0 = S0, E0, I0, R0, D0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0, "D0", D0)
# Simulation Options
seir_type = 1 # SEIR with deaths
solver_type = 1 # ivp - LSODA
# Simulate ODE equations
SEIRparams = N, beta, gamma, sigma
sol_ode_timeseries = simulate_seirModel(seir_type, SEIRparams, solver_type, y0, N, days, 1)
# Unpack timseries
if sim_num == 0:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
R = sol_ode_timeseries[4]
else:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
Re = sol_ode_timeseries[4]
D = sol_ode_timeseries[5]
R = Re + D
# Accumulated Total Cases
T = I + R
print("t=", t[-1])
print("ST=", S[-1])
print("ET=", E[-1])
print("IT=", I[-1])
print("RT=", R[-1])
print("TT=", T[-1])
if sim_num > 0:
print("DT=", D[-1])
print("ReT=",Re[-1])
# Estimated Final epidemic size (analytic) not-dependent on simulation
init_guess = 0.0001
r0_test = r0
SinfN = fsolve(epi_size, init_guess)
One_SinfN = 1 - SinfN
print('***** Final Epidemic Size *****')
print('r0 = ', r0_test, '1 - Sinf/S0 = ', One_SinfN[0])
print('***** Results *****')
peak_inf_idx = np.argmax(I)
peak_inf = I[peak_inf_idx]
print('Peak Instant. Infected = ', peak_inf,'by day=', peak_inf_idx)
peak_total_inf = T[peak_inf_idx]
print('Total Cases when Peak = ', peak_total_inf,'by day=', peak_inf_idx)
#####################################################################
######## Plots Simulation with point estimates of parameters ########
#####################################################################
# Plot the data on three separate curves for S(t), I(t) and R(t)
fig, ax1 = plt.subplots()
if sim_num > 0:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f}, m={m:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Variable evolution
ax1.plot(t, S/N, 'k', lw=2, label='Susceptible')
ax1.plot(t, E/N, 'm', lw=2, label='Exposed')
ax1.plot(t, I/N, 'r', lw=2, label='Infected')
ax1.plot(t, T/N, 'y', lw=2, label='Total Cases')
if sim_num == 0:
ax1.plot(t, R/N, 'g--', lw=1, label='Recovered')
else:
ax1.plot(t, Re/N, 'g--', lw=1, label='Recovered')
ax1.plot(t, D/N, 'b--', lw=1, label='Dead')
# Plot Final Epidemic Size
ax1.plot(t, One_SinfN*np.ones(len(t)), 'm--')
txt1 = "{per:2.2f} infected"
ax1.text(t[0], One_SinfN - 0.05, txt1.format(per=One_SinfN[0]), fontsize=12, color='m')
# Plot peak points
ax1.plot(peak_inf_idx, peak_inf/N,'ro', markersize=8)
ax1.plot(peak_inf_idx, peak_total_inf/N,'ro', markersize=8)
if sim_num < 2:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f}"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f}"
else:
|
ax1.text(peak_inf_idx+10, peak_inf/N, txt_title.format(peak_inf=peak_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
ax1.text(peak_inf_idx+10, peak_total_inf/N, txt_title2.format(peak_total=peak_total_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
# Making things beautiful
ax1.set_xlabel('Time /days', fontsize=12)
ax1.set_ylabel('Percentage of Population', fontsize=12)
ax1.yaxis.set_tick_params(length=0)
ax1.xaxis.set_tick_params(length=0)
ax1.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax1.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax1.spines[spine].set_visible(True)
fig.subplots_adjust(left=.12, bottom=.14, right=.93, top=0.93)
fig.set_size_inches(20.5/2, 14.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.pdf'%sim_num, bbox_inches='tight')
#################################################################
######## Plots Simulation with reproductive/growth rates ########
#################################################################
do_growth = 1
if do_growth:
# Analytic growth rate
effective_Rt = r0 * (S/N)
growth_rates = gamma * (effective_Rt - 1)
####### Plots for Growth Rates #######
fig, (ax1, ax2) = plt.subplots(1,2)
if sim_num > 0:
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Plot of Reproductive rate (number)
ax1.plot(t, effective_Rt, 'k', lw=2, label='Rt (Effective Reproductive Rate)')
ax1.text(t[0] + 0.02, effective_Rt[0] - 0.15,r'${\cal R}_t$', fontsize=10)
ax1.plot(t, 1*np.ones(len(t)), 'r-')
txt1 = "Critical (Rt={per:2.2f})"
ax1.text(t[-1]-50, 1 + 0.01, txt1.format(per=1), fontsize=12, color='r')
ax1.text(t[-1]-50,2.5, r"${\cal R}_t \equiv \left( \frac{S (t) }{N (t) } \right) {\cal R}_0$", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
# Estimations of End of Epidemic
effRT_diff = effective_Rt - 1
ids_less_1 = np.nonzero(effRT_diff < 0)
effRT_crossing = ids_less_1[0][1]
ax1.plot(effRT_crossing, 1,'ro', markersize=12)
ax1.text(effRT_crossing-10, 1-0.2,str(effRT_crossing), fontsize=10, color="r")
ax1.set_ylabel('Rt (Effective Reproductive Rate)', fontsize=12)
ax1.set_xlabel('Time[days]', fontsize=12)
ax1.set_ylim(0,4)
# Plot of temporal growth rate
ax2.plot(t, growth_rates, 'k', lw=2, label='rI (temporal growth rate)')
ax2.text(t[0] + 0.02, growth_rates[0] - 0.02,r'${r}_I(t)$', fontsize=10)
ax2.plot(t, 0*np.ones(len(t)), 'r-')
txt1 = r"Critical ($r_I$={per:2.2f})"
ax2.text(t[-1]-50, 0 + 0.01, txt1.format(per=0), fontsize=12, color='r')
ax2.text(t[-1]-50, 0.2, r"$r_I \equiv \gamma \left[ {\cal R}_t - 1 \right]$", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
ax2.text(t[-1]-50, 0.1, r"$\frac{ dI}{dt} = r_I \, I $", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
ax2.set_ylabel('rI (temporal growth rate)', fontsize=12)
ax2.set_xlabel('Time[days]',fontsize=12)
ax2.set_ylim(-0.2,0.5)
# Estimations of End of Epidemic
rI_diff = growth_rates
ids_less_0 = np.nonzero(rI_diff < 0)
rI_crossing = ids_less_1[0][1]
ax2.plot(rI_crossing, 0,'ro', markersize=12)
ax2.text(rI_crossing-10, 0-0.04,str(rI_crossing), fontsize=10, color="r")
fig.set_size_inches(27.5/2, 12.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSIR_growthRates_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSIR_growthRates_%i.pdf'%sim_num, bbox_inches='tight')
#############################################################
######## Dependence of R0 on Final Epidemic Behavior ########
#############################################################
# Final epidemic size (analytic)
# r0_vals = np.linspace(1,5,100)
# init_guess = 0.0001
# Sinf_N = []
# Sinf_S0 = []
# for ii in range(len(r0_vals)):
# r0_test = r0_vals[ii]
# Sinf_N.append(fsolve(epi_size, init_guess))
# Sinf_S0.append(1 - Sinf_N[ii])
# # Plots
# fig0, ax0 = plt.subplots()
# ax0.plot(r0_vals, Sinf_S0, 'r', lw=2, label='Susceptible')
# ax0.set_ylabel('$1 - S_{\infty}/N$ (percentage of population infected)', fontsize=12)
# ax0.set_xlabel('$R_0$', fontsize=12)
# # Current estimate of Covid R0
# plt.title('Final Size of Epidemic Dependence on $R_0$ estimate',fontsize=15)
# ax0.plot(r0, One_SinfN, 'ko', markersize=5, lw=2)
# # Plot mean
# txt = 'Covid R0({r0:3.3f})'
# ax0.text(r0 - 0.45, One_SinfN + 0.05,txt.format(r0=r0_test), fontsize=10)
# plt.plot([r0]*10,np.linspace(0,One_SinfN,10), color='black')
# txt = "{Sinf:3.3f} Infected"
# ax0.text(1.1, One_SinfN - 0.025,txt.format(Sinf=One_SinfN[0]), fontsize=8)
# plt.plot(np.linspace(1,[r0],10), [One_SinfN]*10, color='black')
# ax0.text(4, 0.75, r"${\cal R}_0 \equiv \frac{ \beta } {\gamma}$", fontsize=15, bbox=dict(facecolor='red', alpha=0.15))
# fig0.set_size_inches(18.5/2, 12.5/2, forward=True)
# plt.savefig('./figures/vanilla/armedSIR_finalSize_%i.png'%sim_num, bbox_inches='tight')
# plt.savefig('./figures/vanilla/armedSIR_finalSize_%i.pdf'%sim_num, bbox_inches='tight')
plt.show()
| txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f} from March 21"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f} from March 21" | conditional_block |
ex1a_sim_vanillaSEIR_model_old.py | import numpy as np
from scipy.integrate import odeint
from scipy.integrate import solve_ivp
from scipy.optimize import fsolve
from scipy import stats
import matplotlib.pyplot as plt
from matplotlib import rc
# Importing models and plotting functions
from epimodels.seir import *
rc('font',**{'family':'serif','serif':['Times']})
rc('text', usetex=True)
# Select simulation number
sim_num = 0
############################################
######## Parameters for Simulation ########
############################################
# Simulation for a toy problem for implementation comparison
if sim_num < 2:
# Simulation parameters
S0 = 9990
I0 = 1
R0 = 0
E0 = 9
N = S0 + I0 + R0 + E0
days = 100
# Model parameters
contact_rate = 10 # number of contacts per day
transmission_probability = 0.07 # transmission probability
gamma_inv = 5 # infectious period
sigma_inv = 2 # latent period
# Derived Model parameters and Control variable
beta = contact_rate * transmission_probability
gamma = 1 / gamma_inv
sigma = 1 / sigma_inv
r0 = beta / gamma
if sim_num == 1:
D0 = 0
m = 0.0043 # mortality rate
N = S0 + I0 + R0 + E0 + D0
# Values used for the indian armed forces model
# Initial values from March 21st for India test-case
if sim_num == 2:
# Values used for the indian armed forces model
# Initial values from March 21st for India test-case
N = 1375987036 # Number from report
days = 356
gamma_inv = 7
sigma_inv = 5.1
m = 0.0043
r0 = 2.28
tau_q_inv = 14
# Initial values from March 21st "indian armed forces predictions"
R0 = 23
D0 = 5
Q0 = 249 # Q0 is 1% of total infectious; i.e. I0 + Q0 (as described in report)
# In the report table 1, they write number of Quarantined as SO rather than Q0
# Q0, is this a typo?
T0 = 334 # This is the total number of comfirmed cases for March 21st, not used it seems?
I0 = (1.01/0.01) * Q0 # Number of Infectuos as described in report
# The initial number of exposed E(0) is not defined in report, how are they computed?
contact_rate = 10 # number of contacts an individual has per day
E0 = (contact_rate - 1)*I0 # Estimated exposed based on contact rate and inital infected
# Derived Model parameters and
beta = r0 / gamma_inv
sigma = 1.0 / sigma_inv
gamma = 1.0 / gamma_inv
tau_q = 1.0 /tau_q_inv
# Control variable: percentage quarantined
q = 0.01
print('***** Hyper-parameters *****')
print('N=',N,'days=',days, 'r0=',r0, 'gamma_inv (days) = ',gamma_inv)
print('***** Model-parameters *****')
print('beta=',beta,'gamma=', gamma, 'sigma', sigma)
######################################
######## Simulation Functions ########
######################################
# Equation to estimate final epidemic size (infected)
def epi_size(x):
|
###################################################
######## SEIR Model simulation Simulation ########
###################################################
if sim_num == 0:
''' Compartment structure of armed forces SEIR model
N = S + E + I + R
'''
# Initial conditions vector
S0 = N - E0 - I0 - R0
y0 = S0, E0, I0, R0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0)
# Simulation Options
seir_type = 0 # SEIR no deaths
solver_type = 1 # ivp - LSODA
else:
''' Compartment structure of armed forces SEIR model with deaths
N = S + E + I + R + D
'''
S0 = N - E0 - I0 - R0 - D0
y0 = S0, E0, I0, R0, D0
print("S0=",S0, "E0=",E0, "I0=",I0, "R0=",R0, "D0", D0)
# Simulation Options
seir_type = 1 # SEIR with deaths
solver_type = 1 # ivp - LSODA
# Simulate ODE equations
SEIRparams = N, beta, gamma, sigma
sol_ode_timeseries = simulate_seirModel(seir_type, SEIRparams, solver_type, y0, N, days, 1)
# Unpack timseries
if sim_num == 0:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
R = sol_ode_timeseries[4]
else:
t = sol_ode_timeseries[0]
S = sol_ode_timeseries[1]
E = sol_ode_timeseries[2]
I = sol_ode_timeseries[3]
Re = sol_ode_timeseries[4]
D = sol_ode_timeseries[5]
R = Re + D
# Accumulated Total Cases
T = I + R
print("t=", t[-1])
print("ST=", S[-1])
print("ET=", E[-1])
print("IT=", I[-1])
print("RT=", R[-1])
print("TT=", T[-1])
if sim_num > 0:
print("DT=", D[-1])
print("ReT=",Re[-1])
# Estimated Final epidemic size (analytic) not-dependent on simulation
init_guess = 0.0001
r0_test = r0
SinfN = fsolve(epi_size, init_guess)
One_SinfN = 1 - SinfN
print('***** Final Epidemic Size *****')
print('r0 = ', r0_test, '1 - Sinf/S0 = ', One_SinfN[0])
print('***** Results *****')
peak_inf_idx = np.argmax(I)
peak_inf = I[peak_inf_idx]
print('Peak Instant. Infected = ', peak_inf,'by day=', peak_inf_idx)
peak_total_inf = T[peak_inf_idx]
print('Total Cases when Peak = ', peak_total_inf,'by day=', peak_inf_idx)
#####################################################################
######## Plots Simulation with point estimates of parameters ########
#####################################################################
# Plot the data on three separate curves for S(t), I(t) and R(t)
fig, ax1 = plt.subplots()
if sim_num > 0:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f}, m={m:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
txt_title_sim = r"COVID-19 Vanilla SEIR Model Dynamics (N={N:10.0f},$R_0$={R0:1.3f}, $\beta$={beta:1.3f}, 1/$\gamma$={gamma_inv:1.3f}, 1/$\sigma$={sigma_inv:1.3f})"
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Variable evolution
ax1.plot(t, S/N, 'k', lw=2, label='Susceptible')
ax1.plot(t, E/N, 'm', lw=2, label='Exposed')
ax1.plot(t, I/N, 'r', lw=2, label='Infected')
ax1.plot(t, T/N, 'y', lw=2, label='Total Cases')
if sim_num == 0:
ax1.plot(t, R/N, 'g--', lw=1, label='Recovered')
else:
ax1.plot(t, Re/N, 'g--', lw=1, label='Recovered')
ax1.plot(t, D/N, 'b--', lw=1, label='Dead')
# Plot Final Epidemic Size
ax1.plot(t, One_SinfN*np.ones(len(t)), 'm--')
txt1 = "{per:2.2f} infected"
ax1.text(t[0], One_SinfN - 0.05, txt1.format(per=One_SinfN[0]), fontsize=12, color='m')
# Plot peak points
ax1.plot(peak_inf_idx, peak_inf/N,'ro', markersize=8)
ax1.plot(peak_inf_idx, peak_total_inf/N,'ro', markersize=8)
if sim_num < 2:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f}"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f}"
else:
txt_title = r"Peak infected: {peak_inf:5.0f} by day {peak_days:2.0f} from March 21"
txt_title2 = r"Total Cases: {peak_total:5.0f} by day {peak_days:2.0f} from March 21"
ax1.text(peak_inf_idx+10, peak_inf/N, txt_title.format(peak_inf=peak_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
ax1.text(peak_inf_idx+10, peak_total_inf/N, txt_title2.format(peak_total=peak_total_inf, peak_days= peak_inf_idx), fontsize=12, color="r")
# Making things beautiful
ax1.set_xlabel('Time /days', fontsize=12)
ax1.set_ylabel('Percentage of Population', fontsize=12)
ax1.yaxis.set_tick_params(length=0)
ax1.xaxis.set_tick_params(length=0)
ax1.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax1.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax1.spines[spine].set_visible(True)
fig.subplots_adjust(left=.12, bottom=.14, right=.93, top=0.93)
fig.set_size_inches(20.5/2, 14.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSEIR_timeEvolution_%i.pdf'%sim_num, bbox_inches='tight')
#################################################################
######## Plots Simulation with reproductive/growth rates ########
#################################################################
do_growth = 1
if do_growth:
# Analytic growth rate
effective_Rt = r0 * (S/N)
growth_rates = gamma * (effective_Rt - 1)
####### Plots for Growth Rates #######
fig, (ax1, ax2) = plt.subplots(1,2)
if sim_num > 0:
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv, m=m),fontsize=15)
else:
fig.suptitle(txt_title_sim.format(N=N, R0=r0, beta= beta, gamma_inv = gamma_inv, sigma_inv = sigma_inv),fontsize=15)
# Plot of Reproductive rate (number)
ax1.plot(t, effective_Rt, 'k', lw=2, label='Rt (Effective Reproductive Rate)')
ax1.text(t[0] + 0.02, effective_Rt[0] - 0.15,r'${\cal R}_t$', fontsize=10)
ax1.plot(t, 1*np.ones(len(t)), 'r-')
txt1 = "Critical (Rt={per:2.2f})"
ax1.text(t[-1]-50, 1 + 0.01, txt1.format(per=1), fontsize=12, color='r')
ax1.text(t[-1]-50,2.5, r"${\cal R}_t \equiv \left( \frac{S (t) }{N (t) } \right) {\cal R}_0$", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
# Estimations of End of Epidemic
effRT_diff = effective_Rt - 1
ids_less_1 = np.nonzero(effRT_diff < 0)
effRT_crossing = ids_less_1[0][1]
ax1.plot(effRT_crossing, 1,'ro', markersize=12)
ax1.text(effRT_crossing-10, 1-0.2,str(effRT_crossing), fontsize=10, color="r")
ax1.set_ylabel('Rt (Effective Reproductive Rate)', fontsize=12)
ax1.set_xlabel('Time[days]', fontsize=12)
ax1.set_ylim(0,4)
# Plot of temporal growth rate
ax2.plot(t, growth_rates, 'k', lw=2, label='rI (temporal growth rate)')
ax2.text(t[0] + 0.02, growth_rates[0] - 0.02,r'${r}_I(t)$', fontsize=10)
ax2.plot(t, 0*np.ones(len(t)), 'r-')
txt1 = r"Critical ($r_I$={per:2.2f})"
ax2.text(t[-1]-50, 0 + 0.01, txt1.format(per=0), fontsize=12, color='r')
ax2.text(t[-1]-50, 0.2, r"$r_I \equiv \gamma \left[ {\cal R}_t - 1 \right]$", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
ax2.text(t[-1]-50, 0.1, r"$\frac{ dI}{dt} = r_I \, I $", fontsize=15, bbox=dict(facecolor='red', alpha=0.2))
ax2.set_ylabel('rI (temporal growth rate)', fontsize=12)
ax2.set_xlabel('Time[days]',fontsize=12)
ax2.set_ylim(-0.2,0.5)
# Estimations of End of Epidemic
rI_diff = growth_rates
ids_less_0 = np.nonzero(rI_diff < 0)
rI_crossing = ids_less_1[0][1]
ax2.plot(rI_crossing, 0,'ro', markersize=12)
ax2.text(rI_crossing-10, 0-0.04,str(rI_crossing), fontsize=10, color="r")
fig.set_size_inches(27.5/2, 12.5/2, forward=True)
plt.savefig('./figures/vanilla/vanillaSIR_growthRates_%i.png'%sim_num, bbox_inches='tight')
plt.savefig('./figures/vanilla/vanillaSIR_growthRates_%i.pdf'%sim_num, bbox_inches='tight')
#############################################################
######## Dependence of R0 on Final Epidemic Behavior ########
#############################################################
# Final epidemic size (analytic)
# r0_vals = np.linspace(1,5,100)
# init_guess = 0.0001
# Sinf_N = []
# Sinf_S0 = []
# for ii in range(len(r0_vals)):
# r0_test = r0_vals[ii]
# Sinf_N.append(fsolve(epi_size, init_guess))
# Sinf_S0.append(1 - Sinf_N[ii])
# # Plots
# fig0, ax0 = plt.subplots()
# ax0.plot(r0_vals, Sinf_S0, 'r', lw=2, label='Susceptible')
# ax0.set_ylabel('$1 - S_{\infty}/N$ (percentage of population infected)', fontsize=12)
# ax0.set_xlabel('$R_0$', fontsize=12)
# # Current estimate of Covid R0
# plt.title('Final Size of Epidemic Dependence on $R_0$ estimate',fontsize=15)
# ax0.plot(r0, One_SinfN, 'ko', markersize=5, lw=2)
# # Plot mean
# txt = 'Covid R0({r0:3.3f})'
# ax0.text(r0 - 0.45, One_SinfN + 0.05,txt.format(r0=r0_test), fontsize=10)
# plt.plot([r0]*10,np.linspace(0,One_SinfN,10), color='black')
# txt = "{Sinf:3.3f} Infected"
# ax0.text(1.1, One_SinfN - 0.025,txt.format(Sinf=One_SinfN[0]), fontsize=8)
# plt.plot(np.linspace(1,[r0],10), [One_SinfN]*10, color='black')
# ax0.text(4, 0.75, r"${\cal R}_0 \equiv \frac{ \beta } {\gamma}$", fontsize=15, bbox=dict(facecolor='red', alpha=0.15))
# fig0.set_size_inches(18.5/2, 12.5/2, forward=True)
# plt.savefig('./figures/vanilla/armedSIR_finalSize_%i.png'%sim_num, bbox_inches='tight')
# plt.savefig('./figures/vanilla/armedSIR_finalSize_%i.pdf'%sim_num, bbox_inches='tight')
plt.show()
| return np.log(x) + r0_test*(1-x) | identifier_body |
clustering.py | #!/usr/bin/env python
# coding: utf-8
"""
Author: Mahsa Khalili
Date: 2021 April 15th
Purpose: This Python script prepare IMU data for terrain classification.
"""
import os
import glob
import pathlib
import random
import numpy as np
import pandas as pd
import pickle
import joblib
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import itertools
from scipy import linalg
import matplotlib as mpl
from matplotlib.lines import Line2D
from sklearn.preprocessing import StandardScaler
from sklearn import mixture
| # list of all maneuvers
maneuvers = ['Obstacles15', 'Obstacles35', 'RampA', 'StraightF', 'Turn90FR', 'Turn90FL', 'Turn180L', 'Turn180R']
# choose the feature subsets for clustering
featureSet_list = ['ALL', 'ALL_TORQUE', '2D_TORQUE', 'LR_TORQUE', 'LR_TORQUE_MEAN', '2D_TORQUE_MEAN']
dataset_to_import = 'featured_data' # choose dataset/datasets to import
featured_columns = ['AngVel_L', 'AngVel_R', 'Chair_LinVel', 'Chair_AngVel', 'Torque_L', 'Torque_R',
'Torque_sum', 'Torque_diff', 'Torque_L_roc', 'Torque_R_roc']
time_features = ['Mean', 'Std', 'Max', 'Min', 'RMS']
# clustering model parameters
clus_params = {'covar_types': 'full', 'n_components': 6, 'feat_list': 'ALL_TORQUE'}
# path to save labeled data and corresponding figures
CURR_PATH = os.path.abspath('.')
# Import processed data
dataset_paths = glob.glob(os.path.join(CURR_PATH, dataset_to_import, USER, 'WinSize' + str(WIN_SIZE), '*.csv'))
# create a color pallette
cmap = matplotlib.cm.get_cmap('tab10')
def import_func(path_):
""" function to import featured datasets"""
datasets_dic = {}
for dataset_path in path_:
# Parse labels from filenames
dataset_label = os.path.split(dataset_path)[1].split('.')[0]
# Read from csv to Pandas
dataset = pd.read_csv(dataset_path)
# insert dataset label to the dataframes
dataset.insert(0, 'trial', dataset_label)
dataset.insert(0, 'maneuver', dataset_label.split('_')[0])
# Datasets are stored in a dictionary
datasets_dic.update({dataset_label: dataset})
# list of imported maneuvers
dataset_names = list(datasets_dic.keys())
return datasets_dic, dataset_names
def prep_func(data_dic):
"""Prepare dataframes for clustering"""
df_all = pd.DataFrame(columns=datasets[dataset_labels[0]].columns.tolist())
# combine desired datasets into one dataframe
for label in dataset_labels:
df_all = pd.concat([df_all, data_dic[label]], ignore_index=True)
df_all_columns = df_all.copy() # keep a copy of the original dataframes before dropping the trial names
# dropping unused columns/features
for col in ['Time', 'trial', 'maneuver']:
if col in df_all.columns:
df_all = df_all.drop(columns=[col])
columns_all = df_all.columns.tolist()
columns_torque = [col for col in df_all.columns.tolist() if 'Torque' in col] # all torque data
# all torque features except for roc (mean/std/... & left/right/sum/diff)
columns_2d_torque = [col for col in df_all.columns.tolist()
if 'Torque_sum' in col or 'Torque_diff' in col and 'roc' not in col]
# all torque features of left and right only (mean/std/... & left/right)
columns_lr_torque = [col for col in df_all.columns.tolist()
if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]
columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only
columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque left and right only
# dictionary of list of feature subsets to be used for dimension_reduction or clustering
featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,
'2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,
'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}
# Standardize features by removing the mean and scaling to unit variance
scaler = StandardScaler()
feat_all_stand = scaler.fit_transform(df_all.values)
df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset
return df_all_stand, df_all_columns, featureSet_dic
def clus_func(df_all, n_components, feat_subset):
"""
function to cluster and evaluate the clustering performance
input: dataframe consisting of different maneuvers to be clustered, feature sets to be used for clustering,
and the clustering model
output: labeled dataframe and three performance measures
"""
df = df_all[featureSet_dic[feat_subset]].copy()
X = df.values
# # Fit a Gaussian mixture with EM
# gmm_model = mixture.GaussianMixture(n_components=n_components,
# covariance_type=cv_type,
# random_state=1,
# n_init=10)
# gmm_model = gmm_model.fit(X)
model_path = os.path.join(CURR_PATH, 'clustering_model') # create directiry for the current time
model_name = os.path.join(model_path, 'gmm.joblib')
gmm_model = joblib.load(model_name)
# predic labels & probabilities
labels = gmm_model.predict(X)
labels_prob = gmm_model.predict_proba(X)
# adding all droped features (for plotting purposes) of the standardized dataframe
added_feat = [feat for feat in data_columns if feat not in df.columns]
df[added_feat] = df_all_stand[added_feat].copy()
df = df[data_columns]
# adding the labels to the dataframe
df.insert(0, 'Clus_label', labels)
for n in range(n_components):
df['Prob_L'+str(n)] = labels_prob[:, n]
return gmm_model, df # export all gmm models and a dictionary of all labeled datasets
def labeling_func(df_clus):
""" add all cluster labels to the original dataframe """
df_all_labeled = df_all_columns.copy()
df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()
df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)
for i in range(0, clus_params['n_components']):
df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()
return df_all_labeled
def plt_gm_clusters(df_all, model):
"""this function gets unlabeled scaled dataframe and predict labels + plotting cluster ellips"""
# color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])
color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])
df = df_all[featureSet_dic[clus_params['feat_list']]].copy()
XX = df.values
Y_ = model.predict(XX) # predict labels for each model
plt.figure(figsize=(8, 6))
splot = plt.subplot(1, 1, 1)
for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):
if "MEAN" in clus_params['feat_list']:
v, w = linalg.eigh(cov)
else:
subset = [0, 5] # mean torque L & R
v, w = linalg.eigh(cov[np.ix_(subset, subset)])
mean = np.array([mean[0], mean[5]])
if not np.any(Y_ == i):
continue
if "MEAN" in clus_params['feat_list']:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)
else:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
def range_dic_(df_):
"""
get the start index of each maneuver from the original dataframe
"""
range_dic = {}
for man in df_['maneuver']:
trial_indx = df_.index[df_['maneuver'] == man].tolist()
range_ = (min(trial_indx), max(trial_indx))
range_dic.update({man: range_})
return range_dic
def plt_ts_cluster(df_, features_to_plot):
"""
input: input original dataframe (with maneuver columns), clustered dataframe, number of clusteres,
and selected features to plot
output: plotting clustered time series data with different colors
"""
df_clus = df_.copy()
plt_num = 2
fig, axs = plt.subplots(plt_num, 1, figsize=(15, 12), constrained_layout=True)
axs = axs.ravel()
states = df_clus['Clus_label']
clusterNum = clus_params['n_components']
color_dict = {i:cmap(i) for i in range(clusterNum)}
color_array = [color_dict[i] for i in states]
for i, feature in enumerate(features_to_plot):
axs[i].scatter(df_clus.index, df_clus[feature], c=color_array, s=10)
axs[i].set_xlim([-1, len(df_clus)+1])
axs[i].tick_params(direction='out', labelsize=15)
axs[i].yaxis.grid(True)
if 'Torque' in feature:
axs[i].set_ylabel(feature + ' (Nm)', fontsize=15)
elif 'Lin' in feature:
axs[i].set_ylabel(feature + ' (m/s)', fontsize=15)
elif 'Ang' in feature:
axs[i].set_ylabel(feature + ' (rad/s)', fontsize=15)
fig.suptitle(clus_params['feat_list'], fontsize=16)
range_dic = range_dic_(df_clus)
for trial, range_ in range_dic.items():
axs[0].text(range_[0], axs[0].get_ylim()[1]+0.2, trial, fontsize=15, rotation=45)
for i in range(plt_num):
axs[i].axvline(x=range_[0], linestyle='--', linewidth=0.5)
plt.show()
# function to plot clusters in time series data
def plt_ts_cluster_subset(df_, features_to_plot, man_list=maneuvers):
"""
input: input original dataframe (with maneuver columns), clustered dataframe, number of clusteres,
and selected features to plot
output: plotting clustered time series data with different colors
"""
clusterNum = clus_params['n_components']
color_dict = {i: cmap(i) for i in range(clusterNum)}
figsize = (15, 15)
legend_size = 15
if len(man_list) == 1:
figsize = (15, 8)
fig, axs = plt.subplots(len(man_list), 1, figsize=figsize, constrained_layout=True)
fig.suptitle(clus_params['feat_list'], fontsize=16)
if len(man_list) != 1:
axs = axs.ravel()
for i, wheelchair_man in enumerate(man_list):
df_clus = df_.loc[df_['maneuver'] == wheelchair_man].copy()
df_clus = df_clus.reset_index()
states = df_clus['Clus_label']
color_array = [color_dict[i] for i in states]
if len(man_list) != 1:
axs[i].scatter(df_clus.index, df_clus[features_to_plot[0]], c=color_array, s=16)
axs[i].scatter(df_clus.index, df_clus[features_to_plot[1]], c=color_array, s=16, alpha=0.7, marker='>')
axs[i].tick_params(direction='out', labelsize=15)
axs[i].set_ylabel('Torque (Nm)', fontsize=15)
axs[i].set_title(wheelchair_man)
axs[i].yaxis.grid(True)
axs[i].set_xlim([-1, len(df_clus)+1])
legend_elements = [Line2D([0], [0], marker='>', color='w', label='Right',
markerfacecolor='k', markersize=15),
Line2D([0], [0], marker='o', color='w', label='Left',
markerfacecolor='k', markersize=15)]
axs[i].legend(handles=legend_elements, fontsize=legend_size)
else:
axs.scatter(df_clus.index, df_clus[features_to_plot[0]], c=color_array, s=16)
axs.scatter(df_clus.index, df_clus[features_to_plot[1]], c=color_array, s=16, alpha=0.7, marker='>')
axs.tick_params(direction='out', labelsize=15)
axs.set_ylabel('Torque (Nm)', fontsize=15)
axs.set_title(wheelchair_man)
axs.yaxis.grid(True)
axs.set_xlim([-1, len(df_clus)+1])
legend_elements = [Line2D([0], [0], marker='>', color='w', label='Right',
markerfacecolor='k', markersize=15),
Line2D([0], [0], marker='o', color='w', label='Left',
markerfacecolor='k', markersize=15)]
axs.legend(handles=legend_elements, fontsize=legend_size)
plt.show()
datasets, dataset_labels = import_func(dataset_paths)
data_columns = [col for col in datasets[dataset_labels[0]].columns if col != 'trial' and
col != 'Time' and col != 'maneuver'] # get columns/features of the imported datasets
df_all_stand, df_all_columns, featureSet_dic = prep_func(datasets)
# run the cluster function or import trained model
models, df_clus = clus_func(df_all_stand.copy(),
clus_params['n_components'],
clus_params['feat_list'])
df_labeled = labeling_func(df_clus) # adding labels to all datastes
plt_gm_clusters(df_all_stand.copy(), models) # plotting cluster over torque left/right
plt_ts_cluster(df_labeled, ['Mean Torque_L', 'Mean Torque_R']) # plotting all labeled data in a time series format
# plotting a subset of selected labeled maneuvers in a time series format
plt_ts_cluster_subset(df_labeled, ['Mean Torque_L', 'Mean Torque_R'], ['StraightF'])
# if SAVE_DATA:
processed_path = os.path.join(CURR_PATH, 'labeled_data')
pathlib.Path(processed_path).mkdir(parents=True, exist_ok=True)
filename = "gmm_labels.csv"
filename = os.path.join(processed_path, filename)
df_labeled.to_csv(filename, index=False)
print("SUCCESSFULLY EXECUTED!!!!") |
# DEFINITIONS
USER = 'Mahsa' # ['Mahsa', 'Jaimie'] # participant name
WIN_SIZE = 32 # window size
| random_line_split |
clustering.py | #!/usr/bin/env python
# coding: utf-8
"""
Author: Mahsa Khalili
Date: 2021 April 15th
Purpose: This Python script prepare IMU data for terrain classification.
"""
import os
import glob
import pathlib
import random
import numpy as np
import pandas as pd
import pickle
import joblib
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import itertools
from scipy import linalg
import matplotlib as mpl
from matplotlib.lines import Line2D
from sklearn.preprocessing import StandardScaler
from sklearn import mixture
# DEFINITIONS
USER = 'Mahsa' # ['Mahsa', 'Jaimie'] # participant name
WIN_SIZE = 32 # window size
# list of all maneuvers
maneuvers = ['Obstacles15', 'Obstacles35', 'RampA', 'StraightF', 'Turn90FR', 'Turn90FL', 'Turn180L', 'Turn180R']
# choose the feature subsets for clustering
featureSet_list = ['ALL', 'ALL_TORQUE', '2D_TORQUE', 'LR_TORQUE', 'LR_TORQUE_MEAN', '2D_TORQUE_MEAN']
dataset_to_import = 'featured_data' # choose dataset/datasets to import
featured_columns = ['AngVel_L', 'AngVel_R', 'Chair_LinVel', 'Chair_AngVel', 'Torque_L', 'Torque_R',
'Torque_sum', 'Torque_diff', 'Torque_L_roc', 'Torque_R_roc']
time_features = ['Mean', 'Std', 'Max', 'Min', 'RMS']
# clustering model parameters
clus_params = {'covar_types': 'full', 'n_components': 6, 'feat_list': 'ALL_TORQUE'}
# path to save labeled data and corresponding figures
CURR_PATH = os.path.abspath('.')
# Import processed data
dataset_paths = glob.glob(os.path.join(CURR_PATH, dataset_to_import, USER, 'WinSize' + str(WIN_SIZE), '*.csv'))
# create a color pallette
cmap = matplotlib.cm.get_cmap('tab10')
def import_func(path_):
""" function to import featured datasets"""
datasets_dic = {}
for dataset_path in path_:
# Parse labels from filenames
dataset_label = os.path.split(dataset_path)[1].split('.')[0]
# Read from csv to Pandas
dataset = pd.read_csv(dataset_path)
# insert dataset label to the dataframes
dataset.insert(0, 'trial', dataset_label)
dataset.insert(0, 'maneuver', dataset_label.split('_')[0])
# Datasets are stored in a dictionary
datasets_dic.update({dataset_label: dataset})
# list of imported maneuvers
dataset_names = list(datasets_dic.keys())
return datasets_dic, dataset_names
def prep_func(data_dic):
"""Prepare dataframes for clustering"""
df_all = pd.DataFrame(columns=datasets[dataset_labels[0]].columns.tolist())
# combine desired datasets into one dataframe
for label in dataset_labels:
df_all = pd.concat([df_all, data_dic[label]], ignore_index=True)
df_all_columns = df_all.copy() # keep a copy of the original dataframes before dropping the trial names
# dropping unused columns/features
for col in ['Time', 'trial', 'maneuver']:
if col in df_all.columns:
df_all = df_all.drop(columns=[col])
columns_all = df_all.columns.tolist()
columns_torque = [col for col in df_all.columns.tolist() if 'Torque' in col] # all torque data
# all torque features except for roc (mean/std/... & left/right/sum/diff)
columns_2d_torque = [col for col in df_all.columns.tolist()
if 'Torque_sum' in col or 'Torque_diff' in col and 'roc' not in col]
# all torque features of left and right only (mean/std/... & left/right)
columns_lr_torque = [col for col in df_all.columns.tolist()
if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]
columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only
columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque left and right only
# dictionary of list of feature subsets to be used for dimension_reduction or clustering
featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,
'2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,
'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}
# Standardize features by removing the mean and scaling to unit variance
scaler = StandardScaler()
feat_all_stand = scaler.fit_transform(df_all.values)
df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset
return df_all_stand, df_all_columns, featureSet_dic
def clus_func(df_all, n_components, feat_subset):
"""
function to cluster and evaluate the clustering performance
input: dataframe consisting of different maneuvers to be clustered, feature sets to be used for clustering,
and the clustering model
output: labeled dataframe and three performance measures
"""
df = df_all[featureSet_dic[feat_subset]].copy()
X = df.values
# # Fit a Gaussian mixture with EM
# gmm_model = mixture.GaussianMixture(n_components=n_components,
# covariance_type=cv_type,
# random_state=1,
# n_init=10)
# gmm_model = gmm_model.fit(X)
model_path = os.path.join(CURR_PATH, 'clustering_model') # create directiry for the current time
model_name = os.path.join(model_path, 'gmm.joblib')
gmm_model = joblib.load(model_name)
# predic labels & probabilities
labels = gmm_model.predict(X)
labels_prob = gmm_model.predict_proba(X)
# adding all droped features (for plotting purposes) of the standardized dataframe
added_feat = [feat for feat in data_columns if feat not in df.columns]
df[added_feat] = df_all_stand[added_feat].copy()
df = df[data_columns]
# adding the labels to the dataframe
df.insert(0, 'Clus_label', labels)
for n in range(n_components):
df['Prob_L'+str(n)] = labels_prob[:, n]
return gmm_model, df # export all gmm models and a dictionary of all labeled datasets
def | (df_clus):
""" add all cluster labels to the original dataframe """
df_all_labeled = df_all_columns.copy()
df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()
df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)
for i in range(0, clus_params['n_components']):
df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()
return df_all_labeled
def plt_gm_clusters(df_all, model):
"""this function gets unlabeled scaled dataframe and predict labels + plotting cluster ellips"""
# color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])
color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])
df = df_all[featureSet_dic[clus_params['feat_list']]].copy()
XX = df.values
Y_ = model.predict(XX) # predict labels for each model
plt.figure(figsize=(8, 6))
splot = plt.subplot(1, 1, 1)
for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):
if "MEAN" in clus_params['feat_list']:
v, w = linalg.eigh(cov)
else:
subset = [0, 5] # mean torque L & R
v, w = linalg.eigh(cov[np.ix_(subset, subset)])
mean = np.array([mean[0], mean[5]])
if not np.any(Y_ == i):
continue
if "MEAN" in clus_params['feat_list']:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)
else:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
def range_dic_(df_):
"""
get the start index of each maneuver from the original dataframe
"""
range_dic = {}
for man in df_['maneuver']:
trial_indx = df_.index[df_['maneuver'] == man].tolist()
range_ = (min(trial_indx), max(trial_indx))
range_dic.update({man: range_})
return range_dic
def plt_ts_cluster(df_, features_to_plot):
"""
input: input original dataframe (with maneuver columns), clustered dataframe, number of clusteres,
and selected features to plot
output: plotting clustered time series data with different colors
"""
df_clus = df_.copy()
plt_num = 2
fig, axs = plt.subplots(plt_num, 1, figsize=(15, 12), constrained_layout=True)
axs = axs.ravel()
states = df_clus['Clus_label']
clusterNum = clus_params['n_components']
color_dict = {i:cmap(i) for i in range(clusterNum)}
color_array = [color_dict[i] for i in states]
for i, feature in enumerate(features_to_plot):
axs[i].scatter(df_clus.index, df_clus[feature], c=color_array, s=10)
axs[i].set_xlim([-1, len(df_clus)+1])
axs[i].tick_params(direction='out', labelsize=15)
axs[i].yaxis.grid(True)
if 'Torque' in feature:
axs[i].set_ylabel(feature + ' (Nm)', fontsize=15)
elif 'Lin' in feature:
axs[i].set_ylabel(feature + ' (m/s)', fontsize=15)
elif 'Ang' in feature:
axs[i].set_ylabel(feature + ' (rad/s)', fontsize=15)
fig.suptitle(clus_params['feat_list'], fontsize=16)
range_dic = range_dic_(df_clus)
for trial, range_ in range_dic.items():
axs[0].text(range_[0], axs[0].get_ylim()[1]+0.2, trial, fontsize=15, rotation=45)
for i in range(plt_num):
axs[i].axvline(x=range_[0], linestyle='--', linewidth=0.5)
plt.show()
# function to plot clusters in time series data
def plt_ts_cluster_subset(df_, features_to_plot, man_list=maneuvers):
"""
input: input original dataframe (with maneuver columns), clustered dataframe, number of clusteres,
and selected features to plot
output: plotting clustered time series data with different colors
"""
clusterNum = clus_params['n_components']
color_dict = {i: cmap(i) for i in range(clusterNum)}
figsize = (15, 15)
legend_size = 15
if len(man_list) == 1:
figsize = (15, 8)
fig, axs = plt.subplots(len(man_list), 1, figsize=figsize, constrained_layout=True)
fig.suptitle(clus_params['feat_list'], fontsize=16)
if len(man_list) != 1:
axs = axs.ravel()
for i, wheelchair_man in enumerate(man_list):
df_clus = df_.loc[df_['maneuver'] == wheelchair_man].copy()
df_clus = df_clus.reset_index()
states = df_clus['Clus_label']
color_array = [color_dict[i] for i in states]
if len(man_list) != 1:
axs[i].scatter(df_clus.index, df_clus[features_to_plot[0]], c=color_array, s=16)
axs[i].scatter(df_clus.index, df_clus[features_to_plot[1]], c=color_array, s=16, alpha=0.7, marker='>')
axs[i].tick_params(direction='out', labelsize=15)
axs[i].set_ylabel('Torque (Nm)', fontsize=15)
axs[i].set_title(wheelchair_man)
axs[i].yaxis.grid(True)
axs[i].set_xlim([-1, len(df_clus)+1])
legend_elements = [Line2D([0], [0], marker='>', color='w', label='Right',
markerfacecolor='k', markersize=15),
Line2D([0], [0], marker='o', color='w', label='Left',
markerfacecolor='k', markersize=15)]
axs[i].legend(handles=legend_elements, fontsize=legend_size)
else:
axs.scatter(df_clus.index, df_clus[features_to_plot[0]], c=color_array, s=16)
axs.scatter(df_clus.index, df_clus[features_to_plot[1]], c=color_array, s=16, alpha=0.7, marker='>')
axs.tick_params(direction='out', labelsize=15)
axs.set_ylabel('Torque (Nm)', fontsize=15)
axs.set_title(wheelchair_man)
axs.yaxis.grid(True)
axs.set_xlim([-1, len(df_clus)+1])
legend_elements = [Line2D([0], [0], marker='>', color='w', label='Right',
markerfacecolor='k', markersize=15),
Line2D([0], [0], marker='o', color='w', label='Left',
markerfacecolor='k', markersize=15)]
axs.legend(handles=legend_elements, fontsize=legend_size)
plt.show()
datasets, dataset_labels = import_func(dataset_paths)
data_columns = [col for col in datasets[dataset_labels[0]].columns if col != 'trial' and
col != 'Time' and col != 'maneuver'] # get columns/features of the imported datasets
df_all_stand, df_all_columns, featureSet_dic = prep_func(datasets)
# run the cluster function or import trained model
models, df_clus = clus_func(df_all_stand.copy(),
clus_params['n_components'],
clus_params['feat_list'])
df_labeled = labeling_func(df_clus) # adding labels to all datastes
plt_gm_clusters(df_all_stand.copy(), models) # plotting cluster over torque left/right
plt_ts_cluster(df_labeled, ['Mean Torque_L', 'Mean Torque_R']) # plotting all labeled data in a time series format
# plotting a subset of selected labeled maneuvers in a time series format
plt_ts_cluster_subset(df_labeled, ['Mean Torque_L', 'Mean Torque_R'], ['StraightF'])
# if SAVE_DATA:
processed_path = os.path.join(CURR_PATH, 'labeled_data')
pathlib.Path(processed_path).mkdir(parents=True, exist_ok=True)
filename = "gmm_labels.csv"
filename = os.path.join(processed_path, filename)
df_labeled.to_csv(filename, index=False)
print("SUCCESSFULLY EXECUTED!!!!")
| labeling_func | identifier_name |
clustering.py | #!/usr/bin/env python
# coding: utf-8
"""
Author: Mahsa Khalili
Date: 2021 April 15th
Purpose: This Python script prepare IMU data for terrain classification.
"""
import os
import glob
import pathlib
import random
import numpy as np
import pandas as pd
import pickle
import joblib
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import itertools
from scipy import linalg
import matplotlib as mpl
from matplotlib.lines import Line2D
from sklearn.preprocessing import StandardScaler
from sklearn import mixture
# DEFINITIONS
USER = 'Mahsa' # ['Mahsa', 'Jaimie'] # participant name
WIN_SIZE = 32 # window size
# list of all maneuvers
maneuvers = ['Obstacles15', 'Obstacles35', 'RampA', 'StraightF', 'Turn90FR', 'Turn90FL', 'Turn180L', 'Turn180R']
# choose the feature subsets for clustering
featureSet_list = ['ALL', 'ALL_TORQUE', '2D_TORQUE', 'LR_TORQUE', 'LR_TORQUE_MEAN', '2D_TORQUE_MEAN']
dataset_to_import = 'featured_data' # choose dataset/datasets to import
featured_columns = ['AngVel_L', 'AngVel_R', 'Chair_LinVel', 'Chair_AngVel', 'Torque_L', 'Torque_R',
'Torque_sum', 'Torque_diff', 'Torque_L_roc', 'Torque_R_roc']
time_features = ['Mean', 'Std', 'Max', 'Min', 'RMS']
# clustering model parameters
clus_params = {'covar_types': 'full', 'n_components': 6, 'feat_list': 'ALL_TORQUE'}
# path to save labeled data and corresponding figures
CURR_PATH = os.path.abspath('.')
# Import processed data
dataset_paths = glob.glob(os.path.join(CURR_PATH, dataset_to_import, USER, 'WinSize' + str(WIN_SIZE), '*.csv'))
# create a color pallette
cmap = matplotlib.cm.get_cmap('tab10')
def import_func(path_):
""" function to import featured datasets"""
datasets_dic = {}
for dataset_path in path_:
# Parse labels from filenames
dataset_label = os.path.split(dataset_path)[1].split('.')[0]
# Read from csv to Pandas
dataset = pd.read_csv(dataset_path)
# insert dataset label to the dataframes
dataset.insert(0, 'trial', dataset_label)
dataset.insert(0, 'maneuver', dataset_label.split('_')[0])
# Datasets are stored in a dictionary
datasets_dic.update({dataset_label: dataset})
# list of imported maneuvers
dataset_names = list(datasets_dic.keys())
return datasets_dic, dataset_names
def prep_func(data_dic):
"""Prepare dataframes for clustering"""
df_all = pd.DataFrame(columns=datasets[dataset_labels[0]].columns.tolist())
# combine desired datasets into one dataframe
for label in dataset_labels:
df_all = pd.concat([df_all, data_dic[label]], ignore_index=True)
df_all_columns = df_all.copy() # keep a copy of the original dataframes before dropping the trial names
# dropping unused columns/features
for col in ['Time', 'trial', 'maneuver']:
if col in df_all.columns:
df_all = df_all.drop(columns=[col])
columns_all = df_all.columns.tolist()
columns_torque = [col for col in df_all.columns.tolist() if 'Torque' in col] # all torque data
# all torque features except for roc (mean/std/... & left/right/sum/diff)
columns_2d_torque = [col for col in df_all.columns.tolist()
if 'Torque_sum' in col or 'Torque_diff' in col and 'roc' not in col]
# all torque features of left and right only (mean/std/... & left/right)
columns_lr_torque = [col for col in df_all.columns.tolist()
if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]
columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only
columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque left and right only
# dictionary of list of feature subsets to be used for dimension_reduction or clustering
featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,
'2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,
'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}
# Standardize features by removing the mean and scaling to unit variance
scaler = StandardScaler()
feat_all_stand = scaler.fit_transform(df_all.values)
df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset
return df_all_stand, df_all_columns, featureSet_dic
def clus_func(df_all, n_components, feat_subset):
"""
function to cluster and evaluate the clustering performance
input: dataframe consisting of different maneuvers to be clustered, feature sets to be used for clustering,
and the clustering model
output: labeled dataframe and three performance measures
"""
df = df_all[featureSet_dic[feat_subset]].copy()
X = df.values
# # Fit a Gaussian mixture with EM
# gmm_model = mixture.GaussianMixture(n_components=n_components,
# covariance_type=cv_type,
# random_state=1,
# n_init=10)
# gmm_model = gmm_model.fit(X)
model_path = os.path.join(CURR_PATH, 'clustering_model') # create directiry for the current time
model_name = os.path.join(model_path, 'gmm.joblib')
gmm_model = joblib.load(model_name)
# predic labels & probabilities
labels = gmm_model.predict(X)
labels_prob = gmm_model.predict_proba(X)
# adding all droped features (for plotting purposes) of the standardized dataframe
added_feat = [feat for feat in data_columns if feat not in df.columns]
df[added_feat] = df_all_stand[added_feat].copy()
df = df[data_columns]
# adding the labels to the dataframe
df.insert(0, 'Clus_label', labels)
for n in range(n_components):
df['Prob_L'+str(n)] = labels_prob[:, n]
return gmm_model, df # export all gmm models and a dictionary of all labeled datasets
def labeling_func(df_clus):
""" add all cluster labels to the original dataframe """
df_all_labeled = df_all_columns.copy()
df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()
df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)
for i in range(0, clus_params['n_components']):
df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()
return df_all_labeled
def plt_gm_clusters(df_all, model):
"""this function gets unlabeled scaled dataframe and predict labels + plotting cluster ellips"""
# color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])
color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])
df = df_all[featureSet_dic[clus_params['feat_list']]].copy()
XX = df.values
Y_ = model.predict(XX) # predict labels for each model
plt.figure(figsize=(8, 6))
splot = plt.subplot(1, 1, 1)
for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):
if "MEAN" in clus_params['feat_list']:
v, w = linalg.eigh(cov)
else:
subset = [0, 5] # mean torque L & R
v, w = linalg.eigh(cov[np.ix_(subset, subset)])
mean = np.array([mean[0], mean[5]])
if not np.any(Y_ == i):
continue
if "MEAN" in clus_params['feat_list']:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)
else:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
def range_dic_(df_):
"""
get the start index of each maneuver from the original dataframe
"""
range_dic = {}
for man in df_['maneuver']:
trial_indx = df_.index[df_['maneuver'] == man].tolist()
range_ = (min(trial_indx), max(trial_indx))
range_dic.update({man: range_})
return range_dic
def plt_ts_cluster(df_, features_to_plot):
"""
input: input original dataframe (with maneuver columns), clustered dataframe, number of clusteres,
and selected features to plot
output: plotting clustered time series data with different colors
"""
df_clus = df_.copy()
plt_num = 2
fig, axs = plt.subplots(plt_num, 1, figsize=(15, 12), constrained_layout=True)
axs = axs.ravel()
states = df_clus['Clus_label']
clusterNum = clus_params['n_components']
color_dict = {i:cmap(i) for i in range(clusterNum)}
color_array = [color_dict[i] for i in states]
for i, feature in enumerate(features_to_plot):
axs[i].scatter(df_clus.index, df_clus[feature], c=color_array, s=10)
axs[i].set_xlim([-1, len(df_clus)+1])
axs[i].tick_params(direction='out', labelsize=15)
axs[i].yaxis.grid(True)
if 'Torque' in feature:
axs[i].set_ylabel(feature + ' (Nm)', fontsize=15)
elif 'Lin' in feature:
axs[i].set_ylabel(feature + ' (m/s)', fontsize=15)
elif 'Ang' in feature:
axs[i].set_ylabel(feature + ' (rad/s)', fontsize=15)
fig.suptitle(clus_params['feat_list'], fontsize=16)
range_dic = range_dic_(df_clus)
for trial, range_ in range_dic.items():
axs[0].text(range_[0], axs[0].get_ylim()[1]+0.2, trial, fontsize=15, rotation=45)
for i in range(plt_num):
axs[i].axvline(x=range_[0], linestyle='--', linewidth=0.5)
plt.show()
# function to plot clusters in time series data
def plt_ts_cluster_subset(df_, features_to_plot, man_list=maneuvers):
|
datasets, dataset_labels = import_func(dataset_paths)
data_columns = [col for col in datasets[dataset_labels[0]].columns if col != 'trial' and
col != 'Time' and col != 'maneuver'] # get columns/features of the imported datasets
df_all_stand, df_all_columns, featureSet_dic = prep_func(datasets)
# run the cluster function or import trained model
models, df_clus = clus_func(df_all_stand.copy(),
clus_params['n_components'],
clus_params['feat_list'])
df_labeled = labeling_func(df_clus) # adding labels to all datastes
plt_gm_clusters(df_all_stand.copy(), models) # plotting cluster over torque left/right
plt_ts_cluster(df_labeled, ['Mean Torque_L', 'Mean Torque_R']) # plotting all labeled data in a time series format
# plotting a subset of selected labeled maneuvers in a time series format
plt_ts_cluster_subset(df_labeled, ['Mean Torque_L', 'Mean Torque_R'], ['StraightF'])
# if SAVE_DATA:
processed_path = os.path.join(CURR_PATH, 'labeled_data')
pathlib.Path(processed_path).mkdir(parents=True, exist_ok=True)
filename = "gmm_labels.csv"
filename = os.path.join(processed_path, filename)
df_labeled.to_csv(filename, index=False)
print("SUCCESSFULLY EXECUTED!!!!")
| """
input: input original dataframe (with maneuver columns), clustered dataframe, number of clusteres,
and selected features to plot
output: plotting clustered time series data with different colors
"""
clusterNum = clus_params['n_components']
color_dict = {i: cmap(i) for i in range(clusterNum)}
figsize = (15, 15)
legend_size = 15
if len(man_list) == 1:
figsize = (15, 8)
fig, axs = plt.subplots(len(man_list), 1, figsize=figsize, constrained_layout=True)
fig.suptitle(clus_params['feat_list'], fontsize=16)
if len(man_list) != 1:
axs = axs.ravel()
for i, wheelchair_man in enumerate(man_list):
df_clus = df_.loc[df_['maneuver'] == wheelchair_man].copy()
df_clus = df_clus.reset_index()
states = df_clus['Clus_label']
color_array = [color_dict[i] for i in states]
if len(man_list) != 1:
axs[i].scatter(df_clus.index, df_clus[features_to_plot[0]], c=color_array, s=16)
axs[i].scatter(df_clus.index, df_clus[features_to_plot[1]], c=color_array, s=16, alpha=0.7, marker='>')
axs[i].tick_params(direction='out', labelsize=15)
axs[i].set_ylabel('Torque (Nm)', fontsize=15)
axs[i].set_title(wheelchair_man)
axs[i].yaxis.grid(True)
axs[i].set_xlim([-1, len(df_clus)+1])
legend_elements = [Line2D([0], [0], marker='>', color='w', label='Right',
markerfacecolor='k', markersize=15),
Line2D([0], [0], marker='o', color='w', label='Left',
markerfacecolor='k', markersize=15)]
axs[i].legend(handles=legend_elements, fontsize=legend_size)
else:
axs.scatter(df_clus.index, df_clus[features_to_plot[0]], c=color_array, s=16)
axs.scatter(df_clus.index, df_clus[features_to_plot[1]], c=color_array, s=16, alpha=0.7, marker='>')
axs.tick_params(direction='out', labelsize=15)
axs.set_ylabel('Torque (Nm)', fontsize=15)
axs.set_title(wheelchair_man)
axs.yaxis.grid(True)
axs.set_xlim([-1, len(df_clus)+1])
legend_elements = [Line2D([0], [0], marker='>', color='w', label='Right',
markerfacecolor='k', markersize=15),
Line2D([0], [0], marker='o', color='w', label='Left',
markerfacecolor='k', markersize=15)]
axs.legend(handles=legend_elements, fontsize=legend_size)
plt.show() | identifier_body |
clustering.py | #!/usr/bin/env python
# coding: utf-8
"""
Author: Mahsa Khalili
Date: 2021 April 15th
Purpose: This Python script prepare IMU data for terrain classification.
"""
import os
import glob
import pathlib
import random
import numpy as np
import pandas as pd
import pickle
import joblib
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import itertools
from scipy import linalg
import matplotlib as mpl
from matplotlib.lines import Line2D
from sklearn.preprocessing import StandardScaler
from sklearn import mixture
# DEFINITIONS
USER = 'Mahsa' # ['Mahsa', 'Jaimie'] # participant name
WIN_SIZE = 32 # window size
# list of all maneuvers
maneuvers = ['Obstacles15', 'Obstacles35', 'RampA', 'StraightF', 'Turn90FR', 'Turn90FL', 'Turn180L', 'Turn180R']
# choose the feature subsets for clustering
featureSet_list = ['ALL', 'ALL_TORQUE', '2D_TORQUE', 'LR_TORQUE', 'LR_TORQUE_MEAN', '2D_TORQUE_MEAN']
dataset_to_import = 'featured_data' # choose dataset/datasets to import
featured_columns = ['AngVel_L', 'AngVel_R', 'Chair_LinVel', 'Chair_AngVel', 'Torque_L', 'Torque_R',
'Torque_sum', 'Torque_diff', 'Torque_L_roc', 'Torque_R_roc']
time_features = ['Mean', 'Std', 'Max', 'Min', 'RMS']
# clustering model parameters
clus_params = {'covar_types': 'full', 'n_components': 6, 'feat_list': 'ALL_TORQUE'}
# path to save labeled data and corresponding figures
CURR_PATH = os.path.abspath('.')
# Import processed data
dataset_paths = glob.glob(os.path.join(CURR_PATH, dataset_to_import, USER, 'WinSize' + str(WIN_SIZE), '*.csv'))
# create a color pallette
cmap = matplotlib.cm.get_cmap('tab10')
def import_func(path_):
""" function to import featured datasets"""
datasets_dic = {}
for dataset_path in path_:
# Parse labels from filenames
dataset_label = os.path.split(dataset_path)[1].split('.')[0]
# Read from csv to Pandas
dataset = pd.read_csv(dataset_path)
# insert dataset label to the dataframes
dataset.insert(0, 'trial', dataset_label)
dataset.insert(0, 'maneuver', dataset_label.split('_')[0])
# Datasets are stored in a dictionary
datasets_dic.update({dataset_label: dataset})
# list of imported maneuvers
dataset_names = list(datasets_dic.keys())
return datasets_dic, dataset_names
def prep_func(data_dic):
"""Prepare dataframes for clustering"""
df_all = pd.DataFrame(columns=datasets[dataset_labels[0]].columns.tolist())
# combine desired datasets into one dataframe
for label in dataset_labels:
df_all = pd.concat([df_all, data_dic[label]], ignore_index=True)
df_all_columns = df_all.copy() # keep a copy of the original dataframes before dropping the trial names
# dropping unused columns/features
for col in ['Time', 'trial', 'maneuver']:
if col in df_all.columns:
df_all = df_all.drop(columns=[col])
columns_all = df_all.columns.tolist()
columns_torque = [col for col in df_all.columns.tolist() if 'Torque' in col] # all torque data
# all torque features except for roc (mean/std/... & left/right/sum/diff)
columns_2d_torque = [col for col in df_all.columns.tolist()
if 'Torque_sum' in col or 'Torque_diff' in col and 'roc' not in col]
# all torque features of left and right only (mean/std/... & left/right)
columns_lr_torque = [col for col in df_all.columns.tolist()
if ('Torque_L' in col or 'Torque_R' in col) and 'roc' not in col]
columns_lr_torque_mean = ['Mean Torque_L', 'Mean Torque_R'] # mean torque left and right only
columns_2d_torque_mean = ['Mean Torque_sum', 'Mean Torque_diff'] # mean torque left and right only
# dictionary of list of feature subsets to be used for dimension_reduction or clustering
featureSet_dic = {'ALL': columns_all, 'ALL_TORQUE': columns_torque,
'2D_TORQUE': columns_2d_torque, '2D_TORQUE_MEAN': columns_2d_torque_mean,
'LR_TORQUE': columns_lr_torque, 'LR_TORQUE_MEAN': columns_lr_torque_mean}
# Standardize features by removing the mean and scaling to unit variance
scaler = StandardScaler()
feat_all_stand = scaler.fit_transform(df_all.values)
df_all_stand = pd.DataFrame(feat_all_stand, columns=data_columns) # normalized dataset
return df_all_stand, df_all_columns, featureSet_dic
def clus_func(df_all, n_components, feat_subset):
    """
    Cluster the given dataframe with a pre-trained Gaussian mixture model.

    input: df_all -- standardized dataframe of all combined maneuvers,
           n_components -- number of mixture components (clusters),
           feat_subset -- key into the module-level featureSet_dic selecting
           which feature columns are used for clustering
    output: the loaded GMM model and the dataframe extended with a
            'Clus_label' column plus one 'Prob_L<n>' column per component
    """
    # restrict to the selected feature subset for prediction
    df = df_all[featureSet_dic[feat_subset]].copy()
    X = df.values
    # # Fit a Gaussian mixture with EM (kept for reference: the model is
    # # currently loaded from disk instead of being re-trained here)
    # gmm_model = mixture.GaussianMixture(n_components=n_components,
    #                                     covariance_type=cv_type,
    #                                     random_state=1,
    #                                     n_init=10)
    # gmm_model = gmm_model.fit(X)
    model_path = os.path.join(CURR_PATH, 'clustering_model') # directory holding the trained model
    model_name = os.path.join(model_path, 'gmm.joblib')
    gmm_model = joblib.load(model_name)
    # predict labels & per-cluster membership probabilities
    labels = gmm_model.predict(X)
    labels_prob = gmm_model.predict_proba(X)
    # re-attach all dropped features (for plotting purposes) of the standardized dataframe
    # NOTE(review): this reads the module-level `df_all_stand`, not the `df_all`
    # argument -- equivalent only because the caller passes df_all_stand.copy();
    # verify before reusing this function with other inputs.
    added_feat = [feat for feat in data_columns if feat not in df.columns]
    df[added_feat] = df_all_stand[added_feat].copy()
    df = df[data_columns]
    # adding the labels to the dataframe
    df.insert(0, 'Clus_label', labels)
    for n in range(n_components):
        df['Prob_L'+str(n)] = labels_prob[:, n]
    return gmm_model, df # export the gmm model and the labeled dataset
def labeling_func(df_clus):
    """Attach cluster labels and per-cluster probabilities from ``df_clus``
    to a copy of the original (pre-drop) dataframe ``df_all_columns``."""
    labeled = df_all_columns.copy()
    # integer labels; astype returns a fresh Series, so no aliasing of df_clus
    labeled['Clus_label'] = df_clus['Clus_label'].astype(int)
    # one probability column per mixture component
    for comp in range(clus_params['n_components']):
        prob_col = 'Prob_L' + str(comp)
        labeled[prob_col] = df_clus[prob_col].copy()
    return labeled
def plt_gm_clusters(df_all, model):
"""this function gets unlabeled scaled dataframe and predict labels + plotting cluster ellips"""
# color_iter = itertools.cycle([cmap(i) for i in range(cmap.N)])
color_iter = itertools.cycle([cmap(i) for i in range(clus_params['n_components'])])
df = df_all[featureSet_dic[clus_params['feat_list']]].copy()
XX = df.values
Y_ = model.predict(XX) # predict labels for each model
plt.figure(figsize=(8, 6))
splot = plt.subplot(1, 1, 1)
for i, (mean, cov, color) in enumerate(zip(model.means_, model.covariances_, color_iter)):
if "MEAN" in clus_params['feat_list']:
v, w = linalg.eigh(cov)
else:
|
if not np.any(Y_ == i):
continue
if "MEAN" in clus_params['feat_list']:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 1], color=color, s=60)
else:
plt.scatter(XX[Y_ == i, 0], XX[Y_ == i, 5], color=color, s=60)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Subject: {}, feature set: {}'.format(USER, clus_params['feat_list']))
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
def range_dic_(df_):
    """Map each maneuver name to its (first, last) row-index range in ``df_``.

    input: df_ -- dataframe with a 'maneuver' column (non-null labels assumed)
    output: dict {maneuver: (min_index, max_index)} for every distinct maneuver

    Note: if a maneuver's rows are not contiguous, the returned range also
    spans unrelated rows in between (same as the original behavior).
    """
    range_dic = {}
    # Iterate distinct maneuvers once: the previous version re-scanned the
    # whole index once per ROW (including duplicates), i.e. O(n*m) work.
    for man in df_['maneuver'].unique():
        trial_indx = df_.index[df_['maneuver'] == man]
        range_dic[man] = (trial_indx.min(), trial_indx.max())
    return range_dic
def plt_ts_cluster(df_, features_to_plot):
    """
    Plot the labeled time-series data, colored by cluster.

    input: df_ -- labeled dataframe (must contain 'Clus_label' and 'maneuver'),
           features_to_plot -- exactly two feature column names (one subplot each;
           plt_num below is hard-coded to 2)
    output: shows a matplotlib figure; no return value
    """
    df_clus = df_.copy()
    # one subplot per plotted feature (fixed at two)
    plt_num = 2
    fig, axs = plt.subplots(plt_num, 1, figsize=(15, 12), constrained_layout=True)
    axs = axs.ravel()
    states = df_clus['Clus_label']
    clusterNum = clus_params['n_components']
    # one color per cluster id, looked up per sample
    color_dict = {i:cmap(i) for i in range(clusterNum)}
    color_array = [color_dict[i] for i in states]
    for i, feature in enumerate(features_to_plot):
        axs[i].scatter(df_clus.index, df_clus[feature], c=color_array, s=10)
        axs[i].set_xlim([-1, len(df_clus)+1])
        axs[i].tick_params(direction='out', labelsize=15)
        axs[i].yaxis.grid(True)
        # axis label unit inferred from the feature name
        if 'Torque' in feature:
            axs[i].set_ylabel(feature + ' (Nm)', fontsize=15)
        elif 'Lin' in feature:
            axs[i].set_ylabel(feature + ' (m/s)', fontsize=15)
        elif 'Ang' in feature:
            axs[i].set_ylabel(feature + ' (rad/s)', fontsize=15)
    fig.suptitle(clus_params['feat_list'], fontsize=16)
    # annotate each maneuver's start and draw a separator line in every subplot
    range_dic = range_dic_(df_clus)
    for trial, range_ in range_dic.items():
        axs[0].text(range_[0], axs[0].get_ylim()[1]+0.2, trial, fontsize=15, rotation=45)
        for i in range(plt_num):
            axs[i].axvline(x=range_[0], linestyle='--', linewidth=0.5)
    plt.show()
# function to plot clusters in time series data
def plt_ts_cluster_subset(df_, features_to_plot, man_list=maneuvers):
    """
    Plot a subset of labeled maneuvers as time series, colored by cluster.

    input: df_ -- labeled dataframe (must contain 'Clus_label' and 'maneuver'),
           features_to_plot -- two feature column names (left/right torque),
           man_list -- list of maneuver names to plot, one subplot each
           (default is the module-level `maneuvers`, captured at definition time)
    output: shows a matplotlib figure; no return value

    NOTE(review): the two branches below are duplicated because
    plt.subplots returns a single Axes (not an array) when len(man_list) == 1.
    """
    clusterNum = clus_params['n_components']
    color_dict = {i: cmap(i) for i in range(clusterNum)}
    figsize = (15, 15)
    legend_size = 15
    # a single maneuver gets a shorter figure
    if len(man_list) == 1:
        figsize = (15, 8)
    fig, axs = plt.subplots(len(man_list), 1, figsize=figsize, constrained_layout=True)
    fig.suptitle(clus_params['feat_list'], fontsize=16)
    if len(man_list) != 1:
        axs = axs.ravel()
    for i, wheelchair_man in enumerate(man_list):
        # rows of this maneuver only, re-indexed from 0 for plotting
        df_clus = df_.loc[df_['maneuver'] == wheelchair_man].copy()
        df_clus = df_clus.reset_index()
        states = df_clus['Clus_label']
        # comprehension variable `i` is scoped to the comprehension (py3) and
        # does not clobber the subplot index above
        color_array = [color_dict[i] for i in states]
        if len(man_list) != 1:
            axs[i].scatter(df_clus.index, df_clus[features_to_plot[0]], c=color_array, s=16)
            axs[i].scatter(df_clus.index, df_clus[features_to_plot[1]], c=color_array, s=16, alpha=0.7, marker='>')
            axs[i].tick_params(direction='out', labelsize=15)
            axs[i].set_ylabel('Torque (Nm)', fontsize=15)
            axs[i].set_title(wheelchair_man)
            axs[i].yaxis.grid(True)
            axs[i].set_xlim([-1, len(df_clus)+1])
            # marker shape distinguishes left/right; color encodes the cluster
            legend_elements = [Line2D([0], [0], marker='>', color='w', label='Right',
                                      markerfacecolor='k', markersize=15),
                               Line2D([0], [0], marker='o', color='w', label='Left',
                                      markerfacecolor='k', markersize=15)]
            axs[i].legend(handles=legend_elements, fontsize=legend_size)
        else:
            axs.scatter(df_clus.index, df_clus[features_to_plot[0]], c=color_array, s=16)
            axs.scatter(df_clus.index, df_clus[features_to_plot[1]], c=color_array, s=16, alpha=0.7, marker='>')
            axs.tick_params(direction='out', labelsize=15)
            axs.set_ylabel('Torque (Nm)', fontsize=15)
            axs.set_title(wheelchair_man)
            axs.yaxis.grid(True)
            axs.set_xlim([-1, len(df_clus)+1])
            legend_elements = [Line2D([0], [0], marker='>', color='w', label='Right',
                                      markerfacecolor='k', markersize=15),
                               Line2D([0], [0], marker='o', color='w', label='Left',
                                      markerfacecolor='k', markersize=15)]
            axs.legend(handles=legend_elements, fontsize=legend_size)
    plt.show()
# --- main pipeline: import -> prepare -> cluster -> label -> plot -> save ---
datasets, dataset_labels = import_func(dataset_paths)
data_columns = [col for col in datasets[dataset_labels[0]].columns if col != 'trial' and
                col != 'Time' and col != 'maneuver'] # get columns/features of the imported datasets
df_all_stand, df_all_columns, featureSet_dic = prep_func(datasets)
# run the cluster function or import trained model
models, df_clus = clus_func(df_all_stand.copy(),
                            clus_params['n_components'],
                            clus_params['feat_list'])
df_labeled = labeling_func(df_clus) # adding labels to all datasets
plt_gm_clusters(df_all_stand.copy(), models) # plotting cluster over torque left/right
plt_ts_cluster(df_labeled, ['Mean Torque_L', 'Mean Torque_R']) # plotting all labeled data in a time series format
# plotting a subset of selected labeled maneuvers in a time series format
plt_ts_cluster_subset(df_labeled, ['Mean Torque_L', 'Mean Torque_R'], ['StraightF'])
# if SAVE_DATA:
# NOTE(review): the SAVE_DATA guard above is commented out, so the labeled
# data is always written to disk below.
processed_path = os.path.join(CURR_PATH, 'labeled_data')
pathlib.Path(processed_path).mkdir(parents=True, exist_ok=True)
filename = "gmm_labels.csv"
filename = os.path.join(processed_path, filename)
df_labeled.to_csv(filename, index=False)
print("SUCCESSFULLY EXECUTED!!!!")
| subset = [0, 5] # mean torque L & R
v, w = linalg.eigh(cov[np.ix_(subset, subset)])
mean = np.array([mean[0], mean[5]]) | conditional_block |
dataflows.rs | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Types for describing dataflows.
use std::collections::{BTreeMap, BTreeSet};
use mz_expr::{CollectionPlan, MirRelationExpr, MirScalarExpr, OptimizedMirRelationExpr};
use mz_proto::{IntoRustIfSome, ProtoMapEntry, ProtoType, RustType, TryFromProtoError};
use mz_repr::explain::IndexUsageType;
use mz_repr::{GlobalId, RelationType};
use mz_storage_client::controller::CollectionMetadata;
use proptest::prelude::{any, Arbitrary};
use proptest::strategy::{BoxedStrategy, Strategy};
use proptest_derive::Arbitrary;
use serde::{Deserialize, Serialize};
use timely::progress::Antichain;
use crate::plan::Plan;
use crate::types::dataflows::proto_dataflow_description::{
ProtoIndexExport, ProtoIndexImport, ProtoSinkExport, ProtoSourceImport,
};
use crate::types::sinks::{ComputeSinkConnection, ComputeSinkDesc};
use crate::types::sources::{SourceInstanceArguments, SourceInstanceDesc};
include!(concat!(
env!("OUT_DIR"),
"/mz_compute_client.types.dataflows.rs"
));
/// A description of a dataflow to construct and results to surface.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct DataflowDescription<P, S: 'static = (), T = mz_repr::Timestamp> {
    /// Source instantiations made available to the dataflow, each paired with
    /// a flag indicating whether the source provides monotonic data.
    pub source_imports: BTreeMap<GlobalId, (SourceInstanceDesc<S>, bool)>,
    /// Indexes made available to the dataflow.
    /// (id of new index, description of index, relation type of base source/view, monotonic)
    pub index_imports: BTreeMap<GlobalId, IndexImport>,
    /// Views and indexes to be built and stored in the local context.
    /// Objects must be built in the specific order, as there may be
    /// dependencies of later objects on prior identifiers.
    pub objects_to_build: Vec<BuildDesc<P>>,
    /// Indexes to be made available to be shared with other dataflows.
    /// (id of new index, description of index, relation type of base source/view)
    pub index_exports: BTreeMap<GlobalId, (IndexDesc, RelationType)>,
    /// Sinks to be created.
    /// (id of new sink, description of sink)
    pub sink_exports: BTreeMap<GlobalId, ComputeSinkDesc<S, T>>,
    /// An optional frontier to which inputs should be advanced.
    ///
    /// If this is set, it should override the default setting determined by
    /// the upper bound of `since` frontiers contributing to the dataflow.
    /// It is an error for this to be set to a frontier not beyond that default.
    pub as_of: Option<Antichain<T>>,
    /// Frontier beyond which the dataflow should not execute.
    /// Specifically, updates at times greater or equal to this frontier are suppressed.
    /// This is often set to `as_of + 1` to enable "batch" computations.
    pub until: Antichain<T>,
    /// Human readable name.
    pub debug_name: String,
}
impl<T> DataflowDescription<Plan<T>, (), mz_repr::Timestamp> {
    /// Tests if the dataflow refers to a single timestamp, namely
    /// that `as_of` has a single coordinate and that the `until`
    /// value corresponds to the `as_of` value plus one.
    pub fn is_single_time(&self) -> bool {
        // TODO: this would be much easier to check if `until` was a strict lower bound,
        // and we would be testing that `until == as_of`.
        let as_of = match self.as_of.as_ref() {
            Some(frontier) => frontier,
            None => return false,
        };
        if as_of.is_empty() {
            return false;
        }
        // `as_option` yields `Some` only for a single coordinate; compare its
        // successor (None on overflow or multiple coordinates) against `until`.
        let successor = as_of.as_option().and_then(|time| time.checked_add(1));
        successor.as_ref() == self.until.as_option()
    }
}
impl<T> DataflowDescription<OptimizedMirRelationExpr, (), T> {
    /// Creates a new, empty dataflow description with a human-readable name.
    pub fn new(name: String) -> Self {
        Self {
            source_imports: Default::default(),
            index_imports: Default::default(),
            objects_to_build: Vec::new(),
            index_exports: Default::default(),
            sink_exports: Default::default(),
            as_of: Default::default(),
            until: Antichain::new(),
            debug_name: name,
        }
    }

    /// Imports a previously exported index.
    ///
    /// This method makes available an index previously exported as `id`, identified
    /// to the query by `description` (which names the view the index arranges, and
    /// the keys by which it is arranged).
    pub fn import_index(
        &mut self,
        id: GlobalId,
        desc: IndexDesc,
        typ: RelationType,
        monotonic: bool,
    ) {
        self.index_imports.insert(
            id,
            IndexImport {
                desc,
                typ,
                monotonic,
                // Usage annotations are populated later; see `IndexImport::usage_types`.
                usage_types: None,
            },
        );
    }

    /// Imports a source and makes it available as `id`.
    pub fn import_source(&mut self, id: GlobalId, typ: RelationType, monotonic: bool) {
        // Import the source with no linear operators applied to it.
        // They may be populated by whole-dataflow optimization.
        self.source_imports.insert(
            id,
            (
                SourceInstanceDesc {
                    storage_metadata: (),
                    arguments: SourceInstanceArguments { operators: None },
                    typ,
                },
                monotonic,
            ),
        );
    }

    /// Binds to `id` the relation expression `plan`.
    pub fn insert_plan(&mut self, id: GlobalId, plan: OptimizedMirRelationExpr) {
        self.objects_to_build.push(BuildDesc { id, plan });
    }

    /// Exports as `id` an index described by `description`.
    ///
    /// Future uses of `import_index` in other dataflow descriptions may use `id`,
    /// as long as this dataflow has not been terminated in the meantime.
    pub fn export_index(&mut self, id: GlobalId, description: IndexDesc, on_type: RelationType) {
        // We first create a "view" named `id` that ensures that the
        // data are correctly arranged and available for export.
        self.insert_plan(
            id,
            OptimizedMirRelationExpr::declare_optimized(MirRelationExpr::ArrangeBy {
                input: Box::new(MirRelationExpr::global_get(
                    description.on_id,
                    on_type.clone(),
                )),
                keys: vec![description.key.clone()],
            }),
        );
        self.index_exports.insert(id, (description, on_type));
    }

    /// Exports as `id` a sink described by `description`.
    pub fn export_sink(&mut self, id: GlobalId, description: ComputeSinkDesc<(), T>) {
        self.sink_exports.insert(id, description);
    }

    /// Returns true iff `id` is already imported (as a built object or a source).
    pub fn is_imported(&self, id: &GlobalId) -> bool {
        self.objects_to_build.iter().any(|bd| &bd.id == id)
            || self.source_imports.keys().any(|i| i == id)
    }

    /// Assigns the `as_of` frontier to the supplied argument.
    ///
    /// This method allows the dataflow to indicate a frontier up through
    /// which all times should be advanced. This can be done for at least
    /// two reasons: 1. correctness and 2. performance.
    ///
    /// Correctness may require an `as_of` to ensure that historical detail
    /// is consolidated at representative times that do not present specific
    /// detail that is not specifically correct. For example, updates may be
    /// compacted to times that are no longer the source times, but instead
    /// some byproduct of when compaction was executed; we should not present
    /// those specific times as meaningfully different from other equivalent
    /// times.
    ///
    /// Performance may benefit from an aggressive `as_of` as it reduces the
    /// number of distinct moments at which collections vary. Differential
    /// dataflow will refresh its outputs at each time its inputs change and
    /// to moderate that we can minimize the volume of distinct input times
    /// as much as possible.
    ///
    /// Generally, one should consider setting `as_of` at least to the `since`
    /// frontiers of contributing data sources and as aggressively as the
    /// computation permits.
    pub fn set_as_of(&mut self, as_of: Antichain<T>) {
        self.as_of = Some(as_of);
    }

    /// The number of columns associated with an identifier in the dataflow.
    ///
    /// # Panics
    ///
    /// Panics if `id` is neither an imported source, the base object of an
    /// imported index, nor an object built by this dataflow.
    pub fn arity_of(&self, id: &GlobalId) -> usize {
        for (source_id, (source, _monotonic)) in self.source_imports.iter() {
            if source_id == id {
                return source.typ.arity();
            }
        }
        for IndexImport { desc, typ, .. } in self.index_imports.values() {
            if &desc.on_id == id {
                return typ.arity();
            }
        }
        for desc in self.objects_to_build.iter() {
            if &desc.id == id {
                return desc.plan.arity();
            }
        }
        panic!("GlobalId {} not found in DataflowDesc", id);
    }

    /// Calls r and s on any sub-members of those types in self. Halts at the first error return.
    pub fn visit_children<R, S, E>(&mut self, r: R, s: S) -> Result<(), E>
    where
        R: Fn(&mut OptimizedMirRelationExpr) -> Result<(), E>,
        S: Fn(&mut MirScalarExpr) -> Result<(), E>,
    {
        for BuildDesc { plan, .. } in &mut self.objects_to_build {
            r(plan)?;
        }
        for (source_instance_desc, _) in self.source_imports.values_mut() {
            // Sources without linear operators have no expressions to visit.
            let Some(mfp) = source_instance_desc.arguments.operators.as_mut() else {
                continue;
            };
            for expr in mfp.expressions.iter_mut() {
                s(expr)?;
            }
            for (_, expr) in mfp.predicates.iter_mut() {
                s(expr)?;
            }
        }
        Ok(())
    }
}
impl<P, S, T> DataflowDescription<P, S, T>
where
P: CollectionPlan,
{
/// Identifiers of exported objects (indexes and sinks).
pub fn | (&self) -> impl Iterator<Item = GlobalId> + '_ {
self.index_exports
.keys()
.chain(self.sink_exports.keys())
.cloned()
}
/// Identifiers of exported subscribe sinks.
pub fn subscribe_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
self.sink_exports
.iter()
.filter_map(|(id, desc)| match desc.connection {
ComputeSinkConnection::Subscribe(_) => Some(*id),
_ => None,
})
}
/// Returns the description of the object to build with the specified
/// identifier.
///
/// # Panics
///
/// Panics if `id` is not present in `objects_to_build` exactly once.
pub fn build_desc(&self, id: GlobalId) -> &BuildDesc<P> {
let mut builds = self.objects_to_build.iter().filter(|build| build.id == id);
let build = builds
.next()
.unwrap_or_else(|| panic!("object to build id {id} unexpectedly missing"));
assert!(builds.next().is_none());
build
}
/// Computes the set of identifiers upon which the specified collection
/// identifier depends.
///
/// `collection_id` must specify a valid object in `objects_to_build`.
///
/// This method includes identifiers for e.g. intermediate views, and should be filtered
/// if one only wants sources and indexes.
///
/// This method is safe for mutually recursive view definitions.
pub fn depends_on(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
let mut out = BTreeSet::new();
self.depends_on_into(collection_id, &mut out);
out
}
/// Like `depends_on`, but appends to an existing `BTreeSet`.
pub fn depends_on_into(&self, collection_id: GlobalId, out: &mut BTreeSet<GlobalId>) {
out.insert(collection_id);
if self.source_imports.contains_key(&collection_id) {
// The collection is provided by an imported source. Report the
// dependency on the source.
out.insert(collection_id);
return;
}
// NOTE(benesch): we're not smart enough here to know *which* index
// for the collection will be used, if one exists, so we have to report
// the dependency on all of them.
let mut found_index = false;
for (index_id, IndexImport { desc, .. }) in &self.index_imports {
if desc.on_id == collection_id {
// The collection is provided by an imported index. Report the
// dependency on the index.
out.insert(*index_id);
found_index = true;
}
}
if found_index {
return;
}
// The collection is not provided by a source or imported index.
// It must be a collection whose plan we have handy. Recurse.
let build = self.build_desc(collection_id);
for id in build.plan.depends_on() {
if !out.contains(&id) {
self.depends_on_into(id, out)
}
}
}
/// Computes the set of imports upon which the specified collection depends.
///
/// This method behaves like `depends_on` but filters out internal dependencies that are not
/// included in the dataflow imports.
pub fn depends_on_imports(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
let is_import = |id: &GlobalId| {
self.source_imports.contains_key(id) || self.index_imports.contains_key(id)
};
let deps = self.depends_on(collection_id);
deps.into_iter().filter(is_import).collect()
}
}
impl<P: PartialEq, S: PartialEq, T: timely::PartialOrder> DataflowDescription<P, S, T> {
    /// Determine if a dataflow description is compatible with this dataflow description.
    ///
    /// Compatible dataflows have equal exports, imports, and objects to build. The `as_of` of
    /// the receiver has to be less equal the `other` `as_of`.
    ///
    // TODO: The semantics of this function are only useful for command reconciliation at the moment.
    pub fn compatible_with(&self, other: &Self) -> bool {
        // Structural parts must match exactly.
        let structure_matches = self.index_exports == other.index_exports
            && self.sink_exports == other.sink_exports
            && self.objects_to_build == other.objects_to_build
            && self.index_imports == other.index_imports
            && self.source_imports == other.source_imports;
        // `as_of` must be present on both sides and ordered receiver <= other.
        let as_of_compatible = match (&self.as_of, &other.as_of) {
            (Some(mine), Some(theirs)) => timely::PartialOrder::less_equal(mine, theirs),
            _ => false,
        };
        structure_matches && as_of_compatible
    }
}
impl RustType<ProtoDataflowDescription>
    for DataflowDescription<crate::plan::Plan, CollectionMetadata>
{
    /// Converts this description into its protobuf representation, field by field.
    fn into_proto(&self) -> ProtoDataflowDescription {
        ProtoDataflowDescription {
            source_imports: self.source_imports.into_proto(),
            index_imports: self.index_imports.into_proto(),
            objects_to_build: self.objects_to_build.into_proto(),
            index_exports: self.index_exports.into_proto(),
            sink_exports: self.sink_exports.into_proto(),
            as_of: self.as_of.into_proto(),
            until: Some(self.until.into_proto()),
            debug_name: self.debug_name.clone(),
        }
    }

    /// Reconstructs a description from protobuf, erroring on any malformed field.
    fn from_proto(proto: ProtoDataflowDescription) -> Result<Self, TryFromProtoError> {
        Ok(DataflowDescription {
            source_imports: proto.source_imports.into_rust()?,
            index_imports: proto.index_imports.into_rust()?,
            objects_to_build: proto.objects_to_build.into_rust()?,
            index_exports: proto.index_exports.into_rust()?,
            sink_exports: proto.sink_exports.into_rust()?,
            as_of: proto.as_of.map(|x| x.into_rust()).transpose()?,
            // A missing `until` decodes as the empty frontier (no bound).
            until: proto
                .until
                .map(|x| x.into_rust())
                .transpose()?
                .unwrap_or_else(Antichain::new),
            debug_name: proto.debug_name,
        })
    }
}
impl ProtoMapEntry<GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)> for ProtoSourceImport {
    /// Converts one `source_imports` map entry into its protobuf message.
    fn from_rust<'a>(
        entry: (
            &'a GlobalId,
            &'a (SourceInstanceDesc<CollectionMetadata>, bool),
        ),
    ) -> Self {
        ProtoSourceImport {
            id: Some(entry.0.into_proto()),
            source_instance_desc: Some(entry.1 .0.into_proto()),
            monotonic: entry.1 .1.into_proto(),
        }
    }

    /// Reconstructs the map entry, erroring if required fields are absent.
    fn into_rust(
        self,
    ) -> Result<(GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)), TryFromProtoError> {
        Ok((
            self.id.into_rust_if_some("ProtoSourceImport::id")?,
            (
                self.source_instance_desc
                    .into_rust_if_some("ProtoSourceImport::source_instance_desc")?,
                self.monotonic.into_rust()?,
            ),
        ))
    }
}
impl ProtoMapEntry<GlobalId, IndexImport> for ProtoIndexImport {
    /// Converts one `index_imports` map entry into its protobuf message.
    fn from_rust<'a>(
        (
            id,
            IndexImport {
                desc,
                typ,
                monotonic,
                usage_types,
            },
        ): (&'a GlobalId, &'a IndexImport),
    ) -> Self {
        ProtoIndexImport {
            id: Some(id.into_proto()),
            index_desc: Some(desc.into_proto()),
            typ: Some(typ.into_proto()),
            monotonic: monotonic.into_proto(),
            usage_types: usage_types.as_ref().unwrap_or(&Vec::new()).into_proto(),
            // Protobuf cannot distinguish `None` from an empty vector, so the
            // `Some`-ness is carried in this separate flag.
            has_usage_types: usage_types.is_some(),
        }
    }

    /// Reconstructs the map entry, erroring if required fields are absent.
    fn into_rust(self) -> Result<(GlobalId, IndexImport), TryFromProtoError> {
        Ok((
            // Fix: the error context previously read "ProtoIndex::id", which does
            // not name this type; keep it consistent with the sibling contexts.
            self.id.into_rust_if_some("ProtoIndexImport::id")?,
            IndexImport {
                desc: self
                    .index_desc
                    .into_rust_if_some("ProtoIndexImport::index_desc")?,
                typ: self.typ.into_rust_if_some("ProtoIndexImport::typ")?,
                monotonic: self.monotonic.into_rust()?,
                // Reconstruct the `Option` from the explicit presence flag.
                usage_types: if self.has_usage_types.into_rust()? {
                    Some(self.usage_types.into_rust()?)
                } else {
                    None
                },
            },
        ))
    }
}
impl ProtoMapEntry<GlobalId, (IndexDesc, RelationType)> for ProtoIndexExport {
    /// Converts one `index_exports` map entry into its protobuf message.
    fn from_rust<'a>(
        (id, (index_desc, typ)): (&'a GlobalId, &'a (IndexDesc, RelationType)),
    ) -> Self {
        ProtoIndexExport {
            id: Some(id.into_proto()),
            index_desc: Some(index_desc.into_proto()),
            typ: Some(typ.into_proto()),
        }
    }

    /// Reconstructs the map entry, erroring if required fields are absent.
    fn into_rust(self) -> Result<(GlobalId, (IndexDesc, RelationType)), TryFromProtoError> {
        Ok((
            self.id.into_rust_if_some("ProtoIndexExport::id")?,
            (
                self.index_desc
                    .into_rust_if_some("ProtoIndexExport::index_desc")?,
                self.typ.into_rust_if_some("ProtoIndexExport::typ")?,
            ),
        ))
    }
}
impl ProtoMapEntry<GlobalId, ComputeSinkDesc<CollectionMetadata>> for ProtoSinkExport {
    /// Converts one `sink_exports` map entry into its protobuf message.
    fn from_rust<'a>(
        (id, sink_desc): (&'a GlobalId, &'a ComputeSinkDesc<CollectionMetadata>),
    ) -> Self {
        ProtoSinkExport {
            id: Some(id.into_proto()),
            sink_desc: Some(sink_desc.into_proto()),
        }
    }

    /// Reconstructs the map entry, erroring if required fields are absent.
    fn into_rust(
        self,
    ) -> Result<(GlobalId, ComputeSinkDesc<CollectionMetadata>), TryFromProtoError> {
        Ok((
            self.id.into_rust_if_some("ProtoSinkExport::id")?,
            self.sink_desc
                .into_rust_if_some("ProtoSinkExport::sink_desc")?,
        ))
    }
}
impl Arbitrary for DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> {
    type Strategy = BoxedStrategy<Self>;
    type Parameters = ();

    // Delegates to the `prop_compose!`-generated strategy defined below.
    fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
        any_dataflow_description().boxed()
    }
}
proptest::prop_compose! {
    // Strategy producing arbitrary `DataflowDescription`s for the protobuf
    // roundtrip test. Collection sizes are kept small (1..3) for cheap cases.
    fn any_dataflow_description()(
        source_imports in proptest::collection::vec(any_source_import(), 1..3),
        index_imports in proptest::collection::vec(any_dataflow_index_import(), 1..3),
        objects_to_build in proptest::collection::vec(any::<BuildDesc<Plan>>(), 1..3),
        index_exports in proptest::collection::vec(any_dataflow_index_export(), 1..3),
        sink_descs in proptest::collection::vec(
            any::<(GlobalId, ComputeSinkDesc<CollectionMetadata, mz_repr::Timestamp>)>(),
            1..3,
        ),
        as_of_some in any::<bool>(),
        as_of in proptest::collection::vec(any::<mz_repr::Timestamp>(), 1..5),
        debug_name in ".*",
    ) -> DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> {
        DataflowDescription {
            source_imports: BTreeMap::from_iter(source_imports.into_iter()),
            index_imports: BTreeMap::from_iter(index_imports.into_iter()),
            objects_to_build,
            index_exports: BTreeMap::from_iter(index_exports.into_iter()),
            sink_exports: BTreeMap::from_iter(
                sink_descs.into_iter(),
            ),
            // `as_of` is optional; an independent bool decides its presence.
            as_of: if as_of_some {
                Some(Antichain::from(as_of))
            } else {
                None
            },
            until: Antichain::new(),
            debug_name,
        }
    }
}
/// Strategy generating a single `source_imports` map entry.
fn any_source_import(
) -> impl Strategy<Value = (GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool))> {
    (
        any::<GlobalId>(),
        any::<(SourceInstanceDesc<CollectionMetadata>, bool)>(),
    )
}
proptest::prop_compose! {
    // Strategy generating a single `index_imports` map entry.
    fn any_dataflow_index_import()(
        id in any::<GlobalId>(),
        desc in any::<IndexDesc>(),
        typ in any::<RelationType>(),
        monotonic in any::<bool>(),
        usage_types in any::<Option<Vec<IndexUsageType>>>(),
    ) -> (GlobalId, IndexImport) {
        (id, IndexImport {desc, typ, monotonic, usage_types})
    }
}
proptest::prop_compose! {
    // Strategy generating a single `index_exports` map entry.
    fn any_dataflow_index_export()(
        id in any::<GlobalId>(),
        index in any::<IndexDesc>(),
        typ in any::<RelationType>(),
    ) -> (GlobalId, (IndexDesc, RelationType)) {
        (id, (index, typ))
    }
}
/// A commonly used name for dataflows containing MIR expressions.
pub type DataflowDesc = DataflowDescription<OptimizedMirRelationExpr, ()>;
/// An index storing processed updates so they can be queried
/// or reused in other computations.
#[derive(Arbitrary, Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct IndexDesc {
    /// Identity of the collection the index is on.
    pub on_id: GlobalId,
    /// Expressions to be arranged, in order of decreasing primacy.
    // Bounded strategy keeps proptest-generated keys small (1..3 expressions).
    #[proptest(strategy = "proptest::collection::vec(any::<MirScalarExpr>(), 1..3)")]
    pub key: Vec<MirScalarExpr>,
}
impl RustType<ProtoIndexDesc> for IndexDesc {
    /// Converts this index description into its protobuf message.
    fn into_proto(&self) -> ProtoIndexDesc {
        ProtoIndexDesc {
            on_id: Some(self.on_id.into_proto()),
            key: self.key.into_proto(),
        }
    }

    /// Reconstructs an `IndexDesc`, erroring if `on_id` is absent.
    fn from_proto(proto: ProtoIndexDesc) -> Result<Self, TryFromProtoError> {
        Ok(IndexDesc {
            on_id: proto.on_id.into_rust_if_some("ProtoIndexDesc::on_id")?,
            key: proto.key.into_rust()?,
        })
    }
}
/// Information about an imported index, and how it will be used by the dataflow.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct IndexImport {
    /// Description of the index.
    pub desc: IndexDesc,
    /// Schema and keys of the object the index is on.
    pub typ: RelationType,
    /// Whether the index will supply monotonic data.
    pub monotonic: bool,
    /// What kind of operation (full scan, lookup, ...) will access the index.
    /// `None` until filled in by `prune_and_annotate_dataflow_index_imports`.
    pub usage_types: Option<Vec<IndexUsageType>>,
}
/// An association of a global identifier to an expression.
#[derive(Arbitrary, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct BuildDesc<P> {
    /// The identifier at which the result of `plan` is bound.
    pub id: GlobalId,
    /// The plan to build (instantiated with e.g. `Plan` or `OptimizedMirRelationExpr`).
    pub plan: P,
}
impl RustType<ProtoBuildDesc> for BuildDesc<crate::plan::Plan> {
    /// Converts this build description into its protobuf message.
    fn into_proto(&self) -> ProtoBuildDesc {
        ProtoBuildDesc {
            id: Some(self.id.into_proto()),
            plan: Some(self.plan.into_proto()),
        }
    }

    /// Reconstructs a `BuildDesc`, erroring if required fields are absent.
    fn from_proto(x: ProtoBuildDesc) -> Result<Self, TryFromProtoError> {
        Ok(BuildDesc {
            id: x.id.into_rust_if_some("ProtoBuildDesc::id")?,
            plan: x.plan.into_rust_if_some("ProtoBuildDesc::plan")?,
        })
    }
}
#[cfg(test)]
mod tests {
    use mz_proto::protobuf_roundtrip;
    use proptest::prelude::ProptestConfig;
    use proptest::proptest;

    use crate::types::dataflows::DataflowDescription;

    use super::*;

    proptest! {
        #![proptest_config(ProptestConfig::with_cases(32))]

        // Checks that `DataflowDescription` survives a round trip through its
        // protobuf representation unchanged.
        #[mz_ore::test]
        fn dataflow_description_protobuf_roundtrip(expect in any::<DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp>>()) {
            let actual = protobuf_roundtrip::<_, ProtoDataflowDescription>(&expect);
            assert!(actual.is_ok());
            assert_eq!(actual.unwrap(), expect);
        }
    }
}
| export_ids | identifier_name |
dataflows.rs | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Types for describing dataflows.
use std::collections::{BTreeMap, BTreeSet};
use mz_expr::{CollectionPlan, MirRelationExpr, MirScalarExpr, OptimizedMirRelationExpr};
use mz_proto::{IntoRustIfSome, ProtoMapEntry, ProtoType, RustType, TryFromProtoError};
use mz_repr::explain::IndexUsageType;
use mz_repr::{GlobalId, RelationType};
use mz_storage_client::controller::CollectionMetadata;
use proptest::prelude::{any, Arbitrary};
use proptest::strategy::{BoxedStrategy, Strategy};
use proptest_derive::Arbitrary;
use serde::{Deserialize, Serialize};
use timely::progress::Antichain;
use crate::plan::Plan;
use crate::types::dataflows::proto_dataflow_description::{
ProtoIndexExport, ProtoIndexImport, ProtoSinkExport, ProtoSourceImport,
};
use crate::types::sinks::{ComputeSinkConnection, ComputeSinkDesc};
use crate::types::sources::{SourceInstanceArguments, SourceInstanceDesc};
include!(concat!(
env!("OUT_DIR"),
"/mz_compute_client.types.dataflows.rs"
));
/// A description of a dataflow to construct and results to surface.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct DataflowDescription<P, S: 'static = (), T = mz_repr::Timestamp> {
/// Sources instantiations made available to the dataflow pair with monotonicity information.
pub source_imports: BTreeMap<GlobalId, (SourceInstanceDesc<S>, bool)>,
/// Indexes made available to the dataflow.
/// (id of new index, description of index, relationtype of base source/view, monotonic)
pub index_imports: BTreeMap<GlobalId, IndexImport>,
/// Views and indexes to be built and stored in the local context.
/// Objects must be built in the specific order, as there may be
/// dependencies of later objects on prior identifiers.
pub objects_to_build: Vec<BuildDesc<P>>,
/// Indexes to be made available to be shared with other dataflows
/// (id of new index, description of index, relationtype of base source/view)
pub index_exports: BTreeMap<GlobalId, (IndexDesc, RelationType)>,
/// sinks to be created
/// (id of new sink, description of sink)
pub sink_exports: BTreeMap<GlobalId, ComputeSinkDesc<S, T>>,
/// An optional frontier to which inputs should be advanced.
///
/// If this is set, it should override the default setting determined by
/// the upper bound of `since` frontiers contributing to the dataflow.
/// It is an error for this to be set to a frontier not beyond that default.
pub as_of: Option<Antichain<T>>,
/// Frontier beyond which the dataflow should not execute.
/// Specifically, updates at times greater or equal to this frontier are suppressed.
/// This is often set to `as_of + 1` to enable "batch" computations.
pub until: Antichain<T>,
/// Human readable name
pub debug_name: String,
}
impl<T> DataflowDescription<Plan<T>, (), mz_repr::Timestamp> {
    /// Tests if the dataflow refers to a single timestamp, namely
    /// that `as_of` has a single coordinate and that the `until`
    /// value corresponds to the `as_of` value plus one.
    pub fn is_single_time(&self) -> bool {
        // TODO: this would be much easier to check if `until` was a strict lower bound,
        // and we would be testing that `until == as_of`.
        let as_of = match self.as_of.as_ref() {
            Some(frontier) => frontier,
            None => return false,
        };
        if as_of.is_empty() {
            return false;
        }
        // Single time iff the sole `as_of` coordinate plus one equals `until`.
        let successor = as_of.as_option().and_then(|time| time.checked_add(1));
        successor.as_ref() == self.until.as_option()
    }
}
impl<T> DataflowDescription<OptimizedMirRelationExpr, (), T> {
/// Creates a new dataflow description with a human-readable name.
pub fn new(name: String) -> Self {
Self {
source_imports: Default::default(),
index_imports: Default::default(),
objects_to_build: Vec::new(),
index_exports: Default::default(),
sink_exports: Default::default(),
as_of: Default::default(),
until: Antichain::new(),
debug_name: name,
}
}
/// Imports a previously exported index.
///
/// This method makes available an index previously exported as `id`, identified
/// to the query by `description` (which names the view the index arranges, and
/// the keys by which it is arranged).
pub fn import_index(
&mut self,
id: GlobalId,
desc: IndexDesc,
typ: RelationType,
monotonic: bool,
) {
self.index_imports.insert(
id,
IndexImport {
desc,
typ,
monotonic,
usage_types: None,
},
);
}
/// Imports a source and makes it available as `id`.
pub fn import_source(&mut self, id: GlobalId, typ: RelationType, monotonic: bool) {
// Import the source with no linear operators applied to it.
// They may be populated by whole-dataflow optimization.
self.source_imports.insert(
id,
(
SourceInstanceDesc {
storage_metadata: (),
arguments: SourceInstanceArguments { operators: None },
typ,
},
monotonic,
),
);
}
    /// Binds to `id` the relation expression `plan`.
    ///
    /// The object is appended to `objects_to_build`; build order is
    /// significant, as later objects may depend on earlier identifiers.
    pub fn insert_plan(&mut self, id: GlobalId, plan: OptimizedMirRelationExpr) {
        self.objects_to_build.push(BuildDesc { id, plan });
    }
/// Exports as `id` an index described by `description`.
///
/// Future uses of `import_index` in other dataflow descriptions may use `id`,
/// as long as this dataflow has not been terminated in the meantime.
pub fn export_index(&mut self, id: GlobalId, description: IndexDesc, on_type: RelationType) {
// We first create a "view" named `id` that ensures that the
// data are correctly arranged and available for export.
self.insert_plan(
id,
OptimizedMirRelationExpr::declare_optimized(MirRelationExpr::ArrangeBy {
input: Box::new(MirRelationExpr::global_get(
description.on_id,
on_type.clone(),
)),
keys: vec![description.key.clone()],
}),
);
self.index_exports.insert(id, (description, on_type));
}
/// Exports as `id` a sink described by `description`.
pub fn export_sink(&mut self, id: GlobalId, description: ComputeSinkDesc<(), T>) {
self.sink_exports.insert(id, description);
}
/// Returns true iff `id` is already imported.
pub fn is_imported(&self, id: &GlobalId) -> bool {
self.objects_to_build.iter().any(|bd| &bd.id == id)
|| self.source_imports.keys().any(|i| i == id)
}
/// Assigns the `as_of` frontier to the supplied argument.
///
/// This method allows the dataflow to indicate a frontier up through
/// which all times should be advanced. This can be done for at least
/// two reasons: 1. correctness and 2. performance.
///
/// Correctness may require an `as_of` to ensure that historical detail
/// is consolidated at representative times that do not present specific
/// detail that is not specifically correct. For example, updates may be
/// compacted to times that are no longer the source times, but instead
/// some byproduct of when compaction was executed; we should not present
/// those specific times as meaningfully different from other equivalent
/// times.
///
/// Performance may benefit from an aggressive `as_of` as it reduces the
/// number of distinct moments at which collections vary. Differential
/// dataflow will refresh its outputs at each time its inputs change and
/// to moderate that we can minimize the volume of distinct input times
/// as much as possible.
///
/// Generally, one should consider setting `as_of` at least to the `since`
/// frontiers of contributing data sources and as aggressively as the
/// computation permits.
pub fn set_as_of(&mut self, as_of: Antichain<T>) {
self.as_of = Some(as_of);
}
/// The number of columns associated with an identifier in the dataflow.
pub fn arity_of(&self, id: &GlobalId) -> usize |
/// Calls r and s on any sub-members of those types in self. Halts at the first error return.
pub fn visit_children<R, S, E>(&mut self, r: R, s: S) -> Result<(), E>
where
R: Fn(&mut OptimizedMirRelationExpr) -> Result<(), E>,
S: Fn(&mut MirScalarExpr) -> Result<(), E>,
{
for BuildDesc { plan, .. } in &mut self.objects_to_build {
r(plan)?;
}
for (source_instance_desc, _) in self.source_imports.values_mut() {
let Some(mfp) = source_instance_desc.arguments.operators.as_mut() else {
continue;
};
for expr in mfp.expressions.iter_mut() {
s(expr)?;
}
for (_, expr) in mfp.predicates.iter_mut() {
s(expr)?;
}
}
Ok(())
}
}
impl<P, S, T> DataflowDescription<P, S, T>
where
    P: CollectionPlan,
{
    /// Identifiers of exported objects (indexes and sinks).
    pub fn export_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
        self.index_exports
            .keys()
            .chain(self.sink_exports.keys())
            .cloned()
    }
    /// Identifiers of exported subscribe sinks.
    pub fn subscribe_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
        self.sink_exports
            .iter()
            .filter_map(|(id, desc)| match desc.connection {
                ComputeSinkConnection::Subscribe(_) => Some(*id),
                _ => None,
            })
    }
    /// Returns the description of the object to build with the specified
    /// identifier.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not present in `objects_to_build` exactly once.
    pub fn build_desc(&self, id: GlobalId) -> &BuildDesc<P> {
        let mut builds = self.objects_to_build.iter().filter(|build| build.id == id);
        let build = builds
            .next()
            .unwrap_or_else(|| panic!("object to build id {id} unexpectedly missing"));
        // A duplicate id would indicate a malformed dataflow description.
        assert!(builds.next().is_none());
        build
    }
    /// Computes the set of identifiers upon which the specified collection
    /// identifier depends.
    ///
    /// `collection_id` must specify a valid object in `objects_to_build`.
    ///
    /// This method includes identifiers for e.g. intermediate views, and should be filtered
    /// if one only wants sources and indexes.
    ///
    /// This method is safe for mutually recursive view definitions.
    pub fn depends_on(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
        let mut out = BTreeSet::new();
        self.depends_on_into(collection_id, &mut out);
        out
    }
    /// Like `depends_on`, but appends to an existing `BTreeSet`.
    pub fn depends_on_into(&self, collection_id: GlobalId, out: &mut BTreeSet<GlobalId>) {
        // Record the collection itself. This insertion also serves as the
        // visited marker that terminates recursion for mutually recursive
        // view definitions.
        out.insert(collection_id);
        if self.source_imports.contains_key(&collection_id) {
            // The collection is provided by an imported source, which was
            // already recorded above; nothing further to do.
            return;
        }
        // NOTE(benesch): we're not smart enough here to know *which* index
        // for the collection will be used, if one exists, so we have to report
        // the dependency on all of them.
        let mut found_index = false;
        for (index_id, IndexImport { desc, .. }) in &self.index_imports {
            if desc.on_id == collection_id {
                // The collection is provided by an imported index. Report the
                // dependency on the index.
                out.insert(*index_id);
                found_index = true;
            }
        }
        if found_index {
            return;
        }
        // The collection is not provided by a source or imported index.
        // It must be a collection whose plan we have handy. Recurse.
        let build = self.build_desc(collection_id);
        for id in build.plan.depends_on() {
            if !out.contains(&id) {
                self.depends_on_into(id, out)
            }
        }
    }
    /// Computes the set of imports upon which the specified collection depends.
    ///
    /// This method behaves like `depends_on` but filters out internal dependencies that are not
    /// included in the dataflow imports.
    pub fn depends_on_imports(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
        let is_import = |id: &GlobalId| {
            self.source_imports.contains_key(id) || self.index_imports.contains_key(id)
        };
        let deps = self.depends_on(collection_id);
        deps.into_iter().filter(is_import).collect()
    }
}
impl<P: PartialEq, S: PartialEq, T: timely::PartialOrder> DataflowDescription<P, S, T> {
    /// Determine if a dataflow description is compatible with this dataflow description.
    ///
    /// Compatible dataflows have equal exports, imports, and objects to build. The `as_of` of
    /// the receiver has to be less equal the `other` `as_of`.
    ///
    // TODO: The semantics of this function are only useful for command reconciliation at the moment.
    pub fn compatible_with(&self, other: &Self) -> bool {
        // Everything other than `as_of` must match exactly.
        if self.index_exports != other.index_exports
            || self.sink_exports != other.sink_exports
            || self.objects_to_build != other.objects_to_build
            || self.index_imports != other.index_imports
            || self.source_imports != other.source_imports
        {
            return false;
        }
        // Both `as_of`s must be present, and ours must not be beyond the other's.
        match (&self.as_of, &other.as_of) {
            (Some(mine), Some(theirs)) => timely::PartialOrder::less_equal(mine, theirs),
            _ => false,
        }
    }
}
impl RustType<ProtoDataflowDescription>
for DataflowDescription<crate::plan::Plan, CollectionMetadata>
{
fn into_proto(&self) -> ProtoDataflowDescription {
ProtoDataflowDescription {
source_imports: self.source_imports.into_proto(),
index_imports: self.index_imports.into_proto(),
objects_to_build: self.objects_to_build.into_proto(),
index_exports: self.index_exports.into_proto(),
sink_exports: self.sink_exports.into_proto(),
as_of: self.as_of.into_proto(),
until: Some(self.until.into_proto()),
debug_name: self.debug_name.clone(),
}
}
fn from_proto(proto: ProtoDataflowDescription) -> Result<Self, TryFromProtoError> {
Ok(DataflowDescription {
source_imports: proto.source_imports.into_rust()?,
index_imports: proto.index_imports.into_rust()?,
objects_to_build: proto.objects_to_build.into_rust()?,
index_exports: proto.index_exports.into_rust()?,
sink_exports: proto.sink_exports.into_rust()?,
as_of: proto.as_of.map(|x| x.into_rust()).transpose()?,
until: proto
.until
.map(|x| x.into_rust())
.transpose()?
.unwrap_or_else(Antichain::new),
debug_name: proto.debug_name,
})
}
}
impl ProtoMapEntry<GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)> for ProtoSourceImport {
    fn from_rust<'a>(
        (id, (desc, monotonic)): (
            &'a GlobalId,
            &'a (SourceInstanceDesc<CollectionMetadata>, bool),
        ),
    ) -> Self {
        ProtoSourceImport {
            id: Some(id.into_proto()),
            source_instance_desc: Some(desc.into_proto()),
            monotonic: monotonic.into_proto(),
        }
    }
    fn into_rust(
        self,
    ) -> Result<(GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)), TryFromProtoError> {
        let id = self.id.into_rust_if_some("ProtoSourceImport::id")?;
        let desc = self
            .source_instance_desc
            .into_rust_if_some("ProtoSourceImport::source_instance_desc")?;
        let monotonic = self.monotonic.into_rust()?;
        Ok((id, (desc, monotonic)))
    }
}
impl ProtoMapEntry<GlobalId, IndexImport> for ProtoIndexImport {
    fn from_rust<'a>(
        (
            id,
            IndexImport {
                desc,
                typ,
                monotonic,
                usage_types,
            },
        ): (&'a GlobalId, &'a IndexImport),
    ) -> Self {
        ProtoIndexImport {
            id: Some(id.into_proto()),
            index_desc: Some(desc.into_proto()),
            typ: Some(typ.into_proto()),
            monotonic: monotonic.into_proto(),
            // Protobuf cannot express `Option<Vec<_>>` directly, so `None` is
            // encoded as an empty list plus `has_usage_types == false`.
            usage_types: usage_types
                .as_ref()
                .map(|types| types.into_proto())
                .unwrap_or_default(),
            has_usage_types: usage_types.is_some(),
        }
    }
    fn into_rust(self) -> Result<(GlobalId, IndexImport), TryFromProtoError> {
        Ok((
            // Error context fixed: previously reported as "ProtoIndex::id",
            // inconsistent with the other field contexts in this impl.
            self.id.into_rust_if_some("ProtoIndexImport::id")?,
            IndexImport {
                desc: self
                    .index_desc
                    .into_rust_if_some("ProtoIndexImport::index_desc")?,
                typ: self.typ.into_rust_if_some("ProtoIndexImport::typ")?,
                monotonic: self.monotonic.into_rust()?,
                usage_types: if self.has_usage_types.into_rust()? {
                    Some(self.usage_types.into_rust()?)
                } else {
                    None
                },
            },
        ))
    }
}
impl ProtoMapEntry<GlobalId, (IndexDesc, RelationType)> for ProtoIndexExport {
    fn from_rust<'a>(
        (id, (index_desc, typ)): (&'a GlobalId, &'a (IndexDesc, RelationType)),
    ) -> Self {
        Self {
            id: Some(id.into_proto()),
            index_desc: Some(index_desc.into_proto()),
            typ: Some(typ.into_proto()),
        }
    }
    fn into_rust(self) -> Result<(GlobalId, (IndexDesc, RelationType)), TryFromProtoError> {
        let id = self.id.into_rust_if_some("ProtoIndexExport::id")?;
        let index_desc = self
            .index_desc
            .into_rust_if_some("ProtoIndexExport::index_desc")?;
        let typ = self.typ.into_rust_if_some("ProtoIndexExport::typ")?;
        Ok((id, (index_desc, typ)))
    }
}
impl ProtoMapEntry<GlobalId, ComputeSinkDesc<CollectionMetadata>> for ProtoSinkExport {
    fn from_rust<'a>(
        (id, sink_desc): (&'a GlobalId, &'a ComputeSinkDesc<CollectionMetadata>),
    ) -> Self {
        Self {
            id: Some(id.into_proto()),
            sink_desc: Some(sink_desc.into_proto()),
        }
    }
    fn into_rust(
        self,
    ) -> Result<(GlobalId, ComputeSinkDesc<CollectionMetadata>), TryFromProtoError> {
        let id = self.id.into_rust_if_some("ProtoSinkExport::id")?;
        let sink_desc = self
            .sink_desc
            .into_rust_if_some("ProtoSinkExport::sink_desc")?;
        Ok((id, sink_desc))
    }
}
impl Arbitrary for DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> {
type Strategy = BoxedStrategy<Self>;
type Parameters = ();
fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
any_dataflow_description().boxed()
}
}
proptest::prop_compose! {
fn any_dataflow_description()(
source_imports in proptest::collection::vec(any_source_import(), 1..3),
index_imports in proptest::collection::vec(any_dataflow_index_import(), 1..3),
objects_to_build in proptest::collection::vec(any::<BuildDesc<Plan>>(), 1..3),
index_exports in proptest::collection::vec(any_dataflow_index_export(), 1..3),
sink_descs in proptest::collection::vec(
any::<(GlobalId, ComputeSinkDesc<CollectionMetadata, mz_repr::Timestamp>)>(),
1..3,
),
as_of_some in any::<bool>(),
as_of in proptest::collection::vec(any::<mz_repr::Timestamp>(), 1..5),
debug_name in ".*",
) -> DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> {
DataflowDescription {
source_imports: BTreeMap::from_iter(source_imports.into_iter()),
index_imports: BTreeMap::from_iter(index_imports.into_iter()),
objects_to_build,
index_exports: BTreeMap::from_iter(index_exports.into_iter()),
sink_exports: BTreeMap::from_iter(
sink_descs.into_iter(),
),
as_of: if as_of_some {
Some(Antichain::from(as_of))
} else {
None
},
until: Antichain::new(),
debug_name,
}
}
}
fn any_source_import(
) -> impl Strategy<Value = (GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool))> {
(
any::<GlobalId>(),
any::<(SourceInstanceDesc<CollectionMetadata>, bool)>(),
)
}
proptest::prop_compose! {
fn any_dataflow_index_import()(
id in any::<GlobalId>(),
desc in any::<IndexDesc>(),
typ in any::<RelationType>(),
monotonic in any::<bool>(),
usage_types in any::<Option<Vec<IndexUsageType>>>(),
) -> (GlobalId, IndexImport) {
(id, IndexImport {desc, typ, monotonic, usage_types})
}
}
proptest::prop_compose! {
fn any_dataflow_index_export()(
id in any::<GlobalId>(),
index in any::<IndexDesc>(),
typ in any::<RelationType>(),
) -> (GlobalId, (IndexDesc, RelationType)) {
(id, (index, typ))
}
}
/// A commonly used name for dataflows containing MIR expressions.
pub type DataflowDesc = DataflowDescription<OptimizedMirRelationExpr, ()>;
/// An index storing processed updates so they can be queried
/// or reused in other computations
#[derive(Arbitrary, Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct IndexDesc {
/// Identity of the collection the index is on.
pub on_id: GlobalId,
/// Expressions to be arranged, in order of decreasing primacy.
#[proptest(strategy = "proptest::collection::vec(any::<MirScalarExpr>(), 1..3)")]
pub key: Vec<MirScalarExpr>,
}
impl RustType<ProtoIndexDesc> for IndexDesc {
    fn into_proto(&self) -> ProtoIndexDesc {
        let IndexDesc { on_id, key } = self;
        ProtoIndexDesc {
            on_id: Some(on_id.into_proto()),
            key: key.into_proto(),
        }
    }
    fn from_proto(proto: ProtoIndexDesc) -> Result<Self, TryFromProtoError> {
        let on_id = proto.on_id.into_rust_if_some("ProtoIndexDesc::on_id")?;
        let key = proto.key.into_rust()?;
        Ok(IndexDesc { on_id, key })
    }
}
/// Information about an imported index, and how it will be used by the dataflow.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct IndexImport {
/// Description of index.
pub desc: IndexDesc,
/// Schema and keys of the object the index is on.
pub typ: RelationType,
/// Whether the index will supply monotonic data.
pub monotonic: bool,
/// What kind of operation (full scan, lookup, ...) will access the index. Filled by
/// `prune_and_annotate_dataflow_index_imports`.
pub usage_types: Option<Vec<IndexUsageType>>,
}
/// An association of a global identifier to an expression.
#[derive(Arbitrary, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct BuildDesc<P> {
    /// The identifier at which the built collection will be available.
    pub id: GlobalId,
    /// The expression (or plan) evaluated to produce the collection.
    pub plan: P,
}
impl RustType<ProtoBuildDesc> for BuildDesc<crate::plan::Plan> {
    fn into_proto(&self) -> ProtoBuildDesc {
        let BuildDesc { id, plan } = self;
        ProtoBuildDesc {
            id: Some(id.into_proto()),
            plan: Some(plan.into_proto()),
        }
    }
    fn from_proto(x: ProtoBuildDesc) -> Result<Self, TryFromProtoError> {
        let id = x.id.into_rust_if_some("ProtoBuildDesc::id")?;
        let plan = x.plan.into_rust_if_some("ProtoBuildDesc::plan")?;
        Ok(BuildDesc { id, plan })
    }
}
#[cfg(test)]
mod tests {
use mz_proto::protobuf_roundtrip;
use proptest::prelude::ProptestConfig;
use proptest::proptest;
use crate::types::dataflows::DataflowDescription;
use super::*;
proptest! {
#![proptest_config(ProptestConfig::with_cases(32))]
#[mz_ore::test]
fn dataflow_description_protobuf_roundtrip(expect in any::<DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp>>()) {
let actual = protobuf_roundtrip::<_, ProtoDataflowDescription>(&expect);
assert!(actual.is_ok());
assert_eq!(actual.unwrap(), expect);
}
}
}
| {
for (source_id, (source, _monotonic)) in self.source_imports.iter() {
if source_id == id {
return source.typ.arity();
}
}
for IndexImport { desc, typ, .. } in self.index_imports.values() {
if &desc.on_id == id {
return typ.arity();
}
}
for desc in self.objects_to_build.iter() {
if &desc.id == id {
return desc.plan.arity();
}
}
panic!("GlobalId {} not found in DataflowDesc", id);
} | identifier_body |
dataflows.rs | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
//! Types for describing dataflows.
use std::collections::{BTreeMap, BTreeSet};
use mz_expr::{CollectionPlan, MirRelationExpr, MirScalarExpr, OptimizedMirRelationExpr};
use mz_proto::{IntoRustIfSome, ProtoMapEntry, ProtoType, RustType, TryFromProtoError};
use mz_repr::explain::IndexUsageType;
use mz_repr::{GlobalId, RelationType};
use mz_storage_client::controller::CollectionMetadata;
use proptest::prelude::{any, Arbitrary};
use proptest::strategy::{BoxedStrategy, Strategy};
use proptest_derive::Arbitrary;
use serde::{Deserialize, Serialize};
use timely::progress::Antichain;
use crate::plan::Plan;
use crate::types::dataflows::proto_dataflow_description::{
ProtoIndexExport, ProtoIndexImport, ProtoSinkExport, ProtoSourceImport,
};
use crate::types::sinks::{ComputeSinkConnection, ComputeSinkDesc};
use crate::types::sources::{SourceInstanceArguments, SourceInstanceDesc};
include!(concat!(
env!("OUT_DIR"),
"/mz_compute_client.types.dataflows.rs"
));
/// A description of a dataflow to construct and results to surface.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct DataflowDescription<P, S: 'static = (), T = mz_repr::Timestamp> {
/// Sources instantiations made available to the dataflow pair with monotonicity information.
pub source_imports: BTreeMap<GlobalId, (SourceInstanceDesc<S>, bool)>,
/// Indexes made available to the dataflow.
/// (id of new index, description of index, relationtype of base source/view, monotonic)
pub index_imports: BTreeMap<GlobalId, IndexImport>,
/// Views and indexes to be built and stored in the local context.
/// Objects must be built in the specific order, as there may be
/// dependencies of later objects on prior identifiers.
pub objects_to_build: Vec<BuildDesc<P>>,
/// Indexes to be made available to be shared with other dataflows
/// (id of new index, description of index, relationtype of base source/view)
pub index_exports: BTreeMap<GlobalId, (IndexDesc, RelationType)>,
/// sinks to be created
/// (id of new sink, description of sink)
pub sink_exports: BTreeMap<GlobalId, ComputeSinkDesc<S, T>>,
/// An optional frontier to which inputs should be advanced.
///
/// If this is set, it should override the default setting determined by
/// the upper bound of `since` frontiers contributing to the dataflow.
/// It is an error for this to be set to a frontier not beyond that default.
pub as_of: Option<Antichain<T>>,
/// Frontier beyond which the dataflow should not execute.
/// Specifically, updates at times greater or equal to this frontier are suppressed.
/// This is often set to `as_of + 1` to enable "batch" computations.
pub until: Antichain<T>,
/// Human readable name
pub debug_name: String,
}
impl<T> DataflowDescription<Plan<T>, (), mz_repr::Timestamp> {
    /// Tests if the dataflow refers to a single timestamp, namely
    /// that `as_of` has a single coordinate and that the `until`
    /// value corresponds to the `as_of` value plus one.
    pub fn is_single_time(&self) -> bool {
        // TODO: this would be much easier to check if `until` was a strict lower bound,
        // and we would be testing that `until == as_of`.
        let as_of = match self.as_of.as_ref() {
            Some(frontier) => frontier,
            None => return false,
        };
        if as_of.is_empty() {
            return false;
        }
        // Single time iff the sole `as_of` coordinate plus one equals `until`.
        let successor = as_of.as_option().and_then(|time| time.checked_add(1));
        successor.as_ref() == self.until.as_option()
    }
}
impl<T> DataflowDescription<OptimizedMirRelationExpr, (), T> {
/// Creates a new dataflow description with a human-readable name.
pub fn new(name: String) -> Self {
Self {
source_imports: Default::default(),
index_imports: Default::default(),
objects_to_build: Vec::new(),
index_exports: Default::default(),
sink_exports: Default::default(),
as_of: Default::default(),
until: Antichain::new(),
debug_name: name,
}
}
/// Imports a previously exported index.
///
/// This method makes available an index previously exported as `id`, identified
/// to the query by `description` (which names the view the index arranges, and
/// the keys by which it is arranged).
pub fn import_index(
&mut self,
id: GlobalId,
desc: IndexDesc,
typ: RelationType,
monotonic: bool,
) {
self.index_imports.insert(
id,
IndexImport {
desc,
typ,
monotonic,
usage_types: None,
},
);
}
/// Imports a source and makes it available as `id`.
pub fn import_source(&mut self, id: GlobalId, typ: RelationType, monotonic: bool) {
// Import the source with no linear operators applied to it.
// They may be populated by whole-dataflow optimization.
self.source_imports.insert(
id,
(
SourceInstanceDesc {
storage_metadata: (),
arguments: SourceInstanceArguments { operators: None },
typ,
},
monotonic,
),
);
}
/// Binds to `id` the relation expression `plan`.
pub fn insert_plan(&mut self, id: GlobalId, plan: OptimizedMirRelationExpr) {
self.objects_to_build.push(BuildDesc { id, plan });
}
/// Exports as `id` an index described by `description`.
///
/// Future uses of `import_index` in other dataflow descriptions may use `id`,
/// as long as this dataflow has not been terminated in the meantime.
pub fn export_index(&mut self, id: GlobalId, description: IndexDesc, on_type: RelationType) {
// We first create a "view" named `id` that ensures that the
// data are correctly arranged and available for export.
self.insert_plan(
id,
OptimizedMirRelationExpr::declare_optimized(MirRelationExpr::ArrangeBy {
input: Box::new(MirRelationExpr::global_get(
description.on_id,
on_type.clone(),
)),
keys: vec![description.key.clone()],
}),
);
self.index_exports.insert(id, (description, on_type));
}
/// Exports as `id` a sink described by `description`.
pub fn export_sink(&mut self, id: GlobalId, description: ComputeSinkDesc<(), T>) {
self.sink_exports.insert(id, description);
}
| /// Returns true iff `id` is already imported.
pub fn is_imported(&self, id: &GlobalId) -> bool {
self.objects_to_build.iter().any(|bd| &bd.id == id)
|| self.source_imports.keys().any(|i| i == id)
}
/// Assigns the `as_of` frontier to the supplied argument.
///
/// This method allows the dataflow to indicate a frontier up through
/// which all times should be advanced. This can be done for at least
/// two reasons: 1. correctness and 2. performance.
///
/// Correctness may require an `as_of` to ensure that historical detail
/// is consolidated at representative times that do not present specific
/// detail that is not specifically correct. For example, updates may be
/// compacted to times that are no longer the source times, but instead
/// some byproduct of when compaction was executed; we should not present
/// those specific times as meaningfully different from other equivalent
/// times.
///
/// Performance may benefit from an aggressive `as_of` as it reduces the
/// number of distinct moments at which collections vary. Differential
/// dataflow will refresh its outputs at each time its inputs change and
/// to moderate that we can minimize the volume of distinct input times
/// as much as possible.
///
/// Generally, one should consider setting `as_of` at least to the `since`
/// frontiers of contributing data sources and as aggressively as the
/// computation permits.
pub fn set_as_of(&mut self, as_of: Antichain<T>) {
self.as_of = Some(as_of);
}
/// The number of columns associated with an identifier in the dataflow.
pub fn arity_of(&self, id: &GlobalId) -> usize {
for (source_id, (source, _monotonic)) in self.source_imports.iter() {
if source_id == id {
return source.typ.arity();
}
}
for IndexImport { desc, typ, .. } in self.index_imports.values() {
if &desc.on_id == id {
return typ.arity();
}
}
for desc in self.objects_to_build.iter() {
if &desc.id == id {
return desc.plan.arity();
}
}
panic!("GlobalId {} not found in DataflowDesc", id);
}
/// Calls r and s on any sub-members of those types in self. Halts at the first error return.
pub fn visit_children<R, S, E>(&mut self, r: R, s: S) -> Result<(), E>
where
R: Fn(&mut OptimizedMirRelationExpr) -> Result<(), E>,
S: Fn(&mut MirScalarExpr) -> Result<(), E>,
{
for BuildDesc { plan, .. } in &mut self.objects_to_build {
r(plan)?;
}
for (source_instance_desc, _) in self.source_imports.values_mut() {
let Some(mfp) = source_instance_desc.arguments.operators.as_mut() else {
continue;
};
for expr in mfp.expressions.iter_mut() {
s(expr)?;
}
for (_, expr) in mfp.predicates.iter_mut() {
s(expr)?;
}
}
Ok(())
}
}
impl<P, S, T> DataflowDescription<P, S, T>
where
P: CollectionPlan,
{
/// Identifiers of exported objects (indexes and sinks).
pub fn export_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
self.index_exports
.keys()
.chain(self.sink_exports.keys())
.cloned()
}
/// Identifiers of exported subscribe sinks.
pub fn subscribe_ids(&self) -> impl Iterator<Item = GlobalId> + '_ {
self.sink_exports
.iter()
.filter_map(|(id, desc)| match desc.connection {
ComputeSinkConnection::Subscribe(_) => Some(*id),
_ => None,
})
}
/// Returns the description of the object to build with the specified
/// identifier.
///
/// # Panics
///
/// Panics if `id` is not present in `objects_to_build` exactly once.
pub fn build_desc(&self, id: GlobalId) -> &BuildDesc<P> {
let mut builds = self.objects_to_build.iter().filter(|build| build.id == id);
let build = builds
.next()
.unwrap_or_else(|| panic!("object to build id {id} unexpectedly missing"));
assert!(builds.next().is_none());
build
}
/// Computes the set of identifiers upon which the specified collection
/// identifier depends.
///
/// `collection_id` must specify a valid object in `objects_to_build`.
///
/// This method includes identifiers for e.g. intermediate views, and should be filtered
/// if one only wants sources and indexes.
///
/// This method is safe for mutually recursive view definitions.
pub fn depends_on(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
let mut out = BTreeSet::new();
self.depends_on_into(collection_id, &mut out);
out
}
/// Like `depends_on`, but appends to an existing `BTreeSet`.
pub fn depends_on_into(&self, collection_id: GlobalId, out: &mut BTreeSet<GlobalId>) {
out.insert(collection_id);
if self.source_imports.contains_key(&collection_id) {
// The collection is provided by an imported source. Report the
// dependency on the source.
out.insert(collection_id);
return;
}
// NOTE(benesch): we're not smart enough here to know *which* index
// for the collection will be used, if one exists, so we have to report
// the dependency on all of them.
let mut found_index = false;
for (index_id, IndexImport { desc, .. }) in &self.index_imports {
if desc.on_id == collection_id {
// The collection is provided by an imported index. Report the
// dependency on the index.
out.insert(*index_id);
found_index = true;
}
}
if found_index {
return;
}
// The collection is not provided by a source or imported index.
// It must be a collection whose plan we have handy. Recurse.
let build = self.build_desc(collection_id);
for id in build.plan.depends_on() {
if !out.contains(&id) {
self.depends_on_into(id, out)
}
}
}
/// Computes the set of imports upon which the specified collection depends.
///
/// This method behaves like `depends_on` but filters out internal dependencies that are not
/// included in the dataflow imports.
pub fn depends_on_imports(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> {
let is_import = |id: &GlobalId| {
self.source_imports.contains_key(id) || self.index_imports.contains_key(id)
};
let deps = self.depends_on(collection_id);
deps.into_iter().filter(is_import).collect()
}
}
impl<P: PartialEq, S: PartialEq, T: timely::PartialOrder> DataflowDescription<P, S, T> {
    /// Determine if a dataflow description is compatible with this dataflow description.
    ///
    /// Compatible dataflows have equal exports, imports, and objects to build. The `as_of` of
    /// the receiver has to be less equal the `other` `as_of`.
    ///
    // TODO: The semantics of this function are only useful for command reconciliation at the moment.
    pub fn compatible_with(&self, other: &Self) -> bool {
        // Everything other than `as_of` must match exactly.
        if self.index_exports != other.index_exports
            || self.sink_exports != other.sink_exports
            || self.objects_to_build != other.objects_to_build
            || self.index_imports != other.index_imports
            || self.source_imports != other.source_imports
        {
            return false;
        }
        // Both `as_of`s must be present, and ours must not be beyond the other's.
        match (&self.as_of, &other.as_of) {
            (Some(mine), Some(theirs)) => timely::PartialOrder::less_equal(mine, theirs),
            _ => false,
        }
    }
}
impl RustType<ProtoDataflowDescription>
for DataflowDescription<crate::plan::Plan, CollectionMetadata>
{
fn into_proto(&self) -> ProtoDataflowDescription {
ProtoDataflowDescription {
source_imports: self.source_imports.into_proto(),
index_imports: self.index_imports.into_proto(),
objects_to_build: self.objects_to_build.into_proto(),
index_exports: self.index_exports.into_proto(),
sink_exports: self.sink_exports.into_proto(),
as_of: self.as_of.into_proto(),
until: Some(self.until.into_proto()),
debug_name: self.debug_name.clone(),
}
}
fn from_proto(proto: ProtoDataflowDescription) -> Result<Self, TryFromProtoError> {
Ok(DataflowDescription {
source_imports: proto.source_imports.into_rust()?,
index_imports: proto.index_imports.into_rust()?,
objects_to_build: proto.objects_to_build.into_rust()?,
index_exports: proto.index_exports.into_rust()?,
sink_exports: proto.sink_exports.into_rust()?,
as_of: proto.as_of.map(|x| x.into_rust()).transpose()?,
until: proto
.until
.map(|x| x.into_rust())
.transpose()?
.unwrap_or_else(Antichain::new),
debug_name: proto.debug_name,
})
}
}
impl ProtoMapEntry<GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)> for ProtoSourceImport {
    /// Encode one source-import map entry as a proto message.
    fn from_rust<'a>(
        // Destructure the entry up front (matching the sibling impls' style)
        // instead of tuple-indexing with `entry.1 .0`.
        (id, (source_instance_desc, monotonic)): (
            &'a GlobalId,
            &'a (SourceInstanceDesc<CollectionMetadata>, bool),
        ),
    ) -> Self {
        ProtoSourceImport {
            id: Some(id.into_proto()),
            source_instance_desc: Some(source_instance_desc.into_proto()),
            monotonic: monotonic.into_proto(),
        }
    }
    /// Decode one source-import map entry, erroring on absent required fields.
    fn into_rust(
        self,
    ) -> Result<(GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)), TryFromProtoError> {
        let id = self.id.into_rust_if_some("ProtoSourceImport::id")?;
        let source_instance_desc = self
            .source_instance_desc
            .into_rust_if_some("ProtoSourceImport::source_instance_desc")?;
        let monotonic = self.monotonic.into_rust()?;
        Ok((id, (source_instance_desc, monotonic)))
    }
}
impl ProtoMapEntry<GlobalId, IndexImport> for ProtoIndexImport {
    /// Encode one index-import map entry as a proto message.
    fn from_rust<'a>(
        (
            id,
            IndexImport {
                desc,
                typ,
                monotonic,
                usage_types,
            },
        ): (&'a GlobalId, &'a IndexImport),
    ) -> Self {
        ProtoIndexImport {
            id: Some(id.into_proto()),
            index_desc: Some(desc.into_proto()),
            typ: Some(typ.into_proto()),
            monotonic: monotonic.into_proto(),
            // `None` is encoded as an empty list plus `has_usage_types == false`.
            // Avoid the previous `unwrap_or(&Vec::new())`, which allocated a
            // throwaway `Vec` even when usage types were present.
            usage_types: usage_types
                .as_ref()
                .map(|types| types.into_proto())
                .unwrap_or_default(),
            has_usage_types: usage_types.is_some(),
        }
    }
    /// Decode one index-import map entry, erroring on absent required fields.
    fn into_rust(self) -> Result<(GlobalId, IndexImport), TryFromProtoError> {
        Ok((
            // Context label fixed: was "ProtoIndex::id", which misreported the
            // failing message type; all sibling impls use "ProtoX::field".
            self.id.into_rust_if_some("ProtoIndexImport::id")?,
            IndexImport {
                desc: self
                    .index_desc
                    .into_rust_if_some("ProtoIndexImport::index_desc")?,
                typ: self.typ.into_rust_if_some("ProtoIndexImport::typ")?,
                monotonic: self.monotonic.into_rust()?,
                // `has_usage_types` distinguishes `Some(vec![])` from `None`.
                usage_types: if self.has_usage_types.into_rust()? {
                    Some(self.usage_types.into_rust()?)
                } else {
                    None
                },
            },
        ))
    }
}
impl ProtoMapEntry<GlobalId, (IndexDesc, RelationType)> for ProtoIndexExport {
    /// Encode one index-export map entry as a proto message.
    fn from_rust<'a>(
        (id, (index_desc, typ)): (&'a GlobalId, &'a (IndexDesc, RelationType)),
    ) -> Self {
        let id = Some(id.into_proto());
        let index_desc = Some(index_desc.into_proto());
        let typ = Some(typ.into_proto());
        ProtoIndexExport { id, index_desc, typ }
    }
    /// Decode one index-export map entry, erroring on absent required fields.
    fn into_rust(self) -> Result<(GlobalId, (IndexDesc, RelationType)), TryFromProtoError> {
        let id = self.id.into_rust_if_some("ProtoIndexExport::id")?;
        let index_desc = self
            .index_desc
            .into_rust_if_some("ProtoIndexExport::index_desc")?;
        let typ = self.typ.into_rust_if_some("ProtoIndexExport::typ")?;
        Ok((id, (index_desc, typ)))
    }
}
impl ProtoMapEntry<GlobalId, ComputeSinkDesc<CollectionMetadata>> for ProtoSinkExport {
    /// Encode one sink-export map entry as a proto message.
    fn from_rust<'a>(
        (id, sink_desc): (&'a GlobalId, &'a ComputeSinkDesc<CollectionMetadata>),
    ) -> Self {
        Self {
            id: Some(id.into_proto()),
            sink_desc: Some(sink_desc.into_proto()),
        }
    }
    /// Decode one sink-export map entry, erroring on absent required fields.
    fn into_rust(
        self,
    ) -> Result<(GlobalId, ComputeSinkDesc<CollectionMetadata>), TryFromProtoError> {
        let id = self.id.into_rust_if_some("ProtoSinkExport::id")?;
        let sink_desc = self
            .sink_desc
            .into_rust_if_some("ProtoSinkExport::sink_desc")?;
        Ok((id, sink_desc))
    }
}
// Hand-written `Arbitrary` impl: delegates to the composite strategy
// `any_dataflow_description` defined below rather than deriving, since the
// frontier fields are built from separately generated components there.
impl Arbitrary for DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> {
    type Strategy = BoxedStrategy<Self>;
    type Parameters = ();
    fn arbitrary_with(_: Self::Parameters) -> Self::Strategy {
        any_dataflow_description().boxed()
    }
}
proptest::prop_compose! {
    // Strategy producing an arbitrary `DataflowDescription` for roundtrip
    // tests: small (1-2 element) maps of imports/exports/objects, an
    // optional `as_of` frontier, and an always-empty `until`.
    // (Plain `//` comments only: `///` would expand to `#[doc]` tokens
    // inside the macro.)
    fn any_dataflow_description()(
        source_imports in proptest::collection::vec(any_source_import(), 1..3),
        index_imports in proptest::collection::vec(any_dataflow_index_import(), 1..3),
        objects_to_build in proptest::collection::vec(any::<BuildDesc<Plan>>(), 1..3),
        index_exports in proptest::collection::vec(any_dataflow_index_export(), 1..3),
        sink_descs in proptest::collection::vec(
            any::<(GlobalId, ComputeSinkDesc<CollectionMetadata, mz_repr::Timestamp>)>(),
            1..3,
        ),
        as_of_some in any::<bool>(),
        as_of in proptest::collection::vec(any::<mz_repr::Timestamp>(), 1..5),
        debug_name in ".*",
    ) -> DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> {
        DataflowDescription {
            // `BTreeMap::from_iter` accepts any `IntoIterator`; the previous
            // explicit `.into_iter()` calls were redundant.
            source_imports: BTreeMap::from_iter(source_imports),
            index_imports: BTreeMap::from_iter(index_imports),
            objects_to_build,
            index_exports: BTreeMap::from_iter(index_exports),
            sink_exports: BTreeMap::from_iter(sink_descs),
            // `Some` frontier or `None`, each with probability 1/2.
            as_of: as_of_some.then(|| Antichain::from(as_of)),
            until: Antichain::new(),
            debug_name,
        }
    }
}
/// A strategy yielding one source-import map entry: a [`GlobalId`] paired
/// with a `(SourceInstanceDesc, bool)` value (the `bool` corresponds to
/// `ProtoSourceImport::monotonic`).
fn any_source_import(
) -> impl Strategy<Value = (GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool))> {
    (
        any::<GlobalId>(),
        any::<(SourceInstanceDesc<CollectionMetadata>, bool)>(),
    )
}
proptest::prop_compose! {
    // Strategy yielding one `(GlobalId, IndexImport)` map entry, with every
    // `IndexImport` field generated independently.
    fn any_dataflow_index_import()(
        id in any::<GlobalId>(),
        desc in any::<IndexDesc>(),
        typ in any::<RelationType>(),
        monotonic in any::<bool>(),
        usage_types in any::<Option<Vec<IndexUsageType>>>(),
    ) -> (GlobalId, IndexImport) {
        (id, IndexImport {desc, typ, monotonic, usage_types})
    }
}
proptest::prop_compose! {
    // Strategy yielding one `(GlobalId, (IndexDesc, RelationType))` index
    // export map entry.
    fn any_dataflow_index_export()(
        id in any::<GlobalId>(),
        index in any::<IndexDesc>(),
        typ in any::<RelationType>(),
    ) -> (GlobalId, (IndexDesc, RelationType)) {
        (id, (index, typ))
    }
}
/// A commonly used name for dataflows containing MIR expressions.
pub type DataflowDesc = DataflowDescription<OptimizedMirRelationExpr, ()>;
/// An index storing processed updates so they can be queried
/// or reused in other computations.
#[derive(Arbitrary, Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
pub struct IndexDesc {
    /// Identity of the collection the index is on.
    pub on_id: GlobalId,
    /// Expressions to be arranged, in order of decreasing primacy.
    // Proptest generates 1-2 key expressions (range 1..3) to keep cases small.
    #[proptest(strategy = "proptest::collection::vec(any::<MirScalarExpr>(), 1..3)")]
    pub key: Vec<MirScalarExpr>,
}
impl RustType<ProtoIndexDesc> for IndexDesc {
    /// Convert this index description into its protobuf representation.
    fn into_proto(&self) -> ProtoIndexDesc {
        // Exhaustive destructure: a new `IndexDesc` field breaks this impl
        // at compile time instead of being silently dropped.
        let IndexDesc { on_id, key } = self;
        ProtoIndexDesc {
            on_id: Some(on_id.into_proto()),
            key: key.into_proto(),
        }
    }
    /// Reconstruct an index description, erroring if `on_id` is absent.
    fn from_proto(proto: ProtoIndexDesc) -> Result<Self, TryFromProtoError> {
        let on_id = proto.on_id.into_rust_if_some("ProtoIndexDesc::on_id")?;
        let key = proto.key.into_rust()?;
        Ok(IndexDesc { on_id, key })
    }
}
/// Information about an imported index, and how it will be used by the dataflow.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct IndexImport {
    /// Description of index.
    pub desc: IndexDesc,
    /// Schema and keys of the object the index is on.
    pub typ: RelationType,
    /// Whether the index will supply monotonic data.
    pub monotonic: bool,
    /// What kind of operation (full scan, lookup, ...) will access the index. Filled by
    /// `prune_and_annotate_dataflow_index_imports`.
    // NOTE(review): presumably `None` until that pass has run — confirm with callers.
    pub usage_types: Option<Vec<IndexUsageType>>,
}
/// An association of a global identifier to an expression.
#[derive(Arbitrary, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct BuildDesc<P> {
    /// The global identifier under which the built object is known.
    pub id: GlobalId,
    /// The plan (of type `P`, e.g. an MIR or LIR expression) to build.
    pub plan: P,
}
impl RustType<ProtoBuildDesc> for BuildDesc<crate::plan::Plan> {
    /// Convert this build description into its protobuf representation.
    fn into_proto(&self) -> ProtoBuildDesc {
        let BuildDesc { id, plan } = self;
        ProtoBuildDesc {
            id: Some(id.into_proto()),
            plan: Some(plan.into_proto()),
        }
    }
    /// Reconstruct a build description, erroring on absent required fields.
    fn from_proto(x: ProtoBuildDesc) -> Result<Self, TryFromProtoError> {
        let id = x.id.into_rust_if_some("ProtoBuildDesc::id")?;
        let plan = x.plan.into_rust_if_some("ProtoBuildDesc::plan")?;
        Ok(BuildDesc { id, plan })
    }
}
#[cfg(test)]
mod tests {
    use mz_proto::protobuf_roundtrip;
    use proptest::prelude::ProptestConfig;
    use proptest::proptest;
    use crate::types::dataflows::DataflowDescription;
    use super::*;
    proptest! {
        #![proptest_config(ProptestConfig::with_cases(32))]
        // Property test: an arbitrary `DataflowDescription` must survive a
        // roundtrip through `ProtoDataflowDescription` unchanged. 32 cases
        // keep the (expensive) generation of full dataflow descriptions fast.
        #[mz_ore::test]
        fn dataflow_description_protobuf_roundtrip(expect in any::<DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp>>()) {
            let actual = protobuf_roundtrip::<_, ProtoDataflowDescription>(&expect);
            assert!(actual.is_ok());
            assert_eq!(actual.unwrap(), expect);
        }
    }
} | random_line_split
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.