file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
tags.rs | use crate::{ResponseValue, ViewWrap};
use std::fmt::Debug;
use htmldom_read::{Node};
use crate::events::OnClick;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::fmt::Formatter;
use std::sync::Arc;
/// The functions that allow to load images concurrently.
pub mod image_loader {
use std::sync::Arc;
use crate::tags::Image;
use crate::tags::ImageFormat;
use std::collections::LinkedList;
/// Load all images from binary format from the iterator. This function is concurrent.
/// It will create multiple threads to process images in parallel. Returned value contains
/// handles to all images in the order they appeared in the iterator.
pub fn load_all(iter: &mut Iterator<Item = (Vec<u8>, ImageFormat)>) -> Vec<Arc<Image>> {
use std::sync::mpsc;
use std::thread;
// Start loading images async.
let recvs = {
let mut list = LinkedList::new();
for (arr, format) in iter {
let (tx, rx) = mpsc::channel();
list.push_back(rx);
thread::spawn(move || {
let img = Image::from_binary(arr, format);
tx.send(img).unwrap();
});
}
list
};
// Collect results.
let mut vec = Vec::with_capacity(recvs.len());
for rx in recvs {
let image = rx.recv().unwrap();
let arc = Arc::new(image);
vec.push(arc);
}
vec
}
/// Load one image into Arc.
pub fn load(bin: Vec<u8>, format: ImageFormat) -> Arc<Image> {
let img = Image::from_binary(bin, format);
Arc::new(img)
}
}
#[derive(Clone, Debug)]
pub enum TagName {
A,
Canvas,
H4,
H5,
Img,
Li,
P,
Span,
Unknown(String)
}
/// Supported canvas image formats.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ImageFormat {
Png,
Jpg,
}
/// Element in the HTML DOM that can be accessed by Rust interface.
pub trait Element: Debug {
/// Tag name of the element.
fn tag_name(&self) -> TagName;
/// HTML content of this element if it still exists.
fn dom_html(&mut self) -> Option<String> {
let req = self.view_mut().new_request();
let js = format!("\
var inner = document.getElementById('{}').outerHTML;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',
request: {},\
value: inner\
}}));
", self.id(), req.id());
let rx = req.run(js);
let response = rx.recv();
if let Err(_) = response {
return None; // likely because Null element was accessed.
}
let response = response.unwrap();
if let ResponseValue::Str(s) = response {
if s.is_empty() {
None
} else {
Some(s)
}
} else {
// Inner HTML request cannot return any other response type.
unreachable!();
}
}
/// Get attribute value of the element if any. Even if attribute is present but is empty
/// None is returned.
fn attribute(&self, name: &str) -> Option<String> {
// Unsafe because we take immutable variable `self` as mutable.
let request = unsafe {
let this = &mut *(self as *const Self as *mut Self);
this.view_mut().new_request()
};
let id = request.id();
let js = format!("\
var attr = document.getElementById('{}').getAttribute('{}');\
attr = attr == null ? '' : attr;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',\
request: {},\
value: attr\
}}));\
", self.id(), name, id);
let receiver = request.run(js);
let attr = receiver.recv().unwrap();
if let ResponseValue::Str(s) = attr {
if s == "" {
None
} else {
Some(s)
}
} else {
unreachable!()
}
}
/// Set attribute with given name to given value.
fn set_attribute(&mut self, name: &str, value: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').setAttribute('{}', '{}');",
id, name, crate::js_prefix_quotes(value)
)
);
}
/// Append given text to innerHTML field.
fn append_inner_html(&mut self, html: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').innerHTML += '{}';",
id, crate::js_prefix_quotes(html)
)
);
}
/// Clears the outerHTML of the element to remove it from HTML completely.
fn remove_from_html(&mut self) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').outerHTML = '';",
id
)
);
}
/// Element ID.
fn id(&self) -> &String;
/// Change element ID.
fn set_id(&mut self, new_id: &str) {
self.set_attribute("id", new_id)
}
fn view(&self) -> &ViewWrap;
fn view_mut(&mut self) -> &mut ViewWrap {
let p = self.view() as *const ViewWrap as *mut ViewWrap;
unsafe { &mut *p }
}
/// Check whether this element still exists.
/// Actions on non-existing elements have no effect.
fn exists(&mut self) -> bool {
self.dom_html().is_some()
}
fn add_class(&mut self, class: &str) {
let attr = self.attribute("class");
let mut attr = if let Some(s) = attr {
s
} else {
String::with_capacity(class.len())
};
attr.push(' ');
attr.push_str(class);
self.set_attribute("class", &attr);
}
fn remove_class(&mut self, class: &str) {
let attr = self.attribute("class");
if attr.is_none() {
self.set_attribute("class", class);
return;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
let mut new_str = String::with_capacity(attr.len());
for val in split {
if val != class {
new_str.push_str(val);
}
}
self.set_attribute("class", &new_str);
}
fn has_class(&self, class: &str) -> bool {
let attr = self.attribute("class");
if attr.is_none() {
return false;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
for s in split {
if s == class {
return true;
}
}
false
}
}
/// Text content can be set to some text value and read this content back.
pub trait TextContent: Element {
/// Get text contained by this element.
fn text(&self) -> String {
if let Some(s) = self.attribute("textContent") {
s
} else {
String::new()
}
}
fn set_text<T: AsRef<str>>(&mut self, text: T) {
self.set_attribute("textContent", text.as_ref())
}
}
pub trait ImageContent: Element {
/// Set image data to this element.
fn set_image(&mut self, img: Arc<Image>);
/// Get image data of this element.
fn image(&self) -> Option<&Arc<Image>>;
/// Remove any supplied image data.
fn remove_image(&mut self) -> Option<Arc<Image>>;
}
macro_rules! elm_impl {
($name: ident) => {
impl Element for $name {
fn view(&self) -> &ViewWrap {
&self.view
}
fn id(&self) -> &String {
&self.id
}
fn tag_name(&self) -> TagName {
TagName::$name
}
}
}
}
/// Wrap that gives access to the dynamic element which is known to be of given type.
#[derive(Debug)]
pub struct Wrap<T: Element> {
element: Box<dyn Element>,
_p: PhantomData<T>,
}
/// Image data of canvas.
#[derive(Clone)]
pub struct Image {
base64: String,
format: ImageFormat,
}
#[derive(Debug)]
pub struct A {
view: ViewWrap,
id: String,
onclick: OnClick<A>,
}
#[derive(Debug)]
pub struct Canvas {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H4 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H5 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Img {
view: ViewWrap,
id: String,
data: Option<Arc<Image>>,
}
#[derive(Clone, Debug)]
pub struct Li {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct P {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Span {
view: ViewWrap,
id: String,
}
elm_impl!(A);
elm_impl!(Canvas);
elm_impl!(H4);
elm_impl!(H5);
elm_impl!(Img);
elm_impl!(Li);
elm_impl!(P);
elm_impl!(Span);
#[derive(Clone, Debug)]
pub struct Unknown {
view: ViewWrap,
id: String,
name: String,
}
impl<T> Wrap<T> where T: Element {
/// Wrap given element.
///
/// # Safety
/// Programmer must be sure this element has expected type.
pub unsafe fn new(element: Box<dyn Element>) -> Self {
Wrap {
element,
_p: Default::default(),
}
}
}
impl<T> Deref for Wrap<T> where T: Element {
type Target = Box<T>;
fn deref(&self) -> &Box<T> {
let b = &self.element;
let ptr = b as *const Box<dyn Element> as *const Box<T>;
unsafe { &*ptr }
}
}
impl<T> DerefMut for Wrap<T> where T: Element {
fn deref_mut(&mut self) -> &mut Box<T> {
let b = &mut self.element;
let ptr = b as *mut Box<dyn Element> as *mut Box<T>;
unsafe { &mut *ptr }
}
}
impl Debug for Image {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "Image {{ base64: [char; ")?;
write!(fmt, "{}", self.base64.len())?;
write!(fmt, "], format: ")?;
write!(fmt, "{:?}", self.format)?;
write!(fmt, " }}")?;
Ok(())
}
}
impl From<&str> for TagName {
fn from(s: &str) -> Self {
use self::TagName::*;
match s.to_lowercase().as_str() {
"a" => A,
"canvas" => Canvas,
"h4" => H4,
"h5" => H5,
"img" => Img,
"li" => Li,
"p" => P,
"span" => Span,
_ => Unknown(String::from(s)),
}
}
}
impl TagName {
/// Create implementation of the tag by it's tag name.
pub fn new_impl(&self, view: ViewWrap, id: String) -> Box<dyn Element> {
match self {
TagName::A => {
let mut b = Box::new(A {
view,
id,
onclick: unsafe { OnClick::null() },
});
let onclick = unsafe { OnClick::new(&mut *b) };
b.onclick = onclick;
b
},
TagName::Canvas => {
Box::new(Canvas {
view,
id,
})
},
TagName::H4 => Box::new(
H4 {
view,
id,
}
),
TagName::H5 => Box::new(
H4 {
view,
id,
}
),
TagName::Img => Box::new(
Img {
view,
id,
data: None,
}
),
TagName::Li => Box::new (
Li {
view,
id,
}
),
TagName::P => Box::new(P { view, id }),
TagName::Span => Box::new(Span { view, id }),
TagName::Unknown(name) => Box::new(Unknown {
view,
id,
name: name.clone(),
}),
}
}
/// Try creating TagName from this node.
pub fn try_from_node(node: &Node) -> Option<Self> {
let tag_name = node.tag_name();
if let Some(tag_name) = tag_name {
let tag_name = TagName::from(tag_name);
Some(tag_name)
} else {
None
}
}
/// Try creating implementation of the Element from this node.
///
/// # Failures
/// Node must contain ID of the element. It also is required to contain opening tag
/// which corresponds to element tag. If either of conditions is not met this function
/// will return None.
pub fn try_impl_from_node(node: &Node, view: ViewWrap) -> Option<Box<dyn Element>> {
let tag_name = Self::try_from_node(node);
if let Some(tag_name) = tag_name {
let id = node.attribute_by_name("id");
if let Some(id) = id {
Some(tag_name.new_impl(view, id.values_to_string()))
} else {
None
}
} else {
None
}
}
}
impl ImageFormat {
pub fn to_string(&self) -> String {
use ImageFormat::*;
match self {
Jpg => "jpg",
Png => "png",
}.to_string()
}
}
impl Image {
/// Encode given array of bytes in Base64 encoding.
pub fn base64(bin: Vec<u8>) -> String {
base64::encode(&bin)
}
/// Generate image struct from given array.
pub fn from_binary(bin: Vec<u8>, format: ImageFormat) -> Image {
Image {
base64: Self::base64(bin),
format,
}
}
/// Convert this image to string that can be supplied to 'src' attribute of <img> tag.
pub fn to_img_string(&self) -> String {
format!("data:image/{};base64,{}", self.format.to_string(), self.base64)
}
}
impl A {
pub fn href(&self) -> String |
pub fn set_href<T: AsRef<str>>(&mut self, href: T) {
self.set_attribute("href", href.as_ref())
}
pub fn onclick(&self) -> &OnClick<A> {
&self.onclick
}
pub fn onclick_mut(&mut self) -> &mut OnClick<A> {
&mut self.onclick
}
}
impl ImageContent for Img {
fn set_image(&mut self, img: Arc<Image>) {
self.data = Some(img);
self.set_attribute("src", &self.data.as_ref().unwrap().to_img_string());
}
fn image(&self) -> Option<&Arc<Image>> {
self.data.as_ref()
}
fn remove_image(&mut self) -> Option<Arc<Image>> {
let mut img: Option<Arc<Image>> = None;
std::mem::swap(&mut img, &mut self.data);
img
}
}
impl TextContent for A {}
impl TextContent for H4 {}
impl TextContent for H5 {}
impl TextContent for Li {}
impl TextContent for P {}
impl TextContent for Span {}
impl Element for Unknown {
fn tag_name(&self) -> TagName {
TagName::Unknown(self.id.clone())
}
fn id(&self) -> &String {
&self.id
}
fn view(&self) -> &ViewWrap {
&self.view
}
}
| {
if let Some(s) = self.attribute("href") {
s
} else {
String::new()
}
} | identifier_body |
tags.rs | use crate::{ResponseValue, ViewWrap};
use std::fmt::Debug;
use htmldom_read::{Node};
use crate::events::OnClick;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::fmt::Formatter;
use std::sync::Arc;
/// The functions that allow to load images concurrently.
pub mod image_loader {
use std::sync::Arc;
use crate::tags::Image;
use crate::tags::ImageFormat;
use std::collections::LinkedList;
/// Load all images from binary format from the iterator. This function is concurrent.
/// It will create multiple threads to process images in parallel. Returned value contains
/// handles to all images in the order they appeared in the iterator.
pub fn load_all(iter: &mut Iterator<Item = (Vec<u8>, ImageFormat)>) -> Vec<Arc<Image>> {
use std::sync::mpsc;
use std::thread;
// Start loading images async.
let recvs = {
let mut list = LinkedList::new();
for (arr, format) in iter {
let (tx, rx) = mpsc::channel();
list.push_back(rx);
thread::spawn(move || {
let img = Image::from_binary(arr, format);
tx.send(img).unwrap();
});
}
list
};
// Collect results.
let mut vec = Vec::with_capacity(recvs.len());
for rx in recvs {
let image = rx.recv().unwrap();
let arc = Arc::new(image);
vec.push(arc);
}
vec
}
/// Load one image into Arc.
pub fn load(bin: Vec<u8>, format: ImageFormat) -> Arc<Image> {
let img = Image::from_binary(bin, format);
Arc::new(img)
}
}
#[derive(Clone, Debug)]
pub enum TagName {
A,
Canvas,
H4,
H5,
Img,
Li,
P,
Span, |
Unknown(String)
}
/// Supported canvas image formats.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ImageFormat {
Png,
Jpg,
}
/// Element in the HTML DOM that can be accessed by Rust interface.
pub trait Element: Debug {
/// Tag name of the element.
fn tag_name(&self) -> TagName;
/// HTML content of this element if it still exists.
fn dom_html(&mut self) -> Option<String> {
let req = self.view_mut().new_request();
let js = format!("\
var inner = document.getElementById('{}').outerHTML;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',
request: {},\
value: inner\
}}));
", self.id(), req.id());
let rx = req.run(js);
let response = rx.recv();
if let Err(_) = response {
return None; // likely because Null element was accessed.
}
let response = response.unwrap();
if let ResponseValue::Str(s) = response {
if s.is_empty() {
None
} else {
Some(s)
}
} else {
// Inner HTML request cannot return any other response type.
unreachable!();
}
}
/// Get attribute value of the element if any. Even if attribute is present but is empty
/// None is returned.
fn attribute(&self, name: &str) -> Option<String> {
// Unsafe because we take immutable variable `self` as mutable.
let request = unsafe {
let this = &mut *(self as *const Self as *mut Self);
this.view_mut().new_request()
};
let id = request.id();
let js = format!("\
var attr = document.getElementById('{}').getAttribute('{}');\
attr = attr == null ? '' : attr;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',\
request: {},\
value: attr\
}}));\
", self.id(), name, id);
let receiver = request.run(js);
let attr = receiver.recv().unwrap();
if let ResponseValue::Str(s) = attr {
if s == "" {
None
} else {
Some(s)
}
} else {
unreachable!()
}
}
/// Set attribute with given name to given value.
fn set_attribute(&mut self, name: &str, value: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').setAttribute('{}', '{}');",
id, name, crate::js_prefix_quotes(value)
)
);
}
/// Append given text to innerHTML field.
fn append_inner_html(&mut self, html: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').innerHTML += '{}';",
id, crate::js_prefix_quotes(html)
)
);
}
/// Clears the outerHTML of the element to remove it from HTML completely.
fn remove_from_html(&mut self) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').outerHTML = '';",
id
)
);
}
/// Element ID.
fn id(&self) -> &String;
/// Change element ID.
fn set_id(&mut self, new_id: &str) {
self.set_attribute("id", new_id)
}
fn view(&self) -> &ViewWrap;
fn view_mut(&mut self) -> &mut ViewWrap {
let p = self.view() as *const ViewWrap as *mut ViewWrap;
unsafe { &mut *p }
}
/// Check whether this element still exists.
/// Actions on non-existing elements have no effect.
fn exists(&mut self) -> bool {
self.dom_html().is_some()
}
fn add_class(&mut self, class: &str) {
let attr = self.attribute("class");
let mut attr = if let Some(s) = attr {
s
} else {
String::with_capacity(class.len())
};
attr.push(' ');
attr.push_str(class);
self.set_attribute("class", &attr);
}
fn remove_class(&mut self, class: &str) {
let attr = self.attribute("class");
if attr.is_none() {
self.set_attribute("class", class);
return;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
let mut new_str = String::with_capacity(attr.len());
for val in split {
if val != class {
new_str.push_str(val);
}
}
self.set_attribute("class", &new_str);
}
fn has_class(&self, class: &str) -> bool {
let attr = self.attribute("class");
if attr.is_none() {
return false;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
for s in split {
if s == class {
return true;
}
}
false
}
}
/// Text content can be set to some text value and read this content back.
pub trait TextContent: Element {
/// Get text contained by this element.
fn text(&self) -> String {
if let Some(s) = self.attribute("textContent") {
s
} else {
String::new()
}
}
fn set_text<T: AsRef<str>>(&mut self, text: T) {
self.set_attribute("textContent", text.as_ref())
}
}
pub trait ImageContent: Element {
/// Set image data to this element.
fn set_image(&mut self, img: Arc<Image>);
/// Get image data of this element.
fn image(&self) -> Option<&Arc<Image>>;
/// Remove any supplied image data.
fn remove_image(&mut self) -> Option<Arc<Image>>;
}
macro_rules! elm_impl {
($name: ident) => {
impl Element for $name {
fn view(&self) -> &ViewWrap {
&self.view
}
fn id(&self) -> &String {
&self.id
}
fn tag_name(&self) -> TagName {
TagName::$name
}
}
}
}
/// Wrap that gives access to the dynamic element which is known to be of given type.
#[derive(Debug)]
pub struct Wrap<T: Element> {
element: Box<dyn Element>,
_p: PhantomData<T>,
}
/// Image data of canvas.
#[derive(Clone)]
pub struct Image {
base64: String,
format: ImageFormat,
}
#[derive(Debug)]
pub struct A {
view: ViewWrap,
id: String,
onclick: OnClick<A>,
}
#[derive(Debug)]
pub struct Canvas {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H4 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H5 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Img {
view: ViewWrap,
id: String,
data: Option<Arc<Image>>,
}
#[derive(Clone, Debug)]
pub struct Li {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct P {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Span {
view: ViewWrap,
id: String,
}
elm_impl!(A);
elm_impl!(Canvas);
elm_impl!(H4);
elm_impl!(H5);
elm_impl!(Img);
elm_impl!(Li);
elm_impl!(P);
elm_impl!(Span);
#[derive(Clone, Debug)]
pub struct Unknown {
view: ViewWrap,
id: String,
name: String,
}
impl<T> Wrap<T> where T: Element {
/// Wrap given element.
///
/// # Safety
/// Programmer must be sure this element has expected type.
pub unsafe fn new(element: Box<dyn Element>) -> Self {
Wrap {
element,
_p: Default::default(),
}
}
}
impl<T> Deref for Wrap<T> where T: Element {
type Target = Box<T>;
fn deref(&self) -> &Box<T> {
let b = &self.element;
let ptr = b as *const Box<dyn Element> as *const Box<T>;
unsafe { &*ptr }
}
}
impl<T> DerefMut for Wrap<T> where T: Element {
fn deref_mut(&mut self) -> &mut Box<T> {
let b = &mut self.element;
let ptr = b as *mut Box<dyn Element> as *mut Box<T>;
unsafe { &mut *ptr }
}
}
impl Debug for Image {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "Image {{ base64: [char; ")?;
write!(fmt, "{}", self.base64.len())?;
write!(fmt, "], format: ")?;
write!(fmt, "{:?}", self.format)?;
write!(fmt, " }}")?;
Ok(())
}
}
impl From<&str> for TagName {
fn from(s: &str) -> Self {
use self::TagName::*;
match s.to_lowercase().as_str() {
"a" => A,
"canvas" => Canvas,
"h4" => H4,
"h5" => H5,
"img" => Img,
"li" => Li,
"p" => P,
"span" => Span,
_ => Unknown(String::from(s)),
}
}
}
impl TagName {
/// Create implementation of the tag by it's tag name.
pub fn new_impl(&self, view: ViewWrap, id: String) -> Box<dyn Element> {
match self {
TagName::A => {
let mut b = Box::new(A {
view,
id,
onclick: unsafe { OnClick::null() },
});
let onclick = unsafe { OnClick::new(&mut *b) };
b.onclick = onclick;
b
},
TagName::Canvas => {
Box::new(Canvas {
view,
id,
})
},
TagName::H4 => Box::new(
H4 {
view,
id,
}
),
TagName::H5 => Box::new(
H4 {
view,
id,
}
),
TagName::Img => Box::new(
Img {
view,
id,
data: None,
}
),
TagName::Li => Box::new (
Li {
view,
id,
}
),
TagName::P => Box::new(P { view, id }),
TagName::Span => Box::new(Span { view, id }),
TagName::Unknown(name) => Box::new(Unknown {
view,
id,
name: name.clone(),
}),
}
}
/// Try creating TagName from this node.
pub fn try_from_node(node: &Node) -> Option<Self> {
let tag_name = node.tag_name();
if let Some(tag_name) = tag_name {
let tag_name = TagName::from(tag_name);
Some(tag_name)
} else {
None
}
}
/// Try creating implementation of the Element from this node.
///
/// # Failures
/// Node must contain ID of the element. It also is required to contain opening tag
/// which corresponds to element tag. If either of conditions is not met this function
/// will return None.
pub fn try_impl_from_node(node: &Node, view: ViewWrap) -> Option<Box<dyn Element>> {
let tag_name = Self::try_from_node(node);
if let Some(tag_name) = tag_name {
let id = node.attribute_by_name("id");
if let Some(id) = id {
Some(tag_name.new_impl(view, id.values_to_string()))
} else {
None
}
} else {
None
}
}
}
impl ImageFormat {
pub fn to_string(&self) -> String {
use ImageFormat::*;
match self {
Jpg => "jpg",
Png => "png",
}.to_string()
}
}
impl Image {
/// Encode given array of bytes in Base64 encoding.
pub fn base64(bin: Vec<u8>) -> String {
base64::encode(&bin)
}
/// Generate image struct from given array.
pub fn from_binary(bin: Vec<u8>, format: ImageFormat) -> Image {
Image {
base64: Self::base64(bin),
format,
}
}
/// Convert this image to string that can be supplied to 'src' attribute of <img> tag.
pub fn to_img_string(&self) -> String {
format!("data:image/{};base64,{}", self.format.to_string(), self.base64)
}
}
impl A {
pub fn href(&self) -> String {
if let Some(s) = self.attribute("href") {
s
} else {
String::new()
}
}
pub fn set_href<T: AsRef<str>>(&mut self, href: T) {
self.set_attribute("href", href.as_ref())
}
pub fn onclick(&self) -> &OnClick<A> {
&self.onclick
}
pub fn onclick_mut(&mut self) -> &mut OnClick<A> {
&mut self.onclick
}
}
impl ImageContent for Img {
fn set_image(&mut self, img: Arc<Image>) {
self.data = Some(img);
self.set_attribute("src", &self.data.as_ref().unwrap().to_img_string());
}
fn image(&self) -> Option<&Arc<Image>> {
self.data.as_ref()
}
fn remove_image(&mut self) -> Option<Arc<Image>> {
let mut img: Option<Arc<Image>> = None;
std::mem::swap(&mut img, &mut self.data);
img
}
}
impl TextContent for A {}
impl TextContent for H4 {}
impl TextContent for H5 {}
impl TextContent for Li {}
impl TextContent for P {}
impl TextContent for Span {}
impl Element for Unknown {
fn tag_name(&self) -> TagName {
TagName::Unknown(self.id.clone())
}
fn id(&self) -> &String {
&self.id
}
fn view(&self) -> &ViewWrap {
&self.view
}
} | random_line_split | |
tags.rs | use crate::{ResponseValue, ViewWrap};
use std::fmt::Debug;
use htmldom_read::{Node};
use crate::events::OnClick;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::fmt::Formatter;
use std::sync::Arc;
/// The functions that allow to load images concurrently.
pub mod image_loader {
use std::sync::Arc;
use crate::tags::Image;
use crate::tags::ImageFormat;
use std::collections::LinkedList;
/// Load all images from binary format from the iterator. This function is concurrent.
/// It will create multiple threads to process images in parallel. Returned value contains
/// handles to all images in the order they appeared in the iterator.
pub fn load_all(iter: &mut Iterator<Item = (Vec<u8>, ImageFormat)>) -> Vec<Arc<Image>> {
use std::sync::mpsc;
use std::thread;
// Start loading images async.
let recvs = {
let mut list = LinkedList::new();
for (arr, format) in iter {
let (tx, rx) = mpsc::channel();
list.push_back(rx);
thread::spawn(move || {
let img = Image::from_binary(arr, format);
tx.send(img).unwrap();
});
}
list
};
// Collect results.
let mut vec = Vec::with_capacity(recvs.len());
for rx in recvs {
let image = rx.recv().unwrap();
let arc = Arc::new(image);
vec.push(arc);
}
vec
}
/// Load one image into Arc.
pub fn load(bin: Vec<u8>, format: ImageFormat) -> Arc<Image> {
let img = Image::from_binary(bin, format);
Arc::new(img)
}
}
#[derive(Clone, Debug)]
pub enum TagName {
A,
Canvas,
H4,
H5,
Img,
Li,
P,
Span,
Unknown(String)
}
/// Supported canvas image formats.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ImageFormat {
Png,
Jpg,
}
/// Element in the HTML DOM that can be accessed by Rust interface.
pub trait Element: Debug {
/// Tag name of the element.
fn tag_name(&self) -> TagName;
/// HTML content of this element if it still exists.
fn dom_html(&mut self) -> Option<String> {
let req = self.view_mut().new_request();
let js = format!("\
var inner = document.getElementById('{}').outerHTML;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',
request: {},\
value: inner\
}}));
", self.id(), req.id());
let rx = req.run(js);
let response = rx.recv();
if let Err(_) = response {
return None; // likely because Null element was accessed.
}
let response = response.unwrap();
if let ResponseValue::Str(s) = response {
if s.is_empty() {
None
} else {
Some(s)
}
} else {
// Inner HTML request cannot return any other response type.
unreachable!();
}
}
/// Get attribute value of the element if any. Even if attribute is present but is empty
/// None is returned.
fn attribute(&self, name: &str) -> Option<String> {
// Unsafe because we take immutable variable `self` as mutable.
let request = unsafe {
let this = &mut *(self as *const Self as *mut Self);
this.view_mut().new_request()
};
let id = request.id();
let js = format!("\
var attr = document.getElementById('{}').getAttribute('{}');\
attr = attr == null ? '' : attr;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',\
request: {},\
value: attr\
}}));\
", self.id(), name, id);
let receiver = request.run(js);
let attr = receiver.recv().unwrap();
if let ResponseValue::Str(s) = attr {
if s == "" {
None
} else {
Some(s)
}
} else {
unreachable!()
}
}
/// Set attribute with given name to given value.
fn set_attribute(&mut self, name: &str, value: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').setAttribute('{}', '{}');",
id, name, crate::js_prefix_quotes(value)
)
);
}
/// Append given text to innerHTML field.
fn append_inner_html(&mut self, html: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').innerHTML += '{}';",
id, crate::js_prefix_quotes(html)
)
);
}
/// Clears the outerHTML of the element to remove it from HTML completely.
fn remove_from_html(&mut self) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').outerHTML = '';",
id
)
);
}
/// Element ID.
fn id(&self) -> &String;
/// Change element ID.
fn set_id(&mut self, new_id: &str) {
self.set_attribute("id", new_id)
}
fn view(&self) -> &ViewWrap;
fn view_mut(&mut self) -> &mut ViewWrap {
let p = self.view() as *const ViewWrap as *mut ViewWrap;
unsafe { &mut *p }
}
/// Check whether this element still exists.
/// Actions on non-existing elements have no effect.
fn exists(&mut self) -> bool {
self.dom_html().is_some()
}
fn add_class(&mut self, class: &str) {
let attr = self.attribute("class");
let mut attr = if let Some(s) = attr {
s
} else {
String::with_capacity(class.len())
};
attr.push(' ');
attr.push_str(class);
self.set_attribute("class", &attr);
}
fn remove_class(&mut self, class: &str) {
let attr = self.attribute("class");
if attr.is_none() {
self.set_attribute("class", class);
return;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
let mut new_str = String::with_capacity(attr.len());
for val in split {
if val != class {
new_str.push_str(val);
}
}
self.set_attribute("class", &new_str);
}
fn has_class(&self, class: &str) -> bool {
let attr = self.attribute("class");
if attr.is_none() {
return false;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
for s in split {
if s == class {
return true;
}
}
false
}
}
/// Text content can be set to some text value and read this content back.
pub trait TextContent: Element {
/// Get text contained by this element.
fn text(&self) -> String {
if let Some(s) = self.attribute("textContent") {
s
} else {
String::new()
}
}
fn set_text<T: AsRef<str>>(&mut self, text: T) {
self.set_attribute("textContent", text.as_ref())
}
}
pub trait ImageContent: Element {
/// Set image data to this element.
fn set_image(&mut self, img: Arc<Image>);
/// Get image data of this element.
fn image(&self) -> Option<&Arc<Image>>;
/// Remove any supplied image data.
fn remove_image(&mut self) -> Option<Arc<Image>>;
}
macro_rules! elm_impl {
($name: ident) => {
impl Element for $name {
fn view(&self) -> &ViewWrap {
&self.view
}
fn id(&self) -> &String {
&self.id
}
fn tag_name(&self) -> TagName {
TagName::$name
}
}
}
}
/// Wrap that gives access to the dynamic element which is known to be of given type.
#[derive(Debug)]
pub struct Wrap<T: Element> {
element: Box<dyn Element>,
_p: PhantomData<T>,
}
/// Image data of canvas.
#[derive(Clone)]
pub struct Image {
base64: String,
format: ImageFormat,
}
#[derive(Debug)]
pub struct A {
view: ViewWrap,
id: String,
onclick: OnClick<A>,
}
#[derive(Debug)]
pub struct Canvas {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H4 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H5 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Img {
view: ViewWrap,
id: String,
data: Option<Arc<Image>>,
}
#[derive(Clone, Debug)]
pub struct Li {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct P {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Span {
view: ViewWrap,
id: String,
}
elm_impl!(A);
elm_impl!(Canvas);
elm_impl!(H4);
elm_impl!(H5);
elm_impl!(Img);
elm_impl!(Li);
elm_impl!(P);
elm_impl!(Span);
#[derive(Clone, Debug)]
pub struct Unknown {
view: ViewWrap,
id: String,
name: String,
}
impl<T> Wrap<T> where T: Element {
/// Wrap given element.
///
/// # Safety
/// Programmer must be sure this element has expected type.
pub unsafe fn new(element: Box<dyn Element>) -> Self {
Wrap {
element,
_p: Default::default(),
}
}
}
impl<T> Deref for Wrap<T> where T: Element {
type Target = Box<T>;
fn deref(&self) -> &Box<T> {
let b = &self.element;
let ptr = b as *const Box<dyn Element> as *const Box<T>;
unsafe { &*ptr }
}
}
impl<T> DerefMut for Wrap<T> where T: Element {
fn deref_mut(&mut self) -> &mut Box<T> {
let b = &mut self.element;
let ptr = b as *mut Box<dyn Element> as *mut Box<T>;
unsafe { &mut *ptr }
}
}
impl Debug for Image {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "Image {{ base64: [char; ")?;
write!(fmt, "{}", self.base64.len())?;
write!(fmt, "], format: ")?;
write!(fmt, "{:?}", self.format)?;
write!(fmt, " }}")?;
Ok(())
}
}
impl From<&str> for TagName {
fn from(s: &str) -> Self {
use self::TagName::*;
match s.to_lowercase().as_str() {
"a" => A,
"canvas" => Canvas,
"h4" => H4,
"h5" => H5,
"img" => Img,
"li" => Li,
"p" => P,
"span" => Span,
_ => Unknown(String::from(s)),
}
}
}
impl TagName {
/// Create implementation of the tag by it's tag name.
pub fn new_impl(&self, view: ViewWrap, id: String) -> Box<dyn Element> {
match self {
TagName::A => {
let mut b = Box::new(A {
view,
id,
onclick: unsafe { OnClick::null() },
});
let onclick = unsafe { OnClick::new(&mut *b) };
b.onclick = onclick;
b
},
TagName::Canvas => {
Box::new(Canvas {
view,
id,
})
},
TagName::H4 => Box::new(
H4 {
view,
id,
}
),
TagName::H5 => Box::new(
H4 {
view,
id,
}
),
TagName::Img => Box::new(
Img {
view,
id,
data: None,
}
),
TagName::Li => Box::new (
Li {
view,
id,
}
),
TagName::P => Box::new(P { view, id }),
TagName::Span => Box::new(Span { view, id }),
TagName::Unknown(name) => Box::new(Unknown {
view,
id,
name: name.clone(),
}),
}
}
/// Try creating TagName from this node.
pub fn | (node: &Node) -> Option<Self> {
let tag_name = node.tag_name();
if let Some(tag_name) = tag_name {
let tag_name = TagName::from(tag_name);
Some(tag_name)
} else {
None
}
}
/// Try creating implementation of the Element from this node.
///
/// # Failures
/// Node must contain ID of the element. It also is required to contain opening tag
/// which corresponds to element tag. If either of conditions is not met this function
/// will return None.
pub fn try_impl_from_node(node: &Node, view: ViewWrap) -> Option<Box<dyn Element>> {
let tag_name = Self::try_from_node(node);
if let Some(tag_name) = tag_name {
let id = node.attribute_by_name("id");
if let Some(id) = id {
Some(tag_name.new_impl(view, id.values_to_string()))
} else {
None
}
} else {
None
}
}
}
impl ImageFormat {
pub fn to_string(&self) -> String {
use ImageFormat::*;
match self {
Jpg => "jpg",
Png => "png",
}.to_string()
}
}
impl Image {
/// Encode given array of bytes in Base64 encoding.
pub fn base64(bin: Vec<u8>) -> String {
base64::encode(&bin)
}
/// Generate image struct from given array.
pub fn from_binary(bin: Vec<u8>, format: ImageFormat) -> Image {
Image {
base64: Self::base64(bin),
format,
}
}
/// Convert this image to string that can be supplied to 'src' attribute of <img> tag.
pub fn to_img_string(&self) -> String {
format!("data:image/{};base64,{}", self.format.to_string(), self.base64)
}
}
impl A {
pub fn href(&self) -> String {
if let Some(s) = self.attribute("href") {
s
} else {
String::new()
}
}
pub fn set_href<T: AsRef<str>>(&mut self, href: T) {
self.set_attribute("href", href.as_ref())
}
pub fn onclick(&self) -> &OnClick<A> {
&self.onclick
}
pub fn onclick_mut(&mut self) -> &mut OnClick<A> {
&mut self.onclick
}
}
impl ImageContent for Img {
fn set_image(&mut self, img: Arc<Image>) {
self.data = Some(img);
self.set_attribute("src", &self.data.as_ref().unwrap().to_img_string());
}
fn image(&self) -> Option<&Arc<Image>> {
self.data.as_ref()
}
fn remove_image(&mut self) -> Option<Arc<Image>> {
let mut img: Option<Arc<Image>> = None;
std::mem::swap(&mut img, &mut self.data);
img
}
}
impl TextContent for A {}
impl TextContent for H4 {}
impl TextContent for H5 {}
impl TextContent for Li {}
impl TextContent for P {}
impl TextContent for Span {}
impl Element for Unknown {
fn tag_name(&self) -> TagName {
TagName::Unknown(self.id.clone())
}
fn id(&self) -> &String {
&self.id
}
fn view(&self) -> &ViewWrap {
&self.view
}
}
| try_from_node | identifier_name |
destination.rs | use super::pandas_columns::{
BooleanBlock, BytesBlock, DateTimeBlock, Float64Block, HasPandasColumn, Int64Block,
PandasColumn, PandasColumnObject, StringBlock,
};
use super::types::{PandasDType, PandasTypeSystem};
use anyhow::anyhow;
use connectorx::{
ConnectorAgentError, Consume, DataOrder, Destination, DestinationPartition, Result, TypeAssoc,
TypeSystem,
};
use fehler::{throw, throws};
use itertools::Itertools;
use log::debug;
use pyo3::{
types::{PyDict, PyList},
FromPyObject, PyAny, Python,
};
use std::collections::HashMap;
use std::mem::transmute;
pub struct PandasDestination<'py> {
py: Python<'py>,
nrows: Option<usize>,
schema: Option<Vec<PandasTypeSystem>>,
buffers: Option<&'py PyList>,
buffer_column_index: Option<Vec<Vec<usize>>>,
dataframe: Option<&'py PyAny>, // Using this field other than the return purpose should be careful: this refers to the same data as buffers
}
impl<'a> PandasDestination<'a> {
pub fn new(py: Python<'a>) -> Self {
PandasDestination {
py,
nrows: None,
schema: None,
buffers: None,
buffer_column_index: None,
dataframe: None,
}
}
pub fn result(self) -> Option<&'a PyAny> {
self.dataframe
}
}
impl<'a> Destination for PandasDestination<'a> {
const DATA_ORDERS: &'static [DataOrder] = &[DataOrder::RowMajor];
type TypeSystem = PandasTypeSystem;
type Partition<'b> = PandasPartitionDestination<'b>;
#[throws(ConnectorAgentError)]
fn allocate<S: AsRef<str>>(
&mut self,
nrows: usize,
names: &[S],
schema: &[PandasTypeSystem],
data_order: DataOrder,
) {
if !matches!(data_order, DataOrder::RowMajor) {
throw!(ConnectorAgentError::UnsupportedDataOrder(data_order))
}
if matches!(self.nrows, Some(_)) {
throw!(ConnectorAgentError::DuplicatedAllocation);
}
let (df, buffers, index) = create_dataframe(self.py, names, schema, nrows)?;
debug!("DataFrame created");
// get index for each column: (index of block, index of column within the block)
let mut column_buffer_index: Vec<(usize, usize)> = Vec::with_capacity(index.len());
index.iter().try_for_each(|tuple| -> Result<()> {
column_buffer_index.push(tuple.extract().map_err(|e| {
anyhow!("cannot extract index tuple for `column_buffer_index` {}", e)
})?);
Ok(())
})?;
let nbuffers = buffers.len();
// buffer_column_index[i][j] = the column id of the j-th row (pandas buffer stores columns row-wise) in the i-th buffer.
let mut buffer_column_index = vec![vec![]; nbuffers];
let mut column_buffer_index_cid: Vec<_> = column_buffer_index.iter().enumerate().collect();
column_buffer_index_cid.sort_by_key(|(_, blk)| *blk);
for (cid, &(blkno, _)) in column_buffer_index_cid {
buffer_column_index[blkno].push(cid);
}
self.nrows = Some(nrows);
self.schema = Some(schema.to_vec());
self.buffers = Some(buffers);
self.buffer_column_index = Some(buffer_column_index);
self.dataframe = Some(df);
}
#[throws(ConnectorAgentError)]
fn partition(&mut self, counts: &[usize]) -> Vec<Self::Partition<'_>> {
assert_eq!(
counts.iter().sum::<usize>(),
self.nrows
.ok_or_else(|| ConnectorAgentError::DestinationNotAllocated)?,
"counts: {} != nrows: {:?}",
counts.iter().sum::<usize>(),
self.nrows
);
let buffers = self.buffers.ok_or_else(|| anyhow!("got None buffers"))?;
let schema = self
.schema
.as_ref()
.ok_or_else(|| anyhow!("got None schema"))?;
let buffer_column_index = self
.buffer_column_index
.as_ref()
.ok_or_else(|| anyhow!("got None buffer_column_index"))?;
let mut partitioned_columns: Vec<Vec<Box<dyn PandasColumnObject>>> =
(0..schema.len()).map(|_| vec![]).collect();
for (buf, cids) in buffers.iter().zip_eq(buffer_column_index) {
for &cid in cids {
match schema[cid] {
PandasTypeSystem::F64(_) => {
let fblock = Float64Block::extract(buf).map_err(|e| anyhow!(e))?;
let fcols = fblock.split()?;
for (&cid, fcol) in cids.iter().zip_eq(fcols) {
partitioned_columns[cid] = fcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::I64(_) => {
let ublock = Int64Block::extract(buf).map_err(|e| anyhow!(e))?;
let ucols = ublock.split()?;
for (&cid, ucol) in cids.iter().zip_eq(ucols) {
partitioned_columns[cid] = ucol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bool(_) => {
let bblock = BooleanBlock::extract(buf).map_err(|e| anyhow!(e))?;
let bcols = bblock.split()?;
for (&cid, bcol) in cids.iter().zip_eq(bcols) {
partitioned_columns[cid] = bcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::String(_)
| PandasTypeSystem::BoxStr(_)
| PandasTypeSystem::Str(_)
| PandasTypeSystem::Char(_) => {
let block = StringBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bytes(_) => {
let block = BytesBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::DateTime(_) => {
let block = DateTimeBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
}
}
}
let mut par_destinations = vec![];
for &c in counts.into_iter().rev() {
let mut columns = Vec::with_capacity(partitioned_columns.len());
for (i, partitions) in partitioned_columns.iter_mut().enumerate() {
columns.push(
partitions
.pop()
.ok_or_else(|| anyhow!("empty partition for {}th column", i))?,
);
}
par_destinations.push(PandasPartitionDestination::new(c, columns, schema));
}
// We need to reverse the par_destinations because partitions are poped reversely
par_destinations.into_iter().rev().collect()
}
fn schema(&self) -> &[PandasTypeSystem] {
static EMPTY_SCHEMA: Vec<PandasTypeSystem> = vec![];
self.schema.as_ref().unwrap_or(EMPTY_SCHEMA.as_ref())
}
}
pub struct PandasPartitionDestination<'a> {
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
seq: usize,
}
impl<'a> PandasPartitionDestination<'a> {
fn new(
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
) -> Self {
Self {
nrows,
columns,
schema,
seq: 0,
}
}
fn loc(&mut self) -> (usize, usize) {
let (row, col) = (self.seq / self.ncols(), self.seq % self.ncols());
self.seq += 1;
(row, col)
}
}
impl<'a> DestinationPartition<'a> for PandasPartitionDestination<'a> {
type TypeSystem = PandasTypeSystem;
fn nrows(&self) -> usize {
self.nrows
}
fn ncols(&self) -> usize {
self.schema.len()
}
fn finalize(&mut self) -> Result<()> {
for col in &mut self.columns {
col.finalize()?;
}
Ok(())
}
}
impl<'a, T> Consume<T> for PandasPartitionDestination<'a>
where
T: HasPandasColumn + TypeAssoc<PandasTypeSystem> + std::fmt::Debug,
{
fn consume(&mut self, value: T) -> Result<()> {
let (_, col) = self.loc();
self.schema[col].check::<T>()?;
// How do we check type id for borrowed types?
// assert!(self.columns[col].typecheck(TypeId::of::<T>()));
let (column, _): (&mut T::PandasColumn<'a>, *const ()) =
unsafe { transmute(&*self.columns[col]) };
column.write(value)
}
}
/// call python code to construct the dataframe and expose its buffers
#[throws(ConnectorAgentError)]
fn create_dataframe<'a, S: AsRef<str>>(
py: Python<'a>,
names: &[S],
schema: &[PandasTypeSystem],
nrows: usize,
) -> (&'a PyAny, &'a PyList, &'a PyList) {
let names: Vec<_> = names.into_iter().map(|s| s.as_ref()).collect();
debug!("names: {:?}", names);
debug!("schema: {:?}", schema);
let mut schema_dict: HashMap<PandasTypeSystem, Vec<usize>> = HashMap::new();
schema.iter().enumerate().for_each(|(idx, &dt)| {
let indices = schema_dict.entry(dt).or_insert(vec![]);
indices.push(idx);
});
debug!("schema_dict: {:?}", schema_dict);
let mut blocks_code = vec![];
schema_dict
.iter()
.for_each(|(&dt, indices)| {
if dt.is_extension() {
// each extension block only contains one column
for idx in indices {
blocks_code.push(format!(
"pd.core.internals.ExtensionBlock(pd.array(np.empty([{}], dtype='{}'), dtype='{}'), placement={}, ndim=2)",
nrows,
dt.npdtype(),
dt.dtype(),
idx,
));
}
} else {
blocks_code.push(format!(
"pd.core.internals.{}(np.empty([{}, {}], dtype='{}'), placement={:?}, ndim=2)",
dt.block_name(),
indices.len(),
nrows,
dt.npdtype(),
indices,
));
}
});
// https://github.com/pandas-dev/pandas/blob/master/pandas/core/internals/managers.py
// Suppose we want to find the array corresponding to our i'th column. |
let code = format!(
r#"import pandas as pd
import numpy as np
blocks = [{}]
block_manager = pd.core.internals.BlockManager(
blocks, [pd.Index(['{}']), pd.RangeIndex(start=0, stop={}, step=1)])
df = pd.DataFrame(block_manager)
blocks = [b.values for b in df._mgr.blocks]
index = [(i, j) for i, j in zip(df._mgr.blknos, df._mgr.blklocs)]"#,
blocks_code.join(","),
format!("{}", names.join("\',\'")),
nrows,
);
debug!("create dataframe code: {}", code);
// run python code
let locals = PyDict::new(py);
py.run(code.as_str(), None, Some(locals))
.map_err(|e| anyhow!(e))?;
// get # of blocks in dataframe
let buffers: &PyList = locals
.get_item("blocks")
.ok_or_else(|| anyhow!("cannot get `blocks` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `blocks` to PyList {}", e))?;
let index = locals
.get_item("index")
.ok_or_else(|| anyhow!("cannot get `index` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `index` to PyList {}", e))?;
let df = locals
.get_item("df")
.ok_or_else(|| anyhow!("cannot get `df` from locals"))?;
(df, buffers, index)
} | // blknos[i] identifies the block from self.blocks that contains this column.
// blklocs[i] identifies the column of interest within
// self.blocks[self.blknos[i]] | random_line_split |
destination.rs | use super::pandas_columns::{
BooleanBlock, BytesBlock, DateTimeBlock, Float64Block, HasPandasColumn, Int64Block,
PandasColumn, PandasColumnObject, StringBlock,
};
use super::types::{PandasDType, PandasTypeSystem};
use anyhow::anyhow;
use connectorx::{
ConnectorAgentError, Consume, DataOrder, Destination, DestinationPartition, Result, TypeAssoc,
TypeSystem,
};
use fehler::{throw, throws};
use itertools::Itertools;
use log::debug;
use pyo3::{
types::{PyDict, PyList},
FromPyObject, PyAny, Python,
};
use std::collections::HashMap;
use std::mem::transmute;
pub struct PandasDestination<'py> {
py: Python<'py>,
nrows: Option<usize>,
schema: Option<Vec<PandasTypeSystem>>,
buffers: Option<&'py PyList>,
buffer_column_index: Option<Vec<Vec<usize>>>,
dataframe: Option<&'py PyAny>, // Using this field other than the return purpose should be careful: this refers to the same data as buffers
}
impl<'a> PandasDestination<'a> {
pub fn new(py: Python<'a>) -> Self {
PandasDestination {
py,
nrows: None,
schema: None,
buffers: None,
buffer_column_index: None,
dataframe: None,
}
}
pub fn result(self) -> Option<&'a PyAny> {
self.dataframe
}
}
impl<'a> Destination for PandasDestination<'a> {
const DATA_ORDERS: &'static [DataOrder] = &[DataOrder::RowMajor];
type TypeSystem = PandasTypeSystem;
type Partition<'b> = PandasPartitionDestination<'b>;
#[throws(ConnectorAgentError)]
fn allocate<S: AsRef<str>>(
&mut self,
nrows: usize,
names: &[S],
schema: &[PandasTypeSystem],
data_order: DataOrder,
) {
if !matches!(data_order, DataOrder::RowMajor) {
throw!(ConnectorAgentError::UnsupportedDataOrder(data_order))
}
if matches!(self.nrows, Some(_)) {
throw!(ConnectorAgentError::DuplicatedAllocation);
}
let (df, buffers, index) = create_dataframe(self.py, names, schema, nrows)?;
debug!("DataFrame created");
// get index for each column: (index of block, index of column within the block)
let mut column_buffer_index: Vec<(usize, usize)> = Vec::with_capacity(index.len());
index.iter().try_for_each(|tuple| -> Result<()> {
column_buffer_index.push(tuple.extract().map_err(|e| {
anyhow!("cannot extract index tuple for `column_buffer_index` {}", e)
})?);
Ok(())
})?;
let nbuffers = buffers.len();
// buffer_column_index[i][j] = the column id of the j-th row (pandas buffer stores columns row-wise) in the i-th buffer.
let mut buffer_column_index = vec![vec![]; nbuffers];
let mut column_buffer_index_cid: Vec<_> = column_buffer_index.iter().enumerate().collect();
column_buffer_index_cid.sort_by_key(|(_, blk)| *blk);
for (cid, &(blkno, _)) in column_buffer_index_cid {
buffer_column_index[blkno].push(cid);
}
self.nrows = Some(nrows);
self.schema = Some(schema.to_vec());
self.buffers = Some(buffers);
self.buffer_column_index = Some(buffer_column_index);
self.dataframe = Some(df);
}
#[throws(ConnectorAgentError)]
fn partition(&mut self, counts: &[usize]) -> Vec<Self::Partition<'_>> {
assert_eq!(
counts.iter().sum::<usize>(),
self.nrows
.ok_or_else(|| ConnectorAgentError::DestinationNotAllocated)?,
"counts: {} != nrows: {:?}",
counts.iter().sum::<usize>(),
self.nrows
);
let buffers = self.buffers.ok_or_else(|| anyhow!("got None buffers"))?;
let schema = self
.schema
.as_ref()
.ok_or_else(|| anyhow!("got None schema"))?;
let buffer_column_index = self
.buffer_column_index
.as_ref()
.ok_or_else(|| anyhow!("got None buffer_column_index"))?;
let mut partitioned_columns: Vec<Vec<Box<dyn PandasColumnObject>>> =
(0..schema.len()).map(|_| vec![]).collect();
for (buf, cids) in buffers.iter().zip_eq(buffer_column_index) {
for &cid in cids {
match schema[cid] {
PandasTypeSystem::F64(_) => {
let fblock = Float64Block::extract(buf).map_err(|e| anyhow!(e))?;
let fcols = fblock.split()?;
for (&cid, fcol) in cids.iter().zip_eq(fcols) {
partitioned_columns[cid] = fcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::I64(_) => {
let ublock = Int64Block::extract(buf).map_err(|e| anyhow!(e))?;
let ucols = ublock.split()?;
for (&cid, ucol) in cids.iter().zip_eq(ucols) {
partitioned_columns[cid] = ucol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bool(_) => {
let bblock = BooleanBlock::extract(buf).map_err(|e| anyhow!(e))?;
let bcols = bblock.split()?;
for (&cid, bcol) in cids.iter().zip_eq(bcols) {
partitioned_columns[cid] = bcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::String(_)
| PandasTypeSystem::BoxStr(_)
| PandasTypeSystem::Str(_)
| PandasTypeSystem::Char(_) => {
let block = StringBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bytes(_) => {
let block = BytesBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::DateTime(_) => {
let block = DateTimeBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
}
}
}
let mut par_destinations = vec![];
for &c in counts.into_iter().rev() {
let mut columns = Vec::with_capacity(partitioned_columns.len());
for (i, partitions) in partitioned_columns.iter_mut().enumerate() {
columns.push(
partitions
.pop()
.ok_or_else(|| anyhow!("empty partition for {}th column", i))?,
);
}
par_destinations.push(PandasPartitionDestination::new(c, columns, schema));
}
// We need to reverse the par_destinations because partitions are poped reversely
par_destinations.into_iter().rev().collect()
}
fn | (&self) -> &[PandasTypeSystem] {
static EMPTY_SCHEMA: Vec<PandasTypeSystem> = vec![];
self.schema.as_ref().unwrap_or(EMPTY_SCHEMA.as_ref())
}
}
pub struct PandasPartitionDestination<'a> {
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
seq: usize,
}
impl<'a> PandasPartitionDestination<'a> {
fn new(
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
) -> Self {
Self {
nrows,
columns,
schema,
seq: 0,
}
}
fn loc(&mut self) -> (usize, usize) {
let (row, col) = (self.seq / self.ncols(), self.seq % self.ncols());
self.seq += 1;
(row, col)
}
}
impl<'a> DestinationPartition<'a> for PandasPartitionDestination<'a> {
type TypeSystem = PandasTypeSystem;
fn nrows(&self) -> usize {
self.nrows
}
fn ncols(&self) -> usize {
self.schema.len()
}
fn finalize(&mut self) -> Result<()> {
for col in &mut self.columns {
col.finalize()?;
}
Ok(())
}
}
impl<'a, T> Consume<T> for PandasPartitionDestination<'a>
where
T: HasPandasColumn + TypeAssoc<PandasTypeSystem> + std::fmt::Debug,
{
fn consume(&mut self, value: T) -> Result<()> {
let (_, col) = self.loc();
self.schema[col].check::<T>()?;
// How do we check type id for borrowed types?
// assert!(self.columns[col].typecheck(TypeId::of::<T>()));
let (column, _): (&mut T::PandasColumn<'a>, *const ()) =
unsafe { transmute(&*self.columns[col]) };
column.write(value)
}
}
/// call python code to construct the dataframe and expose its buffers
#[throws(ConnectorAgentError)]
fn create_dataframe<'a, S: AsRef<str>>(
py: Python<'a>,
names: &[S],
schema: &[PandasTypeSystem],
nrows: usize,
) -> (&'a PyAny, &'a PyList, &'a PyList) {
let names: Vec<_> = names.into_iter().map(|s| s.as_ref()).collect();
debug!("names: {:?}", names);
debug!("schema: {:?}", schema);
let mut schema_dict: HashMap<PandasTypeSystem, Vec<usize>> = HashMap::new();
schema.iter().enumerate().for_each(|(idx, &dt)| {
let indices = schema_dict.entry(dt).or_insert(vec![]);
indices.push(idx);
});
debug!("schema_dict: {:?}", schema_dict);
let mut blocks_code = vec![];
schema_dict
.iter()
.for_each(|(&dt, indices)| {
if dt.is_extension() {
// each extension block only contains one column
for idx in indices {
blocks_code.push(format!(
"pd.core.internals.ExtensionBlock(pd.array(np.empty([{}], dtype='{}'), dtype='{}'), placement={}, ndim=2)",
nrows,
dt.npdtype(),
dt.dtype(),
idx,
));
}
} else {
blocks_code.push(format!(
"pd.core.internals.{}(np.empty([{}, {}], dtype='{}'), placement={:?}, ndim=2)",
dt.block_name(),
indices.len(),
nrows,
dt.npdtype(),
indices,
));
}
});
// https://github.com/pandas-dev/pandas/blob/master/pandas/core/internals/managers.py
// Suppose we want to find the array corresponding to our i'th column.
// blknos[i] identifies the block from self.blocks that contains this column.
// blklocs[i] identifies the column of interest within
// self.blocks[self.blknos[i]]
let code = format!(
r#"import pandas as pd
import numpy as np
blocks = [{}]
block_manager = pd.core.internals.BlockManager(
blocks, [pd.Index(['{}']), pd.RangeIndex(start=0, stop={}, step=1)])
df = pd.DataFrame(block_manager)
blocks = [b.values for b in df._mgr.blocks]
index = [(i, j) for i, j in zip(df._mgr.blknos, df._mgr.blklocs)]"#,
blocks_code.join(","),
format!("{}", names.join("\',\'")),
nrows,
);
debug!("create dataframe code: {}", code);
// run python code
let locals = PyDict::new(py);
py.run(code.as_str(), None, Some(locals))
.map_err(|e| anyhow!(e))?;
// get # of blocks in dataframe
let buffers: &PyList = locals
.get_item("blocks")
.ok_or_else(|| anyhow!("cannot get `blocks` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `blocks` to PyList {}", e))?;
let index = locals
.get_item("index")
.ok_or_else(|| anyhow!("cannot get `index` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `index` to PyList {}", e))?;
let df = locals
.get_item("df")
.ok_or_else(|| anyhow!("cannot get `df` from locals"))?;
(df, buffers, index)
}
| schema | identifier_name |
destination.rs | use super::pandas_columns::{
BooleanBlock, BytesBlock, DateTimeBlock, Float64Block, HasPandasColumn, Int64Block,
PandasColumn, PandasColumnObject, StringBlock,
};
use super::types::{PandasDType, PandasTypeSystem};
use anyhow::anyhow;
use connectorx::{
ConnectorAgentError, Consume, DataOrder, Destination, DestinationPartition, Result, TypeAssoc,
TypeSystem,
};
use fehler::{throw, throws};
use itertools::Itertools;
use log::debug;
use pyo3::{
types::{PyDict, PyList},
FromPyObject, PyAny, Python,
};
use std::collections::HashMap;
use std::mem::transmute;
pub struct PandasDestination<'py> {
py: Python<'py>,
nrows: Option<usize>,
schema: Option<Vec<PandasTypeSystem>>,
buffers: Option<&'py PyList>,
buffer_column_index: Option<Vec<Vec<usize>>>,
dataframe: Option<&'py PyAny>, // Using this field other than the return purpose should be careful: this refers to the same data as buffers
}
impl<'a> PandasDestination<'a> {
pub fn new(py: Python<'a>) -> Self {
PandasDestination {
py,
nrows: None,
schema: None,
buffers: None,
buffer_column_index: None,
dataframe: None,
}
}
pub fn result(self) -> Option<&'a PyAny> {
self.dataframe
}
}
impl<'a> Destination for PandasDestination<'a> {
const DATA_ORDERS: &'static [DataOrder] = &[DataOrder::RowMajor];
type TypeSystem = PandasTypeSystem;
type Partition<'b> = PandasPartitionDestination<'b>;
#[throws(ConnectorAgentError)]
fn allocate<S: AsRef<str>>(
&mut self,
nrows: usize,
names: &[S],
schema: &[PandasTypeSystem],
data_order: DataOrder,
) {
if !matches!(data_order, DataOrder::RowMajor) {
throw!(ConnectorAgentError::UnsupportedDataOrder(data_order))
}
if matches!(self.nrows, Some(_)) {
throw!(ConnectorAgentError::DuplicatedAllocation);
}
let (df, buffers, index) = create_dataframe(self.py, names, schema, nrows)?;
debug!("DataFrame created");
// get index for each column: (index of block, index of column within the block)
let mut column_buffer_index: Vec<(usize, usize)> = Vec::with_capacity(index.len());
index.iter().try_for_each(|tuple| -> Result<()> {
column_buffer_index.push(tuple.extract().map_err(|e| {
anyhow!("cannot extract index tuple for `column_buffer_index` {}", e)
})?);
Ok(())
})?;
let nbuffers = buffers.len();
// buffer_column_index[i][j] = the column id of the j-th row (pandas buffer stores columns row-wise) in the i-th buffer.
let mut buffer_column_index = vec![vec![]; nbuffers];
let mut column_buffer_index_cid: Vec<_> = column_buffer_index.iter().enumerate().collect();
column_buffer_index_cid.sort_by_key(|(_, blk)| *blk);
for (cid, &(blkno, _)) in column_buffer_index_cid {
buffer_column_index[blkno].push(cid);
}
self.nrows = Some(nrows);
self.schema = Some(schema.to_vec());
self.buffers = Some(buffers);
self.buffer_column_index = Some(buffer_column_index);
self.dataframe = Some(df);
}
#[throws(ConnectorAgentError)]
fn partition(&mut self, counts: &[usize]) -> Vec<Self::Partition<'_>> {
assert_eq!(
counts.iter().sum::<usize>(),
self.nrows
.ok_or_else(|| ConnectorAgentError::DestinationNotAllocated)?,
"counts: {} != nrows: {:?}",
counts.iter().sum::<usize>(),
self.nrows
);
let buffers = self.buffers.ok_or_else(|| anyhow!("got None buffers"))?;
let schema = self
.schema
.as_ref()
.ok_or_else(|| anyhow!("got None schema"))?;
let buffer_column_index = self
.buffer_column_index
.as_ref()
.ok_or_else(|| anyhow!("got None buffer_column_index"))?;
let mut partitioned_columns: Vec<Vec<Box<dyn PandasColumnObject>>> =
(0..schema.len()).map(|_| vec![]).collect();
for (buf, cids) in buffers.iter().zip_eq(buffer_column_index) {
for &cid in cids {
match schema[cid] {
PandasTypeSystem::F64(_) => {
let fblock = Float64Block::extract(buf).map_err(|e| anyhow!(e))?;
let fcols = fblock.split()?;
for (&cid, fcol) in cids.iter().zip_eq(fcols) {
partitioned_columns[cid] = fcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::I64(_) => {
let ublock = Int64Block::extract(buf).map_err(|e| anyhow!(e))?;
let ucols = ublock.split()?;
for (&cid, ucol) in cids.iter().zip_eq(ucols) {
partitioned_columns[cid] = ucol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bool(_) => {
let bblock = BooleanBlock::extract(buf).map_err(|e| anyhow!(e))?;
let bcols = bblock.split()?;
for (&cid, bcol) in cids.iter().zip_eq(bcols) {
partitioned_columns[cid] = bcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::String(_)
| PandasTypeSystem::BoxStr(_)
| PandasTypeSystem::Str(_)
| PandasTypeSystem::Char(_) => {
let block = StringBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bytes(_) => {
let block = BytesBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::DateTime(_) => {
let block = DateTimeBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
}
}
}
let mut par_destinations = vec![];
for &c in counts.into_iter().rev() {
let mut columns = Vec::with_capacity(partitioned_columns.len());
for (i, partitions) in partitioned_columns.iter_mut().enumerate() {
columns.push(
partitions
.pop()
.ok_or_else(|| anyhow!("empty partition for {}th column", i))?,
);
}
par_destinations.push(PandasPartitionDestination::new(c, columns, schema));
}
// We need to reverse the par_destinations because partitions are poped reversely
par_destinations.into_iter().rev().collect()
}
fn schema(&self) -> &[PandasTypeSystem] |
}
pub struct PandasPartitionDestination<'a> {
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
seq: usize,
}
impl<'a> PandasPartitionDestination<'a> {
fn new(
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
) -> Self {
Self {
nrows,
columns,
schema,
seq: 0,
}
}
fn loc(&mut self) -> (usize, usize) {
let (row, col) = (self.seq / self.ncols(), self.seq % self.ncols());
self.seq += 1;
(row, col)
}
}
impl<'a> DestinationPartition<'a> for PandasPartitionDestination<'a> {
type TypeSystem = PandasTypeSystem;
fn nrows(&self) -> usize {
self.nrows
}
fn ncols(&self) -> usize {
self.schema.len()
}
fn finalize(&mut self) -> Result<()> {
for col in &mut self.columns {
col.finalize()?;
}
Ok(())
}
}
impl<'a, T> Consume<T> for PandasPartitionDestination<'a>
where
T: HasPandasColumn + TypeAssoc<PandasTypeSystem> + std::fmt::Debug,
{
fn consume(&mut self, value: T) -> Result<()> {
let (_, col) = self.loc();
self.schema[col].check::<T>()?;
// How do we check type id for borrowed types?
// assert!(self.columns[col].typecheck(TypeId::of::<T>()));
let (column, _): (&mut T::PandasColumn<'a>, *const ()) =
unsafe { transmute(&*self.columns[col]) };
column.write(value)
}
}
/// call python code to construct the dataframe and expose its buffers
#[throws(ConnectorAgentError)]
fn create_dataframe<'a, S: AsRef<str>>(
py: Python<'a>,
names: &[S],
schema: &[PandasTypeSystem],
nrows: usize,
) -> (&'a PyAny, &'a PyList, &'a PyList) {
let names: Vec<_> = names.into_iter().map(|s| s.as_ref()).collect();
debug!("names: {:?}", names);
debug!("schema: {:?}", schema);
let mut schema_dict: HashMap<PandasTypeSystem, Vec<usize>> = HashMap::new();
schema.iter().enumerate().for_each(|(idx, &dt)| {
let indices = schema_dict.entry(dt).or_insert(vec![]);
indices.push(idx);
});
debug!("schema_dict: {:?}", schema_dict);
let mut blocks_code = vec![];
schema_dict
.iter()
.for_each(|(&dt, indices)| {
if dt.is_extension() {
// each extension block only contains one column
for idx in indices {
blocks_code.push(format!(
"pd.core.internals.ExtensionBlock(pd.array(np.empty([{}], dtype='{}'), dtype='{}'), placement={}, ndim=2)",
nrows,
dt.npdtype(),
dt.dtype(),
idx,
));
}
} else {
blocks_code.push(format!(
"pd.core.internals.{}(np.empty([{}, {}], dtype='{}'), placement={:?}, ndim=2)",
dt.block_name(),
indices.len(),
nrows,
dt.npdtype(),
indices,
));
}
});
// https://github.com/pandas-dev/pandas/blob/master/pandas/core/internals/managers.py
// Suppose we want to find the array corresponding to our i'th column.
// blknos[i] identifies the block from self.blocks that contains this column.
// blklocs[i] identifies the column of interest within
// self.blocks[self.blknos[i]]
let code = format!(
r#"import pandas as pd
import numpy as np
blocks = [{}]
block_manager = pd.core.internals.BlockManager(
blocks, [pd.Index(['{}']), pd.RangeIndex(start=0, stop={}, step=1)])
df = pd.DataFrame(block_manager)
blocks = [b.values for b in df._mgr.blocks]
index = [(i, j) for i, j in zip(df._mgr.blknos, df._mgr.blklocs)]"#,
blocks_code.join(","),
format!("{}", names.join("\',\'")),
nrows,
);
debug!("create dataframe code: {}", code);
// run python code
let locals = PyDict::new(py);
py.run(code.as_str(), None, Some(locals))
.map_err(|e| anyhow!(e))?;
// get # of blocks in dataframe
let buffers: &PyList = locals
.get_item("blocks")
.ok_or_else(|| anyhow!("cannot get `blocks` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `blocks` to PyList {}", e))?;
let index = locals
.get_item("index")
.ok_or_else(|| anyhow!("cannot get `index` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `index` to PyList {}", e))?;
let df = locals
.get_item("df")
.ok_or_else(|| anyhow!("cannot get `df` from locals"))?;
(df, buffers, index)
}
| {
static EMPTY_SCHEMA: Vec<PandasTypeSystem> = vec![];
self.schema.as_ref().unwrap_or(EMPTY_SCHEMA.as_ref())
} | identifier_body |
towrap.go | // Package towrap wraps two versions of Traffic Ops clients to give up-to-date
// information, possibly using legacy API versions.
package towrap
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"strconv"
"sync"
"time"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc"
"github.com/apache/trafficcontrol/traffic_monitor/config"
legacyClient "github.com/apache/trafficcontrol/traffic_ops/v3-client"
client "github.com/apache/trafficcontrol/traffic_ops/v4-client"
jsoniter "github.com/json-iterator/go"
"golang.org/x/net/publicsuffix"
)
const localHostIP = "127.0.0.1"
// ErrNilSession is the error returned by operations performed on a nil session.
var ErrNilSession = errors.New("nil session")
// ByteTime is a structure for associating a set of raw data with some CDN
// Snapshot statistics, and a certain time.
type ByteTime struct {
bytes []byte
time time.Time
stats *tc.CRConfigStats
}
// ByteMapCache is a thread-access-safe map of cache server hostnames to
// ByteTime structures.
type ByteMapCache struct {
cache *map[string]ByteTime
m *sync.RWMutex
}
// NewByteMapCache constructs a new, empty ByteMapCache.
func NewByteMapCache() ByteMapCache {
return ByteMapCache{m: &sync.RWMutex{}, cache: &map[string]ByteTime{}}
}
// Set sets the entry given by 'key' to a new ByteTime structure with the given
// raw data ('newBytes') and the given statistics ('stats') at the current time.
func (c ByteMapCache) Set(key string, newBytes []byte, stats *tc.CRConfigStats) {
c.m.Lock()
defer c.m.Unlock()
(*c.cache)[key] = ByteTime{bytes: newBytes, stats: stats, time: time.Now()}
}
// Get retrieves the raw data, associated time, and statistics of the entry
// given by 'key'.
func (c ByteMapCache) Get(key string) ([]byte, time.Time, *tc.CRConfigStats) {
c.m.RLock()
defer c.m.RUnlock()
if byteTime, ok := (*c.cache)[key]; !ok {
return nil, time.Time{}, nil
} else {
return byteTime.bytes, byteTime.time, byteTime.stats
}
}
func (s TrafficOpsSessionThreadsafe) BackupFileExists() bool {
if _, err := os.Stat(s.CRConfigBackupFile); !os.IsNotExist(err) {
if _, err = os.Stat(s.TMConfigBackupFile); !os.IsNotExist(err) {
return true
}
}
return false
}
// CRConfigStat represents a set of statistics from a CDN Snapshot requested at
// a particular time.
type CRConfigStat struct {
// Err contains any error that may have occurred when obtaining the
// statistics.
Err error `json:"error"`
// ReqAddr is the network address from which the statistics were requested.
ReqAddr string `json:"request_address"`
// ReqTime is the time at which the request for statistics was made.
ReqTime time.Time `json:"request_time"`
// Stats contains the actual statistics.
Stats tc.CRConfigStats `json:"stats"`
}
// CopyCRConfigStat makes a deep copy of a slice of CRConfigStats.
func CopyCRConfigStat(old []CRConfigStat) []CRConfigStat {
newStats := make([]CRConfigStat, len(old))
copy(newStats, old)
return newStats
}
// CRConfigHistoryThreadsafe stores history in a circular buffer.
type CRConfigHistoryThreadsafe struct {
hist *[]CRConfigStat
m *sync.RWMutex
limit *uint64
length *uint64
pos *uint64
}
// NewCRConfigHistoryThreadsafe constructs a new, empty
// CRConfigHistoryThreadsafe - this is the ONLY way to safely create a
// CRConfigHistoryThreadsafe, using the zero value of the structure will cause
// all operations to encounter segmentation faults, and there is no way to
// preempt this.
//
// 'limit' indicates the size of the circular buffer - effectively the number of
// entries it will be capable of storing.
func NewCRConfigHistoryThreadsafe(limit uint64) CRConfigHistoryThreadsafe {
hist := make([]CRConfigStat, limit, limit)
length := uint64(0)
pos := uint64(0)
return CRConfigHistoryThreadsafe{hist: &hist, m: &sync.RWMutex{}, limit: &limit, length: &length, pos: &pos}
}
// Add adds the given stat to the history. Does not add new additions with the
// same remote address and CRConfig Date as the previous.
func (h CRConfigHistoryThreadsafe) Add(i *CRConfigStat) {
h.m.Lock()
defer h.m.Unlock()
if *h.length != 0 {
last := (*h.hist)[(*h.pos-1)%*h.limit]
datesEqual := (i.Stats.DateUnixSeconds == nil && last.Stats.DateUnixSeconds == nil) || (i.Stats.DateUnixSeconds != nil && last.Stats.DateUnixSeconds != nil && *i.Stats.DateUnixSeconds == *last.Stats.DateUnixSeconds)
cdnsEqual := (i.Stats.CDNName == nil && last.Stats.CDNName == nil) || (i.Stats.CDNName != nil && last.Stats.CDNName != nil && *i.Stats.CDNName == *last.Stats.CDNName)
reqAddrsEqual := i.ReqAddr == last.ReqAddr
if reqAddrsEqual && datesEqual && cdnsEqual {
return
}
}
(*h.hist)[*h.pos] = *i
*h.pos = (*h.pos + 1) % *h.limit
if *h.length < *h.limit {
*h.length++
}
}
// Get retrieves the stored history of CRConfigStat entries.
func (h CRConfigHistoryThreadsafe) Get() []CRConfigStat {
h.m.RLock()
defer h.m.RUnlock()
if *h.length < *h.limit {
return CopyCRConfigStat((*h.hist)[:*h.length])
}
newStats := make([]CRConfigStat, *h.limit)
copy(newStats, (*h.hist)[*h.pos:])
copy(newStats[*h.length-*h.pos:], (*h.hist)[:*h.pos])
return newStats
}
// Len gives the number of currently stored items in the buffer.
//
// An uninitialized buffer has zero length.
func (h CRConfigHistoryThreadsafe) Len() uint64 {
if h.length == nil {
return 0
}
return *h.length
}
// TrafficOpsSessionThreadsafe provides access to the Traffic Ops client safe
// for multiple goroutines. This fulfills the ITrafficOpsSession interface.
type TrafficOpsSessionThreadsafe struct {
session **client.Session // pointer-to-pointer, because we're given a pointer from the Traffic Ops package, and we don't want to copy it.
legacySession **legacyClient.Session
m *sync.Mutex
lastCRConfig ByteMapCache
crConfigHist CRConfigHistoryThreadsafe
CRConfigBackupFile string
TMConfigBackupFile string
}
// NewTrafficOpsSessionThreadsafe returns a new threadsafe
// TrafficOpsSessionThreadsafe wrapping the given `Session`.
func NewTrafficOpsSessionThreadsafe(s *client.Session, ls *legacyClient.Session, histLimit uint64, cfg config.Config) TrafficOpsSessionThreadsafe {
return TrafficOpsSessionThreadsafe{
CRConfigBackupFile: cfg.CRConfigBackupFile,
crConfigHist: NewCRConfigHistoryThreadsafe(histLimit),
lastCRConfig: NewByteMapCache(),
m: &sync.Mutex{},
session: &s,
legacySession: &ls,
TMConfigBackupFile: cfg.TMConfigBackupFile,
}
}
// Initialized tells whether or not the TrafficOpsSessionThreadsafe has been
// properly initialized with non-nil sessions.
func (s TrafficOpsSessionThreadsafe) Initialized() bool {
return s.session != nil && *s.session != nil && s.legacySession != nil && *s.legacySession != nil
}
// Update updates the TrafficOpsSessionThreadsafe's connection information with
// the provided information. It's safe for calling by multiple goroutines, being
// aware that they will race.
func (s *TrafficOpsSessionThreadsafe) Update(
url string,
username string,
password string,
insecure bool,
userAgent string,
useCache bool,
timeout time.Duration,
) error {
if s == nil {
return errors.New("cannot update nil session")
}
s.m.Lock()
defer s.m.Unlock()
// always set unauthenticated sessions first which can eventually authenticate themselves when attempting requests
if err := s.setSession(url, username, password, insecure, userAgent, useCache, timeout); err != nil {
return err
}
if err := s.setLegacySession(url, username, password, insecure, userAgent, useCache, timeout); err != nil {
return err
}
session, _, err := client.LoginWithAgent(url, username, password, insecure, userAgent, useCache, timeout)
if err != nil {
log.Errorf("logging in using up-to-date client: %v", err)
legacySession, _, err := legacyClient.LoginWithAgent(url, username, password, insecure, userAgent, useCache, timeout)
if err != nil || legacySession == nil {
err = fmt.Errorf("logging in using legacy client: %v", err)
return err
}
*s.legacySession = legacySession
} else {
*s.session = session
}
return nil
}
// setSession sets the session for the up-to-date client without logging in.
func (s *TrafficOpsSessionThreadsafe) setSession(url, username, password string, insecure bool, userAgent string, useCache bool, timeout time.Duration) error {
options := cookiejar.Options{
PublicSuffixList: publicsuffix.List,
}
jar, err := cookiejar.New(&options)
if err != nil {
return err
}
to := client.NewSession(username, password, url, userAgent, &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
},
Jar: jar,
}, useCache)
*s.session = to
return nil
}
// setSession sets the session for the legacy client without logging in.
func (s *TrafficOpsSessionThreadsafe) setLegacySession(url, username, password string, insecure bool, userAgent string, useCache bool, timeout time.Duration) error {
options := cookiejar.Options{
PublicSuffixList: publicsuffix.List,
}
jar, err := cookiejar.New(&options)
if err != nil {
return err
}
to := legacyClient.NewSession(username, password, url, userAgent, &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
},
Jar: jar,
}, useCache)
*s.legacySession = to
return nil
}
// getThreadsafeSession is used internally to get a copy of the session pointer,
// or nil if it doesn't exist. This should not be used outside
// TrafficOpsSessionThreadsafe, and never stored, because part of the purpose of
// rafficOpsSessionThreadsafe is to store a pointer to the Session pointer, so
// it can be updated by one goroutine and immediately used by another. This
// should only be called immediately before using the session, since someone
// else may update it concurrently.
func (s TrafficOpsSessionThreadsafe) get() *client.Session {
s.m.Lock()
defer s.m.Unlock()
if s.session == nil || *s.session == nil {
return nil
}
return *s.session
}
func (s TrafficOpsSessionThreadsafe) getLegacy() *legacyClient.Session {
s.m.Lock()
defer s.m.Unlock()
if s.legacySession == nil || *s.legacySession == nil {
return nil
}
return *s.legacySession
}
// CRConfigHistory gets all of the stored, historical data about CRConfig
// Snapshots' Stats sections.
func (s TrafficOpsSessionThreadsafe) CRConfigHistory() []CRConfigStat |
// CRConfigValid checks if the passed tc.CRConfig structure is valid, and
// ensures that it is from the same CDN as the last CRConfig Snapshot, as well
// as that it is newer than the last CRConfig Snapshot.
func (s *TrafficOpsSessionThreadsafe) CRConfigValid(crc *tc.CRConfig, cdn string) error {
if crc == nil {
return errors.New("CRConfig is nil")
}
if crc.Stats.CDNName == nil {
return errors.New("CRConfig.Stats.CDN missing")
}
if crc.Stats.DateUnixSeconds == nil {
return errors.New("CRConfig.Stats.Date missing")
}
// Note this intentionally takes intended CDN, rather than trusting
// crc.Stats
lastCrc, lastCrcTime, lastCrcStats := s.lastCRConfig.Get(cdn)
if lastCrc == nil {
return nil
}
if lastCrcStats.DateUnixSeconds == nil {
log.Warnln("TrafficOpsSessionThreadsafe.CRConfigValid returning no error, but last CRConfig Date was missing!")
return nil
}
if lastCrcStats.CDNName == nil {
log.Warnln("TrafficOpsSessionThreadsafe.CRConfigValid returning no error, but last CRConfig CDN was missing!")
return nil
}
if *lastCrcStats.CDNName != *crc.Stats.CDNName {
return errors.New("CRConfig.Stats.CDN " + *crc.Stats.CDNName + " different than last received CRConfig.Stats.CDNName " + *lastCrcStats.CDNName + " received at " + lastCrcTime.Format(time.RFC3339Nano))
}
if *lastCrcStats.DateUnixSeconds > *crc.Stats.DateUnixSeconds {
return errors.New("CRConfig.Stats.Date " + strconv.FormatInt(*crc.Stats.DateUnixSeconds, 10) + " older than last received CRConfig.Stats.Date " + strconv.FormatInt(*lastCrcStats.DateUnixSeconds, 10) + " received at " + lastCrcTime.Format(time.RFC3339Nano))
}
return nil
}
// CRConfigRaw returns the CRConfig from the Traffic Ops. This is safe for
// multiple goroutines.
func (s TrafficOpsSessionThreadsafe) CRConfigRaw(cdn string) ([]byte, error) {
var remoteAddr string
var err error
var crConfig *tc.CRConfig
var configBytes []byte
json := jsoniter.ConfigFastest
ss := s.get()
if ss == nil {
return nil, ErrNilSession
}
response, reqInf, err := ss.GetCRConfig(cdn, client.RequestOptions{})
if reqInf.RemoteAddr != nil {
remoteAddr = reqInf.RemoteAddr.String()
}
if err != nil {
log.Warnln("getting CRConfig from Traffic Ops using up-to-date client: " + err.Error() + ". Retrying with legacy client")
ls := s.getLegacy()
if ls == nil {
return nil, ErrNilSession
}
configBytes, reqInf, err = ls.GetCRConfig(cdn)
if reqInf.RemoteAddr != nil {
remoteAddr = reqInf.RemoteAddr.String()
}
if err != nil {
log.Errorln("getting CRConfig from Traffic Ops using legacy client: " + err.Error() + ". Checking for backup")
}
} else {
crConfig = &response.Response
configBytes, err = json.Marshal(crConfig)
if err != nil {
crConfig = nil
log.Warnln("failed to marshal CRConfig using up-to-date client: " + err.Error())
}
}
if err == nil {
log.Infoln("successfully got CRConfig from Traffic Ops. Writing to backup file")
if wErr := ioutil.WriteFile(s.CRConfigBackupFile, configBytes, 0644); wErr != nil {
log.Errorf("failed to write CRConfig backup file: %v", wErr)
}
} else {
if s.BackupFileExists() {
log.Errorln("using backup file for CRConfig snapshot due to error fetching CRConfig snapshot from Traffic Ops: " + err.Error())
configBytes, err = ioutil.ReadFile(s.CRConfigBackupFile)
if err != nil {
return nil, fmt.Errorf("reading CRConfig backup file: %v", err)
}
remoteAddr = localHostIP
err = nil
} else {
return nil, fmt.Errorf("failed to get CRConfig from Traffic Ops (%v), and there is no backup file", err)
}
}
hist := &CRConfigStat{
Err: err,
ReqAddr: remoteAddr,
ReqTime: time.Now(),
Stats: tc.CRConfigStats{},
}
defer s.crConfigHist.Add(hist)
if crConfig == nil {
if err = json.Unmarshal(configBytes, crConfig); err != nil {
err = errors.New("invalid JSON: " + err.Error())
hist.Err = err
return configBytes, err
}
}
hist.Stats = crConfig.Stats
if err = s.CRConfigValid(crConfig, cdn); err != nil {
err = errors.New("invalid CRConfig: " + err.Error())
hist.Err = err
return configBytes, err
}
s.lastCRConfig.Set(cdn, configBytes, &crConfig.Stats)
return configBytes, nil
}
// LastCRConfig returns the last CRConfig requested from CRConfigRaw, and the
// time it was returned. This is designed to be used in conjunction with a
// poller which regularly calls CRConfigRaw. If no last CRConfig exists, because
// CRConfigRaw has never been called successfully, this calls CRConfigRaw once
// to try to get the CRConfig from Traffic Ops.
func (s TrafficOpsSessionThreadsafe) LastCRConfig(cdn string) ([]byte, time.Time, error) {
crConfig, crConfigTime, _ := s.lastCRConfig.Get(cdn)
if len(crConfig) == 0 {
b, err := s.CRConfigRaw(cdn)
return b, time.Now(), err
}
return crConfig, crConfigTime, nil
}
func (s TrafficOpsSessionThreadsafe) fetchTMConfig(cdn string) (*tc.TrafficMonitorConfig, error) {
ss := s.get()
if ss == nil {
return nil, ErrNilSession
}
m, _, e := ss.GetTrafficMonitorConfig(cdn, client.NewRequestOptions())
return &m.Response, e
}
func (s TrafficOpsSessionThreadsafe) fetchLegacyTMConfig(cdn string) (*tc.TrafficMonitorConfig, error) {
ss := s.getLegacy()
if ss == nil {
return nil, ErrNilSession
}
m, _, e := ss.GetTrafficMonitorConfig(cdn)
if m == nil {
return nil, e
}
return m, e
}
// trafficMonitorConfigMapRaw returns the Traffic Monitor config map from the
// Traffic Ops, directly from the monitoring endpoint. This is not usually
// what is needed, rather monitoring needs the snapshotted CRConfig data, which
// is filled in by `LegacyTrafficMonitorConfigMap`. This is safe for multiple
// goroutines.
func (s TrafficOpsSessionThreadsafe) trafficMonitorConfigMapRaw(cdn string) (*tc.TrafficMonitorConfigMap, error) {
var config *tc.TrafficMonitorConfig
var configMap *tc.TrafficMonitorConfigMap
var err error
config, err = s.fetchTMConfig(cdn)
if err != nil {
log.Warnln("getting Traffic Monitor config from Traffic Ops using up-to-date client: " + err.Error() + ". Retrying with legacy client")
config, err = s.fetchLegacyTMConfig(cdn)
if err != nil {
log.Errorln("getting Traffic Monitor config from Traffic Ops using legacy client: " + err.Error())
}
}
if err == nil {
log.Infoln("successfully got Traffic Monitor config from Traffic Ops")
if config == nil {
return nil, fmt.Errorf("nil Traffic Monitor config after successful fetch")
}
configMap, err = tc.TrafficMonitorTransformToMap(config)
}
if err != nil {
// Default error case, no backup file exists
if !s.BackupFileExists() {
return nil, err
}
log.Errorln("using backup file for monitoring config snapshot due to invalid monitoring config snapshot from Traffic Ops: " + err.Error())
b, err := ioutil.ReadFile(s.TMConfigBackupFile)
if err != nil {
return nil, errors.New("reading TMConfigBackupFile: " + err.Error())
}
json := jsoniter.ConfigFastest
var tmConfig tc.TrafficMonitorConfig
if err := json.Unmarshal(b, &tmConfig); err != nil {
return nil, errors.New("unmarshalling backup file monitoring.json: " + err.Error())
}
return tc.TrafficMonitorTransformToMap(&tmConfig)
}
json := jsoniter.ConfigFastest
data, err := json.Marshal(*config)
if err == nil {
if wErr := ioutil.WriteFile(s.TMConfigBackupFile, data, 0644); wErr != nil {
log.Errorf("failed to write TM config backup file: %v", wErr)
}
}
return configMap, err
}
// TrafficMonitorConfigMap returns the Traffic Monitor config map from the
// Traffic Ops. This is safe for multiple goroutines.
func (s TrafficOpsSessionThreadsafe) TrafficMonitorConfigMap(cdn string) (*tc.TrafficMonitorConfigMap, error) {
mc, err := s.trafficMonitorConfigMapRaw(cdn)
if err != nil {
return nil, fmt.Errorf("getting monitor config map: %v", err)
}
return mc, nil
}
func (s TrafficOpsSessionThreadsafe) fetchServerByHostname(hostName string) (tc.ServerV40, error) {
ss := s.get()
if ss == nil {
return tc.ServerV40{}, ErrNilSession
}
params := url.Values{}
params.Set("hostName", hostName)
resp, _, err := ss.GetServers(client.RequestOptions{QueryParameters: params})
if err != nil {
return tc.ServerV40{}, fmt.Errorf("fetching server by hostname '%s': %v", hostName, err)
}
respLen := len(resp.Response)
if respLen < 1 {
return tc.ServerV40{}, fmt.Errorf("no server '%s' found in Traffic Ops", hostName)
}
var server tc.ServerV40
var num int
found := false
for i, srv := range resp.Response {
num = i
if srv.CDNName != nil && srv.HostName != nil && *srv.HostName == hostName {
server = srv
found = true
break
}
}
if !found {
return tc.ServerV40{}, fmt.Errorf("either no server '%s' found in Traffic Ops, or none by that hostName had non-nil CDN", hostName)
}
if respLen > 1 {
log.Warnf("Getting monitor server by hostname '%s' returned %d servers - selecting #%d", hostName, respLen, num)
}
return server, nil
}
func (s TrafficOpsSessionThreadsafe) fetchLegacyServerByHostname(hostName string) (tc.ServerV40, error) {
ss := s.getLegacy()
if ss == nil {
return tc.ServerV40{}, ErrNilSession
}
params := url.Values{}
params.Set("hostName", hostName)
resp, _, err := ss.GetServersWithHdr(¶ms, nil)
if err != nil {
return tc.ServerV40{}, fmt.Errorf("fetching server by hostname '%s': %v", hostName, err)
}
respLen := len(resp.Response)
if respLen < 1 {
return tc.ServerV40{}, fmt.Errorf("no server '%s' found in Traffic Ops", hostName)
}
var server tc.ServerV30
var num int
found := false
for i, srv := range resp.Response {
num = i
if srv.CDNName != nil && srv.HostName != nil && *srv.HostName == hostName {
server = srv
found = true
break
}
}
if !found {
return tc.ServerV40{}, fmt.Errorf("either no server '%s' found in Traffic Ops, or none by that hostName had non-nil CDN", hostName)
}
if respLen > 1 {
log.Warnf("Getting monitor server by hostname '%s' returned %d servers - selecting #%d", hostName, respLen, num)
}
if server.Profile == nil {
return tc.ServerV40{}, fmt.Errorf("server with hostname '%s' has no profile", hostName)
}
newServer, err := server.UpgradeToV40([]string{*server.Profile})
if err != nil {
return newServer, fmt.Errorf("coercing legacy server to new format: %v", err)
}
return newServer, nil
}
// MonitorCDN returns the name of the CDN of a Traffic Monitor with the given
// hostName.
func (s TrafficOpsSessionThreadsafe) MonitorCDN(hostName string) (string, error) {
var server tc.ServerV40
var err error
server, err = s.fetchServerByHostname(hostName)
if err != nil {
log.Warnln("getting server by hostname '" + hostName + "' using up-to-date client: " + err.Error() + ". Retrying with legacy client")
server, err = s.fetchLegacyServerByHostname(hostName)
}
if err != nil {
return "", fmt.Errorf("getting monitor CDN: %v", err)
}
// nil-dereference checks done already in each 'fetch' method; they'll just
// return an error in that case
return *server.CDNName, nil
}
| {
return s.crConfigHist.Get()
} | identifier_body |
towrap.go | // Package towrap wraps two versions of Traffic Ops clients to give up-to-date
// information, possibly using legacy API versions.
package towrap
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"strconv"
"sync"
"time"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc"
"github.com/apache/trafficcontrol/traffic_monitor/config"
legacyClient "github.com/apache/trafficcontrol/traffic_ops/v3-client"
client "github.com/apache/trafficcontrol/traffic_ops/v4-client"
jsoniter "github.com/json-iterator/go"
"golang.org/x/net/publicsuffix"
)
const localHostIP = "127.0.0.1"
// ErrNilSession is the error returned by operations performed on a nil session.
var ErrNilSession = errors.New("nil session")
// ByteTime is a structure for associating a set of raw data with some CDN
// Snapshot statistics, and a certain time.
type ByteTime struct {
bytes []byte
time time.Time
stats *tc.CRConfigStats
}
// ByteMapCache is a thread-access-safe map of cache server hostnames to
// ByteTime structures.
type ByteMapCache struct {
cache *map[string]ByteTime
m *sync.RWMutex
}
// NewByteMapCache constructs a new, empty ByteMapCache.
func NewByteMapCache() ByteMapCache {
return ByteMapCache{m: &sync.RWMutex{}, cache: &map[string]ByteTime{}}
}
// Set sets the entry given by 'key' to a new ByteTime structure with the given
// raw data ('newBytes') and the given statistics ('stats') at the current time.
func (c ByteMapCache) Set(key string, newBytes []byte, stats *tc.CRConfigStats) {
c.m.Lock()
defer c.m.Unlock()
(*c.cache)[key] = ByteTime{bytes: newBytes, stats: stats, time: time.Now()}
}
// Get retrieves the raw data, associated time, and statistics of the entry
// given by 'key'.
func (c ByteMapCache) Get(key string) ([]byte, time.Time, *tc.CRConfigStats) {
c.m.RLock()
defer c.m.RUnlock()
if byteTime, ok := (*c.cache)[key]; !ok {
return nil, time.Time{}, nil
} else {
return byteTime.bytes, byteTime.time, byteTime.stats
}
}
func (s TrafficOpsSessionThreadsafe) BackupFileExists() bool {
if _, err := os.Stat(s.CRConfigBackupFile); !os.IsNotExist(err) {
if _, err = os.Stat(s.TMConfigBackupFile); !os.IsNotExist(err) {
return true
}
}
return false
}
// CRConfigStat represents a set of statistics from a CDN Snapshot requested at
// a particular time.
type CRConfigStat struct {
// Err contains any error that may have occurred when obtaining the
// statistics.
Err error `json:"error"`
// ReqAddr is the network address from which the statistics were requested.
ReqAddr string `json:"request_address"`
// ReqTime is the time at which the request for statistics was made.
ReqTime time.Time `json:"request_time"`
// Stats contains the actual statistics.
Stats tc.CRConfigStats `json:"stats"`
}
// CopyCRConfigStat makes a deep copy of a slice of CRConfigStats.
func CopyCRConfigStat(old []CRConfigStat) []CRConfigStat {
newStats := make([]CRConfigStat, len(old))
copy(newStats, old)
return newStats
}
// CRConfigHistoryThreadsafe stores history in a circular buffer.
type CRConfigHistoryThreadsafe struct {
hist *[]CRConfigStat
m *sync.RWMutex
limit *uint64
length *uint64
pos *uint64
}
// NewCRConfigHistoryThreadsafe constructs a new, empty
// CRConfigHistoryThreadsafe - this is the ONLY way to safely create a
// CRConfigHistoryThreadsafe, using the zero value of the structure will cause
// all operations to encounter segmentation faults, and there is no way to
// preempt this.
//
// 'limit' indicates the size of the circular buffer - effectively the number of
// entries it will be capable of storing.
func NewCRConfigHistoryThreadsafe(limit uint64) CRConfigHistoryThreadsafe {
hist := make([]CRConfigStat, limit, limit)
length := uint64(0)
pos := uint64(0)
return CRConfigHistoryThreadsafe{hist: &hist, m: &sync.RWMutex{}, limit: &limit, length: &length, pos: &pos}
}
// Add adds the given stat to the history. Does not add new additions with the
// same remote address and CRConfig Date as the previous.
func (h CRConfigHistoryThreadsafe) Add(i *CRConfigStat) {
h.m.Lock()
defer h.m.Unlock()
if *h.length != 0 {
last := (*h.hist)[(*h.pos-1)%*h.limit]
datesEqual := (i.Stats.DateUnixSeconds == nil && last.Stats.DateUnixSeconds == nil) || (i.Stats.DateUnixSeconds != nil && last.Stats.DateUnixSeconds != nil && *i.Stats.DateUnixSeconds == *last.Stats.DateUnixSeconds)
cdnsEqual := (i.Stats.CDNName == nil && last.Stats.CDNName == nil) || (i.Stats.CDNName != nil && last.Stats.CDNName != nil && *i.Stats.CDNName == *last.Stats.CDNName)
reqAddrsEqual := i.ReqAddr == last.ReqAddr
if reqAddrsEqual && datesEqual && cdnsEqual {
return
}
}
(*h.hist)[*h.pos] = *i
*h.pos = (*h.pos + 1) % *h.limit
if *h.length < *h.limit {
*h.length++
}
}
// Get retrieves the stored history of CRConfigStat entries.
func (h CRConfigHistoryThreadsafe) Get() []CRConfigStat {
h.m.RLock()
defer h.m.RUnlock()
if *h.length < *h.limit {
return CopyCRConfigStat((*h.hist)[:*h.length])
}
newStats := make([]CRConfigStat, *h.limit)
copy(newStats, (*h.hist)[*h.pos:])
copy(newStats[*h.length-*h.pos:], (*h.hist)[:*h.pos])
return newStats
}
// Len gives the number of currently stored items in the buffer.
//
// An uninitialized buffer has zero length.
func (h CRConfigHistoryThreadsafe) Len() uint64 {
if h.length == nil {
return 0
}
return *h.length
}
// TrafficOpsSessionThreadsafe provides access to the Traffic Ops client safe
// for multiple goroutines. This fulfills the ITrafficOpsSession interface.
type TrafficOpsSessionThreadsafe struct {
session **client.Session // pointer-to-pointer, because we're given a pointer from the Traffic Ops package, and we don't want to copy it.
legacySession **legacyClient.Session
m *sync.Mutex
lastCRConfig ByteMapCache
crConfigHist CRConfigHistoryThreadsafe
CRConfigBackupFile string
TMConfigBackupFile string
}
// NewTrafficOpsSessionThreadsafe returns a new threadsafe
// TrafficOpsSessionThreadsafe wrapping the given `Session`.
func NewTrafficOpsSessionThreadsafe(s *client.Session, ls *legacyClient.Session, histLimit uint64, cfg config.Config) TrafficOpsSessionThreadsafe {
return TrafficOpsSessionThreadsafe{
CRConfigBackupFile: cfg.CRConfigBackupFile,
crConfigHist: NewCRConfigHistoryThreadsafe(histLimit),
lastCRConfig: NewByteMapCache(),
m: &sync.Mutex{},
session: &s,
legacySession: &ls,
TMConfigBackupFile: cfg.TMConfigBackupFile,
}
}
// Initialized tells whether or not the TrafficOpsSessionThreadsafe has been
// properly initialized with non-nil sessions.
func (s TrafficOpsSessionThreadsafe) Initialized() bool {
return s.session != nil && *s.session != nil && s.legacySession != nil && *s.legacySession != nil
}
// Update updates the TrafficOpsSessionThreadsafe's connection information with
// the provided information. It's safe for calling by multiple goroutines, being
// aware that they will race.
func (s *TrafficOpsSessionThreadsafe) Update(
url string,
username string,
password string,
insecure bool,
userAgent string,
useCache bool,
timeout time.Duration,
) error {
if s == nil {
return errors.New("cannot update nil session")
}
s.m.Lock()
defer s.m.Unlock()
// always set unauthenticated sessions first which can eventually authenticate themselves when attempting requests
if err := s.setSession(url, username, password, insecure, userAgent, useCache, timeout); err != nil {
return err
}
if err := s.setLegacySession(url, username, password, insecure, userAgent, useCache, timeout); err != nil {
return err
}
session, _, err := client.LoginWithAgent(url, username, password, insecure, userAgent, useCache, timeout)
if err != nil {
log.Errorf("logging in using up-to-date client: %v", err)
legacySession, _, err := legacyClient.LoginWithAgent(url, username, password, insecure, userAgent, useCache, timeout)
if err != nil || legacySession == nil {
err = fmt.Errorf("logging in using legacy client: %v", err)
return err
}
*s.legacySession = legacySession
} else {
*s.session = session
}
return nil
}
// setSession sets the session for the up-to-date client without logging in.
func (s *TrafficOpsSessionThreadsafe) setSession(url, username, password string, insecure bool, userAgent string, useCache bool, timeout time.Duration) error {
options := cookiejar.Options{
PublicSuffixList: publicsuffix.List,
}
jar, err := cookiejar.New(&options)
if err != nil {
return err
}
to := client.NewSession(username, password, url, userAgent, &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
},
Jar: jar,
}, useCache)
*s.session = to
return nil
}
// setSession sets the session for the legacy client without logging in.
func (s *TrafficOpsSessionThreadsafe) setLegacySession(url, username, password string, insecure bool, userAgent string, useCache bool, timeout time.Duration) error {
options := cookiejar.Options{
PublicSuffixList: publicsuffix.List,
}
jar, err := cookiejar.New(&options)
if err != nil {
return err
}
to := legacyClient.NewSession(username, password, url, userAgent, &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
},
Jar: jar,
}, useCache)
*s.legacySession = to
return nil
}
// getThreadsafeSession is used internally to get a copy of the session pointer,
// or nil if it doesn't exist. This should not be used outside
// TrafficOpsSessionThreadsafe, and never stored, because part of the purpose of
// rafficOpsSessionThreadsafe is to store a pointer to the Session pointer, so
// it can be updated by one goroutine and immediately used by another. This
// should only be called immediately before using the session, since someone
// else may update it concurrently.
func (s TrafficOpsSessionThreadsafe) get() *client.Session {
s.m.Lock()
defer s.m.Unlock()
if s.session == nil || *s.session == nil {
return nil
}
return *s.session
}
func (s TrafficOpsSessionThreadsafe) getLegacy() *legacyClient.Session {
s.m.Lock()
defer s.m.Unlock()
if s.legacySession == nil || *s.legacySession == nil {
return nil
}
return *s.legacySession
}
// CRConfigHistory gets all of the stored, historical data about CRConfig
// Snapshots' Stats sections.
func (s TrafficOpsSessionThreadsafe) CRConfigHistory() []CRConfigStat {
return s.crConfigHist.Get()
}
// CRConfigValid checks if the passed tc.CRConfig structure is valid, and
// ensures that it is from the same CDN as the last CRConfig Snapshot, as well
// as that it is newer than the last CRConfig Snapshot.
func (s *TrafficOpsSessionThreadsafe) CRConfigValid(crc *tc.CRConfig, cdn string) error {
if crc == nil {
return errors.New("CRConfig is nil")
}
if crc.Stats.CDNName == nil {
return errors.New("CRConfig.Stats.CDN missing")
}
if crc.Stats.DateUnixSeconds == nil {
return errors.New("CRConfig.Stats.Date missing")
}
// Note this intentionally takes intended CDN, rather than trusting
// crc.Stats
lastCrc, lastCrcTime, lastCrcStats := s.lastCRConfig.Get(cdn)
if lastCrc == nil {
return nil
}
if lastCrcStats.DateUnixSeconds == nil {
log.Warnln("TrafficOpsSessionThreadsafe.CRConfigValid returning no error, but last CRConfig Date was missing!")
return nil
}
if lastCrcStats.CDNName == nil {
log.Warnln("TrafficOpsSessionThreadsafe.CRConfigValid returning no error, but last CRConfig CDN was missing!")
return nil
}
if *lastCrcStats.CDNName != *crc.Stats.CDNName {
return errors.New("CRConfig.Stats.CDN " + *crc.Stats.CDNName + " different than last received CRConfig.Stats.CDNName " + *lastCrcStats.CDNName + " received at " + lastCrcTime.Format(time.RFC3339Nano))
}
if *lastCrcStats.DateUnixSeconds > *crc.Stats.DateUnixSeconds {
return errors.New("CRConfig.Stats.Date " + strconv.FormatInt(*crc.Stats.DateUnixSeconds, 10) + " older than last received CRConfig.Stats.Date " + strconv.FormatInt(*lastCrcStats.DateUnixSeconds, 10) + " received at " + lastCrcTime.Format(time.RFC3339Nano))
}
return nil
}
// CRConfigRaw returns the CRConfig from the Traffic Ops. This is safe for
// multiple goroutines.
func (s TrafficOpsSessionThreadsafe) CRConfigRaw(cdn string) ([]byte, error) {
var remoteAddr string
var err error
var crConfig *tc.CRConfig
var configBytes []byte
json := jsoniter.ConfigFastest
ss := s.get()
if ss == nil {
return nil, ErrNilSession
}
response, reqInf, err := ss.GetCRConfig(cdn, client.RequestOptions{})
if reqInf.RemoteAddr != nil {
remoteAddr = reqInf.RemoteAddr.String()
}
if err != nil {
log.Warnln("getting CRConfig from Traffic Ops using up-to-date client: " + err.Error() + ". Retrying with legacy client")
ls := s.getLegacy()
if ls == nil {
return nil, ErrNilSession
}
configBytes, reqInf, err = ls.GetCRConfig(cdn)
if reqInf.RemoteAddr != nil {
remoteAddr = reqInf.RemoteAddr.String()
}
if err != nil {
log.Errorln("getting CRConfig from Traffic Ops using legacy client: " + err.Error() + ". Checking for backup")
}
} else {
crConfig = &response.Response
configBytes, err = json.Marshal(crConfig)
if err != nil {
crConfig = nil
log.Warnln("failed to marshal CRConfig using up-to-date client: " + err.Error())
}
}
if err == nil {
log.Infoln("successfully got CRConfig from Traffic Ops. Writing to backup file")
if wErr := ioutil.WriteFile(s.CRConfigBackupFile, configBytes, 0644); wErr != nil {
log.Errorf("failed to write CRConfig backup file: %v", wErr)
}
} else {
if s.BackupFileExists() {
log.Errorln("using backup file for CRConfig snapshot due to error fetching CRConfig snapshot from Traffic Ops: " + err.Error())
configBytes, err = ioutil.ReadFile(s.CRConfigBackupFile)
if err != nil {
return nil, fmt.Errorf("reading CRConfig backup file: %v", err)
}
remoteAddr = localHostIP
err = nil
} else {
return nil, fmt.Errorf("failed to get CRConfig from Traffic Ops (%v), and there is no backup file", err)
}
}
hist := &CRConfigStat{
Err: err,
ReqAddr: remoteAddr,
ReqTime: time.Now(),
Stats: tc.CRConfigStats{},
}
defer s.crConfigHist.Add(hist) | return configBytes, err
}
}
hist.Stats = crConfig.Stats
if err = s.CRConfigValid(crConfig, cdn); err != nil {
err = errors.New("invalid CRConfig: " + err.Error())
hist.Err = err
return configBytes, err
}
s.lastCRConfig.Set(cdn, configBytes, &crConfig.Stats)
return configBytes, nil
}
// LastCRConfig returns the last CRConfig requested from CRConfigRaw, and the
// time it was returned. This is designed to be used in conjunction with a
// poller which regularly calls CRConfigRaw. If no last CRConfig exists, because
// CRConfigRaw has never been called successfully, this calls CRConfigRaw once
// to try to get the CRConfig from Traffic Ops.
func (s TrafficOpsSessionThreadsafe) LastCRConfig(cdn string) ([]byte, time.Time, error) {
crConfig, crConfigTime, _ := s.lastCRConfig.Get(cdn)
if len(crConfig) == 0 {
b, err := s.CRConfigRaw(cdn)
return b, time.Now(), err
}
return crConfig, crConfigTime, nil
}
func (s TrafficOpsSessionThreadsafe) fetchTMConfig(cdn string) (*tc.TrafficMonitorConfig, error) {
ss := s.get()
if ss == nil {
return nil, ErrNilSession
}
m, _, e := ss.GetTrafficMonitorConfig(cdn, client.NewRequestOptions())
return &m.Response, e
}
func (s TrafficOpsSessionThreadsafe) fetchLegacyTMConfig(cdn string) (*tc.TrafficMonitorConfig, error) {
ss := s.getLegacy()
if ss == nil {
return nil, ErrNilSession
}
m, _, e := ss.GetTrafficMonitorConfig(cdn)
if m == nil {
return nil, e
}
return m, e
}
// trafficMonitorConfigMapRaw returns the Traffic Monitor config map from the
// Traffic Ops, directly from the monitoring endpoint. This is not usually
// what is needed, rather monitoring needs the snapshotted CRConfig data, which
// is filled in by `LegacyTrafficMonitorConfigMap`. This is safe for multiple
// goroutines.
func (s TrafficOpsSessionThreadsafe) trafficMonitorConfigMapRaw(cdn string) (*tc.TrafficMonitorConfigMap, error) {
var config *tc.TrafficMonitorConfig
var configMap *tc.TrafficMonitorConfigMap
var err error
config, err = s.fetchTMConfig(cdn)
if err != nil {
log.Warnln("getting Traffic Monitor config from Traffic Ops using up-to-date client: " + err.Error() + ". Retrying with legacy client")
config, err = s.fetchLegacyTMConfig(cdn)
if err != nil {
log.Errorln("getting Traffic Monitor config from Traffic Ops using legacy client: " + err.Error())
}
}
if err == nil {
log.Infoln("successfully got Traffic Monitor config from Traffic Ops")
if config == nil {
return nil, fmt.Errorf("nil Traffic Monitor config after successful fetch")
}
configMap, err = tc.TrafficMonitorTransformToMap(config)
}
if err != nil {
// Default error case, no backup file exists
if !s.BackupFileExists() {
return nil, err
}
log.Errorln("using backup file for monitoring config snapshot due to invalid monitoring config snapshot from Traffic Ops: " + err.Error())
b, err := ioutil.ReadFile(s.TMConfigBackupFile)
if err != nil {
return nil, errors.New("reading TMConfigBackupFile: " + err.Error())
}
json := jsoniter.ConfigFastest
var tmConfig tc.TrafficMonitorConfig
if err := json.Unmarshal(b, &tmConfig); err != nil {
return nil, errors.New("unmarshalling backup file monitoring.json: " + err.Error())
}
return tc.TrafficMonitorTransformToMap(&tmConfig)
}
json := jsoniter.ConfigFastest
data, err := json.Marshal(*config)
if err == nil {
if wErr := ioutil.WriteFile(s.TMConfigBackupFile, data, 0644); wErr != nil {
log.Errorf("failed to write TM config backup file: %v", wErr)
}
}
return configMap, err
}
// TrafficMonitorConfigMap returns the Traffic Monitor config map from the
// Traffic Ops. This is safe for multiple goroutines.
func (s TrafficOpsSessionThreadsafe) TrafficMonitorConfigMap(cdn string) (*tc.TrafficMonitorConfigMap, error) {
mc, err := s.trafficMonitorConfigMapRaw(cdn)
if err != nil {
return nil, fmt.Errorf("getting monitor config map: %v", err)
}
return mc, nil
}
func (s TrafficOpsSessionThreadsafe) fetchServerByHostname(hostName string) (tc.ServerV40, error) {
ss := s.get()
if ss == nil {
return tc.ServerV40{}, ErrNilSession
}
params := url.Values{}
params.Set("hostName", hostName)
resp, _, err := ss.GetServers(client.RequestOptions{QueryParameters: params})
if err != nil {
return tc.ServerV40{}, fmt.Errorf("fetching server by hostname '%s': %v", hostName, err)
}
respLen := len(resp.Response)
if respLen < 1 {
return tc.ServerV40{}, fmt.Errorf("no server '%s' found in Traffic Ops", hostName)
}
var server tc.ServerV40
var num int
found := false
for i, srv := range resp.Response {
num = i
if srv.CDNName != nil && srv.HostName != nil && *srv.HostName == hostName {
server = srv
found = true
break
}
}
if !found {
return tc.ServerV40{}, fmt.Errorf("either no server '%s' found in Traffic Ops, or none by that hostName had non-nil CDN", hostName)
}
if respLen > 1 {
log.Warnf("Getting monitor server by hostname '%s' returned %d servers - selecting #%d", hostName, respLen, num)
}
return server, nil
}
func (s TrafficOpsSessionThreadsafe) fetchLegacyServerByHostname(hostName string) (tc.ServerV40, error) {
ss := s.getLegacy()
if ss == nil {
return tc.ServerV40{}, ErrNilSession
}
params := url.Values{}
params.Set("hostName", hostName)
resp, _, err := ss.GetServersWithHdr(¶ms, nil)
if err != nil {
return tc.ServerV40{}, fmt.Errorf("fetching server by hostname '%s': %v", hostName, err)
}
respLen := len(resp.Response)
if respLen < 1 {
return tc.ServerV40{}, fmt.Errorf("no server '%s' found in Traffic Ops", hostName)
}
var server tc.ServerV30
var num int
found := false
for i, srv := range resp.Response {
num = i
if srv.CDNName != nil && srv.HostName != nil && *srv.HostName == hostName {
server = srv
found = true
break
}
}
if !found {
return tc.ServerV40{}, fmt.Errorf("either no server '%s' found in Traffic Ops, or none by that hostName had non-nil CDN", hostName)
}
if respLen > 1 {
log.Warnf("Getting monitor server by hostname '%s' returned %d servers - selecting #%d", hostName, respLen, num)
}
if server.Profile == nil {
return tc.ServerV40{}, fmt.Errorf("server with hostname '%s' has no profile", hostName)
}
newServer, err := server.UpgradeToV40([]string{*server.Profile})
if err != nil {
return newServer, fmt.Errorf("coercing legacy server to new format: %v", err)
}
return newServer, nil
}
// MonitorCDN returns the name of the CDN of a Traffic Monitor with the given
// hostName.
func (s TrafficOpsSessionThreadsafe) MonitorCDN(hostName string) (string, error) {
var server tc.ServerV40
var err error
server, err = s.fetchServerByHostname(hostName)
if err != nil {
log.Warnln("getting server by hostname '" + hostName + "' using up-to-date client: " + err.Error() + ". Retrying with legacy client")
server, err = s.fetchLegacyServerByHostname(hostName)
}
if err != nil {
return "", fmt.Errorf("getting monitor CDN: %v", err)
}
// nil-dereference checks done already in each 'fetch' method; they'll just
// return an error in that case
return *server.CDNName, nil
} |
if crConfig == nil {
if err = json.Unmarshal(configBytes, crConfig); err != nil {
err = errors.New("invalid JSON: " + err.Error())
hist.Err = err | random_line_split |
towrap.go | // Package towrap wraps two versions of Traffic Ops clients to give up-to-date
// information, possibly using legacy API versions.
package towrap
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"strconv"
"sync"
"time"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc"
"github.com/apache/trafficcontrol/traffic_monitor/config"
legacyClient "github.com/apache/trafficcontrol/traffic_ops/v3-client"
client "github.com/apache/trafficcontrol/traffic_ops/v4-client"
jsoniter "github.com/json-iterator/go"
"golang.org/x/net/publicsuffix"
)
const localHostIP = "127.0.0.1"
// ErrNilSession is the error returned by operations performed on a nil session.
var ErrNilSession = errors.New("nil session")
// ByteTime is a structure for associating a set of raw data with some CDN
// Snapshot statistics, and a certain time.
type ByteTime struct {
bytes []byte
time time.Time
stats *tc.CRConfigStats
}
// ByteMapCache is a thread-access-safe map of cache server hostnames to
// ByteTime structures.
type ByteMapCache struct {
cache *map[string]ByteTime
m *sync.RWMutex
}
// NewByteMapCache constructs a new, empty ByteMapCache.
func NewByteMapCache() ByteMapCache {
return ByteMapCache{m: &sync.RWMutex{}, cache: &map[string]ByteTime{}}
}
// Set sets the entry given by 'key' to a new ByteTime structure with the given
// raw data ('newBytes') and the given statistics ('stats') at the current time.
func (c ByteMapCache) Set(key string, newBytes []byte, stats *tc.CRConfigStats) {
c.m.Lock()
defer c.m.Unlock()
(*c.cache)[key] = ByteTime{bytes: newBytes, stats: stats, time: time.Now()}
}
// Get retrieves the raw data, associated time, and statistics of the entry
// given by 'key'.
func (c ByteMapCache) Get(key string) ([]byte, time.Time, *tc.CRConfigStats) {
c.m.RLock()
defer c.m.RUnlock()
if byteTime, ok := (*c.cache)[key]; !ok {
return nil, time.Time{}, nil
} else {
return byteTime.bytes, byteTime.time, byteTime.stats
}
}
func (s TrafficOpsSessionThreadsafe) BackupFileExists() bool {
if _, err := os.Stat(s.CRConfigBackupFile); !os.IsNotExist(err) {
if _, err = os.Stat(s.TMConfigBackupFile); !os.IsNotExist(err) {
return true
}
}
return false
}
// CRConfigStat represents a set of statistics from a CDN Snapshot requested at
// a particular time.
type CRConfigStat struct {
// Err contains any error that may have occurred when obtaining the
// statistics.
Err error `json:"error"`
// ReqAddr is the network address from which the statistics were requested.
ReqAddr string `json:"request_address"`
// ReqTime is the time at which the request for statistics was made.
ReqTime time.Time `json:"request_time"`
// Stats contains the actual statistics.
Stats tc.CRConfigStats `json:"stats"`
}
// CopyCRConfigStat makes a deep copy of a slice of CRConfigStats.
func CopyCRConfigStat(old []CRConfigStat) []CRConfigStat {
newStats := make([]CRConfigStat, len(old))
copy(newStats, old)
return newStats
}
// CRConfigHistoryThreadsafe stores history in a circular buffer.
type CRConfigHistoryThreadsafe struct {
hist *[]CRConfigStat
m *sync.RWMutex
limit *uint64
length *uint64
pos *uint64
}
// NewCRConfigHistoryThreadsafe constructs a new, empty
// CRConfigHistoryThreadsafe - this is the ONLY way to safely create a
// CRConfigHistoryThreadsafe, using the zero value of the structure will cause
// all operations to encounter segmentation faults, and there is no way to
// preempt this.
//
// 'limit' indicates the size of the circular buffer - effectively the number of
// entries it will be capable of storing.
func NewCRConfigHistoryThreadsafe(limit uint64) CRConfigHistoryThreadsafe {
hist := make([]CRConfigStat, limit, limit)
length := uint64(0)
pos := uint64(0)
return CRConfigHistoryThreadsafe{hist: &hist, m: &sync.RWMutex{}, limit: &limit, length: &length, pos: &pos}
}
// Add adds the given stat to the history. Does not add new additions with the
// same remote address and CRConfig Date as the previous.
func (h CRConfigHistoryThreadsafe) Add(i *CRConfigStat) {
h.m.Lock()
defer h.m.Unlock()
if *h.length != 0 {
last := (*h.hist)[(*h.pos-1)%*h.limit]
datesEqual := (i.Stats.DateUnixSeconds == nil && last.Stats.DateUnixSeconds == nil) || (i.Stats.DateUnixSeconds != nil && last.Stats.DateUnixSeconds != nil && *i.Stats.DateUnixSeconds == *last.Stats.DateUnixSeconds)
cdnsEqual := (i.Stats.CDNName == nil && last.Stats.CDNName == nil) || (i.Stats.CDNName != nil && last.Stats.CDNName != nil && *i.Stats.CDNName == *last.Stats.CDNName)
reqAddrsEqual := i.ReqAddr == last.ReqAddr
if reqAddrsEqual && datesEqual && cdnsEqual |
}
(*h.hist)[*h.pos] = *i
*h.pos = (*h.pos + 1) % *h.limit
if *h.length < *h.limit {
*h.length++
}
}
// Get retrieves the stored history of CRConfigStat entries.
func (h CRConfigHistoryThreadsafe) Get() []CRConfigStat {
h.m.RLock()
defer h.m.RUnlock()
if *h.length < *h.limit {
return CopyCRConfigStat((*h.hist)[:*h.length])
}
newStats := make([]CRConfigStat, *h.limit)
copy(newStats, (*h.hist)[*h.pos:])
copy(newStats[*h.length-*h.pos:], (*h.hist)[:*h.pos])
return newStats
}
// Len gives the number of currently stored items in the buffer.
//
// An uninitialized buffer has zero length.
func (h CRConfigHistoryThreadsafe) Len() uint64 {
if h.length == nil {
return 0
}
return *h.length
}
// TrafficOpsSessionThreadsafe provides access to the Traffic Ops client safe
// for multiple goroutines. This fulfills the ITrafficOpsSession interface.
type TrafficOpsSessionThreadsafe struct {
session **client.Session // pointer-to-pointer, because we're given a pointer from the Traffic Ops package, and we don't want to copy it.
legacySession **legacyClient.Session
m *sync.Mutex
lastCRConfig ByteMapCache
crConfigHist CRConfigHistoryThreadsafe
CRConfigBackupFile string
TMConfigBackupFile string
}
// NewTrafficOpsSessionThreadsafe returns a new threadsafe
// TrafficOpsSessionThreadsafe wrapping the given `Session`.
func NewTrafficOpsSessionThreadsafe(s *client.Session, ls *legacyClient.Session, histLimit uint64, cfg config.Config) TrafficOpsSessionThreadsafe {
return TrafficOpsSessionThreadsafe{
CRConfigBackupFile: cfg.CRConfigBackupFile,
crConfigHist: NewCRConfigHistoryThreadsafe(histLimit),
lastCRConfig: NewByteMapCache(),
m: &sync.Mutex{},
session: &s,
legacySession: &ls,
TMConfigBackupFile: cfg.TMConfigBackupFile,
}
}
// Initialized tells whether or not the TrafficOpsSessionThreadsafe has been
// properly initialized with non-nil sessions.
func (s TrafficOpsSessionThreadsafe) Initialized() bool {
return s.session != nil && *s.session != nil && s.legacySession != nil && *s.legacySession != nil
}
// Update updates the TrafficOpsSessionThreadsafe's connection information with
// the provided information. It's safe for calling by multiple goroutines, being
// aware that they will race.
func (s *TrafficOpsSessionThreadsafe) Update(
url string,
username string,
password string,
insecure bool,
userAgent string,
useCache bool,
timeout time.Duration,
) error {
if s == nil {
return errors.New("cannot update nil session")
}
s.m.Lock()
defer s.m.Unlock()
// always set unauthenticated sessions first which can eventually authenticate themselves when attempting requests
if err := s.setSession(url, username, password, insecure, userAgent, useCache, timeout); err != nil {
return err
}
if err := s.setLegacySession(url, username, password, insecure, userAgent, useCache, timeout); err != nil {
return err
}
session, _, err := client.LoginWithAgent(url, username, password, insecure, userAgent, useCache, timeout)
if err != nil {
log.Errorf("logging in using up-to-date client: %v", err)
legacySession, _, err := legacyClient.LoginWithAgent(url, username, password, insecure, userAgent, useCache, timeout)
if err != nil || legacySession == nil {
err = fmt.Errorf("logging in using legacy client: %v", err)
return err
}
*s.legacySession = legacySession
} else {
*s.session = session
}
return nil
}
// setSession sets the session for the up-to-date client without logging in.
func (s *TrafficOpsSessionThreadsafe) setSession(url, username, password string, insecure bool, userAgent string, useCache bool, timeout time.Duration) error {
options := cookiejar.Options{
PublicSuffixList: publicsuffix.List,
}
jar, err := cookiejar.New(&options)
if err != nil {
return err
}
to := client.NewSession(username, password, url, userAgent, &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
},
Jar: jar,
}, useCache)
*s.session = to
return nil
}
// setSession sets the session for the legacy client without logging in.
func (s *TrafficOpsSessionThreadsafe) setLegacySession(url, username, password string, insecure bool, userAgent string, useCache bool, timeout time.Duration) error {
options := cookiejar.Options{
PublicSuffixList: publicsuffix.List,
}
jar, err := cookiejar.New(&options)
if err != nil {
return err
}
to := legacyClient.NewSession(username, password, url, userAgent, &http.Client{
Timeout: timeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
},
Jar: jar,
}, useCache)
*s.legacySession = to
return nil
}
// getThreadsafeSession is used internally to get a copy of the session pointer,
// or nil if it doesn't exist. This should not be used outside
// TrafficOpsSessionThreadsafe, and never stored, because part of the purpose of
// rafficOpsSessionThreadsafe is to store a pointer to the Session pointer, so
// it can be updated by one goroutine and immediately used by another. This
// should only be called immediately before using the session, since someone
// else may update it concurrently.
func (s TrafficOpsSessionThreadsafe) get() *client.Session {
s.m.Lock()
defer s.m.Unlock()
if s.session == nil || *s.session == nil {
return nil
}
return *s.session
}
func (s TrafficOpsSessionThreadsafe) getLegacy() *legacyClient.Session {
s.m.Lock()
defer s.m.Unlock()
if s.legacySession == nil || *s.legacySession == nil {
return nil
}
return *s.legacySession
}
// CRConfigHistory gets all of the stored, historical data about CRConfig
// Snapshots' Stats sections.
func (s TrafficOpsSessionThreadsafe) CRConfigHistory() []CRConfigStat {
return s.crConfigHist.Get()
}
// CRConfigValid checks if the passed tc.CRConfig structure is valid, and
// ensures that it is from the same CDN as the last CRConfig Snapshot, as well
// as that it is newer than the last CRConfig Snapshot.
func (s *TrafficOpsSessionThreadsafe) CRConfigValid(crc *tc.CRConfig, cdn string) error {
if crc == nil {
return errors.New("CRConfig is nil")
}
if crc.Stats.CDNName == nil {
return errors.New("CRConfig.Stats.CDN missing")
}
if crc.Stats.DateUnixSeconds == nil {
return errors.New("CRConfig.Stats.Date missing")
}
// Note this intentionally takes intended CDN, rather than trusting
// crc.Stats
lastCrc, lastCrcTime, lastCrcStats := s.lastCRConfig.Get(cdn)
if lastCrc == nil {
return nil
}
if lastCrcStats.DateUnixSeconds == nil {
log.Warnln("TrafficOpsSessionThreadsafe.CRConfigValid returning no error, but last CRConfig Date was missing!")
return nil
}
if lastCrcStats.CDNName == nil {
log.Warnln("TrafficOpsSessionThreadsafe.CRConfigValid returning no error, but last CRConfig CDN was missing!")
return nil
}
if *lastCrcStats.CDNName != *crc.Stats.CDNName {
return errors.New("CRConfig.Stats.CDN " + *crc.Stats.CDNName + " different than last received CRConfig.Stats.CDNName " + *lastCrcStats.CDNName + " received at " + lastCrcTime.Format(time.RFC3339Nano))
}
if *lastCrcStats.DateUnixSeconds > *crc.Stats.DateUnixSeconds {
return errors.New("CRConfig.Stats.Date " + strconv.FormatInt(*crc.Stats.DateUnixSeconds, 10) + " older than last received CRConfig.Stats.Date " + strconv.FormatInt(*lastCrcStats.DateUnixSeconds, 10) + " received at " + lastCrcTime.Format(time.RFC3339Nano))
}
return nil
}
// CRConfigRaw returns the CRConfig from the Traffic Ops. This is safe for
// multiple goroutines.
func (s TrafficOpsSessionThreadsafe) CRConfigRaw(cdn string) ([]byte, error) {
var remoteAddr string
var err error
var crConfig *tc.CRConfig
var configBytes []byte
json := jsoniter.ConfigFastest
ss := s.get()
if ss == nil {
return nil, ErrNilSession
}
response, reqInf, err := ss.GetCRConfig(cdn, client.RequestOptions{})
if reqInf.RemoteAddr != nil {
remoteAddr = reqInf.RemoteAddr.String()
}
if err != nil {
log.Warnln("getting CRConfig from Traffic Ops using up-to-date client: " + err.Error() + ". Retrying with legacy client")
ls := s.getLegacy()
if ls == nil {
return nil, ErrNilSession
}
configBytes, reqInf, err = ls.GetCRConfig(cdn)
if reqInf.RemoteAddr != nil {
remoteAddr = reqInf.RemoteAddr.String()
}
if err != nil {
log.Errorln("getting CRConfig from Traffic Ops using legacy client: " + err.Error() + ". Checking for backup")
}
} else {
crConfig = &response.Response
configBytes, err = json.Marshal(crConfig)
if err != nil {
crConfig = nil
log.Warnln("failed to marshal CRConfig using up-to-date client: " + err.Error())
}
}
if err == nil {
log.Infoln("successfully got CRConfig from Traffic Ops. Writing to backup file")
if wErr := ioutil.WriteFile(s.CRConfigBackupFile, configBytes, 0644); wErr != nil {
log.Errorf("failed to write CRConfig backup file: %v", wErr)
}
} else {
if s.BackupFileExists() {
log.Errorln("using backup file for CRConfig snapshot due to error fetching CRConfig snapshot from Traffic Ops: " + err.Error())
configBytes, err = ioutil.ReadFile(s.CRConfigBackupFile)
if err != nil {
return nil, fmt.Errorf("reading CRConfig backup file: %v", err)
}
remoteAddr = localHostIP
err = nil
} else {
return nil, fmt.Errorf("failed to get CRConfig from Traffic Ops (%v), and there is no backup file", err)
}
}
hist := &CRConfigStat{
Err: err,
ReqAddr: remoteAddr,
ReqTime: time.Now(),
Stats: tc.CRConfigStats{},
}
defer s.crConfigHist.Add(hist)
if crConfig == nil {
if err = json.Unmarshal(configBytes, crConfig); err != nil {
err = errors.New("invalid JSON: " + err.Error())
hist.Err = err
return configBytes, err
}
}
hist.Stats = crConfig.Stats
if err = s.CRConfigValid(crConfig, cdn); err != nil {
err = errors.New("invalid CRConfig: " + err.Error())
hist.Err = err
return configBytes, err
}
s.lastCRConfig.Set(cdn, configBytes, &crConfig.Stats)
return configBytes, nil
}
// LastCRConfig returns the last CRConfig requested from CRConfigRaw, and the
// time it was returned. This is designed to be used in conjunction with a
// poller which regularly calls CRConfigRaw. If no last CRConfig exists, because
// CRConfigRaw has never been called successfully, this calls CRConfigRaw once
// to try to get the CRConfig from Traffic Ops.
func (s TrafficOpsSessionThreadsafe) LastCRConfig(cdn string) ([]byte, time.Time, error) {
crConfig, crConfigTime, _ := s.lastCRConfig.Get(cdn)
if len(crConfig) == 0 {
b, err := s.CRConfigRaw(cdn)
return b, time.Now(), err
}
return crConfig, crConfigTime, nil
}
func (s TrafficOpsSessionThreadsafe) fetchTMConfig(cdn string) (*tc.TrafficMonitorConfig, error) {
ss := s.get()
if ss == nil {
return nil, ErrNilSession
}
m, _, e := ss.GetTrafficMonitorConfig(cdn, client.NewRequestOptions())
return &m.Response, e
}
func (s TrafficOpsSessionThreadsafe) fetchLegacyTMConfig(cdn string) (*tc.TrafficMonitorConfig, error) {
ss := s.getLegacy()
if ss == nil {
return nil, ErrNilSession
}
m, _, e := ss.GetTrafficMonitorConfig(cdn)
if m == nil {
return nil, e
}
return m, e
}
// trafficMonitorConfigMapRaw returns the Traffic Monitor config map from the
// Traffic Ops, directly from the monitoring endpoint. This is not usually
// what is needed, rather monitoring needs the snapshotted CRConfig data, which
// is filled in by `LegacyTrafficMonitorConfigMap`. This is safe for multiple
// goroutines.
func (s TrafficOpsSessionThreadsafe) trafficMonitorConfigMapRaw(cdn string) (*tc.TrafficMonitorConfigMap, error) {
var config *tc.TrafficMonitorConfig
var configMap *tc.TrafficMonitorConfigMap
var err error
config, err = s.fetchTMConfig(cdn)
if err != nil {
log.Warnln("getting Traffic Monitor config from Traffic Ops using up-to-date client: " + err.Error() + ". Retrying with legacy client")
config, err = s.fetchLegacyTMConfig(cdn)
if err != nil {
log.Errorln("getting Traffic Monitor config from Traffic Ops using legacy client: " + err.Error())
}
}
if err == nil {
log.Infoln("successfully got Traffic Monitor config from Traffic Ops")
if config == nil {
return nil, fmt.Errorf("nil Traffic Monitor config after successful fetch")
}
configMap, err = tc.TrafficMonitorTransformToMap(config)
}
if err != nil {
// Default error case, no backup file exists
if !s.BackupFileExists() {
return nil, err
}
log.Errorln("using backup file for monitoring config snapshot due to invalid monitoring config snapshot from Traffic Ops: " + err.Error())
b, err := ioutil.ReadFile(s.TMConfigBackupFile)
if err != nil {
return nil, errors.New("reading TMConfigBackupFile: " + err.Error())
}
json := jsoniter.ConfigFastest
var tmConfig tc.TrafficMonitorConfig
if err := json.Unmarshal(b, &tmConfig); err != nil {
return nil, errors.New("unmarshalling backup file monitoring.json: " + err.Error())
}
return tc.TrafficMonitorTransformToMap(&tmConfig)
}
json := jsoniter.ConfigFastest
data, err := json.Marshal(*config)
if err == nil {
if wErr := ioutil.WriteFile(s.TMConfigBackupFile, data, 0644); wErr != nil {
log.Errorf("failed to write TM config backup file: %v", wErr)
}
}
return configMap, err
}
// TrafficMonitorConfigMap returns the Traffic Monitor config map from the
// Traffic Ops. This is safe for multiple goroutines.
func (s TrafficOpsSessionThreadsafe) TrafficMonitorConfigMap(cdn string) (*tc.TrafficMonitorConfigMap, error) {
mc, err := s.trafficMonitorConfigMapRaw(cdn)
if err != nil {
return nil, fmt.Errorf("getting monitor config map: %v", err)
}
return mc, nil
}
func (s TrafficOpsSessionThreadsafe) fetchServerByHostname(hostName string) (tc.ServerV40, error) {
ss := s.get()
if ss == nil {
return tc.ServerV40{}, ErrNilSession
}
params := url.Values{}
params.Set("hostName", hostName)
resp, _, err := ss.GetServers(client.RequestOptions{QueryParameters: params})
if err != nil {
return tc.ServerV40{}, fmt.Errorf("fetching server by hostname '%s': %v", hostName, err)
}
respLen := len(resp.Response)
if respLen < 1 {
return tc.ServerV40{}, fmt.Errorf("no server '%s' found in Traffic Ops", hostName)
}
var server tc.ServerV40
var num int
found := false
for i, srv := range resp.Response {
num = i
if srv.CDNName != nil && srv.HostName != nil && *srv.HostName == hostName {
server = srv
found = true
break
}
}
if !found {
return tc.ServerV40{}, fmt.Errorf("either no server '%s' found in Traffic Ops, or none by that hostName had non-nil CDN", hostName)
}
if respLen > 1 {
log.Warnf("Getting monitor server by hostname '%s' returned %d servers - selecting #%d", hostName, respLen, num)
}
return server, nil
}
func (s TrafficOpsSessionThreadsafe) fetchLegacyServerByHostname(hostName string) (tc.ServerV40, error) {
ss := s.getLegacy()
if ss == nil {
return tc.ServerV40{}, ErrNilSession
}
params := url.Values{}
params.Set("hostName", hostName)
resp, _, err := ss.GetServersWithHdr(¶ms, nil)
if err != nil {
return tc.ServerV40{}, fmt.Errorf("fetching server by hostname '%s': %v", hostName, err)
}
respLen := len(resp.Response)
if respLen < 1 {
return tc.ServerV40{}, fmt.Errorf("no server '%s' found in Traffic Ops", hostName)
}
var server tc.ServerV30
var num int
found := false
for i, srv := range resp.Response {
num = i
if srv.CDNName != nil && srv.HostName != nil && *srv.HostName == hostName {
server = srv
found = true
break
}
}
if !found {
return tc.ServerV40{}, fmt.Errorf("either no server '%s' found in Traffic Ops, or none by that hostName had non-nil CDN", hostName)
}
if respLen > 1 {
log.Warnf("Getting monitor server by hostname '%s' returned %d servers - selecting #%d", hostName, respLen, num)
}
if server.Profile == nil {
return tc.ServerV40{}, fmt.Errorf("server with hostname '%s' has no profile", hostName)
}
newServer, err := server.UpgradeToV40([]string{*server.Profile})
if err != nil {
return newServer, fmt.Errorf("coercing legacy server to new format: %v", err)
}
return newServer, nil
}
// MonitorCDN returns the name of the CDN of a Traffic Monitor with the given
// hostName.
//
// Tries the up-to-date client first, falling back to the legacy client on
// error.
func (s TrafficOpsSessionThreadsafe) MonitorCDN(hostName string) (string, error) {
    var server tc.ServerV40
    var err error
    server, err = s.fetchServerByHostname(hostName)
    if err != nil {
        log.Warnln("getting server by hostname '" + hostName + "' using up-to-date client: " + err.Error() + ". Retrying with legacy client")
        server, err = s.fetchLegacyServerByHostname(hostName)
    }
    if err != nil {
        return "", fmt.Errorf("getting monitor CDN: %v", err)
    }
    // nil-dereference checks done already in each 'fetch' method; they'll just
    // return an error in that case
    return *server.CDNName, nil
}
| {
return
} | conditional_block |
towrap.go | // Package towrap wraps two versions of Traffic Ops clients to give up-to-date
// information, possibly using legacy API versions.
package towrap
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"strconv"
"sync"
"time"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc"
"github.com/apache/trafficcontrol/traffic_monitor/config"
legacyClient "github.com/apache/trafficcontrol/traffic_ops/v3-client"
client "github.com/apache/trafficcontrol/traffic_ops/v4-client"
jsoniter "github.com/json-iterator/go"
"golang.org/x/net/publicsuffix"
)
// localHostIP is recorded as the request address when a fetch was satisfied
// from a backup file instead of a live Traffic Ops request.
const localHostIP = "127.0.0.1"

// ErrNilSession is the error returned by operations performed on a nil session.
var ErrNilSession = errors.New("nil session")
// ByteTime is a structure for associating a set of raw data with some CDN
// Snapshot statistics, and a certain time.
type ByteTime struct {
    bytes []byte            // raw CRConfig bytes as fetched
    time  time.Time         // when the entry was stored
    stats *tc.CRConfigStats // Stats section parsed from the same Snapshot
}
// ByteMapCache is a thread-access-safe map of cache server hostnames to
// ByteTime structures.
//
// Both fields are pointers so that value copies of a ByteMapCache share the
// same underlying map and lock.
type ByteMapCache struct {
    cache *map[string]ByteTime
    m     *sync.RWMutex
}
// NewByteMapCache constructs a new, empty ByteMapCache ready for use.
func NewByteMapCache() ByteMapCache {
    cache := map[string]ByteTime{}
    return ByteMapCache{
        cache: &cache,
        m:     &sync.RWMutex{},
    }
}
// Set sets the entry given by 'key' to a new ByteTime structure with the given
// raw data ('newBytes') and the given statistics ('stats') at the current time.
//
// Takes the write lock; safe for concurrent use.
func (c ByteMapCache) Set(key string, newBytes []byte, stats *tc.CRConfigStats) {
    c.m.Lock()
    defer c.m.Unlock()
    (*c.cache)[key] = ByteTime{bytes: newBytes, stats: stats, time: time.Now()}
}
// Get retrieves the raw data, associated time, and statistics of the entry
// given by 'key'. If no entry exists, all three returns are zero values
// (nil bytes, zero time, nil stats).
//
// Takes the read lock; safe for concurrent use.
func (c ByteMapCache) Get(key string) ([]byte, time.Time, *tc.CRConfigStats) {
    c.m.RLock()
    defer c.m.RUnlock()
    // Idiom fix: the previous form bound byteTime inside "if ... ; !ok" but
    // only used it in the else branch, which golint flags; an early return
    // reads more clearly and behaves identically.
    byteTime, ok := (*c.cache)[key]
    if !ok {
        return nil, time.Time{}, nil
    }
    return byteTime.bytes, byteTime.time, byteTime.stats
}
// BackupFileExists reports whether both the CRConfig and the TM config backup
// files are present on disk.
//
// NOTE(review): !os.IsNotExist(err) treats any stat error other than
// "does not exist" (e.g. permission denied) as the file existing - confirm
// that is intended.
func (s TrafficOpsSessionThreadsafe) BackupFileExists() bool {
    if _, err := os.Stat(s.CRConfigBackupFile); !os.IsNotExist(err) {
        if _, err = os.Stat(s.TMConfigBackupFile); !os.IsNotExist(err) {
            return true
        }
    }
    return false
}
// CRConfigStat represents a set of statistics from a CDN Snapshot requested at
// a particular time.
type CRConfigStat struct {
    // Err contains any error that may have occurred when obtaining the
    // statistics.
    Err error `json:"error"`
    // ReqAddr is the network address from which the statistics were requested
    // (or localHostIP when served from the backup file).
    ReqAddr string `json:"request_address"`
    // ReqTime is the time at which the request for statistics was made.
    ReqTime time.Time `json:"request_time"`
    // Stats contains the actual statistics.
    Stats tc.CRConfigStats `json:"stats"`
}
// CopyCRConfigStat returns a new slice containing a copy of every element of
// 'old'. Note this is an element-wise copy: pointer fields inside each
// element's Stats still alias the originals.
func CopyCRConfigStat(old []CRConfigStat) []CRConfigStat {
    return append(make([]CRConfigStat, 0, len(old)), old...)
}
// CRConfigHistoryThreadsafe stores history in a circular buffer.
//
// All fields are pointers so value copies of the structure share the same
// underlying buffer and lock.
type CRConfigHistoryThreadsafe struct {
    hist   *[]CRConfigStat // backing buffer; its len is always *limit
    m      *sync.RWMutex
    limit  *uint64 // capacity of the circular buffer
    length *uint64 // count of valid entries, always <= *limit
    pos    *uint64 // index where the next entry will be written
}
// NewCRConfigHistoryThreadsafe constructs a new, empty
// CRConfigHistoryThreadsafe - this is the ONLY way to safely create one;
// the zero value of the structure has nil pointers and every operation on it
// would fault.
//
// 'limit' is the capacity of the circular buffer - the number of entries it
// can hold before overwriting the oldest.
func NewCRConfigHistoryThreadsafe(limit uint64) CRConfigHistoryThreadsafe {
    var (
        hist   = make([]CRConfigStat, limit)
        length uint64
        pos    uint64
    )
    return CRConfigHistoryThreadsafe{
        hist:   &hist,
        m:      &sync.RWMutex{},
        limit:  &limit,
        length: &length,
        pos:    &pos,
    }
}
// Add adds the given stat to the history. Does not add new additions with the
// same remote address and CRConfig Date as the previous.
func (h CRConfigHistoryThreadsafe) Add(i *CRConfigStat) {
    h.m.Lock()
    defer h.m.Unlock()
    if *h.length != 0 {
        // Index of the most recently written entry.
        //
        // BUG FIX: this previously computed (*h.pos-1) % *h.limit. When pos
        // is 0 (the buffer has just wrapped), pos-1 underflows to 2^64-1 and
        // (2^64-1) % limit equals limit-1 only when limit is a power of two;
        // for any other limit the wrong slot was compared, defeating the
        // duplicate-suppression. Adding limit before subtracting keeps the
        // unsigned arithmetic in range.
        lastIdx := (*h.pos + *h.limit - 1) % *h.limit
        last := (*h.hist)[lastIdx]
        datesEqual := (i.Stats.DateUnixSeconds == nil && last.Stats.DateUnixSeconds == nil) || (i.Stats.DateUnixSeconds != nil && last.Stats.DateUnixSeconds != nil && *i.Stats.DateUnixSeconds == *last.Stats.DateUnixSeconds)
        cdnsEqual := (i.Stats.CDNName == nil && last.Stats.CDNName == nil) || (i.Stats.CDNName != nil && last.Stats.CDNName != nil && *i.Stats.CDNName == *last.Stats.CDNName)
        reqAddrsEqual := i.ReqAddr == last.ReqAddr
        if reqAddrsEqual && datesEqual && cdnsEqual {
            return
        }
    }
    (*h.hist)[*h.pos] = *i
    *h.pos = (*h.pos + 1) % *h.limit
    if *h.length < *h.limit {
        *h.length++
    }
}
// Get retrieves the stored history of CRConfigStat entries, oldest first.
func (h CRConfigHistoryThreadsafe) Get() []CRConfigStat {
    h.m.RLock()
    defer h.m.RUnlock()
    // Buffer has not wrapped: entries [0, length) are already in order.
    if *h.length < *h.limit {
        return CopyCRConfigStat((*h.hist)[:*h.length])
    }
    // Buffer is full: the oldest entry sits at pos, so stitch the two halves
    // back into chronological order.
    newStats := make([]CRConfigStat, *h.limit)
    copy(newStats, (*h.hist)[*h.pos:])
    copy(newStats[*h.length-*h.pos:], (*h.hist)[:*h.pos])
    return newStats
}
// Len gives the number of currently stored items in the buffer.
//
// An uninitialized buffer has zero length.
func (h CRConfigHistoryThreadsafe) Len() uint64 {
    if h.length != nil {
        return *h.length
    }
    return 0
}
// TrafficOpsSessionThreadsafe provides access to the Traffic Ops client safe
// for multiple goroutines. This fulfills the ITrafficOpsSession interface.
type TrafficOpsSessionThreadsafe struct {
    session            **client.Session // pointer-to-pointer, because we're given a pointer from the Traffic Ops package, and we don't want to copy it.
    legacySession      **legacyClient.Session // same double-pointer rationale as session, for the APIv3 client
    m                  *sync.Mutex // guards both session pointers
    lastCRConfig       ByteMapCache // most recent CRConfig bytes per CDN
    crConfigHist       CRConfigHistoryThreadsafe // history of Snapshot stats
    CRConfigBackupFile string
    TMConfigBackupFile string
}
// NewTrafficOpsSessionThreadsafe returns a new threadsafe
// TrafficOpsSessionThreadsafe wrapping the given `Session`.
//
// 'histLimit' bounds the Snapshot-stat history; backup file paths come from
// 'cfg'.
func NewTrafficOpsSessionThreadsafe(s *client.Session, ls *legacyClient.Session, histLimit uint64, cfg config.Config) TrafficOpsSessionThreadsafe {
    return TrafficOpsSessionThreadsafe{
        CRConfigBackupFile: cfg.CRConfigBackupFile,
        crConfigHist:       NewCRConfigHistoryThreadsafe(histLimit),
        lastCRConfig:       NewByteMapCache(),
        m:                  &sync.Mutex{},
        session:            &s,
        legacySession:      &ls,
        TMConfigBackupFile: cfg.TMConfigBackupFile,
    }
}
// Initialized tells whether or not the TrafficOpsSessionThreadsafe has been
// properly initialized with non-nil sessions.
func (s TrafficOpsSessionThreadsafe) Initialized() bool {
    if s.session == nil || s.legacySession == nil {
        return false
    }
    return *s.session != nil && *s.legacySession != nil
}
// Update updates the TrafficOpsSessionThreadsafe's connection information with
// the provided information. It's safe for calling by multiple goroutines, being
// aware that they will race.
//
// On success of the up-to-date login only the main session is replaced; on
// its failure the legacy login is attempted and, if it succeeds, only the
// legacy session is replaced. Either way both sessions were already reset to
// unauthenticated instances at the top of the call.
func (s *TrafficOpsSessionThreadsafe) Update(
    url string,
    username string,
    password string,
    insecure bool,
    userAgent string,
    useCache bool,
    timeout time.Duration,
) error {
    if s == nil {
        return errors.New("cannot update nil session")
    }
    s.m.Lock()
    defer s.m.Unlock()
    // always set unauthenticated sessions first which can eventually authenticate themselves when attempting requests
    if err := s.setSession(url, username, password, insecure, userAgent, useCache, timeout); err != nil {
        return err
    }
    if err := s.setLegacySession(url, username, password, insecure, userAgent, useCache, timeout); err != nil {
        return err
    }
    session, _, err := client.LoginWithAgent(url, username, password, insecure, userAgent, useCache, timeout)
    if err != nil {
        log.Errorf("logging in using up-to-date client: %v", err)
        // Fall back to the legacy client; note this 'err' shadows the outer one.
        legacySession, _, err := legacyClient.LoginWithAgent(url, username, password, insecure, userAgent, useCache, timeout)
        if err != nil || legacySession == nil {
            err = fmt.Errorf("logging in using legacy client: %v", err)
            return err
        }
        *s.legacySession = legacySession
    } else {
        *s.session = session
    }
    return nil
}
// setSession installs a fresh, unauthenticated session for the up-to-date
// client; no login is attempted.
func (s *TrafficOpsSessionThreadsafe) setSession(url, username, password string, insecure bool, userAgent string, useCache bool, timeout time.Duration) error {
    jar, err := cookiejar.New(&cookiejar.Options{
        PublicSuffixList: publicsuffix.List,
    })
    if err != nil {
        return err
    }
    httpClient := &http.Client{
        Timeout: timeout,
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
        },
        Jar: jar,
    }
    *s.session = client.NewSession(username, password, url, userAgent, httpClient, useCache)
    return nil
}
// setLegacySession installs a fresh, unauthenticated session for the legacy
// client; no login is attempted. (The comment previously named the wrong
// function, "setSession".)
func (s *TrafficOpsSessionThreadsafe) setLegacySession(url, username, password string, insecure bool, userAgent string, useCache bool, timeout time.Duration) error {
    options := cookiejar.Options{
        PublicSuffixList: publicsuffix.List,
    }
    jar, err := cookiejar.New(&options)
    if err != nil {
        return err
    }
    to := legacyClient.NewSession(username, password, url, userAgent, &http.Client{
        Timeout: timeout,
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
        },
        Jar: jar,
    }, useCache)
    *s.legacySession = to
    return nil
}
// get is used internally to get a copy of the session pointer,
// or nil if it doesn't exist. This should not be used outside
// TrafficOpsSessionThreadsafe, and never stored, because part of the purpose of
// TrafficOpsSessionThreadsafe is to store a pointer to the Session pointer, so
// it can be updated by one goroutine and immediately used by another. This
// should only be called immediately before using the session, since someone
// else may update it concurrently.
func (s TrafficOpsSessionThreadsafe) get() *client.Session {
    s.m.Lock()
    defer s.m.Unlock()
    if s.session == nil || *s.session == nil {
        return nil
    }
    return *s.session
}
// getLegacy returns the current legacy client session, or nil when unset.
// Like get, the result must be used immediately and never stored, since
// another goroutine may replace the session at any time.
func (s TrafficOpsSessionThreadsafe) getLegacy() *legacyClient.Session {
    s.m.Lock()
    defer s.m.Unlock()
    if s.legacySession == nil {
        return nil
    }
    // *s.legacySession is either a live session or nil; in the nil case the
    // original code also returned nil, so returning it directly is identical.
    return *s.legacySession
}
// CRConfigHistory gets all of the stored, historical data about CRConfig
// Snapshots' Stats sections, oldest first.
func (s TrafficOpsSessionThreadsafe) CRConfigHistory() []CRConfigStat {
    return s.crConfigHist.Get()
}
// CRConfigValid checks if the passed tc.CRConfig structure is valid, and
// ensures that it is from the same CDN as the last CRConfig Snapshot, as well
// as that it is newer than the last CRConfig Snapshot.
func (s *TrafficOpsSessionThreadsafe) | (crc *tc.CRConfig, cdn string) error {
if crc == nil {
return errors.New("CRConfig is nil")
}
if crc.Stats.CDNName == nil {
return errors.New("CRConfig.Stats.CDN missing")
}
if crc.Stats.DateUnixSeconds == nil {
return errors.New("CRConfig.Stats.Date missing")
}
// Note this intentionally takes intended CDN, rather than trusting
// crc.Stats
lastCrc, lastCrcTime, lastCrcStats := s.lastCRConfig.Get(cdn)
if lastCrc == nil {
return nil
}
if lastCrcStats.DateUnixSeconds == nil {
log.Warnln("TrafficOpsSessionThreadsafe.CRConfigValid returning no error, but last CRConfig Date was missing!")
return nil
}
if lastCrcStats.CDNName == nil {
log.Warnln("TrafficOpsSessionThreadsafe.CRConfigValid returning no error, but last CRConfig CDN was missing!")
return nil
}
if *lastCrcStats.CDNName != *crc.Stats.CDNName {
return errors.New("CRConfig.Stats.CDN " + *crc.Stats.CDNName + " different than last received CRConfig.Stats.CDNName " + *lastCrcStats.CDNName + " received at " + lastCrcTime.Format(time.RFC3339Nano))
}
if *lastCrcStats.DateUnixSeconds > *crc.Stats.DateUnixSeconds {
return errors.New("CRConfig.Stats.Date " + strconv.FormatInt(*crc.Stats.DateUnixSeconds, 10) + " older than last received CRConfig.Stats.Date " + strconv.FormatInt(*lastCrcStats.DateUnixSeconds, 10) + " received at " + lastCrcTime.Format(time.RFC3339Nano))
}
return nil
}
// CRConfigRaw returns the CRConfig from the Traffic Ops. This is safe for
// multiple goroutines.
//
// Fetch order: up-to-date client, then legacy client, then the on-disk
// backup file. Every attempt is recorded in the Snapshot-stats history, and
// a successful live fetch refreshes the backup file.
func (s TrafficOpsSessionThreadsafe) CRConfigRaw(cdn string) ([]byte, error) {
    var remoteAddr string
    var err error
    var crConfig *tc.CRConfig
    var configBytes []byte
    json := jsoniter.ConfigFastest
    ss := s.get()
    if ss == nil {
        return nil, ErrNilSession
    }
    response, reqInf, err := ss.GetCRConfig(cdn, client.RequestOptions{})
    if reqInf.RemoteAddr != nil {
        remoteAddr = reqInf.RemoteAddr.String()
    }
    if err != nil {
        log.Warnln("getting CRConfig from Traffic Ops using up-to-date client: " + err.Error() + ". Retrying with legacy client")
        ls := s.getLegacy()
        if ls == nil {
            return nil, ErrNilSession
        }
        configBytes, reqInf, err = ls.GetCRConfig(cdn)
        if reqInf.RemoteAddr != nil {
            remoteAddr = reqInf.RemoteAddr.String()
        }
        if err != nil {
            log.Errorln("getting CRConfig from Traffic Ops using legacy client: " + err.Error() + ". Checking for backup")
        }
    } else {
        crConfig = &response.Response
        configBytes, err = json.Marshal(crConfig)
        if err != nil {
            crConfig = nil
            log.Warnln("failed to marshal CRConfig using up-to-date client: " + err.Error())
        }
    }
    if err == nil {
        log.Infoln("successfully got CRConfig from Traffic Ops. Writing to backup file")
        if wErr := ioutil.WriteFile(s.CRConfigBackupFile, configBytes, 0644); wErr != nil {
            log.Errorf("failed to write CRConfig backup file: %v", wErr)
        }
    } else {
        if s.BackupFileExists() {
            log.Errorln("using backup file for CRConfig snapshot due to error fetching CRConfig snapshot from Traffic Ops: " + err.Error())
            configBytes, err = ioutil.ReadFile(s.CRConfigBackupFile)
            if err != nil {
                return nil, fmt.Errorf("reading CRConfig backup file: %v", err)
            }
            remoteAddr = localHostIP
            err = nil
        } else {
            return nil, fmt.Errorf("failed to get CRConfig from Traffic Ops (%v), and there is no backup file", err)
        }
    }
    hist := &CRConfigStat{
        Err:     err,
        ReqAddr: remoteAddr,
        ReqTime: time.Now(),
        Stats:   tc.CRConfigStats{},
    }
    defer s.crConfigHist.Add(hist)
    if crConfig == nil {
        // BUG FIX: this previously unmarshalled into the nil crConfig
        // pointer, which always fails (and crConfig.Stats below would panic
        // if it could succeed), so the legacy-client and backup-file paths
        // could never produce a valid result. Allocate the target first.
        crConfig = &tc.CRConfig{}
        if err = json.Unmarshal(configBytes, crConfig); err != nil {
            err = errors.New("invalid JSON: " + err.Error())
            hist.Err = err
            return configBytes, err
        }
    }
    hist.Stats = crConfig.Stats
    if err = s.CRConfigValid(crConfig, cdn); err != nil {
        err = errors.New("invalid CRConfig: " + err.Error())
        hist.Err = err
        return configBytes, err
    }
    s.lastCRConfig.Set(cdn, configBytes, &crConfig.Stats)
    return configBytes, nil
}
// LastCRConfig returns the last CRConfig requested from CRConfigRaw, and the
// time it was returned. This is designed to be used in conjunction with a
// poller which regularly calls CRConfigRaw. If no last CRConfig exists, because
// CRConfigRaw has never been called successfully, this calls CRConfigRaw once
// to try to get the CRConfig from Traffic Ops.
func (s TrafficOpsSessionThreadsafe) LastCRConfig(cdn string) ([]byte, time.Time, error) {
    crConfig, crConfigTime, _ := s.lastCRConfig.Get(cdn)
    if len(crConfig) == 0 {
        // Cache miss: fall through to a live fetch, timestamped now.
        b, err := s.CRConfigRaw(cdn)
        return b, time.Now(), err
    }
    return crConfig, crConfigTime, nil
}
// fetchTMConfig fetches the Traffic Monitor configuration for 'cdn' using the
// up-to-date client.
func (s TrafficOpsSessionThreadsafe) fetchTMConfig(cdn string) (*tc.TrafficMonitorConfig, error) {
    ss := s.get()
    if ss == nil {
        return nil, ErrNilSession
    }
    m, _, e := ss.GetTrafficMonitorConfig(cdn, client.NewRequestOptions())
    // m is a value; on error &m.Response points at a zero value, and callers
    // are expected to check the returned error first.
    return &m.Response, e
}
// fetchLegacyTMConfig fetches the Traffic Monitor configuration for 'cdn'
// using the legacy (APIv3) client.
func (s TrafficOpsSessionThreadsafe) fetchLegacyTMConfig(cdn string) (*tc.TrafficMonitorConfig, error) {
    ss := s.getLegacy()
    if ss == nil {
        return nil, ErrNilSession
    }
    m, _, e := ss.GetTrafficMonitorConfig(cdn)
    // Unlike the up-to-date client, the legacy client returns a pointer, so
    // guard against nil before handing it back.
    if m == nil {
        return nil, e
    }
    return m, e
}
// trafficMonitorConfigMapRaw returns the Traffic Monitor config map from the
// Traffic Ops, directly from the monitoring endpoint. This is not usually
// what is needed, rather monitoring needs the snapshotted CRConfig data, which
// is filled in by `LegacyTrafficMonitorConfigMap`. This is safe for multiple
// goroutines.
//
// Fetch order: up-to-date client, then legacy client, then the on-disk
// backup file; a successful live fetch refreshes the backup file.
func (s TrafficOpsSessionThreadsafe) trafficMonitorConfigMapRaw(cdn string) (*tc.TrafficMonitorConfigMap, error) {
    var config *tc.TrafficMonitorConfig
    var configMap *tc.TrafficMonitorConfigMap
    var err error
    config, err = s.fetchTMConfig(cdn)
    if err != nil {
        log.Warnln("getting Traffic Monitor config from Traffic Ops using up-to-date client: " + err.Error() + ". Retrying with legacy client")
        config, err = s.fetchLegacyTMConfig(cdn)
        if err != nil {
            log.Errorln("getting Traffic Monitor config from Traffic Ops using legacy client: " + err.Error())
        }
    }
    if err == nil {
        log.Infoln("successfully got Traffic Monitor config from Traffic Ops")
        if config == nil {
            return nil, fmt.Errorf("nil Traffic Monitor config after successful fetch")
        }
        configMap, err = tc.TrafficMonitorTransformToMap(config)
    }
    if err != nil {
        // Default error case, no backup file exists
        if !s.BackupFileExists() {
            return nil, err
        }
        log.Errorln("using backup file for monitoring config snapshot due to invalid monitoring config snapshot from Traffic Ops: " + err.Error())
        b, err := ioutil.ReadFile(s.TMConfigBackupFile)
        if err != nil {
            return nil, errors.New("reading TMConfigBackupFile: " + err.Error())
        }
        json := jsoniter.ConfigFastest
        var tmConfig tc.TrafficMonitorConfig
        if err := json.Unmarshal(b, &tmConfig); err != nil {
            return nil, errors.New("unmarshalling backup file monitoring.json: " + err.Error())
        }
        return tc.TrafficMonitorTransformToMap(&tmConfig)
    }
    // Live fetch succeeded: refresh the backup file (best-effort; a write
    // failure is logged but does not fail the call).
    json := jsoniter.ConfigFastest
    data, err := json.Marshal(*config)
    if err == nil {
        if wErr := ioutil.WriteFile(s.TMConfigBackupFile, data, 0644); wErr != nil {
            log.Errorf("failed to write TM config backup file: %v", wErr)
        }
    }
    return configMap, err
}
// TrafficMonitorConfigMap returns the Traffic Monitor config map from the
// Traffic Ops. This is safe for multiple goroutines.
func (s TrafficOpsSessionThreadsafe) TrafficMonitorConfigMap(cdn string) (*tc.TrafficMonitorConfigMap, error) {
    configMap, rawErr := s.trafficMonitorConfigMapRaw(cdn)
    if rawErr == nil {
        return configMap, nil
    }
    return nil, fmt.Errorf("getting monitor config map: %v", rawErr)
}
// fetchServerByHostname looks a server up by hostname using the up-to-date
// client. It returns the first returned server whose CDNName is non-nil and
// whose HostName matches exactly; otherwise an error.
func (s TrafficOpsSessionThreadsafe) fetchServerByHostname(hostName string) (tc.ServerV40, error) {
    ss := s.get()
    if ss == nil {
        return tc.ServerV40{}, ErrNilSession
    }
    params := url.Values{}
    params.Set("hostName", hostName)
    resp, _, err := ss.GetServers(client.RequestOptions{QueryParameters: params})
    if err != nil {
        return tc.ServerV40{}, fmt.Errorf("fetching server by hostname '%s': %v", hostName, err)
    }
    respLen := len(resp.Response)
    if respLen < 1 {
        return tc.ServerV40{}, fmt.Errorf("no server '%s' found in Traffic Ops", hostName)
    }
    var server tc.ServerV40
    var num int // index of the selected server, used in the multi-result warning
    found := false
    for i, srv := range resp.Response {
        num = i
        if srv.CDNName != nil && srv.HostName != nil && *srv.HostName == hostName {
            server = srv
            found = true
            break
        }
    }
    if !found {
        return tc.ServerV40{}, fmt.Errorf("either no server '%s' found in Traffic Ops, or none by that hostName had non-nil CDN", hostName)
    }
    if respLen > 1 {
        log.Warnf("Getting monitor server by hostname '%s' returned %d servers - selecting #%d", hostName, respLen, num)
    }
    return server, nil
}
func (s TrafficOpsSessionThreadsafe) fetchLegacyServerByHostname(hostName string) (tc.ServerV40, error) {
ss := s.getLegacy()
if ss == nil {
return tc.ServerV40{}, ErrNilSession
}
params := url.Values{}
params.Set("hostName", hostName)
resp, _, err := ss.GetServersWithHdr(¶ms, nil)
if err != nil {
return tc.ServerV40{}, fmt.Errorf("fetching server by hostname '%s': %v", hostName, err)
}
respLen := len(resp.Response)
if respLen < 1 {
return tc.ServerV40{}, fmt.Errorf("no server '%s' found in Traffic Ops", hostName)
}
var server tc.ServerV30
var num int
found := false
for i, srv := range resp.Response {
num = i
if srv.CDNName != nil && srv.HostName != nil && *srv.HostName == hostName {
server = srv
found = true
break
}
}
if !found {
return tc.ServerV40{}, fmt.Errorf("either no server '%s' found in Traffic Ops, or none by that hostName had non-nil CDN", hostName)
}
if respLen > 1 {
log.Warnf("Getting monitor server by hostname '%s' returned %d servers - selecting #%d", hostName, respLen, num)
}
if server.Profile == nil {
return tc.ServerV40{}, fmt.Errorf("server with hostname '%s' has no profile", hostName)
}
newServer, err := server.UpgradeToV40([]string{*server.Profile})
if err != nil {
return newServer, fmt.Errorf("coercing legacy server to new format: %v", err)
}
return newServer, nil
}
// MonitorCDN returns the name of the CDN of a Traffic Monitor with the given
// hostName.
//
// Tries the up-to-date client first, falling back to the legacy client on
// error.
func (s TrafficOpsSessionThreadsafe) MonitorCDN(hostName string) (string, error) {
    var server tc.ServerV40
    var err error
    server, err = s.fetchServerByHostname(hostName)
    if err != nil {
        log.Warnln("getting server by hostname '" + hostName + "' using up-to-date client: " + err.Error() + ". Retrying with legacy client")
        server, err = s.fetchLegacyServerByHostname(hostName)
    }
    if err != nil {
        return "", fmt.Errorf("getting monitor CDN: %v", err)
    }
    // nil-dereference checks done already in each 'fetch' method; they'll just
    // return an error in that case
    return *server.CDNName, nil
}
| CRConfigValid | identifier_name |
main.rs | //! CSIS-616 - Program #3
//!
//! Some parts were originally made by: Ralph W. Crosby PhD.
//! Edited and added to by: Paige Peck
//!
//!
//! Process a yaml format deterministic finite automaton producing
//! - A textual representation of the internal state graph
//! - A Graphviz `.dot` file representing the graph
//!
//! # Usage
//!
//! ```
//! cargo run regex
//! ```
//! where: `regex` is a series of symbols that will generate a DFA and decide if input
//! is accepted or rejected by the regex
//!
//! # Output
//!
//! To `stderr`: Debug display of the internal graph structure
//!
//! To `stdout`: Graphviz definitions of the graph structure
use std::io;
use std::io::prelude::*;
use std::io::Write;
// *********************************************************************
/// # Deterministic Finite Automata Structure
///
/// State numbers throughout this struct are 1-relative; `StateGraph`
/// converts them to 0-relative indices.
struct DFA {
    /// The set of characters comprising the alphabet (ends with 'Σ', the
    /// "any other word character" column)
    alphabet: Vec<char>,
    /// State number (1 relative) for the start state
    start: usize,
    /// Set of accept states (1 relative); may hold several states when the
    /// regex uses the `|` operator
    accept: Vec<usize>,
    /// Matrix of transitions, rows are states, columns characters in the alphabet
    transitions: Vec<Vec<usize>>,
}
/// State-based representation of the DFA version of the RegEx, with
/// 0-relative state indices.
struct StateGraph {
    /// The set of characters comprising the alphabet
    alphabet: Vec<char>,
    /// State number for the start state (0-relative index into `states`)
    start_state: usize,
    /// Vector of state objects, one per DFA state
    states: Vec<Box<State>>
}
/// Definition of a single state in the graph.
struct State {
    /// Is this an accept state?
    accept_state: bool,
    /// Target state index per alphabet column (parallel to
    /// `StateGraph::alphabet`)
    transitions: Vec<usize>
}
/// A single recorded transition: on seeing `chars`, go to `state`.
/// (Despite the plural name, each instance records exactly one transition.)
struct Transitions {
    chars: char,
    state: usize
}
/// Entry point: build a DFA from the command-line regex, emit its Graphviz
/// form, then test input sentences until EOF.
fn main() {
    //Get and validate the RegEx on the command line
    let regex = get_regex(std::env::args());
    let dfa = DFA::new_from_regex(&regex);
    //Create the state graph based on the RegEx-derived DFA
    let state_graph = StateGraph::new_from_dfa(&dfa);
    //eprintln!("{:?}", state_graph);
    state_graph.write_graphviz();
    // Process through the input until end of file (cntl-z) is encountered
    state_graph.process();
}
// *********************************************************************
/// Return the RegEx passed as the first command-line parameter.
///
/// Generalized to accept any `Iterator<Item = String>` instead of
/// `std::env::Args` specifically; the existing call site
/// `get_regex(std::env::args())` is unaffected, and the function becomes
/// unit-testable with an ordinary vector of strings.
///
/// Exits the process with status 1 unless exactly one argument (beyond the
/// program name) was supplied.
fn get_regex(args: impl Iterator<Item = String>) -> String {
    // Get the arguments as a vector
    let args: Vec<String> = args.collect();
    // Make sure only one argument was passed
    if args.len() != 2 {
        writeln!(std::io::stderr(), "Usage: cargo run 'regex'")
            .unwrap();
        std::process::exit(1);
    }
    args[1].to_string()
}
// *********************************************************************
/// Implement the methods of the DFA structure
impl DFA {
    /// Create and return, on the heap, a DFA built from the given regex.
    ///
    /// Supported operators: `|` (alternation), `+` (one-or-more) and `*`
    /// (zero-or-more); every other character is a literal. A 'Σ' column is
    /// appended to the alphabet as the "any other word character" case.
    /// The accept state is (regex length without operators) + 1.
    fn new_from_regex(regex: &str) -> Box<DFA> {
        //Setup the regex as the language / alphabet of the dfa
        //Remove any duplicate word characters
        let mut l = regex.replace("|", "");
        l = l.replace("+", "");
        l = l.replace("*", "");
        //Creates a language Vec<char> without the operators in it and pushing the sigma symbol for alphabet purposes
        let mut language: Vec<char> = l.chars().collect();
        language.sort();
        language.dedup();
        language.push('Σ');
        let final_state = l.len()+1;
        //Create a near blank dfa object, with 1 being start state, accept state being the final state
        // which is calculated based on the length of the regex length + 1
        let mut dfa = Box::new(DFA{alphabet: language,
                                   start: 1,
                                   accept: [final_state].to_vec(),
                                   transitions: vec![] });
        //Set current and next state to traverse through the graph as we create the transition matrix.
        let mut current_state = 1;
        let mut next_state = 2;
        //Create the Transitions Struct to save any transitions characters. These are characters that would
        // need to be cycled back to. First character and second state will always start this off.
        // NOTE(review): this unwrap panics on an empty regex argument.
        let mut transitions: Vec<Transitions> = Vec::new();
        let t = Transitions{chars: regex.chars().next().unwrap(),
                            state: 2};
        transitions.push(t);
        //Create a previous_char character for | and * operators
        let mut previous_char = regex.chars().next().unwrap();
        //Traverse through the regex string, reading characters and deciding what to do depending on the character.
        for c in regex.chars() {
            // Row of the transition matrix being built for this character.
            let mut states: Vec<usize> = Vec::new();
            //Checks if previous char was a | operator.
            //If so, save the current character as a transition or cycle character
            //Also fixes any previous transition state
            if previous_char == '|' {
                for (n, a) in dfa.alphabet.iter().enumerate() {
                    if *a == c {
                        dfa.transitions[0][n] = next_state;
                    }
                }
                let j = Transitions{chars: c, state: next_state};
                transitions.push(j);
            }
            //Same as above, just with the * operator.
            if previous_char == '*' {
                let j = Transitions{chars: c, state: next_state};
                transitions.push(j);
            }
            //Operator '|': Implemented - single and multiple | operators are working
            //Multiple types of symbols are untested and could produce varying results
            //Checks if character is | operator. If so, save the final state as an accept state, reset
            //current state back to 1, and set previous_char as |
            if c == '|' {
                let final_bar_state = dfa.transitions.len()+1;
                let mut final_bar_state_count: Vec<usize> = Vec::new();
                dfa.accept.push(final_bar_state);
                for _a in dfa.alphabet.iter() {
                    final_bar_state_count.push(final_bar_state);
                }
                dfa.transitions.push(final_bar_state_count);
                current_state = 1;
                previous_char = '|';
            }
            //Operator '+': Implemented - single works, multiple is funky, almost working
            //Removes the previous transition matrix to remake it with updated states
            //Fix to the multiple + operators I believe is using a for loop to go through the entire transitions vec
            // but I have ran out of time to get that working.
            else if c == '+' {
                dfa.transitions.remove(dfa.transitions.len()-1);
                next_state -= 1;
                current_state -= 1;
                for a in dfa.alphabet.iter() {
                    if a == &previous_char {
                        states.push(next_state);
                    } else {
                        if *a == transitions[0].chars {
                            states.push(transitions[0].state);
                        } else {
                            states.push(1);
                        }
                    }
                }
                dfa.transitions.push(states);
                next_state += 1;
                current_state += 1;
            }
            //Operator '*': Implemented - Single and multiple * operators are working. Something funky happens with the more characters
            // added into the regex, especially after a *. Not time to check it. Very close to getting this part fixed, most of it works
            //Similar to + operator, remove previous transition to replace it with new one.
            // Step back 2 states for next and current to allow for proper transition. Push necessary states.
            // Potential fix is similar to + operator with iterating over transitions instead of just checking index 0.
            //At the end, add 2 to current state to get back, and set previous_char as *
            else if c == '*' {
                dfa.transitions.remove(dfa.transitions.len()-1);
                let mut pushed_forward = false;
                next_state -= 2;
                current_state -= 2;
                for a in dfa.alphabet.iter() {
                    if a == &previous_char {
                        next_state += 1;
                        states.push(next_state);
                    } else if *a == 'Σ' {
                        states.push(1);
                    } else {
                        if *a == transitions[0].chars {
                            states.push(transitions[0].state);
                        } else if !pushed_forward {
                            next_state += 1;
                            states.push(next_state);
                            pushed_forward = true;
                        } else {
                            states.push(1);
                        }
                    }
                }
                dfa.transitions.push(states);
                current_state += 2;
                previous_char = '*';
            }
            //All word character symbols: Implemented
            //Allows for any character that is in the language to be added in, checks if there is a transition/cycle
            //to be made, set the state as that before pushing. If it is not a transition, push to state 1
            //if sigma symbol, push to state 1
            else if c != 'Σ'
            {
                for a in dfa.alphabet.iter() {
                    let mut was_transition = false;
                    if c == *a {
                        states.push(next_state);
                    }
                    else {
                        for i in 0..transitions.len() {
                            if *a == transitions[i].chars {
                                states.push(transitions[i].state);
                                was_transition = true;
                            }
                        }
                        if was_transition == false {
                            if previous_char == '*' && *a != 'Σ' {
                                states.push(1);
                                previous_char = c;
                            } else {
                                states.push(1);
                            }
                        }
                    }
                }
                if previous_char != '|' {
                    dfa.transitions.push(states);
                }
                next_state += 1;
                current_state += 1;
                previous_char = c;
            }
        }
        //Go back through and fix any transitions that weren't marked properly
        // (i.e. | transitions to state 2 from state 4 if applicable)
        for i in 0..dfa.transitions.len() {
            for n in 0..dfa.transitions[i].len() {
                if n < dfa.transitions[i].len() - 1 && dfa.transitions[i][n] == 1 {
                    for c in 0..transitions.len() {
                        if dfa.alphabet[n] == transitions[c].chars {
                            dfa.transitions[i][n] = transitions[c].state;
                        }
                    }
                }
            }
        }
        //Set final state as a cycle for transition matrix. If 3 states, push [3,3,3]
        let mut final_state_count: Vec<usize> = Vec::new();
        for _alphabet in dfa.alphabet.iter() {
            final_state_count.push(final_state);
        }
        dfa.transitions.push(final_state_count);
        dfa
    }
}
// *********************************************************************
// Implement the methods of the DFA structure
impl StateGraph<> {
/// Create a state graph from a DFA structure
fn new_from_dfa(dfa: &DFA) -> Box<StateGraph> {
// Create an empty graph object
let mut graph = Box::new(StateGraph{alphabet: dfa.alphabet.clone(),
start_state: dfa.start - 1,
states: vec!() });
// Look through the transition table building state objects
for row in dfa.transitions.iter() {
let mut v = Box::new(State{accept_state: false, transitions: vec!()});
for col in row {
v.transitions.push(col-1);
}
graph.states.push(v);
}
// Set the accept states
for astate in dfa.accept.iter() {
graph.states[*astate - 1].accept_state = true;
}
graph
}
/// Execute the graph on a sentence
/// Return Err if a character not in the alphabet is encountered
/// Return Ok and a bool indicating accept (true) or reject (false)
fn test_sentence(&self, sentence: &str) -> Result<bool, String> {
let mut state = self.start_state;
//Full alphabet to test against for sigma character
let full_alphabet: Vec<char> = "abcdefghijklmnopqrstuvwxyz0123456789 ".chars().collect();
for ch in sentence.chars() {
//Check if character is a word character. Accept it if it is and change it to the 'Σ' symbol for matching purposes
let mut c = ch;
if !self.alphabet.contains(&c) && full_alphabet.contains(&c) {
c = 'Σ';
}
let state_no = match self.alphabet.iter().position(|v| *v == ch || *v == c) {
Some(t) => t,
None => return Err(format!("Character <{}> does not have a transition", ch))
};
print!("δ(q{}, {}) → ", state+1, ch);
state = self.states[state].transitions[state_no];
println!("(q{})", state+1);
}
Ok(self.states[state].accept_state)
}
fn write_graphviz(&self) {
println!("digraph {{");
println!("\trankdir=LR;");
println!("\tnode [shape=point]; start;");
for (n, state) in self.states.iter().enumerate() {
if state.accept_state {
println!("\tnode [shape=doublecircle]; q{};", n+1);
}
}
println!("\tnode [shape=circle];");
println!("\tstart -> q{}", self.start_state+1);
for (n, state) in self.states.iter().enumerate() {
for (i, ch) in self.alphabet.iter().enumerate() {
println!("\tq{} -> q{} [label=\"{}\"]", n+1, state.transitions[i] + 1, ch);
}
}
println!("}}");
}
fn process(& |
let stdin = io::stdin();
for line in stdin.lock().lines() {
// Get the line out of the Result, should never error
let sentence = &line.unwrap();
println!("Processing sentence <{}>", sentence);
match self.test_sentence(sentence) {
Ok(b) => println!("{}",
if b {"Accept"} else {"Reject"}),
Err(s) => println!("Error processing sentence: {}", s)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
//This test is used to make sure that it creates a graphviz file
#[test]
fn test1() {
let dfa = DFA::new_from_regex("a*b");
//Create the dfa structure based on in RegEx entered from the command line
let state_graph = StateGraph::new_from_dfa(&dfa);
state_graph.write_graphviz();
}
} | self) { | identifier_name |
main.rs | //! CSIS-616 - Program #3
//!
//! Some parts were originally made by: Ralph W. Crosby PhD.
//! Edited and added to by: Paige Peck
//!
//!
//! Process a yaml format deterministic finite automaton producing
//! - A textual representation of the internal state graph
//! - A Graphviz `.dot` file representing the graph
//!
//! # Usage
//!
//! ```
//! cargo run regex
//! ```
//! where: `regex` is a series of symbols that will generate a DFA and decide if input
//! is accepted or rejected by the regex
//!
//! # Output
//!
//! To `stderr`: Debug display of the internal graph structure
//!
//! To `stdout`: Graphviz definitions of the graph structure
use std::io;
use std::io::prelude::*;
use std::io::Write;
// *********************************************************************
/// # Deterministic Finite Automata Structure
struct DFA {
/// The set of characters comprising the alphabet
alphabet: Vec<char>,
/// State number (1 relative) for the start state
start: usize,
/// Set of accept states (1 relative)
accept: Vec<usize>, //will need to be Vec<usize> when multiple accept states are implemented
/// Matrix of transitions, rows are states, columns characters in the alphabet
transitions: Vec<Vec<usize>>,
}
//State based representation of the DFA version of the RegEx
struct StateGraph {
/// The set of characters comprising the alphabet
alphabet: Vec<char>,
/// State number for the start state
start_state: usize,
/// Vector of state objects
states: Vec<Box<State>>
}
//Definition of a single state
struct State {
//Is this an accept state
accept_state: bool,
//Set of transitions
transitions: Vec<usize>
}
struct Transitions {
chars: char,
state: usize
}
fn main() {
//Get and validate the RegEx on the command line
let regex = get_regex(std::env::args());
let dfa = DFA::new_from_regex(®ex);
//Create the dfa structure based on in RegEx entered from the command line
let state_graph = StateGraph::new_from_dfa(&dfa);
//eprintln!("{:?}", state_graph);
state_graph.write_graphviz();
// Process through the input until end of file (cntl-z) is encountered
state_graph.process();
}
// *********************************************************************
/// Return the RegEx passed as the first parameter
fn get_regex(args: std::env::Args) -> String {
// Get the arguments as a vector
let args: Vec<String> = args.collect();
// Make sure only one argument was passed
if args.len() != 2 {
writeln!(std::io::stderr(), "Usage: cargo run 'regex'")
.unwrap();
std::process::exit(1);
}
args[1].to_string()
}
// *********************************************************************
/// Implement the methods of the DFA structure
impl DFA {
//Create and return a DFA on the heap
//Generate the DFA from the given regex
fn new_from_regex(regex: &str) -> Box<DFA> {
//Setup the regex as the language / alphabet of the dfa
//Remove any duplicate word characters
let mut l = regex.replace("|", "");
l = l.replace("+", "");
l = l.replace("*", "");
//Creates a language Vec<char> without the operators in it and pushing the sigma symbol for alphabet purposes
let mut language: Vec<char> = l.chars().collect();
language.sort();
language.dedup();
language.push('Σ');
let final_state = l.len()+1;
//Create a near blank dfa object, with 1 being start state, accept state being the final state
// which is calculated based on the length of the regex length + 1
let mut dfa = Box::new(DFA{alphabet: language,
start: 1,
accept: [final_state].to_vec(),
transitions: vec![] });
//Set current and next state to traverse through the graph as we create the transition matrix.
let mut current_state = 1;
let mut next_state = 2;
//Create the Transitions Struct to save any transitions characters. These are characters that would
// need to be cycled back to. First character and second state will always start this off.
let mut transitions: Vec<Transitions> = Vec::new();
let t = Transitions{chars: regex.chars().next().unwrap(),
state: 2};
transitions.push(t);
//Create a previous_char character for | and * operators
let mut previous_char = regex.chars().next().unwrap();
//Traverse through the regex string, reading characters and deciding what to do depending on the character.
for c in regex.chars() {
let mut states: Vec<usize> = Vec::new();
//Checks if previous char was a | operator.
//If so, save the current character as a transition or cycle character
//Also fixes any previous transition state
if previous_char == '|' {
for (n, a) in dfa.alphabet.iter().enumerate() {
if *a == c {
dfa.transitions[0][n] = next_state;
}
}
let j = Transitions{chars: c, state: next_state};
transitions.push(j);
}
//Same as above, just with the * operator.
if previous_char == '*' {
let j = Transitions{chars: c, state: next_state};
transitions.push(j);
}
//Operator '|': Implemented - single and multiple | operators are working
//Multiple types of symbols are untested and could produce varying results
//Checks if character is | operator. If so, save the final state as an accept state, reset
//current state back to 1, and set previous_char as |
if c == '|' {
let final_bar_state = dfa.transitions.len()+1;
let mut final_bar_state_count: Vec<usize> = Vec::new();
dfa.accept.push(final_bar_state);
for _a in dfa.alphabet.iter() {
final_bar_state_count.push(final_bar_state);
}
dfa.transitions.push(final_bar_state_count);
current_state = 1;
previous_char = '|';
}
//Operator '+': Implemented - single works, multiple is funky, almost working
//Removes the previous transition matrix to remake it with updated states
//Fix to the multiple + operators I believe is using a for loop to go through the entire transitions vec
// but I have ran out of time to get that working.
else if c == '+' {
dfa.transitions.remove(dfa.transitions.len()-1);
next_state -= 1;
current_state -= 1;
for a in dfa.alphabet.iter() {
if a == &previous_char {
states.push(next_state);
} else {
if *a == transitions[0].chars {
states.push(transitions[0].state);
} else {
states.push(1);
}
}
}
dfa.transitions.push(states);
next_state += 1;
current_state += 1;
}
//Operator '*': Implemented - Single and multiple * operators are working. Something funky happens with the more characters
// added into the regex, especially after a *. Not time to check it. Very close to getting this part fixed, most of it works
//Similar to + operator, remove previous transition to replace it with new one.
// Step back 2 states for next and current to allow for proper transition. Push necessary states. | //At the end, add 2 to current state to get back, and set previous_char as *
else if c == '*' {
dfa.transitions.remove(dfa.transitions.len()-1);
let mut pushed_forward = false;
next_state -= 2;
current_state -= 2;
for a in dfa.alphabet.iter() {
if a == &previous_char {
next_state += 1;
states.push(next_state);
} else if *a == 'Σ' {
states.push(1);
} else {
if *a == transitions[0].chars {
states.push(transitions[0].state);
} else if !pushed_forward {
next_state += 1;
states.push(next_state);
pushed_forward = true;
} else {
states.push(1);
}
}
}
dfa.transitions.push(states);
current_state += 2;
previous_char = '*';
}
//All word character symbols: Implemented
//Allows for any character that is in the language to be added in, checks if there is a transition/cycle
//to be made, set the state as that before pushing. If it is not a transition, push to state 1
//if sigma symbol, push to state 1
else if c != 'Σ'
{
for a in dfa.alphabet.iter() {
let mut was_transition = false;
if c == *a {
states.push(next_state);
}
else {
for i in 0..transitions.len() {
if *a == transitions[i].chars {
states.push(transitions[i].state);
was_transition = true;
}
}
if was_transition == false {
if previous_char == '*' && *a != 'Σ' {
states.push(1);
previous_char = c;
} else {
states.push(1);
}
}
}
}
if previous_char != '|' {
dfa.transitions.push(states);
}
next_state += 1;
current_state += 1;
previous_char = c;
}
}
//Go back through and fix any transitions that weren't marked properly
// (i.e. | transitions to state 2 from state 4 if applicable)
for i in 0..dfa.transitions.len() {
for n in 0..dfa.transitions[i].len() {
if n < dfa.transitions[i].len() - 1 && dfa.transitions[i][n] == 1 {
for c in 0..transitions.len() {
if dfa.alphabet[n] == transitions[c].chars {
dfa.transitions[i][n] = transitions[c].state;
}
}
}
}
}
//Set final state as a cycle for transition matrix. If 3 states, push [3,3,3]
let mut final_state_count: Vec<usize> = Vec::new();
for _alphabet in dfa.alphabet.iter() {
final_state_count.push(final_state);
}
dfa.transitions.push(final_state_count);
dfa
}
}
// *********************************************************************
// Implement the methods of the DFA structure
impl StateGraph<> {
/// Create a state graph from a DFA structure
fn new_from_dfa(dfa: &DFA) -> Box<StateGraph> {
// Create an empty graph object
let mut graph = Box::new(StateGraph{alphabet: dfa.alphabet.clone(),
start_state: dfa.start - 1,
states: vec!() });
// Look through the transition table building state objects
for row in dfa.transitions.iter() {
let mut v = Box::new(State{accept_state: false, transitions: vec!()});
for col in row {
v.transitions.push(col-1);
}
graph.states.push(v);
}
// Set the accept states
for astate in dfa.accept.iter() {
graph.states[*astate - 1].accept_state = true;
}
graph
}
/// Execute the graph on a sentence
/// Return Err if a character not in the alphabet is encountered
/// Return Ok and a bool indicating accept (true) or reject (false)
fn test_sentence(&self, sentence: &str) -> Result<bool, String> {
let mut state = self.start_state;
//Full alphabet to test against for sigma character
let full_alphabet: Vec<char> = "abcdefghijklmnopqrstuvwxyz0123456789 ".chars().collect();
for ch in sentence.chars() {
//Check if character is a word character. Accept it if it is and change it to the 'Σ' symbol for matching purposes
let mut c = ch;
if !self.alphabet.contains(&c) && full_alphabet.contains(&c) {
c = 'Σ';
}
let state_no = match self.alphabet.iter().position(|v| *v == ch || *v == c) {
Some(t) => t,
None => return Err(format!("Character <{}> does not have a transition", ch))
};
print!("δ(q{}, {}) → ", state+1, ch);
state = self.states[state].transitions[state_no];
println!("(q{})", state+1);
}
Ok(self.states[state].accept_state)
}
fn write_graphviz(&self) {
println!("digraph {{");
println!("\trankdir=LR;");
println!("\tnode [shape=point]; start;");
for (n, state) in self.states.iter().enumerate() {
if state.accept_state {
println!("\tnode [shape=doublecircle]; q{};", n+1);
}
}
println!("\tnode [shape=circle];");
println!("\tstart -> q{}", self.start_state+1);
for (n, state) in self.states.iter().enumerate() {
for (i, ch) in self.alphabet.iter().enumerate() {
println!("\tq{} -> q{} [label=\"{}\"]", n+1, state.transitions[i] + 1, ch);
}
}
println!("}}");
}
fn process(&self) {
let stdin = io::stdin();
for line in stdin.lock().lines() {
// Get the line out of the Result, should never error
let sentence = &line.unwrap();
println!("Processing sentence <{}>", sentence);
match self.test_sentence(sentence) {
Ok(b) => println!("{}",
if b {"Accept"} else {"Reject"}),
Err(s) => println!("Error processing sentence: {}", s)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
//This test is used to make sure that it creates a graphviz file
#[test]
fn test1() {
let dfa = DFA::new_from_regex("a*b");
//Create the dfa structure based on in RegEx entered from the command line
let state_graph = StateGraph::new_from_dfa(&dfa);
state_graph.write_graphviz();
}
} | // Potential fix is similar to + operator with iterating over transitions instead of just checking index 0. | random_line_split |
main.rs | //! CSIS-616 - Program #3
//!
//! Some parts were originally made by: Ralph W. Crosby PhD.
//! Edited and added to by: Paige Peck
//!
//!
//! Process a yaml format deterministic finite automaton producing
//! - A textual representation of the internal state graph
//! - A Graphviz `.dot` file representing the graph
//!
//! # Usage
//!
//! ```
//! cargo run regex
//! ```
//! where: `regex` is a series of symbols that will generate a DFA and decide if input
//! is accepted or rejected by the regex
//!
//! # Output
//!
//! To `stderr`: Debug display of the internal graph structure
//!
//! To `stdout`: Graphviz definitions of the graph structure
use std::io;
use std::io::prelude::*;
use std::io::Write;
// *********************************************************************
/// # Deterministic Finite Automata Structure
struct DFA {
/// The set of characters comprising the alphabet
alphabet: Vec<char>,
/// State number (1 relative) for the start state
start: usize,
/// Set of accept states (1 relative)
accept: Vec<usize>, //will need to be Vec<usize> when multiple accept states are implemented
/// Matrix of transitions, rows are states, columns characters in the alphabet
transitions: Vec<Vec<usize>>,
}
//State based representation of the DFA version of the RegEx
struct StateGraph {
/// The set of characters comprising the alphabet
alphabet: Vec<char>,
/// State number for the start state
start_state: usize,
/// Vector of state objects
states: Vec<Box<State>>
}
//Definition of a single state
struct State {
//Is this an accept state
accept_state: bool,
//Set of transitions
transitions: Vec<usize>
}
struct Transitions {
chars: char,
state: usize
}
fn main() |
// *********************************************************************
/// Return the RegEx passed as the first parameter
fn get_regex(args: std::env::Args) -> String {
// Get the arguments as a vector
let args: Vec<String> = args.collect();
// Make sure only one argument was passed
if args.len() != 2 {
writeln!(std::io::stderr(), "Usage: cargo run 'regex'")
.unwrap();
std::process::exit(1);
}
args[1].to_string()
}
// *********************************************************************
/// Implement the methods of the DFA structure
impl DFA {
//Create and return a DFA on the heap
//Generate the DFA from the given regex
fn new_from_regex(regex: &str) -> Box<DFA> {
//Setup the regex as the language / alphabet of the dfa
//Remove any duplicate word characters
let mut l = regex.replace("|", "");
l = l.replace("+", "");
l = l.replace("*", "");
//Creates a language Vec<char> without the operators in it and pushing the sigma symbol for alphabet purposes
let mut language: Vec<char> = l.chars().collect();
language.sort();
language.dedup();
language.push('Σ');
let final_state = l.len()+1;
//Create a near blank dfa object, with 1 being start state, accept state being the final state
// which is calculated based on the length of the regex length + 1
let mut dfa = Box::new(DFA{alphabet: language,
start: 1,
accept: [final_state].to_vec(),
transitions: vec![] });
//Set current and next state to traverse through the graph as we create the transition matrix.
let mut current_state = 1;
let mut next_state = 2;
//Create the Transitions Struct to save any transitions characters. These are characters that would
// need to be cycled back to. First character and second state will always start this off.
let mut transitions: Vec<Transitions> = Vec::new();
let t = Transitions{chars: regex.chars().next().unwrap(),
state: 2};
transitions.push(t);
//Create a previous_char character for | and * operators
let mut previous_char = regex.chars().next().unwrap();
//Traverse through the regex string, reading characters and deciding what to do depending on the character.
for c in regex.chars() {
let mut states: Vec<usize> = Vec::new();
//Checks if previous char was a | operator.
//If so, save the current character as a transition or cycle character
//Also fixes any previous transition state
if previous_char == '|' {
for (n, a) in dfa.alphabet.iter().enumerate() {
if *a == c {
dfa.transitions[0][n] = next_state;
}
}
let j = Transitions{chars: c, state: next_state};
transitions.push(j);
}
//Same as above, just with the * operator.
if previous_char == '*' {
let j = Transitions{chars: c, state: next_state};
transitions.push(j);
}
//Operator '|': Implemented - single and multiple | operators are working
//Multiple types of symbols are untested and could produce varying results
//Checks if character is | operator. If so, save the final state as an accept state, reset
//current state back to 1, and set previous_char as |
if c == '|' {
let final_bar_state = dfa.transitions.len()+1;
let mut final_bar_state_count: Vec<usize> = Vec::new();
dfa.accept.push(final_bar_state);
for _a in dfa.alphabet.iter() {
final_bar_state_count.push(final_bar_state);
}
dfa.transitions.push(final_bar_state_count);
current_state = 1;
previous_char = '|';
}
//Operator '+': Implemented - single works, multiple is funky, almost working
//Removes the previous transition matrix to remake it with updated states
//Fix to the multiple + operators I believe is using a for loop to go through the entire transitions vec
// but I have ran out of time to get that working.
else if c == '+' {
dfa.transitions.remove(dfa.transitions.len()-1);
next_state -= 1;
current_state -= 1;
for a in dfa.alphabet.iter() {
if a == &previous_char {
states.push(next_state);
} else {
if *a == transitions[0].chars {
states.push(transitions[0].state);
} else {
states.push(1);
}
}
}
dfa.transitions.push(states);
next_state += 1;
current_state += 1;
}
//Operator '*': Implemented - Single and multiple * operators are working. Something funky happens with the more characters
// added into the regex, especially after a *. Not time to check it. Very close to getting this part fixed, most of it works
//Similar to + operator, remove previous transition to replace it with new one.
// Step back 2 states for next and current to allow for proper transition. Push necessary states.
// Potential fix is similar to + operator with iterating over transitions instead of just checking index 0.
//At the end, add 2 to current state to get back, and set previous_char as *
else if c == '*' {
dfa.transitions.remove(dfa.transitions.len()-1);
let mut pushed_forward = false;
next_state -= 2;
current_state -= 2;
for a in dfa.alphabet.iter() {
if a == &previous_char {
next_state += 1;
states.push(next_state);
} else if *a == 'Σ' {
states.push(1);
} else {
if *a == transitions[0].chars {
states.push(transitions[0].state);
} else if !pushed_forward {
next_state += 1;
states.push(next_state);
pushed_forward = true;
} else {
states.push(1);
}
}
}
dfa.transitions.push(states);
current_state += 2;
previous_char = '*';
}
//All word character symbols: Implemented
//Allows for any character that is in the language to be added in, checks if there is a transition/cycle
//to be made, set the state as that before pushing. If it is not a transition, push to state 1
//if sigma symbol, push to state 1
else if c != 'Σ'
{
for a in dfa.alphabet.iter() {
let mut was_transition = false;
if c == *a {
states.push(next_state);
}
else {
for i in 0..transitions.len() {
if *a == transitions[i].chars {
states.push(transitions[i].state);
was_transition = true;
}
}
if was_transition == false {
if previous_char == '*' && *a != 'Σ' {
states.push(1);
previous_char = c;
} else {
states.push(1);
}
}
}
}
if previous_char != '|' {
dfa.transitions.push(states);
}
next_state += 1;
current_state += 1;
previous_char = c;
}
}
//Go back through and fix any transitions that weren't marked properly
// (i.e. | transitions to state 2 from state 4 if applicable)
for i in 0..dfa.transitions.len() {
for n in 0..dfa.transitions[i].len() {
if n < dfa.transitions[i].len() - 1 && dfa.transitions[i][n] == 1 {
for c in 0..transitions.len() {
if dfa.alphabet[n] == transitions[c].chars {
dfa.transitions[i][n] = transitions[c].state;
}
}
}
}
}
//Set final state as a cycle for transition matrix. If 3 states, push [3,3,3]
let mut final_state_count: Vec<usize> = Vec::new();
for _alphabet in dfa.alphabet.iter() {
final_state_count.push(final_state);
}
dfa.transitions.push(final_state_count);
dfa
}
}
// *********************************************************************
// Implement the methods of the DFA structure
impl StateGraph<> {
/// Create a state graph from a DFA structure
fn new_from_dfa(dfa: &DFA) -> Box<StateGraph> {
// Create an empty graph object
let mut graph = Box::new(StateGraph{alphabet: dfa.alphabet.clone(),
start_state: dfa.start - 1,
states: vec!() });
// Look through the transition table building state objects
for row in dfa.transitions.iter() {
let mut v = Box::new(State{accept_state: false, transitions: vec!()});
for col in row {
v.transitions.push(col-1);
}
graph.states.push(v);
}
// Set the accept states
for astate in dfa.accept.iter() {
graph.states[*astate - 1].accept_state = true;
}
graph
}
/// Execute the graph on a sentence
/// Return Err if a character not in the alphabet is encountered
/// Return Ok and a bool indicating accept (true) or reject (false)
fn test_sentence(&self, sentence: &str) -> Result<bool, String> {
let mut state = self.start_state;
//Full alphabet to test against for sigma character
let full_alphabet: Vec<char> = "abcdefghijklmnopqrstuvwxyz0123456789 ".chars().collect();
for ch in sentence.chars() {
//Check if character is a word character. Accept it if it is and change it to the 'Σ' symbol for matching purposes
let mut c = ch;
if !self.alphabet.contains(&c) && full_alphabet.contains(&c) {
c = 'Σ';
}
let state_no = match self.alphabet.iter().position(|v| *v == ch || *v == c) {
Some(t) => t,
None => return Err(format!("Character <{}> does not have a transition", ch))
};
print!("δ(q{}, {}) → ", state+1, ch);
state = self.states[state].transitions[state_no];
println!("(q{})", state+1);
}
Ok(self.states[state].accept_state)
}
fn write_graphviz(&self) {
println!("digraph {{");
println!("\trankdir=LR;");
println!("\tnode [shape=point]; start;");
for (n, state) in self.states.iter().enumerate() {
if state.accept_state {
println!("\tnode [shape=doublecircle]; q{};", n+1);
}
}
println!("\tnode [shape=circle];");
println!("\tstart -> q{}", self.start_state+1);
for (n, state) in self.states.iter().enumerate() {
for (i, ch) in self.alphabet.iter().enumerate() {
println!("\tq{} -> q{} [label=\"{}\"]", n+1, state.transitions[i] + 1, ch);
}
}
println!("}}");
}
fn process(&self) {
let stdin = io::stdin();
for line in stdin.lock().lines() {
// Get the line out of the Result, should never error
let sentence = &line.unwrap();
println!("Processing sentence <{}>", sentence);
match self.test_sentence(sentence) {
Ok(b) => println!("{}",
if b {"Accept"} else {"Reject"}),
Err(s) => println!("Error processing sentence: {}", s)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
//This test is used to make sure that it creates a graphviz file
#[test]
fn test1() {
let dfa = DFA::new_from_regex("a*b");
//Create the dfa structure based on in RegEx entered from the command line
let state_graph = StateGraph::new_from_dfa(&dfa);
state_graph.write_graphviz();
}
} | {
//Get and validate the RegEx on the command line
let regex = get_regex(std::env::args());
let dfa = DFA::new_from_regex(®ex);
//Create the dfa structure based on in RegEx entered from the command line
let state_graph = StateGraph::new_from_dfa(&dfa);
//eprintln!("{:?}", state_graph);
state_graph.write_graphviz();
// Process through the input until end of file (cntl-z) is encountered
state_graph.process();
} | identifier_body |
cblmg_cs_supplierCoachGeocoder.js | /**
* Module Description
*
* Version Date Author Remarks
* 1.00 13 Jul 2014 AnJoe
*
* 8/2/2014
* Modified to allow geocoding on Booking records
*/
var paramGeoCodeFormIds = nlapiGetContext().getSetting('SCRIPT', 'custscript_sct246_formids').split(',');
var paramGeoCodeSupplierCategoryIds = nlapiGetContext().getSetting('SCRIPT', 'custscript_sct246_categoryids').split(',');
var geoflds = ['geoadr1','geoadr2','geoadr3','geocity','geocountystate','geopostcode','geocountry'];
var oShipAdrText = '';
var reGeoCode = false;
var PinImageUrl = 'https://maps.gstatic.com/mapfiles/ridefinder-images/mm_20_red.png';
var canGeoCode = false;
function supplierPageInit(type) {
//grab default shipping address on the record
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
//2014v2 Modification. nlapiGetLineItemValue on addressbook no longer works. MUST select and use nlapiGetCurrentLineItemValue
if (shipLine && parseInt(shipLine) > 0) {
nlapiSelectLineItem('addressbook', shipLine);
oShipAdrText = nlapiGetCurrentLineItemValue('addressbook', 'addrtext');
//oShipAdrText = nlapiGetLineItemValue('addressbook', 'addrtext', shipLine)?nlapiGetLineItemValue('addressbook', 'addrtext', shipLine):'';
}
//Changing it so that we ONLY execute drawGoogleMap when it's handled by userinterface
if (paramGeoCodeFormIds.contains(nlapiGetFieldValue('customform')) && nlapiGetContext().getExecutionContext() == 'userinterface') {
canGeoCode = true;
drawGoogleMapByGeoCode();
}
}
function supplierValidateLine(type) {
if (canGeoCode && type == 'addressbook' && nlapiGetCurrentLineItemValue(type, 'defaultshipping')=='T') {
var nShipAdrText = nlapiGetCurrentLineItemValue('addressbook', 'addrtext')?nlapiGetCurrentLineItemValue('addressbook', 'addrtext'):'';
//alert(oShipAdrText +' // '+nShipAdrText);
if (oShipAdrText != nShipAdrText) {
reGeoCode = true;
nlapiSetFieldValue('custentity_cbl_shipadr_lat','',false,true);
nlapiSetFieldValue('custentity_cbl_shipadr_lng','',false,true);
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail', '',false,true);
}
}
return true;
}
/**
* The recordType (internal id) corresponds to the "Applied To" record in your script deployment.
* @appliedtorecord recordType
*
* @returns {Boolean} True to continue save, false to abort save
*/
function supplierSaveRecord(){
//Attempt to geocode ONLY on User Interface
if (canGeoCode && nlapiGetContext().getExecutionContext() == 'userinterface') {
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
if (shipLine >= 1 && (!nlapiGetFieldValue('custentity_cbl_shipadr_lat') || !nlapiGetFieldValue('custentity_cbl_shipadr_lng')) && paramGeoCodeSupplierCategoryIds.contains(nlapiGetFieldValue('category'))) {
alert('Please Geocode Shipping Address');
return false;
}
}
return true;
}
function supplierFldChanged(type, name, linenum) {
if (!canGeoCode) {
return;
}
//If both Lat/Lng values are set, display the Google Map map_canvas
//ONLY Redraw IF Both Lat/Lng values are provided
if ((name=='custentity_cbl_shipadr_lat' || name=='custentity_cbl_shipadr_lng') && nlapiGetFieldValue('custentity_cbl_shipadr_lat') && nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
drawGoogleMapByGeoCode();
}
}
/**
* ONLY on booking record to fire suitelet that looks up matching clients
* @returns {Boolean}
*/
function radiusCoachLookup() {
if (!nlapiGetFieldValue('custentity_cbl_shipadr_lat') && !nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
alert('Please Geocode Booking Location Address under Location Subtab');
return false;
}
//8/5/2014 - Client requested following changes:
// - Subsidiary search
// - 100 within UK; 1000 outside UK
// - Selected coach pin
// - Buyer
// -
var selectedCoachId = nlapiGetFieldValue('custentity_bo_coach');
var bookingSubsId = nlapiGetFieldValue('subsidiary');
var bookingBuyer = nlapiGetFieldText('custentity_bo_buyer');
var bookingCourse = nlapiGetFieldText('custentity_bo_course');
var bookingItem = nlapiGetFieldText('custentity_bo_item');
var bookingDate = nlapiGetFieldValue('enddate');
var bookingTime = nlapiGetFieldValue('custentity_bo_eventtime');
var selectedCoachText = nlapiGetFieldText('custentity_bo_coach');
var bookingClientText = nlapiGetFieldText('parent');
//customscript_cbl_sl_radiuscoachsearch
//customdeploy_cbl_sl_radiuscoachsearchd
var radiusSlUrl = nlapiResolveURL('SUITELET', 'customscript_cbl_sl_radiuscoachsearch', 'customdeploy_cbl_sl_radiuscoachsearchd', 'VIEW')+
'&booklat='+nlapiGetFieldValue('custentity_cbl_shipadr_lat')+'&booklng='+nlapiGetFieldValue('custentity_cbl_shipadr_lng')+
'&bookcountryid='+nlapiGetFieldValue('custentity_bo_eventcountry')+'&bookcountry='+nlapiGetFieldText('custentity_bo_eventcountry')+
'&selectedcoach='+selectedCoachId+'&booksubsid='+bookingSubsId+'&buyer='+encodeURIComponent(bookingBuyer)+
'&bookdatetime='+encodeURIComponent(bookingDate+' '+bookingTime)+'&course='+encodeURIComponent(bookingCourse)+
'&item='+encodeURIComponent(bookingItem)+'&client='+bookingClientText+'&selectedcoachtext='+selectedCoachText+
'&entityid='+nlapiGetFieldValue('entityid')+'&bookid='+nlapiGetRecordId()+'&bookingtype='+nlapiGetFieldValue('jobtype');
window.open(radiusSlUrl,'Radius_Search','width=1200,height=700,resizable=yes,scrollbars=yes');
}
function setValueFromRadiusLookup(_id) {
nlapiSetFieldValue('custentity_bo_coach', _id, true, true);
}
function drawGoogleMapByGeoCode() {
//Make sure map_canvas div element is present
if (!document.getElementById('map_canvas'))
{
return;
}
if (canGeoCode && nlapiGetFieldValue('custentity_cbl_shipadr_lat') && nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
try {
var nsmap = null;
var latlng = new google.maps.LatLng(nlapiGetFieldValue('custentity_cbl_shipadr_lat'), nlapiGetFieldValue('custentity_cbl_shipadr_lng'));
var moption = {
center:latlng,
zoom:15,
mapTypeId: google.maps.MapTypeId.ROADMAP
};
nsmap = new google.maps.Map(document.getElementById('map_canvas'),moption);
//Drops Lead Pin on the map
new google.maps.Marker({
position: latlng,
map:nsmap,
animation: google.maps.Animation.DROP,
title:"Geocode Location",
icon: PinImageUrl
});
/**
google.maps.event.addListener(nsmap, 'zoom_changed', function() {
setTimeout(zipLabelToggle, 2000);
});
*/
} catch(e) {
alert('Error Drawing Map: '+e.toString());
}
document.getElementById('map_canvas').style.display = 'block';
} else {
//Hide The Mini Map
document.getElementById('map_canvas').style.display = 'none';
}
}
function geoCodeShipAddress() {
//grab default shipping address on the record
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
var fldschecked = false;
var geoAddressText = '';
for (var g=0; g < geoflds.length; g++) {
if (document.getElementById(geoflds[g]) && document.getElementById(geoflds[g]).checked) {
fldschecked=true;
if (nlapiGetRecordType() == 'job') {
//for Booking record. country and state drop down is custom record. check for existance of value instead of text.
//TEXT value will be used for geo coding
if (geoflds[g]=='geocountry') {
geoAddressText += nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldText(document.getElementById(geoflds[g]).value):''+' ';
} else if (geoflds[g]=='geocountry') {
var stateVal = nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldText(document.getElementById(geoflds[g]).value):'';
if (stateVal) |
} else {
geoAddressText += nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldValue(document.getElementById(geoflds[g]).value):''+' ';
}
} else {
//2014v2 Update - Must use nlapiGetCurrentLineItemValue
nlapiSelectLineItem('addressbook', shipLine);
if (geoflds[g]=='geocountry') {
geoAddressText += nlapiGetCurrentLineItemText('addressbook',document.getElementById(geoflds[g]).value)?nlapiGetCurrentLineItemText('addressbook',document.getElementById(geoflds[g]).value):''+' ';
} else {
geoAddressText += nlapiGetCurrentLineItemValue('addressbook',document.getElementById(geoflds[g]).value)?nlapiGetCurrentLineItemValue('addressbook',document.getElementById(geoflds[g]).value):''+' ';
}
}
}
}
if (!fldschecked) {
alert('Please check address fields to use for Geocoding.');
return false;
}
if (nlapiGetRecordType() != 'job') {
if (shipLine < 1) {
alert('Default Shipping Address is not available for this Supplier under Address Subtab.');
return false;
}
}
if (!strTrim(geoAddressText)) {
alert('Address to geocode is empty. Try setting address component fields and try again');
return false;
}
var geocoder = new google.maps.Geocoder();
geocoder.geocode( { 'address': geoAddressText}, function(results, status) {
if (status == google.maps.GeocoderStatus.OK) {
//alert(JSON.stringify(results));
nlapiSetFieldValue('custentity_cbl_shipadr_lat',results[0].geometry.location.lat());
nlapiSetFieldValue('custentity_cbl_shipadr_lng',results[0].geometry.location.lng());
var matchType = results[0].geometry.location_type;
var matchText = '';
if (matchType == 'ROOFTOP') {
matchText = '<i>'+matchType+'</i><br/>Returned result is a precise geocode for which we have location information accurate down to street address precision';
} else if (matchType == 'RANGE_INTERPOLATED') {
matchText = '<i>'+matchType+'</i><br/>Returned result reflects an approximation (usually on a road) interpolated between two precise points (such as intersections). Interpolated results are generally returned when rooftop geocodes are unavailable for a street address.';
} else if (matchType == 'GEOMETRIC_CENTER') {
matchText = '<i>'+matchType+'</i><br/>Returned result is the geometric center of a result such as a polyline (for example, a street) or polygon (region).';
} else if (matchType == 'APPROXIMATE') {
matchText = '<i>'+matchType+'</i><br/>Returned result is approximate.';
}
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail',
'<span style="color:green; font-weight: bold">Success: </span><br/>'+
'<b>Google Formatted Address: </b><br/>'+results[0].formatted_address+'<br/><br/>'+
'<b>Address Match Type: </b><br/>'+matchText);
} else {
nlapiSetFieldValue('custentity_cbl_shipadr_lat','');
nlapiSetFieldValue('custentity_cbl_shipadr_lng','');
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail',
'<span style="color:red; font-weight: bold">Geocode Error: '+status+'</span><br/>Try different combination of Address fields. ');
}
});
//alert(geoAddressText);
}
| {
stateVal = strGlobalReplace(stateVal, 'CA - ', '');
stateVal = strGlobalReplace(stateVal, 'US - ', '');
geoAddressText += stateVal+' ';
} | conditional_block |
cblmg_cs_supplierCoachGeocoder.js | /**
* Module Description
*
* Version Date Author Remarks
* 1.00 13 Jul 2014 AnJoe
*
* 8/2/2014
* Modified to allow geocoding on Booking records
*/
var paramGeoCodeFormIds = nlapiGetContext().getSetting('SCRIPT', 'custscript_sct246_formids').split(',');
var paramGeoCodeSupplierCategoryIds = nlapiGetContext().getSetting('SCRIPT', 'custscript_sct246_categoryids').split(',');
var geoflds = ['geoadr1','geoadr2','geoadr3','geocity','geocountystate','geopostcode','geocountry'];
var oShipAdrText = '';
var reGeoCode = false;
var PinImageUrl = 'https://maps.gstatic.com/mapfiles/ridefinder-images/mm_20_red.png';
var canGeoCode = false;
function supplierPageInit(type) {
//grab default shipping address on the record
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
//2014v2 Modification. nlapiGetLineItemValue on addressbook no longer works. MUST select and use nlapiGetCurrentLineItemValue
if (shipLine && parseInt(shipLine) > 0) {
nlapiSelectLineItem('addressbook', shipLine);
oShipAdrText = nlapiGetCurrentLineItemValue('addressbook', 'addrtext');
//oShipAdrText = nlapiGetLineItemValue('addressbook', 'addrtext', shipLine)?nlapiGetLineItemValue('addressbook', 'addrtext', shipLine):'';
}
//Changing it so that we ONLY execute drawGoogleMap when it's handled by userinterface
if (paramGeoCodeFormIds.contains(nlapiGetFieldValue('customform')) && nlapiGetContext().getExecutionContext() == 'userinterface') {
canGeoCode = true;
drawGoogleMapByGeoCode();
}
}
function supplierValidateLine(type) {
if (canGeoCode && type == 'addressbook' && nlapiGetCurrentLineItemValue(type, 'defaultshipping')=='T') {
var nShipAdrText = nlapiGetCurrentLineItemValue('addressbook', 'addrtext')?nlapiGetCurrentLineItemValue('addressbook', 'addrtext'):'';
//alert(oShipAdrText +' // '+nShipAdrText);
if (oShipAdrText != nShipAdrText) {
reGeoCode = true;
nlapiSetFieldValue('custentity_cbl_shipadr_lat','',false,true);
nlapiSetFieldValue('custentity_cbl_shipadr_lng','',false,true);
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail', '',false,true);
}
}
return true;
}
/**
* The recordType (internal id) corresponds to the "Applied To" record in your script deployment.
* @appliedtorecord recordType
*
* @returns {Boolean} True to continue save, false to abort save
*/
function supplierSaveRecord(){
//Attempt to geocode ONLY on User Interface
if (canGeoCode && nlapiGetContext().getExecutionContext() == 'userinterface') {
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
if (shipLine >= 1 && (!nlapiGetFieldValue('custentity_cbl_shipadr_lat') || !nlapiGetFieldValue('custentity_cbl_shipadr_lng')) && paramGeoCodeSupplierCategoryIds.contains(nlapiGetFieldValue('category'))) {
alert('Please Geocode Shipping Address');
return false;
}
}
return true;
}
function | (type, name, linenum) {
if (!canGeoCode) {
return;
}
//If both Lat/Lng values are set, display the Google Map map_canvas
//ONLY Redraw IF Both Lat/Lng values are provided
if ((name=='custentity_cbl_shipadr_lat' || name=='custentity_cbl_shipadr_lng') && nlapiGetFieldValue('custentity_cbl_shipadr_lat') && nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
drawGoogleMapByGeoCode();
}
}
/**
* ONLY on booking record to fire suitelet that looks up matching clients
* @returns {Boolean}
*/
function radiusCoachLookup() {
if (!nlapiGetFieldValue('custentity_cbl_shipadr_lat') && !nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
alert('Please Geocode Booking Location Address under Location Subtab');
return false;
}
//8/5/2014 - Client requested following changes:
// - Subsidiary search
// - 100 within UK; 1000 outside UK
// - Selected coach pin
// - Buyer
// -
var selectedCoachId = nlapiGetFieldValue('custentity_bo_coach');
var bookingSubsId = nlapiGetFieldValue('subsidiary');
var bookingBuyer = nlapiGetFieldText('custentity_bo_buyer');
var bookingCourse = nlapiGetFieldText('custentity_bo_course');
var bookingItem = nlapiGetFieldText('custentity_bo_item');
var bookingDate = nlapiGetFieldValue('enddate');
var bookingTime = nlapiGetFieldValue('custentity_bo_eventtime');
var selectedCoachText = nlapiGetFieldText('custentity_bo_coach');
var bookingClientText = nlapiGetFieldText('parent');
//customscript_cbl_sl_radiuscoachsearch
//customdeploy_cbl_sl_radiuscoachsearchd
var radiusSlUrl = nlapiResolveURL('SUITELET', 'customscript_cbl_sl_radiuscoachsearch', 'customdeploy_cbl_sl_radiuscoachsearchd', 'VIEW')+
'&booklat='+nlapiGetFieldValue('custentity_cbl_shipadr_lat')+'&booklng='+nlapiGetFieldValue('custentity_cbl_shipadr_lng')+
'&bookcountryid='+nlapiGetFieldValue('custentity_bo_eventcountry')+'&bookcountry='+nlapiGetFieldText('custentity_bo_eventcountry')+
'&selectedcoach='+selectedCoachId+'&booksubsid='+bookingSubsId+'&buyer='+encodeURIComponent(bookingBuyer)+
'&bookdatetime='+encodeURIComponent(bookingDate+' '+bookingTime)+'&course='+encodeURIComponent(bookingCourse)+
'&item='+encodeURIComponent(bookingItem)+'&client='+bookingClientText+'&selectedcoachtext='+selectedCoachText+
'&entityid='+nlapiGetFieldValue('entityid')+'&bookid='+nlapiGetRecordId()+'&bookingtype='+nlapiGetFieldValue('jobtype');
window.open(radiusSlUrl,'Radius_Search','width=1200,height=700,resizable=yes,scrollbars=yes');
}
function setValueFromRadiusLookup(_id) {
nlapiSetFieldValue('custentity_bo_coach', _id, true, true);
}
function drawGoogleMapByGeoCode() {
//Make sure map_canvas div element is present
if (!document.getElementById('map_canvas'))
{
return;
}
if (canGeoCode && nlapiGetFieldValue('custentity_cbl_shipadr_lat') && nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
try {
var nsmap = null;
var latlng = new google.maps.LatLng(nlapiGetFieldValue('custentity_cbl_shipadr_lat'), nlapiGetFieldValue('custentity_cbl_shipadr_lng'));
var moption = {
center:latlng,
zoom:15,
mapTypeId: google.maps.MapTypeId.ROADMAP
};
nsmap = new google.maps.Map(document.getElementById('map_canvas'),moption);
//Drops Lead Pin on the map
new google.maps.Marker({
position: latlng,
map:nsmap,
animation: google.maps.Animation.DROP,
title:"Geocode Location",
icon: PinImageUrl
});
/**
google.maps.event.addListener(nsmap, 'zoom_changed', function() {
setTimeout(zipLabelToggle, 2000);
});
*/
} catch(e) {
alert('Error Drawing Map: '+e.toString());
}
document.getElementById('map_canvas').style.display = 'block';
} else {
//Hide The Mini Map
document.getElementById('map_canvas').style.display = 'none';
}
}
function geoCodeShipAddress() {
//grab default shipping address on the record
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
var fldschecked = false;
var geoAddressText = '';
for (var g=0; g < geoflds.length; g++) {
if (document.getElementById(geoflds[g]) && document.getElementById(geoflds[g]).checked) {
fldschecked=true;
if (nlapiGetRecordType() == 'job') {
//for Booking record. country and state drop down is custom record. check for existance of value instead of text.
//TEXT value will be used for geo coding
if (geoflds[g]=='geocountry') {
geoAddressText += nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldText(document.getElementById(geoflds[g]).value):''+' ';
} else if (geoflds[g]=='geocountry') {
var stateVal = nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldText(document.getElementById(geoflds[g]).value):'';
if (stateVal) {
stateVal = strGlobalReplace(stateVal, 'CA - ', '');
stateVal = strGlobalReplace(stateVal, 'US - ', '');
geoAddressText += stateVal+' ';
}
} else {
geoAddressText += nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldValue(document.getElementById(geoflds[g]).value):''+' ';
}
} else {
//2014v2 Update - Must use nlapiGetCurrentLineItemValue
nlapiSelectLineItem('addressbook', shipLine);
if (geoflds[g]=='geocountry') {
geoAddressText += nlapiGetCurrentLineItemText('addressbook',document.getElementById(geoflds[g]).value)?nlapiGetCurrentLineItemText('addressbook',document.getElementById(geoflds[g]).value):''+' ';
} else {
geoAddressText += nlapiGetCurrentLineItemValue('addressbook',document.getElementById(geoflds[g]).value)?nlapiGetCurrentLineItemValue('addressbook',document.getElementById(geoflds[g]).value):''+' ';
}
}
}
}
if (!fldschecked) {
alert('Please check address fields to use for Geocoding.');
return false;
}
if (nlapiGetRecordType() != 'job') {
if (shipLine < 1) {
alert('Default Shipping Address is not available for this Supplier under Address Subtab.');
return false;
}
}
if (!strTrim(geoAddressText)) {
alert('Address to geocode is empty. Try setting address component fields and try again');
return false;
}
var geocoder = new google.maps.Geocoder();
geocoder.geocode( { 'address': geoAddressText}, function(results, status) {
if (status == google.maps.GeocoderStatus.OK) {
//alert(JSON.stringify(results));
nlapiSetFieldValue('custentity_cbl_shipadr_lat',results[0].geometry.location.lat());
nlapiSetFieldValue('custentity_cbl_shipadr_lng',results[0].geometry.location.lng());
var matchType = results[0].geometry.location_type;
var matchText = '';
if (matchType == 'ROOFTOP') {
matchText = '<i>'+matchType+'</i><br/>Returned result is a precise geocode for which we have location information accurate down to street address precision';
} else if (matchType == 'RANGE_INTERPOLATED') {
matchText = '<i>'+matchType+'</i><br/>Returned result reflects an approximation (usually on a road) interpolated between two precise points (such as intersections). Interpolated results are generally returned when rooftop geocodes are unavailable for a street address.';
} else if (matchType == 'GEOMETRIC_CENTER') {
matchText = '<i>'+matchType+'</i><br/>Returned result is the geometric center of a result such as a polyline (for example, a street) or polygon (region).';
} else if (matchType == 'APPROXIMATE') {
matchText = '<i>'+matchType+'</i><br/>Returned result is approximate.';
}
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail',
'<span style="color:green; font-weight: bold">Success: </span><br/>'+
'<b>Google Formatted Address: </b><br/>'+results[0].formatted_address+'<br/><br/>'+
'<b>Address Match Type: </b><br/>'+matchText);
} else {
nlapiSetFieldValue('custentity_cbl_shipadr_lat','');
nlapiSetFieldValue('custentity_cbl_shipadr_lng','');
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail',
'<span style="color:red; font-weight: bold">Geocode Error: '+status+'</span><br/>Try different combination of Address fields. ');
}
});
//alert(geoAddressText);
}
| supplierFldChanged | identifier_name |
cblmg_cs_supplierCoachGeocoder.js | /**
* Module Description
*
* Version Date Author Remarks
* 1.00 13 Jul 2014 AnJoe
*
* 8/2/2014
* Modified to allow geocoding on Booking records
*/
var paramGeoCodeFormIds = nlapiGetContext().getSetting('SCRIPT', 'custscript_sct246_formids').split(',');
var paramGeoCodeSupplierCategoryIds = nlapiGetContext().getSetting('SCRIPT', 'custscript_sct246_categoryids').split(',');
var geoflds = ['geoadr1','geoadr2','geoadr3','geocity','geocountystate','geopostcode','geocountry'];
var oShipAdrText = '';
var reGeoCode = false;
var PinImageUrl = 'https://maps.gstatic.com/mapfiles/ridefinder-images/mm_20_red.png';
var canGeoCode = false;
function supplierPageInit(type) {
//grab default shipping address on the record
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
//2014v2 Modification. nlapiGetLineItemValue on addressbook no longer works. MUST select and use nlapiGetCurrentLineItemValue
if (shipLine && parseInt(shipLine) > 0) {
nlapiSelectLineItem('addressbook', shipLine);
oShipAdrText = nlapiGetCurrentLineItemValue('addressbook', 'addrtext');
//oShipAdrText = nlapiGetLineItemValue('addressbook', 'addrtext', shipLine)?nlapiGetLineItemValue('addressbook', 'addrtext', shipLine):'';
}
//Changing it so that we ONLY execute drawGoogleMap when it's handled by userinterface
if (paramGeoCodeFormIds.contains(nlapiGetFieldValue('customform')) && nlapiGetContext().getExecutionContext() == 'userinterface') {
canGeoCode = true;
drawGoogleMapByGeoCode();
}
}
function supplierValidateLine(type) {
if (canGeoCode && type == 'addressbook' && nlapiGetCurrentLineItemValue(type, 'defaultshipping')=='T') {
var nShipAdrText = nlapiGetCurrentLineItemValue('addressbook', 'addrtext')?nlapiGetCurrentLineItemValue('addressbook', 'addrtext'):'';
//alert(oShipAdrText +' // '+nShipAdrText);
if (oShipAdrText != nShipAdrText) {
reGeoCode = true;
nlapiSetFieldValue('custentity_cbl_shipadr_lat','',false,true);
nlapiSetFieldValue('custentity_cbl_shipadr_lng','',false,true);
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail', '',false,true);
}
}
return true;
}
/**
* The recordType (internal id) corresponds to the "Applied To" record in your script deployment.
* @appliedtorecord recordType
*
* @returns {Boolean} True to continue save, false to abort save
*/
function supplierSaveRecord(){
//Attempt to geocode ONLY on User Interface
if (canGeoCode && nlapiGetContext().getExecutionContext() == 'userinterface') {
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
if (shipLine >= 1 && (!nlapiGetFieldValue('custentity_cbl_shipadr_lat') || !nlapiGetFieldValue('custentity_cbl_shipadr_lng')) && paramGeoCodeSupplierCategoryIds.contains(nlapiGetFieldValue('category'))) {
alert('Please Geocode Shipping Address');
return false;
}
}
return true;
}
function supplierFldChanged(type, name, linenum) {
if (!canGeoCode) {
return;
}
//If both Lat/Lng values are set, display the Google Map map_canvas
//ONLY Redraw IF Both Lat/Lng values are provided
if ((name=='custentity_cbl_shipadr_lat' || name=='custentity_cbl_shipadr_lng') && nlapiGetFieldValue('custentity_cbl_shipadr_lat') && nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
drawGoogleMapByGeoCode();
}
}
/**
* ONLY on booking record to fire suitelet that looks up matching clients
* @returns {Boolean}
*/
function radiusCoachLookup() {
if (!nlapiGetFieldValue('custentity_cbl_shipadr_lat') && !nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
alert('Please Geocode Booking Location Address under Location Subtab');
return false;
}
//8/5/2014 - Client requested following changes:
// - Subsidiary search
// - 100 within UK; 1000 outside UK
// - Selected coach pin
// - Buyer
// -
var selectedCoachId = nlapiGetFieldValue('custentity_bo_coach');
var bookingSubsId = nlapiGetFieldValue('subsidiary');
var bookingBuyer = nlapiGetFieldText('custentity_bo_buyer');
var bookingCourse = nlapiGetFieldText('custentity_bo_course');
var bookingItem = nlapiGetFieldText('custentity_bo_item');
var bookingDate = nlapiGetFieldValue('enddate');
var bookingTime = nlapiGetFieldValue('custentity_bo_eventtime');
var selectedCoachText = nlapiGetFieldText('custentity_bo_coach');
var bookingClientText = nlapiGetFieldText('parent');
//customscript_cbl_sl_radiuscoachsearch
//customdeploy_cbl_sl_radiuscoachsearchd
var radiusSlUrl = nlapiResolveURL('SUITELET', 'customscript_cbl_sl_radiuscoachsearch', 'customdeploy_cbl_sl_radiuscoachsearchd', 'VIEW')+
'&booklat='+nlapiGetFieldValue('custentity_cbl_shipadr_lat')+'&booklng='+nlapiGetFieldValue('custentity_cbl_shipadr_lng')+
'&bookcountryid='+nlapiGetFieldValue('custentity_bo_eventcountry')+'&bookcountry='+nlapiGetFieldText('custentity_bo_eventcountry')+
'&selectedcoach='+selectedCoachId+'&booksubsid='+bookingSubsId+'&buyer='+encodeURIComponent(bookingBuyer)+ |
window.open(radiusSlUrl,'Radius_Search','width=1200,height=700,resizable=yes,scrollbars=yes');
}
function setValueFromRadiusLookup(_id) {
nlapiSetFieldValue('custentity_bo_coach', _id, true, true);
}
function drawGoogleMapByGeoCode() {
//Make sure map_canvas div element is present
if (!document.getElementById('map_canvas'))
{
return;
}
if (canGeoCode && nlapiGetFieldValue('custentity_cbl_shipadr_lat') && nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
try {
var nsmap = null;
var latlng = new google.maps.LatLng(nlapiGetFieldValue('custentity_cbl_shipadr_lat'), nlapiGetFieldValue('custentity_cbl_shipadr_lng'));
var moption = {
center:latlng,
zoom:15,
mapTypeId: google.maps.MapTypeId.ROADMAP
};
nsmap = new google.maps.Map(document.getElementById('map_canvas'),moption);
//Drops Lead Pin on the map
new google.maps.Marker({
position: latlng,
map:nsmap,
animation: google.maps.Animation.DROP,
title:"Geocode Location",
icon: PinImageUrl
});
/**
google.maps.event.addListener(nsmap, 'zoom_changed', function() {
setTimeout(zipLabelToggle, 2000);
});
*/
} catch(e) {
alert('Error Drawing Map: '+e.toString());
}
document.getElementById('map_canvas').style.display = 'block';
} else {
//Hide The Mini Map
document.getElementById('map_canvas').style.display = 'none';
}
}
function geoCodeShipAddress() {
//grab default shipping address on the record
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
var fldschecked = false;
var geoAddressText = '';
for (var g=0; g < geoflds.length; g++) {
if (document.getElementById(geoflds[g]) && document.getElementById(geoflds[g]).checked) {
fldschecked=true;
if (nlapiGetRecordType() == 'job') {
//for Booking record. country and state drop down is custom record. check for existance of value instead of text.
//TEXT value will be used for geo coding
if (geoflds[g]=='geocountry') {
geoAddressText += nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldText(document.getElementById(geoflds[g]).value):''+' ';
} else if (geoflds[g]=='geocountry') {
var stateVal = nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldText(document.getElementById(geoflds[g]).value):'';
if (stateVal) {
stateVal = strGlobalReplace(stateVal, 'CA - ', '');
stateVal = strGlobalReplace(stateVal, 'US - ', '');
geoAddressText += stateVal+' ';
}
} else {
geoAddressText += nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldValue(document.getElementById(geoflds[g]).value):''+' ';
}
} else {
//2014v2 Update - Must use nlapiGetCurrentLineItemValue
nlapiSelectLineItem('addressbook', shipLine);
if (geoflds[g]=='geocountry') {
geoAddressText += nlapiGetCurrentLineItemText('addressbook',document.getElementById(geoflds[g]).value)?nlapiGetCurrentLineItemText('addressbook',document.getElementById(geoflds[g]).value):''+' ';
} else {
geoAddressText += nlapiGetCurrentLineItemValue('addressbook',document.getElementById(geoflds[g]).value)?nlapiGetCurrentLineItemValue('addressbook',document.getElementById(geoflds[g]).value):''+' ';
}
}
}
}
if (!fldschecked) {
alert('Please check address fields to use for Geocoding.');
return false;
}
if (nlapiGetRecordType() != 'job') {
if (shipLine < 1) {
alert('Default Shipping Address is not available for this Supplier under Address Subtab.');
return false;
}
}
if (!strTrim(geoAddressText)) {
alert('Address to geocode is empty. Try setting address component fields and try again');
return false;
}
var geocoder = new google.maps.Geocoder();
geocoder.geocode( { 'address': geoAddressText}, function(results, status) {
if (status == google.maps.GeocoderStatus.OK) {
//alert(JSON.stringify(results));
nlapiSetFieldValue('custentity_cbl_shipadr_lat',results[0].geometry.location.lat());
nlapiSetFieldValue('custentity_cbl_shipadr_lng',results[0].geometry.location.lng());
var matchType = results[0].geometry.location_type;
var matchText = '';
if (matchType == 'ROOFTOP') {
matchText = '<i>'+matchType+'</i><br/>Returned result is a precise geocode for which we have location information accurate down to street address precision';
} else if (matchType == 'RANGE_INTERPOLATED') {
matchText = '<i>'+matchType+'</i><br/>Returned result reflects an approximation (usually on a road) interpolated between two precise points (such as intersections). Interpolated results are generally returned when rooftop geocodes are unavailable for a street address.';
} else if (matchType == 'GEOMETRIC_CENTER') {
matchText = '<i>'+matchType+'</i><br/>Returned result is the geometric center of a result such as a polyline (for example, a street) or polygon (region).';
} else if (matchType == 'APPROXIMATE') {
matchText = '<i>'+matchType+'</i><br/>Returned result is approximate.';
}
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail',
'<span style="color:green; font-weight: bold">Success: </span><br/>'+
'<b>Google Formatted Address: </b><br/>'+results[0].formatted_address+'<br/><br/>'+
'<b>Address Match Type: </b><br/>'+matchText);
} else {
nlapiSetFieldValue('custentity_cbl_shipadr_lat','');
nlapiSetFieldValue('custentity_cbl_shipadr_lng','');
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail',
'<span style="color:red; font-weight: bold">Geocode Error: '+status+'</span><br/>Try different combination of Address fields. ');
}
});
//alert(geoAddressText);
} | '&bookdatetime='+encodeURIComponent(bookingDate+' '+bookingTime)+'&course='+encodeURIComponent(bookingCourse)+
'&item='+encodeURIComponent(bookingItem)+'&client='+bookingClientText+'&selectedcoachtext='+selectedCoachText+
'&entityid='+nlapiGetFieldValue('entityid')+'&bookid='+nlapiGetRecordId()+'&bookingtype='+nlapiGetFieldValue('jobtype'); | random_line_split |
cblmg_cs_supplierCoachGeocoder.js | /**
* Module Description
*
* Version Date Author Remarks
* 1.00 13 Jul 2014 AnJoe
*
* 8/2/2014
* Modified to allow geocoding on Booking records
*/
var paramGeoCodeFormIds = nlapiGetContext().getSetting('SCRIPT', 'custscript_sct246_formids').split(',');
var paramGeoCodeSupplierCategoryIds = nlapiGetContext().getSetting('SCRIPT', 'custscript_sct246_categoryids').split(',');
var geoflds = ['geoadr1','geoadr2','geoadr3','geocity','geocountystate','geopostcode','geocountry'];
var oShipAdrText = '';
var reGeoCode = false;
var PinImageUrl = 'https://maps.gstatic.com/mapfiles/ridefinder-images/mm_20_red.png';
var canGeoCode = false;
function supplierPageInit(type) {
//grab default shipping address on the record
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
//2014v2 Modification. nlapiGetLineItemValue on addressbook no longer works. MUST select and use nlapiGetCurrentLineItemValue
if (shipLine && parseInt(shipLine) > 0) {
nlapiSelectLineItem('addressbook', shipLine);
oShipAdrText = nlapiGetCurrentLineItemValue('addressbook', 'addrtext');
//oShipAdrText = nlapiGetLineItemValue('addressbook', 'addrtext', shipLine)?nlapiGetLineItemValue('addressbook', 'addrtext', shipLine):'';
}
//Changing it so that we ONLY execute drawGoogleMap when it's handled by userinterface
if (paramGeoCodeFormIds.contains(nlapiGetFieldValue('customform')) && nlapiGetContext().getExecutionContext() == 'userinterface') {
canGeoCode = true;
drawGoogleMapByGeoCode();
}
}
function supplierValidateLine(type) |
/**
* The recordType (internal id) corresponds to the "Applied To" record in your script deployment.
* @appliedtorecord recordType
*
* @returns {Boolean} True to continue save, false to abort save
*/
function supplierSaveRecord(){
//Attempt to geocode ONLY on User Interface
if (canGeoCode && nlapiGetContext().getExecutionContext() == 'userinterface') {
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
if (shipLine >= 1 && (!nlapiGetFieldValue('custentity_cbl_shipadr_lat') || !nlapiGetFieldValue('custentity_cbl_shipadr_lng')) && paramGeoCodeSupplierCategoryIds.contains(nlapiGetFieldValue('category'))) {
alert('Please Geocode Shipping Address');
return false;
}
}
return true;
}
function supplierFldChanged(type, name, linenum) {
if (!canGeoCode) {
return;
}
//If both Lat/Lng values are set, display the Google Map map_canvas
//ONLY Redraw IF Both Lat/Lng values are provided
if ((name=='custentity_cbl_shipadr_lat' || name=='custentity_cbl_shipadr_lng') && nlapiGetFieldValue('custentity_cbl_shipadr_lat') && nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
drawGoogleMapByGeoCode();
}
}
/**
* ONLY on booking record to fire suitelet that looks up matching clients
* @returns {Boolean}
*/
function radiusCoachLookup() {
if (!nlapiGetFieldValue('custentity_cbl_shipadr_lat') && !nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
alert('Please Geocode Booking Location Address under Location Subtab');
return false;
}
//8/5/2014 - Client requested following changes:
// - Subsidiary search
// - 100 within UK; 1000 outside UK
// - Selected coach pin
// - Buyer
// -
var selectedCoachId = nlapiGetFieldValue('custentity_bo_coach');
var bookingSubsId = nlapiGetFieldValue('subsidiary');
var bookingBuyer = nlapiGetFieldText('custentity_bo_buyer');
var bookingCourse = nlapiGetFieldText('custentity_bo_course');
var bookingItem = nlapiGetFieldText('custentity_bo_item');
var bookingDate = nlapiGetFieldValue('enddate');
var bookingTime = nlapiGetFieldValue('custentity_bo_eventtime');
var selectedCoachText = nlapiGetFieldText('custentity_bo_coach');
var bookingClientText = nlapiGetFieldText('parent');
//customscript_cbl_sl_radiuscoachsearch
//customdeploy_cbl_sl_radiuscoachsearchd
var radiusSlUrl = nlapiResolveURL('SUITELET', 'customscript_cbl_sl_radiuscoachsearch', 'customdeploy_cbl_sl_radiuscoachsearchd', 'VIEW')+
'&booklat='+nlapiGetFieldValue('custentity_cbl_shipadr_lat')+'&booklng='+nlapiGetFieldValue('custentity_cbl_shipadr_lng')+
'&bookcountryid='+nlapiGetFieldValue('custentity_bo_eventcountry')+'&bookcountry='+nlapiGetFieldText('custentity_bo_eventcountry')+
'&selectedcoach='+selectedCoachId+'&booksubsid='+bookingSubsId+'&buyer='+encodeURIComponent(bookingBuyer)+
'&bookdatetime='+encodeURIComponent(bookingDate+' '+bookingTime)+'&course='+encodeURIComponent(bookingCourse)+
'&item='+encodeURIComponent(bookingItem)+'&client='+bookingClientText+'&selectedcoachtext='+selectedCoachText+
'&entityid='+nlapiGetFieldValue('entityid')+'&bookid='+nlapiGetRecordId()+'&bookingtype='+nlapiGetFieldValue('jobtype');
window.open(radiusSlUrl,'Radius_Search','width=1200,height=700,resizable=yes,scrollbars=yes');
}
function setValueFromRadiusLookup(_id) {
nlapiSetFieldValue('custentity_bo_coach', _id, true, true);
}
function drawGoogleMapByGeoCode() {
//Make sure map_canvas div element is present
if (!document.getElementById('map_canvas'))
{
return;
}
if (canGeoCode && nlapiGetFieldValue('custentity_cbl_shipadr_lat') && nlapiGetFieldValue('custentity_cbl_shipadr_lng')) {
try {
var nsmap = null;
var latlng = new google.maps.LatLng(nlapiGetFieldValue('custentity_cbl_shipadr_lat'), nlapiGetFieldValue('custentity_cbl_shipadr_lng'));
var moption = {
center:latlng,
zoom:15,
mapTypeId: google.maps.MapTypeId.ROADMAP
};
nsmap = new google.maps.Map(document.getElementById('map_canvas'),moption);
//Drops Lead Pin on the map
new google.maps.Marker({
position: latlng,
map:nsmap,
animation: google.maps.Animation.DROP,
title:"Geocode Location",
icon: PinImageUrl
});
/**
google.maps.event.addListener(nsmap, 'zoom_changed', function() {
setTimeout(zipLabelToggle, 2000);
});
*/
} catch(e) {
alert('Error Drawing Map: '+e.toString());
}
document.getElementById('map_canvas').style.display = 'block';
} else {
//Hide The Mini Map
document.getElementById('map_canvas').style.display = 'none';
}
}
function geoCodeShipAddress() {
//grab default shipping address on the record
var shipLine = nlapiFindLineItemValue('addressbook', 'defaultshipping', 'T');
var fldschecked = false;
var geoAddressText = '';
for (var g=0; g < geoflds.length; g++) {
if (document.getElementById(geoflds[g]) && document.getElementById(geoflds[g]).checked) {
fldschecked=true;
if (nlapiGetRecordType() == 'job') {
//for Booking record. country and state drop down is custom record. check for existance of value instead of text.
//TEXT value will be used for geo coding
if (geoflds[g]=='geocountry') {
geoAddressText += nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldText(document.getElementById(geoflds[g]).value):''+' ';
} else if (geoflds[g]=='geocountry') {
var stateVal = nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldText(document.getElementById(geoflds[g]).value):'';
if (stateVal) {
stateVal = strGlobalReplace(stateVal, 'CA - ', '');
stateVal = strGlobalReplace(stateVal, 'US - ', '');
geoAddressText += stateVal+' ';
}
} else {
geoAddressText += nlapiGetFieldValue(document.getElementById(geoflds[g]).value)?nlapiGetFieldValue(document.getElementById(geoflds[g]).value):''+' ';
}
} else {
//2014v2 Update - Must use nlapiGetCurrentLineItemValue
nlapiSelectLineItem('addressbook', shipLine);
if (geoflds[g]=='geocountry') {
geoAddressText += nlapiGetCurrentLineItemText('addressbook',document.getElementById(geoflds[g]).value)?nlapiGetCurrentLineItemText('addressbook',document.getElementById(geoflds[g]).value):''+' ';
} else {
geoAddressText += nlapiGetCurrentLineItemValue('addressbook',document.getElementById(geoflds[g]).value)?nlapiGetCurrentLineItemValue('addressbook',document.getElementById(geoflds[g]).value):''+' ';
}
}
}
}
if (!fldschecked) {
alert('Please check address fields to use for Geocoding.');
return false;
}
if (nlapiGetRecordType() != 'job') {
if (shipLine < 1) {
alert('Default Shipping Address is not available for this Supplier under Address Subtab.');
return false;
}
}
if (!strTrim(geoAddressText)) {
alert('Address to geocode is empty. Try setting address component fields and try again');
return false;
}
var geocoder = new google.maps.Geocoder();
geocoder.geocode( { 'address': geoAddressText}, function(results, status) {
if (status == google.maps.GeocoderStatus.OK) {
//alert(JSON.stringify(results));
nlapiSetFieldValue('custentity_cbl_shipadr_lat',results[0].geometry.location.lat());
nlapiSetFieldValue('custentity_cbl_shipadr_lng',results[0].geometry.location.lng());
var matchType = results[0].geometry.location_type;
var matchText = '';
if (matchType == 'ROOFTOP') {
matchText = '<i>'+matchType+'</i><br/>Returned result is a precise geocode for which we have location information accurate down to street address precision';
} else if (matchType == 'RANGE_INTERPOLATED') {
matchText = '<i>'+matchType+'</i><br/>Returned result reflects an approximation (usually on a road) interpolated between two precise points (such as intersections). Interpolated results are generally returned when rooftop geocodes are unavailable for a street address.';
} else if (matchType == 'GEOMETRIC_CENTER') {
matchText = '<i>'+matchType+'</i><br/>Returned result is the geometric center of a result such as a polyline (for example, a street) or polygon (region).';
} else if (matchType == 'APPROXIMATE') {
matchText = '<i>'+matchType+'</i><br/>Returned result is approximate.';
}
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail',
'<span style="color:green; font-weight: bold">Success: </span><br/>'+
'<b>Google Formatted Address: </b><br/>'+results[0].formatted_address+'<br/><br/>'+
'<b>Address Match Type: </b><br/>'+matchText);
} else {
nlapiSetFieldValue('custentity_cbl_shipadr_lat','');
nlapiSetFieldValue('custentity_cbl_shipadr_lng','');
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail',
'<span style="color:red; font-weight: bold">Geocode Error: '+status+'</span><br/>Try different combination of Address fields. ');
}
});
//alert(geoAddressText);
}
| {
if (canGeoCode && type == 'addressbook' && nlapiGetCurrentLineItemValue(type, 'defaultshipping')=='T') {
var nShipAdrText = nlapiGetCurrentLineItemValue('addressbook', 'addrtext')?nlapiGetCurrentLineItemValue('addressbook', 'addrtext'):'';
//alert(oShipAdrText +' // '+nShipAdrText);
if (oShipAdrText != nShipAdrText) {
reGeoCode = true;
nlapiSetFieldValue('custentity_cbl_shipadr_lat','',false,true);
nlapiSetFieldValue('custentity_cbl_shipadr_lng','',false,true);
nlapiSetFieldValue('custentity_cbl_shipadr_geodetail', '',false,true);
}
}
return true;
} | identifier_body |
i2c.rs | //! I2C Peripheral
use crate::common::{ Register, Frequency, I2CInterrupt, I2CFlags, I2CBitMode, MasterMode, DutyCycle, DualAddress };
use crate::common::enums::RCCPeripheral;
use crate::common::structs::pins::Pin;
use embedded_hal::blocking::i2c::{ Read, Write, WriteRead };
use crate::peripherals::extended::{ gpio::Gpio, rcc::Rcc };
pub const I2C1: u32 = 0x4000_5400;
pub const I2C2: u32 = 0x4000_5800;
pub const I2C3: u32 = 0x4000_5C00;
pub const SIZE: usize = 10;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum I2CError {
/// NACK received
NACK,
/// Bus error
Bus,
/// Arbitration loss
Arbitration,
/// Overrun - Slave mode only
Overrun,
/// PEC - SMBUS mode only
PEC,
/// Timeout - SMBUS mode only
Timeout,
/// Alert - SMBUS mode only
Alert,
Other,
}
#[repr(C)]
pub struct I2c {
#[repr(C)]
block: &'static [Register<u32>; SIZE],
pins: (Pin, Pin),
}
impl I2c {
/// Sets bit at block and offset given
pub fn set(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] |= 1 << o;
self
}
/// Clears bit at block and offset given
pub fn clear(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] &= !(1 << o);
self
}
/// Checks if bit is set
pub fn is_set(&self, r: usize, b: usize) -> bool {
(self.block[r].read() >> b) & 1 == 1
}
pub fn write_bits(&mut self, b: usize, o: usize, data: u32, size: usize) -> &mut Self {
let mask = (1u32 << size) - 1;
let old = self.block[b].read();
self.block[b].write( old & !(mask << o) | ((data & mask) << o) );
self
}
}
impl I2c {
/// Set up as master
pub fn master<'a>(address: u32, pins: (Pin, Pin), rcc: &'a Rcc, clocks: Clocks, speed: Frequency) -> Result<Self, I2CError> {
let i2cid = match address {
I2C1 => RCCPeripheral::I2C1,
I2C2 => RCCPeripheral::I2C2,
I2C3 => RCCPeripheral::I2C3,
_ => return Err(I2CError::Other),
};
let new = I2c {
block: &mut *(address as *mut _),
pins,
};
let (sda, scl) = pins;
// TODO : Change to each board
sda.altfn(4)
.speed(HIGH);
scl.altfn(4)
.speed(HIGH);
rcc.peripheral_state(true, i2cid);
rcc.reset_peripheral(i2cid);
// TODO set up RCC clocks
// Disable he peripheral
// by clearing PE bit in CR1
new.clear(0, 0);
// Calculate settings for I2C speed modes
// If the user used the RCC given clocks, APB clock is legal
// Configure bus frequency into I2C peripheral
new.write_bits(1, 0, clocks.apb1.mhz(), 6);
let trise = if speed <= Frequency::KHz(100) {
clocks.apb1.mhz() + 1
} else {
((clocks.apb.mhz() * 300) / 1000) + 1
};
// Configure correct rise times
new.write_bits(8, 0, trise, 6);
// I2C clock control calculation
// If in slow mode
if speed <= Frequency::KHz(100) {
let ccr = match clocks.apb.hz() / (speed.hz() * 2) {
0...3 => 4,
n => n,
};
// Set clock to standard mode with appropiate parameters for selected speed
new.clear(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// Fast mode
// Defaults for now to 2:1 duty cycle
if true {
let ccr = match clocks.apb1.hz() / (speed.hz() * 3) {
0 => 1,
n => n,
};
new.set(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// 16:9 duty cycle
let ccr = match clocks.apb1.hz() / (speed.hz() * 25) {
0 => 1,
n => n,
};
new.set(7, 15)
.set(7, 14)
.write_bits(7, 0, ccr, 12);
}
}
new.set(0, 0);
Ok( new )
}
/// Stop the peripheral and release the pins
pub fn free(&mut self) -> (Pin, Pin) {
self.clear(0, 0);
self.pins
}
}
/*
impl I2c {
/// Scans for devices and returns all the addresses it found connected
pub fn scan(&mut self) -> Vec<u8> {
let mut addresses = Vec::new();
let mut void = &[0];
for i in 0..128 {
match self.read(i, void) {
Ok(()) => addresses.push(i),
_ => (),
}
}
addresses
}
}
*/
impl Read for I2c {
type Error = I2CError;
/// Read bytes into buffer
/// This function is based on MASTER mode
/// WARNING!
/// `unsafe` function (but now marked as such). This function may leave the sender hanging
/// if the sender sends more bytes than what the buffer can hold.
/// This is due to no STOP signal being sent back.
fn read(&mut self, addr: u8, buffer: &mut [u8]) -> Result<(), I2CError> {
let last = buffer.len() - 1;
// Send start condition and ACK bit
self.start()
.ack();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data(((addr as u32) << 1) + 1);
// wait until address was sent
while !self.is_set(5, 1) {}
// Clear condition by reading SR2
let _ = self.block[6].read();
// Store bytes
for i in 0..last {
buffer[i] = self.recv_byte()?;
}
self.nack()
.stop();
// Read last byte
buffer[last] = self.recv_byte()?;
Ok(())
}
}
impl Write for I2c {
type Error = I2CError;
/// Send a buffer of bytes
fn write(&mut self, addr: u8, bytes: &[u8]) -> Result<(), I2CError> {
// Send START condition
self.start();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data((addr as u32) << 1);
// wait until address was sent
while !self.is_set(5, 1) {}
// Clear condition by reading SR2
// let _ = ptr::read_volatile(self as u32 + 0x18);
let _ = self.block[6].read();
// Send the bytes
for b in bytes {
self.send_byte(*b)?;
}
Ok(())
}
}
impl WriteRead for I2c {
type Error = I2CError;
/// Writes some bytes then reads some bytes
fn write_read(&mut self, addr: u8, bytes: &[u8], buffer: &mut [u8]) -> Result<(), I2CError> {
self.write(addr, bytes)?;
self.read(addr, buffer)
}
}
impl I2c {
/// Sends a byte
pub fn send_byte(&mut self, byte: u8) -> Result<&mut Self, I2CError> {
// Wait until TX buffer is empty
while !self.is_raised(I2CFlags::TxEmpty) {}
self.write_data(byte as u32);
while {
if self.is_raised(I2CFlags::ACKFailure) {
return Err(I2CError::NACK);
}
!self.is_raised(I2CFlags::TransferComplete)
} {}
Ok( self )
}
/// Receive a byte
pub fn recv_byte(&self) -> Result<u8, I2CError> {
while !self.is_raised(I2CFlags::RxNotEmpty) {}
Ok( self.read_data() )
}
}
impl I2c {
/// Enable the sending of ACK signal after byte transfer
pub fn ack(&mut self) -> &mut Self {
self.set(0, 10)
}
/// Disable the sending of ACK signal (effectively sending a NACK) after byte transfer
pub fn nack(&mut self) -> &mut Self {
self.clear(0, 10)
}
/// Stop generation
/// 0: No stop generation
/// 1: Slave Mode - Release the SCL and SDA lines after current byte transfer
/// Master Mode - Stop generation after the current byte transfer or current Start condition is sent
pub fn stop(&mut self) -> &mut Self {
self.set(0, 9)
}
/// Start generation
/// 0: No start generation
/// 1: Slave Mode - Start generation when bus id free
/// Master Mode - Repeated start generation
pub fn start(&mut self) -> &mut Self {
self.set(0, 8)
}
/// Enable/Disable peripheral
pub fn state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(0, 0),
_ => self.clear(0, 0),
}
}
/// If enabled the next byte will received in shift register
pub fn receive_in_shift(&mut self) -> &mut Self {
self.set(0, 11)
}
/// Starts Packet Error Checking (PEC) for the next transfer
pub fn start_pec(&mut self) -> &mut Self {
self.set(0, 12)
}
/// Resets the peripheral
pub fn reset(&mut self) -> &mut Self {
// TODO : check lines are free
self.stop()
.set(0, 15)
}
/// Sets the frequency of the transfer
pub fn set_frequency(&mut self, f: Frequency) -> Result<&mut Self, I2CError> {
match f.mhz() {
2...50 => Ok( self.write_bits(1, 0, f.mhz() as u32, 6) ),
_ => Err(I2CError::InvalidBusSpeed),
}
}
/// Indicate this is the last trasnfer
pub fn last_transfer(&mut self) -> &mut Self {
self.set(1, 12)
}
/// Enable/Disable interrupt
pub fn int_state(&mut self, s: bool, int: I2CInterrupt) -> &mut Self {
let offsets = int.offsets();
match s {
true => self.set(offsets.0, offsets.1),
_ => self.clear(offsets.0, offsets.1),
}
}
/// Sets the addressing mode between 7-bit and 10-bit
pub fn | (&mut self, a: I2CBitMode) -> &mut Self {
match a {
I2CBitMode::Bit7 => self.clear(2, 15),
_ => self.set(2, 15),
}
}
/// Writes the interface address 1
/// To be set **after** the interface bit size is set (7-bit or 10-bit)
pub fn set_address_1(&mut self, addr: u32) -> &mut Self {
match self.is_set(2, 15) {
true => self.write_bits(2, 0, addr, 10),
_ => self.write_bits(2, 1, addr, 7),
}
}
/// Writes the interface address 2
/// Returns an error if not in 7 bit mode
pub fn set_address_2(&mut self, addr: u32) -> Result<&mut Self, I2CError> {
match self.is_set(2, 15) {
true => Err( I2CError::Address2NotAllowed),
_ => Ok( self.write_bits(3, 1, addr, 7) ),
}
}
/// Enable/Disable dual addressing mode
pub fn dual_address_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(3, 0),
_ => self.clear(3, 0),
}
}
/// Read received byte
pub fn read_data(&self) -> u8 {
self.block[4].read() as u8
}
/// Write data to be transmitted
pub fn write_data(&mut self, data: u32) -> &mut Self {
self.write_bits(4, 0, data, 8)
}
/// Returns true if the flag is raised
pub fn is_raised(&self, f: I2CFlags) -> bool {
let offsets = f.offsets();
self.is_set( offsets.0, offsets.1 )
}
/// Returns true if the device is master
pub fn is_master(&self) -> bool {
self.is_set(6, 0)
}
/// Returns true if the bus is busy
pub fn is_bus_busy(&self) -> bool {
self.is_set(6, 1)
}
/// Returns true if the TRA bit is set
pub fn is_tra_set(&self) -> bool {
self.is_set(6, 2)
}
/// Returns which Dual Address has matched
pub fn which_addr(&self) -> DualAddress {
match self.is_set(6, 7) {
true => DualAddress::Addr2,
_ => DualAddress::Addr1,
}
}
/// Returns the PEC register
pub fn pec(&self) -> u32 {
(self.block[6].read() >> 8) & 0b1111_1111
}
/// Clear the given flag
/// If the flag is cleared by hardware, it does nothing
pub fn clear_flag(&mut self, f: I2CFlags) -> &mut Self {
match f.offsets() {
(5, o) => match o {
8...15 => self.clear(5, o),
_ => self
},
_ => self
}
}
/// Set CCR
/// Refer to the STM32F4 user manual
pub fn set_ccr(&mut self, data: u32) -> &mut Self {
self.write_bits(7, 0, data, 12)
}
/// Set Master Mode
/// Refer to the STM32F4 user manual
pub fn set_master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
MasterMode::FM => self.set(7, 15),
}
}
/// Set duty cycle
/// Refer to the STM32F4 user manual
pub fn set_duty_cycle(&mut self, d: DutyCycle) -> &mut Self {
match d {
DutyCycle::D2 => self.clear(7, 14),
DutyCycle::D169 => self.set(7, 14),
}
}
/// Set maximum rise time
pub fn max_rise_time(&mut self, data: u32) -> &mut Self {
self.write_bits(8, 0, data, 6)
}
/// Enable/Disable Analog Filter
pub fn analog_filter_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.clear(9, 4),
_ => self.set(9, 4),
}
}
/// Sets the Digital Noise Filter
pub fn digital_noise_filter(&mut self, d: Option<u32>) -> &mut Self {
match d {
None => self.write_bits(9, 0, 0, 4),
Some(a) => self.write_bits(9, 0, a, 4),
}
}
}
impl I2c {
/// Set master mode (Standard or Fast)
pub fn master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
_ => self.set(7, 15),
}
}
/// Write Interface Address
pub fn set_address(&mut self, address: u32) -> &mut Self {
let cons = if self.is_set(2, 15) { (0, 10) } else { (1, 7) };
self.write_bits(2, cons.0, address, cons.1)
}
/// Set secondary address
pub fn set_secondary_address(&mut self, address: u32) -> &mut Self {
self.write_bits(3, 1, address, 7)
}
} | address_mode | identifier_name |
i2c.rs | //! I2C Peripheral
use crate::common::{ Register, Frequency, I2CInterrupt, I2CFlags, I2CBitMode, MasterMode, DutyCycle, DualAddress };
use crate::common::enums::RCCPeripheral;
use crate::common::structs::pins::Pin;
use embedded_hal::blocking::i2c::{ Read, Write, WriteRead };
use crate::peripherals::extended::{ gpio::Gpio, rcc::Rcc };
pub const I2C1: u32 = 0x4000_5400;
pub const I2C2: u32 = 0x4000_5800;
pub const I2C3: u32 = 0x4000_5C00;
pub const SIZE: usize = 10;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum I2CError {
/// NACK received
NACK,
/// Bus error
Bus,
/// Arbitration loss
Arbitration,
/// Overrun - Slave mode only
Overrun,
/// PEC - SMBUS mode only
PEC,
/// Timeout - SMBUS mode only
Timeout,
/// Alert - SMBUS mode only
Alert,
Other,
}
#[repr(C)]
pub struct I2c {
#[repr(C)]
block: &'static [Register<u32>; SIZE],
pins: (Pin, Pin),
}
impl I2c {
/// Sets bit at block and offset given
pub fn set(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] |= 1 << o;
self
}
/// Clears bit at block and offset given
pub fn clear(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] &= !(1 << o);
self
}
/// Checks if bit is set
pub fn is_set(&self, r: usize, b: usize) -> bool {
(self.block[r].read() >> b) & 1 == 1
}
pub fn write_bits(&mut self, b: usize, o: usize, data: u32, size: usize) -> &mut Self {
let mask = (1u32 << size) - 1;
let old = self.block[b].read();
self.block[b].write( old & !(mask << o) | ((data & mask) << o) );
self
}
}
impl I2c {
/// Set up as master
pub fn master<'a>(address: u32, pins: (Pin, Pin), rcc: &'a Rcc, clocks: Clocks, speed: Frequency) -> Result<Self, I2CError> {
let i2cid = match address {
I2C1 => RCCPeripheral::I2C1,
I2C2 => RCCPeripheral::I2C2,
I2C3 => RCCPeripheral::I2C3,
_ => return Err(I2CError::Other),
};
let new = I2c {
block: &mut *(address as *mut _),
pins,
};
let (sda, scl) = pins;
// TODO : Change to each board
sda.altfn(4)
.speed(HIGH);
scl.altfn(4)
.speed(HIGH);
rcc.peripheral_state(true, i2cid);
rcc.reset_peripheral(i2cid);
// TODO set up RCC clocks
// Disable he peripheral
// by clearing PE bit in CR1
new.clear(0, 0);
// Calculate settings for I2C speed modes
// If the user used the RCC given clocks, APB clock is legal
// Configure bus frequency into I2C peripheral
new.write_bits(1, 0, clocks.apb1.mhz(), 6);
let trise = if speed <= Frequency::KHz(100) {
clocks.apb1.mhz() + 1
} else {
((clocks.apb.mhz() * 300) / 1000) + 1
};
// Configure correct rise times
new.write_bits(8, 0, trise, 6);
// I2C clock control calculation
// If in slow mode
if speed <= Frequency::KHz(100) {
let ccr = match clocks.apb.hz() / (speed.hz() * 2) {
0...3 => 4,
n => n,
};
// Set clock to standard mode with appropiate parameters for selected speed
new.clear(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// Fast mode
// Defaults for now to 2:1 duty cycle
if true {
let ccr = match clocks.apb1.hz() / (speed.hz() * 3) {
0 => 1,
n => n,
};
new.set(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// 16:9 duty cycle
let ccr = match clocks.apb1.hz() / (speed.hz() * 25) {
0 => 1,
n => n,
};
new.set(7, 15)
.set(7, 14)
.write_bits(7, 0, ccr, 12);
}
}
new.set(0, 0);
Ok( new )
}
/// Stop the peripheral and release the pins
pub fn free(&mut self) -> (Pin, Pin) {
self.clear(0, 0);
self.pins
}
}
/*
impl I2c {
/// Scans for devices and returns all the addresses it found connected
pub fn scan(&mut self) -> Vec<u8> {
let mut addresses = Vec::new();
let mut void = &[0];
for i in 0..128 {
match self.read(i, void) {
Ok(()) => addresses.push(i),
_ => (),
}
}
addresses
}
}
*/
impl Read for I2c {
type Error = I2CError;
/// Read bytes into buffer
/// This function is based on MASTER mode
/// WARNING!
/// `unsafe` function (but now marked as such). This function may leave the sender hanging
/// if the sender sends more bytes than what the buffer can hold.
/// This is due to no STOP signal being sent back.
fn read(&mut self, addr: u8, buffer: &mut [u8]) -> Result<(), I2CError> {
let last = buffer.len() - 1;
// Send start condition and ACK bit
self.start()
.ack();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data(((addr as u32) << 1) + 1);
// wait until address was sent
while !self.is_set(5, 1) {}
// Clear condition by reading SR2
let _ = self.block[6].read();
// Store bytes
for i in 0..last {
buffer[i] = self.recv_byte()?;
}
self.nack()
.stop();
// Read last byte
buffer[last] = self.recv_byte()?;
Ok(())
}
}
impl Write for I2c {
type Error = I2CError;
/// Send a buffer of bytes
fn write(&mut self, addr: u8, bytes: &[u8]) -> Result<(), I2CError> {
// Send START condition
self.start();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data((addr as u32) << 1);
// wait until address was sent
while !self.is_set(5, 1) {}
// Clear condition by reading SR2
// let _ = ptr::read_volatile(self as u32 + 0x18);
let _ = self.block[6].read();
// Send the bytes
for b in bytes {
self.send_byte(*b)?;
}
Ok(())
}
}
impl WriteRead for I2c {
type Error = I2CError;
/// Writes some bytes then reads some bytes
fn write_read(&mut self, addr: u8, bytes: &[u8], buffer: &mut [u8]) -> Result<(), I2CError> {
self.write(addr, bytes)?;
self.read(addr, buffer)
}
}
impl I2c {
/// Sends a byte
pub fn send_byte(&mut self, byte: u8) -> Result<&mut Self, I2CError> {
// Wait until TX buffer is empty
while !self.is_raised(I2CFlags::TxEmpty) {}
self.write_data(byte as u32);
while {
if self.is_raised(I2CFlags::ACKFailure) {
return Err(I2CError::NACK);
}
!self.is_raised(I2CFlags::TransferComplete)
} {}
Ok( self )
}
/// Receive a byte
pub fn recv_byte(&self) -> Result<u8, I2CError> {
while !self.is_raised(I2CFlags::RxNotEmpty) {}
Ok( self.read_data() )
}
}
impl I2c {
/// Enable the sending of ACK signal after byte transfer
pub fn ack(&mut self) -> &mut Self {
self.set(0, 10)
}
/// Disable the sending of ACK signal (effectively sending a NACK) after byte transfer
pub fn nack(&mut self) -> &mut Self {
self.clear(0, 10)
}
/// Stop generation
/// 0: No stop generation
/// 1: Slave Mode - Release the SCL and SDA lines after current byte transfer
/// Master Mode - Stop generation after the current byte transfer or current Start condition is sent
pub fn stop(&mut self) -> &mut Self {
self.set(0, 9)
}
/// Start generation
/// 0: No start generation
/// 1: Slave Mode - Start generation when bus id free
/// Master Mode - Repeated start generation
pub fn start(&mut self) -> &mut Self {
self.set(0, 8)
}
/// Enable/Disable peripheral
pub fn state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(0, 0),
_ => self.clear(0, 0),
}
}
/// If enabled the next byte will received in shift register
pub fn receive_in_shift(&mut self) -> &mut Self {
self.set(0, 11)
}
/// Starts Packet Error Checking (PEC) for the next transfer
pub fn start_pec(&mut self) -> &mut Self {
self.set(0, 12)
}
/// Resets the peripheral
pub fn reset(&mut self) -> &mut Self {
// TODO : check lines are free
self.stop()
.set(0, 15)
}
/// Sets the frequency of the transfer
pub fn set_frequency(&mut self, f: Frequency) -> Result<&mut Self, I2CError> {
match f.mhz() {
2...50 => Ok( self.write_bits(1, 0, f.mhz() as u32, 6) ),
_ => Err(I2CError::InvalidBusSpeed),
}
}
/// Indicate this is the last trasnfer
pub fn last_transfer(&mut self) -> &mut Self {
self.set(1, 12)
}
/// Enable/Disable interrupt
pub fn int_state(&mut self, s: bool, int: I2CInterrupt) -> &mut Self {
let offsets = int.offsets();
match s {
true => self.set(offsets.0, offsets.1),
_ => self.clear(offsets.0, offsets.1),
}
}
/// Sets the addressing mode between 7-bit and 10-bit
pub fn address_mode(&mut self, a: I2CBitMode) -> &mut Self {
match a {
I2CBitMode::Bit7 => self.clear(2, 15),
_ => self.set(2, 15),
}
}
/// Writes the interface address 1
/// To be set **after** the interface bit size is set (7-bit or 10-bit)
pub fn set_address_1(&mut self, addr: u32) -> &mut Self {
match self.is_set(2, 15) {
true => self.write_bits(2, 0, addr, 10),
_ => self.write_bits(2, 1, addr, 7),
}
}
/// Writes the interface address 2
/// Returns an error if not in 7 bit mode
pub fn set_address_2(&mut self, addr: u32) -> Result<&mut Self, I2CError> {
match self.is_set(2, 15) {
true => Err( I2CError::Address2NotAllowed),
_ => Ok( self.write_bits(3, 1, addr, 7) ),
}
}
/// Enable/Disable dual addressing mode
pub fn dual_address_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(3, 0),
_ => self.clear(3, 0),
}
}
/// Read received byte
pub fn read_data(&self) -> u8 {
self.block[4].read() as u8
}
/// Write data to be transmitted
pub fn write_data(&mut self, data: u32) -> &mut Self {
self.write_bits(4, 0, data, 8)
}
/// Returns true if the flag is raised
pub fn is_raised(&self, f: I2CFlags) -> bool {
let offsets = f.offsets();
self.is_set( offsets.0, offsets.1 )
}
/// Returns true if the device is master
pub fn is_master(&self) -> bool {
self.is_set(6, 0)
}
/// Returns true if the bus is busy
pub fn is_bus_busy(&self) -> bool {
self.is_set(6, 1)
}
/// Returns true if the TRA bit is set
pub fn is_tra_set(&self) -> bool {
self.is_set(6, 2)
}
/// Returns which Dual Address has matched
pub fn which_addr(&self) -> DualAddress {
match self.is_set(6, 7) {
true => DualAddress::Addr2,
_ => DualAddress::Addr1,
}
}
/// Returns the PEC register
pub fn pec(&self) -> u32 |
/// Clear the given flag
/// If the flag is cleared by hardware, it does nothing
pub fn clear_flag(&mut self, f: I2CFlags) -> &mut Self {
match f.offsets() {
(5, o) => match o {
8...15 => self.clear(5, o),
_ => self
},
_ => self
}
}
/// Set CCR
/// Refer to the STM32F4 user manual
pub fn set_ccr(&mut self, data: u32) -> &mut Self {
self.write_bits(7, 0, data, 12)
}
/// Set Master Mode
/// Refer to the STM32F4 user manual
pub fn set_master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
MasterMode::FM => self.set(7, 15),
}
}
/// Set duty cycle
/// Refer to the STM32F4 user manual
pub fn set_duty_cycle(&mut self, d: DutyCycle) -> &mut Self {
match d {
DutyCycle::D2 => self.clear(7, 14),
DutyCycle::D169 => self.set(7, 14),
}
}
/// Set maximum rise time
pub fn max_rise_time(&mut self, data: u32) -> &mut Self {
self.write_bits(8, 0, data, 6)
}
/// Enable/Disable Analog Filter
pub fn analog_filter_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.clear(9, 4),
_ => self.set(9, 4),
}
}
/// Sets the Digital Noise Filter
pub fn digital_noise_filter(&mut self, d: Option<u32>) -> &mut Self {
match d {
None => self.write_bits(9, 0, 0, 4),
Some(a) => self.write_bits(9, 0, a, 4),
}
}
}
impl I2c {
/// Set master mode (Standard or Fast)
pub fn master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
_ => self.set(7, 15),
}
}
/// Write Interface Address
pub fn set_address(&mut self, address: u32) -> &mut Self {
let cons = if self.is_set(2, 15) { (0, 10) } else { (1, 7) };
self.write_bits(2, cons.0, address, cons.1)
}
/// Set secondary address
pub fn set_secondary_address(&mut self, address: u32) -> &mut Self {
self.write_bits(3, 1, address, 7)
}
} | {
(self.block[6].read() >> 8) & 0b1111_1111
} | identifier_body |
i2c.rs | //! I2C Peripheral
use crate::common::{ Register, Frequency, I2CInterrupt, I2CFlags, I2CBitMode, MasterMode, DutyCycle, DualAddress };
use crate::common::enums::RCCPeripheral;
use crate::common::structs::pins::Pin;
use embedded_hal::blocking::i2c::{ Read, Write, WriteRead };
use crate::peripherals::extended::{ gpio::Gpio, rcc::Rcc };
pub const I2C1: u32 = 0x4000_5400;
pub const I2C2: u32 = 0x4000_5800;
pub const I2C3: u32 = 0x4000_5C00;
pub const SIZE: usize = 10;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum I2CError {
/// NACK received
NACK,
/// Bus error
Bus,
/// Arbitration loss
Arbitration,
/// Overrun - Slave mode only
Overrun,
/// PEC - SMBUS mode only
PEC,
/// Timeout - SMBUS mode only
Timeout,
/// Alert - SMBUS mode only
Alert,
Other,
}
#[repr(C)]
pub struct I2c {
#[repr(C)]
block: &'static [Register<u32>; SIZE],
pins: (Pin, Pin),
}
impl I2c {
/// Sets bit at block and offset given
pub fn set(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] |= 1 << o;
self
}
/// Clears bit at block and offset given
pub fn clear(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] &= !(1 << o);
self
}
/// Checks if bit is set
pub fn is_set(&self, r: usize, b: usize) -> bool {
(self.block[r].read() >> b) & 1 == 1
}
pub fn write_bits(&mut self, b: usize, o: usize, data: u32, size: usize) -> &mut Self {
let mask = (1u32 << size) - 1;
let old = self.block[b].read();
self.block[b].write( old & !(mask << o) | ((data & mask) << o) );
self
}
}
impl I2c {
/// Set up as master
pub fn master<'a>(address: u32, pins: (Pin, Pin), rcc: &'a Rcc, clocks: Clocks, speed: Frequency) -> Result<Self, I2CError> {
let i2cid = match address {
I2C1 => RCCPeripheral::I2C1,
I2C2 => RCCPeripheral::I2C2,
I2C3 => RCCPeripheral::I2C3,
_ => return Err(I2CError::Other),
};
let new = I2c {
block: &mut *(address as *mut _),
pins,
};
let (sda, scl) = pins;
// TODO : Change to each board
sda.altfn(4)
.speed(HIGH);
scl.altfn(4)
.speed(HIGH);
rcc.peripheral_state(true, i2cid);
rcc.reset_peripheral(i2cid);
// TODO set up RCC clocks
// Disable he peripheral
// by clearing PE bit in CR1
new.clear(0, 0);
// Calculate settings for I2C speed modes
// If the user used the RCC given clocks, APB clock is legal
// Configure bus frequency into I2C peripheral
new.write_bits(1, 0, clocks.apb1.mhz(), 6);
let trise = if speed <= Frequency::KHz(100) {
clocks.apb1.mhz() + 1
} else {
((clocks.apb.mhz() * 300) / 1000) + 1
};
// Configure correct rise times
new.write_bits(8, 0, trise, 6);
// I2C clock control calculation
// If in slow mode
if speed <= Frequency::KHz(100) {
let ccr = match clocks.apb.hz() / (speed.hz() * 2) {
0...3 => 4,
n => n,
};
// Set clock to standard mode with appropiate parameters for selected speed
new.clear(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// Fast mode
// Defaults for now to 2:1 duty cycle
if true {
let ccr = match clocks.apb1.hz() / (speed.hz() * 3) {
0 => 1,
n => n,
};
new.set(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// 16:9 duty cycle
let ccr = match clocks.apb1.hz() / (speed.hz() * 25) {
0 => 1,
n => n,
};
new.set(7, 15)
.set(7, 14)
.write_bits(7, 0, ccr, 12);
}
}
new.set(0, 0);
Ok( new )
}
/// Stop the peripheral and release the pins
pub fn free(&mut self) -> (Pin, Pin) {
self.clear(0, 0);
self.pins
}
}
/*
impl I2c {
/// Scans for devices and returns all the addresses it found connected
pub fn scan(&mut self) -> Vec<u8> {
let mut addresses = Vec::new();
let mut void = &[0];
for i in 0..128 {
match self.read(i, void) {
Ok(()) => addresses.push(i),
_ => (),
}
}
addresses
}
}
*/
impl Read for I2c {
type Error = I2CError;
/// Read bytes into buffer
/// This function is based on MASTER mode
/// WARNING!
/// `unsafe` function (but now marked as such). This function may leave the sender hanging
/// if the sender sends more bytes than what the buffer can hold.
/// This is due to no STOP signal being sent back.
fn read(&mut self, addr: u8, buffer: &mut [u8]) -> Result<(), I2CError> {
let last = buffer.len() - 1;
// Send start condition and ACK bit
self.start()
.ack();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data(((addr as u32) << 1) + 1);
// wait until address was sent |
// Clear condition by reading SR2
let _ = self.block[6].read();
// Store bytes
for i in 0..last {
buffer[i] = self.recv_byte()?;
}
self.nack()
.stop();
// Read last byte
buffer[last] = self.recv_byte()?;
Ok(())
}
}
impl Write for I2c {
type Error = I2CError;
/// Send a buffer of bytes
fn write(&mut self, addr: u8, bytes: &[u8]) -> Result<(), I2CError> {
// Send START condition
self.start();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data((addr as u32) << 1);
// wait until address was sent
while !self.is_set(5, 1) {}
// Clear condition by reading SR2
// let _ = ptr::read_volatile(self as u32 + 0x18);
let _ = self.block[6].read();
// Send the bytes
for b in bytes {
self.send_byte(*b)?;
}
Ok(())
}
}
impl WriteRead for I2c {
type Error = I2CError;
/// Writes some bytes then reads some bytes
fn write_read(&mut self, addr: u8, bytes: &[u8], buffer: &mut [u8]) -> Result<(), I2CError> {
self.write(addr, bytes)?;
self.read(addr, buffer)
}
}
impl I2c {
/// Sends a byte
pub fn send_byte(&mut self, byte: u8) -> Result<&mut Self, I2CError> {
// Wait until TX buffer is empty
while !self.is_raised(I2CFlags::TxEmpty) {}
self.write_data(byte as u32);
while {
if self.is_raised(I2CFlags::ACKFailure) {
return Err(I2CError::NACK);
}
!self.is_raised(I2CFlags::TransferComplete)
} {}
Ok( self )
}
/// Receive a byte
pub fn recv_byte(&self) -> Result<u8, I2CError> {
while !self.is_raised(I2CFlags::RxNotEmpty) {}
Ok( self.read_data() )
}
}
impl I2c {
/// Enable the sending of ACK signal after byte transfer
pub fn ack(&mut self) -> &mut Self {
self.set(0, 10)
}
/// Disable the sending of ACK signal (effectively sending a NACK) after byte transfer
pub fn nack(&mut self) -> &mut Self {
self.clear(0, 10)
}
/// Stop generation
/// 0: No stop generation
/// 1: Slave Mode - Release the SCL and SDA lines after current byte transfer
/// Master Mode - Stop generation after the current byte transfer or current Start condition is sent
pub fn stop(&mut self) -> &mut Self {
self.set(0, 9)
}
/// Start generation
/// 0: No start generation
/// 1: Slave Mode - Start generation when bus id free
/// Master Mode - Repeated start generation
pub fn start(&mut self) -> &mut Self {
self.set(0, 8)
}
/// Enable/Disable peripheral
pub fn state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(0, 0),
_ => self.clear(0, 0),
}
}
/// If enabled the next byte will received in shift register
pub fn receive_in_shift(&mut self) -> &mut Self {
self.set(0, 11)
}
/// Starts Packet Error Checking (PEC) for the next transfer
pub fn start_pec(&mut self) -> &mut Self {
self.set(0, 12)
}
/// Resets the peripheral
pub fn reset(&mut self) -> &mut Self {
// TODO : check lines are free
self.stop()
.set(0, 15)
}
/// Sets the frequency of the transfer
pub fn set_frequency(&mut self, f: Frequency) -> Result<&mut Self, I2CError> {
match f.mhz() {
2...50 => Ok( self.write_bits(1, 0, f.mhz() as u32, 6) ),
_ => Err(I2CError::InvalidBusSpeed),
}
}
/// Indicate this is the last trasnfer
pub fn last_transfer(&mut self) -> &mut Self {
self.set(1, 12)
}
/// Enable/Disable interrupt
pub fn int_state(&mut self, s: bool, int: I2CInterrupt) -> &mut Self {
let offsets = int.offsets();
match s {
true => self.set(offsets.0, offsets.1),
_ => self.clear(offsets.0, offsets.1),
}
}
/// Sets the addressing mode between 7-bit and 10-bit
pub fn address_mode(&mut self, a: I2CBitMode) -> &mut Self {
match a {
I2CBitMode::Bit7 => self.clear(2, 15),
_ => self.set(2, 15),
}
}
/// Writes the interface address 1
/// To be set **after** the interface bit size is set (7-bit or 10-bit)
pub fn set_address_1(&mut self, addr: u32) -> &mut Self {
match self.is_set(2, 15) {
true => self.write_bits(2, 0, addr, 10),
_ => self.write_bits(2, 1, addr, 7),
}
}
/// Writes the interface address 2
/// Returns an error if not in 7 bit mode
pub fn set_address_2(&mut self, addr: u32) -> Result<&mut Self, I2CError> {
match self.is_set(2, 15) {
true => Err( I2CError::Address2NotAllowed),
_ => Ok( self.write_bits(3, 1, addr, 7) ),
}
}
/// Enable/Disable dual addressing mode
pub fn dual_address_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(3, 0),
_ => self.clear(3, 0),
}
}
/// Read received byte
pub fn read_data(&self) -> u8 {
self.block[4].read() as u8
}
/// Write data to be transmitted
pub fn write_data(&mut self, data: u32) -> &mut Self {
self.write_bits(4, 0, data, 8)
}
/// Returns true if the flag is raised
pub fn is_raised(&self, f: I2CFlags) -> bool {
let offsets = f.offsets();
self.is_set( offsets.0, offsets.1 )
}
/// Returns true if the device is master
pub fn is_master(&self) -> bool {
self.is_set(6, 0)
}
/// Returns true if the bus is busy
pub fn is_bus_busy(&self) -> bool {
self.is_set(6, 1)
}
/// Returns true if the TRA bit is set
pub fn is_tra_set(&self) -> bool {
self.is_set(6, 2)
}
/// Returns which Dual Address has matched
pub fn which_addr(&self) -> DualAddress {
match self.is_set(6, 7) {
true => DualAddress::Addr2,
_ => DualAddress::Addr1,
}
}
/// Returns the PEC register
pub fn pec(&self) -> u32 {
(self.block[6].read() >> 8) & 0b1111_1111
}
/// Clear the given flag
/// If the flag is cleared by hardware, it does nothing
pub fn clear_flag(&mut self, f: I2CFlags) -> &mut Self {
match f.offsets() {
(5, o) => match o {
8...15 => self.clear(5, o),
_ => self
},
_ => self
}
}
/// Set CCR
/// Refer to the STM32F4 user manual
pub fn set_ccr(&mut self, data: u32) -> &mut Self {
self.write_bits(7, 0, data, 12)
}
/// Set Master Mode
/// Refer to the STM32F4 user manual
pub fn set_master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
MasterMode::FM => self.set(7, 15),
}
}
/// Set duty cycle
/// Refer to the STM32F4 user manual
pub fn set_duty_cycle(&mut self, d: DutyCycle) -> &mut Self {
match d {
DutyCycle::D2 => self.clear(7, 14),
DutyCycle::D169 => self.set(7, 14),
}
}
/// Set maximum rise time
pub fn max_rise_time(&mut self, data: u32) -> &mut Self {
self.write_bits(8, 0, data, 6)
}
/// Enable/Disable Analog Filter
pub fn analog_filter_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.clear(9, 4),
_ => self.set(9, 4),
}
}
/// Sets the Digital Noise Filter
pub fn digital_noise_filter(&mut self, d: Option<u32>) -> &mut Self {
match d {
None => self.write_bits(9, 0, 0, 4),
Some(a) => self.write_bits(9, 0, a, 4),
}
}
}
impl I2c {
/// Set master mode (Standard or Fast)
pub fn master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
_ => self.set(7, 15),
}
}
/// Write Interface Address
pub fn set_address(&mut self, address: u32) -> &mut Self {
let cons = if self.is_set(2, 15) { (0, 10) } else { (1, 7) };
self.write_bits(2, cons.0, address, cons.1)
}
/// Set secondary address
pub fn set_secondary_address(&mut self, address: u32) -> &mut Self {
self.write_bits(3, 1, address, 7)
}
} | while !self.is_set(5, 1) {} | random_line_split |
bot.go | package main
import (
"fmt"
"io"
"log"
"math"
"math/rand"
"os"
"path"
"sort"
"strconv"
"time"
"github.com/go-errors/errors"
"github.com/andyleap/gioframework"
"github.com/xarg/gopathfinding"
"os/signal"
)
const (
TileEmpty = -1
TileMountain = -2
TileFog = -3
TileFogObstacle = -4
)
// If we allow too few future moves, then slow network means we could miss turns
// If we allow too many future moves, then bot is less adaptive to changing
// conditions
const MaxPlannedMoves = 8
const NumGamesToPlay = 100
func main() {
client, _ := gioframework.Connect("bot", os.Getenv("GENERALS_BOT_ID"), os.Getenv("GENERALS_BOT_NAME"))
go client.Run()
abort := false
ch := make(chan os.Signal)
signal.Notify(ch, os.Interrupt)
go func() {
<-ch
log.Println("abort set to true")
abort = true
<-ch
log.Println("ok leaving this function")
os.Exit(2)
}()
// Hack to help with race condition for setting name
time.Sleep(time.Second)
for i := 0; i < NumGamesToPlay; i++ {
if abort {
break
}
setupLogging()
log.Printf("---------- Game #%v/%v -----------", i+1, NumGamesToPlay)
realGame := os.Getenv("REAL_GAME") == "true"
var game *gioframework.Game
if realGame {
game = client.Join1v1()
log.Println("Waiting for opponent...")
} else {
gameId := "bot_game"
game = client.JoinCustomGame(gameId)
teamVar := os.Getenv("TEAM")
if teamVar != "" {
team, _ := strconv.Atoi(teamVar)
game.SetTeam(team, gameId)
}
url := "http://bot.generals.io/games/" + gameId
log.Printf("Joined custom game, go to: %v", url)
game.SetForceStart(true)
}
started := false
game.Start = func(playerIndex int, users []string) {
log.Println("Game started with ", users)
log.Printf("Replay available at: http://bot.generals.io/replays/%v", game.ReplayID)
for i, user := range users {
if i == playerIndex {
continue
}
game.SendChat(fmt.Sprintf("%v, prepare to be destroyed!", user))
}
started = true
}
done := false
game.Won = func() {
log.Println("=========================== Won game! ============================")
done = true
}
game.Lost = func() {
log.Println("============================ Lost game... ========================")
done = true
}
for !started {
time.Sleep(1 * time.Second)
}
time.Sleep(1 * time.Second)
for !done {
time.Sleep(100 * time.Millisecond)
if game.QueueLength() > 0 {
continue
}
if game.TurnCount < 20 {
continue
}
logTurnData(game)
from, toTarget := GetBestMove(game)
if from < 0 {
continue
}
path, err := GetShortestPath(game, from, toTarget)
if err != nil {
log.Println(err)
continue
}
if len(path) == 0 {
log.Printf("Registering impossible tile: %v", game.GetCoordString(toTarget))
game.ImpossibleTiles[toTarget] = true
}
max_num_moves := min(len(path)-1, MaxPlannedMoves)
for i := 0; i < max_num_moves; i++ {
log.Printf("Move army: %v -> %v (Armies: %v -> %v)",
game.GetCoordString(path[i]), game.GetCoordString(path[i+1]),
game.GameMap[path[i]].Armies, game.GameMap[path[i+1]].Armies)
game.Attack(path[i], path[i+1], false)
}
}
log.Printf("Replay available at: http://bot.generals.io/replays/%v", game.ReplayID)
time.Sleep(10*time.Second)
}
}
func setupLogging() {
logDir := "log"
_ = os.Mkdir(logDir, os.ModePerm)
rand.Seed(time.Now().UTC().UnixNano())
logFilename := path.Join(logDir, "log_"+strconv.Itoa(rand.Intn(10000)))
logFile, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
check(err)
mw := io.MultiWriter(os.Stdout, logFile)
log.SetOutput(mw)
}
func logTurnData(g *gioframework.Game) {
log.Println("------------------------------------------")
log.Printf("Turn: %v (UI%v)", g.TurnCount, float64(g.TurnCount)/2.)
var msgs []string
for i, s := range g.Scores {
msg := fmt.Sprintf("%10v: Tiles: %v, Army: %v", g.Usernames[i], s.Tiles, s.Armies)
msgs = append(msgs, msg)
}
sort.Strings(msgs)
for _, msg := range msgs {
log.Println(msg)
}
if g.TurnCount < 10 {
log.Printf("My General at: %v", g.GetCoordString(g.Generals[g.PlayerIndex]))
}
log.Println("------------------------------------------")
}
func check(err error) {
if err != nil {
panic(err)
}
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func Btoi(b bool) int {
if b {
return 1
}
return 0
}
func Btof(b bool) float64 {
if b {
return 1.
}
return 0.
}
func getHeuristicPathDistance(game *gioframework.Game, from, to int) float64 {
/* Would have preferred to use A* to get actual path distance, but that's
prohibitvely expensive. (I need to calculate this many times per turn)
*/
baseDistance := game.GetDistance(from, to)
tilesInSquare := getTilesInSquare(game, from, to)
numObstacles := 0.
for _, tile := range tilesInSquare {
numObstacles += Btof(!game.Walkable(tile))
}
total_area := len(tilesInSquare)
obstacleRatio := numObstacles / float64(total_area)
// Not sure this is the best heuristic, but it's simple, so I'll use it for
// now
hDist := float64(baseDistance) * (1. + 2.0*obstacleRatio)
//log.Printf("hDist from %v to %v is: %v", game.GetCoordString(from), game.GetCoordString(to), hDist)
//log.Println("tilesInSquare: ")
//for _, i := range tilesInSquare {
// log.Println(game.GetCoordString(i))
//}
//log.Println("baseDistance:", baseDistance)
//log.Println("obstacleRatio:", obstacleRatio)
return hDist
}
func getTilesInSquare(game *gioframework.Game, i, j int) []int {
// Gets index of all tiles in a square defined by two diagonally opposed
// corners
rowI := game.GetRow(i)
colI := game.GetCol(i)
rowJ := game.GetRow(j)
colJ := game.GetCol(j)
rowLimits := []int{rowI, rowJ}
colLimits := []int{colI, colJ}
sort.Ints(rowLimits)
sort.Ints(colLimits)
var tiles []int
for row := rowLimits[0]; row < rowLimits[1]+1; row++ {
for col := colLimits[0]; col < colLimits[1]+1; col++ {
tiles = append(tiles, game.GetIndex(row, col))
}
}
return tiles
}
type AstarError struct {
From, To string
}
func (e AstarError) Error() string {
return fmt.Sprintf("Astar error with from:%v to:%v", e.From, e.To)
}
func GetShortestPath(game *gioframework.Game, from, to int) (path []int, err error) {
// pathfinding.Astar has no error handling, so we catch its panics
defer func() {
if r := recover(); r != nil {
log.Printf("ERROR, GetShortestPath recovered from panic: %v\n", r)
path = []int{}
err = AstarError{
game.GetCoordString(from),
game.GetCoordString(to),
}
}
}()
// TODO: if from and to are the same, just erturn an err.
map_data := *pathfinding.NewMapData(game.Height, game.Width)
for row := 0; row < game.Height; row++ {
for col := 0; col < game.Width; col++ {
i := game.GetIndex(row, col)
tile := game.GameMap[i]
// We don't want to accidentally attack cities on route to
// somewhere else. Note: if it is the final destination, it'll be
// changed
not_my_city := tile.Type == gioframework.City && tile.Faction != game.PlayerIndex
map_data[row][col] = Btoi(!game.Walkable(i) || not_my_city)
}
}
map_data[game.GetRow(from)][game.GetCol(from)] = pathfinding.START
map_data[game.GetRow(to)][game.GetCol(to)] = pathfinding.STOP
graph := pathfinding.NewGraph(&map_data)
nodesPath := pathfinding.Astar(graph)
path = []int{}
for _, node := range nodesPath {
path = append(path, game.GetIndex(node.X, node.Y))
}
return path, nil
}
func GetBestMove(game *gioframework.Game) (bestFrom int, bestTo int) {
defer func() {
if r := recover(); r != nil {
log.Printf("ERROR, GetBestMove recovered from panic: %v", r)
fmt.Println(errors.Wrap(r, 2).ErrorStack())
bestFrom = -1
bestTo = -1
}
}()
bestFrom = -1
bestTo = -1
bestTotalScore := -10.
var bestScores map[string]float64
myGeneral := game.Generals[game.PlayerIndex]
enemyCOM := getEnemyCenterOfMass(game)
/// First check for attacking new empty or enemy tiles
for from, fromTile := range game.GameMap {
if fromTile.Faction != game.PlayerIndex || fromTile.Armies < 2 {
continue
}
for to, toTile := range game.GameMap {
if toTile.Faction < TileEmpty || toTile.Faction == game.PlayerIndex {
continue
}
if game.ImpossibleTiles[to] {
continue
}
isEmpty := toTile.Faction == TileEmpty
isEnemy := IsEnemy(game, toTile)
isGeneral := toTile.Type == gioframework.General
isCity := toTile.Type == gioframework.City
outnumber := float64(fromTile.Armies - toTile.Armies)
dist := getHeuristicPathDistance(game, from, to)
distFromGen := getHeuristicPathDistance(game, myGeneral, to)
center := game.GetIndex(game.Width/2, game.Height/2)
distCenter := getHeuristicPathDistance(game, center, to)
centerness := 1. - distCenter/float64(game.Width)
// This is the vector pointing towards the enemy
enemyVector := [2]int{
game.GetRow(enemyCOM) - game.GetRow(myGeneral),
game.GetCol(enemyCOM) - game.GetCol(myGeneral),
}
// The vector showing the proposed move
moveVector := [2]int{
game.GetRow(to) - game.GetRow(from),
game.GetCol(to) - game.GetCol(from),
}
neighbors := game.GetNeighborhood(to, false)
numAlliedNeighbors := 0
for _, neighbor := range neighbors {
if !IsEnemy(game, game.GameMap[neighbor]) {
numAlliedNeighbors += 1
}
}
if isCity && outnumber < 2 && !isEnemy {
// Never attack a neutral city and lose
continue
}
scores := make(map[string]float64)
scores["outnumber score"] = Truncate(outnumber/200, 0., 0.25) * Btof(isEnemy)
scores["outnumbered penalty"] = -0.2 * Btof(outnumber < 2)
scores["general threat score"] = (0.25 * math.Pow(distFromGen, -1.0)) *
Truncate(float64(toTile.Armies)/10, 0., 1.0) * Btof(isEnemy)
scores["dist penalty"] = Truncate(-0.5*dist/30, -0.3, 0)
scores["dist gt army penalty"] = -0.2 * Btof(fromTile.Armies < int(dist))
scores["is enemy score"] = 0.05 * Btof(isEnemy)
scores["close city score"] = 0.35 * Btof(isCity && outnumber >= 2) *
math.Pow(distFromGen, -0.5)
scores["enemy city score"] = 0.2 * Btof(isCity && isEnemy)
scores["enemy gen score"] = 0.15 * Btof(isGeneral) * Btof(isEnemy)
scores["empty score"] = 0.08 * Btof(isEmpty)
// Generally a good strategy to take the center of the board
scores["centerness score"] = 0.02 * centerness
// You should move towards enemy's main base, not random little
// patches of enemy. This prevents the bot from "cleaning up"
// irrelevant squares. This could be improved by making the vectors
// normalized and having the score gradually increase as you point
// towards the enemy
scores["towards enemy score"] = 0.03 * Btof(dotProduct(enemyVector, moveVector) > 1)
// Instead of attacking all the tiles on the enemy's border it is
// typically better to make a deep drive into enemy land
scores["deep drive score"] = 0.04 * Btof(numAlliedNeighbors < 2)
totalScore := 0.
for _, score := range scores {
totalScore += score
}
//log.Printf("============Considering move %v->%v, got score: %v\n", from, to, totalScore)
//logSortedScores(scores)
if totalScore > bestTotalScore {
bestScores = scores
bestTotalScore = totalScore
bestFrom = from
bestTo = to
}
}
}
logSortedScores(bestScores)
log.Printf("Attack score: %.2f", bestTotalScore)
log.Printf("From:%v To:%v", game.GetCoordString(bestFrom), game.GetCoordString(bestTo))
log.Println("--------")
/////////////// Then check for consolidation //////////////////////////////
//consolScore := getConsolidationScore(game)
// It's a good idea to consolidate armies right after the armies regenerate.
// armyCycle shows the amount of the cycle that's passed. [0, 1]
armyCycle := float64(game.TurnCount % 50) / 50
consolScore := 0.6 * math.Pow(1-armyCycle, 6) - 0.2
log.Printf("Consolidation score:%.2f", consolScore)
tiles := getTilesSortedOnArmies(game)
if len(tiles) > 10 && consolScore > bestTotalScore {
largestTile := tiles[0]
for _, tile := range tiles[:5] {
log.Printf("Army ranked: %v", game.GameMap[tile].Armies)
}
highestAvArmy := 0.
for _, from := range tiles[1:] {
if game.GameMap[from].Armies < 2 {
continue
}
// Warning: this path could cut through enemy territory! Keep an
// eye for this
path_, _ := GetShortestPath(game, from, largestTile) // TODO: handle err //TODO: this throws an error it seems! largestTile == from ??!!
armies := 0
for _, i := range path_[:len(path_)-1] {
armies += game.GameMap[i].Armies
}
av_army := float64(armies) / float64(len(path_)-1)
if av_army > highestAvArmy {
highestAvArmy = av_army
bestFrom = from
}
}
bestTo = largestTile
// We want to move towards the enemy. Reverse if that's not the
// case
if enemyCOM >= 0 {
fromDistEnemy := getHeuristicPathDistance(game, enemyCOM, bestFrom)
toDistEnemy := getHeuristicPathDistance(game, enemyCOM, bestTo)
if fromDistEnemy < toDistEnemy {
log.Println("Switching direction of consolidation")
bestFrom, bestTo = bestTo, bestFrom
}
}
log.Printf("Consolidating, with average army: %v", highestAvArmy)
}
// Trash talk
toTile := game.GameMap[bestTo]
if toTile.Type == gioframework.City && IsEnemy(game, toTile) {
game.SendChat("Sorry, I'm gonna need that citadel")
}
return bestFrom, bestTo
}
func IsEnemy(game *gioframework.Game, tile gioframework.Cell) bool {
if len(game.Teams) == 0 {
// This means we're playing 1v1 or FFA
return tile.Faction != game.PlayerIndex && tile.Faction >= 0
} else {
myTeam := game.Teams[game.PlayerIndex]
return tile.Faction >= 0 && game.Teams[tile.Faction] != myTeam
}
}
func logSortedScores(scores map[string]float64) {
keys := make([]string, len(scores))
i := 0
for k := range scores {
keys[i] = k
i++
}
sort.Strings(keys)
for _, k := range keys {
log.Printf("%20v: %.3f\n", k, scores[k])
}
}
func Truncate(val, min, max float64) float64 |
func Sum(x []int) int { // TODO make this an interface for fun
sum := 0
for _, i := range x {
sum += i
}
return sum
}
func dotProduct(x [2]int, y [2]int) float64 {
return float64(x[0]) * float64(y[0]) + float64(x[1]) * float64(y[1])
}
// getEnemyCenterOfMass find the central point of the visible enemy terrain,
// weighted by armies, and rounded to the closes tile.
func getEnemyCenterOfMass(game *gioframework.Game) int {
rows := []int{}
cols := []int{}
armies := 0
for i, tile := range game.GameMap {
if IsEnemy(game, tile) {
army := tile.Armies
rows = append(rows, army*game.GetRow(i))
cols = append(cols, army*game.GetCol(i))
armies += army
}
}
var COM int
if armies == 0 {
COM = -1
log.Println("COM is: -1")
return COM
}
avRow := float64(Sum(rows))/float64(armies)
avCol := float64(Sum(cols))/float64(armies)
COM = game.GetIndex(int(avRow), int(avCol))
log.Printf("COM is: %v\n", game.GetCoordString(COM))
return COM
}
func getConsolidationScore(game *gioframework.Game) float64 {
gini := getArmyGiniCoefficient(game)
totalArmy := float64(game.Scores[game.PlayerIndex].Armies)
log.Printf("Gini coefficient: %.2f", gini)
log.Printf("Total army: %v", totalArmy)
return (0.65 - gini) * Truncate(totalArmy/500., 0.5, 2.)
}
func getArmyGiniCoefficient(game *gioframework.Game) float64 {
movableArmies := []int{}
for _, tile := range game.GameMap {
if tile.Faction == game.PlayerIndex {
movableArmies = append(movableArmies, tile.Armies-1)
}
}
//log.Printf("movableArmies: %v", movableArmies)
return giniCoefficient(movableArmies)
}
// giniCoefficient is calculated as described here:
// https://en.wikipedia.org/wiki/Gini_coefficient#Alternate_expressions
func giniCoefficient(nums []int) float64 {
sort.Ints(nums)
n := len(nums)
denom := 0
for _, num := range nums {
denom += num
}
denom *= n
numer := 0
for i, num := range nums {
numer += (i + 1) * num
}
numer *= 2
return float64(numer)/float64(denom) - float64(n+1)/float64(n)
}
func getTilesSortedOnArmies(game *gioframework.Game) []int {
tileToArmy := make(map[int]int)
for i := 0; i < game.Height*game.Width; i++ {
tile := game.GameMap[i]
if tile.Faction == game.PlayerIndex {
tileToArmy[i] = tile.Armies
}
}
largestArmyTiles := sortKeysByValues(tileToArmy, true)
return largestArmyTiles
}
func sortKeysByValues(m map[int]int, reversed bool) []int {
n := map[int][]int{}
var a []int
for k, v := range m {
n[v] = append(n[v], k)
}
for v := range n {
a = append(a, v)
}
keys := []int{}
if reversed {
sort.Sort(sort.Reverse(sort.IntSlice(a)))
} else {
sort.Sort(sort.IntSlice(a))
}
for _, v := range a {
for _, k := range n[v] {
keys = append(keys, k)
}
}
return keys
}
| {
return math.Min(math.Max(val, min), max)
} | identifier_body |
bot.go | package main
import (
"fmt"
"io"
"log"
"math"
"math/rand"
"os"
"path"
"sort"
"strconv"
"time"
"github.com/go-errors/errors"
"github.com/andyleap/gioframework"
"github.com/xarg/gopathfinding"
"os/signal"
)
const (
TileEmpty = -1
TileMountain = -2
TileFog = -3
TileFogObstacle = -4
)
// If we allow too few future moves, then slow network means we could miss turns
// If we allow too many future moves, then bot is less adaptive to changing
// conditions
const MaxPlannedMoves = 8
const NumGamesToPlay = 100
func main() {
client, _ := gioframework.Connect("bot", os.Getenv("GENERALS_BOT_ID"), os.Getenv("GENERALS_BOT_NAME"))
go client.Run()
abort := false
ch := make(chan os.Signal)
signal.Notify(ch, os.Interrupt)
go func() {
<-ch
log.Println("abort set to true")
abort = true
<-ch
log.Println("ok leaving this function")
os.Exit(2)
}()
// Hack to help with race condition for setting name
time.Sleep(time.Second)
for i := 0; i < NumGamesToPlay; i++ {
if abort {
break
}
setupLogging()
log.Printf("---------- Game #%v/%v -----------", i+1, NumGamesToPlay)
realGame := os.Getenv("REAL_GAME") == "true"
var game *gioframework.Game
if realGame {
game = client.Join1v1()
log.Println("Waiting for opponent...")
} else {
gameId := "bot_game"
game = client.JoinCustomGame(gameId)
teamVar := os.Getenv("TEAM")
if teamVar != "" {
team, _ := strconv.Atoi(teamVar)
game.SetTeam(team, gameId)
}
url := "http://bot.generals.io/games/" + gameId
log.Printf("Joined custom game, go to: %v", url)
game.SetForceStart(true)
}
started := false
game.Start = func(playerIndex int, users []string) {
log.Println("Game started with ", users)
log.Printf("Replay available at: http://bot.generals.io/replays/%v", game.ReplayID)
for i, user := range users {
if i == playerIndex {
continue
}
game.SendChat(fmt.Sprintf("%v, prepare to be destroyed!", user))
}
started = true
}
done := false
game.Won = func() {
log.Println("=========================== Won game! ============================")
done = true
}
game.Lost = func() {
log.Println("============================ Lost game... ========================")
done = true
}
for !started {
time.Sleep(1 * time.Second)
}
time.Sleep(1 * time.Second)
for !done {
time.Sleep(100 * time.Millisecond)
if game.QueueLength() > 0 {
continue
}
if game.TurnCount < 20 {
continue
}
logTurnData(game)
from, toTarget := GetBestMove(game)
if from < 0 {
continue
}
path, err := GetShortestPath(game, from, toTarget)
if err != nil {
log.Println(err)
continue
}
if len(path) == 0 {
log.Printf("Registering impossible tile: %v", game.GetCoordString(toTarget))
game.ImpossibleTiles[toTarget] = true
}
max_num_moves := min(len(path)-1, MaxPlannedMoves)
for i := 0; i < max_num_moves; i++ {
log.Printf("Move army: %v -> %v (Armies: %v -> %v)",
game.GetCoordString(path[i]), game.GetCoordString(path[i+1]),
game.GameMap[path[i]].Armies, game.GameMap[path[i+1]].Armies)
game.Attack(path[i], path[i+1], false)
}
}
log.Printf("Replay available at: http://bot.generals.io/replays/%v", game.ReplayID)
time.Sleep(10*time.Second)
}
}
func setupLogging() {
logDir := "log"
_ = os.Mkdir(logDir, os.ModePerm)
rand.Seed(time.Now().UTC().UnixNano())
logFilename := path.Join(logDir, "log_"+strconv.Itoa(rand.Intn(10000)))
logFile, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
check(err)
mw := io.MultiWriter(os.Stdout, logFile)
log.SetOutput(mw)
}
func logTurnData(g *gioframework.Game) {
log.Println("------------------------------------------")
log.Printf("Turn: %v (UI%v)", g.TurnCount, float64(g.TurnCount)/2.)
var msgs []string
for i, s := range g.Scores {
msg := fmt.Sprintf("%10v: Tiles: %v, Army: %v", g.Usernames[i], s.Tiles, s.Armies)
msgs = append(msgs, msg)
}
sort.Strings(msgs)
for _, msg := range msgs {
log.Println(msg)
}
if g.TurnCount < 10 {
log.Printf("My General at: %v", g.GetCoordString(g.Generals[g.PlayerIndex]))
}
log.Println("------------------------------------------")
}
func check(err error) {
if err != nil {
panic(err)
}
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func | (b bool) int {
if b {
return 1
}
return 0
}
func Btof(b bool) float64 {
if b {
return 1.
}
return 0.
}
func getHeuristicPathDistance(game *gioframework.Game, from, to int) float64 {
/* Would have preferred to use A* to get actual path distance, but that's
prohibitvely expensive. (I need to calculate this many times per turn)
*/
baseDistance := game.GetDistance(from, to)
tilesInSquare := getTilesInSquare(game, from, to)
numObstacles := 0.
for _, tile := range tilesInSquare {
numObstacles += Btof(!game.Walkable(tile))
}
total_area := len(tilesInSquare)
obstacleRatio := numObstacles / float64(total_area)
// Not sure this is the best heuristic, but it's simple, so I'll use it for
// now
hDist := float64(baseDistance) * (1. + 2.0*obstacleRatio)
//log.Printf("hDist from %v to %v is: %v", game.GetCoordString(from), game.GetCoordString(to), hDist)
//log.Println("tilesInSquare: ")
//for _, i := range tilesInSquare {
// log.Println(game.GetCoordString(i))
//}
//log.Println("baseDistance:", baseDistance)
//log.Println("obstacleRatio:", obstacleRatio)
return hDist
}
func getTilesInSquare(game *gioframework.Game, i, j int) []int {
// Gets index of all tiles in a square defined by two diagonally opposed
// corners
rowI := game.GetRow(i)
colI := game.GetCol(i)
rowJ := game.GetRow(j)
colJ := game.GetCol(j)
rowLimits := []int{rowI, rowJ}
colLimits := []int{colI, colJ}
sort.Ints(rowLimits)
sort.Ints(colLimits)
var tiles []int
for row := rowLimits[0]; row < rowLimits[1]+1; row++ {
for col := colLimits[0]; col < colLimits[1]+1; col++ {
tiles = append(tiles, game.GetIndex(row, col))
}
}
return tiles
}
type AstarError struct {
From, To string
}
func (e AstarError) Error() string {
return fmt.Sprintf("Astar error with from:%v to:%v", e.From, e.To)
}
func GetShortestPath(game *gioframework.Game, from, to int) (path []int, err error) {
// pathfinding.Astar has no error handling, so we catch its panics
defer func() {
if r := recover(); r != nil {
log.Printf("ERROR, GetShortestPath recovered from panic: %v\n", r)
path = []int{}
err = AstarError{
game.GetCoordString(from),
game.GetCoordString(to),
}
}
}()
// TODO: if from and to are the same, just erturn an err.
map_data := *pathfinding.NewMapData(game.Height, game.Width)
for row := 0; row < game.Height; row++ {
for col := 0; col < game.Width; col++ {
i := game.GetIndex(row, col)
tile := game.GameMap[i]
// We don't want to accidentally attack cities on route to
// somewhere else. Note: if it is the final destination, it'll be
// changed
not_my_city := tile.Type == gioframework.City && tile.Faction != game.PlayerIndex
map_data[row][col] = Btoi(!game.Walkable(i) || not_my_city)
}
}
map_data[game.GetRow(from)][game.GetCol(from)] = pathfinding.START
map_data[game.GetRow(to)][game.GetCol(to)] = pathfinding.STOP
graph := pathfinding.NewGraph(&map_data)
nodesPath := pathfinding.Astar(graph)
path = []int{}
for _, node := range nodesPath {
path = append(path, game.GetIndex(node.X, node.Y))
}
return path, nil
}
func GetBestMove(game *gioframework.Game) (bestFrom int, bestTo int) {
defer func() {
if r := recover(); r != nil {
log.Printf("ERROR, GetBestMove recovered from panic: %v", r)
fmt.Println(errors.Wrap(r, 2).ErrorStack())
bestFrom = -1
bestTo = -1
}
}()
bestFrom = -1
bestTo = -1
bestTotalScore := -10.
var bestScores map[string]float64
myGeneral := game.Generals[game.PlayerIndex]
enemyCOM := getEnemyCenterOfMass(game)
/// First check for attacking new empty or enemy tiles
for from, fromTile := range game.GameMap {
if fromTile.Faction != game.PlayerIndex || fromTile.Armies < 2 {
continue
}
for to, toTile := range game.GameMap {
if toTile.Faction < TileEmpty || toTile.Faction == game.PlayerIndex {
continue
}
if game.ImpossibleTiles[to] {
continue
}
isEmpty := toTile.Faction == TileEmpty
isEnemy := IsEnemy(game, toTile)
isGeneral := toTile.Type == gioframework.General
isCity := toTile.Type == gioframework.City
outnumber := float64(fromTile.Armies - toTile.Armies)
dist := getHeuristicPathDistance(game, from, to)
distFromGen := getHeuristicPathDistance(game, myGeneral, to)
center := game.GetIndex(game.Width/2, game.Height/2)
distCenter := getHeuristicPathDistance(game, center, to)
centerness := 1. - distCenter/float64(game.Width)
// This is the vector pointing towards the enemy
enemyVector := [2]int{
game.GetRow(enemyCOM) - game.GetRow(myGeneral),
game.GetCol(enemyCOM) - game.GetCol(myGeneral),
}
// The vector showing the proposed move
moveVector := [2]int{
game.GetRow(to) - game.GetRow(from),
game.GetCol(to) - game.GetCol(from),
}
neighbors := game.GetNeighborhood(to, false)
numAlliedNeighbors := 0
for _, neighbor := range neighbors {
if !IsEnemy(game, game.GameMap[neighbor]) {
numAlliedNeighbors += 1
}
}
if isCity && outnumber < 2 && !isEnemy {
// Never attack a neutral city and lose
continue
}
scores := make(map[string]float64)
scores["outnumber score"] = Truncate(outnumber/200, 0., 0.25) * Btof(isEnemy)
scores["outnumbered penalty"] = -0.2 * Btof(outnumber < 2)
scores["general threat score"] = (0.25 * math.Pow(distFromGen, -1.0)) *
Truncate(float64(toTile.Armies)/10, 0., 1.0) * Btof(isEnemy)
scores["dist penalty"] = Truncate(-0.5*dist/30, -0.3, 0)
scores["dist gt army penalty"] = -0.2 * Btof(fromTile.Armies < int(dist))
scores["is enemy score"] = 0.05 * Btof(isEnemy)
scores["close city score"] = 0.35 * Btof(isCity && outnumber >= 2) *
math.Pow(distFromGen, -0.5)
scores["enemy city score"] = 0.2 * Btof(isCity && isEnemy)
scores["enemy gen score"] = 0.15 * Btof(isGeneral) * Btof(isEnemy)
scores["empty score"] = 0.08 * Btof(isEmpty)
// Generally a good strategy to take the center of the board
scores["centerness score"] = 0.02 * centerness
// You should move towards enemy's main base, not random little
// patches of enemy. This prevents the bot from "cleaning up"
// irrelevant squares. This could be improved by making the vectors
// normalized and having the score gradually increase as you point
// towards the enemy
scores["towards enemy score"] = 0.03 * Btof(dotProduct(enemyVector, moveVector) > 1)
// Instead of attacking all the tiles on the enemy's border it is
// typically better to make a deep drive into enemy land
scores["deep drive score"] = 0.04 * Btof(numAlliedNeighbors < 2)
totalScore := 0.
for _, score := range scores {
totalScore += score
}
//log.Printf("============Considering move %v->%v, got score: %v\n", from, to, totalScore)
//logSortedScores(scores)
if totalScore > bestTotalScore {
bestScores = scores
bestTotalScore = totalScore
bestFrom = from
bestTo = to
}
}
}
logSortedScores(bestScores)
log.Printf("Attack score: %.2f", bestTotalScore)
log.Printf("From:%v To:%v", game.GetCoordString(bestFrom), game.GetCoordString(bestTo))
log.Println("--------")
/////////////// Then check for consolidation //////////////////////////////
//consolScore := getConsolidationScore(game)
// It's a good idea to consolidate armies right after the armies regenerate.
// armyCycle shows the amount of the cycle that's passed. [0, 1]
armyCycle := float64(game.TurnCount % 50) / 50
consolScore := 0.6 * math.Pow(1-armyCycle, 6) - 0.2
log.Printf("Consolidation score:%.2f", consolScore)
tiles := getTilesSortedOnArmies(game)
if len(tiles) > 10 && consolScore > bestTotalScore {
largestTile := tiles[0]
for _, tile := range tiles[:5] {
log.Printf("Army ranked: %v", game.GameMap[tile].Armies)
}
highestAvArmy := 0.
for _, from := range tiles[1:] {
if game.GameMap[from].Armies < 2 {
continue
}
// Warning: this path could cut through enemy territory! Keep an
// eye for this
path_, _ := GetShortestPath(game, from, largestTile) // TODO: handle err //TODO: this throws an error it seems! largestTile == from ??!!
armies := 0
for _, i := range path_[:len(path_)-1] {
armies += game.GameMap[i].Armies
}
av_army := float64(armies) / float64(len(path_)-1)
if av_army > highestAvArmy {
highestAvArmy = av_army
bestFrom = from
}
}
bestTo = largestTile
// We want to move towards the enemy. Reverse if that's not the
// case
if enemyCOM >= 0 {
fromDistEnemy := getHeuristicPathDistance(game, enemyCOM, bestFrom)
toDistEnemy := getHeuristicPathDistance(game, enemyCOM, bestTo)
if fromDistEnemy < toDistEnemy {
log.Println("Switching direction of consolidation")
bestFrom, bestTo = bestTo, bestFrom
}
}
log.Printf("Consolidating, with average army: %v", highestAvArmy)
}
// Trash talk
toTile := game.GameMap[bestTo]
if toTile.Type == gioframework.City && IsEnemy(game, toTile) {
game.SendChat("Sorry, I'm gonna need that citadel")
}
return bestFrom, bestTo
}
func IsEnemy(game *gioframework.Game, tile gioframework.Cell) bool {
if len(game.Teams) == 0 {
// This means we're playing 1v1 or FFA
return tile.Faction != game.PlayerIndex && tile.Faction >= 0
} else {
myTeam := game.Teams[game.PlayerIndex]
return tile.Faction >= 0 && game.Teams[tile.Faction] != myTeam
}
}
func logSortedScores(scores map[string]float64) {
keys := make([]string, len(scores))
i := 0
for k := range scores {
keys[i] = k
i++
}
sort.Strings(keys)
for _, k := range keys {
log.Printf("%20v: %.3f\n", k, scores[k])
}
}
func Truncate(val, min, max float64) float64 {
return math.Min(math.Max(val, min), max)
}
func Sum(x []int) int { // TODO make this an interface for fun
sum := 0
for _, i := range x {
sum += i
}
return sum
}
func dotProduct(x [2]int, y [2]int) float64 {
return float64(x[0]) * float64(y[0]) + float64(x[1]) * float64(y[1])
}
// getEnemyCenterOfMass find the central point of the visible enemy terrain,
// weighted by armies, and rounded to the closes tile.
func getEnemyCenterOfMass(game *gioframework.Game) int {
rows := []int{}
cols := []int{}
armies := 0
for i, tile := range game.GameMap {
if IsEnemy(game, tile) {
army := tile.Armies
rows = append(rows, army*game.GetRow(i))
cols = append(cols, army*game.GetCol(i))
armies += army
}
}
var COM int
if armies == 0 {
COM = -1
log.Println("COM is: -1")
return COM
}
avRow := float64(Sum(rows))/float64(armies)
avCol := float64(Sum(cols))/float64(armies)
COM = game.GetIndex(int(avRow), int(avCol))
log.Printf("COM is: %v\n", game.GetCoordString(COM))
return COM
}
func getConsolidationScore(game *gioframework.Game) float64 {
gini := getArmyGiniCoefficient(game)
totalArmy := float64(game.Scores[game.PlayerIndex].Armies)
log.Printf("Gini coefficient: %.2f", gini)
log.Printf("Total army: %v", totalArmy)
return (0.65 - gini) * Truncate(totalArmy/500., 0.5, 2.)
}
func getArmyGiniCoefficient(game *gioframework.Game) float64 {
movableArmies := []int{}
for _, tile := range game.GameMap {
if tile.Faction == game.PlayerIndex {
movableArmies = append(movableArmies, tile.Armies-1)
}
}
//log.Printf("movableArmies: %v", movableArmies)
return giniCoefficient(movableArmies)
}
// giniCoefficient is calculated as described here:
// https://en.wikipedia.org/wiki/Gini_coefficient#Alternate_expressions
func giniCoefficient(nums []int) float64 {
sort.Ints(nums)
n := len(nums)
denom := 0
for _, num := range nums {
denom += num
}
denom *= n
numer := 0
for i, num := range nums {
numer += (i + 1) * num
}
numer *= 2
return float64(numer)/float64(denom) - float64(n+1)/float64(n)
}
func getTilesSortedOnArmies(game *gioframework.Game) []int {
tileToArmy := make(map[int]int)
for i := 0; i < game.Height*game.Width; i++ {
tile := game.GameMap[i]
if tile.Faction == game.PlayerIndex {
tileToArmy[i] = tile.Armies
}
}
largestArmyTiles := sortKeysByValues(tileToArmy, true)
return largestArmyTiles
}
func sortKeysByValues(m map[int]int, reversed bool) []int {
n := map[int][]int{}
var a []int
for k, v := range m {
n[v] = append(n[v], k)
}
for v := range n {
a = append(a, v)
}
keys := []int{}
if reversed {
sort.Sort(sort.Reverse(sort.IntSlice(a)))
} else {
sort.Sort(sort.IntSlice(a))
}
for _, v := range a {
for _, k := range n[v] {
keys = append(keys, k)
}
}
return keys
}
| Btoi | identifier_name |
bot.go | package main
import (
"fmt"
"io"
"log"
"math"
"math/rand"
"os"
"path"
"sort"
"strconv"
"time"
"github.com/go-errors/errors"
"github.com/andyleap/gioframework"
"github.com/xarg/gopathfinding"
"os/signal"
)
const (
TileEmpty = -1
TileMountain = -2
TileFog = -3
TileFogObstacle = -4
)
// If we allow too few future moves, then slow network means we could miss turns
// If we allow too many future moves, then bot is less adaptive to changing
// conditions
const MaxPlannedMoves = 8
const NumGamesToPlay = 100
func main() {
client, _ := gioframework.Connect("bot", os.Getenv("GENERALS_BOT_ID"), os.Getenv("GENERALS_BOT_NAME"))
go client.Run()
abort := false
ch := make(chan os.Signal)
signal.Notify(ch, os.Interrupt)
go func() {
<-ch
log.Println("abort set to true")
abort = true
<-ch
log.Println("ok leaving this function")
os.Exit(2)
}()
// Hack to help with race condition for setting name
time.Sleep(time.Second)
for i := 0; i < NumGamesToPlay; i++ {
if abort {
break
}
setupLogging()
log.Printf("---------- Game #%v/%v -----------", i+1, NumGamesToPlay)
realGame := os.Getenv("REAL_GAME") == "true"
var game *gioframework.Game
if realGame {
game = client.Join1v1()
log.Println("Waiting for opponent...")
} else {
gameId := "bot_game"
game = client.JoinCustomGame(gameId)
teamVar := os.Getenv("TEAM")
if teamVar != "" {
team, _ := strconv.Atoi(teamVar)
game.SetTeam(team, gameId)
}
url := "http://bot.generals.io/games/" + gameId
log.Printf("Joined custom game, go to: %v", url)
game.SetForceStart(true)
}
started := false
game.Start = func(playerIndex int, users []string) {
log.Println("Game started with ", users)
log.Printf("Replay available at: http://bot.generals.io/replays/%v", game.ReplayID)
for i, user := range users {
if i == playerIndex {
continue
}
game.SendChat(fmt.Sprintf("%v, prepare to be destroyed!", user))
}
started = true
}
done := false
game.Won = func() {
log.Println("=========================== Won game! ============================")
done = true
}
game.Lost = func() {
log.Println("============================ Lost game... ========================")
done = true
}
for !started {
time.Sleep(1 * time.Second)
}
time.Sleep(1 * time.Second)
for !done {
time.Sleep(100 * time.Millisecond)
if game.QueueLength() > 0 {
continue
}
if game.TurnCount < 20 {
continue
}
logTurnData(game)
from, toTarget := GetBestMove(game)
if from < 0 {
continue
}
path, err := GetShortestPath(game, from, toTarget)
if err != nil {
log.Println(err)
continue
}
if len(path) == 0 {
log.Printf("Registering impossible tile: %v", game.GetCoordString(toTarget))
game.ImpossibleTiles[toTarget] = true
}
max_num_moves := min(len(path)-1, MaxPlannedMoves)
for i := 0; i < max_num_moves; i++ {
log.Printf("Move army: %v -> %v (Armies: %v -> %v)",
game.GetCoordString(path[i]), game.GetCoordString(path[i+1]),
game.GameMap[path[i]].Armies, game.GameMap[path[i+1]].Armies)
game.Attack(path[i], path[i+1], false)
}
}
log.Printf("Replay available at: http://bot.generals.io/replays/%v", game.ReplayID)
time.Sleep(10*time.Second)
}
}
func setupLogging() {
logDir := "log"
_ = os.Mkdir(logDir, os.ModePerm)
rand.Seed(time.Now().UTC().UnixNano())
logFilename := path.Join(logDir, "log_"+strconv.Itoa(rand.Intn(10000)))
logFile, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
check(err)
mw := io.MultiWriter(os.Stdout, logFile)
log.SetOutput(mw)
}
func logTurnData(g *gioframework.Game) {
log.Println("------------------------------------------")
log.Printf("Turn: %v (UI%v)", g.TurnCount, float64(g.TurnCount)/2.)
var msgs []string
for i, s := range g.Scores {
msg := fmt.Sprintf("%10v: Tiles: %v, Army: %v", g.Usernames[i], s.Tiles, s.Armies)
msgs = append(msgs, msg)
}
sort.Strings(msgs)
for _, msg := range msgs {
log.Println(msg)
}
if g.TurnCount < 10 {
log.Printf("My General at: %v", g.GetCoordString(g.Generals[g.PlayerIndex]))
}
log.Println("------------------------------------------")
}
func check(err error) {
if err != nil {
panic(err)
}
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func Btoi(b bool) int {
if b {
return 1
}
return 0
}
func Btof(b bool) float64 {
if b {
return 1.
}
return 0.
}
func getHeuristicPathDistance(game *gioframework.Game, from, to int) float64 {
/* Would have preferred to use A* to get actual path distance, but that's
prohibitvely expensive. (I need to calculate this many times per turn)
*/
baseDistance := game.GetDistance(from, to)
tilesInSquare := getTilesInSquare(game, from, to)
numObstacles := 0.
for _, tile := range tilesInSquare {
numObstacles += Btof(!game.Walkable(tile))
}
total_area := len(tilesInSquare)
obstacleRatio := numObstacles / float64(total_area)
// Not sure this is the best heuristic, but it's simple, so I'll use it for
// now
hDist := float64(baseDistance) * (1. + 2.0*obstacleRatio)
//log.Printf("hDist from %v to %v is: %v", game.GetCoordString(from), game.GetCoordString(to), hDist)
//log.Println("tilesInSquare: ")
//for _, i := range tilesInSquare {
// log.Println(game.GetCoordString(i))
//}
//log.Println("baseDistance:", baseDistance)
//log.Println("obstacleRatio:", obstacleRatio)
return hDist
}
func getTilesInSquare(game *gioframework.Game, i, j int) []int {
// Gets index of all tiles in a square defined by two diagonally opposed
// corners
rowI := game.GetRow(i)
colI := game.GetCol(i)
rowJ := game.GetRow(j)
colJ := game.GetCol(j)
rowLimits := []int{rowI, rowJ}
colLimits := []int{colI, colJ}
sort.Ints(rowLimits)
sort.Ints(colLimits)
var tiles []int
for row := rowLimits[0]; row < rowLimits[1]+1; row++ {
for col := colLimits[0]; col < colLimits[1]+1; col++ {
tiles = append(tiles, game.GetIndex(row, col))
}
}
return tiles
}
type AstarError struct {
From, To string
}
func (e AstarError) Error() string {
return fmt.Sprintf("Astar error with from:%v to:%v", e.From, e.To)
}
func GetShortestPath(game *gioframework.Game, from, to int) (path []int, err error) {
// pathfinding.Astar has no error handling, so we catch its panics
defer func() {
if r := recover(); r != nil {
log.Printf("ERROR, GetShortestPath recovered from panic: %v\n", r)
path = []int{}
err = AstarError{
game.GetCoordString(from),
game.GetCoordString(to),
}
}
}()
// TODO: if from and to are the same, just erturn an err.
map_data := *pathfinding.NewMapData(game.Height, game.Width)
for row := 0; row < game.Height; row++ {
for col := 0; col < game.Width; col++ {
i := game.GetIndex(row, col)
tile := game.GameMap[i]
// We don't want to accidentally attack cities on route to
// somewhere else. Note: if it is the final destination, it'll be
// changed
not_my_city := tile.Type == gioframework.City && tile.Faction != game.PlayerIndex
map_data[row][col] = Btoi(!game.Walkable(i) || not_my_city)
}
}
map_data[game.GetRow(from)][game.GetCol(from)] = pathfinding.START
map_data[game.GetRow(to)][game.GetCol(to)] = pathfinding.STOP
graph := pathfinding.NewGraph(&map_data)
nodesPath := pathfinding.Astar(graph)
path = []int{}
for _, node := range nodesPath {
path = append(path, game.GetIndex(node.X, node.Y))
}
return path, nil
}
func GetBestMove(game *gioframework.Game) (bestFrom int, bestTo int) {
defer func() {
if r := recover(); r != nil {
log.Printf("ERROR, GetBestMove recovered from panic: %v", r)
fmt.Println(errors.Wrap(r, 2).ErrorStack())
bestFrom = -1
bestTo = -1
}
}()
bestFrom = -1
bestTo = -1
bestTotalScore := -10.
var bestScores map[string]float64
myGeneral := game.Generals[game.PlayerIndex]
enemyCOM := getEnemyCenterOfMass(game)
/// First check for attacking new empty or enemy tiles
for from, fromTile := range game.GameMap {
if fromTile.Faction != game.PlayerIndex || fromTile.Armies < 2 {
continue
}
for to, toTile := range game.GameMap {
if toTile.Faction < TileEmpty || toTile.Faction == game.PlayerIndex {
continue
}
if game.ImpossibleTiles[to] {
continue
}
isEmpty := toTile.Faction == TileEmpty
isEnemy := IsEnemy(game, toTile)
isGeneral := toTile.Type == gioframework.General
isCity := toTile.Type == gioframework.City
outnumber := float64(fromTile.Armies - toTile.Armies)
dist := getHeuristicPathDistance(game, from, to)
distFromGen := getHeuristicPathDistance(game, myGeneral, to)
center := game.GetIndex(game.Width/2, game.Height/2)
distCenter := getHeuristicPathDistance(game, center, to)
centerness := 1. - distCenter/float64(game.Width)
// This is the vector pointing towards the enemy
enemyVector := [2]int{
game.GetRow(enemyCOM) - game.GetRow(myGeneral),
game.GetCol(enemyCOM) - game.GetCol(myGeneral),
}
// The vector showing the proposed move
moveVector := [2]int{
game.GetRow(to) - game.GetRow(from),
game.GetCol(to) - game.GetCol(from),
}
neighbors := game.GetNeighborhood(to, false)
numAlliedNeighbors := 0
for _, neighbor := range neighbors {
if !IsEnemy(game, game.GameMap[neighbor]) {
numAlliedNeighbors += 1
}
}
if isCity && outnumber < 2 && !isEnemy {
// Never attack a neutral city and lose
continue
}
scores := make(map[string]float64)
| scores["outnumbered penalty"] = -0.2 * Btof(outnumber < 2)
scores["general threat score"] = (0.25 * math.Pow(distFromGen, -1.0)) *
Truncate(float64(toTile.Armies)/10, 0., 1.0) * Btof(isEnemy)
scores["dist penalty"] = Truncate(-0.5*dist/30, -0.3, 0)
scores["dist gt army penalty"] = -0.2 * Btof(fromTile.Armies < int(dist))
scores["is enemy score"] = 0.05 * Btof(isEnemy)
scores["close city score"] = 0.35 * Btof(isCity && outnumber >= 2) *
math.Pow(distFromGen, -0.5)
scores["enemy city score"] = 0.2 * Btof(isCity && isEnemy)
scores["enemy gen score"] = 0.15 * Btof(isGeneral) * Btof(isEnemy)
scores["empty score"] = 0.08 * Btof(isEmpty)
// Generally a good strategy to take the center of the board
scores["centerness score"] = 0.02 * centerness
// You should move towards enemy's main base, not random little
// patches of enemy. This prevents the bot from "cleaning up"
// irrelevant squares. This could be improved by making the vectors
// normalized and having the score gradually increase as you point
// towards the enemy
scores["towards enemy score"] = 0.03 * Btof(dotProduct(enemyVector, moveVector) > 1)
// Instead of attacking all the tiles on the enemy's border it is
// typically better to make a deep drive into enemy land
scores["deep drive score"] = 0.04 * Btof(numAlliedNeighbors < 2)
totalScore := 0.
for _, score := range scores {
totalScore += score
}
//log.Printf("============Considering move %v->%v, got score: %v\n", from, to, totalScore)
//logSortedScores(scores)
if totalScore > bestTotalScore {
bestScores = scores
bestTotalScore = totalScore
bestFrom = from
bestTo = to
}
}
}
logSortedScores(bestScores)
log.Printf("Attack score: %.2f", bestTotalScore)
log.Printf("From:%v To:%v", game.GetCoordString(bestFrom), game.GetCoordString(bestTo))
log.Println("--------")
/////////////// Then check for consolidation //////////////////////////////
//consolScore := getConsolidationScore(game)
// It's a good idea to consolidate armies right after the armies regenerate.
// armyCycle shows the amount of the cycle that's passed. [0, 1]
armyCycle := float64(game.TurnCount % 50) / 50
consolScore := 0.6 * math.Pow(1-armyCycle, 6) - 0.2
log.Printf("Consolidation score:%.2f", consolScore)
tiles := getTilesSortedOnArmies(game)
if len(tiles) > 10 && consolScore > bestTotalScore {
largestTile := tiles[0]
for _, tile := range tiles[:5] {
log.Printf("Army ranked: %v", game.GameMap[tile].Armies)
}
highestAvArmy := 0.
for _, from := range tiles[1:] {
if game.GameMap[from].Armies < 2 {
continue
}
// Warning: this path could cut through enemy territory! Keep an
// eye for this
path_, _ := GetShortestPath(game, from, largestTile) // TODO: handle err //TODO: this throws an error it seems! largestTile == from ??!!
armies := 0
for _, i := range path_[:len(path_)-1] {
armies += game.GameMap[i].Armies
}
av_army := float64(armies) / float64(len(path_)-1)
if av_army > highestAvArmy {
highestAvArmy = av_army
bestFrom = from
}
}
bestTo = largestTile
// We want to move towards the enemy. Reverse if that's not the
// case
if enemyCOM >= 0 {
fromDistEnemy := getHeuristicPathDistance(game, enemyCOM, bestFrom)
toDistEnemy := getHeuristicPathDistance(game, enemyCOM, bestTo)
if fromDistEnemy < toDistEnemy {
log.Println("Switching direction of consolidation")
bestFrom, bestTo = bestTo, bestFrom
}
}
log.Printf("Consolidating, with average army: %v", highestAvArmy)
}
// Trash talk
toTile := game.GameMap[bestTo]
if toTile.Type == gioframework.City && IsEnemy(game, toTile) {
game.SendChat("Sorry, I'm gonna need that citadel")
}
return bestFrom, bestTo
}
func IsEnemy(game *gioframework.Game, tile gioframework.Cell) bool {
if len(game.Teams) == 0 {
// This means we're playing 1v1 or FFA
return tile.Faction != game.PlayerIndex && tile.Faction >= 0
} else {
myTeam := game.Teams[game.PlayerIndex]
return tile.Faction >= 0 && game.Teams[tile.Faction] != myTeam
}
}
func logSortedScores(scores map[string]float64) {
keys := make([]string, len(scores))
i := 0
for k := range scores {
keys[i] = k
i++
}
sort.Strings(keys)
for _, k := range keys {
log.Printf("%20v: %.3f\n", k, scores[k])
}
}
func Truncate(val, min, max float64) float64 {
return math.Min(math.Max(val, min), max)
}
func Sum(x []int) int { // TODO make this an interface for fun
sum := 0
for _, i := range x {
sum += i
}
return sum
}
func dotProduct(x [2]int, y [2]int) float64 {
return float64(x[0]) * float64(y[0]) + float64(x[1]) * float64(y[1])
}
// getEnemyCenterOfMass find the central point of the visible enemy terrain,
// weighted by armies, and rounded to the closes tile.
func getEnemyCenterOfMass(game *gioframework.Game) int {
rows := []int{}
cols := []int{}
armies := 0
for i, tile := range game.GameMap {
if IsEnemy(game, tile) {
army := tile.Armies
rows = append(rows, army*game.GetRow(i))
cols = append(cols, army*game.GetCol(i))
armies += army
}
}
var COM int
if armies == 0 {
COM = -1
log.Println("COM is: -1")
return COM
}
avRow := float64(Sum(rows))/float64(armies)
avCol := float64(Sum(cols))/float64(armies)
COM = game.GetIndex(int(avRow), int(avCol))
log.Printf("COM is: %v\n", game.GetCoordString(COM))
return COM
}
func getConsolidationScore(game *gioframework.Game) float64 {
gini := getArmyGiniCoefficient(game)
totalArmy := float64(game.Scores[game.PlayerIndex].Armies)
log.Printf("Gini coefficient: %.2f", gini)
log.Printf("Total army: %v", totalArmy)
return (0.65 - gini) * Truncate(totalArmy/500., 0.5, 2.)
}
func getArmyGiniCoefficient(game *gioframework.Game) float64 {
movableArmies := []int{}
for _, tile := range game.GameMap {
if tile.Faction == game.PlayerIndex {
movableArmies = append(movableArmies, tile.Armies-1)
}
}
//log.Printf("movableArmies: %v", movableArmies)
return giniCoefficient(movableArmies)
}
// giniCoefficient is calculated as described here:
// https://en.wikipedia.org/wiki/Gini_coefficient#Alternate_expressions
func giniCoefficient(nums []int) float64 {
sort.Ints(nums)
n := len(nums)
denom := 0
for _, num := range nums {
denom += num
}
denom *= n
numer := 0
for i, num := range nums {
numer += (i + 1) * num
}
numer *= 2
return float64(numer)/float64(denom) - float64(n+1)/float64(n)
}
func getTilesSortedOnArmies(game *gioframework.Game) []int {
tileToArmy := make(map[int]int)
for i := 0; i < game.Height*game.Width; i++ {
tile := game.GameMap[i]
if tile.Faction == game.PlayerIndex {
tileToArmy[i] = tile.Armies
}
}
largestArmyTiles := sortKeysByValues(tileToArmy, true)
return largestArmyTiles
}
func sortKeysByValues(m map[int]int, reversed bool) []int {
n := map[int][]int{}
var a []int
for k, v := range m {
n[v] = append(n[v], k)
}
for v := range n {
a = append(a, v)
}
keys := []int{}
if reversed {
sort.Sort(sort.Reverse(sort.IntSlice(a)))
} else {
sort.Sort(sort.IntSlice(a))
}
for _, v := range a {
for _, k := range n[v] {
keys = append(keys, k)
}
}
return keys
} | scores["outnumber score"] = Truncate(outnumber/200, 0., 0.25) * Btof(isEnemy) | random_line_split |
bot.go | package main
import (
"fmt"
"io"
"log"
"math"
"math/rand"
"os"
"path"
"sort"
"strconv"
"time"
"github.com/go-errors/errors"
"github.com/andyleap/gioframework"
"github.com/xarg/gopathfinding"
"os/signal"
)
const (
TileEmpty = -1
TileMountain = -2
TileFog = -3
TileFogObstacle = -4
)
// If we allow too few future moves, then slow network means we could miss turns
// If we allow too many future moves, then bot is less adaptive to changing
// conditions
const MaxPlannedMoves = 8
const NumGamesToPlay = 100
func main() {
client, _ := gioframework.Connect("bot", os.Getenv("GENERALS_BOT_ID"), os.Getenv("GENERALS_BOT_NAME"))
go client.Run()
abort := false
ch := make(chan os.Signal)
signal.Notify(ch, os.Interrupt)
go func() {
<-ch
log.Println("abort set to true")
abort = true
<-ch
log.Println("ok leaving this function")
os.Exit(2)
}()
// Hack to help with race condition for setting name
time.Sleep(time.Second)
for i := 0; i < NumGamesToPlay; i++ {
if abort {
break
}
setupLogging()
log.Printf("---------- Game #%v/%v -----------", i+1, NumGamesToPlay)
realGame := os.Getenv("REAL_GAME") == "true"
var game *gioframework.Game
if realGame {
game = client.Join1v1()
log.Println("Waiting for opponent...")
} else {
gameId := "bot_game"
game = client.JoinCustomGame(gameId)
teamVar := os.Getenv("TEAM")
if teamVar != "" {
team, _ := strconv.Atoi(teamVar)
game.SetTeam(team, gameId)
}
url := "http://bot.generals.io/games/" + gameId
log.Printf("Joined custom game, go to: %v", url)
game.SetForceStart(true)
}
started := false
game.Start = func(playerIndex int, users []string) {
log.Println("Game started with ", users)
log.Printf("Replay available at: http://bot.generals.io/replays/%v", game.ReplayID)
for i, user := range users {
if i == playerIndex {
continue
}
game.SendChat(fmt.Sprintf("%v, prepare to be destroyed!", user))
}
started = true
}
done := false
game.Won = func() {
log.Println("=========================== Won game! ============================")
done = true
}
game.Lost = func() {
log.Println("============================ Lost game... ========================")
done = true
}
for !started {
time.Sleep(1 * time.Second)
}
time.Sleep(1 * time.Second)
for !done {
time.Sleep(100 * time.Millisecond)
if game.QueueLength() > 0 {
continue
}
if game.TurnCount < 20 {
continue
}
logTurnData(game)
from, toTarget := GetBestMove(game)
if from < 0 {
continue
}
path, err := GetShortestPath(game, from, toTarget)
if err != nil {
log.Println(err)
continue
}
if len(path) == 0 {
log.Printf("Registering impossible tile: %v", game.GetCoordString(toTarget))
game.ImpossibleTiles[toTarget] = true
}
max_num_moves := min(len(path)-1, MaxPlannedMoves)
for i := 0; i < max_num_moves; i++ {
log.Printf("Move army: %v -> %v (Armies: %v -> %v)",
game.GetCoordString(path[i]), game.GetCoordString(path[i+1]),
game.GameMap[path[i]].Armies, game.GameMap[path[i+1]].Armies)
game.Attack(path[i], path[i+1], false)
}
}
log.Printf("Replay available at: http://bot.generals.io/replays/%v", game.ReplayID)
time.Sleep(10*time.Second)
}
}
func setupLogging() {
logDir := "log"
_ = os.Mkdir(logDir, os.ModePerm)
rand.Seed(time.Now().UTC().UnixNano())
logFilename := path.Join(logDir, "log_"+strconv.Itoa(rand.Intn(10000)))
logFile, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
check(err)
mw := io.MultiWriter(os.Stdout, logFile)
log.SetOutput(mw)
}
func logTurnData(g *gioframework.Game) {
log.Println("------------------------------------------")
log.Printf("Turn: %v (UI%v)", g.TurnCount, float64(g.TurnCount)/2.)
var msgs []string
for i, s := range g.Scores {
msg := fmt.Sprintf("%10v: Tiles: %v, Army: %v", g.Usernames[i], s.Tiles, s.Armies)
msgs = append(msgs, msg)
}
sort.Strings(msgs)
for _, msg := range msgs {
log.Println(msg)
}
if g.TurnCount < 10 {
log.Printf("My General at: %v", g.GetCoordString(g.Generals[g.PlayerIndex]))
}
log.Println("------------------------------------------")
}
func check(err error) {
if err != nil {
panic(err)
}
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func Btoi(b bool) int {
if b {
return 1
}
return 0
}
func Btof(b bool) float64 {
if b {
return 1.
}
return 0.
}
func getHeuristicPathDistance(game *gioframework.Game, from, to int) float64 {
/* Would have preferred to use A* to get actual path distance, but that's
prohibitvely expensive. (I need to calculate this many times per turn)
*/
baseDistance := game.GetDistance(from, to)
tilesInSquare := getTilesInSquare(game, from, to)
numObstacles := 0.
for _, tile := range tilesInSquare {
numObstacles += Btof(!game.Walkable(tile))
}
total_area := len(tilesInSquare)
obstacleRatio := numObstacles / float64(total_area)
// Not sure this is the best heuristic, but it's simple, so I'll use it for
// now
hDist := float64(baseDistance) * (1. + 2.0*obstacleRatio)
//log.Printf("hDist from %v to %v is: %v", game.GetCoordString(from), game.GetCoordString(to), hDist)
//log.Println("tilesInSquare: ")
//for _, i := range tilesInSquare {
// log.Println(game.GetCoordString(i))
//}
//log.Println("baseDistance:", baseDistance)
//log.Println("obstacleRatio:", obstacleRatio)
return hDist
}
func getTilesInSquare(game *gioframework.Game, i, j int) []int {
// Gets index of all tiles in a square defined by two diagonally opposed
// corners
rowI := game.GetRow(i)
colI := game.GetCol(i)
rowJ := game.GetRow(j)
colJ := game.GetCol(j)
rowLimits := []int{rowI, rowJ}
colLimits := []int{colI, colJ}
sort.Ints(rowLimits)
sort.Ints(colLimits)
var tiles []int
for row := rowLimits[0]; row < rowLimits[1]+1; row++ {
for col := colLimits[0]; col < colLimits[1]+1; col++ {
tiles = append(tiles, game.GetIndex(row, col))
}
}
return tiles
}
type AstarError struct {
From, To string
}
func (e AstarError) Error() string {
return fmt.Sprintf("Astar error with from:%v to:%v", e.From, e.To)
}
func GetShortestPath(game *gioframework.Game, from, to int) (path []int, err error) {
// pathfinding.Astar has no error handling, so we catch its panics
defer func() {
if r := recover(); r != nil {
log.Printf("ERROR, GetShortestPath recovered from panic: %v\n", r)
path = []int{}
err = AstarError{
game.GetCoordString(from),
game.GetCoordString(to),
}
}
}()
// TODO: if from and to are the same, just erturn an err.
map_data := *pathfinding.NewMapData(game.Height, game.Width)
for row := 0; row < game.Height; row++ {
for col := 0; col < game.Width; col++ {
i := game.GetIndex(row, col)
tile := game.GameMap[i]
// We don't want to accidentally attack cities on route to
// somewhere else. Note: if it is the final destination, it'll be
// changed
not_my_city := tile.Type == gioframework.City && tile.Faction != game.PlayerIndex
map_data[row][col] = Btoi(!game.Walkable(i) || not_my_city)
}
}
map_data[game.GetRow(from)][game.GetCol(from)] = pathfinding.START
map_data[game.GetRow(to)][game.GetCol(to)] = pathfinding.STOP
graph := pathfinding.NewGraph(&map_data)
nodesPath := pathfinding.Astar(graph)
path = []int{}
for _, node := range nodesPath {
path = append(path, game.GetIndex(node.X, node.Y))
}
return path, nil
}
func GetBestMove(game *gioframework.Game) (bestFrom int, bestTo int) {
defer func() {
if r := recover(); r != nil {
log.Printf("ERROR, GetBestMove recovered from panic: %v", r)
fmt.Println(errors.Wrap(r, 2).ErrorStack())
bestFrom = -1
bestTo = -1
}
}()
bestFrom = -1
bestTo = -1
bestTotalScore := -10.
var bestScores map[string]float64
myGeneral := game.Generals[game.PlayerIndex]
enemyCOM := getEnemyCenterOfMass(game)
/// First check for attacking new empty or enemy tiles
for from, fromTile := range game.GameMap {
if fromTile.Faction != game.PlayerIndex || fromTile.Armies < 2 {
continue
}
for to, toTile := range game.GameMap {
if toTile.Faction < TileEmpty || toTile.Faction == game.PlayerIndex {
continue
}
if game.ImpossibleTiles[to] {
continue
}
isEmpty := toTile.Faction == TileEmpty
isEnemy := IsEnemy(game, toTile)
isGeneral := toTile.Type == gioframework.General
isCity := toTile.Type == gioframework.City
outnumber := float64(fromTile.Armies - toTile.Armies)
dist := getHeuristicPathDistance(game, from, to)
distFromGen := getHeuristicPathDistance(game, myGeneral, to)
center := game.GetIndex(game.Width/2, game.Height/2)
distCenter := getHeuristicPathDistance(game, center, to)
centerness := 1. - distCenter/float64(game.Width)
// This is the vector pointing towards the enemy
enemyVector := [2]int{
game.GetRow(enemyCOM) - game.GetRow(myGeneral),
game.GetCol(enemyCOM) - game.GetCol(myGeneral),
}
// The vector showing the proposed move
moveVector := [2]int{
game.GetRow(to) - game.GetRow(from),
game.GetCol(to) - game.GetCol(from),
}
neighbors := game.GetNeighborhood(to, false)
numAlliedNeighbors := 0
for _, neighbor := range neighbors {
if !IsEnemy(game, game.GameMap[neighbor]) {
numAlliedNeighbors += 1
}
}
if isCity && outnumber < 2 && !isEnemy {
// Never attack a neutral city and lose
continue
}
scores := make(map[string]float64)
scores["outnumber score"] = Truncate(outnumber/200, 0., 0.25) * Btof(isEnemy)
scores["outnumbered penalty"] = -0.2 * Btof(outnumber < 2)
scores["general threat score"] = (0.25 * math.Pow(distFromGen, -1.0)) *
Truncate(float64(toTile.Armies)/10, 0., 1.0) * Btof(isEnemy)
scores["dist penalty"] = Truncate(-0.5*dist/30, -0.3, 0)
scores["dist gt army penalty"] = -0.2 * Btof(fromTile.Armies < int(dist))
scores["is enemy score"] = 0.05 * Btof(isEnemy)
scores["close city score"] = 0.35 * Btof(isCity && outnumber >= 2) *
math.Pow(distFromGen, -0.5)
scores["enemy city score"] = 0.2 * Btof(isCity && isEnemy)
scores["enemy gen score"] = 0.15 * Btof(isGeneral) * Btof(isEnemy)
scores["empty score"] = 0.08 * Btof(isEmpty)
// Generally a good strategy to take the center of the board
scores["centerness score"] = 0.02 * centerness
// You should move towards enemy's main base, not random little
// patches of enemy. This prevents the bot from "cleaning up"
// irrelevant squares. This could be improved by making the vectors
// normalized and having the score gradually increase as you point
// towards the enemy
scores["towards enemy score"] = 0.03 * Btof(dotProduct(enemyVector, moveVector) > 1)
// Instead of attacking all the tiles on the enemy's border it is
// typically better to make a deep drive into enemy land
scores["deep drive score"] = 0.04 * Btof(numAlliedNeighbors < 2)
totalScore := 0.
for _, score := range scores {
totalScore += score
}
//log.Printf("============Considering move %v->%v, got score: %v\n", from, to, totalScore)
//logSortedScores(scores)
if totalScore > bestTotalScore {
bestScores = scores
bestTotalScore = totalScore
bestFrom = from
bestTo = to
}
}
}
logSortedScores(bestScores)
log.Printf("Attack score: %.2f", bestTotalScore)
log.Printf("From:%v To:%v", game.GetCoordString(bestFrom), game.GetCoordString(bestTo))
log.Println("--------")
/////////////// Then check for consolidation //////////////////////////////
//consolScore := getConsolidationScore(game)
// It's a good idea to consolidate armies right after the armies regenerate.
// armyCycle shows the amount of the cycle that's passed. [0, 1]
armyCycle := float64(game.TurnCount % 50) / 50
consolScore := 0.6 * math.Pow(1-armyCycle, 6) - 0.2
log.Printf("Consolidation score:%.2f", consolScore)
tiles := getTilesSortedOnArmies(game)
if len(tiles) > 10 && consolScore > bestTotalScore {
largestTile := tiles[0]
for _, tile := range tiles[:5] {
log.Printf("Army ranked: %v", game.GameMap[tile].Armies)
}
highestAvArmy := 0.
for _, from := range tiles[1:] {
if game.GameMap[from].Armies < 2 {
continue
}
// Warning: this path could cut through enemy territory! Keep an
// eye for this
path_, _ := GetShortestPath(game, from, largestTile) // TODO: handle err //TODO: this throws an error it seems! largestTile == from ??!!
armies := 0
for _, i := range path_[:len(path_)-1] {
armies += game.GameMap[i].Armies
}
av_army := float64(armies) / float64(len(path_)-1)
if av_army > highestAvArmy {
highestAvArmy = av_army
bestFrom = from
}
}
bestTo = largestTile
// We want to move towards the enemy. Reverse if that's not the
// case
if enemyCOM >= 0 {
fromDistEnemy := getHeuristicPathDistance(game, enemyCOM, bestFrom)
toDistEnemy := getHeuristicPathDistance(game, enemyCOM, bestTo)
if fromDistEnemy < toDistEnemy {
log.Println("Switching direction of consolidation")
bestFrom, bestTo = bestTo, bestFrom
}
}
log.Printf("Consolidating, with average army: %v", highestAvArmy)
}
// Trash talk
toTile := game.GameMap[bestTo]
if toTile.Type == gioframework.City && IsEnemy(game, toTile) {
game.SendChat("Sorry, I'm gonna need that citadel")
}
return bestFrom, bestTo
}
func IsEnemy(game *gioframework.Game, tile gioframework.Cell) bool {
if len(game.Teams) == 0 | else {
myTeam := game.Teams[game.PlayerIndex]
return tile.Faction >= 0 && game.Teams[tile.Faction] != myTeam
}
}
func logSortedScores(scores map[string]float64) {
keys := make([]string, len(scores))
i := 0
for k := range scores {
keys[i] = k
i++
}
sort.Strings(keys)
for _, k := range keys {
log.Printf("%20v: %.3f\n", k, scores[k])
}
}
func Truncate(val, min, max float64) float64 {
return math.Min(math.Max(val, min), max)
}
func Sum(x []int) int { // TODO make this an interface for fun
sum := 0
for _, i := range x {
sum += i
}
return sum
}
func dotProduct(x [2]int, y [2]int) float64 {
return float64(x[0]) * float64(y[0]) + float64(x[1]) * float64(y[1])
}
// getEnemyCenterOfMass find the central point of the visible enemy terrain,
// weighted by armies, and rounded to the closes tile.
func getEnemyCenterOfMass(game *gioframework.Game) int {
rows := []int{}
cols := []int{}
armies := 0
for i, tile := range game.GameMap {
if IsEnemy(game, tile) {
army := tile.Armies
rows = append(rows, army*game.GetRow(i))
cols = append(cols, army*game.GetCol(i))
armies += army
}
}
var COM int
if armies == 0 {
COM = -1
log.Println("COM is: -1")
return COM
}
avRow := float64(Sum(rows))/float64(armies)
avCol := float64(Sum(cols))/float64(armies)
COM = game.GetIndex(int(avRow), int(avCol))
log.Printf("COM is: %v\n", game.GetCoordString(COM))
return COM
}
func getConsolidationScore(game *gioframework.Game) float64 {
gini := getArmyGiniCoefficient(game)
totalArmy := float64(game.Scores[game.PlayerIndex].Armies)
log.Printf("Gini coefficient: %.2f", gini)
log.Printf("Total army: %v", totalArmy)
return (0.65 - gini) * Truncate(totalArmy/500., 0.5, 2.)
}
func getArmyGiniCoefficient(game *gioframework.Game) float64 {
movableArmies := []int{}
for _, tile := range game.GameMap {
if tile.Faction == game.PlayerIndex {
movableArmies = append(movableArmies, tile.Armies-1)
}
}
//log.Printf("movableArmies: %v", movableArmies)
return giniCoefficient(movableArmies)
}
// giniCoefficient is calculated as described here:
// https://en.wikipedia.org/wiki/Gini_coefficient#Alternate_expressions
func giniCoefficient(nums []int) float64 {
sort.Ints(nums)
n := len(nums)
denom := 0
for _, num := range nums {
denom += num
}
denom *= n
numer := 0
for i, num := range nums {
numer += (i + 1) * num
}
numer *= 2
return float64(numer)/float64(denom) - float64(n+1)/float64(n)
}
func getTilesSortedOnArmies(game *gioframework.Game) []int {
tileToArmy := make(map[int]int)
for i := 0; i < game.Height*game.Width; i++ {
tile := game.GameMap[i]
if tile.Faction == game.PlayerIndex {
tileToArmy[i] = tile.Armies
}
}
largestArmyTiles := sortKeysByValues(tileToArmy, true)
return largestArmyTiles
}
func sortKeysByValues(m map[int]int, reversed bool) []int {
n := map[int][]int{}
var a []int
for k, v := range m {
n[v] = append(n[v], k)
}
for v := range n {
a = append(a, v)
}
keys := []int{}
if reversed {
sort.Sort(sort.Reverse(sort.IntSlice(a)))
} else {
sort.Sort(sort.IntSlice(a))
}
for _, v := range a {
for _, k := range n[v] {
keys = append(keys, k)
}
}
return keys
}
| {
// This means we're playing 1v1 or FFA
return tile.Faction != game.PlayerIndex && tile.Faction >= 0
} | conditional_block |
data_window.go | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"context"
"sync"
"time"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/util/logutil"
"github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
pmodel "github.com/prometheus/common/model"
"go.uber.org/atomic"
"go.uber.org/zap"
)
var (
// CurrentExecuteCount is CurrentExecuteCount
CurrentExecuteCount atomic.Uint64
// CurrentTiFlashPushDownCount is CurrentTiFlashPushDownCount
CurrentTiFlashPushDownCount atomic.Uint64
// CurrentTiFlashExchangePushDownCount is CurrentTiFlashExchangePushDownCount
CurrentTiFlashExchangePushDownCount atomic.Uint64
// CurrentCoprCacheHitRatioGTE0Count is CurrentCoprCacheHitRatioGTE1Count
CurrentCoprCacheHitRatioGTE0Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE1Count is CurrentCoprCacheHitRatioGTE1Count
CurrentCoprCacheHitRatioGTE1Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE10Count is CurrentCoprCacheHitRatioGTE10Count
CurrentCoprCacheHitRatioGTE10Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE20Count is CurrentCoprCacheHitRatioGTE20Count
CurrentCoprCacheHitRatioGTE20Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE40Count is CurrentCoprCacheHitRatioGTE40Count
CurrentCoprCacheHitRatioGTE40Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE80Count is CurrentCoprCacheHitRatioGTE80Count
CurrentCoprCacheHitRatioGTE80Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE100Count is CurrentCoprCacheHitRatioGTE100Count
CurrentCoprCacheHitRatioGTE100Count atomic.Uint64
// CurrentTiflashTableScanCount count the number of tiflash table scan and tiflash partition table scan
CurrentTiflashTableScanCount atomic.Uint64
// CurrentTiflashTableScanWithFastScanCount count the number of tiflash table scan and tiflash partition table scan which use fastscan
CurrentTiflashTableScanWithFastScanCount atomic.Uint64
)
const (
// WindowSize determines how long some data is aggregated by.
WindowSize = 1 * time.Hour
// SubWindowSize determines how often data is rotated.
SubWindowSize = 1 * time.Minute
maxSubWindowLength = int(ReportInterval / SubWindowSize) // TODO: Ceiling?
maxSubWindowLengthInWindow = int(WindowSize / SubWindowSize) // TODO: Ceiling?
promReadTimeout = time.Second * 30
)
type windowData struct {
BeginAt time.Time `json:"beginAt"`
ExecuteCount uint64 `json:"executeCount"`
TiFlashUsage tiFlashUsageData `json:"tiFlashUsage"`
CoprCacheUsage coprCacheUsageData `json:"coprCacheUsage"`
SQLUsage sqlUsageData `json:"SQLUsage"`
BuiltinFunctionsUsage map[string]uint32 `json:"builtinFunctionsUsage"`
}
type sqlType map[string]uint64
type sqlUsageData struct {
SQLTotal uint64 `json:"total"`
SQLType sqlType `json:"type"`
}
type coprCacheUsageData struct {
GTE0 uint64 `json:"gte0"`
GTE1 uint64 `json:"gte1"`
GTE10 uint64 `json:"gte10"`
GTE20 uint64 `json:"gte20"`
GTE40 uint64 `json:"gte40"`
GTE80 uint64 `json:"gte80"`
GTE100 uint64 `json:"gte100"`
}
type tiFlashUsageData struct {
PushDown uint64 `json:"pushDown"`
ExchangePushDown uint64 `json:"exchangePushDown"`
TableScan uint64 `json:"tableScan"`
TableScanWithFastScan uint64 `json:"tableScanWithFastScan"`
}
// builtinFunctionsUsageCollector collects builtin functions usage information and dump it into windowData.
type builtinFunctionsUsageCollector struct {
sync.Mutex
// Should acquire lock to access this
usageData BuiltinFunctionsUsage
}
// Merge BuiltinFunctionsUsage data
func (b *builtinFunctionsUsageCollector) Collect(usageData BuiltinFunctionsUsage) {
// TODO(leiysky): use multi-worker to collect the usage information so we can make this asynchronous
b.Lock()
defer b.Unlock()
b.usageData.Merge(usageData)
}
// Dump BuiltinFunctionsUsage data
func (b *builtinFunctionsUsageCollector) Dump() map[string]uint32 {
b.Lock()
ret := b.usageData
b.usageData = make(map[string]uint32)
b.Unlock()
return ret
}
// BuiltinFunctionsUsage is a map from ScalarFuncSig_name(string) to usage count(uint32)
type BuiltinFunctionsUsage map[string]uint32
// Inc will increase the usage count of scalar function by 1
func (b BuiltinFunctionsUsage) Inc(scalarFuncSigName string) {
v, ok := b[scalarFuncSigName]
if !ok {
b[scalarFuncSigName] = 1
} else {
b[scalarFuncSigName] = v + 1
}
}
// Merge BuiltinFunctionsUsage data
func (b BuiltinFunctionsUsage) Merge(usageData BuiltinFunctionsUsage) {
for k, v := range usageData {
prev, ok := b[k]
if !ok {
b[k] = v
} else {
b[k] = prev + v
}
}
}
// GlobalBuiltinFunctionsUsage is used to collect builtin functions usage information
var GlobalBuiltinFunctionsUsage = &builtinFunctionsUsageCollector{usageData: make(BuiltinFunctionsUsage)}
var (
rotatedSubWindows []*windowData
subWindowsLock = sync.RWMutex{}
)
func getSQLSum(sqlTypeData *sqlType) uint64 {
result := uint64(0)
for _, v := range *sqlTypeData {
result += v
}
return result
}
func readSQLMetric(timepoint time.Time, sqlResult *sqlUsageData) error {
ctx := context.TODO()
promQL := "avg(tidb_executor_statement_total{}) by (type)"
result, err := querySQLMetric(ctx, timepoint, promQL)
if err != nil {
return err
}
analysisSQLUsage(result, sqlResult)
return nil
}
func querySQLMetric(ctx context.Context, queryTime time.Time, promQL string) (result pmodel.Value, err error) {
// Add retry to avoid network error.
var prometheusAddr string
for i := 0; i < 5; i++ {
//TODO: the prometheus will be Integrated into the PD, then we need to query the prometheus in PD directly, which need change the quire API
prometheusAddr, err = infosync.GetPrometheusAddr()
if err == nil || err == infosync.ErrPrometheusAddrIsNotSet {
break
}
time.Sleep(100 * time.Millisecond)
}
if err != nil {
return nil, err
}
promClient, err := api.NewClient(api.Config{
Address: prometheusAddr,
})
if err != nil {
return nil, err
}
promQLAPI := promv1.NewAPI(promClient)
ctx, cancel := context.WithTimeout(ctx, promReadTimeout)
defer cancel()
// Add retry to avoid network error.
for i := 0; i < 5; i++ {
result, _, err = promQLAPI.Query(ctx, promQL, queryTime)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return result, err
}
func analysisSQLUsage(promResult pmodel.Value, sqlResult *sqlUsageData) {
if promResult == nil {
return
}
if promResult.Type() == pmodel.ValVector {
matrix := promResult.(pmodel.Vector)
for _, m := range matrix {
v := m.Value
promLable := string(m.Metric[pmodel.LabelName("type")])
sqlResult.SQLType[promLable] = uint64(v)
}
}
}
// RotateSubWindow rotates the telemetry sub window.
func RotateSubWindow() {
thisSubWindow := windowData{
BeginAt: time.Now(),
ExecuteCount: CurrentExecuteCount.Swap(0),
TiFlashUsage: tiFlashUsageData{
PushDown: CurrentTiFlashPushDownCount.Swap(0),
ExchangePushDown: CurrentTiFlashExchangePushDownCount.Swap(0),
TableScan: CurrentTiflashTableScanCount.Swap(0),
TableScanWithFastScan: CurrentTiflashTableScanWithFastScanCount.Swap(0),
},
CoprCacheUsage: coprCacheUsageData{
GTE0: CurrentCoprCacheHitRatioGTE0Count.Swap(0),
GTE1: CurrentCoprCacheHitRatioGTE1Count.Swap(0),
GTE10: CurrentCoprCacheHitRatioGTE10Count.Swap(0),
GTE20: CurrentCoprCacheHitRatioGTE20Count.Swap(0),
GTE40: CurrentCoprCacheHitRatioGTE40Count.Swap(0),
GTE80: CurrentCoprCacheHitRatioGTE80Count.Swap(0),
GTE100: CurrentCoprCacheHitRatioGTE100Count.Swap(0),
},
SQLUsage: sqlUsageData{
SQLTotal: 0,
SQLType: make(sqlType),
},
BuiltinFunctionsUsage: GlobalBuiltinFunctionsUsage.Dump(),
}
err := readSQLMetric(time.Now(), &thisSubWindow.SQLUsage)
if err != nil {
logutil.BgLogger().Info("Error exists when getting the SQL Metric.",
zap.Error(err))
}
thisSubWindow.SQLUsage.SQLTotal = getSQLSum(&thisSubWindow.SQLUsage.SQLType)
subWindowsLock.Lock()
rotatedSubWindows = append(rotatedSubWindows, &thisSubWindow)
if len(rotatedSubWindows) > maxSubWindowLength {
// Only retain last N sub windows, according to the report interval.
rotatedSubWindows = rotatedSubWindows[len(rotatedSubWindows)-maxSubWindowLength:]
}
subWindowsLock.Unlock()
}
func calDeltaSQLTypeMap(cur sqlType, last sqlType) sqlType {
deltaMap := make(sqlType)
for key, value := range cur {
deltaMap[key] = value - (last)[key]
}
return deltaMap
}
// getWindowData returns data aggregated by window size.
func getWindowData() []*windowData {
results := make([]*windowData, 0)
subWindowsLock.RLock()
i := 0
for i < len(rotatedSubWindows) {
thisWindow := *rotatedSubWindows[i]
var startWindow windowData | aggregatedSubWindows := 1
// Aggregate later sub windows
i++
for i < len(rotatedSubWindows) && aggregatedSubWindows < maxSubWindowLengthInWindow {
thisWindow.ExecuteCount += rotatedSubWindows[i].ExecuteCount
thisWindow.TiFlashUsage.PushDown += rotatedSubWindows[i].TiFlashUsage.PushDown
thisWindow.TiFlashUsage.ExchangePushDown += rotatedSubWindows[i].TiFlashUsage.ExchangePushDown
thisWindow.TiFlashUsage.TableScan += rotatedSubWindows[i].TiFlashUsage.TableScan
thisWindow.TiFlashUsage.TableScanWithFastScan += rotatedSubWindows[i].TiFlashUsage.TableScanWithFastScan
thisWindow.CoprCacheUsage.GTE0 += rotatedSubWindows[i].CoprCacheUsage.GTE0
thisWindow.CoprCacheUsage.GTE1 += rotatedSubWindows[i].CoprCacheUsage.GTE1
thisWindow.CoprCacheUsage.GTE10 += rotatedSubWindows[i].CoprCacheUsage.GTE10
thisWindow.CoprCacheUsage.GTE20 += rotatedSubWindows[i].CoprCacheUsage.GTE20
thisWindow.CoprCacheUsage.GTE40 += rotatedSubWindows[i].CoprCacheUsage.GTE40
thisWindow.CoprCacheUsage.GTE80 += rotatedSubWindows[i].CoprCacheUsage.GTE80
thisWindow.CoprCacheUsage.GTE100 += rotatedSubWindows[i].CoprCacheUsage.GTE100
thisWindow.SQLUsage.SQLTotal = rotatedSubWindows[i].SQLUsage.SQLTotal - startWindow.SQLUsage.SQLTotal
thisWindow.SQLUsage.SQLType = calDeltaSQLTypeMap(rotatedSubWindows[i].SQLUsage.SQLType, startWindow.SQLUsage.SQLType)
mergedBuiltinFunctionsUsage := BuiltinFunctionsUsage(thisWindow.BuiltinFunctionsUsage)
mergedBuiltinFunctionsUsage.Merge(BuiltinFunctionsUsage(rotatedSubWindows[i].BuiltinFunctionsUsage))
thisWindow.BuiltinFunctionsUsage = mergedBuiltinFunctionsUsage
aggregatedSubWindows++
i++
}
results = append(results, &thisWindow)
}
subWindowsLock.RUnlock()
return results
} | if i == 0 {
startWindow = thisWindow
} else {
startWindow = *rotatedSubWindows[i-1]
} | random_line_split |
data_window.go | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"context"
"sync"
"time"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/util/logutil"
"github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
pmodel "github.com/prometheus/common/model"
"go.uber.org/atomic"
"go.uber.org/zap"
)
var (
// CurrentExecuteCount is CurrentExecuteCount
CurrentExecuteCount atomic.Uint64
// CurrentTiFlashPushDownCount is CurrentTiFlashPushDownCount
CurrentTiFlashPushDownCount atomic.Uint64
// CurrentTiFlashExchangePushDownCount is CurrentTiFlashExchangePushDownCount
CurrentTiFlashExchangePushDownCount atomic.Uint64
// CurrentCoprCacheHitRatioGTE0Count is CurrentCoprCacheHitRatioGTE1Count
CurrentCoprCacheHitRatioGTE0Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE1Count is CurrentCoprCacheHitRatioGTE1Count
CurrentCoprCacheHitRatioGTE1Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE10Count is CurrentCoprCacheHitRatioGTE10Count
CurrentCoprCacheHitRatioGTE10Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE20Count is CurrentCoprCacheHitRatioGTE20Count
CurrentCoprCacheHitRatioGTE20Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE40Count is CurrentCoprCacheHitRatioGTE40Count
CurrentCoprCacheHitRatioGTE40Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE80Count is CurrentCoprCacheHitRatioGTE80Count
CurrentCoprCacheHitRatioGTE80Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE100Count is CurrentCoprCacheHitRatioGTE100Count
CurrentCoprCacheHitRatioGTE100Count atomic.Uint64
// CurrentTiflashTableScanCount count the number of tiflash table scan and tiflash partition table scan
CurrentTiflashTableScanCount atomic.Uint64
// CurrentTiflashTableScanWithFastScanCount count the number of tiflash table scan and tiflash partition table scan which use fastscan
CurrentTiflashTableScanWithFastScanCount atomic.Uint64
)
const (
// WindowSize determines how long some data is aggregated by.
WindowSize = 1 * time.Hour
// SubWindowSize determines how often data is rotated.
SubWindowSize = 1 * time.Minute
maxSubWindowLength = int(ReportInterval / SubWindowSize) // TODO: Ceiling?
maxSubWindowLengthInWindow = int(WindowSize / SubWindowSize) // TODO: Ceiling?
promReadTimeout = time.Second * 30
)
type windowData struct {
BeginAt time.Time `json:"beginAt"`
ExecuteCount uint64 `json:"executeCount"`
TiFlashUsage tiFlashUsageData `json:"tiFlashUsage"`
CoprCacheUsage coprCacheUsageData `json:"coprCacheUsage"`
SQLUsage sqlUsageData `json:"SQLUsage"`
BuiltinFunctionsUsage map[string]uint32 `json:"builtinFunctionsUsage"`
}
type sqlType map[string]uint64
type sqlUsageData struct {
SQLTotal uint64 `json:"total"`
SQLType sqlType `json:"type"`
}
type coprCacheUsageData struct {
GTE0 uint64 `json:"gte0"`
GTE1 uint64 `json:"gte1"`
GTE10 uint64 `json:"gte10"`
GTE20 uint64 `json:"gte20"`
GTE40 uint64 `json:"gte40"`
GTE80 uint64 `json:"gte80"`
GTE100 uint64 `json:"gte100"`
}
type tiFlashUsageData struct {
PushDown uint64 `json:"pushDown"`
ExchangePushDown uint64 `json:"exchangePushDown"`
TableScan uint64 `json:"tableScan"`
TableScanWithFastScan uint64 `json:"tableScanWithFastScan"`
}
// builtinFunctionsUsageCollector collects builtin functions usage information and dump it into windowData.
type builtinFunctionsUsageCollector struct {
sync.Mutex
// Should acquire lock to access this
usageData BuiltinFunctionsUsage
}
// Merge BuiltinFunctionsUsage data
func (b *builtinFunctionsUsageCollector) Collect(usageData BuiltinFunctionsUsage) {
// TODO(leiysky): use multi-worker to collect the usage information so we can make this asynchronous
b.Lock()
defer b.Unlock()
b.usageData.Merge(usageData)
}
// Dump BuiltinFunctionsUsage data
func (b *builtinFunctionsUsageCollector) Dump() map[string]uint32 {
b.Lock()
ret := b.usageData
b.usageData = make(map[string]uint32)
b.Unlock()
return ret
}
// BuiltinFunctionsUsage is a map from ScalarFuncSig_name(string) to usage count(uint32)
type BuiltinFunctionsUsage map[string]uint32
// Inc will increase the usage count of scalar function by 1
func (b BuiltinFunctionsUsage) Inc(scalarFuncSigName string) {
v, ok := b[scalarFuncSigName]
if !ok {
b[scalarFuncSigName] = 1
} else {
b[scalarFuncSigName] = v + 1
}
}
// Merge BuiltinFunctionsUsage data
func (b BuiltinFunctionsUsage) Merge(usageData BuiltinFunctionsUsage) {
for k, v := range usageData {
prev, ok := b[k]
if !ok {
b[k] = v
} else {
b[k] = prev + v
}
}
}
// GlobalBuiltinFunctionsUsage is used to collect builtin functions usage information
var GlobalBuiltinFunctionsUsage = &builtinFunctionsUsageCollector{usageData: make(BuiltinFunctionsUsage)}
var (
rotatedSubWindows []*windowData
subWindowsLock = sync.RWMutex{}
)
func getSQLSum(sqlTypeData *sqlType) uint64 {
result := uint64(0)
for _, v := range *sqlTypeData {
result += v
}
return result
}
func readSQLMetric(timepoint time.Time, sqlResult *sqlUsageData) error {
ctx := context.TODO()
promQL := "avg(tidb_executor_statement_total{}) by (type)"
result, err := querySQLMetric(ctx, timepoint, promQL)
if err != nil {
return err
}
analysisSQLUsage(result, sqlResult)
return nil
}
func querySQLMetric(ctx context.Context, queryTime time.Time, promQL string) (result pmodel.Value, err error) {
// Add retry to avoid network error.
var prometheusAddr string
for i := 0; i < 5; i++ {
//TODO: the prometheus will be Integrated into the PD, then we need to query the prometheus in PD directly, which need change the quire API
prometheusAddr, err = infosync.GetPrometheusAddr()
if err == nil || err == infosync.ErrPrometheusAddrIsNotSet {
break
}
time.Sleep(100 * time.Millisecond)
}
if err != nil {
return nil, err
}
promClient, err := api.NewClient(api.Config{
Address: prometheusAddr,
})
if err != nil {
return nil, err
}
promQLAPI := promv1.NewAPI(promClient)
ctx, cancel := context.WithTimeout(ctx, promReadTimeout)
defer cancel()
// Add retry to avoid network error.
for i := 0; i < 5; i++ |
return result, err
}
func analysisSQLUsage(promResult pmodel.Value, sqlResult *sqlUsageData) {
if promResult == nil {
return
}
if promResult.Type() == pmodel.ValVector {
matrix := promResult.(pmodel.Vector)
for _, m := range matrix {
v := m.Value
promLable := string(m.Metric[pmodel.LabelName("type")])
sqlResult.SQLType[promLable] = uint64(v)
}
}
}
// RotateSubWindow rotates the telemetry sub window.
func RotateSubWindow() {
thisSubWindow := windowData{
BeginAt: time.Now(),
ExecuteCount: CurrentExecuteCount.Swap(0),
TiFlashUsage: tiFlashUsageData{
PushDown: CurrentTiFlashPushDownCount.Swap(0),
ExchangePushDown: CurrentTiFlashExchangePushDownCount.Swap(0),
TableScan: CurrentTiflashTableScanCount.Swap(0),
TableScanWithFastScan: CurrentTiflashTableScanWithFastScanCount.Swap(0),
},
CoprCacheUsage: coprCacheUsageData{
GTE0: CurrentCoprCacheHitRatioGTE0Count.Swap(0),
GTE1: CurrentCoprCacheHitRatioGTE1Count.Swap(0),
GTE10: CurrentCoprCacheHitRatioGTE10Count.Swap(0),
GTE20: CurrentCoprCacheHitRatioGTE20Count.Swap(0),
GTE40: CurrentCoprCacheHitRatioGTE40Count.Swap(0),
GTE80: CurrentCoprCacheHitRatioGTE80Count.Swap(0),
GTE100: CurrentCoprCacheHitRatioGTE100Count.Swap(0),
},
SQLUsage: sqlUsageData{
SQLTotal: 0,
SQLType: make(sqlType),
},
BuiltinFunctionsUsage: GlobalBuiltinFunctionsUsage.Dump(),
}
err := readSQLMetric(time.Now(), &thisSubWindow.SQLUsage)
if err != nil {
logutil.BgLogger().Info("Error exists when getting the SQL Metric.",
zap.Error(err))
}
thisSubWindow.SQLUsage.SQLTotal = getSQLSum(&thisSubWindow.SQLUsage.SQLType)
subWindowsLock.Lock()
rotatedSubWindows = append(rotatedSubWindows, &thisSubWindow)
if len(rotatedSubWindows) > maxSubWindowLength {
// Only retain last N sub windows, according to the report interval.
rotatedSubWindows = rotatedSubWindows[len(rotatedSubWindows)-maxSubWindowLength:]
}
subWindowsLock.Unlock()
}
func calDeltaSQLTypeMap(cur sqlType, last sqlType) sqlType {
deltaMap := make(sqlType)
for key, value := range cur {
deltaMap[key] = value - (last)[key]
}
return deltaMap
}
// getWindowData returns data aggregated by window size.
func getWindowData() []*windowData {
results := make([]*windowData, 0)
subWindowsLock.RLock()
i := 0
for i < len(rotatedSubWindows) {
thisWindow := *rotatedSubWindows[i]
var startWindow windowData
if i == 0 {
startWindow = thisWindow
} else {
startWindow = *rotatedSubWindows[i-1]
}
aggregatedSubWindows := 1
// Aggregate later sub windows
i++
for i < len(rotatedSubWindows) && aggregatedSubWindows < maxSubWindowLengthInWindow {
thisWindow.ExecuteCount += rotatedSubWindows[i].ExecuteCount
thisWindow.TiFlashUsage.PushDown += rotatedSubWindows[i].TiFlashUsage.PushDown
thisWindow.TiFlashUsage.ExchangePushDown += rotatedSubWindows[i].TiFlashUsage.ExchangePushDown
thisWindow.TiFlashUsage.TableScan += rotatedSubWindows[i].TiFlashUsage.TableScan
thisWindow.TiFlashUsage.TableScanWithFastScan += rotatedSubWindows[i].TiFlashUsage.TableScanWithFastScan
thisWindow.CoprCacheUsage.GTE0 += rotatedSubWindows[i].CoprCacheUsage.GTE0
thisWindow.CoprCacheUsage.GTE1 += rotatedSubWindows[i].CoprCacheUsage.GTE1
thisWindow.CoprCacheUsage.GTE10 += rotatedSubWindows[i].CoprCacheUsage.GTE10
thisWindow.CoprCacheUsage.GTE20 += rotatedSubWindows[i].CoprCacheUsage.GTE20
thisWindow.CoprCacheUsage.GTE40 += rotatedSubWindows[i].CoprCacheUsage.GTE40
thisWindow.CoprCacheUsage.GTE80 += rotatedSubWindows[i].CoprCacheUsage.GTE80
thisWindow.CoprCacheUsage.GTE100 += rotatedSubWindows[i].CoprCacheUsage.GTE100
thisWindow.SQLUsage.SQLTotal = rotatedSubWindows[i].SQLUsage.SQLTotal - startWindow.SQLUsage.SQLTotal
thisWindow.SQLUsage.SQLType = calDeltaSQLTypeMap(rotatedSubWindows[i].SQLUsage.SQLType, startWindow.SQLUsage.SQLType)
mergedBuiltinFunctionsUsage := BuiltinFunctionsUsage(thisWindow.BuiltinFunctionsUsage)
mergedBuiltinFunctionsUsage.Merge(BuiltinFunctionsUsage(rotatedSubWindows[i].BuiltinFunctionsUsage))
thisWindow.BuiltinFunctionsUsage = mergedBuiltinFunctionsUsage
aggregatedSubWindows++
i++
}
results = append(results, &thisWindow)
}
subWindowsLock.RUnlock()
return results
}
| {
result, _, err = promQLAPI.Query(ctx, promQL, queryTime)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
} | conditional_block |
data_window.go | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"context"
"sync"
"time"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/util/logutil"
"github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
pmodel "github.com/prometheus/common/model"
"go.uber.org/atomic"
"go.uber.org/zap"
)
var (
// CurrentExecuteCount is CurrentExecuteCount
CurrentExecuteCount atomic.Uint64
// CurrentTiFlashPushDownCount is CurrentTiFlashPushDownCount
CurrentTiFlashPushDownCount atomic.Uint64
// CurrentTiFlashExchangePushDownCount is CurrentTiFlashExchangePushDownCount
CurrentTiFlashExchangePushDownCount atomic.Uint64
// CurrentCoprCacheHitRatioGTE0Count is CurrentCoprCacheHitRatioGTE1Count
CurrentCoprCacheHitRatioGTE0Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE1Count is CurrentCoprCacheHitRatioGTE1Count
CurrentCoprCacheHitRatioGTE1Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE10Count is CurrentCoprCacheHitRatioGTE10Count
CurrentCoprCacheHitRatioGTE10Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE20Count is CurrentCoprCacheHitRatioGTE20Count
CurrentCoprCacheHitRatioGTE20Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE40Count is CurrentCoprCacheHitRatioGTE40Count
CurrentCoprCacheHitRatioGTE40Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE80Count is CurrentCoprCacheHitRatioGTE80Count
CurrentCoprCacheHitRatioGTE80Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE100Count is CurrentCoprCacheHitRatioGTE100Count
CurrentCoprCacheHitRatioGTE100Count atomic.Uint64
// CurrentTiflashTableScanCount count the number of tiflash table scan and tiflash partition table scan
CurrentTiflashTableScanCount atomic.Uint64
// CurrentTiflashTableScanWithFastScanCount count the number of tiflash table scan and tiflash partition table scan which use fastscan
CurrentTiflashTableScanWithFastScanCount atomic.Uint64
)
const (
// WindowSize determines how long some data is aggregated by.
WindowSize = 1 * time.Hour
// SubWindowSize determines how often data is rotated.
SubWindowSize = 1 * time.Minute
maxSubWindowLength = int(ReportInterval / SubWindowSize) // TODO: Ceiling?
maxSubWindowLengthInWindow = int(WindowSize / SubWindowSize) // TODO: Ceiling?
promReadTimeout = time.Second * 30
)
type windowData struct {
BeginAt time.Time `json:"beginAt"`
ExecuteCount uint64 `json:"executeCount"`
TiFlashUsage tiFlashUsageData `json:"tiFlashUsage"`
CoprCacheUsage coprCacheUsageData `json:"coprCacheUsage"`
SQLUsage sqlUsageData `json:"SQLUsage"`
BuiltinFunctionsUsage map[string]uint32 `json:"builtinFunctionsUsage"`
}
type sqlType map[string]uint64
type sqlUsageData struct {
SQLTotal uint64 `json:"total"`
SQLType sqlType `json:"type"`
}
type coprCacheUsageData struct {
GTE0 uint64 `json:"gte0"`
GTE1 uint64 `json:"gte1"`
GTE10 uint64 `json:"gte10"`
GTE20 uint64 `json:"gte20"`
GTE40 uint64 `json:"gte40"`
GTE80 uint64 `json:"gte80"`
GTE100 uint64 `json:"gte100"`
}
type tiFlashUsageData struct {
PushDown uint64 `json:"pushDown"`
ExchangePushDown uint64 `json:"exchangePushDown"`
TableScan uint64 `json:"tableScan"`
TableScanWithFastScan uint64 `json:"tableScanWithFastScan"`
}
// builtinFunctionsUsageCollector collects builtin functions usage information and dump it into windowData.
type builtinFunctionsUsageCollector struct {
sync.Mutex
// Should acquire lock to access this
usageData BuiltinFunctionsUsage
}
// Merge BuiltinFunctionsUsage data
func (b *builtinFunctionsUsageCollector) Collect(usageData BuiltinFunctionsUsage) {
// TODO(leiysky): use multi-worker to collect the usage information so we can make this asynchronous
b.Lock()
defer b.Unlock()
b.usageData.Merge(usageData)
}
// Dump BuiltinFunctionsUsage data
func (b *builtinFunctionsUsageCollector) Dump() map[string]uint32 {
b.Lock()
ret := b.usageData
b.usageData = make(map[string]uint32)
b.Unlock()
return ret
}
// BuiltinFunctionsUsage is a map from ScalarFuncSig_name(string) to usage count(uint32)
type BuiltinFunctionsUsage map[string]uint32
// Inc will increase the usage count of scalar function by 1
func (b BuiltinFunctionsUsage) Inc(scalarFuncSigName string) {
v, ok := b[scalarFuncSigName]
if !ok {
b[scalarFuncSigName] = 1
} else {
b[scalarFuncSigName] = v + 1
}
}
// Merge BuiltinFunctionsUsage data
func (b BuiltinFunctionsUsage) Merge(usageData BuiltinFunctionsUsage) {
for k, v := range usageData {
prev, ok := b[k]
if !ok {
b[k] = v
} else {
b[k] = prev + v
}
}
}
// GlobalBuiltinFunctionsUsage is used to collect builtin functions usage information
var GlobalBuiltinFunctionsUsage = &builtinFunctionsUsageCollector{usageData: make(BuiltinFunctionsUsage)}
var (
rotatedSubWindows []*windowData
subWindowsLock = sync.RWMutex{}
)
func getSQLSum(sqlTypeData *sqlType) uint64 {
result := uint64(0)
for _, v := range *sqlTypeData {
result += v
}
return result
}
func readSQLMetric(timepoint time.Time, sqlResult *sqlUsageData) error |
func querySQLMetric(ctx context.Context, queryTime time.Time, promQL string) (result pmodel.Value, err error) {
// Add retry to avoid network error.
var prometheusAddr string
for i := 0; i < 5; i++ {
//TODO: the prometheus will be Integrated into the PD, then we need to query the prometheus in PD directly, which need change the quire API
prometheusAddr, err = infosync.GetPrometheusAddr()
if err == nil || err == infosync.ErrPrometheusAddrIsNotSet {
break
}
time.Sleep(100 * time.Millisecond)
}
if err != nil {
return nil, err
}
promClient, err := api.NewClient(api.Config{
Address: prometheusAddr,
})
if err != nil {
return nil, err
}
promQLAPI := promv1.NewAPI(promClient)
ctx, cancel := context.WithTimeout(ctx, promReadTimeout)
defer cancel()
// Add retry to avoid network error.
for i := 0; i < 5; i++ {
result, _, err = promQLAPI.Query(ctx, promQL, queryTime)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return result, err
}
func analysisSQLUsage(promResult pmodel.Value, sqlResult *sqlUsageData) {
if promResult == nil {
return
}
if promResult.Type() == pmodel.ValVector {
matrix := promResult.(pmodel.Vector)
for _, m := range matrix {
v := m.Value
promLable := string(m.Metric[pmodel.LabelName("type")])
sqlResult.SQLType[promLable] = uint64(v)
}
}
}
// RotateSubWindow rotates the telemetry sub window.
func RotateSubWindow() {
thisSubWindow := windowData{
BeginAt: time.Now(),
ExecuteCount: CurrentExecuteCount.Swap(0),
TiFlashUsage: tiFlashUsageData{
PushDown: CurrentTiFlashPushDownCount.Swap(0),
ExchangePushDown: CurrentTiFlashExchangePushDownCount.Swap(0),
TableScan: CurrentTiflashTableScanCount.Swap(0),
TableScanWithFastScan: CurrentTiflashTableScanWithFastScanCount.Swap(0),
},
CoprCacheUsage: coprCacheUsageData{
GTE0: CurrentCoprCacheHitRatioGTE0Count.Swap(0),
GTE1: CurrentCoprCacheHitRatioGTE1Count.Swap(0),
GTE10: CurrentCoprCacheHitRatioGTE10Count.Swap(0),
GTE20: CurrentCoprCacheHitRatioGTE20Count.Swap(0),
GTE40: CurrentCoprCacheHitRatioGTE40Count.Swap(0),
GTE80: CurrentCoprCacheHitRatioGTE80Count.Swap(0),
GTE100: CurrentCoprCacheHitRatioGTE100Count.Swap(0),
},
SQLUsage: sqlUsageData{
SQLTotal: 0,
SQLType: make(sqlType),
},
BuiltinFunctionsUsage: GlobalBuiltinFunctionsUsage.Dump(),
}
err := readSQLMetric(time.Now(), &thisSubWindow.SQLUsage)
if err != nil {
logutil.BgLogger().Info("Error exists when getting the SQL Metric.",
zap.Error(err))
}
thisSubWindow.SQLUsage.SQLTotal = getSQLSum(&thisSubWindow.SQLUsage.SQLType)
subWindowsLock.Lock()
rotatedSubWindows = append(rotatedSubWindows, &thisSubWindow)
if len(rotatedSubWindows) > maxSubWindowLength {
// Only retain last N sub windows, according to the report interval.
rotatedSubWindows = rotatedSubWindows[len(rotatedSubWindows)-maxSubWindowLength:]
}
subWindowsLock.Unlock()
}
func calDeltaSQLTypeMap(cur sqlType, last sqlType) sqlType {
deltaMap := make(sqlType)
for key, value := range cur {
deltaMap[key] = value - (last)[key]
}
return deltaMap
}
// getWindowData returns data aggregated by window size.
func getWindowData() []*windowData {
results := make([]*windowData, 0)
subWindowsLock.RLock()
i := 0
for i < len(rotatedSubWindows) {
thisWindow := *rotatedSubWindows[i]
var startWindow windowData
if i == 0 {
startWindow = thisWindow
} else {
startWindow = *rotatedSubWindows[i-1]
}
aggregatedSubWindows := 1
// Aggregate later sub windows
i++
for i < len(rotatedSubWindows) && aggregatedSubWindows < maxSubWindowLengthInWindow {
thisWindow.ExecuteCount += rotatedSubWindows[i].ExecuteCount
thisWindow.TiFlashUsage.PushDown += rotatedSubWindows[i].TiFlashUsage.PushDown
thisWindow.TiFlashUsage.ExchangePushDown += rotatedSubWindows[i].TiFlashUsage.ExchangePushDown
thisWindow.TiFlashUsage.TableScan += rotatedSubWindows[i].TiFlashUsage.TableScan
thisWindow.TiFlashUsage.TableScanWithFastScan += rotatedSubWindows[i].TiFlashUsage.TableScanWithFastScan
thisWindow.CoprCacheUsage.GTE0 += rotatedSubWindows[i].CoprCacheUsage.GTE0
thisWindow.CoprCacheUsage.GTE1 += rotatedSubWindows[i].CoprCacheUsage.GTE1
thisWindow.CoprCacheUsage.GTE10 += rotatedSubWindows[i].CoprCacheUsage.GTE10
thisWindow.CoprCacheUsage.GTE20 += rotatedSubWindows[i].CoprCacheUsage.GTE20
thisWindow.CoprCacheUsage.GTE40 += rotatedSubWindows[i].CoprCacheUsage.GTE40
thisWindow.CoprCacheUsage.GTE80 += rotatedSubWindows[i].CoprCacheUsage.GTE80
thisWindow.CoprCacheUsage.GTE100 += rotatedSubWindows[i].CoprCacheUsage.GTE100
thisWindow.SQLUsage.SQLTotal = rotatedSubWindows[i].SQLUsage.SQLTotal - startWindow.SQLUsage.SQLTotal
thisWindow.SQLUsage.SQLType = calDeltaSQLTypeMap(rotatedSubWindows[i].SQLUsage.SQLType, startWindow.SQLUsage.SQLType)
mergedBuiltinFunctionsUsage := BuiltinFunctionsUsage(thisWindow.BuiltinFunctionsUsage)
mergedBuiltinFunctionsUsage.Merge(BuiltinFunctionsUsage(rotatedSubWindows[i].BuiltinFunctionsUsage))
thisWindow.BuiltinFunctionsUsage = mergedBuiltinFunctionsUsage
aggregatedSubWindows++
i++
}
results = append(results, &thisWindow)
}
subWindowsLock.RUnlock()
return results
}
| {
ctx := context.TODO()
promQL := "avg(tidb_executor_statement_total{}) by (type)"
result, err := querySQLMetric(ctx, timepoint, promQL)
if err != nil {
return err
}
analysisSQLUsage(result, sqlResult)
return nil
} | identifier_body |
data_window.go | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"context"
"sync"
"time"
"github.com/pingcap/tidb/domain/infosync"
"github.com/pingcap/tidb/util/logutil"
"github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
pmodel "github.com/prometheus/common/model"
"go.uber.org/atomic"
"go.uber.org/zap"
)
var (
// CurrentExecuteCount is CurrentExecuteCount
CurrentExecuteCount atomic.Uint64
// CurrentTiFlashPushDownCount is CurrentTiFlashPushDownCount
CurrentTiFlashPushDownCount atomic.Uint64
// CurrentTiFlashExchangePushDownCount is CurrentTiFlashExchangePushDownCount
CurrentTiFlashExchangePushDownCount atomic.Uint64
// CurrentCoprCacheHitRatioGTE0Count is CurrentCoprCacheHitRatioGTE1Count
CurrentCoprCacheHitRatioGTE0Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE1Count is CurrentCoprCacheHitRatioGTE1Count
CurrentCoprCacheHitRatioGTE1Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE10Count is CurrentCoprCacheHitRatioGTE10Count
CurrentCoprCacheHitRatioGTE10Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE20Count is CurrentCoprCacheHitRatioGTE20Count
CurrentCoprCacheHitRatioGTE20Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE40Count is CurrentCoprCacheHitRatioGTE40Count
CurrentCoprCacheHitRatioGTE40Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE80Count is CurrentCoprCacheHitRatioGTE80Count
CurrentCoprCacheHitRatioGTE80Count atomic.Uint64
// CurrentCoprCacheHitRatioGTE100Count is CurrentCoprCacheHitRatioGTE100Count
CurrentCoprCacheHitRatioGTE100Count atomic.Uint64
// CurrentTiflashTableScanCount count the number of tiflash table scan and tiflash partition table scan
CurrentTiflashTableScanCount atomic.Uint64
// CurrentTiflashTableScanWithFastScanCount count the number of tiflash table scan and tiflash partition table scan which use fastscan
CurrentTiflashTableScanWithFastScanCount atomic.Uint64
)
const (
// WindowSize determines how long some data is aggregated by.
WindowSize = 1 * time.Hour
// SubWindowSize determines how often data is rotated.
SubWindowSize = 1 * time.Minute
maxSubWindowLength = int(ReportInterval / SubWindowSize) // TODO: Ceiling?
maxSubWindowLengthInWindow = int(WindowSize / SubWindowSize) // TODO: Ceiling?
promReadTimeout = time.Second * 30
)
type windowData struct {
BeginAt time.Time `json:"beginAt"`
ExecuteCount uint64 `json:"executeCount"`
TiFlashUsage tiFlashUsageData `json:"tiFlashUsage"`
CoprCacheUsage coprCacheUsageData `json:"coprCacheUsage"`
SQLUsage sqlUsageData `json:"SQLUsage"`
BuiltinFunctionsUsage map[string]uint32 `json:"builtinFunctionsUsage"`
}
type sqlType map[string]uint64
type sqlUsageData struct {
SQLTotal uint64 `json:"total"`
SQLType sqlType `json:"type"`
}
type coprCacheUsageData struct {
GTE0 uint64 `json:"gte0"`
GTE1 uint64 `json:"gte1"`
GTE10 uint64 `json:"gte10"`
GTE20 uint64 `json:"gte20"`
GTE40 uint64 `json:"gte40"`
GTE80 uint64 `json:"gte80"`
GTE100 uint64 `json:"gte100"`
}
type tiFlashUsageData struct {
PushDown uint64 `json:"pushDown"`
ExchangePushDown uint64 `json:"exchangePushDown"`
TableScan uint64 `json:"tableScan"`
TableScanWithFastScan uint64 `json:"tableScanWithFastScan"`
}
// builtinFunctionsUsageCollector collects builtin functions usage information and dump it into windowData.
type builtinFunctionsUsageCollector struct {
sync.Mutex
// Should acquire lock to access this
usageData BuiltinFunctionsUsage
}
// Merge BuiltinFunctionsUsage data
func (b *builtinFunctionsUsageCollector) Collect(usageData BuiltinFunctionsUsage) {
// TODO(leiysky): use multi-worker to collect the usage information so we can make this asynchronous
b.Lock()
defer b.Unlock()
b.usageData.Merge(usageData)
}
// Dump BuiltinFunctionsUsage data
func (b *builtinFunctionsUsageCollector) Dump() map[string]uint32 {
b.Lock()
ret := b.usageData
b.usageData = make(map[string]uint32)
b.Unlock()
return ret
}
// BuiltinFunctionsUsage is a map from ScalarFuncSig_name(string) to usage count(uint32)
type BuiltinFunctionsUsage map[string]uint32
// Inc will increase the usage count of scalar function by 1
func (b BuiltinFunctionsUsage) Inc(scalarFuncSigName string) {
v, ok := b[scalarFuncSigName]
if !ok {
b[scalarFuncSigName] = 1
} else {
b[scalarFuncSigName] = v + 1
}
}
// Merge BuiltinFunctionsUsage data
func (b BuiltinFunctionsUsage) Merge(usageData BuiltinFunctionsUsage) {
for k, v := range usageData {
prev, ok := b[k]
if !ok {
b[k] = v
} else {
b[k] = prev + v
}
}
}
// GlobalBuiltinFunctionsUsage is used to collect builtin functions usage information
var GlobalBuiltinFunctionsUsage = &builtinFunctionsUsageCollector{usageData: make(BuiltinFunctionsUsage)}
var (
rotatedSubWindows []*windowData
subWindowsLock = sync.RWMutex{}
)
func getSQLSum(sqlTypeData *sqlType) uint64 {
result := uint64(0)
for _, v := range *sqlTypeData {
result += v
}
return result
}
func readSQLMetric(timepoint time.Time, sqlResult *sqlUsageData) error {
ctx := context.TODO()
promQL := "avg(tidb_executor_statement_total{}) by (type)"
result, err := querySQLMetric(ctx, timepoint, promQL)
if err != nil {
return err
}
analysisSQLUsage(result, sqlResult)
return nil
}
func | (ctx context.Context, queryTime time.Time, promQL string) (result pmodel.Value, err error) {
// Add retry to avoid network error.
var prometheusAddr string
for i := 0; i < 5; i++ {
//TODO: the prometheus will be Integrated into the PD, then we need to query the prometheus in PD directly, which need change the quire API
prometheusAddr, err = infosync.GetPrometheusAddr()
if err == nil || err == infosync.ErrPrometheusAddrIsNotSet {
break
}
time.Sleep(100 * time.Millisecond)
}
if err != nil {
return nil, err
}
promClient, err := api.NewClient(api.Config{
Address: prometheusAddr,
})
if err != nil {
return nil, err
}
promQLAPI := promv1.NewAPI(promClient)
ctx, cancel := context.WithTimeout(ctx, promReadTimeout)
defer cancel()
// Add retry to avoid network error.
for i := 0; i < 5; i++ {
result, _, err = promQLAPI.Query(ctx, promQL, queryTime)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return result, err
}
func analysisSQLUsage(promResult pmodel.Value, sqlResult *sqlUsageData) {
if promResult == nil {
return
}
if promResult.Type() == pmodel.ValVector {
matrix := promResult.(pmodel.Vector)
for _, m := range matrix {
v := m.Value
promLable := string(m.Metric[pmodel.LabelName("type")])
sqlResult.SQLType[promLable] = uint64(v)
}
}
}
// RotateSubWindow rotates the telemetry sub window.
func RotateSubWindow() {
thisSubWindow := windowData{
BeginAt: time.Now(),
ExecuteCount: CurrentExecuteCount.Swap(0),
TiFlashUsage: tiFlashUsageData{
PushDown: CurrentTiFlashPushDownCount.Swap(0),
ExchangePushDown: CurrentTiFlashExchangePushDownCount.Swap(0),
TableScan: CurrentTiflashTableScanCount.Swap(0),
TableScanWithFastScan: CurrentTiflashTableScanWithFastScanCount.Swap(0),
},
CoprCacheUsage: coprCacheUsageData{
GTE0: CurrentCoprCacheHitRatioGTE0Count.Swap(0),
GTE1: CurrentCoprCacheHitRatioGTE1Count.Swap(0),
GTE10: CurrentCoprCacheHitRatioGTE10Count.Swap(0),
GTE20: CurrentCoprCacheHitRatioGTE20Count.Swap(0),
GTE40: CurrentCoprCacheHitRatioGTE40Count.Swap(0),
GTE80: CurrentCoprCacheHitRatioGTE80Count.Swap(0),
GTE100: CurrentCoprCacheHitRatioGTE100Count.Swap(0),
},
SQLUsage: sqlUsageData{
SQLTotal: 0,
SQLType: make(sqlType),
},
BuiltinFunctionsUsage: GlobalBuiltinFunctionsUsage.Dump(),
}
err := readSQLMetric(time.Now(), &thisSubWindow.SQLUsage)
if err != nil {
logutil.BgLogger().Info("Error exists when getting the SQL Metric.",
zap.Error(err))
}
thisSubWindow.SQLUsage.SQLTotal = getSQLSum(&thisSubWindow.SQLUsage.SQLType)
subWindowsLock.Lock()
rotatedSubWindows = append(rotatedSubWindows, &thisSubWindow)
if len(rotatedSubWindows) > maxSubWindowLength {
// Only retain last N sub windows, according to the report interval.
rotatedSubWindows = rotatedSubWindows[len(rotatedSubWindows)-maxSubWindowLength:]
}
subWindowsLock.Unlock()
}
func calDeltaSQLTypeMap(cur sqlType, last sqlType) sqlType {
deltaMap := make(sqlType)
for key, value := range cur {
deltaMap[key] = value - (last)[key]
}
return deltaMap
}
// getWindowData returns data aggregated by window size.
func getWindowData() []*windowData {
results := make([]*windowData, 0)
subWindowsLock.RLock()
i := 0
for i < len(rotatedSubWindows) {
thisWindow := *rotatedSubWindows[i]
var startWindow windowData
if i == 0 {
startWindow = thisWindow
} else {
startWindow = *rotatedSubWindows[i-1]
}
aggregatedSubWindows := 1
// Aggregate later sub windows
i++
for i < len(rotatedSubWindows) && aggregatedSubWindows < maxSubWindowLengthInWindow {
thisWindow.ExecuteCount += rotatedSubWindows[i].ExecuteCount
thisWindow.TiFlashUsage.PushDown += rotatedSubWindows[i].TiFlashUsage.PushDown
thisWindow.TiFlashUsage.ExchangePushDown += rotatedSubWindows[i].TiFlashUsage.ExchangePushDown
thisWindow.TiFlashUsage.TableScan += rotatedSubWindows[i].TiFlashUsage.TableScan
thisWindow.TiFlashUsage.TableScanWithFastScan += rotatedSubWindows[i].TiFlashUsage.TableScanWithFastScan
thisWindow.CoprCacheUsage.GTE0 += rotatedSubWindows[i].CoprCacheUsage.GTE0
thisWindow.CoprCacheUsage.GTE1 += rotatedSubWindows[i].CoprCacheUsage.GTE1
thisWindow.CoprCacheUsage.GTE10 += rotatedSubWindows[i].CoprCacheUsage.GTE10
thisWindow.CoprCacheUsage.GTE20 += rotatedSubWindows[i].CoprCacheUsage.GTE20
thisWindow.CoprCacheUsage.GTE40 += rotatedSubWindows[i].CoprCacheUsage.GTE40
thisWindow.CoprCacheUsage.GTE80 += rotatedSubWindows[i].CoprCacheUsage.GTE80
thisWindow.CoprCacheUsage.GTE100 += rotatedSubWindows[i].CoprCacheUsage.GTE100
thisWindow.SQLUsage.SQLTotal = rotatedSubWindows[i].SQLUsage.SQLTotal - startWindow.SQLUsage.SQLTotal
thisWindow.SQLUsage.SQLType = calDeltaSQLTypeMap(rotatedSubWindows[i].SQLUsage.SQLType, startWindow.SQLUsage.SQLType)
mergedBuiltinFunctionsUsage := BuiltinFunctionsUsage(thisWindow.BuiltinFunctionsUsage)
mergedBuiltinFunctionsUsage.Merge(BuiltinFunctionsUsage(rotatedSubWindows[i].BuiltinFunctionsUsage))
thisWindow.BuiltinFunctionsUsage = mergedBuiltinFunctionsUsage
aggregatedSubWindows++
i++
}
results = append(results, &thisWindow)
}
subWindowsLock.RUnlock()
return results
}
| querySQLMetric | identifier_name |
download.rs | use std::collections::{HashMap, HashSet};
use console::style;
use dialoguer::Confirm;
use fimfic_tracker::{
Config, Id, Result, SensibilityLevel, Story, StoryData, StoryStatus, StoryUpdate, TrackerError,
};
use crate::args::{Download, Prompt};
use crate::readable::ReadableDate;
use crate::Requester;
macro_rules! format_update {
(author, $before:expr => $after:expr) => {
format_update!([green] &$before, &$after)
};
(chapters, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(words, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(timestamp, $before:expr => $after:expr) => {
format_update!([yellow] ReadableDate($before), ReadableDate($after))
};
(status, $before:expr => $after:expr) => {
format_update!([yellow] $before, $after)
};
([$color:ident] $before:expr, $after:expr) => {
format_args!(
"{} {} {}",
style($before).$color(),
style("=>").cyan(),
style($after).$color().bold()
)
};
}
macro_rules! info_story_checking {
($story:expr) => {
info!("Checking for {} ...", format_story!($story));
};
}
macro_rules! info_update {
([ignored] $story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, ". Ignoring")
};
($story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, "")
};
($story:expr, $on:ident, $before:expr, $after:expr, $extra:expr) => {
info!(
"{} has an update on {} ({}){}",
format_story!($story),
stringify!($on),
format_update!($on, $before => $after),
$extra
);
};
}
#[derive(Debug)]
enum StoryDownload {
Update(Id, Story),
Forced(Id),
}
pub fn download(
config: &Config,
requester: &Requester,
story_data: &mut StoryData,
Download {
force,
prompt,
ref ids,
}: Download,
) -> Result<()> {
let selected_ids: Vec<Id> = if ids.is_empty() {
story_data.keys().cloned().collect()
} else {
story_data
.keys()
.filter(|id| ids.contains(id))
.cloned()
.collect()
};
let mut ignored_ids: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
let mut printed = false;
macro_rules! set_printed {
() => {
if !printed {
printed = true;
}
};
}
for (id, story) in story_data.iter().filter_map(|(id, story)| {
if selected_ids.contains(id) {
Some((*id, story))
} else {
None
}
}) {
if let StoryStatus::Incomplete = story.status {
continue;
}
set_printed!();
let status_notice = format!(
"{} has been marked as {} by the author",
format_story!(story),
format_status!(story)
);
match prompt {
Prompt::AssumeYes => {
info!("{}. Checking for an update on it anyways.", status_notice);
}
Prompt::AssumeNo => {
info!("{}. Skipping checking for an update on it.", status_notice);
ignored_ids.insert(id);
}
Prompt::Ask => {
let confirm = Confirm::new()
.with_prompt(format!(
"{}. Do you want to still check for an update on it?",
status_notice
))
.interact()
.map_err(|err| {
TrackerError::io(err)
.context("failed to launch overwrite confirmation prompt")
})?;
if !confirm {
ignored_ids.insert(id);
}
}
}
}
if printed {
separate!();
printed = false;
}
let mut updated_stories: HashMap<Id, Story> = HashMap::with_capacity(selected_ids.len());
let mut ids_to_download: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
for (id, story) in story_data
.iter()
.filter(|(id, _)| selected_ids.contains(id) && !ignored_ids.contains(id))
.map(|(id, story)| (*id, story))
{
info_story_checking!(story);
let updated_story: Story = requester.get_story_response(id)?.into();
let title_changed = story.title != updated_story.title;
let author_changed = story.author != updated_story.author;
let status_changed = story.status != updated_story.status;
let story_update = story.compare_to(&updated_story)?;
if story_update.is_some() || title_changed || author_changed || status_changed {
// If we are here, something will be printed to stderr. Be it by the specific cases
// just below or by the resulting StoryUpdate comparison.
set_printed!();
if title_changed || author_changed || status_changed {
clear_last_lines!();
if title_changed {
info!(
"{} has changed its title to {}",
format_story!(story),
style(&updated_story.title).green().bold()
);
}
if author_changed {
info!(
"{} has changed its author ({})",
format_story!(story),
format_update!(author, story.author => updated_story.author)
);
}
if status_changed {
info!(
"{} has changed its status ({})",
format_story!(story),
format_update!(status, story.status => updated_story.status),
);
}
// Avoid this message from being repeated twice in verbose output.
if verbose_disabled!() {
info_story_checking!(story);
}
}
updated_stories.insert(id, updated_story);
}
clear_last_lines!();
match story_update {
Some(StoryUpdate::Chapters { before, after }) => {
info_update!(story, chapters, before => after);
}
Some(StoryUpdate::Words { before, after })
if config.sensibility_level >= SensibilityLevel::IncludeWords =>
{
info_update!(story, words, before => after);
}
Some(StoryUpdate::DateTime { before, after })
if config.sensibility_level == SensibilityLevel::Anything =>
{
info_update!(story, timestamp, before => after);
}
Some(StoryUpdate::Words { before, after }) => {
info_update!([ignored] story, words, before => after);
continue;
}
Some(StoryUpdate::DateTime { before, after }) => {
info_update!([ignored] story, timestamp, before => after);
continue;
}
None => continue,
};
ids_to_download.insert(id);
}
// Update stories with ignored updates.
// This way if the downloads fail, these should be saved by the "emergency save".
//
// After this block, `updated_stories` should only contain stories whose IDs are in
// `ids_to_download`.
{
let mut updated_ids = story_data
.keys()
.filter(|id| !ids_to_download.contains(id))
.filter_map(|id| updated_stories.remove_entry(id))
.collect::<Vec<(Id, Story)>>();
debug!("Ignored updates: {:?}", &updated_ids);
for (id, story) in updated_ids.drain(..) {
story_data.insert(id, story);
}
}
if printed {
separate!();
}
if !force && ids_to_download.is_empty() {
info!("There is nothing to download");
} else if force {
progress_or_info!(
"{}",
style(format!(
"Force downloading {}",
if ids.is_empty() && ignored_ids.is_empty() {
"every story on the tracking list"
} else {
"selected stories"
}
))
.bold(),
);
separate!();
}
let use_separator = config.exec.is_some() && !config.quiet;
let delay = std::time::Duration::from_secs(config.download_delay);
let mut stories_to_download: Vec<StoryDownload> = story_data
.keys()
// Only download the stories that:
// (1) Whose IDs were given by the user if any.
// (2) The user responded to its prompt with Y.
.filter(|id| selected_ids.contains(id) && !ignored_ids.contains(id))
// Download all stories if the user forced it, otherwise only those who passed the update
// sensibility test.
.filter(|id| force || ids_to_download.contains(id))
.map(|id| match updated_stories.remove(id) {
Some(story) => StoryDownload::Update(*id, story),
None => StoryDownload::Forced(*id),
})
.collect();
debug!("Stories to download: {:?}", &stories_to_download);
for (is_first, story_download) in stories_to_download
.drain(..)
.enumerate()
.map(|(index, story_download)| (index == 0, story_download))
{
download_delay!(!is_first, use_separator, delay);
match &story_download {
StoryDownload::Update(_, story) => requester.download(story)?,
// While this should be safe to unwrap, in the unlikely event that it panics the
// "emergency save" would be skipped. | StoryDownload::Forced(id) => match story_data.get(id) {
Some(story) => requester.download(story)?,
None => warn!("{} is not present in the tracker file.", id),
},
};
// Insert the update once it downloads.
if let StoryDownload::Update(id, story) = story_download {
story_data.insert(id, story);
}
}
Ok(())
} | // So I throw in a `match` to "safely" unwrap it and throw a warning if it is not
// present. | random_line_split |
download.rs | use std::collections::{HashMap, HashSet};
use console::style;
use dialoguer::Confirm;
use fimfic_tracker::{
Config, Id, Result, SensibilityLevel, Story, StoryData, StoryStatus, StoryUpdate, TrackerError,
};
use crate::args::{Download, Prompt};
use crate::readable::ReadableDate;
use crate::Requester;
macro_rules! format_update {
(author, $before:expr => $after:expr) => {
format_update!([green] &$before, &$after)
};
(chapters, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(words, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(timestamp, $before:expr => $after:expr) => {
format_update!([yellow] ReadableDate($before), ReadableDate($after))
};
(status, $before:expr => $after:expr) => {
format_update!([yellow] $before, $after)
};
([$color:ident] $before:expr, $after:expr) => {
format_args!(
"{} {} {}",
style($before).$color(),
style("=>").cyan(),
style($after).$color().bold()
)
};
}
macro_rules! info_story_checking {
($story:expr) => {
info!("Checking for {} ...", format_story!($story));
};
}
macro_rules! info_update {
([ignored] $story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, ". Ignoring")
};
($story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, "")
};
($story:expr, $on:ident, $before:expr, $after:expr, $extra:expr) => {
info!(
"{} has an update on {} ({}){}",
format_story!($story),
stringify!($on),
format_update!($on, $before => $after),
$extra
);
};
}
#[derive(Debug)]
enum StoryDownload {
Update(Id, Story),
Forced(Id),
}
pub fn download(
config: &Config,
requester: &Requester,
story_data: &mut StoryData,
Download {
force,
prompt,
ref ids,
}: Download,
) -> Result<()> {
let selected_ids: Vec<Id> = if ids.is_empty() {
story_data.keys().cloned().collect()
} else {
story_data
.keys()
.filter(|id| ids.contains(id))
.cloned()
.collect()
};
let mut ignored_ids: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
let mut printed = false;
macro_rules! set_printed {
() => {
if !printed {
printed = true;
}
};
}
for (id, story) in story_data.iter().filter_map(|(id, story)| {
if selected_ids.contains(id) {
Some((*id, story))
} else {
None
}
}) {
if let StoryStatus::Incomplete = story.status |
set_printed!();
let status_notice = format!(
"{} has been marked as {} by the author",
format_story!(story),
format_status!(story)
);
match prompt {
Prompt::AssumeYes => {
info!("{}. Checking for an update on it anyways.", status_notice);
}
Prompt::AssumeNo => {
info!("{}. Skipping checking for an update on it.", status_notice);
ignored_ids.insert(id);
}
Prompt::Ask => {
let confirm = Confirm::new()
.with_prompt(format!(
"{}. Do you want to still check for an update on it?",
status_notice
))
.interact()
.map_err(|err| {
TrackerError::io(err)
.context("failed to launch overwrite confirmation prompt")
})?;
if !confirm {
ignored_ids.insert(id);
}
}
}
}
if printed {
separate!();
printed = false;
}
let mut updated_stories: HashMap<Id, Story> = HashMap::with_capacity(selected_ids.len());
let mut ids_to_download: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
for (id, story) in story_data
.iter()
.filter(|(id, _)| selected_ids.contains(id) && !ignored_ids.contains(id))
.map(|(id, story)| (*id, story))
{
info_story_checking!(story);
let updated_story: Story = requester.get_story_response(id)?.into();
let title_changed = story.title != updated_story.title;
let author_changed = story.author != updated_story.author;
let status_changed = story.status != updated_story.status;
let story_update = story.compare_to(&updated_story)?;
if story_update.is_some() || title_changed || author_changed || status_changed {
// If we are here, something will be printed to stderr. Be it by the specific cases
// just below or by the resulting StoryUpdate comparison.
set_printed!();
if title_changed || author_changed || status_changed {
clear_last_lines!();
if title_changed {
info!(
"{} has changed its title to {}",
format_story!(story),
style(&updated_story.title).green().bold()
);
}
if author_changed {
info!(
"{} has changed its author ({})",
format_story!(story),
format_update!(author, story.author => updated_story.author)
);
}
if status_changed {
info!(
"{} has changed its status ({})",
format_story!(story),
format_update!(status, story.status => updated_story.status),
);
}
// Avoid this message from being repeated twice in verbose output.
if verbose_disabled!() {
info_story_checking!(story);
}
}
updated_stories.insert(id, updated_story);
}
clear_last_lines!();
match story_update {
Some(StoryUpdate::Chapters { before, after }) => {
info_update!(story, chapters, before => after);
}
Some(StoryUpdate::Words { before, after })
if config.sensibility_level >= SensibilityLevel::IncludeWords =>
{
info_update!(story, words, before => after);
}
Some(StoryUpdate::DateTime { before, after })
if config.sensibility_level == SensibilityLevel::Anything =>
{
info_update!(story, timestamp, before => after);
}
Some(StoryUpdate::Words { before, after }) => {
info_update!([ignored] story, words, before => after);
continue;
}
Some(StoryUpdate::DateTime { before, after }) => {
info_update!([ignored] story, timestamp, before => after);
continue;
}
None => continue,
};
ids_to_download.insert(id);
}
// Update stories with ignored updates.
// This way if the downloads fail, these should be saved by the "emergency save".
//
// After this block, `updated_stories` should only contain stories whose IDs are in
// `ids_to_download`.
{
let mut updated_ids = story_data
.keys()
.filter(|id| !ids_to_download.contains(id))
.filter_map(|id| updated_stories.remove_entry(id))
.collect::<Vec<(Id, Story)>>();
debug!("Ignored updates: {:?}", &updated_ids);
for (id, story) in updated_ids.drain(..) {
story_data.insert(id, story);
}
}
if printed {
separate!();
}
if !force && ids_to_download.is_empty() {
info!("There is nothing to download");
} else if force {
progress_or_info!(
"{}",
style(format!(
"Force downloading {}",
if ids.is_empty() && ignored_ids.is_empty() {
"every story on the tracking list"
} else {
"selected stories"
}
))
.bold(),
);
separate!();
}
let use_separator = config.exec.is_some() && !config.quiet;
let delay = std::time::Duration::from_secs(config.download_delay);
let mut stories_to_download: Vec<StoryDownload> = story_data
.keys()
// Only download the stories that:
// (1) Whose IDs were given by the user if any.
// (2) The user responded to its prompt with Y.
.filter(|id| selected_ids.contains(id) && !ignored_ids.contains(id))
// Download all stories if the user forced it, otherwise only those who passed the update
// sensibility test.
.filter(|id| force || ids_to_download.contains(id))
.map(|id| match updated_stories.remove(id) {
Some(story) => StoryDownload::Update(*id, story),
None => StoryDownload::Forced(*id),
})
.collect();
debug!("Stories to download: {:?}", &stories_to_download);
for (is_first, story_download) in stories_to_download
.drain(..)
.enumerate()
.map(|(index, story_download)| (index == 0, story_download))
{
download_delay!(!is_first, use_separator, delay);
match &story_download {
StoryDownload::Update(_, story) => requester.download(story)?,
// While this should be safe to unwrap, in the unlikely event that it panics the
// "emergency save" would be skipped.
// So I throw in a `match` to "safely" unwrap it and throw a warning if it is not
// present.
StoryDownload::Forced(id) => match story_data.get(id) {
Some(story) => requester.download(story)?,
None => warn!("{} is not present in the tracker file.", id),
},
};
// Insert the update once it downloads.
if let StoryDownload::Update(id, story) = story_download {
story_data.insert(id, story);
}
}
Ok(())
}
| {
continue;
} | conditional_block |
download.rs | use std::collections::{HashMap, HashSet};
use console::style;
use dialoguer::Confirm;
use fimfic_tracker::{
Config, Id, Result, SensibilityLevel, Story, StoryData, StoryStatus, StoryUpdate, TrackerError,
};
use crate::args::{Download, Prompt};
use crate::readable::ReadableDate;
use crate::Requester;
macro_rules! format_update {
(author, $before:expr => $after:expr) => {
format_update!([green] &$before, &$after)
};
(chapters, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(words, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(timestamp, $before:expr => $after:expr) => {
format_update!([yellow] ReadableDate($before), ReadableDate($after))
};
(status, $before:expr => $after:expr) => {
format_update!([yellow] $before, $after)
};
([$color:ident] $before:expr, $after:expr) => {
format_args!(
"{} {} {}",
style($before).$color(),
style("=>").cyan(),
style($after).$color().bold()
)
};
}
macro_rules! info_story_checking {
($story:expr) => {
info!("Checking for {} ...", format_story!($story));
};
}
macro_rules! info_update {
([ignored] $story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, ". Ignoring")
};
($story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, "")
};
($story:expr, $on:ident, $before:expr, $after:expr, $extra:expr) => {
info!(
"{} has an update on {} ({}){}",
format_story!($story),
stringify!($on),
format_update!($on, $before => $after),
$extra
);
};
}
#[derive(Debug)]
enum StoryDownload {
Update(Id, Story),
Forced(Id),
}
pub fn download(
config: &Config,
requester: &Requester,
story_data: &mut StoryData,
Download {
force,
prompt,
ref ids,
}: Download,
) -> Result<()> | {
let selected_ids: Vec<Id> = if ids.is_empty() {
story_data.keys().cloned().collect()
} else {
story_data
.keys()
.filter(|id| ids.contains(id))
.cloned()
.collect()
};
let mut ignored_ids: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
let mut printed = false;
macro_rules! set_printed {
() => {
if !printed {
printed = true;
}
};
}
for (id, story) in story_data.iter().filter_map(|(id, story)| {
if selected_ids.contains(id) {
Some((*id, story))
} else {
None
}
}) {
if let StoryStatus::Incomplete = story.status {
continue;
}
set_printed!();
let status_notice = format!(
"{} has been marked as {} by the author",
format_story!(story),
format_status!(story)
);
match prompt {
Prompt::AssumeYes => {
info!("{}. Checking for an update on it anyways.", status_notice);
}
Prompt::AssumeNo => {
info!("{}. Skipping checking for an update on it.", status_notice);
ignored_ids.insert(id);
}
Prompt::Ask => {
let confirm = Confirm::new()
.with_prompt(format!(
"{}. Do you want to still check for an update on it?",
status_notice
))
.interact()
.map_err(|err| {
TrackerError::io(err)
.context("failed to launch overwrite confirmation prompt")
})?;
if !confirm {
ignored_ids.insert(id);
}
}
}
}
if printed {
separate!();
printed = false;
}
let mut updated_stories: HashMap<Id, Story> = HashMap::with_capacity(selected_ids.len());
let mut ids_to_download: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
for (id, story) in story_data
.iter()
.filter(|(id, _)| selected_ids.contains(id) && !ignored_ids.contains(id))
.map(|(id, story)| (*id, story))
{
info_story_checking!(story);
let updated_story: Story = requester.get_story_response(id)?.into();
let title_changed = story.title != updated_story.title;
let author_changed = story.author != updated_story.author;
let status_changed = story.status != updated_story.status;
let story_update = story.compare_to(&updated_story)?;
if story_update.is_some() || title_changed || author_changed || status_changed {
// If we are here, something will be printed to stderr. Be it by the specific cases
// just below or by the resulting StoryUpdate comparison.
set_printed!();
if title_changed || author_changed || status_changed {
clear_last_lines!();
if title_changed {
info!(
"{} has changed its title to {}",
format_story!(story),
style(&updated_story.title).green().bold()
);
}
if author_changed {
info!(
"{} has changed its author ({})",
format_story!(story),
format_update!(author, story.author => updated_story.author)
);
}
if status_changed {
info!(
"{} has changed its status ({})",
format_story!(story),
format_update!(status, story.status => updated_story.status),
);
}
// Avoid this message from being repeated twice in verbose output.
if verbose_disabled!() {
info_story_checking!(story);
}
}
updated_stories.insert(id, updated_story);
}
clear_last_lines!();
match story_update {
Some(StoryUpdate::Chapters { before, after }) => {
info_update!(story, chapters, before => after);
}
Some(StoryUpdate::Words { before, after })
if config.sensibility_level >= SensibilityLevel::IncludeWords =>
{
info_update!(story, words, before => after);
}
Some(StoryUpdate::DateTime { before, after })
if config.sensibility_level == SensibilityLevel::Anything =>
{
info_update!(story, timestamp, before => after);
}
Some(StoryUpdate::Words { before, after }) => {
info_update!([ignored] story, words, before => after);
continue;
}
Some(StoryUpdate::DateTime { before, after }) => {
info_update!([ignored] story, timestamp, before => after);
continue;
}
None => continue,
};
ids_to_download.insert(id);
}
// Update stories with ignored updates.
// This way if the downloads fail, these should be saved by the "emergency save".
//
// After this block, `updated_stories` should only contain stories whose IDs are in
// `ids_to_download`.
{
let mut updated_ids = story_data
.keys()
.filter(|id| !ids_to_download.contains(id))
.filter_map(|id| updated_stories.remove_entry(id))
.collect::<Vec<(Id, Story)>>();
debug!("Ignored updates: {:?}", &updated_ids);
for (id, story) in updated_ids.drain(..) {
story_data.insert(id, story);
}
}
if printed {
separate!();
}
if !force && ids_to_download.is_empty() {
info!("There is nothing to download");
} else if force {
progress_or_info!(
"{}",
style(format!(
"Force downloading {}",
if ids.is_empty() && ignored_ids.is_empty() {
"every story on the tracking list"
} else {
"selected stories"
}
))
.bold(),
);
separate!();
}
let use_separator = config.exec.is_some() && !config.quiet;
let delay = std::time::Duration::from_secs(config.download_delay);
let mut stories_to_download: Vec<StoryDownload> = story_data
.keys()
// Only download the stories that:
// (1) Whose IDs were given by the user if any.
// (2) The user responded to its prompt with Y.
.filter(|id| selected_ids.contains(id) && !ignored_ids.contains(id))
// Download all stories if the user forced it, otherwise only those who passed the update
// sensibility test.
.filter(|id| force || ids_to_download.contains(id))
.map(|id| match updated_stories.remove(id) {
Some(story) => StoryDownload::Update(*id, story),
None => StoryDownload::Forced(*id),
})
.collect();
debug!("Stories to download: {:?}", &stories_to_download);
for (is_first, story_download) in stories_to_download
.drain(..)
.enumerate()
.map(|(index, story_download)| (index == 0, story_download))
{
download_delay!(!is_first, use_separator, delay);
match &story_download {
StoryDownload::Update(_, story) => requester.download(story)?,
// While this should be safe to unwrap, in the unlikely event that it panics the
// "emergency save" would be skipped.
// So I throw in a `match` to "safely" unwrap it and throw a warning if it is not
// present.
StoryDownload::Forced(id) => match story_data.get(id) {
Some(story) => requester.download(story)?,
None => warn!("{} is not present in the tracker file.", id),
},
};
// Insert the update once it downloads.
if let StoryDownload::Update(id, story) = story_download {
story_data.insert(id, story);
}
}
Ok(())
} | identifier_body | |
download.rs | use std::collections::{HashMap, HashSet};
use console::style;
use dialoguer::Confirm;
use fimfic_tracker::{
Config, Id, Result, SensibilityLevel, Story, StoryData, StoryStatus, StoryUpdate, TrackerError,
};
use crate::args::{Download, Prompt};
use crate::readable::ReadableDate;
use crate::Requester;
macro_rules! format_update {
(author, $before:expr => $after:expr) => {
format_update!([green] &$before, &$after)
};
(chapters, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(words, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(timestamp, $before:expr => $after:expr) => {
format_update!([yellow] ReadableDate($before), ReadableDate($after))
};
(status, $before:expr => $after:expr) => {
format_update!([yellow] $before, $after)
};
([$color:ident] $before:expr, $after:expr) => {
format_args!(
"{} {} {}",
style($before).$color(),
style("=>").cyan(),
style($after).$color().bold()
)
};
}
macro_rules! info_story_checking {
($story:expr) => {
info!("Checking for {} ...", format_story!($story));
};
}
macro_rules! info_update {
([ignored] $story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, ". Ignoring")
};
($story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, "")
};
($story:expr, $on:ident, $before:expr, $after:expr, $extra:expr) => {
info!(
"{} has an update on {} ({}){}",
format_story!($story),
stringify!($on),
format_update!($on, $before => $after),
$extra
);
};
}
#[derive(Debug)]
enum | {
Update(Id, Story),
Forced(Id),
}
pub fn download(
config: &Config,
requester: &Requester,
story_data: &mut StoryData,
Download {
force,
prompt,
ref ids,
}: Download,
) -> Result<()> {
let selected_ids: Vec<Id> = if ids.is_empty() {
story_data.keys().cloned().collect()
} else {
story_data
.keys()
.filter(|id| ids.contains(id))
.cloned()
.collect()
};
let mut ignored_ids: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
let mut printed = false;
macro_rules! set_printed {
() => {
if !printed {
printed = true;
}
};
}
for (id, story) in story_data.iter().filter_map(|(id, story)| {
if selected_ids.contains(id) {
Some((*id, story))
} else {
None
}
}) {
if let StoryStatus::Incomplete = story.status {
continue;
}
set_printed!();
let status_notice = format!(
"{} has been marked as {} by the author",
format_story!(story),
format_status!(story)
);
match prompt {
Prompt::AssumeYes => {
info!("{}. Checking for an update on it anyways.", status_notice);
}
Prompt::AssumeNo => {
info!("{}. Skipping checking for an update on it.", status_notice);
ignored_ids.insert(id);
}
Prompt::Ask => {
let confirm = Confirm::new()
.with_prompt(format!(
"{}. Do you want to still check for an update on it?",
status_notice
))
.interact()
.map_err(|err| {
TrackerError::io(err)
.context("failed to launch overwrite confirmation prompt")
})?;
if !confirm {
ignored_ids.insert(id);
}
}
}
}
if printed {
separate!();
printed = false;
}
let mut updated_stories: HashMap<Id, Story> = HashMap::with_capacity(selected_ids.len());
let mut ids_to_download: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
for (id, story) in story_data
.iter()
.filter(|(id, _)| selected_ids.contains(id) && !ignored_ids.contains(id))
.map(|(id, story)| (*id, story))
{
info_story_checking!(story);
let updated_story: Story = requester.get_story_response(id)?.into();
let title_changed = story.title != updated_story.title;
let author_changed = story.author != updated_story.author;
let status_changed = story.status != updated_story.status;
let story_update = story.compare_to(&updated_story)?;
if story_update.is_some() || title_changed || author_changed || status_changed {
// If we are here, something will be printed to stderr. Be it by the specific cases
// just below or by the resulting StoryUpdate comparison.
set_printed!();
if title_changed || author_changed || status_changed {
clear_last_lines!();
if title_changed {
info!(
"{} has changed its title to {}",
format_story!(story),
style(&updated_story.title).green().bold()
);
}
if author_changed {
info!(
"{} has changed its author ({})",
format_story!(story),
format_update!(author, story.author => updated_story.author)
);
}
if status_changed {
info!(
"{} has changed its status ({})",
format_story!(story),
format_update!(status, story.status => updated_story.status),
);
}
// Avoid this message from being repeated twice in verbose output.
if verbose_disabled!() {
info_story_checking!(story);
}
}
updated_stories.insert(id, updated_story);
}
clear_last_lines!();
match story_update {
Some(StoryUpdate::Chapters { before, after }) => {
info_update!(story, chapters, before => after);
}
Some(StoryUpdate::Words { before, after })
if config.sensibility_level >= SensibilityLevel::IncludeWords =>
{
info_update!(story, words, before => after);
}
Some(StoryUpdate::DateTime { before, after })
if config.sensibility_level == SensibilityLevel::Anything =>
{
info_update!(story, timestamp, before => after);
}
Some(StoryUpdate::Words { before, after }) => {
info_update!([ignored] story, words, before => after);
continue;
}
Some(StoryUpdate::DateTime { before, after }) => {
info_update!([ignored] story, timestamp, before => after);
continue;
}
None => continue,
};
ids_to_download.insert(id);
}
// Update stories with ignored updates.
// This way if the downloads fail, these should be saved by the "emergency save".
//
// After this block, `updated_stories` should only contain stories whose IDs are in
// `ids_to_download`.
{
let mut updated_ids = story_data
.keys()
.filter(|id| !ids_to_download.contains(id))
.filter_map(|id| updated_stories.remove_entry(id))
.collect::<Vec<(Id, Story)>>();
debug!("Ignored updates: {:?}", &updated_ids);
for (id, story) in updated_ids.drain(..) {
story_data.insert(id, story);
}
}
if printed {
separate!();
}
if !force && ids_to_download.is_empty() {
info!("There is nothing to download");
} else if force {
progress_or_info!(
"{}",
style(format!(
"Force downloading {}",
if ids.is_empty() && ignored_ids.is_empty() {
"every story on the tracking list"
} else {
"selected stories"
}
))
.bold(),
);
separate!();
}
let use_separator = config.exec.is_some() && !config.quiet;
let delay = std::time::Duration::from_secs(config.download_delay);
let mut stories_to_download: Vec<StoryDownload> = story_data
.keys()
// Only download the stories that:
// (1) Whose IDs were given by the user if any.
// (2) The user responded to its prompt with Y.
.filter(|id| selected_ids.contains(id) && !ignored_ids.contains(id))
// Download all stories if the user forced it, otherwise only those who passed the update
// sensibility test.
.filter(|id| force || ids_to_download.contains(id))
.map(|id| match updated_stories.remove(id) {
Some(story) => StoryDownload::Update(*id, story),
None => StoryDownload::Forced(*id),
})
.collect();
debug!("Stories to download: {:?}", &stories_to_download);
for (is_first, story_download) in stories_to_download
.drain(..)
.enumerate()
.map(|(index, story_download)| (index == 0, story_download))
{
download_delay!(!is_first, use_separator, delay);
match &story_download {
StoryDownload::Update(_, story) => requester.download(story)?,
// While this should be safe to unwrap, in the unlikely event that it panics the
// "emergency save" would be skipped.
// So I throw in a `match` to "safely" unwrap it and throw a warning if it is not
// present.
StoryDownload::Forced(id) => match story_data.get(id) {
Some(story) => requester.download(story)?,
None => warn!("{} is not present in the tracker file.", id),
},
};
// Insert the update once it downloads.
if let StoryDownload::Update(id, story) = story_download {
story_data.insert(id, story);
}
}
Ok(())
}
| StoryDownload | identifier_name |
asnd.rs | //! The ``asnd`` module of ``ogc-rs``.
//!
//! This module implements a safe wrapper around the audio functions found in ``asndlib.h``.
use crate::{ffi, OgcError, Result};
use alloc::format;
use core::time::Duration;
macro_rules! if_not {
($valid:ident => $error_output:expr, $var:ident $(,)*) => {
if $var == ffi::$valid as _ {
Ok(())
} else {
Err(OgcError::Audio(format!($error_output, $var)))
}
};
}
/// Voice Options Callback Type
pub type VoiceOptionsCallback = Option<unsafe extern "C" fn(i32)>;
/// Options to be passed when creating a new voice.
///
/// # Examples
///
/// Create `VoiceOptions` with voice slot 2 and format Mono16Bit:
///
/// ```rust
/// let options = VoiceOptions::new().voice(2).format(VoiceFormat::Mono16Bit);
/// ```
pub struct VoiceOptions {
voice: u32,
format: VoiceFormat,
pitch: u32,
delay: u32,
volume_left: u8,
volume_right: u8,
callback: VoiceOptionsCallback,
}
impl Default for VoiceOptions {
fn default() -> Self {
VoiceOptions::new()
}
}
impl VoiceOptions {
/// Create this struct with sensible default values.
pub fn new() -> Self {
Self {
voice: 0,
format: VoiceFormat::Stereo16Bit,
pitch: 48000,
delay: 0,
volume_left: 255,
volume_right: 255,
callback: None,
}
}
/// Voice slot to use for this sound. Valid values are `0..16` non-inclusive.
#[must_use]
pub fn voice(mut self, voice: u32) -> Self {
assert!(voice < 16, "Voice index {} is >= 16", voice);
self.voice = voice;
self
}
/// Format to use for this sound.
#[must_use]
pub fn format(mut self, format: VoiceFormat) -> Self {
self.format = format;
self
}
/// Frequency to use, in Hz.
#[must_use]
pub fn pitch(mut self, pitch: u32) -> Self {
self.pitch = pitch;
self
}
/// Delay to wait before playing, in milliseconds.
#[must_use]
pub fn delay(mut self, delay: u32) -> Self {
self.delay = delay;
self
}
/// Voice volume of the left channel.
#[must_use]
pub fn volume_left(mut self, volume_left: u8) -> Self {
self.volume_left = volume_left;
self
}
/// Voice volume of the right channel.
#[must_use]
pub fn volume_right(mut self, volume_right: u8) -> Self {
self.volume_right = volume_right;
self
}
/// Optional callback function to use.
#[must_use]
pub fn callback(mut self, callback: Option<unsafe extern "C" fn(i32)>) -> Self {
self.callback = callback;
self
}
}
/// Source voice format.
pub enum VoiceFormat {
Mono8Bit,
Mono16Bit,
Mono16BitBe,
Stereo8Bit,
Stereo16Bit,
Stereo16BitBe,
Mono8BitU,
Mono16BitLE,
Stereo8BitU,
Stereo16BitLe,
}
impl VoiceFormat {
fn as_i32(&self) -> i32 {
match self {
VoiceFormat::Mono8Bit => 0,
VoiceFormat::Mono16Bit => 1,
VoiceFormat::Mono16BitBe => 1,
VoiceFormat::Stereo8Bit => 2,
VoiceFormat::Stereo16Bit => 3,
VoiceFormat::Stereo16BitBe => 3,
VoiceFormat::Mono8BitU => 4,
VoiceFormat::Mono16BitLE => 5,
VoiceFormat::Stereo8BitU => 6,
VoiceFormat::Stereo16BitLe => 7,
}
}
}
/// Represents the asnd service.
/// This service can only be created once!
/// If you use `Asnd::init()`, you cannot do `Audio::init()`.
/// Only one of them can be used at a time.
pub struct Asnd;
/// Implementation of the asnd service.
impl Asnd {
/// Initializes the asnd lib and fixes the hardware sample rate to 48000hz.
pub fn init() -> Self {
unsafe {
ffi::ASND_Init();
}
Self
}
/// De-initializes the asnd lib. This is also called when `Asnd` gets dropped.
pub fn end() {
unsafe {
ffi::ASND_End();
}
}
/// Pauses if true and resumes if false.
pub fn pause(should_pause: bool) {
unsafe {
ffi::ASND_Pause(should_pause as i32);
}
}
/// Returns true if paused, false if not paused.
pub fn is_paused() -> bool {
unsafe { ffi::ASND_Is_Paused() > 0 }
}
/// Returns the global time in milliseconds. Time is updated from the IRQ.
pub fn get_time() -> u32 {
unsafe { ffi::ASND_GetTime() }
}
/// Returns the global sample counter. Can be used to implement timers with high precision.
pub fn | () -> u32 {
unsafe { ffi::ASND_GetSampleCounter() }
}
/// Returns the samples sent from the IRQ in one tick.
pub fn get_samples_per_tick() -> u32 {
unsafe { ffi::ASND_GetSamplesPerTick() }
}
/// Sets the global time, in milliseconds.
pub fn set_time(time: u32) {
unsafe {
ffi::ASND_SetTime(time);
}
}
/// Sets a global callback for general purposes. It is called by the IRQ.
pub fn set_callback<F>(callback: Option<unsafe extern "C" fn()>) {
unsafe {
ffi::ASND_SetCallback(callback);
}
}
/// Returs the current audio rate. Default is 48000hz.
pub fn get_audio_rate() -> i32 {
unsafe { ffi::ASND_GetAudioRate() }
}
/// Sets a PCM voice to play. This function stops one previous voice. Use
/// `Asnd::status_voice()` to test status. The voices are played in 16-bit stereo,
/// regardless of source format. The buffer MUST be aligned and padded to 32 bytes.
pub fn set_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
options.callback,
)
};
if_not!(SND_OK => "Asnd::set_voice() failed with error {}!", err)
}
/// Sets a PCM voice to play infinitely. See `Asnd::set_voice()` as it is largely identical.
/// The buffer MUST be aligned and padded to 32 bytes.
pub fn set_infinite_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetInfiniteVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
)
};
if_not!(SND_OK => "Asnd::set_infinite_voice() failed with error {}", err)
}
/// Adds a PCM voice to play from the second buffer. Sound buffer must be 32-byte
/// aligned and have same sample format as first buffer. This must only be called after
/// `Asnd::set_voice()`, which must return `Ok()`.
/// The buffer MUST be aligned and padded to 32 bytes.
fn add_voice(voice: u32, sound_buffer: &mut [u8]) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_AddVoice(
voice as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
)
};
if_not!(SND_OK => "Asnd::add_voice() failed with error {}", err)
}
/// Stops the selected voice. If the voice is used in song mode, you need to
/// assign the samples with `Asnd::set_song_sample_voice()`.
pub fn stop_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StopVoice(voice as i32) };
if_not!(SND_OK => "Asnd::stop_voice() failed with error {}", err)
}
/// Pauses the selected voice. Can also be used to resume voice.
pub fn pause_voice(voice: u32, pause: bool) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_PauseVoice(voice as i32, pause as i32) };
if_not!(SND_OK => "Asnd::pause_voice() failed with error {}", err)
}
/// Returns the state of the selected voice.
pub fn status_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StatusVoice(voice as i32) };
if_not!(SND_WORKING => "Asnd::status_voice() failed with error {}", err)
}
/// Returns the first unused voice. Fails if no voices are available.
pub fn get_first_unused_voice() -> Result<u32> {
let err = unsafe { ffi::ASND_GetFirstUnusedVoice() };
match err {
x if x < 16 => Ok(x as u32),
_ => Err(OgcError::Audio(format!(
"Asnd::get_first_unused_voice() failed with error {}",
err
))),
}
}
/// Changes the voice-pitch in real time. This function can be used to
/// create audio effects such as Doppler effect simulation.
pub fn change_pitch_voice(voice: u32, pitch: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_ChangePitchVoice(voice as i32, pitch as i32) };
if_not!(SND_OK => "Asnd::change_pitch_voice() failed with error {}", err)
}
/// Changes the voice volume in real time. This function can be used to create
/// audio effects like distance attenuation.
pub fn change_volume_voice(voice: u32, volume_left: u8, volume_right: u8) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe {
ffi::ASND_ChangeVolumeVoice(voice as i32, volume_left as i32, volume_right as i32)
};
if_not!(SND_OK => "Asnd::change_volume_voice() failed with error {}", err)
}
/// Returns the voice tick counter. This value represents the number of ticks
/// since this voice started to play, sans delay time. If the lib is initialized with
/// `INIT_RATE=48000`, a return value of 24000 is equal to 0.5 seconds.
pub fn get_tick_counter_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTickCounterVoice(voice as i32) }
}
/// Returns the voice playback time. This value represents the time in milliseconds
/// since this voice started playing.
pub fn get_timer_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTimerVoice(voice as i32) }
}
/// Tests if a pointer is in use by a voice as a buffer.
/// This must be the same pointer sent to `Asnd::add_voice()` or `Asnd::set_voice()`.
/// Returns 0 if the pointer is unused.
/// Returns 1 if the pointer is used as a buffer.
/// Returns `ogc_sys::SND_INVALID` if invalid.
pub fn test_pointer<T>(voice: u32, pointer: *mut T) -> i32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestPointer(voice as i32, pointer as *mut _) }
}
/// Tests to determine if the voice is ready to receive a new buffer sample
/// with `Asnd::add_voice()`. Returns true if voice is ready.
pub fn test_voice_buffer_ready(voice: u32) -> bool {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestVoiceBufferReady(voice as i32) > 0 }
}
/// Returns the DSP usage, in percent `(0..=100)`.
pub fn get_dsp_percent_use() -> u32 {
unsafe { ffi::ASND_GetDSP_PercentUse() }
}
/// Returns DSP process time, in nano seconds.
pub fn get_dsp_process_time() -> Duration {
unsafe { Duration::from_nanos(ffi::ASND_GetDSP_ProcessTime().into()) }
}
fn validate_buffer(sound_buffer: &mut [u8]) {
assert_eq!(
0,
sound_buffer.as_ptr().align_offset(32),
"Data is not aligned correctly."
);
assert_eq!(
0,
sound_buffer.len() % 32,
"Data length is not a multiple of 32."
);
}
}
impl Drop for Asnd {
fn drop(&mut self) {
Self::end();
}
}
| get_sample_counter | identifier_name |
asnd.rs | //! The ``asnd`` module of ``ogc-rs``.
//!
//! This module implements a safe wrapper around the audio functions found in ``asndlib.h``.
use crate::{ffi, OgcError, Result};
use alloc::format;
use core::time::Duration;
macro_rules! if_not {
($valid:ident => $error_output:expr, $var:ident $(,)*) => {
if $var == ffi::$valid as _ {
Ok(())
} else {
Err(OgcError::Audio(format!($error_output, $var)))
}
};
}
/// Voice Options Callback Type
pub type VoiceOptionsCallback = Option<unsafe extern "C" fn(i32)>;
/// Options to be passed when creating a new voice.
///
/// # Examples
///
/// Create `VoiceOptions` with voice slot 2 and format Mono16Bit:
///
/// ```rust
/// let options = VoiceOptions::new().voice(2).format(VoiceFormat::Mono16Bit);
/// ```
pub struct VoiceOptions {
voice: u32,
format: VoiceFormat,
pitch: u32,
delay: u32,
volume_left: u8,
volume_right: u8,
callback: VoiceOptionsCallback,
}
impl Default for VoiceOptions {
fn default() -> Self {
VoiceOptions::new()
}
}
impl VoiceOptions {
/// Create this struct with sensible default values.
pub fn new() -> Self {
Self {
voice: 0,
format: VoiceFormat::Stereo16Bit,
pitch: 48000,
delay: 0,
volume_left: 255,
volume_right: 255,
callback: None,
}
}
/// Voice slot to use for this sound. Valid values are `0..16` non-inclusive.
#[must_use]
pub fn voice(mut self, voice: u32) -> Self {
assert!(voice < 16, "Voice index {} is >= 16", voice);
self.voice = voice;
self
}
/// Format to use for this sound.
#[must_use]
pub fn format(mut self, format: VoiceFormat) -> Self {
self.format = format;
self
}
/// Frequency to use, in Hz.
#[must_use]
pub fn pitch(mut self, pitch: u32) -> Self {
self.pitch = pitch;
self
}
/// Delay to wait before playing, in milliseconds.
#[must_use]
pub fn delay(mut self, delay: u32) -> Self {
self.delay = delay;
self
}
/// Voice volume of the left channel.
#[must_use]
pub fn volume_left(mut self, volume_left: u8) -> Self {
self.volume_left = volume_left;
self
}
/// Voice volume of the right channel.
#[must_use]
pub fn volume_right(mut self, volume_right: u8) -> Self {
self.volume_right = volume_right;
self
}
/// Optional callback function to use.
#[must_use]
pub fn callback(mut self, callback: Option<unsafe extern "C" fn(i32)>) -> Self {
self.callback = callback;
self
}
}
/// Source voice format.
pub enum VoiceFormat {
Mono8Bit,
Mono16Bit,
Mono16BitBe,
Stereo8Bit,
Stereo16Bit,
Stereo16BitBe,
Mono8BitU,
Mono16BitLE,
Stereo8BitU,
Stereo16BitLe,
}
impl VoiceFormat {
fn as_i32(&self) -> i32 {
match self {
VoiceFormat::Mono8Bit => 0,
VoiceFormat::Mono16Bit => 1,
VoiceFormat::Mono16BitBe => 1,
VoiceFormat::Stereo8Bit => 2,
VoiceFormat::Stereo16Bit => 3,
VoiceFormat::Stereo16BitBe => 3,
VoiceFormat::Mono8BitU => 4,
VoiceFormat::Mono16BitLE => 5,
VoiceFormat::Stereo8BitU => 6,
VoiceFormat::Stereo16BitLe => 7,
}
}
}
/// Represents the asnd service.
/// This service can only be created once!
/// If you use `Asnd::init()`, you cannot do `Audio::init()`.
/// Only one of them can be used at a time.
pub struct Asnd;
/// Implementation of the asnd service.
impl Asnd {
/// Initializes the asnd lib and fixes the hardware sample rate to 48000hz.
pub fn init() -> Self {
unsafe {
ffi::ASND_Init();
}
Self
}
/// De-initializes the asnd lib. This is also called when `Asnd` gets dropped.
pub fn end() {
unsafe {
ffi::ASND_End();
}
}
/// Pauses if true and resumes if false.
pub fn pause(should_pause: bool) {
unsafe {
ffi::ASND_Pause(should_pause as i32);
}
}
/// Returns true if paused, false if not paused.
pub fn is_paused() -> bool {
unsafe { ffi::ASND_Is_Paused() > 0 }
}
/// Returns the global time in milliseconds. Time is updated from the IRQ.
pub fn get_time() -> u32 {
unsafe { ffi::ASND_GetTime() }
}
/// Returns the global sample counter. Can be used to implement timers with high precision.
pub fn get_sample_counter() -> u32 {
unsafe { ffi::ASND_GetSampleCounter() }
}
/// Returns the samples sent from the IRQ in one tick.
pub fn get_samples_per_tick() -> u32 {
unsafe { ffi::ASND_GetSamplesPerTick() }
}
/// Sets the global time, in milliseconds.
pub fn set_time(time: u32) {
unsafe {
ffi::ASND_SetTime(time);
}
}
/// Sets a global callback for general purposes. It is called by the IRQ.
pub fn set_callback<F>(callback: Option<unsafe extern "C" fn()>) {
unsafe {
ffi::ASND_SetCallback(callback);
}
}
/// Returs the current audio rate. Default is 48000hz.
pub fn get_audio_rate() -> i32 {
unsafe { ffi::ASND_GetAudioRate() }
}
/// Sets a PCM voice to play. This function stops one previous voice. Use
/// `Asnd::status_voice()` to test status. The voices are played in 16-bit stereo,
/// regardless of source format. The buffer MUST be aligned and padded to 32 bytes.
pub fn set_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
options.callback,
)
};
if_not!(SND_OK => "Asnd::set_voice() failed with error {}!", err)
}
/// Sets a PCM voice to play infinitely. See `Asnd::set_voice()` as it is largely identical.
/// The buffer MUST be aligned and padded to 32 bytes.
pub fn set_infinite_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetInfiniteVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
)
};
if_not!(SND_OK => "Asnd::set_infinite_voice() failed with error {}", err)
}
/// Adds a PCM voice to play from the second buffer. Sound buffer must be 32-byte
/// aligned and have same sample format as first buffer. This must only be called after
/// `Asnd::set_voice()`, which must return `Ok()`.
/// The buffer MUST be aligned and padded to 32 bytes.
fn add_voice(voice: u32, sound_buffer: &mut [u8]) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_AddVoice(
voice as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
)
};
if_not!(SND_OK => "Asnd::add_voice() failed with error {}", err)
}
/// Stops the selected voice. If the voice is used in song mode, you need to
/// assign the samples with `Asnd::set_song_sample_voice()`.
pub fn stop_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StopVoice(voice as i32) };
if_not!(SND_OK => "Asnd::stop_voice() failed with error {}", err)
}
/// Pauses the selected voice. Can also be used to resume voice.
pub fn pause_voice(voice: u32, pause: bool) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_PauseVoice(voice as i32, pause as i32) };
if_not!(SND_OK => "Asnd::pause_voice() failed with error {}", err)
}
/// Returns the state of the selected voice.
pub fn status_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StatusVoice(voice as i32) };
if_not!(SND_WORKING => "Asnd::status_voice() failed with error {}", err)
}
/// Returns the first unused voice. Fails if no voices are available.
pub fn get_first_unused_voice() -> Result<u32> {
let err = unsafe { ffi::ASND_GetFirstUnusedVoice() };
match err {
x if x < 16 => Ok(x as u32),
_ => Err(OgcError::Audio(format!(
"Asnd::get_first_unused_voice() failed with error {}",
err
))),
}
}
/// Changes the voice-pitch in real time. This function can be used to
/// create audio effects such as Doppler effect simulation.
pub fn change_pitch_voice(voice: u32, pitch: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_ChangePitchVoice(voice as i32, pitch as i32) };
if_not!(SND_OK => "Asnd::change_pitch_voice() failed with error {}", err)
}
/// Changes the voice volume in real time. This function can be used to create
/// audio effects like distance attenuation.
pub fn change_volume_voice(voice: u32, volume_left: u8, volume_right: u8) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe {
ffi::ASND_ChangeVolumeVoice(voice as i32, volume_left as i32, volume_right as i32)
};
if_not!(SND_OK => "Asnd::change_volume_voice() failed with error {}", err)
}
/// Returns the voice tick counter. This value represents the number of ticks
/// since this voice started to play, sans delay time. If the lib is initialized with
/// `INIT_RATE=48000`, a return value of 24000 is equal to 0.5 seconds.
pub fn get_tick_counter_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTickCounterVoice(voice as i32) }
}
/// Returns the voice playback time. This value represents the time in milliseconds
/// since this voice started playing.
pub fn get_timer_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTimerVoice(voice as i32) }
}
/// Tests if a pointer is in use by a voice as a buffer.
/// This must be the same pointer sent to `Asnd::add_voice()` or `Asnd::set_voice()`.
/// Returns 0 if the pointer is unused.
/// Returns 1 if the pointer is used as a buffer.
/// Returns `ogc_sys::SND_INVALID` if invalid.
pub fn test_pointer<T>(voice: u32, pointer: *mut T) -> i32 |
/// Tests to determine if the voice is ready to receive a new buffer sample
/// with `Asnd::add_voice()`. Returns true if voice is ready.
pub fn test_voice_buffer_ready(voice: u32) -> bool {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestVoiceBufferReady(voice as i32) > 0 }
}
/// Returns the DSP usage, in percent `(0..=100)`.
pub fn get_dsp_percent_use() -> u32 {
unsafe { ffi::ASND_GetDSP_PercentUse() }
}
/// Returns DSP process time, in nano seconds.
pub fn get_dsp_process_time() -> Duration {
unsafe { Duration::from_nanos(ffi::ASND_GetDSP_ProcessTime().into()) }
}
fn validate_buffer(sound_buffer: &mut [u8]) {
assert_eq!(
0,
sound_buffer.as_ptr().align_offset(32),
"Data is not aligned correctly."
);
assert_eq!(
0,
sound_buffer.len() % 32,
"Data length is not a multiple of 32."
);
}
}
impl Drop for Asnd {
fn drop(&mut self) {
Self::end();
}
}
| {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestPointer(voice as i32, pointer as *mut _) }
} | identifier_body |
asnd.rs | //! The ``asnd`` module of ``ogc-rs``.
//!
//! This module implements a safe wrapper around the audio functions found in ``asndlib.h``.
use crate::{ffi, OgcError, Result};
use alloc::format;
use core::time::Duration;
macro_rules! if_not {
($valid:ident => $error_output:expr, $var:ident $(,)*) => {
if $var == ffi::$valid as _ {
Ok(())
} else {
Err(OgcError::Audio(format!($error_output, $var)))
}
};
}
/// Voice Options Callback Type
pub type VoiceOptionsCallback = Option<unsafe extern "C" fn(i32)>;
/// Options to be passed when creating a new voice.
///
/// # Examples
///
/// Create `VoiceOptions` with voice slot 2 and format Mono16Bit:
///
/// ```rust
/// let options = VoiceOptions::new().voice(2).format(VoiceFormat::Mono16Bit);
/// ```
pub struct VoiceOptions {
voice: u32,
format: VoiceFormat,
pitch: u32,
delay: u32,
volume_left: u8,
volume_right: u8,
callback: VoiceOptionsCallback,
}
impl Default for VoiceOptions {
fn default() -> Self {
VoiceOptions::new()
}
}
impl VoiceOptions {
/// Create this struct with sensible default values.
pub fn new() -> Self {
Self {
voice: 0,
format: VoiceFormat::Stereo16Bit,
pitch: 48000,
delay: 0,
volume_left: 255,
volume_right: 255,
callback: None,
}
}
/// Voice slot to use for this sound. Valid values are `0..16` non-inclusive.
#[must_use]
pub fn voice(mut self, voice: u32) -> Self {
assert!(voice < 16, "Voice index {} is >= 16", voice);
self.voice = voice;
self
}
/// Format to use for this sound.
#[must_use]
pub fn format(mut self, format: VoiceFormat) -> Self {
self.format = format;
self
}
/// Frequency to use, in Hz.
#[must_use]
pub fn pitch(mut self, pitch: u32) -> Self {
self.pitch = pitch;
self
}
/// Delay to wait before playing, in milliseconds.
#[must_use]
pub fn delay(mut self, delay: u32) -> Self {
self.delay = delay;
self
}
/// Voice volume of the left channel.
#[must_use]
pub fn volume_left(mut self, volume_left: u8) -> Self {
self.volume_left = volume_left;
self
}
/// Voice volume of the right channel.
#[must_use]
pub fn volume_right(mut self, volume_right: u8) -> Self {
self.volume_right = volume_right;
self
}
/// Optional callback function to use.
#[must_use]
pub fn callback(mut self, callback: Option<unsafe extern "C" fn(i32)>) -> Self {
self.callback = callback;
self
}
}
/// Source voice format.
pub enum VoiceFormat {
Mono8Bit,
Mono16Bit,
Mono16BitBe,
Stereo8Bit,
Stereo16Bit,
Stereo16BitBe,
Mono8BitU,
Mono16BitLE,
Stereo8BitU,
Stereo16BitLe,
}
impl VoiceFormat {
fn as_i32(&self) -> i32 {
match self {
VoiceFormat::Mono8Bit => 0,
VoiceFormat::Mono16Bit => 1,
VoiceFormat::Mono16BitBe => 1,
VoiceFormat::Stereo8Bit => 2,
VoiceFormat::Stereo16Bit => 3,
VoiceFormat::Stereo16BitBe => 3,
VoiceFormat::Mono8BitU => 4,
VoiceFormat::Mono16BitLE => 5,
VoiceFormat::Stereo8BitU => 6,
VoiceFormat::Stereo16BitLe => 7,
}
}
}
/// Represents the asnd service.
/// This service can only be created once!
/// If you use `Asnd::init()`, you cannot do `Audio::init()`.
/// Only one of them can be used at a time.
pub struct Asnd;
/// Implementation of the asnd service.
impl Asnd {
/// Initializes the asnd lib and fixes the hardware sample rate to 48000hz.
pub fn init() -> Self {
unsafe {
ffi::ASND_Init();
}
Self
}
/// De-initializes the asnd lib. This is also called when `Asnd` gets dropped.
pub fn end() {
unsafe {
ffi::ASND_End();
}
}
/// Pauses if true and resumes if false.
pub fn pause(should_pause: bool) {
unsafe {
ffi::ASND_Pause(should_pause as i32);
}
}
/// Returns true if paused, false if not paused.
pub fn is_paused() -> bool {
unsafe { ffi::ASND_Is_Paused() > 0 }
}
/// Returns the global time in milliseconds. Time is updated from the IRQ.
pub fn get_time() -> u32 {
unsafe { ffi::ASND_GetTime() }
}
/// Returns the global sample counter. Can be used to implement timers with high precision.
pub fn get_sample_counter() -> u32 {
unsafe { ffi::ASND_GetSampleCounter() }
}
/// Returns the samples sent from the IRQ in one tick.
pub fn get_samples_per_tick() -> u32 {
unsafe { ffi::ASND_GetSamplesPerTick() }
}
/// Sets the global time, in milliseconds.
pub fn set_time(time: u32) {
unsafe {
ffi::ASND_SetTime(time);
}
}
/// Sets a global callback for general purposes. It is called by the IRQ.
pub fn set_callback<F>(callback: Option<unsafe extern "C" fn()>) {
unsafe {
ffi::ASND_SetCallback(callback);
}
}
/// Returs the current audio rate. Default is 48000hz.
pub fn get_audio_rate() -> i32 {
unsafe { ffi::ASND_GetAudioRate() }
}
/// Sets a PCM voice to play. This function stops one previous voice. Use
/// `Asnd::status_voice()` to test status. The voices are played in 16-bit stereo,
/// regardless of source format. The buffer MUST be aligned and padded to 32 bytes.
pub fn set_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
options.callback,
)
};
if_not!(SND_OK => "Asnd::set_voice() failed with error {}!", err)
}
/// Sets a PCM voice to play infinitely. See `Asnd::set_voice()` as it is largely identical.
/// The buffer MUST be aligned and padded to 32 bytes.
pub fn set_infinite_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetInfiniteVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
)
};
if_not!(SND_OK => "Asnd::set_infinite_voice() failed with error {}", err)
}
/// Adds a PCM voice to play from the second buffer. Sound buffer must be 32-byte
/// aligned and have same sample format as first buffer. This must only be called after
/// `Asnd::set_voice()`, which must return `Ok()`.
/// The buffer MUST be aligned and padded to 32 bytes.
fn add_voice(voice: u32, sound_buffer: &mut [u8]) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_AddVoice(
voice as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
)
};
if_not!(SND_OK => "Asnd::add_voice() failed with error {}", err)
}
/// Stops the selected voice. If the voice is used in song mode, you need to
/// assign the samples with `Asnd::set_song_sample_voice()`.
pub fn stop_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StopVoice(voice as i32) };
if_not!(SND_OK => "Asnd::stop_voice() failed with error {}", err)
}
/// Pauses the selected voice. Can also be used to resume voice.
pub fn pause_voice(voice: u32, pause: bool) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_PauseVoice(voice as i32, pause as i32) };
if_not!(SND_OK => "Asnd::pause_voice() failed with error {}", err)
}
/// Returns the state of the selected voice.
pub fn status_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StatusVoice(voice as i32) };
if_not!(SND_WORKING => "Asnd::status_voice() failed with error {}", err)
}
/// Returns the first unused voice. Fails if no voices are available.
pub fn get_first_unused_voice() -> Result<u32> {
let err = unsafe { ffi::ASND_GetFirstUnusedVoice() };
match err {
x if x < 16 => Ok(x as u32),
_ => Err(OgcError::Audio(format!(
"Asnd::get_first_unused_voice() failed with error {}",
err
))),
}
}
/// Changes the voice-pitch in real time. This function can be used to
/// create audio effects such as Doppler effect simulation.
pub fn change_pitch_voice(voice: u32, pitch: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_ChangePitchVoice(voice as i32, pitch as i32) };
if_not!(SND_OK => "Asnd::change_pitch_voice() failed with error {}", err)
}
/// Changes the voice volume in real time. This function can be used to create
/// audio effects like distance attenuation.
pub fn change_volume_voice(voice: u32, volume_left: u8, volume_right: u8) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe {
ffi::ASND_ChangeVolumeVoice(voice as i32, volume_left as i32, volume_right as i32)
};
if_not!(SND_OK => "Asnd::change_volume_voice() failed with error {}", err)
}
/// Returns the voice tick counter. This value represents the number of ticks
/// since this voice started to play, sans delay time. If the lib is initialized with
/// `INIT_RATE=48000`, a return value of 24000 is equal to 0.5 seconds.
pub fn get_tick_counter_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTickCounterVoice(voice as i32) }
}
/// Returns the voice playback time. This value represents the time in milliseconds
/// since this voice started playing.
pub fn get_timer_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTimerVoice(voice as i32) }
}
/// Tests if a pointer is in use by a voice as a buffer.
/// This must be the same pointer sent to `Asnd::add_voice()` or `Asnd::set_voice()`.
/// Returns 0 if the pointer is unused.
/// Returns 1 if the pointer is used as a buffer.
/// Returns `ogc_sys::SND_INVALID` if invalid.
pub fn test_pointer<T>(voice: u32, pointer: *mut T) -> i32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestPointer(voice as i32, pointer as *mut _) }
}
/// Tests to determine if the voice is ready to receive a new buffer sample
/// with `Asnd::add_voice()`. Returns true if voice is ready.
pub fn test_voice_buffer_ready(voice: u32) -> bool {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestVoiceBufferReady(voice as i32) > 0 }
}
/// Returns the DSP usage, in percent `(0..=100)`.
pub fn get_dsp_percent_use() -> u32 {
unsafe { ffi::ASND_GetDSP_PercentUse() }
}
/// Returns DSP process time, in nano seconds.
pub fn get_dsp_process_time() -> Duration {
unsafe { Duration::from_nanos(ffi::ASND_GetDSP_ProcessTime().into()) }
}
fn validate_buffer(sound_buffer: &mut [u8]) {
assert_eq!(
0,
sound_buffer.as_ptr().align_offset(32),
"Data is not aligned correctly."
);
assert_eq!(
0,
sound_buffer.len() % 32,
"Data length is not a multiple of 32."
);
}
}
impl Drop for Asnd {
fn drop(&mut self) {
Self::end();
} | } | random_line_split | |
dg.go | package tbk
import (
"strconv"
)
type dg struct {
Client *Tbk
}
func (t *Tbk) Dg() *dg {
return &dg{Client: t}
}
// 搜索响应数据结构体
type MaterialOptionalResponse struct {
Response struct {
TotalResults int `json:"total_results"`
ResultList struct {
MapData []MaterialOptionalData `json:"map_data"`
} `json:"result_list"`
} `json:"tbk_dg_material_optional_response"`
}
/**
* (通用物料搜索API(导购))
* taobao.tbk.dg.material.optional
* @line http://open.taobao.com/docs/api.htm?apiId=35896
*/
func (d *dg) MaterialOptional(adzoneId int64, others ...map[string]string) (res *MaterialOptionalResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["adzone_id"] = strconv.FormatInt(adzoneId, 10)
_, err = d.Client.httpPost("taobao.tbk.dg.material.optional", params, &res)
return
}
type OptimusMaterialResponse struct {
Response struct {
ResultList struct {
MapData []struct {
MaterialOptionalData
JuPlayEndTime string `json:"ju_play_end_time"`
JuPlayStartTime string `json:"ju_play_start_time"`
PlayInfo string `json:"play_info"`
TmallPlayActivityEndTime int64 `json:"tmall_play_activity_end_time"`
TmallPlayActivityStartTime int64 `json:"tmall_play_activity_start_time"`
JuPreShowEndTime int64 `json:"ju_pre_show_end_time"`
JuPreShowStartTime int64 `json:"ju_pre_show_start_time"`
FavoritesInfo struct {
TotalCount int `json:"total_count"`
FavoritesList []FavoritesDetail `json:"favorites_list"`
} `json:"favorites_info"` // 选品库信息
} `json:"map_data"`
} `json:"result_list"`
IsDefault string `json:"is_default"`
TotalCount int `json:"total_count"`
} `json:"tbk_dg_optimus_material_response"`
}
// 选品库详情
type FavoritesDetail struct {
FavoritesId int64 `json:"favorites_id"`
FavoritesTitle string `json:"favorites_title"`
}
/**
* ( 淘宝客-推广者-物料精选 )
* taobao.tbk.dg.optimus.material
* @line https://open.taobao.com/api.htm?docId=33947&docType=2
*/
func (d *dg) OptimusMaterial(adzoneId int64, others ...map[string]string) (res *OptimusMaterialResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["adzone_id"] = strconv.FormatInt(adzoneId, 10)
_, err = d.Client.httpPost("taobao.tbk.dg.optimus.material", params, &res)
return
}
type vegas struct {
Client *Tbk
}
func (d *dg) Vegas() *vegas {
return &vegas{Client: d.Client}
}
// 淘礼金创建配置
type TljConf struct {
AdzoneId int64 `json:"adzone_id"` // 广告位id
ItemId string `json:"item_id"` // 商品id
TotalNum int64 `json:"total_num"` // 总体个数
Name string `json:"name"` // 淘礼金名称
UserTotalWinNumLimit int `json:"user_total_win_num_limit"` // 单用户累计中奖次数上限
SecuritySwitch bool `json:"security_switch"` // 安全开关
PerFace string `json:"per_face"` // 单个淘礼金面额
SendStartTime string `json:"send_start_time"` // 发放开始时间
SendEndTime string `json:"send_end_time"` // 发放截止时间
UseEndTime string `json:"use_end_time"` // 使用结束日期
UseEndTimeMode int `json:"use_end_time_mode"` // 结束日期模式,1:相对时间 2:绝对时间
UseStartTime string `json:"use_start_time"` // 开始时间
}
// 淘礼金创建完成响应
type TljResponse struct {
Response struct {
Result struct {
Model struct {
RightsId string `json:"rights_id"`
SendUrl string `json:"send_url"`
VegasCode string `json:"vegas_code"`
} `json:"model"`
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
} `json:"result"`
} `json:"tbk_dg_vegas_tlj_create_response"`
}
/**
* (淘宝客-推广者-淘礼金创建)
* taobao.tbk.dg.vegas.tlj.create
* @line https://open.taobao.com/api.htm?docId=40173&docType=2
*/
func (v *vegas) TljCreate(conf *TljConf) (res *TljResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.tlj.create", v.Client.Struct2MapString(conf), &res)
return
}
type TljReportResponse struct {
Response struct {
Result struct {
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
Model struct {
UnfreezeAmount float64 `json:"unfreeze_amount"`
UnfreezeNum int64 `json:"unfreeze_num"`
RefundAmount float64 `json:"refund_amount"`
RefundNum int64 `json:"refund_num"`
AlipayAmount float64 `json:"alipay_amount"`
UseAmount float64 `json:"use_amount"`
UseNum int64 `json:"use_num"`
WinAmount float64 `json:"win_amount"`
WinNum int64 `json:"win_num"`
PerCommissionAmount float64 `json:"per_commission_amount"`
FpRefundAmount float64 `json:"fp_refund_amount"`
FpRefundNum int64 `json:"fp_refund_num"`
} `json:"model"`
} `json:"result"`
} `json:"tbk_dg_vegas_tlj_instance_report_response"`
}
/**
* ( 淘宝客-推广者-淘礼金发放及使用报表 )
* taobao.tbk.dg.vegas.tlj.instance.report
* @line https://open.taobao.com/api.htm?docId=43317&docType=2&scopeId=15029
*/
func (v *vegas) TljInstanceReport(RightsId string) (res *TljReportResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.tlj.instance.report", map[string]string{
"rights_id": RightsId,
}, &res)
return
}
type LbTljConf struct {
SecurityLevel int `json:"security_level"` // 安全等级
UseStartTime string `json:"use_start_time"` // 使用开始日期
UseEndTime string `json:"use_end_time"` // 使用结束日期
UseEndTimeMode int `json:"use_end_time_mode"` // 结束日期的模式 1:相对时间 2:绝对时间
AcceptStartTime string `json:"accept_start_time"` // 裂变任务领取开始时间
AcceptEndTime string `json:"accept_end_time"` // 裂变任务领取截止时间
RightsPerFace string `json:"rights_per_face"` // 单个淘礼金面额,支持两位小数,单位元
SecuritySwitch bool `json:"security_switch"` // 安全开关 true启用 false不启用
UserTotalWinNumLimit int `json:"user_total_win_num_limit"` // 单用户累计中奖次数上限
Name string `json:"name"` // 淘礼金名称
RightsNum int `json:"rights_num"` // 淘礼金总个数
ItemId string `json:"item_id"` // 商品ID
CampaignType string `json:"campaign_type"`
TaskRightNum int `json:"task_right_num"` // 裂变淘礼金总个数
TaskRightsPerFace string `json:"task_rights_per_face"` // 裂变单个淘礼金面额
InviteNum int `json:"invite_num"` // 裂变淘礼金邀请人数
InviteTimeLimit int `json:"invite_time_limit"` // 裂变淘礼金邀请时长,单位分钟,最大120分钟
AdzoneId int `json:"adzone_id"` // 推广位ID
}
type LbTljResponse struct {
Response struct {
Result struct {
Model struct {
RightsID string `json:"rights_id"` // 直接领取淘礼金ID 即小额淘礼金
SendURL string `json:"send_url"` // 发放地址
TaskRightsID string `json:"task_rights_id"` // 裂变淘礼金ID 即大额淘礼金
TaskID string `json:"task_id"` // 裂变任务ID
} `json:"model"`
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
} `json:"result"`
} `json:"tbk_dg_vegas_lbtlj_create_response"`
}
/**
* ( 淘宝客-推广者-裂变淘礼金创建 )
* taobao.tbk.dg.vegas.lbtlj.create
* @line https://open.taobao.com/api.htm?docId=57710&docType=2&scopeId=23995
*/
func (v *vegas) LbTljCreate(conf *LbTljConf) (res *LbTljResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.lbtlj.create", v.Client.Struct2MapString(conf), &res)
return
}
type newUser struct {
Client *Tbk
}
func (d *dg) NewUser() *newUser {
return &newUser{Client: d.Client}
}
type OrderGetResponse struct {
Response struct {
Result struct {
Data struct {
Result []OrderGetData `json:"result"`
PageNo int `json:"page_no"`
PageSize int `json:"page_size"`
HasNext bool `json:"has_next"`
} `json:"data"`
} `json:"result"`
} `json:"tbk_dg_newuser_order_get_response"`
}
type OrderGetData struct {
RegisterTime string `json:"register_time"`
BindTime string `json:"bind_time"`
BuyTime string `json:"buy_time"`
Status int `json:"status"`
Mobile string `json:"mobile"`
OrderTkType int | n:"order_tk_type"` // 订单淘客类型:1.淘客订单 2.非淘客订单,仅淘宝天猫拉新适用
UnionId string `json:"union_id"`
MemberId int `json:"member_id"`
MemberNick string `json:"member_nick"`
SiteId int64 `json:"site_id"`
SiteName string `json:"site_name"`
AdzoneId int64 `json:"adzone_id"`
AdzoneName string `json:"adzone_name"`
TbTradeParentId int64 `json:"tb_trade_parent_id"`
AcceptTime string `json:"accept_time"`
ReceiveTime string `json:"receive_time"`
SuccessTime string `json:"success_time"`
ActivityType string `json:"activity_type"`
ActivityId string `json:"activity_id"`
BizDate string `json:"biz_date"`
BindCardTime string `json:"bind_card_time"`
LoginTime string `json:"login_time"`
IsCardSave int `json:"is_card_save"` // 银行卡绑定状态 1.绑定 0.未绑定
UseRightsTime string `json:"use_rights_time"`
GetRightsTime string `json:"get_rights_time"`
RelationId string `json:"relation_id"`
Orders []OrderData `json:"orders"`
}
type OrderData struct {
Commission string `json:"commission"`
ConfirmReceiveTime string `json:"confirm_receive_time"`
PayTime string `json:"pay_time"`
OrderNo string `json:"order_no"`
}
/**
* (淘宝客-推广者-新用户订单明细查询)
* taobao.tbk.dg.newuser.order.get
* @line https://open.taobao.com/api.htm?docId=33892&docType=2
*/
func (n *newUser) OrderGet(activityId string, others ...map[string]string) (res *OrderGetResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["activity_id"] = activityId
_, err = n.Client.httpPost("taobao.tbk.dg.newuser.order.get", params, &res)
return
}
type OrderSumResponse struct {
Response struct {
Results struct {
Data struct {
PageNo int `json:"page_no"`
PageSize int `json:"page_size"`
HasNext bool `json:"has_next"`
Results struct {
Data []SumData `json:"data"`
} `json:"results"`
} `json:"data"`
} `json:"results"`
} `json:"tbk_dg_newuser_order_sum_response"`
}
type SumData struct {
ActivityId string `json:"activity_id"`
BizDate string `json:"biz_date"`
RegUserCnt int64 `json:"reg_user_cnt"`
LoginUserCnt int64 `json:"login_user_cnt"`
AlipayUserCnt int64 `json:"alipay_user_cnt"`
RcvValidUserCnt int64 `json:"rcv_valid_user_cnt"`
RcvUserCnt int64 `json:"rcv_user_cnt"`
AlipayUserCpaPreAmt string `json:"alipay_user_cpa_pre_amt"`
BindBuyUserCpaPreAmt string `json:"bind_buy_user_cpa_pre_amt"`
BindBuyValidUserCnt int64 `json:"bind_buy_valid_user_cnt"`
BindCardValidUserCnt int64 `json:"bind_card_valid_user_cnt"`
ReBuyValidUserCnt int64 `json:"re_buy_valid_user_cnt"`
ValidNum int64 `json:"valid_num"`
RelationId string `json:"relation_id"`
}
/**
* ( 淘宝客-推广者-拉新活动对应数据查询 )
* taobao.tbk.dg.newuser.order.sum
* @line https://open.taobao.com/api.htm?docId=36836&docType=2
*/
func (n *newUser) OrderSum(activityId string, pageNo, pageSize int, others ...map[string]string) (res *OrderSumResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["activity_id"] = activityId
params["page_no"] = strconv.Itoa(pageNo)
params["page_size"] = strconv.Itoa(pageSize)
_, err = n.Client.httpPost("taobao.tbk.dg.newuser.order.sum", params, &res)
return
}
| `jso | identifier_name |
dg.go | package tbk
import (
"strconv"
)
type dg struct {
Client *Tbk
}
func (t *Tbk) Dg() *dg |
// 搜索响应数据结构体
type MaterialOptionalResponse struct {
Response struct {
TotalResults int `json:"total_results"`
ResultList struct {
MapData []MaterialOptionalData `json:"map_data"`
} `json:"result_list"`
} `json:"tbk_dg_material_optional_response"`
}
/**
* (通用物料搜索API(导购))
* taobao.tbk.dg.material.optional
* @line http://open.taobao.com/docs/api.htm?apiId=35896
*/
func (d *dg) MaterialOptional(adzoneId int64, others ...map[string]string) (res *MaterialOptionalResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["adzone_id"] = strconv.FormatInt(adzoneId, 10)
_, err = d.Client.httpPost("taobao.tbk.dg.material.optional", params, &res)
return
}
type OptimusMaterialResponse struct {
Response struct {
ResultList struct {
MapData []struct {
MaterialOptionalData
JuPlayEndTime string `json:"ju_play_end_time"`
JuPlayStartTime string `json:"ju_play_start_time"`
PlayInfo string `json:"play_info"`
TmallPlayActivityEndTime int64 `json:"tmall_play_activity_end_time"`
TmallPlayActivityStartTime int64 `json:"tmall_play_activity_start_time"`
JuPreShowEndTime int64 `json:"ju_pre_show_end_time"`
JuPreShowStartTime int64 `json:"ju_pre_show_start_time"`
FavoritesInfo struct {
TotalCount int `json:"total_count"`
FavoritesList []FavoritesDetail `json:"favorites_list"`
} `json:"favorites_info"` // 选品库信息
} `json:"map_data"`
} `json:"result_list"`
IsDefault string `json:"is_default"`
TotalCount int `json:"total_count"`
} `json:"tbk_dg_optimus_material_response"`
}
// 选品库详情
type FavoritesDetail struct {
FavoritesId int64 `json:"favorites_id"`
FavoritesTitle string `json:"favorites_title"`
}
/**
* ( 淘宝客-推广者-物料精选 )
* taobao.tbk.dg.optimus.material
* @line https://open.taobao.com/api.htm?docId=33947&docType=2
*/
func (d *dg) OptimusMaterial(adzoneId int64, others ...map[string]string) (res *OptimusMaterialResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["adzone_id"] = strconv.FormatInt(adzoneId, 10)
_, err = d.Client.httpPost("taobao.tbk.dg.optimus.material", params, &res)
return
}
type vegas struct {
Client *Tbk
}
func (d *dg) Vegas() *vegas {
return &vegas{Client: d.Client}
}
// 淘礼金创建配置
type TljConf struct {
AdzoneId int64 `json:"adzone_id"` // 广告位id
ItemId string `json:"item_id"` // 商品id
TotalNum int64 `json:"total_num"` // 总体个数
Name string `json:"name"` // 淘礼金名称
UserTotalWinNumLimit int `json:"user_total_win_num_limit"` // 单用户累计中奖次数上限
SecuritySwitch bool `json:"security_switch"` // 安全开关
PerFace string `json:"per_face"` // 单个淘礼金面额
SendStartTime string `json:"send_start_time"` // 发放开始时间
SendEndTime string `json:"send_end_time"` // 发放截止时间
UseEndTime string `json:"use_end_time"` // 使用结束日期
UseEndTimeMode int `json:"use_end_time_mode"` // 结束日期模式,1:相对时间 2:绝对时间
UseStartTime string `json:"use_start_time"` // 开始时间
}
// 淘礼金创建完成响应
type TljResponse struct {
Response struct {
Result struct {
Model struct {
RightsId string `json:"rights_id"`
SendUrl string `json:"send_url"`
VegasCode string `json:"vegas_code"`
} `json:"model"`
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
} `json:"result"`
} `json:"tbk_dg_vegas_tlj_create_response"`
}
/**
* (淘宝客-推广者-淘礼金创建)
* taobao.tbk.dg.vegas.tlj.create
* @line https://open.taobao.com/api.htm?docId=40173&docType=2
*/
func (v *vegas) TljCreate(conf *TljConf) (res *TljResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.tlj.create", v.Client.Struct2MapString(conf), &res)
return
}
type TljReportResponse struct {
Response struct {
Result struct {
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
Model struct {
UnfreezeAmount float64 `json:"unfreeze_amount"`
UnfreezeNum int64 `json:"unfreeze_num"`
RefundAmount float64 `json:"refund_amount"`
RefundNum int64 `json:"refund_num"`
AlipayAmount float64 `json:"alipay_amount"`
UseAmount float64 `json:"use_amount"`
UseNum int64 `json:"use_num"`
WinAmount float64 `json:"win_amount"`
WinNum int64 `json:"win_num"`
PerCommissionAmount float64 `json:"per_commission_amount"`
FpRefundAmount float64 `json:"fp_refund_amount"`
FpRefundNum int64 `json:"fp_refund_num"`
} `json:"model"`
} `json:"result"`
} `json:"tbk_dg_vegas_tlj_instance_report_response"`
}
/**
* ( 淘宝客-推广者-淘礼金发放及使用报表 )
* taobao.tbk.dg.vegas.tlj.instance.report
* @line https://open.taobao.com/api.htm?docId=43317&docType=2&scopeId=15029
*/
func (v *vegas) TljInstanceReport(RightsId string) (res *TljReportResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.tlj.instance.report", map[string]string{
"rights_id": RightsId,
}, &res)
return
}
type LbTljConf struct {
SecurityLevel int `json:"security_level"` // 安全等级
UseStartTime string `json:"use_start_time"` // 使用开始日期
UseEndTime string `json:"use_end_time"` // 使用结束日期
UseEndTimeMode int `json:"use_end_time_mode"` // 结束日期的模式 1:相对时间 2:绝对时间
AcceptStartTime string `json:"accept_start_time"` // 裂变任务领取开始时间
AcceptEndTime string `json:"accept_end_time"` // 裂变任务领取截止时间
RightsPerFace string `json:"rights_per_face"` // 单个淘礼金面额,支持两位小数,单位元
SecuritySwitch bool `json:"security_switch"` // 安全开关 true启用 false不启用
UserTotalWinNumLimit int `json:"user_total_win_num_limit"` // 单用户累计中奖次数上限
Name string `json:"name"` // 淘礼金名称
RightsNum int `json:"rights_num"` // 淘礼金总个数
ItemId string `json:"item_id"` // 商品ID
CampaignType string `json:"campaign_type"`
TaskRightNum int `json:"task_right_num"` // 裂变淘礼金总个数
TaskRightsPerFace string `json:"task_rights_per_face"` // 裂变单个淘礼金面额
InviteNum int `json:"invite_num"` // 裂变淘礼金邀请人数
InviteTimeLimit int `json:"invite_time_limit"` // 裂变淘礼金邀请时长,单位分钟,最大120分钟
AdzoneId int `json:"adzone_id"` // 推广位ID
}
type LbTljResponse struct {
Response struct {
Result struct {
Model struct {
RightsID string `json:"rights_id"` // 直接领取淘礼金ID 即小额淘礼金
SendURL string `json:"send_url"` // 发放地址
TaskRightsID string `json:"task_rights_id"` // 裂变淘礼金ID 即大额淘礼金
TaskID string `json:"task_id"` // 裂变任务ID
} `json:"model"`
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
} `json:"result"`
} `json:"tbk_dg_vegas_lbtlj_create_response"`
}
/**
* ( 淘宝客-推广者-裂变淘礼金创建 )
* taobao.tbk.dg.vegas.lbtlj.create
* @line https://open.taobao.com/api.htm?docId=57710&docType=2&scopeId=23995
*/
func (v *vegas) LbTljCreate(conf *LbTljConf) (res *LbTljResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.lbtlj.create", v.Client.Struct2MapString(conf), &res)
return
}
type newUser struct {
Client *Tbk
}
func (d *dg) NewUser() *newUser {
return &newUser{Client: d.Client}
}
type OrderGetResponse struct {
Response struct {
Result struct {
Data struct {
Result []OrderGetData `json:"result"`
PageNo int `json:"page_no"`
PageSize int `json:"page_size"`
HasNext bool `json:"has_next"`
} `json:"data"`
} `json:"result"`
} `json:"tbk_dg_newuser_order_get_response"`
}
type OrderGetData struct {
RegisterTime string `json:"register_time"`
BindTime string `json:"bind_time"`
BuyTime string `json:"buy_time"`
Status int `json:"status"`
Mobile string `json:"mobile"`
OrderTkType int `json:"order_tk_type"` // 订单淘客类型:1.淘客订单 2.非淘客订单,仅淘宝天猫拉新适用
UnionId string `json:"union_id"`
MemberId int `json:"member_id"`
MemberNick string `json:"member_nick"`
SiteId int64 `json:"site_id"`
SiteName string `json:"site_name"`
AdzoneId int64 `json:"adzone_id"`
AdzoneName string `json:"adzone_name"`
TbTradeParentId int64 `json:"tb_trade_parent_id"`
AcceptTime string `json:"accept_time"`
ReceiveTime string `json:"receive_time"`
SuccessTime string `json:"success_time"`
ActivityType string `json:"activity_type"`
ActivityId string `json:"activity_id"`
BizDate string `json:"biz_date"`
BindCardTime string `json:"bind_card_time"`
LoginTime string `json:"login_time"`
IsCardSave int `json:"is_card_save"` // 银行卡绑定状态 1.绑定 0.未绑定
UseRightsTime string `json:"use_rights_time"`
GetRightsTime string `json:"get_rights_time"`
RelationId string `json:"relation_id"`
Orders []OrderData `json:"orders"`
}
type OrderData struct {
Commission string `json:"commission"`
ConfirmReceiveTime string `json:"confirm_receive_time"`
PayTime string `json:"pay_time"`
OrderNo string `json:"order_no"`
}
/**
* (淘宝客-推广者-新用户订单明细查询)
* taobao.tbk.dg.newuser.order.get
* @line https://open.taobao.com/api.htm?docId=33892&docType=2
*/
func (n *newUser) OrderGet(activityId string, others ...map[string]string) (res *OrderGetResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["activity_id"] = activityId
_, err = n.Client.httpPost("taobao.tbk.dg.newuser.order.get", params, &res)
return
}
type OrderSumResponse struct {
Response struct {
Results struct {
Data struct {
PageNo int `json:"page_no"`
PageSize int `json:"page_size"`
HasNext bool `json:"has_next"`
Results struct {
Data []SumData `json:"data"`
} `json:"results"`
} `json:"data"`
} `json:"results"`
} `json:"tbk_dg_newuser_order_sum_response"`
}
type SumData struct {
ActivityId string `json:"activity_id"`
BizDate string `json:"biz_date"`
RegUserCnt int64 `json:"reg_user_cnt"`
LoginUserCnt int64 `json:"login_user_cnt"`
AlipayUserCnt int64 `json:"alipay_user_cnt"`
RcvValidUserCnt int64 `json:"rcv_valid_user_cnt"`
RcvUserCnt int64 `json:"rcv_user_cnt"`
AlipayUserCpaPreAmt string `json:"alipay_user_cpa_pre_amt"`
BindBuyUserCpaPreAmt string `json:"bind_buy_user_cpa_pre_amt"`
BindBuyValidUserCnt int64 `json:"bind_buy_valid_user_cnt"`
BindCardValidUserCnt int64 `json:"bind_card_valid_user_cnt"`
ReBuyValidUserCnt int64 `json:"re_buy_valid_user_cnt"`
ValidNum int64 `json:"valid_num"`
RelationId string `json:"relation_id"`
}
/**
* ( 淘宝客-推广者-拉新活动对应数据查询 )
* taobao.tbk.dg.newuser.order.sum
* @line https://open.taobao.com/api.htm?docId=36836&docType=2
*/
func (n *newUser) OrderSum(activityId string, pageNo, pageSize int, others ...map[string]string) (res *OrderSumResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["activity_id"] = activityId
params["page_no"] = strconv.Itoa(pageNo)
params["page_size"] = strconv.Itoa(pageSize)
_, err = n.Client.httpPost("taobao.tbk.dg.newuser.order.sum", params, &res)
return
}
| {
return &dg{Client: t}
} | identifier_body |
dg.go | package tbk
import (
"strconv"
)
type dg struct {
Client *Tbk
}
func (t *Tbk) Dg() *dg {
return &dg{Client: t}
}
// 搜索响应数据结构体
type MaterialOptionalResponse struct {
Response struct {
TotalResults int `json:"total_results"`
ResultList struct {
MapData []MaterialOptionalData `json:"map_data"`
} `json:"result_list"`
} `json:"tbk_dg_material_optional_response"`
}
/**
* (通用物料搜索API(导购))
* taobao.tbk.dg.material.optional
* @line http://open.taobao.com/docs/api.htm?apiId=35896
*/
func (d *dg) MaterialOptional(adzoneId int64, others ...map[string]string) (res *MaterialOptionalResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["adzone_id"] = strconv.FormatInt(adzoneId, 10)
_, err = d.Client.httpPost("taobao.tbk.dg.material.optional", params, &res)
return
}
| JuPlayEndTime string `json:"ju_play_end_time"`
JuPlayStartTime string `json:"ju_play_start_time"`
PlayInfo string `json:"play_info"`
TmallPlayActivityEndTime int64 `json:"tmall_play_activity_end_time"`
TmallPlayActivityStartTime int64 `json:"tmall_play_activity_start_time"`
JuPreShowEndTime int64 `json:"ju_pre_show_end_time"`
JuPreShowStartTime int64 `json:"ju_pre_show_start_time"`
FavoritesInfo struct {
TotalCount int `json:"total_count"`
FavoritesList []FavoritesDetail `json:"favorites_list"`
} `json:"favorites_info"` // 选品库信息
} `json:"map_data"`
} `json:"result_list"`
IsDefault string `json:"is_default"`
TotalCount int `json:"total_count"`
} `json:"tbk_dg_optimus_material_response"`
}
// 选品库详情
type FavoritesDetail struct {
FavoritesId int64 `json:"favorites_id"`
FavoritesTitle string `json:"favorites_title"`
}
/**
* ( 淘宝客-推广者-物料精选 )
* taobao.tbk.dg.optimus.material
* @line https://open.taobao.com/api.htm?docId=33947&docType=2
*/
func (d *dg) OptimusMaterial(adzoneId int64, others ...map[string]string) (res *OptimusMaterialResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["adzone_id"] = strconv.FormatInt(adzoneId, 10)
_, err = d.Client.httpPost("taobao.tbk.dg.optimus.material", params, &res)
return
}
type vegas struct {
Client *Tbk
}
func (d *dg) Vegas() *vegas {
return &vegas{Client: d.Client}
}
// 淘礼金创建配置
type TljConf struct {
AdzoneId int64 `json:"adzone_id"` // 广告位id
ItemId string `json:"item_id"` // 商品id
TotalNum int64 `json:"total_num"` // 总体个数
Name string `json:"name"` // 淘礼金名称
UserTotalWinNumLimit int `json:"user_total_win_num_limit"` // 单用户累计中奖次数上限
SecuritySwitch bool `json:"security_switch"` // 安全开关
PerFace string `json:"per_face"` // 单个淘礼金面额
SendStartTime string `json:"send_start_time"` // 发放开始时间
SendEndTime string `json:"send_end_time"` // 发放截止时间
UseEndTime string `json:"use_end_time"` // 使用结束日期
UseEndTimeMode int `json:"use_end_time_mode"` // 结束日期模式,1:相对时间 2:绝对时间
UseStartTime string `json:"use_start_time"` // 开始时间
}
// 淘礼金创建完成响应
type TljResponse struct {
Response struct {
Result struct {
Model struct {
RightsId string `json:"rights_id"`
SendUrl string `json:"send_url"`
VegasCode string `json:"vegas_code"`
} `json:"model"`
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
} `json:"result"`
} `json:"tbk_dg_vegas_tlj_create_response"`
}
/**
* (淘宝客-推广者-淘礼金创建)
* taobao.tbk.dg.vegas.tlj.create
* @line https://open.taobao.com/api.htm?docId=40173&docType=2
*/
func (v *vegas) TljCreate(conf *TljConf) (res *TljResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.tlj.create", v.Client.Struct2MapString(conf), &res)
return
}
type TljReportResponse struct {
Response struct {
Result struct {
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
Model struct {
UnfreezeAmount float64 `json:"unfreeze_amount"`
UnfreezeNum int64 `json:"unfreeze_num"`
RefundAmount float64 `json:"refund_amount"`
RefundNum int64 `json:"refund_num"`
AlipayAmount float64 `json:"alipay_amount"`
UseAmount float64 `json:"use_amount"`
UseNum int64 `json:"use_num"`
WinAmount float64 `json:"win_amount"`
WinNum int64 `json:"win_num"`
PerCommissionAmount float64 `json:"per_commission_amount"`
FpRefundAmount float64 `json:"fp_refund_amount"`
FpRefundNum int64 `json:"fp_refund_num"`
} `json:"model"`
} `json:"result"`
} `json:"tbk_dg_vegas_tlj_instance_report_response"`
}
/**
* ( 淘宝客-推广者-淘礼金发放及使用报表 )
* taobao.tbk.dg.vegas.tlj.instance.report
* @line https://open.taobao.com/api.htm?docId=43317&docType=2&scopeId=15029
*/
func (v *vegas) TljInstanceReport(RightsId string) (res *TljReportResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.tlj.instance.report", map[string]string{
"rights_id": RightsId,
}, &res)
return
}
type LbTljConf struct {
SecurityLevel int `json:"security_level"` // 安全等级
UseStartTime string `json:"use_start_time"` // 使用开始日期
UseEndTime string `json:"use_end_time"` // 使用结束日期
UseEndTimeMode int `json:"use_end_time_mode"` // 结束日期的模式 1:相对时间 2:绝对时间
AcceptStartTime string `json:"accept_start_time"` // 裂变任务领取开始时间
AcceptEndTime string `json:"accept_end_time"` // 裂变任务领取截止时间
RightsPerFace string `json:"rights_per_face"` // 单个淘礼金面额,支持两位小数,单位元
SecuritySwitch bool `json:"security_switch"` // 安全开关 true启用 false不启用
UserTotalWinNumLimit int `json:"user_total_win_num_limit"` // 单用户累计中奖次数上限
Name string `json:"name"` // 淘礼金名称
RightsNum int `json:"rights_num"` // 淘礼金总个数
ItemId string `json:"item_id"` // 商品ID
CampaignType string `json:"campaign_type"`
TaskRightNum int `json:"task_right_num"` // 裂变淘礼金总个数
TaskRightsPerFace string `json:"task_rights_per_face"` // 裂变单个淘礼金面额
InviteNum int `json:"invite_num"` // 裂变淘礼金邀请人数
InviteTimeLimit int `json:"invite_time_limit"` // 裂变淘礼金邀请时长,单位分钟,最大120分钟
AdzoneId int `json:"adzone_id"` // 推广位ID
}
type LbTljResponse struct {
Response struct {
Result struct {
Model struct {
RightsID string `json:"rights_id"` // 直接领取淘礼金ID 即小额淘礼金
SendURL string `json:"send_url"` // 发放地址
TaskRightsID string `json:"task_rights_id"` // 裂变淘礼金ID 即大额淘礼金
TaskID string `json:"task_id"` // 裂变任务ID
} `json:"model"`
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
} `json:"result"`
} `json:"tbk_dg_vegas_lbtlj_create_response"`
}
/**
* ( 淘宝客-推广者-裂变淘礼金创建 )
* taobao.tbk.dg.vegas.lbtlj.create
* @line https://open.taobao.com/api.htm?docId=57710&docType=2&scopeId=23995
*/
func (v *vegas) LbTljCreate(conf *LbTljConf) (res *LbTljResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.lbtlj.create", v.Client.Struct2MapString(conf), &res)
return
}
type newUser struct {
Client *Tbk
}
func (d *dg) NewUser() *newUser {
return &newUser{Client: d.Client}
}
type OrderGetResponse struct {
Response struct {
Result struct {
Data struct {
Result []OrderGetData `json:"result"`
PageNo int `json:"page_no"`
PageSize int `json:"page_size"`
HasNext bool `json:"has_next"`
} `json:"data"`
} `json:"result"`
} `json:"tbk_dg_newuser_order_get_response"`
}
type OrderGetData struct {
RegisterTime string `json:"register_time"`
BindTime string `json:"bind_time"`
BuyTime string `json:"buy_time"`
Status int `json:"status"`
Mobile string `json:"mobile"`
OrderTkType int `json:"order_tk_type"` // 订单淘客类型:1.淘客订单 2.非淘客订单,仅淘宝天猫拉新适用
UnionId string `json:"union_id"`
MemberId int `json:"member_id"`
MemberNick string `json:"member_nick"`
SiteId int64 `json:"site_id"`
SiteName string `json:"site_name"`
AdzoneId int64 `json:"adzone_id"`
AdzoneName string `json:"adzone_name"`
TbTradeParentId int64 `json:"tb_trade_parent_id"`
AcceptTime string `json:"accept_time"`
ReceiveTime string `json:"receive_time"`
SuccessTime string `json:"success_time"`
ActivityType string `json:"activity_type"`
ActivityId string `json:"activity_id"`
BizDate string `json:"biz_date"`
BindCardTime string `json:"bind_card_time"`
LoginTime string `json:"login_time"`
IsCardSave int `json:"is_card_save"` // 银行卡绑定状态 1.绑定 0.未绑定
UseRightsTime string `json:"use_rights_time"`
GetRightsTime string `json:"get_rights_time"`
RelationId string `json:"relation_id"`
Orders []OrderData `json:"orders"`
}
type OrderData struct {
Commission string `json:"commission"`
ConfirmReceiveTime string `json:"confirm_receive_time"`
PayTime string `json:"pay_time"`
OrderNo string `json:"order_no"`
}
/**
* (淘宝客-推广者-新用户订单明细查询)
* taobao.tbk.dg.newuser.order.get
* @line https://open.taobao.com/api.htm?docId=33892&docType=2
*/
func (n *newUser) OrderGet(activityId string, others ...map[string]string) (res *OrderGetResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["activity_id"] = activityId
_, err = n.Client.httpPost("taobao.tbk.dg.newuser.order.get", params, &res)
return
}
type OrderSumResponse struct {
Response struct {
Results struct {
Data struct {
PageNo int `json:"page_no"`
PageSize int `json:"page_size"`
HasNext bool `json:"has_next"`
Results struct {
Data []SumData `json:"data"`
} `json:"results"`
} `json:"data"`
} `json:"results"`
} `json:"tbk_dg_newuser_order_sum_response"`
}
type SumData struct {
ActivityId string `json:"activity_id"`
BizDate string `json:"biz_date"`
RegUserCnt int64 `json:"reg_user_cnt"`
LoginUserCnt int64 `json:"login_user_cnt"`
AlipayUserCnt int64 `json:"alipay_user_cnt"`
RcvValidUserCnt int64 `json:"rcv_valid_user_cnt"`
RcvUserCnt int64 `json:"rcv_user_cnt"`
AlipayUserCpaPreAmt string `json:"alipay_user_cpa_pre_amt"`
BindBuyUserCpaPreAmt string `json:"bind_buy_user_cpa_pre_amt"`
BindBuyValidUserCnt int64 `json:"bind_buy_valid_user_cnt"`
BindCardValidUserCnt int64 `json:"bind_card_valid_user_cnt"`
ReBuyValidUserCnt int64 `json:"re_buy_valid_user_cnt"`
ValidNum int64 `json:"valid_num"`
RelationId string `json:"relation_id"`
}
/**
* ( 淘宝客-推广者-拉新活动对应数据查询 )
* taobao.tbk.dg.newuser.order.sum
* @line https://open.taobao.com/api.htm?docId=36836&docType=2
*/
func (n *newUser) OrderSum(activityId string, pageNo, pageSize int, others ...map[string]string) (res *OrderSumResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["activity_id"] = activityId
params["page_no"] = strconv.Itoa(pageNo)
params["page_size"] = strconv.Itoa(pageSize)
_, err = n.Client.httpPost("taobao.tbk.dg.newuser.order.sum", params, &res)
return
} | type OptimusMaterialResponse struct {
Response struct {
ResultList struct {
MapData []struct {
MaterialOptionalData | random_line_split |
dg.go | package tbk
import (
"strconv"
)
type dg struct {
Client *Tbk
}
func (t *Tbk) Dg() *dg {
return &dg{Client: t}
}
// 搜索响应数据结构体
type MaterialOptionalResponse struct {
Response struct {
TotalResults int `json:"total_results"`
ResultList struct {
MapData []MaterialOptionalData `json:"map_data"`
} `json:"result_list"`
} `json:"tbk_dg_material_optional_response"`
}
/**
* (通用物料搜索API(导购))
* taobao.tbk.dg.material.optional
* @line http://open.taobao.com/docs/api.htm?apiId=35896
*/
func (d *dg) MaterialOptional(adzoneId int64, others ...map[string]string) (res *MaterialOptionalResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["adzone_id"] = strconv.FormatInt(adzoneId, 10)
_, err = d.Client.httpPost("taobao.tbk.dg.material.optional", params, &res)
return
}
type OptimusMaterialResponse struct {
Response struct {
ResultList struct {
MapData []struct {
MaterialOptionalData
JuPlayEndTime string `json:"ju_play_end_time"`
JuPlayStartTime string `json:"ju_play_start_time"`
PlayInfo string `json:"play_info"`
TmallPlayActivityEndTime int64 `json:"tmall_play_activity_end_time"`
TmallPlayActivityStartTime int64 `json:"tmall_play_activity_start_time"`
JuPreShowEndTime int64 `json:"ju_pre_show_end_time"`
JuPreShowStartTime int64 `json:"ju_pre_show_start_time"`
FavoritesInfo struct {
TotalCount int `json:"total_count"`
FavoritesList []FavoritesDetail `json:"favorites_list"`
} `json:"favorites_info"` // 选品库信息
} `json:"map_data"`
} `json:"result_list"`
IsDefault string `json:"is_default"`
TotalCount int `json:"total_count"`
} `json:"tbk_dg_optimus_material_response"`
}
// 选品库详情
type FavoritesDetail struct {
FavoritesId int64 `json:"favorites_id"`
FavoritesTitle string `json:"favorites_title"`
}
/**
* ( 淘宝客-推广者-物料精选 )
* taobao.tbk.dg.optimus.material
* @line https://open.taobao.com/api.htm?docId=33947&docType=2
*/
func (d *dg) OptimusMaterial(adzoneId int64, others ...map[string]string) (res *OptimusMaterialResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["adzone_id"] = strconv.FormatInt(adzoneId, 10)
_, err = d.Client.httpPost("taobao.tbk.dg.optimus.material", params, &res)
return
}
type vegas struct {
Client *Tbk
}
func (d *dg) Vegas() *vegas {
return &vegas{Client: d.Client}
}
// 淘礼金创建配置
type TljConf struct {
AdzoneId int64 `json:"adzone_id"` // 广告位id
ItemId string `json:"item_id"` // 商品id
TotalNum int64 `json:"total_num"` // 总体个数
Name string `json:"name"` // 淘礼金名称
UserTotalWinNumLimit int `json:"user_total_win_num_limit"` // 单用户累计中奖次数上限
SecuritySwitch bool `json:"security_switch"` // 安全开关
PerFace string `json:"per_face"` // 单个淘礼金面额
SendStartTime string `json:"send_start_time"` // 发放开始时间
SendEndTime string `json:"send_end_time"` // 发放截止时间
UseEndTime string `json:"use_end_time"` // 使用结束日期
UseEndTimeMode int `json:"use_end_time_mode"` // 结束日期模式,1:相对时间 2:绝对时间
UseStartTime string `json:"use_start_time"` // 开始时间
}
// 淘礼金创建完成响应
type TljResponse struct {
Response struct {
Result struct {
Model struct {
RightsId string `json:"rights_id"`
SendUrl string `json:"send_url"`
VegasCode string `json:"vegas_code"`
} `json:"model"`
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
} `json:"result"`
} `json:"tbk_dg_vegas_tlj_create_response"`
}
/**
* (淘宝客-推广者-淘礼金创建)
* taobao.tbk.dg.vegas.tlj.create
* @line https://open.taobao.com/api.htm?docId=40173&docType=2
*/
func (v *vegas) TljCreate(conf *TljConf) (res *TljResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.tlj.create", v.Client.Struct2MapString(conf), &res)
return
}
type TljReportResponse struct {
Response struct {
Result struct {
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
Model struct {
UnfreezeAmount float64 `json:"unfreeze_amount"`
UnfreezeNum int64 `json:"unfreeze_num"`
RefundAmount float64 `json:"refund_amount"`
RefundNum int64 `json:"refund_num"`
AlipayAmount float64 `json:"alipay_amount"`
UseAmount float64 `json:"use_amount"`
UseNum int64 `json:"use_num"`
WinAmount float64 `json:"win_amount"`
WinNum int64 `json:"win_num"`
PerCommissionAmount float64 `json:"per_commission_amount"`
FpRefundAmount float64 `json:"fp_refund_amount"`
FpRefundNum int64 `json:"fp_refund_num"`
} `json:"model"`
} `json:"result"`
} `json:"tbk_dg_vegas_tlj_instance_report_response"`
}
/**
* ( 淘宝客-推广者-淘礼金发放及使用报表 )
* taobao.tbk.dg.vegas.tlj.instance.report
* @line https://open.taobao.com/api.htm?docId=43317&docType=2&scopeId=15029
*/
func (v *vegas) TljInstanceReport(RightsId string) (res *TljReportResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.tlj.instance.report", map[string]string{
"rights_id": RightsId,
}, &res)
return
}
type LbTljConf struct {
SecurityLevel int `json:"security_level"` // 安全等级
UseStartTime string `json:"use_start_time"` // 使用开始日期
UseEndTime string `json:"use_end_time"` // 使用结束日期
UseEndTimeMode int `json:"use_end_time_mode"` // 结束日期的模式 1:相对时间 2:绝对时间
AcceptStartTime string `json:"accept_start_time"` // 裂变任务领取开始时间
AcceptEndTime string `json:"accept_end_time"` // 裂变任务领取截止时间
RightsPerFace string `json:"rights_per_face"` // 单个淘礼金面额,支持两位小数,单位元
SecuritySwitch bool `json:"security_switch"` // 安全开关 true启用 false不启用
UserTotalWinNumLimit int `json:"user_total_win_num_limit"` // 单用户累计中奖次数上限
Name string `json:"name"` // 淘礼金名称
RightsNum int `json:"rights_num"` // 淘礼金总个数
ItemId string `json:"item_id"` // 商品ID
CampaignType string `json:"campaign_type"`
TaskRightNum int `json:"task_right_num"` // 裂变淘礼金总个数
TaskRightsPerFace string `json:"task_rights_per_face"` // 裂变单个淘礼金面额
InviteNum int `json:"invite_num"` // 裂变淘礼金邀请人数
InviteTimeLimit int `json:"invite_time_limit"` // 裂变淘礼金邀请时长,单位分钟,最大120分钟
AdzoneId int `json:"adzone_id"` // 推广位ID
}
type LbTljResponse struct {
Response struct {
Result struct {
Model struct {
RightsID string `json:"rights_id"` // 直接领取淘礼金ID 即小额淘礼金
SendURL string `json:"send_url"` // 发放地址
TaskRightsID string `json:"task_rights_id"` // 裂变淘礼金ID 即大额淘礼金
TaskID string `json:"task_id"` // 裂变任务ID
} `json:"model"`
MsgCode string `json:"msg_code"`
MsgInfo string `json:"msg_info"`
Success bool `json:"success"`
} `json:"result"`
} `json:"tbk_dg_vegas_lbtlj_create_response"`
}
/**
* ( 淘宝客-推广者-裂变淘礼金创建 )
* taobao.tbk.dg.vegas.lbtlj.create
* @line https://open.taobao.com/api.htm?docId=57710&docType=2&scopeId=23995
*/
func (v *vegas) LbTljCreate(conf *LbTljConf) (res *LbTljResponse, err error) {
_, err = v.Client.httpPost("taobao.tbk.dg.vegas.lbtlj.create", v.Client.Struct2MapString(conf), &res)
return
}
type newUser struct {
Client *Tbk
}
func (d *dg) NewUser() *newUser {
return &newUser{Client: d.Client}
}
type OrderGetResponse struct {
Response struct {
Result struct {
Data struct {
Result []OrderGetData `json:"result"`
PageNo int `json:"page_no"`
PageSize int `json:"page_size"`
HasNext bool `json:"has_next"`
} `json:"data"`
} `json:"result"`
} `json:"tbk_dg_newuser_order_get_response"`
}
type OrderGetData struct {
RegisterTime string `json:"register_time"`
BindTime string `json:"bind_time"`
BuyTime string `json:"buy_time"`
Status int `json:"status"`
Mobile string `json:"mobile"`
OrderTkType int `json:"order_tk_type"` // 订单淘客类型:1.淘客订单 2.非淘客订单,仅淘宝天猫拉新适用
UnionId string `json:"union_id"`
MemberId int `json:"member_id"`
MemberNick string `json:"member_nick"`
SiteId int64 `json:"site_id"`
SiteName string `json:"site_name"`
AdzoneId int64 `json:"adzone_id"`
AdzoneName string `json:"adzone_name"`
TbTradeParentId int64 `json:"tb_trade_parent_id"`
AcceptTime string `json:"accept_time"`
ReceiveTime string `json:"receive_time"`
SuccessTime string `json:"success_time"`
ActivityType string `json:"activity_type"`
ActivityId string `json:"activity_id"`
BizDate string `json:"biz_date"`
BindCardTime string `json:"bind_card_time"`
LoginTime string `json:"login_time"`
IsCardSave int `json:"is_card_save"` // 银行卡绑定状态 1.绑定 0.未绑定
UseRightsTime string `json:"use_rights_time"`
GetRightsTime string `json:"get_rights_time"`
RelationId string `json:"relation_id"`
Orders []OrderData `json:"orders"`
}
type OrderData struct {
Commission string `json:"commission"`
ConfirmReceiveTime string `json:"confirm_receive_time"`
PayTime string `json:"pay_time"`
OrderNo string `json:"order_no"`
}
/**
* (淘宝客-推广者-新用户订单明细查询)
* taobao.tbk.dg.newuser.order.get
* @line https://open.taobao.com/api.htm?docId=33892&docType=2
*/
func (n *newUser) OrderGet(activityId string, others ...map[string]string) (res *OrderGetResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["activity_id"] = activityId
_, err = n.Client.httpPost("taobao.tbk.dg.newuser.order.get", params, &res)
return
}
type OrderSumResponse struct {
Response struct {
Results struct {
Data struct {
PageNo int `json:"page_no"`
PageSize int `json:"page_size"`
HasNext bool `json:"has_next"`
Results struct {
Data []SumData `json:"data"`
} `json:"results"`
} `json:"data"`
} `json:"results"`
} `json:"tbk_dg_newuser_order_sum_response"`
}
type SumData struct {
ActivityId string `json:"activity_id"`
BizDate string `json:"biz_date"`
RegUserCnt int64 `json:"reg_user_cnt"`
LoginUserCnt int64 `json:"login_user_cnt"`
AlipayUserCnt int64 `json:"alipay_user_cnt"`
RcvValidUserCnt int64 `json:"rcv_valid_user_cnt"`
RcvUserCnt int64 `json:"rcv_user_cnt"`
AlipayUserCpaPreAmt string `json:"alipay_user_cpa_pre_amt"`
BindBuyUserCpaPreAmt string `json:"bind_buy_user_cpa_pre_amt"`
BindBuyValidUserCnt int64 `json:"bind_buy_valid_user_cnt"`
BindCardValidUserCnt int64 `json:"bind_card_valid_user_cnt"`
ReBuyValidUserCnt int64 `json:"re_buy_valid_user_cnt"`
ValidNum int64 `json:"valid_num"`
RelationId string `json:"relation_id"`
}
/**
* ( 淘宝客-推广者-拉新活动对应数据查询 )
* taobao.tbk.dg.newuser.order.sum
* @line https://open.taobao.com/api.htm?docId=36836&docType=2
*/
func (n *newUser) OrderSum(activityId string, pageNo, pageSize int, others ...map[string]string) (res *OrderSumResponse, err error) {
params := make(map[string]string)
if len(others) > 0 {
params = others[0]
}
params["activity_id"] = activityId
params["page_no"] = strconv.Itoa(pageNo)
params["page_size"] = strconv.Itoa(pageSize)
_, err = n.Client.httpPost("taobao.tbk.dg.newuser.order.sum", params, &res)
return
}
| conditional_block | ||
path.rs | //! File path utilities.
//!
//! Some of the functions are similar to [`std::path::Path`] ones, but here they
//! work directly upon [`&str`](str) instead of [`&OsStr`](std::ffi::OsStr).
use crate::co;
use crate::decl::*;
use crate::guard::*;
use crate::prelude::*;
/// Returns an iterator over the files and folders within a directory.
/// Optionally, a wildcard can be specified to filter files by name.
///
/// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE)
/// iteration functions.
///
/// # Examples
///
/// Listing all text files in a directory:
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// for file_path in w::path::dir_list("C:\\temp", Some("*.txt")) {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// }
/// # Ok::<_, winsafe::co::ERROR>(())
/// ```
#[must_use]
pub fn dir_list<'a>(
dir_path: &'a str,
filter: Option<&'a str>,
) -> impl Iterator<Item = SysResult<String>> + 'a
{
DirListIter::new(dir_path.to_owned(), filter)
}
/// Returns an interator over the files within a directory, and all its
/// subdirectories, recursively.
///
/// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE)
/// iteration functions.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// // Ordinary for loop
/// for file_path in w::path::dir_walk("C:\\Temp") {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// }
///
/// // Closure with try_for_each
/// w::path::dir_walk("C:\\Temp")
/// .try_for_each(|file_path| {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// Ok(())
/// })?;
///
/// // Collecting into a Vec
/// let all = w::path::dir_walk("C:\\Temp")
/// .collect::<w::SysResult<Vec<_>>>()?;
///
/// // Transforming and collecting into a Vec
/// let all = w::path::dir_walk("C:\\Temp")
/// .map(|file_path| {
/// let file_path = file_path?;
/// Ok(format!("PATH: {}", file_path))
/// })
/// .collect::<w::SysResult<Vec<_>>>()?;
/// # Ok::<_, winsafe::co::ERROR>(())
/// ```
#[must_use]
pub fn dir_walk<'a>(
dir_path: &'a str,
) -> impl Iterator<Item = SysResult<String>> + 'a
{
DirWalkIter::new(dir_path.to_owned())
}
/// Returns the path of the current EXE file, without the EXE filename, and
/// without a trailing backslash.
///
/// In a debug build, the `target\debug` folders will be suppressed.
#[cfg(debug_assertions)]
#[must_use]
pub fn exe_path() -> SysResult<String> {
let dbg = HINSTANCE::NULL.GetModuleFileName()?;
Ok(
get_path( // target
get_path( // debug
get_path(&dbg).unwrap(), // exe name
).unwrap(),
).unwrap()
.to_owned(),
)
}
/// Returns the path of the current EXE file, without the EXE filename, and
/// without a trailing backslash.
///
/// In a debug build, the `target\debug` folders will be suppressed.
#[cfg(not(debug_assertions))]
#[must_use]
pub fn exe_path() -> SysResult<String> {
Ok(
get_path(&HINSTANCE::NULL.GetModuleFileName()?)
.unwrap().to_owned(),
)
}
/// Returns true if the path exists.
#[must_use]
pub fn exists(full_path: &str) -> bool {
GetFileAttributes(full_path).is_ok()
}
/// Extracts the file name from a full path, if any.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let f = w::path::get_file_name("C:\\Temp\\foo.txt"); // foo.txt
/// ```
#[must_use]
pub fn get_file_name(full_path: &str) -> Option<&str> {
match full_path.rfind('\\') {
None => Some(full_path), // if no backslash, the whole string is the file name
Some(idx) => if idx == full_path.chars().count() - 1 {
None // last char is '\\', no file name
} else {
Some(&full_path[idx + 1..])
},
}
}
/// Extracts the full path, but the last part.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::get_path("C:\\Temp\\xx\\a.txt"); // C:\Temp\xx
/// let q = w::path::get_path("C:\\Temp\\xx\\"); // C:\Temp\xx
/// let r = w::path::get_path("C:\\Temp\\xx"); // C:\Temp"
/// ```
#[must_use]
pub fn get_path(full_path: &str) -> Option<&str> {
full_path.rfind('\\') // if no backslash, the whole string is the file name, so no path
.map(|idx| &full_path[0..idx])
}
/// Tells whether the full path ends in one of the given extensions,
/// case-insensitive.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// println!("{}",
/// w::path::has_extension("file.txt", &[".txt", ".bat"]));
/// ```
#[must_use]
pub fn has_extension(full_path: &str, extensions: &[impl AsRef<str>]) -> bool {
let full_path_u = full_path.to_uppercase();
extensions.iter()
.find(|ext| {
let ext_u = ext.as_ref().to_uppercase();
full_path_u.ends_with(&ext_u)
})
.is_some()
}
/// Returns true if the path is a directory.
///
/// # Panics
///
/// Panics if the path does not exist.
#[must_use]
pub fn is_directory(full_path: &str) -> bool {
let flags = GetFileAttributes(full_path).unwrap();
flags.has(co::FILE_ATTRIBUTE::DIRECTORY)
}
/// Returns true if the path is hidden.
///
/// # Panics
///
/// Panics if the path does not exist.
#[must_use]
pub fn is_hidden(full_path: &str) -> bool {
let flags = GetFileAttributes(full_path).unwrap();
flags.has(co::FILE_ATTRIBUTE::HIDDEN)
}
/// Replaces the extension by the given one.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_extension(
/// "C:\\Temp\\something.txt", ".sh"); // C:\Temp\something.sh
/// ```
#[must_use]
pub fn replace_extension(full_path: &str, new_extension: &str) -> String {
if let Some(last) = full_path.chars().last() {
if last == '\\' { // full_path is a directory, do nothing
return rtrim_backslash(full_path).to_owned();
}
}
let new_has_dot = new_extension.chars().next() == Some('.');
match full_path.rfind('.') {
None => format!("{}{}{}", // file name without extension, just append it
full_path,
if new_has_dot { "" } else { "." },
new_extension,
),
Some(idx) => format!("{}{}{}",
&full_path[0..idx],
if new_has_dot { "" } else { "." },
new_extension,
),
}
}
/// Replaces the file name by the given one.
#[must_use]
pub fn replace_file_name(full_path: &str, new_file: &str) -> String {
match get_path(full_path) {
None => new_file.to_owned(),
Some(path) => format!("{}\\{}", path, new_file),
}
}
/// Keeps the file name and replaces the path by the given one.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_path( // C:\another\foo.txt
/// "C:\\Temp\\foo.txt",
/// "C:\\another",
/// );
/// ```
#[must_use]
pub fn replace_path(full_path: &str, new_path: &str) -> String {
let file_name = get_file_name(full_path);
format!("{}{}{}",
rtrim_backslash(new_path),
if file_name.is_some() { "\\" } else { "" },
file_name.unwrap_or(""))
}
/// Removes a trailing backslash, if any.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::rtrim_backslash("C:\\Temp\\"); // C:\Temp
/// ```
#[must_use]
pub fn rtrim_backslash(full_path: &str) -> &str {
match full_path.chars().last() {
None => full_path, // empty string
Some(last_ch) => if last_ch == '\\' {
let mut chars = full_path.chars();
chars.next_back(); // remove last char
chars.as_str()
} else {
full_path // no trailing backslash
},
}
}
/// Returns a `Vec` with each part of the full path.
#[must_use]
pub fn split_parts(full_path: &str) -> Vec<&str> {
let no_bs = rtrim_backslash(full_path);
no_bs.split('\\').collect()
}
//------------------------------------------------------------------------------
pub(in crate::kernel) struct DirListIter<'a> {
dir_path: String,
filter: Option<&'a str>,
hfind: Option<FindCloseGuard>,
wfd: WIN32_FIND_DATA,
no_more: bool,
}
impl<'a> Iterator for DirListIter<'a> {
type Item = SysResult<String>;
fn next(&mut self) -> Option<Self::Item> {
if self.no_more {
return None;
}
let found = match &self.hfind {
None => { // first pass
let dir_final = match self.filter {
None => format!("{}\\*", self.dir_path),
Some(filter) => format!("{}\\{}", self.dir_path, filter),
};
let found = match HFINDFILE::FindFirstFile(&dir_final, &mut self.wfd) {
Err(e) => {
self.no_more = true; // prevent further iterations
return Some(Err(e));
},
Ok((hfind, found)) => {
self.hfind = Some(hfind); // store our find handle
found
},
};
found
},
Some(hfind) => { // subsequent passes
match hfind.FindNextFile(&mut self.wfd) {
Err(e) => {
self.no_more = true; // prevent further iterations
return Some(Err(e));
},
Ok(found) => found,
}
},
};
if found {
let file_name = self.wfd.cFileName();
if file_name == "." || file_name == ".." { // skip these
self.next()
} else {
Some(Ok(format!("{}\\{}", self.dir_path, self.wfd.cFileName())))
}
} else {
None
}
}
}
impl<'a> DirListIter<'a> {
pub(in crate::kernel) fn new(
dir_path: String,
filter: Option<&'a str>,
) -> Self {
Self {
dir_path: rtrim_backslash(&dir_path).to_owned(),
filter,
hfind: None,
wfd: WIN32_FIND_DATA::default(),
no_more: false,
}
}
}
//------------------------------------------------------------------------------
pub(in crate::kernel) struct DirWalkIter<'a> {
runner: DirListIter<'a>,
subdir_runner: Option<Box<DirWalkIter<'a>>>,
no_more: bool,
}
impl<'a> Iterator for DirWalkIter<'a> {
type Item = SysResult<String>;
fn next(&mut self) -> Option<Self::Item> {
if self.no_more {
return None;
}
match &mut self.subdir_runner {
None => {
let cur_file = self.runner.next();
match cur_file {
None => None,
Some(cur_file) => {
match cur_file {
Err(e) => {
self.no_more = true; // prevent further iterations
Some(Err(e))
},
Ok(cur_file) => {
if is_directory(&cur_file) {
self.subdir_runner = Some(Box::new(Self::new(cur_file))); // recursively
self.next()
} else {
Some(Ok(cur_file))
}
},
}
},
}
},
Some(subdir_runner) => {
let inner_file = subdir_runner.next();
match inner_file {
None => { // subdir_runner finished his work
self.subdir_runner = None;
self.next()
},
Some(inner_file) => {
Some(inner_file)
},
}
|
impl<'a> DirWalkIter<'a> {
pub(in crate::kernel) fn new(dir_path: String) -> Self {
Self {
runner: DirListIter::new(dir_path, None),
subdir_runner: None,
no_more: false,
}
}
} | },
}
}
}
| random_line_split |
path.rs | //! File path utilities.
//!
//! Some of the functions are similar to [`std::path::Path`] ones, but here they
//! work directly upon [`&str`](str) instead of [`&OsStr`](std::ffi::OsStr).
use crate::co;
use crate::decl::*;
use crate::guard::*;
use crate::prelude::*;
/// Returns an iterator over the files and folders within a directory.
/// Optionally, a wildcard can be specified to filter files by name.
///
/// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE)
/// iteration functions.
///
/// # Examples
///
/// Listing all text files in a directory:
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// for file_path in w::path::dir_list("C:\\temp", Some("*.txt")) {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// }
/// # Ok::<_, winsafe::co::ERROR>(())
/// ```
#[must_use]
pub fn dir_list<'a>(
dir_path: &'a str,
filter: Option<&'a str>,
) -> impl Iterator<Item = SysResult<String>> + 'a
{
DirListIter::new(dir_path.to_owned(), filter)
}
/// Returns an interator over the files within a directory, and all its
/// subdirectories, recursively.
///
/// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE)
/// iteration functions.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// // Ordinary for loop
/// for file_path in w::path::dir_walk("C:\\Temp") {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// }
///
/// // Closure with try_for_each
/// w::path::dir_walk("C:\\Temp")
/// .try_for_each(|file_path| {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// Ok(())
/// })?;
///
/// // Collecting into a Vec
/// let all = w::path::dir_walk("C:\\Temp")
/// .collect::<w::SysResult<Vec<_>>>()?;
///
/// // Transforming and collecting into a Vec
/// let all = w::path::dir_walk("C:\\Temp")
/// .map(|file_path| {
/// let file_path = file_path?;
/// Ok(format!("PATH: {}", file_path))
/// })
/// .collect::<w::SysResult<Vec<_>>>()?;
/// # Ok::<_, winsafe::co::ERROR>(())
/// ```
#[must_use]
pub fn dir_walk<'a>(
dir_path: &'a str,
) -> impl Iterator<Item = SysResult<String>> + 'a
{
DirWalkIter::new(dir_path.to_owned())
}
/// Returns the path of the current EXE file, without the EXE filename, and
/// without a trailing backslash.
///
/// In a debug build, the `target\debug` folders will be suppressed.
#[cfg(debug_assertions)]
#[must_use]
pub fn exe_path() -> SysResult<String> {
let dbg = HINSTANCE::NULL.GetModuleFileName()?;
Ok(
get_path( // target
get_path( // debug
get_path(&dbg).unwrap(), // exe name
).unwrap(),
).unwrap()
.to_owned(),
)
}
/// Returns the path of the current EXE file, without the EXE filename, and
/// without a trailing backslash.
///
/// In a debug build, the `target\debug` folders will be suppressed.
#[cfg(not(debug_assertions))]
#[must_use]
pub fn exe_path() -> SysResult<String> {
Ok(
get_path(&HINSTANCE::NULL.GetModuleFileName()?)
.unwrap().to_owned(),
)
}
/// Returns true if the path exists.
#[must_use]
pub fn exists(full_path: &str) -> bool {
GetFileAttributes(full_path).is_ok()
}
/// Extracts the file name from a full path, if any.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let f = w::path::get_file_name("C:\\Temp\\foo.txt"); // foo.txt
/// ```
#[must_use]
pub fn get_file_name(full_path: &str) -> Option<&str> |
/// Extracts the full path, but the last part.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::get_path("C:\\Temp\\xx\\a.txt"); // C:\Temp\xx
/// let q = w::path::get_path("C:\\Temp\\xx\\"); // C:\Temp\xx
/// let r = w::path::get_path("C:\\Temp\\xx"); // C:\Temp"
/// ```
#[must_use]
pub fn get_path(full_path: &str) -> Option<&str> {
full_path.rfind('\\') // if no backslash, the whole string is the file name, so no path
.map(|idx| &full_path[0..idx])
}
/// Tells whether the full path ends in one of the given extensions,
/// case-insensitive.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// println!("{}",
/// w::path::has_extension("file.txt", &[".txt", ".bat"]));
/// ```
#[must_use]
pub fn has_extension(full_path: &str, extensions: &[impl AsRef<str>]) -> bool {
let full_path_u = full_path.to_uppercase();
extensions.iter()
.find(|ext| {
let ext_u = ext.as_ref().to_uppercase();
full_path_u.ends_with(&ext_u)
})
.is_some()
}
/// Returns true if the path is a directory.
///
/// # Panics
///
/// Panics if the path does not exist.
#[must_use]
pub fn is_directory(full_path: &str) -> bool {
let flags = GetFileAttributes(full_path).unwrap();
flags.has(co::FILE_ATTRIBUTE::DIRECTORY)
}
/// Returns true if the path is hidden.
///
/// # Panics
///
/// Panics if the path does not exist.
#[must_use]
pub fn is_hidden(full_path: &str) -> bool {
let flags = GetFileAttributes(full_path).unwrap();
flags.has(co::FILE_ATTRIBUTE::HIDDEN)
}
/// Replaces the extension by the given one.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_extension(
/// "C:\\Temp\\something.txt", ".sh"); // C:\Temp\something.sh
/// ```
#[must_use]
pub fn replace_extension(full_path: &str, new_extension: &str) -> String {
if let Some(last) = full_path.chars().last() {
if last == '\\' { // full_path is a directory, do nothing
return rtrim_backslash(full_path).to_owned();
}
}
let new_has_dot = new_extension.chars().next() == Some('.');
match full_path.rfind('.') {
None => format!("{}{}{}", // file name without extension, just append it
full_path,
if new_has_dot { "" } else { "." },
new_extension,
),
Some(idx) => format!("{}{}{}",
&full_path[0..idx],
if new_has_dot { "" } else { "." },
new_extension,
),
}
}
/// Replaces the file name by the given one.
#[must_use]
pub fn replace_file_name(full_path: &str, new_file: &str) -> String {
match get_path(full_path) {
None => new_file.to_owned(),
Some(path) => format!("{}\\{}", path, new_file),
}
}
/// Keeps the file name and replaces the path by the given one.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_path( // C:\another\foo.txt
/// "C:\\Temp\\foo.txt",
/// "C:\\another",
/// );
/// ```
#[must_use]
pub fn replace_path(full_path: &str, new_path: &str) -> String {
let file_name = get_file_name(full_path);
format!("{}{}{}",
rtrim_backslash(new_path),
if file_name.is_some() { "\\" } else { "" },
file_name.unwrap_or(""))
}
/// Removes a trailing backslash, if any.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::rtrim_backslash("C:\\Temp\\"); // C:\Temp
/// ```
#[must_use]
pub fn rtrim_backslash(full_path: &str) -> &str {
match full_path.chars().last() {
None => full_path, // empty string
Some(last_ch) => if last_ch == '\\' {
let mut chars = full_path.chars();
chars.next_back(); // remove last char
chars.as_str()
} else {
full_path // no trailing backslash
},
}
}
/// Returns a `Vec` with each part of the full path.
#[must_use]
pub fn split_parts(full_path: &str) -> Vec<&str> {
let no_bs = rtrim_backslash(full_path);
no_bs.split('\\').collect()
}
//------------------------------------------------------------------------------
pub(in crate::kernel) struct DirListIter<'a> {
dir_path: String,
filter: Option<&'a str>,
hfind: Option<FindCloseGuard>,
wfd: WIN32_FIND_DATA,
no_more: bool,
}
impl<'a> Iterator for DirListIter<'a> {
type Item = SysResult<String>;
fn next(&mut self) -> Option<Self::Item> {
if self.no_more {
return None;
}
let found = match &self.hfind {
None => { // first pass
let dir_final = match self.filter {
None => format!("{}\\*", self.dir_path),
Some(filter) => format!("{}\\{}", self.dir_path, filter),
};
let found = match HFINDFILE::FindFirstFile(&dir_final, &mut self.wfd) {
Err(e) => {
self.no_more = true; // prevent further iterations
return Some(Err(e));
},
Ok((hfind, found)) => {
self.hfind = Some(hfind); // store our find handle
found
},
};
found
},
Some(hfind) => { // subsequent passes
match hfind.FindNextFile(&mut self.wfd) {
Err(e) => {
self.no_more = true; // prevent further iterations
return Some(Err(e));
},
Ok(found) => found,
}
},
};
if found {
let file_name = self.wfd.cFileName();
if file_name == "." || file_name == ".." { // skip these
self.next()
} else {
Some(Ok(format!("{}\\{}", self.dir_path, self.wfd.cFileName())))
}
} else {
None
}
}
}
impl<'a> DirListIter<'a> {
pub(in crate::kernel) fn new(
dir_path: String,
filter: Option<&'a str>,
) -> Self {
Self {
dir_path: rtrim_backslash(&dir_path).to_owned(),
filter,
hfind: None,
wfd: WIN32_FIND_DATA::default(),
no_more: false,
}
}
}
//------------------------------------------------------------------------------
pub(in crate::kernel) struct DirWalkIter<'a> {
runner: DirListIter<'a>,
subdir_runner: Option<Box<DirWalkIter<'a>>>,
no_more: bool,
}
impl<'a> Iterator for DirWalkIter<'a> {
type Item = SysResult<String>;
fn next(&mut self) -> Option<Self::Item> {
if self.no_more {
return None;
}
match &mut self.subdir_runner {
None => {
let cur_file = self.runner.next();
match cur_file {
None => None,
Some(cur_file) => {
match cur_file {
Err(e) => {
self.no_more = true; // prevent further iterations
Some(Err(e))
},
Ok(cur_file) => {
if is_directory(&cur_file) {
self.subdir_runner = Some(Box::new(Self::new(cur_file))); // recursively
self.next()
} else {
Some(Ok(cur_file))
}
},
}
},
}
},
Some(subdir_runner) => {
let inner_file = subdir_runner.next();
match inner_file {
None => { // subdir_runner finished his work
self.subdir_runner = None;
self.next()
},
Some(inner_file) => {
Some(inner_file)
},
}
},
}
}
}
impl<'a> DirWalkIter<'a> {
pub(in crate::kernel) fn new(dir_path: String) -> Self {
Self {
runner: DirListIter::new(dir_path, None),
subdir_runner: None,
no_more: false,
}
}
}
| {
match full_path.rfind('\\') {
None => Some(full_path), // if no backslash, the whole string is the file name
Some(idx) => if idx == full_path.chars().count() - 1 {
None // last char is '\\', no file name
} else {
Some(&full_path[idx + 1..])
},
}
} | identifier_body |
path.rs | //! File path utilities.
//!
//! Some of the functions are similar to [`std::path::Path`] ones, but here they
//! work directly upon [`&str`](str) instead of [`&OsStr`](std::ffi::OsStr).
use crate::co;
use crate::decl::*;
use crate::guard::*;
use crate::prelude::*;
/// Returns an iterator over the files and folders within a directory.
/// Optionally, a wildcard can be specified to filter files by name.
///
/// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE)
/// iteration functions.
///
/// # Examples
///
/// Listing all text files in a directory:
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// for file_path in w::path::dir_list("C:\\temp", Some("*.txt")) {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// }
/// # Ok::<_, winsafe::co::ERROR>(())
/// ```
#[must_use]
pub fn dir_list<'a>(
dir_path: &'a str,
filter: Option<&'a str>,
) -> impl Iterator<Item = SysResult<String>> + 'a
{
DirListIter::new(dir_path.to_owned(), filter)
}
/// Returns an interator over the files within a directory, and all its
/// subdirectories, recursively.
///
/// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE)
/// iteration functions.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// // Ordinary for loop
/// for file_path in w::path::dir_walk("C:\\Temp") {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// }
///
/// // Closure with try_for_each
/// w::path::dir_walk("C:\\Temp")
/// .try_for_each(|file_path| {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// Ok(())
/// })?;
///
/// // Collecting into a Vec
/// let all = w::path::dir_walk("C:\\Temp")
/// .collect::<w::SysResult<Vec<_>>>()?;
///
/// // Transforming and collecting into a Vec
/// let all = w::path::dir_walk("C:\\Temp")
/// .map(|file_path| {
/// let file_path = file_path?;
/// Ok(format!("PATH: {}", file_path))
/// })
/// .collect::<w::SysResult<Vec<_>>>()?;
/// # Ok::<_, winsafe::co::ERROR>(())
/// ```
#[must_use]
pub fn dir_walk<'a>(
dir_path: &'a str,
) -> impl Iterator<Item = SysResult<String>> + 'a
{
DirWalkIter::new(dir_path.to_owned())
}
/// Returns the path of the current EXE file, without the EXE filename, and
/// without a trailing backslash.
///
/// In a debug build, the `target\debug` folders will be suppressed.
#[cfg(debug_assertions)]
#[must_use]
pub fn exe_path() -> SysResult<String> {
let dbg = HINSTANCE::NULL.GetModuleFileName()?;
Ok(
get_path( // target
get_path( // debug
get_path(&dbg).unwrap(), // exe name
).unwrap(),
).unwrap()
.to_owned(),
)
}
/// Returns the path of the current EXE file, without the EXE filename, and
/// without a trailing backslash.
///
/// In a debug build, the `target\debug` folders will be suppressed.
#[cfg(not(debug_assertions))]
#[must_use]
pub fn exe_path() -> SysResult<String> {
Ok(
get_path(&HINSTANCE::NULL.GetModuleFileName()?)
.unwrap().to_owned(),
)
}
/// Returns true if the path exists.
#[must_use]
pub fn exists(full_path: &str) -> bool {
GetFileAttributes(full_path).is_ok()
}
/// Extracts the file name from a full path, if any.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let f = w::path::get_file_name("C:\\Temp\\foo.txt"); // foo.txt
/// ```
#[must_use]
pub fn get_file_name(full_path: &str) -> Option<&str> {
match full_path.rfind('\\') {
None => Some(full_path), // if no backslash, the whole string is the file name
Some(idx) => if idx == full_path.chars().count() - 1 {
None // last char is '\\', no file name
} else {
Some(&full_path[idx + 1..])
},
}
}
/// Extracts the full path, but the last part.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::get_path("C:\\Temp\\xx\\a.txt"); // C:\Temp\xx
/// let q = w::path::get_path("C:\\Temp\\xx\\"); // C:\Temp\xx
/// let r = w::path::get_path("C:\\Temp\\xx"); // C:\Temp"
/// ```
#[must_use]
pub fn get_path(full_path: &str) -> Option<&str> {
full_path.rfind('\\') // if no backslash, the whole string is the file name, so no path
.map(|idx| &full_path[0..idx])
}
/// Tells whether the full path ends in one of the given extensions,
/// case-insensitive.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// println!("{}",
/// w::path::has_extension("file.txt", &[".txt", ".bat"]));
/// ```
#[must_use]
pub fn has_extension(full_path: &str, extensions: &[impl AsRef<str>]) -> bool {
let full_path_u = full_path.to_uppercase();
extensions.iter()
.find(|ext| {
let ext_u = ext.as_ref().to_uppercase();
full_path_u.ends_with(&ext_u)
})
.is_some()
}
/// Returns true if the path is a directory.
///
/// # Panics
///
/// Panics if the path does not exist.
#[must_use]
pub fn is_directory(full_path: &str) -> bool {
let flags = GetFileAttributes(full_path).unwrap();
flags.has(co::FILE_ATTRIBUTE::DIRECTORY)
}
/// Returns true if the path is hidden.
///
/// # Panics
///
/// Panics if the path does not exist.
#[must_use]
pub fn is_hidden(full_path: &str) -> bool {
let flags = GetFileAttributes(full_path).unwrap();
flags.has(co::FILE_ATTRIBUTE::HIDDEN)
}
/// Replaces the extension by the given one.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_extension(
/// "C:\\Temp\\something.txt", ".sh"); // C:\Temp\something.sh
/// ```
#[must_use]
pub fn replace_extension(full_path: &str, new_extension: &str) -> String {
if let Some(last) = full_path.chars().last() {
if last == '\\' { // full_path is a directory, do nothing
return rtrim_backslash(full_path).to_owned();
}
}
let new_has_dot = new_extension.chars().next() == Some('.');
match full_path.rfind('.') {
None => format!("{}{}{}", // file name without extension, just append it
full_path,
if new_has_dot { "" } else { "." },
new_extension,
),
Some(idx) => format!("{}{}{}",
&full_path[0..idx],
if new_has_dot { "" } else { "." },
new_extension,
),
}
}
/// Replaces the file name by the given one.
#[must_use]
pub fn replace_file_name(full_path: &str, new_file: &str) -> String {
match get_path(full_path) {
None => new_file.to_owned(),
Some(path) => format!("{}\\{}", path, new_file),
}
}
/// Keeps the file name and replaces the path by the given one.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_path( // C:\another\foo.txt
/// "C:\\Temp\\foo.txt",
/// "C:\\another",
/// );
/// ```
#[must_use]
pub fn replace_path(full_path: &str, new_path: &str) -> String {
let file_name = get_file_name(full_path);
format!("{}{}{}",
rtrim_backslash(new_path),
if file_name.is_some() { "\\" } else { "" },
file_name.unwrap_or(""))
}
/// Removes a trailing backslash, if any.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::rtrim_backslash("C:\\Temp\\"); // C:\Temp
/// ```
#[must_use]
pub fn rtrim_backslash(full_path: &str) -> &str {
match full_path.chars().last() {
None => full_path, // empty string
Some(last_ch) => if last_ch == '\\' {
let mut chars = full_path.chars();
chars.next_back(); // remove last char
chars.as_str()
} else {
full_path // no trailing backslash
},
}
}
/// Returns a `Vec` with each part of the full path.
#[must_use]
pub fn split_parts(full_path: &str) -> Vec<&str> {
let no_bs = rtrim_backslash(full_path);
no_bs.split('\\').collect()
}
//------------------------------------------------------------------------------
pub(in crate::kernel) struct DirListIter<'a> {
dir_path: String,
filter: Option<&'a str>,
hfind: Option<FindCloseGuard>,
wfd: WIN32_FIND_DATA,
no_more: bool,
}
impl<'a> Iterator for DirListIter<'a> {
type Item = SysResult<String>;
fn next(&mut self) -> Option<Self::Item> {
if self.no_more {
return None;
}
let found = match &self.hfind {
None => { // first pass
let dir_final = match self.filter {
None => format!("{}\\*", self.dir_path),
Some(filter) => format!("{}\\{}", self.dir_path, filter),
};
let found = match HFINDFILE::FindFirstFile(&dir_final, &mut self.wfd) {
Err(e) => {
self.no_more = true; // prevent further iterations
return Some(Err(e));
},
Ok((hfind, found)) => {
self.hfind = Some(hfind); // store our find handle
found
},
};
found
},
Some(hfind) => { // subsequent passes
match hfind.FindNextFile(&mut self.wfd) {
Err(e) => {
self.no_more = true; // prevent further iterations
return Some(Err(e));
},
Ok(found) => found,
}
},
};
if found {
let file_name = self.wfd.cFileName();
if file_name == "." || file_name == ".." { // skip these
self.next()
} else {
Some(Ok(format!("{}\\{}", self.dir_path, self.wfd.cFileName())))
}
} else {
None
}
}
}
impl<'a> DirListIter<'a> {
pub(in crate::kernel) fn new(
dir_path: String,
filter: Option<&'a str>,
) -> Self {
Self {
dir_path: rtrim_backslash(&dir_path).to_owned(),
filter,
hfind: None,
wfd: WIN32_FIND_DATA::default(),
no_more: false,
}
}
}
//------------------------------------------------------------------------------
pub(in crate::kernel) struct DirWalkIter<'a> {
runner: DirListIter<'a>,
subdir_runner: Option<Box<DirWalkIter<'a>>>,
no_more: bool,
}
impl<'a> Iterator for DirWalkIter<'a> {
type Item = SysResult<String>;
fn next(&mut self) -> Option<Self::Item> {
if self.no_more |
match &mut self.subdir_runner {
None => {
let cur_file = self.runner.next();
match cur_file {
None => None,
Some(cur_file) => {
match cur_file {
Err(e) => {
self.no_more = true; // prevent further iterations
Some(Err(e))
},
Ok(cur_file) => {
if is_directory(&cur_file) {
self.subdir_runner = Some(Box::new(Self::new(cur_file))); // recursively
self.next()
} else {
Some(Ok(cur_file))
}
},
}
},
}
},
Some(subdir_runner) => {
let inner_file = subdir_runner.next();
match inner_file {
None => { // subdir_runner finished his work
self.subdir_runner = None;
self.next()
},
Some(inner_file) => {
Some(inner_file)
},
}
},
}
}
}
impl<'a> DirWalkIter<'a> {
pub(in crate::kernel) fn new(dir_path: String) -> Self {
Self {
runner: DirListIter::new(dir_path, None),
subdir_runner: None,
no_more: false,
}
}
}
| {
return None;
} | conditional_block |
path.rs | //! File path utilities.
//!
//! Some of the functions are similar to [`std::path::Path`] ones, but here they
//! work directly upon [`&str`](str) instead of [`&OsStr`](std::ffi::OsStr).
use crate::co;
use crate::decl::*;
use crate::guard::*;
use crate::prelude::*;
/// Returns an iterator over the files and folders within a directory.
/// Optionally, a wildcard can be specified to filter files by name.
///
/// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE)
/// iteration functions.
///
/// # Examples
///
/// Listing all text files in a directory:
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// for file_path in w::path::dir_list("C:\\temp", Some("*.txt")) {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// }
/// # Ok::<_, winsafe::co::ERROR>(())
/// ```
#[must_use]
pub fn dir_list<'a>(
dir_path: &'a str,
filter: Option<&'a str>,
) -> impl Iterator<Item = SysResult<String>> + 'a
{
DirListIter::new(dir_path.to_owned(), filter)
}
/// Returns an interator over the files within a directory, and all its
/// subdirectories, recursively.
///
/// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE)
/// iteration functions.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// // Ordinary for loop
/// for file_path in w::path::dir_walk("C:\\Temp") {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// }
///
/// // Closure with try_for_each
/// w::path::dir_walk("C:\\Temp")
/// .try_for_each(|file_path| {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// Ok(())
/// })?;
///
/// // Collecting into a Vec
/// let all = w::path::dir_walk("C:\\Temp")
/// .collect::<w::SysResult<Vec<_>>>()?;
///
/// // Transforming and collecting into a Vec
/// let all = w::path::dir_walk("C:\\Temp")
/// .map(|file_path| {
/// let file_path = file_path?;
/// Ok(format!("PATH: {}", file_path))
/// })
/// .collect::<w::SysResult<Vec<_>>>()?;
/// # Ok::<_, winsafe::co::ERROR>(())
/// ```
#[must_use]
pub fn dir_walk<'a>(
dir_path: &'a str,
) -> impl Iterator<Item = SysResult<String>> + 'a
{
DirWalkIter::new(dir_path.to_owned())
}
/// Returns the path of the current EXE file, without the EXE filename, and
/// without a trailing backslash.
///
/// In a debug build, the `target\debug` folders will be suppressed.
#[cfg(debug_assertions)]
#[must_use]
pub fn exe_path() -> SysResult<String> {
let dbg = HINSTANCE::NULL.GetModuleFileName()?;
Ok(
get_path( // target
get_path( // debug
get_path(&dbg).unwrap(), // exe name
).unwrap(),
).unwrap()
.to_owned(),
)
}
/// Returns the path of the current EXE file, without the EXE filename, and
/// without a trailing backslash.
///
/// In a debug build, the `target\debug` folders will be suppressed.
#[cfg(not(debug_assertions))]
#[must_use]
pub fn exe_path() -> SysResult<String> {
Ok(
get_path(&HINSTANCE::NULL.GetModuleFileName()?)
.unwrap().to_owned(),
)
}
/// Returns true if the path exists.
#[must_use]
pub fn exists(full_path: &str) -> bool {
GetFileAttributes(full_path).is_ok()
}
/// Extracts the file name from a full path, if any.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let f = w::path::get_file_name("C:\\Temp\\foo.txt"); // foo.txt
/// ```
#[must_use]
pub fn get_file_name(full_path: &str) -> Option<&str> {
match full_path.rfind('\\') {
None => Some(full_path), // if no backslash, the whole string is the file name
Some(idx) => if idx == full_path.chars().count() - 1 {
None // last char is '\\', no file name
} else {
Some(&full_path[idx + 1..])
},
}
}
/// Extracts the full path, but the last part.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::get_path("C:\\Temp\\xx\\a.txt"); // C:\Temp\xx
/// let q = w::path::get_path("C:\\Temp\\xx\\"); // C:\Temp\xx
/// let r = w::path::get_path("C:\\Temp\\xx"); // C:\Temp"
/// ```
#[must_use]
pub fn get_path(full_path: &str) -> Option<&str> {
full_path.rfind('\\') // if no backslash, the whole string is the file name, so no path
.map(|idx| &full_path[0..idx])
}
/// Tells whether the full path ends in one of the given extensions,
/// case-insensitive.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// println!("{}",
/// w::path::has_extension("file.txt", &[".txt", ".bat"]));
/// ```
#[must_use]
pub fn has_extension(full_path: &str, extensions: &[impl AsRef<str>]) -> bool {
let full_path_u = full_path.to_uppercase();
extensions.iter()
.find(|ext| {
let ext_u = ext.as_ref().to_uppercase();
full_path_u.ends_with(&ext_u)
})
.is_some()
}
/// Returns true if the path is a directory.
///
/// # Panics
///
/// Panics if the path does not exist.
#[must_use]
pub fn is_directory(full_path: &str) -> bool {
let flags = GetFileAttributes(full_path).unwrap();
flags.has(co::FILE_ATTRIBUTE::DIRECTORY)
}
/// Returns true if the path is hidden.
///
/// # Panics
///
/// Panics if the path does not exist.
#[must_use]
pub fn is_hidden(full_path: &str) -> bool {
let flags = GetFileAttributes(full_path).unwrap();
flags.has(co::FILE_ATTRIBUTE::HIDDEN)
}
/// Replaces the extension by the given one.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_extension(
/// "C:\\Temp\\something.txt", ".sh"); // C:\Temp\something.sh
/// ```
#[must_use]
pub fn | (full_path: &str, new_extension: &str) -> String {
if let Some(last) = full_path.chars().last() {
if last == '\\' { // full_path is a directory, do nothing
return rtrim_backslash(full_path).to_owned();
}
}
let new_has_dot = new_extension.chars().next() == Some('.');
match full_path.rfind('.') {
None => format!("{}{}{}", // file name without extension, just append it
full_path,
if new_has_dot { "" } else { "." },
new_extension,
),
Some(idx) => format!("{}{}{}",
&full_path[0..idx],
if new_has_dot { "" } else { "." },
new_extension,
),
}
}
/// Replaces the file name by the given one.
#[must_use]
pub fn replace_file_name(full_path: &str, new_file: &str) -> String {
match get_path(full_path) {
None => new_file.to_owned(),
Some(path) => format!("{}\\{}", path, new_file),
}
}
/// Keeps the file name and replaces the path by the given one.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_path( // C:\another\foo.txt
/// "C:\\Temp\\foo.txt",
/// "C:\\another",
/// );
/// ```
#[must_use]
pub fn replace_path(full_path: &str, new_path: &str) -> String {
let file_name = get_file_name(full_path);
format!("{}{}{}",
rtrim_backslash(new_path),
if file_name.is_some() { "\\" } else { "" },
file_name.unwrap_or(""))
}
/// Removes a trailing backslash, if any.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::rtrim_backslash("C:\\Temp\\"); // C:\Temp
/// ```
#[must_use]
pub fn rtrim_backslash(full_path: &str) -> &str {
match full_path.chars().last() {
None => full_path, // empty string
Some(last_ch) => if last_ch == '\\' {
let mut chars = full_path.chars();
chars.next_back(); // remove last char
chars.as_str()
} else {
full_path // no trailing backslash
},
}
}
/// Returns a `Vec` with each part of the full path.
#[must_use]
pub fn split_parts(full_path: &str) -> Vec<&str> {
let no_bs = rtrim_backslash(full_path);
no_bs.split('\\').collect()
}
//------------------------------------------------------------------------------
pub(in crate::kernel) struct DirListIter<'a> {
dir_path: String,
filter: Option<&'a str>,
hfind: Option<FindCloseGuard>,
wfd: WIN32_FIND_DATA,
no_more: bool,
}
impl<'a> Iterator for DirListIter<'a> {
type Item = SysResult<String>;
fn next(&mut self) -> Option<Self::Item> {
if self.no_more {
return None;
}
let found = match &self.hfind {
None => { // first pass
let dir_final = match self.filter {
None => format!("{}\\*", self.dir_path),
Some(filter) => format!("{}\\{}", self.dir_path, filter),
};
let found = match HFINDFILE::FindFirstFile(&dir_final, &mut self.wfd) {
Err(e) => {
self.no_more = true; // prevent further iterations
return Some(Err(e));
},
Ok((hfind, found)) => {
self.hfind = Some(hfind); // store our find handle
found
},
};
found
},
Some(hfind) => { // subsequent passes
match hfind.FindNextFile(&mut self.wfd) {
Err(e) => {
self.no_more = true; // prevent further iterations
return Some(Err(e));
},
Ok(found) => found,
}
},
};
if found {
let file_name = self.wfd.cFileName();
if file_name == "." || file_name == ".." { // skip these
self.next()
} else {
Some(Ok(format!("{}\\{}", self.dir_path, self.wfd.cFileName())))
}
} else {
None
}
}
}
impl<'a> DirListIter<'a> {
pub(in crate::kernel) fn new(
dir_path: String,
filter: Option<&'a str>,
) -> Self {
Self {
dir_path: rtrim_backslash(&dir_path).to_owned(),
filter,
hfind: None,
wfd: WIN32_FIND_DATA::default(),
no_more: false,
}
}
}
//------------------------------------------------------------------------------
pub(in crate::kernel) struct DirWalkIter<'a> {
runner: DirListIter<'a>,
subdir_runner: Option<Box<DirWalkIter<'a>>>,
no_more: bool,
}
impl<'a> Iterator for DirWalkIter<'a> {
type Item = SysResult<String>;
fn next(&mut self) -> Option<Self::Item> {
if self.no_more {
return None;
}
match &mut self.subdir_runner {
None => {
let cur_file = self.runner.next();
match cur_file {
None => None,
Some(cur_file) => {
match cur_file {
Err(e) => {
self.no_more = true; // prevent further iterations
Some(Err(e))
},
Ok(cur_file) => {
if is_directory(&cur_file) {
self.subdir_runner = Some(Box::new(Self::new(cur_file))); // recursively
self.next()
} else {
Some(Ok(cur_file))
}
},
}
},
}
},
Some(subdir_runner) => {
let inner_file = subdir_runner.next();
match inner_file {
None => { // subdir_runner finished his work
self.subdir_runner = None;
self.next()
},
Some(inner_file) => {
Some(inner_file)
},
}
},
}
}
}
impl<'a> DirWalkIter<'a> {
pub(in crate::kernel) fn new(dir_path: String) -> Self {
Self {
runner: DirListIter::new(dir_path, None),
subdir_runner: None,
no_more: false,
}
}
}
| replace_extension | identifier_name |
unitconverter.py | """
Copyright (c) 2014 Russell Nakamura
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class UnitNames(object):
"""
Unit Names is a namespace to hold units
"""
__slots__ = ()
# bits
bits = "bits"
kbits = "K" + bits
kilobits = kbits
mbits = "M" + bits
megabits = mbits
gbits = "G" + bits
gigabits = gbits
tbits = "T" + bits
terabits = tbits
pbits = "P" + bits
petabits = pbits
ebits = "E" + bits
exabits = ebits
zbits = "Z" + bits
zettabits = zbits
ybits = "Y" + bits
yottabits = ybits
# bytes
bytes = "Bytes"
kbytes = "K" + bytes
kilobytes = kbytes
mbytes = "M" + bytes
megabytes = mbytes
gbytes = "G" + bytes
gigabytes = gbytes
tbytes = "T" + bytes
terabytes = tbytes
pbytes = "P" + bytes
petabytes = pbytes
ebytes = "E" + bytes
exabytes = ebytes
zbytes = 'Z' + bytes
zettabytes = zbytes
ybytes = 'Y' + bytes
yottabytes = ybytes
class BinaryUnitNames(object):
"""
namespace for binary-unit names
"""
bits = UnitNames.bits
bibits = 'bi' + bits
kibibits = "ki" + bibits
mebibits = 'me' + bibits
gibibits = "gi" + bibits
tebibits = "te" + bibits
pebibits = "pe" + bibits
exbibits = "ex" + bibits
zebibits = "ze" + bibits
yobibits = "yo" + bibits
bytes = 'bytes'
bibytes = 'bi' + bytes
kibibytes = "ki" + bibytes
mebibytes = "me" + bibytes
gibibytes = 'gi' + bibytes
tebibytes = 'te' + bibytes
pebibytes = 'pe' + bibytes
exbibytes = "ex" + bibytes
zebibytes = "ze" + bibytes
yobibytes = "yo" + bibytes
# iperf base 2
iperf_bytes = UnitNames.bytes
iperf_kibibytes = UnitNames.kbytes
iperf_mebibytes = UnitNames.mbytes
iperf_gibibytes = UnitNames.gbytes
iperf_tebibytes = UnitNames.tbytes
iperf_pebibytes = UnitNames.pbytes
iperf_exbibytes = UnitNames.ebytes
iperf_zebibytes = UnitNames.zbytes
iperf_yobibytes = UnitNames.ybytes
# end BinaryUnitNames
IDENTITY = 1
ONE = 1.0
BYTE = 8
TO_BYTE = ONE/BYTE
class BaseConverter(dict):
"""
A creator of unit-conversion dictionaries
"""
def __init__(self, to_units, kilo_prefix):
"""
base_converter constructor
:param:
- `to_units`: a list of the units to covert to (has to be half to-bits, half to-bytes)
- `kilo_prefix`: kilo multiplier matching type of units
"""
self.to_units = to_units
self.kilo_prefix = kilo_prefix
self._prefix_conversions = None
self._bits_to_bytes = None
self._bytes_to_bits = None
# split the to_units list for later
self.bit_conversions = self.byte_conversions = len(to_units)//2
self.bit_units = to_units[:self.bit_conversions]
self.byte_units = to_units[self.byte_conversions:]
return
@property
def prefix_conversions(self):
"""
List of lists of prefix conversions
"""
if self._prefix_conversions is None:
# start with list that assumes value has no prefix
# this list is for 'bits' or 'bytes'
# the values will be 1, 1/kilo, 1/mega, etc.
start_list = [self.kilo_prefix**(-power)
for power in range(self.bit_conversions)]
self._prefix_conversions = self.conversions(conversion_factor=1,
start_list=start_list)
return self._prefix_conversions
@property
def bits_to_bytes(self):
"""
List of conversions for bits to bytes
"""
if self._bits_to_bytes is None:
self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)
return self._bits_to_bytes
@property
def bytes_to_bits(self):
"""
list of conversions for bytes to bits
"""
if self._bytes_to_bits is None:
self._bytes_to_bits = self.conversions(conversion_factor=BYTE)
return self._bytes_to_bits
def conversions(self, conversion_factor, start_list=None):
"""
Creates the converter-lists
:param:
- `conversion_factor`: multiplier for values (8 or 1/8, or 1)
- `start_list`: if given, use to start the conversion-list
:return: list of conversion_lists
"""
if start_list is None:
# assume that prefix_conversions exists (not safe, but...)
start_list = self.prefix_conversions[0]
# start with byte_factor times the base conversions (1, 1/kilo, etc.)
converter_list = [[conversion_factor * conversion
for conversion in start_list]]
for previous in range(self.bit_conversions - 1):
# 'pop' last item from previous list
# and prepend one higher-power conversion
next_conversions = ([self.kilo_prefix**(previous+1) * conversion_factor] +
converter_list[previous][:-1])
converter_list.append(next_conversions)
return converter_list
def build_conversions(self):
"""
builds the dictionary
"""
# from bits to bits or bytes
for index, units in enumerate(self.bit_units):
self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] +
self.bits_to_bytes[index])))
# from bytes to bits or bytes
for index, units in enumerate(self.byte_units):
self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] +
self.prefix_conversions[index])))
return
# end class BaseConverter
bit_units = [UnitNames.bits,
UnitNames.kbits,
UnitNames.mbits,
UnitNames.gbits,
UnitNames.terabits,
UnitNames.petabits,
UnitNames.exabits,
UnitNames.zettabits,
UnitNames.yottabits]
byte_units = [UnitNames.bytes,
UnitNames.kbytes,
UnitNames.mbytes,
UnitNames.gbytes,
UnitNames.terabytes,
UnitNames.petabytes,
UnitNames.exabytes,
UnitNames.zettabytes,
UnitNames.yottabytes]
decimal_to_units = bit_units + byte_units
KILO = 10**3
class UnitConverter(BaseConverter):
"""
The UnitConverter makes conversions based on a base-10 system
"""
def __init__(self):
super(UnitConverter, self).__init__(to_units=decimal_to_units,
kilo_prefix=KILO)
self.build_conversions()
return
# end class UnitConverter
DecimalUnitConverter = UnitConverter
to_bits = [BinaryUnitNames.bits,
BinaryUnitNames.kibibits,
BinaryUnitNames.mebibits,
BinaryUnitNames.gibibits,
BinaryUnitNames.tebibits,
BinaryUnitNames.pebibits,
BinaryUnitNames.exbibits,
BinaryUnitNames.zebibits,
BinaryUnitNames.yobibits]
to_bytes = [BinaryUnitNames.bytes,
BinaryUnitNames.kibibytes,
BinaryUnitNames.mebibytes,
BinaryUnitNames.gibibytes,
BinaryUnitNames.tebibytes,
BinaryUnitNames.pebibytes,
BinaryUnitNames.exbibytes,
BinaryUnitNames.zebibytes,
BinaryUnitNames.yobibytes]
binary_to_units = to_bits + to_bytes
KIBI = 2**10
class | (BaseConverter):
"""
The BinaryUnitconverter is a conversion lookup table for binary data
Usage::
converted = old * UnitConverter[old units][new units]
Use class UnitNames to get valid unit names
"""
def __init__(self):
super(BinaryUnitconverter, self).__init__(to_units=binary_to_units,
kilo_prefix=KIBI)
self.build_conversions()
return
# end class BinaryUnitConverter
to_bits = [BinaryUnitNames.bits,
BinaryUnitNames.kibibits,
BinaryUnitNames.mebibits,
BinaryUnitNames.gibibits,
BinaryUnitNames.tebibits,
BinaryUnitNames.pebibits,
BinaryUnitNames.exbibits,
BinaryUnitNames.zebibits,
BinaryUnitNames.yobibits]
to_bytes = [BinaryUnitNames.iperf_bytes,
BinaryUnitNames.iperf_kibibytes,
BinaryUnitNames.iperf_mebibytes,
BinaryUnitNames.iperf_gibibytes,
BinaryUnitNames.iperf_tebibytes,
BinaryUnitNames.iperf_pebibytes,
BinaryUnitNames.iperf_exbibytes,
BinaryUnitNames.iperf_zebibytes,
BinaryUnitNames.iperf_yobibytes]
iperf_binary_to_units = to_bits + to_bytes
class IperfbinaryConverter(BaseConverter):
"""
The IperfbinaryConverter is a conversion lookup table for binary data
Usage::
converter = IperfbinaryConverter()
converted = old * converter[old units][new units]
Use class UnitNames to get valid unit names
"""
def __init__(self):
super(IperfbinaryConverter, self).__init__(to_units=iperf_binary_to_units,
kilo_prefix=KIBI)
self.build_conversions()
return
# end class BinaryUnitConverter
if __name__ == "__builtin__":
unit_converter = UnitConverter()
bits = 10**6
converted = bits * unit_converter['bits']['Mbits']
print("{0} Mbits".format(converted))
if __name__ == "__builtin__":
binary_converter = BinaryUnitconverter()
MBytes = 1
bits = MBytes * binary_converter[BinaryUnitNames.mebibytes][UnitNames.bits]
print("{0:,} bits".format(bits))
if __name__ == '__builtin__':
mbits = bits * unit_converter[UnitNames.bits][UnitNames.mbits]
print('{0} Mbits'.format(mbits))
| BinaryUnitconverter | identifier_name |
unitconverter.py | """
Copyright (c) 2014 Russell Nakamura
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class UnitNames(object):
"""
Unit Names is a namespace to hold units
"""
__slots__ = ()
# bits
bits = "bits"
kbits = "K" + bits
kilobits = kbits
mbits = "M" + bits
megabits = mbits
gbits = "G" + bits
gigabits = gbits
tbits = "T" + bits
terabits = tbits
pbits = "P" + bits
petabits = pbits
ebits = "E" + bits
exabits = ebits
zbits = "Z" + bits
zettabits = zbits
ybits = "Y" + bits
yottabits = ybits
# bytes
bytes = "Bytes"
kbytes = "K" + bytes
kilobytes = kbytes
mbytes = "M" + bytes
megabytes = mbytes
gbytes = "G" + bytes
gigabytes = gbytes
tbytes = "T" + bytes
terabytes = tbytes
pbytes = "P" + bytes
petabytes = pbytes
ebytes = "E" + bytes
exabytes = ebytes
zbytes = 'Z' + bytes
zettabytes = zbytes
ybytes = 'Y' + bytes
yottabytes = ybytes
class BinaryUnitNames(object):
"""
namespace for binary-unit names
"""
bits = UnitNames.bits
bibits = 'bi' + bits
kibibits = "ki" + bibits
mebibits = 'me' + bibits
gibibits = "gi" + bibits
tebibits = "te" + bibits
pebibits = "pe" + bibits
exbibits = "ex" + bibits
zebibits = "ze" + bibits
yobibits = "yo" + bibits
bytes = 'bytes'
bibytes = 'bi' + bytes
kibibytes = "ki" + bibytes
mebibytes = "me" + bibytes
gibibytes = 'gi' + bibytes
tebibytes = 'te' + bibytes
pebibytes = 'pe' + bibytes
exbibytes = "ex" + bibytes
zebibytes = "ze" + bibytes
yobibytes = "yo" + bibytes
# iperf base 2
iperf_bytes = UnitNames.bytes
iperf_kibibytes = UnitNames.kbytes
iperf_mebibytes = UnitNames.mbytes
iperf_gibibytes = UnitNames.gbytes
iperf_tebibytes = UnitNames.tbytes
iperf_pebibytes = UnitNames.pbytes
iperf_exbibytes = UnitNames.ebytes
iperf_zebibytes = UnitNames.zbytes
iperf_yobibytes = UnitNames.ybytes
# end BinaryUnitNames
IDENTITY = 1
ONE = 1.0
BYTE = 8
TO_BYTE = ONE/BYTE
class BaseConverter(dict):
"""
A creator of unit-conversion dictionaries
"""
def __init__(self, to_units, kilo_prefix):
"""
base_converter constructor
:param:
- `to_units`: a list of the units to covert to (has to be half to-bits, half to-bytes)
- `kilo_prefix`: kilo multiplier matching type of units
"""
self.to_units = to_units
self.kilo_prefix = kilo_prefix
self._prefix_conversions = None
self._bits_to_bytes = None
self._bytes_to_bits = None
# split the to_units list for later
self.bit_conversions = self.byte_conversions = len(to_units)//2
self.bit_units = to_units[:self.bit_conversions]
self.byte_units = to_units[self.byte_conversions:]
return
@property
def prefix_conversions(self):
|
@property
def bits_to_bytes(self):
"""
List of conversions for bits to bytes
"""
if self._bits_to_bytes is None:
self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)
return self._bits_to_bytes
@property
def bytes_to_bits(self):
"""
list of conversions for bytes to bits
"""
if self._bytes_to_bits is None:
self._bytes_to_bits = self.conversions(conversion_factor=BYTE)
return self._bytes_to_bits
def conversions(self, conversion_factor, start_list=None):
"""
Creates the converter-lists
:param:
- `conversion_factor`: multiplier for values (8 or 1/8, or 1)
- `start_list`: if given, use to start the conversion-list
:return: list of conversion_lists
"""
if start_list is None:
# assume that prefix_conversions exists (not safe, but...)
start_list = self.prefix_conversions[0]
# start with byte_factor times the base conversions (1, 1/kilo, etc.)
converter_list = [[conversion_factor * conversion
for conversion in start_list]]
for previous in range(self.bit_conversions - 1):
# 'pop' last item from previous list
# and prepend one higher-power conversion
next_conversions = ([self.kilo_prefix**(previous+1) * conversion_factor] +
converter_list[previous][:-1])
converter_list.append(next_conversions)
return converter_list
def build_conversions(self):
"""
builds the dictionary
"""
# from bits to bits or bytes
for index, units in enumerate(self.bit_units):
self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] +
self.bits_to_bytes[index])))
# from bytes to bits or bytes
for index, units in enumerate(self.byte_units):
self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] +
self.prefix_conversions[index])))
return
# end class BaseConverter
bit_units = [UnitNames.bits,
UnitNames.kbits,
UnitNames.mbits,
UnitNames.gbits,
UnitNames.terabits,
UnitNames.petabits,
UnitNames.exabits,
UnitNames.zettabits,
UnitNames.yottabits]
byte_units = [UnitNames.bytes,
UnitNames.kbytes,
UnitNames.mbytes,
UnitNames.gbytes,
UnitNames.terabytes,
UnitNames.petabytes,
UnitNames.exabytes,
UnitNames.zettabytes,
UnitNames.yottabytes]
decimal_to_units = bit_units + byte_units
KILO = 10**3
class UnitConverter(BaseConverter):
"""
The UnitConverter makes conversions based on a base-10 system
"""
def __init__(self):
super(UnitConverter, self).__init__(to_units=decimal_to_units,
kilo_prefix=KILO)
self.build_conversions()
return
# end class UnitConverter
DecimalUnitConverter = UnitConverter
to_bits = [BinaryUnitNames.bits,
BinaryUnitNames.kibibits,
BinaryUnitNames.mebibits,
BinaryUnitNames.gibibits,
BinaryUnitNames.tebibits,
BinaryUnitNames.pebibits,
BinaryUnitNames.exbibits,
BinaryUnitNames.zebibits,
BinaryUnitNames.yobibits]
to_bytes = [BinaryUnitNames.bytes,
BinaryUnitNames.kibibytes,
BinaryUnitNames.mebibytes,
BinaryUnitNames.gibibytes,
BinaryUnitNames.tebibytes,
BinaryUnitNames.pebibytes,
BinaryUnitNames.exbibytes,
BinaryUnitNames.zebibytes,
BinaryUnitNames.yobibytes]
binary_to_units = to_bits + to_bytes
KIBI = 2**10
class BinaryUnitconverter(BaseConverter):
"""
The BinaryUnitconverter is a conversion lookup table for binary data
Usage::
converted = old * UnitConverter[old units][new units]
Use class UnitNames to get valid unit names
"""
def __init__(self):
super(BinaryUnitconverter, self).__init__(to_units=binary_to_units,
kilo_prefix=KIBI)
self.build_conversions()
return
# end class BinaryUnitConverter
to_bits = [BinaryUnitNames.bits,
BinaryUnitNames.kibibits,
BinaryUnitNames.mebibits,
BinaryUnitNames.gibibits,
BinaryUnitNames.tebibits,
BinaryUnitNames.pebibits,
BinaryUnitNames.exbibits,
BinaryUnitNames.zebibits,
BinaryUnitNames.yobibits]
to_bytes = [BinaryUnitNames.iperf_bytes,
BinaryUnitNames.iperf_kibibytes,
BinaryUnitNames.iperf_mebibytes,
BinaryUnitNames.iperf_gibibytes,
BinaryUnitNames.iperf_tebibytes,
BinaryUnitNames.iperf_pebibytes,
BinaryUnitNames.iperf_exbibytes,
BinaryUnitNames.iperf_zebibytes,
BinaryUnitNames.iperf_yobibytes]
iperf_binary_to_units = to_bits + to_bytes
class IperfbinaryConverter(BaseConverter):
"""
The IperfbinaryConverter is a conversion lookup table for binary data
Usage::
converter = IperfbinaryConverter()
converted = old * converter[old units][new units]
Use class UnitNames to get valid unit names
"""
def __init__(self):
super(IperfbinaryConverter, self).__init__(to_units=iperf_binary_to_units,
kilo_prefix=KIBI)
self.build_conversions()
return
# end class BinaryUnitConverter
if __name__ == "__builtin__":
unit_converter = UnitConverter()
bits = 10**6
converted = bits * unit_converter['bits']['Mbits']
print("{0} Mbits".format(converted))
if __name__ == "__builtin__":
binary_converter = BinaryUnitconverter()
MBytes = 1
bits = MBytes * binary_converter[BinaryUnitNames.mebibytes][UnitNames.bits]
print("{0:,} bits".format(bits))
if __name__ == '__builtin__':
mbits = bits * unit_converter[UnitNames.bits][UnitNames.mbits]
print('{0} Mbits'.format(mbits))
| """
List of lists of prefix conversions
"""
if self._prefix_conversions is None:
# start with list that assumes value has no prefix
# this list is for 'bits' or 'bytes'
# the values will be 1, 1/kilo, 1/mega, etc.
start_list = [self.kilo_prefix**(-power)
for power in range(self.bit_conversions)]
self._prefix_conversions = self.conversions(conversion_factor=1,
start_list=start_list)
return self._prefix_conversions | identifier_body |
unitconverter.py | """
Copyright (c) 2014 Russell Nakamura
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class UnitNames(object):
"""
Unit Names is a namespace to hold units
"""
__slots__ = ()
# bits
bits = "bits"
kbits = "K" + bits
kilobits = kbits
mbits = "M" + bits
megabits = mbits
gbits = "G" + bits
gigabits = gbits
tbits = "T" + bits
terabits = tbits
pbits = "P" + bits
petabits = pbits
ebits = "E" + bits
exabits = ebits
zbits = "Z" + bits
zettabits = zbits
ybits = "Y" + bits
yottabits = ybits
# bytes
bytes = "Bytes"
kbytes = "K" + bytes
kilobytes = kbytes
mbytes = "M" + bytes
megabytes = mbytes
gbytes = "G" + bytes
gigabytes = gbytes
tbytes = "T" + bytes
terabytes = tbytes
pbytes = "P" + bytes
petabytes = pbytes
ebytes = "E" + bytes
exabytes = ebytes
zbytes = 'Z' + bytes
zettabytes = zbytes
ybytes = 'Y' + bytes
yottabytes = ybytes
class BinaryUnitNames(object):
"""
namespace for binary-unit names
"""
bits = UnitNames.bits
bibits = 'bi' + bits
kibibits = "ki" + bibits
mebibits = 'me' + bibits
gibibits = "gi" + bibits
tebibits = "te" + bibits
pebibits = "pe" + bibits
exbibits = "ex" + bibits
zebibits = "ze" + bibits
yobibits = "yo" + bibits
bytes = 'bytes'
bibytes = 'bi' + bytes
kibibytes = "ki" + bibytes
mebibytes = "me" + bibytes
gibibytes = 'gi' + bibytes
tebibytes = 'te' + bibytes
pebibytes = 'pe' + bibytes
exbibytes = "ex" + bibytes
zebibytes = "ze" + bibytes
yobibytes = "yo" + bibytes
# iperf base 2
iperf_bytes = UnitNames.bytes
iperf_kibibytes = UnitNames.kbytes
iperf_mebibytes = UnitNames.mbytes
iperf_gibibytes = UnitNames.gbytes
iperf_tebibytes = UnitNames.tbytes
iperf_pebibytes = UnitNames.pbytes
iperf_exbibytes = UnitNames.ebytes
iperf_zebibytes = UnitNames.zbytes
iperf_yobibytes = UnitNames.ybytes
# end BinaryUnitNames
IDENTITY = 1
ONE = 1.0
BYTE = 8
TO_BYTE = ONE/BYTE
class BaseConverter(dict):
"""
A creator of unit-conversion dictionaries
"""
def __init__(self, to_units, kilo_prefix):
"""
base_converter constructor
:param:
- `to_units`: a list of the units to covert to (has to be half to-bits, half to-bytes)
- `kilo_prefix`: kilo multiplier matching type of units
"""
self.to_units = to_units
self.kilo_prefix = kilo_prefix
self._prefix_conversions = None
self._bits_to_bytes = None
self._bytes_to_bits = None
# split the to_units list for later
self.bit_conversions = self.byte_conversions = len(to_units)//2
self.bit_units = to_units[:self.bit_conversions]
self.byte_units = to_units[self.byte_conversions:]
return
@property
def prefix_conversions(self):
"""
List of lists of prefix conversions
"""
if self._prefix_conversions is None:
# start with list that assumes value has no prefix
# this list is for 'bits' or 'bytes'
# the values will be 1, 1/kilo, 1/mega, etc.
start_list = [self.kilo_prefix**(-power)
for power in range(self.bit_conversions)]
self._prefix_conversions = self.conversions(conversion_factor=1,
start_list=start_list)
return self._prefix_conversions
@property
def bits_to_bytes(self):
"""
List of conversions for bits to bytes
"""
if self._bits_to_bytes is None:
self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)
return self._bits_to_bytes
@property
def bytes_to_bits(self):
"""
list of conversions for bytes to bits
"""
if self._bytes_to_bits is None:
self._bytes_to_bits = self.conversions(conversion_factor=BYTE)
return self._bytes_to_bits
def conversions(self, conversion_factor, start_list=None):
"""
Creates the converter-lists
:param:
- `conversion_factor`: multiplier for values (8 or 1/8, or 1)
- `start_list`: if given, use to start the conversion-list
:return: list of conversion_lists
"""
if start_list is None:
# assume that prefix_conversions exists (not safe, but...)
start_list = self.prefix_conversions[0]
# start with byte_factor times the base conversions (1, 1/kilo, etc.)
converter_list = [[conversion_factor * conversion
for conversion in start_list]]
for previous in range(self.bit_conversions - 1):
# 'pop' last item from previous list
# and prepend one higher-power conversion
next_conversions = ([self.kilo_prefix**(previous+1) * conversion_factor] +
converter_list[previous][:-1])
converter_list.append(next_conversions)
return converter_list
def build_conversions(self):
"""
builds the dictionary
"""
# from bits to bits or bytes
for index, units in enumerate(self.bit_units):
|
# from bytes to bits or bytes
for index, units in enumerate(self.byte_units):
self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] +
self.prefix_conversions[index])))
return
# end class BaseConverter
bit_units = [UnitNames.bits,
UnitNames.kbits,
UnitNames.mbits,
UnitNames.gbits,
UnitNames.terabits,
UnitNames.petabits,
UnitNames.exabits,
UnitNames.zettabits,
UnitNames.yottabits]
byte_units = [UnitNames.bytes,
UnitNames.kbytes,
UnitNames.mbytes,
UnitNames.gbytes,
UnitNames.terabytes,
UnitNames.petabytes,
UnitNames.exabytes,
UnitNames.zettabytes,
UnitNames.yottabytes]
decimal_to_units = bit_units + byte_units
KILO = 10**3
class UnitConverter(BaseConverter):
"""
The UnitConverter makes conversions based on a base-10 system
"""
def __init__(self):
super(UnitConverter, self).__init__(to_units=decimal_to_units,
kilo_prefix=KILO)
self.build_conversions()
return
# end class UnitConverter
DecimalUnitConverter = UnitConverter
to_bits = [BinaryUnitNames.bits,
BinaryUnitNames.kibibits,
BinaryUnitNames.mebibits,
BinaryUnitNames.gibibits,
BinaryUnitNames.tebibits,
BinaryUnitNames.pebibits,
BinaryUnitNames.exbibits,
BinaryUnitNames.zebibits,
BinaryUnitNames.yobibits]
to_bytes = [BinaryUnitNames.bytes,
BinaryUnitNames.kibibytes,
BinaryUnitNames.mebibytes,
BinaryUnitNames.gibibytes,
BinaryUnitNames.tebibytes,
BinaryUnitNames.pebibytes,
BinaryUnitNames.exbibytes,
BinaryUnitNames.zebibytes,
BinaryUnitNames.yobibytes]
binary_to_units = to_bits + to_bytes
KIBI = 2**10
class BinaryUnitconverter(BaseConverter):
"""
The BinaryUnitconverter is a conversion lookup table for binary data
Usage::
converted = old * UnitConverter[old units][new units]
Use class UnitNames to get valid unit names
"""
def __init__(self):
super(BinaryUnitconverter, self).__init__(to_units=binary_to_units,
kilo_prefix=KIBI)
self.build_conversions()
return
# end class BinaryUnitConverter
to_bits = [BinaryUnitNames.bits,
BinaryUnitNames.kibibits,
BinaryUnitNames.mebibits,
BinaryUnitNames.gibibits,
BinaryUnitNames.tebibits,
BinaryUnitNames.pebibits,
BinaryUnitNames.exbibits,
BinaryUnitNames.zebibits,
BinaryUnitNames.yobibits]
to_bytes = [BinaryUnitNames.iperf_bytes,
BinaryUnitNames.iperf_kibibytes,
BinaryUnitNames.iperf_mebibytes,
BinaryUnitNames.iperf_gibibytes,
BinaryUnitNames.iperf_tebibytes,
BinaryUnitNames.iperf_pebibytes,
BinaryUnitNames.iperf_exbibytes,
BinaryUnitNames.iperf_zebibytes,
BinaryUnitNames.iperf_yobibytes]
iperf_binary_to_units = to_bits + to_bytes
class IperfbinaryConverter(BaseConverter):
"""
The IperfbinaryConverter is a conversion lookup table for binary data
Usage::
converter = IperfbinaryConverter()
converted = old * converter[old units][new units]
Use class UnitNames to get valid unit names
"""
def __init__(self):
super(IperfbinaryConverter, self).__init__(to_units=iperf_binary_to_units,
kilo_prefix=KIBI)
self.build_conversions()
return
# end class BinaryUnitConverter
if __name__ == "__builtin__":
unit_converter = UnitConverter()
bits = 10**6
converted = bits * unit_converter['bits']['Mbits']
print("{0} Mbits".format(converted))
if __name__ == "__builtin__":
binary_converter = BinaryUnitconverter()
MBytes = 1
bits = MBytes * binary_converter[BinaryUnitNames.mebibytes][UnitNames.bits]
print("{0:,} bits".format(bits))
if __name__ == '__builtin__':
mbits = bits * unit_converter[UnitNames.bits][UnitNames.mbits]
print('{0} Mbits'.format(mbits))
| self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] +
self.bits_to_bytes[index]))) | conditional_block |
unitconverter.py | """
Copyright (c) 2014 Russell Nakamura
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class UnitNames(object):
"""
Unit Names is a namespace to hold units
"""
__slots__ = ()
# bits
bits = "bits"
kbits = "K" + bits
kilobits = kbits
mbits = "M" + bits
megabits = mbits
gbits = "G" + bits
gigabits = gbits
tbits = "T" + bits
terabits = tbits
pbits = "P" + bits
petabits = pbits
ebits = "E" + bits
exabits = ebits
zbits = "Z" + bits
zettabits = zbits
ybits = "Y" + bits
yottabits = ybits
# bytes
bytes = "Bytes"
kbytes = "K" + bytes
kilobytes = kbytes
mbytes = "M" + bytes
megabytes = mbytes
gbytes = "G" + bytes
gigabytes = gbytes
tbytes = "T" + bytes
terabytes = tbytes
pbytes = "P" + bytes
petabytes = pbytes
ebytes = "E" + bytes
exabytes = ebytes
zbytes = 'Z' + bytes
zettabytes = zbytes
ybytes = 'Y' + bytes
yottabytes = ybytes
class BinaryUnitNames(object):
"""
namespace for binary-unit names
"""
bits = UnitNames.bits
bibits = 'bi' + bits
kibibits = "ki" + bibits
mebibits = 'me' + bibits
gibibits = "gi" + bibits
tebibits = "te" + bibits
pebibits = "pe" + bibits
exbibits = "ex" + bibits
zebibits = "ze" + bibits
yobibits = "yo" + bibits
bytes = 'bytes'
bibytes = 'bi' + bytes
kibibytes = "ki" + bibytes
mebibytes = "me" + bibytes
gibibytes = 'gi' + bibytes
tebibytes = 'te' + bibytes
pebibytes = 'pe' + bibytes
exbibytes = "ex" + bibytes
zebibytes = "ze" + bibytes
yobibytes = "yo" + bibytes
# iperf base 2
iperf_bytes = UnitNames.bytes
iperf_kibibytes = UnitNames.kbytes
iperf_mebibytes = UnitNames.mbytes
iperf_gibibytes = UnitNames.gbytes
iperf_tebibytes = UnitNames.tbytes
iperf_pebibytes = UnitNames.pbytes
iperf_exbibytes = UnitNames.ebytes
iperf_zebibytes = UnitNames.zbytes
iperf_yobibytes = UnitNames.ybytes
# end BinaryUnitNames
IDENTITY = 1
ONE = 1.0
BYTE = 8
TO_BYTE = ONE/BYTE
class BaseConverter(dict):
"""
A creator of unit-conversion dictionaries
"""
def __init__(self, to_units, kilo_prefix):
"""
base_converter constructor
:param:
- `to_units`: a list of the units to covert to (has to be half to-bits, half to-bytes)
- `kilo_prefix`: kilo multiplier matching type of units
"""
self.to_units = to_units
self.kilo_prefix = kilo_prefix
self._prefix_conversions = None
self._bits_to_bytes = None
self._bytes_to_bits = None
# split the to_units list for later
self.bit_conversions = self.byte_conversions = len(to_units)//2
self.bit_units = to_units[:self.bit_conversions]
self.byte_units = to_units[self.byte_conversions:]
return
@property
def prefix_conversions(self):
"""
List of lists of prefix conversions
"""
if self._prefix_conversions is None:
# start with list that assumes value has no prefix
# this list is for 'bits' or 'bytes'
# the values will be 1, 1/kilo, 1/mega, etc.
start_list = [self.kilo_prefix**(-power)
for power in range(self.bit_conversions)]
self._prefix_conversions = self.conversions(conversion_factor=1,
start_list=start_list)
return self._prefix_conversions
@property
def bits_to_bytes(self):
"""
List of conversions for bits to bytes
"""
if self._bits_to_bytes is None:
self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)
return self._bits_to_bytes
@property
def bytes_to_bits(self):
"""
list of conversions for bytes to bits
"""
if self._bytes_to_bits is None:
self._bytes_to_bits = self.conversions(conversion_factor=BYTE)
return self._bytes_to_bits
def conversions(self, conversion_factor, start_list=None):
"""
Creates the converter-lists
:param:
- `conversion_factor`: multiplier for values (8 or 1/8, or 1)
- `start_list`: if given, use to start the conversion-list
:return: list of conversion_lists
"""
if start_list is None:
# assume that prefix_conversions exists (not safe, but...)
start_list = self.prefix_conversions[0]
# start with byte_factor times the base conversions (1, 1/kilo, etc.)
converter_list = [[conversion_factor * conversion
for conversion in start_list]]
for previous in range(self.bit_conversions - 1):
# 'pop' last item from previous list
# and prepend one higher-power conversion
next_conversions = ([self.kilo_prefix**(previous+1) * conversion_factor] +
converter_list[previous][:-1])
converter_list.append(next_conversions) | def build_conversions(self):
"""
builds the dictionary
"""
# from bits to bits or bytes
for index, units in enumerate(self.bit_units):
self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] +
self.bits_to_bytes[index])))
# from bytes to bits or bytes
for index, units in enumerate(self.byte_units):
self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] +
self.prefix_conversions[index])))
return
# end class BaseConverter
bit_units = [UnitNames.bits,
UnitNames.kbits,
UnitNames.mbits,
UnitNames.gbits,
UnitNames.terabits,
UnitNames.petabits,
UnitNames.exabits,
UnitNames.zettabits,
UnitNames.yottabits]
byte_units = [UnitNames.bytes,
UnitNames.kbytes,
UnitNames.mbytes,
UnitNames.gbytes,
UnitNames.terabytes,
UnitNames.petabytes,
UnitNames.exabytes,
UnitNames.zettabytes,
UnitNames.yottabytes]
decimal_to_units = bit_units + byte_units
KILO = 10**3
class UnitConverter(BaseConverter):
"""
The UnitConverter makes conversions based on a base-10 system
"""
def __init__(self):
super(UnitConverter, self).__init__(to_units=decimal_to_units,
kilo_prefix=KILO)
self.build_conversions()
return
# end class UnitConverter
DecimalUnitConverter = UnitConverter
to_bits = [BinaryUnitNames.bits,
BinaryUnitNames.kibibits,
BinaryUnitNames.mebibits,
BinaryUnitNames.gibibits,
BinaryUnitNames.tebibits,
BinaryUnitNames.pebibits,
BinaryUnitNames.exbibits,
BinaryUnitNames.zebibits,
BinaryUnitNames.yobibits]
to_bytes = [BinaryUnitNames.bytes,
BinaryUnitNames.kibibytes,
BinaryUnitNames.mebibytes,
BinaryUnitNames.gibibytes,
BinaryUnitNames.tebibytes,
BinaryUnitNames.pebibytes,
BinaryUnitNames.exbibytes,
BinaryUnitNames.zebibytes,
BinaryUnitNames.yobibytes]
binary_to_units = to_bits + to_bytes
KIBI = 2**10
class BinaryUnitconverter(BaseConverter):
"""
The BinaryUnitconverter is a conversion lookup table for binary data
Usage::
converted = old * UnitConverter[old units][new units]
Use class UnitNames to get valid unit names
"""
def __init__(self):
super(BinaryUnitconverter, self).__init__(to_units=binary_to_units,
kilo_prefix=KIBI)
self.build_conversions()
return
# end class BinaryUnitConverter
to_bits = [BinaryUnitNames.bits,
BinaryUnitNames.kibibits,
BinaryUnitNames.mebibits,
BinaryUnitNames.gibibits,
BinaryUnitNames.tebibits,
BinaryUnitNames.pebibits,
BinaryUnitNames.exbibits,
BinaryUnitNames.zebibits,
BinaryUnitNames.yobibits]
to_bytes = [BinaryUnitNames.iperf_bytes,
BinaryUnitNames.iperf_kibibytes,
BinaryUnitNames.iperf_mebibytes,
BinaryUnitNames.iperf_gibibytes,
BinaryUnitNames.iperf_tebibytes,
BinaryUnitNames.iperf_pebibytes,
BinaryUnitNames.iperf_exbibytes,
BinaryUnitNames.iperf_zebibytes,
BinaryUnitNames.iperf_yobibytes]
iperf_binary_to_units = to_bits + to_bytes
class IperfbinaryConverter(BaseConverter):
"""
The IperfbinaryConverter is a conversion lookup table for binary data
Usage::
converter = IperfbinaryConverter()
converted = old * converter[old units][new units]
Use class UnitNames to get valid unit names
"""
def __init__(self):
super(IperfbinaryConverter, self).__init__(to_units=iperf_binary_to_units,
kilo_prefix=KIBI)
self.build_conversions()
return
# end class BinaryUnitConverter
if __name__ == "__builtin__":
unit_converter = UnitConverter()
bits = 10**6
converted = bits * unit_converter['bits']['Mbits']
print("{0} Mbits".format(converted))
if __name__ == "__builtin__":
binary_converter = BinaryUnitconverter()
MBytes = 1
bits = MBytes * binary_converter[BinaryUnitNames.mebibytes][UnitNames.bits]
print("{0:,} bits".format(bits))
if __name__ == '__builtin__':
mbits = bits * unit_converter[UnitNames.bits][UnitNames.mbits]
print('{0} Mbits'.format(mbits)) | return converter_list
| random_line_split |
timeloop.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import functools
import inspect
import os
import subprocess
import sys
import timeit
import argparse
import copy
import re
import libconf
import yaml
from common import *
# Output file names.
out_prefix = "timeloop-mapper."
log_file_name = out_prefix + "log"
stats_file_name = out_prefix + "stats.txt"
xml_file_name = out_prefix + "map+stats.xml"
map_txt_file_name = out_prefix + "map.txt"
map_cfg_file_name = out_prefix + "map.cfg"
map_cpp_file_name = out_prefix + "map.cpp"
output_file_names = [log_file_name,
stats_file_name,
xml_file_name,
map_txt_file_name,
map_cfg_file_name,
map_cpp_file_name]
# dimension conversion that maps a WU problem to FW problem
wu2fw = {'P': 'R',
'Q': 'S',
'R': 'P',
'S': 'Q',
'C': 'K',
'K': 'N',
'N': 'C'}
def prod(l):
return functools.reduce(lambda x, y: x * y, l)
def rewrite_workload_bounds(src, dst, workload_bounds, model, layer, batchsize, dataflow, phase, terminate, threads, synthetic, sparsity, save, replication, array_width, glb_scaling, dense): # backward_padding
w, h, c, n, k, s, r, wpad, hpad, wstride, hstride = workload_bounds
n = batchsize
q = int((w - s + 2 * wpad) / wstride) + 1
p = int((h - r + 2 * hpad) / hstride) + 1
wu_equiv = k != 'D' and phase == 'wu'
env_list = {}
if not wu_equiv:
print('Workload Dimensions:')
print(' W =', w)
print(' H =', h)
print(' C =', c)
print(' K =', k)
print(' S =', s)
print(' R =', r) | print(' H-pad =', hpad)
print(' W-stride =', wstride)
print(' H-stride =', hstride)
print()
else:
print('Equivalence Test: can we convert WU problem to FW and use cnn-layer.cfg? (at least in the dense case?)')
print('Workload Dimensions:')
print(' W =', w)
print(' H =', h)
print(f' C <- N {n}')
print(f' K <- C {c}')
print(f' S <- Q {q}')
print(f' R <- P {p}')
print(f' P <- R {r}')
print(f' Q <- S {s}')
print(f' N <- K {k}')
print(' W-pad =', wpad)
print(' H-pad =', hpad)
print(' W-stride =', wstride)
print(' H-stride =', hstride)
print()
env_list['TIMELOOP_EQUIVLENT_WU'] = 'True'
with open(src, "r") as f:
if "cfg" in src:
config = libconf.load(f)
elif "yaml" in src:
config = yaml.load(f, Loader=yaml.SafeLoader)
config['problem']['shape'] = shapes[phase]
if wu_equiv:
config['problem']['shape'] = shapes['fw']
if k == 'D':
depthwise = True
adapt_depthwise_config(config)
else:
depthwise = False
config['problem']['shape'] += '.yaml'
if wu_equiv:
dataflow = convert_dataflow(dataflow)
if phase == 'wu':
remove_block_constraint(config)
if depthwise:
if dataflow == 'CK':
dataflow = 'CN'
dataflow = dataflow.replace('K', 'C')
rewrite_dataflow(config, dataflow, replication, array_width)
rewrite_mesh(config, array_width)
if glb_scaling:
rewrite_glb_size(config, array_width)
if not wu_equiv:
config['problem']['R'] = r
config['problem']['S'] = s
config['problem']['P'] = p
config['problem']['Q'] = q
config['problem']['C'] = c
if not depthwise:
config['problem']['K'] = k
config['problem']['N'] = n
else:
config['problem']['R'] = p
config['problem']['S'] = q
config['problem']['P'] = r
config['problem']['Q'] = s
config['problem']['C'] = n
config['problem']['K'] = c
config['problem']['N'] = k
config['problem']['Wstride'] = wstride
config['problem']['Hstride'] = hstride
config['problem']['Wdilation'] = 1
config['problem']['Hdilation'] = 1
config['mapper']['model-name'] = model
config['mapper']['layer-name'] = layer
if terminate is not None:
config['mapper']['victory-condition'] = terminate
if threads is not None:
config['mapper']['num-threads'] = threads
# rewrite synthetic mask configuration
if not synthetic:
try:
config['mapper'].pop('mask-synthetic')
except KeyError:
pass
else:
config['mapper']['mask-synthetic'] = {}
if sparsity is not None:
config['mapper']['mask-synthetic']['target-sparsity'] = sparsity
if save is not None:
config['mapper']['mask-synthetic']['synthetic-mask-path'] = save
if dense:
opt_metrics = []
for opt in config['mapper']['optimization-metrics']:
opt_metrics.append(opt.split('-')[-1])
config['mapper']['optimization-metrics'] = opt_metrics
with open(dst, "w") as f:
if "cfg" in src:
f.write(libconf.dumps(config))
elif "yaml" in src:
f.write(yaml.dump(config))
return env_list
def convert_dataflow(dataflow):
pre_convert_dataflow = copy.copy(dataflow)
converted_dataflow = []
converted_dataflow.append(wu2fw[pre_convert_dataflow[0]])
converted_dataflow.append(wu2fw[pre_convert_dataflow[1]])
converted = ''
converted = converted.join(converted_dataflow)
print(f'convert from {dataflow} to {converted}')
return converted
def remove_block_constraint(config): # or possibily remove
for constraint in config['mapspace']['constraints']:
if constraint['type'] == 'temporal' and constraint['target'] == 'RegFile':
try:
constraint.pop('factors')
except KeyError:
pass
def rewrite_dataflow(config, dataflow, replication, array_width):
# loop through constaints, and make sure there is only 1 spatial type constraint
# dingqing FIXME: not general for more spatial level architecture config
num_spatial = 0
for constraint in config['mapspace']['constraints']:
if num_spatial > 1:
raise Exception("More than one spatial level! Check the config and the scripts.")
if constraint['type'] == 'spatial':
num_spatial += 1
# determine if it is possible to replicate
possible2replicate = replication and (not config['problem'][dataflow[0]] > array_width / 2 or not config['problem'][dataflow[1]] > array_width / 2)
print('possible2replicate?', possible2replicate)
factors = constraint['factors'].split(' ')
new_factor = []
for factor in factors:
if factor[0] in dataflow:
# look at problem size
new_factor.append(factor[0] + f'{array_width}')
elif not possible2replicate:
new_factor.append(factor[0] + '1')
constraint['factors'] = ' '.join(new_factor)
# rewrite permutation
# emmmm ugly
non_spatial_dims = constraint['permutation'].replace(dataflow[0], '').replace(dataflow[1], '')
constraint['permutation'] = dataflow[0] + non_spatial_dims + dataflow[1]
def rewrite_mesh(config, array_width):
# honestly, the structure is kinda unnatural...
pe_subtree = config['architecture']['subtree'][0]['subtree'][0] # FIXME: this is not generic enough
pe_name = pe_subtree['name']
num_pe_prev = re.findall(r'\d+', pe_name)[-1]
num_pe_new = array_width * array_width - 1
pe_subtree['name'] = pe_name.replace(num_pe_prev, f'{num_pe_new}')
# iterate over RF and PE
for component in pe_subtree['local']:
component['attributes']['meshX'] = array_width
def rewrite_glb_size(config, array_width):
scaling_factor = array_width / 16
# honestly, the structure is kinda unnatural...
sys_subtree = config['architecture']['subtree'][0] # FIXME: this is not generic enough
for comp in sys_subtree['local']:
if comp['name'] == 'GlobalBuffer':
comp['attributes']['depth'] = int(comp['attributes']['depth'] * scaling_factor)
comp['attributes']['n_banks'] = int(comp['attributes']['n_banks'] * scaling_factor)
def adapt_depthwise_config(config):
config['problem']['shape'] += '-depthwise.yaml'
try:
config['problem'].pop('K')
except KeyError:
pass
for constraint in config['mapspace']['constraints']:
if 'factors' in constraint:
factors = constraint['factors'].split(' ')
new_factor = [x for x in factors if x[0] != 'K']
constraint['factors'] = ' '.join(new_factor)
if 'permutation' in constraint:
constraint['permutation'] = ''.join([x for x in constraint['permutation'] if x != 'K'])
def run_timeloop(dirname, configfile, logfile='timeloop.log', env_list={}, dense=False, dense_dirname='dense-timeloop'):
configfile_path = os.path.join(dirname, os.path.basename(configfile))
logfile_path = os.path.join(dirname, logfile)
print('Running timeloop to get mapping')
def stmt():
with open(logfile_path, "w") as outfile:
this_file_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
if not dense:
timeloop_executable_location = os.path.join(
os.path.dirname(this_file_path), '..', 'build', 'timeloop-mapper')
else:
timeloop_executable_location = os.path.join(
os.path.dirname(this_file_path), '..', '..', dense_dirname, 'build', 'timeloop-mapper')
status = subprocess.call([timeloop_executable_location, configfile_path], stdout=outfile, stderr=outfile, env=dict(os.environ, **env_list))
# status = subprocess.call([timeloop_executable_location, configfile_path, 'ERT.yaml'], stdout=outfile, stderr=outfile)
if status != 0:
subprocess.check_call(['cat', logfile_path])
print('Did you remember to build timeloop and set up your environment properly?')
sys.exit(1)
t = timeit.Timer(stmt)
time = t.timeit(1)
print('Time to run timeloop = ', time)
# Move timeloop output files to the right directory
for f in output_file_names:
if os.path.exists(f):
os.rename(f, dirname + '/' + f) | print(' P =', p)
print(' Q =', q)
print(' N =', n)
print(' W-pad =', wpad) | random_line_split |
timeloop.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import functools
import inspect
import os
import subprocess
import sys
import timeit
import argparse
import copy
import re
import libconf
import yaml
from common import *
# Output file names.
out_prefix = "timeloop-mapper."
log_file_name = out_prefix + "log"
stats_file_name = out_prefix + "stats.txt"
xml_file_name = out_prefix + "map+stats.xml"
map_txt_file_name = out_prefix + "map.txt"
map_cfg_file_name = out_prefix + "map.cfg"
map_cpp_file_name = out_prefix + "map.cpp"
output_file_names = [log_file_name,
stats_file_name,
xml_file_name,
map_txt_file_name,
map_cfg_file_name,
map_cpp_file_name]
# dimension conversion that maps a WU problem to FW problem
wu2fw = {'P': 'R',
'Q': 'S',
'R': 'P',
'S': 'Q',
'C': 'K',
'K': 'N',
'N': 'C'}
def prod(l):
return functools.reduce(lambda x, y: x * y, l)
def rewrite_workload_bounds(src, dst, workload_bounds, model, layer, batchsize, dataflow, phase, terminate, threads, synthetic, sparsity, save, replication, array_width, glb_scaling, dense): # backward_padding
w, h, c, n, k, s, r, wpad, hpad, wstride, hstride = workload_bounds
n = batchsize
q = int((w - s + 2 * wpad) / wstride) + 1
p = int((h - r + 2 * hpad) / hstride) + 1
wu_equiv = k != 'D' and phase == 'wu'
env_list = {}
if not wu_equiv:
print('Workload Dimensions:')
print(' W =', w)
print(' H =', h)
print(' C =', c)
print(' K =', k)
print(' S =', s)
print(' R =', r)
print(' P =', p)
print(' Q =', q)
print(' N =', n)
print(' W-pad =', wpad)
print(' H-pad =', hpad)
print(' W-stride =', wstride)
print(' H-stride =', hstride)
print()
else:
print('Equivalence Test: can we convert WU problem to FW and use cnn-layer.cfg? (at least in the dense case?)')
print('Workload Dimensions:')
print(' W =', w)
print(' H =', h)
print(f' C <- N {n}')
print(f' K <- C {c}')
print(f' S <- Q {q}')
print(f' R <- P {p}')
print(f' P <- R {r}')
print(f' Q <- S {s}')
print(f' N <- K {k}')
print(' W-pad =', wpad)
print(' H-pad =', hpad)
print(' W-stride =', wstride)
print(' H-stride =', hstride)
print()
env_list['TIMELOOP_EQUIVLENT_WU'] = 'True'
with open(src, "r") as f:
if "cfg" in src:
config = libconf.load(f)
elif "yaml" in src:
config = yaml.load(f, Loader=yaml.SafeLoader)
config['problem']['shape'] = shapes[phase]
if wu_equiv:
config['problem']['shape'] = shapes['fw']
if k == 'D':
depthwise = True
adapt_depthwise_config(config)
else:
depthwise = False
config['problem']['shape'] += '.yaml'
if wu_equiv:
dataflow = convert_dataflow(dataflow)
if phase == 'wu':
remove_block_constraint(config)
if depthwise:
if dataflow == 'CK':
dataflow = 'CN'
dataflow = dataflow.replace('K', 'C')
rewrite_dataflow(config, dataflow, replication, array_width)
rewrite_mesh(config, array_width)
if glb_scaling:
rewrite_glb_size(config, array_width)
if not wu_equiv:
|
else:
config['problem']['R'] = p
config['problem']['S'] = q
config['problem']['P'] = r
config['problem']['Q'] = s
config['problem']['C'] = n
config['problem']['K'] = c
config['problem']['N'] = k
config['problem']['Wstride'] = wstride
config['problem']['Hstride'] = hstride
config['problem']['Wdilation'] = 1
config['problem']['Hdilation'] = 1
config['mapper']['model-name'] = model
config['mapper']['layer-name'] = layer
if terminate is not None:
config['mapper']['victory-condition'] = terminate
if threads is not None:
config['mapper']['num-threads'] = threads
# rewrite synthetic mask configuration
if not synthetic:
try:
config['mapper'].pop('mask-synthetic')
except KeyError:
pass
else:
config['mapper']['mask-synthetic'] = {}
if sparsity is not None:
config['mapper']['mask-synthetic']['target-sparsity'] = sparsity
if save is not None:
config['mapper']['mask-synthetic']['synthetic-mask-path'] = save
if dense:
opt_metrics = []
for opt in config['mapper']['optimization-metrics']:
opt_metrics.append(opt.split('-')[-1])
config['mapper']['optimization-metrics'] = opt_metrics
with open(dst, "w") as f:
if "cfg" in src:
f.write(libconf.dumps(config))
elif "yaml" in src:
f.write(yaml.dump(config))
return env_list
def convert_dataflow(dataflow):
pre_convert_dataflow = copy.copy(dataflow)
converted_dataflow = []
converted_dataflow.append(wu2fw[pre_convert_dataflow[0]])
converted_dataflow.append(wu2fw[pre_convert_dataflow[1]])
converted = ''
converted = converted.join(converted_dataflow)
print(f'convert from {dataflow} to {converted}')
return converted
def remove_block_constraint(config): # or possibily remove
for constraint in config['mapspace']['constraints']:
if constraint['type'] == 'temporal' and constraint['target'] == 'RegFile':
try:
constraint.pop('factors')
except KeyError:
pass
def rewrite_dataflow(config, dataflow, replication, array_width):
# loop through constaints, and make sure there is only 1 spatial type constraint
# dingqing FIXME: not general for more spatial level architecture config
num_spatial = 0
for constraint in config['mapspace']['constraints']:
if num_spatial > 1:
raise Exception("More than one spatial level! Check the config and the scripts.")
if constraint['type'] == 'spatial':
num_spatial += 1
# determine if it is possible to replicate
possible2replicate = replication and (not config['problem'][dataflow[0]] > array_width / 2 or not config['problem'][dataflow[1]] > array_width / 2)
print('possible2replicate?', possible2replicate)
factors = constraint['factors'].split(' ')
new_factor = []
for factor in factors:
if factor[0] in dataflow:
# look at problem size
new_factor.append(factor[0] + f'{array_width}')
elif not possible2replicate:
new_factor.append(factor[0] + '1')
constraint['factors'] = ' '.join(new_factor)
# rewrite permutation
# emmmm ugly
non_spatial_dims = constraint['permutation'].replace(dataflow[0], '').replace(dataflow[1], '')
constraint['permutation'] = dataflow[0] + non_spatial_dims + dataflow[1]
def rewrite_mesh(config, array_width):
# honestly, the structure is kinda unnatural...
pe_subtree = config['architecture']['subtree'][0]['subtree'][0] # FIXME: this is not generic enough
pe_name = pe_subtree['name']
num_pe_prev = re.findall(r'\d+', pe_name)[-1]
num_pe_new = array_width * array_width - 1
pe_subtree['name'] = pe_name.replace(num_pe_prev, f'{num_pe_new}')
# iterate over RF and PE
for component in pe_subtree['local']:
component['attributes']['meshX'] = array_width
def rewrite_glb_size(config, array_width):
scaling_factor = array_width / 16
# honestly, the structure is kinda unnatural...
sys_subtree = config['architecture']['subtree'][0] # FIXME: this is not generic enough
for comp in sys_subtree['local']:
if comp['name'] == 'GlobalBuffer':
comp['attributes']['depth'] = int(comp['attributes']['depth'] * scaling_factor)
comp['attributes']['n_banks'] = int(comp['attributes']['n_banks'] * scaling_factor)
def adapt_depthwise_config(config):
config['problem']['shape'] += '-depthwise.yaml'
try:
config['problem'].pop('K')
except KeyError:
pass
for constraint in config['mapspace']['constraints']:
if 'factors' in constraint:
factors = constraint['factors'].split(' ')
new_factor = [x for x in factors if x[0] != 'K']
constraint['factors'] = ' '.join(new_factor)
if 'permutation' in constraint:
constraint['permutation'] = ''.join([x for x in constraint['permutation'] if x != 'K'])
def run_timeloop(dirname, configfile, logfile='timeloop.log', env_list={}, dense=False, dense_dirname='dense-timeloop'):
configfile_path = os.path.join(dirname, os.path.basename(configfile))
logfile_path = os.path.join(dirname, logfile)
print('Running timeloop to get mapping')
def stmt():
with open(logfile_path, "w") as outfile:
this_file_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
if not dense:
timeloop_executable_location = os.path.join(
os.path.dirname(this_file_path), '..', 'build', 'timeloop-mapper')
else:
timeloop_executable_location = os.path.join(
os.path.dirname(this_file_path), '..', '..', dense_dirname, 'build', 'timeloop-mapper')
status = subprocess.call([timeloop_executable_location, configfile_path], stdout=outfile, stderr=outfile, env=dict(os.environ, **env_list))
# status = subprocess.call([timeloop_executable_location, configfile_path, 'ERT.yaml'], stdout=outfile, stderr=outfile)
if status != 0:
subprocess.check_call(['cat', logfile_path])
print('Did you remember to build timeloop and set up your environment properly?')
sys.exit(1)
t = timeit.Timer(stmt)
time = t.timeit(1)
print('Time to run timeloop = ', time)
# Move timeloop output files to the right directory
for f in output_file_names:
if os.path.exists(f):
os.rename(f, dirname + '/' + f)
| config['problem']['R'] = r
config['problem']['S'] = s
config['problem']['P'] = p
config['problem']['Q'] = q
config['problem']['C'] = c
if not depthwise:
config['problem']['K'] = k
config['problem']['N'] = n | conditional_block |
timeloop.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import functools
import inspect
import os
import subprocess
import sys
import timeit
import argparse
import copy
import re
import libconf
import yaml
from common import *
# Output file names.
out_prefix = "timeloop-mapper."
log_file_name = out_prefix + "log"
stats_file_name = out_prefix + "stats.txt"
xml_file_name = out_prefix + "map+stats.xml"
map_txt_file_name = out_prefix + "map.txt"
map_cfg_file_name = out_prefix + "map.cfg"
map_cpp_file_name = out_prefix + "map.cpp"
output_file_names = [log_file_name,
stats_file_name,
xml_file_name,
map_txt_file_name,
map_cfg_file_name,
map_cpp_file_name]
# dimension conversion that maps a WU problem to FW problem
wu2fw = {'P': 'R',
'Q': 'S',
'R': 'P',
'S': 'Q',
'C': 'K',
'K': 'N',
'N': 'C'}
def prod(l):
return functools.reduce(lambda x, y: x * y, l)
def rewrite_workload_bounds(src, dst, workload_bounds, model, layer, batchsize, dataflow, phase, terminate, threads, synthetic, sparsity, save, replication, array_width, glb_scaling, dense): # backward_padding
w, h, c, n, k, s, r, wpad, hpad, wstride, hstride = workload_bounds
n = batchsize
q = int((w - s + 2 * wpad) / wstride) + 1
p = int((h - r + 2 * hpad) / hstride) + 1
wu_equiv = k != 'D' and phase == 'wu'
env_list = {}
if not wu_equiv:
print('Workload Dimensions:')
print(' W =', w)
print(' H =', h)
print(' C =', c)
print(' K =', k)
print(' S =', s)
print(' R =', r)
print(' P =', p)
print(' Q =', q)
print(' N =', n)
print(' W-pad =', wpad)
print(' H-pad =', hpad)
print(' W-stride =', wstride)
print(' H-stride =', hstride)
print()
else:
print('Equivalence Test: can we convert WU problem to FW and use cnn-layer.cfg? (at least in the dense case?)')
print('Workload Dimensions:')
print(' W =', w)
print(' H =', h)
print(f' C <- N {n}')
print(f' K <- C {c}')
print(f' S <- Q {q}')
print(f' R <- P {p}')
print(f' P <- R {r}')
print(f' Q <- S {s}')
print(f' N <- K {k}')
print(' W-pad =', wpad)
print(' H-pad =', hpad)
print(' W-stride =', wstride)
print(' H-stride =', hstride)
print()
env_list['TIMELOOP_EQUIVLENT_WU'] = 'True'
with open(src, "r") as f:
if "cfg" in src:
config = libconf.load(f)
elif "yaml" in src:
config = yaml.load(f, Loader=yaml.SafeLoader)
config['problem']['shape'] = shapes[phase]
if wu_equiv:
config['problem']['shape'] = shapes['fw']
if k == 'D':
depthwise = True
adapt_depthwise_config(config)
else:
depthwise = False
config['problem']['shape'] += '.yaml'
if wu_equiv:
dataflow = convert_dataflow(dataflow)
if phase == 'wu':
remove_block_constraint(config)
if depthwise:
if dataflow == 'CK':
dataflow = 'CN'
dataflow = dataflow.replace('K', 'C')
rewrite_dataflow(config, dataflow, replication, array_width)
rewrite_mesh(config, array_width)
if glb_scaling:
rewrite_glb_size(config, array_width)
if not wu_equiv:
config['problem']['R'] = r
config['problem']['S'] = s
config['problem']['P'] = p
config['problem']['Q'] = q
config['problem']['C'] = c
if not depthwise:
config['problem']['K'] = k
config['problem']['N'] = n
else:
config['problem']['R'] = p
config['problem']['S'] = q
config['problem']['P'] = r
config['problem']['Q'] = s
config['problem']['C'] = n
config['problem']['K'] = c
config['problem']['N'] = k
config['problem']['Wstride'] = wstride
config['problem']['Hstride'] = hstride
config['problem']['Wdilation'] = 1
config['problem']['Hdilation'] = 1
config['mapper']['model-name'] = model
config['mapper']['layer-name'] = layer
if terminate is not None:
config['mapper']['victory-condition'] = terminate
if threads is not None:
config['mapper']['num-threads'] = threads
# rewrite synthetic mask configuration
if not synthetic:
try:
config['mapper'].pop('mask-synthetic')
except KeyError:
pass
else:
config['mapper']['mask-synthetic'] = {}
if sparsity is not None:
config['mapper']['mask-synthetic']['target-sparsity'] = sparsity
if save is not None:
config['mapper']['mask-synthetic']['synthetic-mask-path'] = save
if dense:
opt_metrics = []
for opt in config['mapper']['optimization-metrics']:
opt_metrics.append(opt.split('-')[-1])
config['mapper']['optimization-metrics'] = opt_metrics
with open(dst, "w") as f:
if "cfg" in src:
f.write(libconf.dumps(config))
elif "yaml" in src:
f.write(yaml.dump(config))
return env_list
def convert_dataflow(dataflow):
pre_convert_dataflow = copy.copy(dataflow)
converted_dataflow = []
converted_dataflow.append(wu2fw[pre_convert_dataflow[0]])
converted_dataflow.append(wu2fw[pre_convert_dataflow[1]])
converted = ''
converted = converted.join(converted_dataflow)
print(f'convert from {dataflow} to {converted}')
return converted
def remove_block_constraint(config): # or possibily remove
for constraint in config['mapspace']['constraints']:
if constraint['type'] == 'temporal' and constraint['target'] == 'RegFile':
try:
constraint.pop('factors')
except KeyError:
pass
def rewrite_dataflow(config, dataflow, replication, array_width):
# loop through constaints, and make sure there is only 1 spatial type constraint
# dingqing FIXME: not general for more spatial level architecture config
num_spatial = 0
for constraint in config['mapspace']['constraints']:
if num_spatial > 1:
raise Exception("More than one spatial level! Check the config and the scripts.")
if constraint['type'] == 'spatial':
num_spatial += 1
# determine if it is possible to replicate
possible2replicate = replication and (not config['problem'][dataflow[0]] > array_width / 2 or not config['problem'][dataflow[1]] > array_width / 2)
print('possible2replicate?', possible2replicate)
factors = constraint['factors'].split(' ')
new_factor = []
for factor in factors:
if factor[0] in dataflow:
# look at problem size
new_factor.append(factor[0] + f'{array_width}')
elif not possible2replicate:
new_factor.append(factor[0] + '1')
constraint['factors'] = ' '.join(new_factor)
# rewrite permutation
# emmmm ugly
non_spatial_dims = constraint['permutation'].replace(dataflow[0], '').replace(dataflow[1], '')
constraint['permutation'] = dataflow[0] + non_spatial_dims + dataflow[1]
def rewrite_mesh(config, array_width):
# honestly, the structure is kinda unnatural...
pe_subtree = config['architecture']['subtree'][0]['subtree'][0] # FIXME: this is not generic enough
pe_name = pe_subtree['name']
num_pe_prev = re.findall(r'\d+', pe_name)[-1]
num_pe_new = array_width * array_width - 1
pe_subtree['name'] = pe_name.replace(num_pe_prev, f'{num_pe_new}')
# iterate over RF and PE
for component in pe_subtree['local']:
component['attributes']['meshX'] = array_width
def rewrite_glb_size(config, array_width):
scaling_factor = array_width / 16
# honestly, the structure is kinda unnatural...
sys_subtree = config['architecture']['subtree'][0] # FIXME: this is not generic enough
for comp in sys_subtree['local']:
if comp['name'] == 'GlobalBuffer':
comp['attributes']['depth'] = int(comp['attributes']['depth'] * scaling_factor)
comp['attributes']['n_banks'] = int(comp['attributes']['n_banks'] * scaling_factor)
def adapt_depthwise_config(config):
config['problem']['shape'] += '-depthwise.yaml'
try:
config['problem'].pop('K')
except KeyError:
pass
for constraint in config['mapspace']['constraints']:
if 'factors' in constraint:
factors = constraint['factors'].split(' ')
new_factor = [x for x in factors if x[0] != 'K']
constraint['factors'] = ' '.join(new_factor)
if 'permutation' in constraint:
constraint['permutation'] = ''.join([x for x in constraint['permutation'] if x != 'K'])
def run_timeloop(dirname, configfile, logfile='timeloop.log', env_list={}, dense=False, dense_dirname='dense-timeloop'):
configfile_path = os.path.join(dirname, os.path.basename(configfile))
logfile_path = os.path.join(dirname, logfile)
print('Running timeloop to get mapping')
def stmt():
|
t = timeit.Timer(stmt)
time = t.timeit(1)
print('Time to run timeloop = ', time)
# Move timeloop output files to the right directory
for f in output_file_names:
if os.path.exists(f):
os.rename(f, dirname + '/' + f)
| with open(logfile_path, "w") as outfile:
this_file_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
if not dense:
timeloop_executable_location = os.path.join(
os.path.dirname(this_file_path), '..', 'build', 'timeloop-mapper')
else:
timeloop_executable_location = os.path.join(
os.path.dirname(this_file_path), '..', '..', dense_dirname, 'build', 'timeloop-mapper')
status = subprocess.call([timeloop_executable_location, configfile_path], stdout=outfile, stderr=outfile, env=dict(os.environ, **env_list))
# status = subprocess.call([timeloop_executable_location, configfile_path, 'ERT.yaml'], stdout=outfile, stderr=outfile)
if status != 0:
subprocess.check_call(['cat', logfile_path])
print('Did you remember to build timeloop and set up your environment properly?')
sys.exit(1) | identifier_body |
timeloop.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import functools
import inspect
import os
import subprocess
import sys
import timeit
import argparse
import copy
import re
import libconf
import yaml
from common import *
# Output file names.
out_prefix = "timeloop-mapper."
log_file_name = out_prefix + "log"
stats_file_name = out_prefix + "stats.txt"
xml_file_name = out_prefix + "map+stats.xml"
map_txt_file_name = out_prefix + "map.txt"
map_cfg_file_name = out_prefix + "map.cfg"
map_cpp_file_name = out_prefix + "map.cpp"
output_file_names = [log_file_name,
stats_file_name,
xml_file_name,
map_txt_file_name,
map_cfg_file_name,
map_cpp_file_name]
# dimension conversion that maps a WU problem to FW problem
wu2fw = {'P': 'R',
'Q': 'S',
'R': 'P',
'S': 'Q',
'C': 'K',
'K': 'N',
'N': 'C'}
def | (l):
return functools.reduce(lambda x, y: x * y, l)
def rewrite_workload_bounds(src, dst, workload_bounds, model, layer, batchsize, dataflow, phase, terminate, threads, synthetic, sparsity, save, replication, array_width, glb_scaling, dense): # backward_padding
w, h, c, n, k, s, r, wpad, hpad, wstride, hstride = workload_bounds
n = batchsize
q = int((w - s + 2 * wpad) / wstride) + 1
p = int((h - r + 2 * hpad) / hstride) + 1
wu_equiv = k != 'D' and phase == 'wu'
env_list = {}
if not wu_equiv:
print('Workload Dimensions:')
print(' W =', w)
print(' H =', h)
print(' C =', c)
print(' K =', k)
print(' S =', s)
print(' R =', r)
print(' P =', p)
print(' Q =', q)
print(' N =', n)
print(' W-pad =', wpad)
print(' H-pad =', hpad)
print(' W-stride =', wstride)
print(' H-stride =', hstride)
print()
else:
print('Equivalence Test: can we convert WU problem to FW and use cnn-layer.cfg? (at least in the dense case?)')
print('Workload Dimensions:')
print(' W =', w)
print(' H =', h)
print(f' C <- N {n}')
print(f' K <- C {c}')
print(f' S <- Q {q}')
print(f' R <- P {p}')
print(f' P <- R {r}')
print(f' Q <- S {s}')
print(f' N <- K {k}')
print(' W-pad =', wpad)
print(' H-pad =', hpad)
print(' W-stride =', wstride)
print(' H-stride =', hstride)
print()
env_list['TIMELOOP_EQUIVLENT_WU'] = 'True'
with open(src, "r") as f:
if "cfg" in src:
config = libconf.load(f)
elif "yaml" in src:
config = yaml.load(f, Loader=yaml.SafeLoader)
config['problem']['shape'] = shapes[phase]
if wu_equiv:
config['problem']['shape'] = shapes['fw']
if k == 'D':
depthwise = True
adapt_depthwise_config(config)
else:
depthwise = False
config['problem']['shape'] += '.yaml'
if wu_equiv:
dataflow = convert_dataflow(dataflow)
if phase == 'wu':
remove_block_constraint(config)
if depthwise:
if dataflow == 'CK':
dataflow = 'CN'
dataflow = dataflow.replace('K', 'C')
rewrite_dataflow(config, dataflow, replication, array_width)
rewrite_mesh(config, array_width)
if glb_scaling:
rewrite_glb_size(config, array_width)
if not wu_equiv:
config['problem']['R'] = r
config['problem']['S'] = s
config['problem']['P'] = p
config['problem']['Q'] = q
config['problem']['C'] = c
if not depthwise:
config['problem']['K'] = k
config['problem']['N'] = n
else:
config['problem']['R'] = p
config['problem']['S'] = q
config['problem']['P'] = r
config['problem']['Q'] = s
config['problem']['C'] = n
config['problem']['K'] = c
config['problem']['N'] = k
config['problem']['Wstride'] = wstride
config['problem']['Hstride'] = hstride
config['problem']['Wdilation'] = 1
config['problem']['Hdilation'] = 1
config['mapper']['model-name'] = model
config['mapper']['layer-name'] = layer
if terminate is not None:
config['mapper']['victory-condition'] = terminate
if threads is not None:
config['mapper']['num-threads'] = threads
# rewrite synthetic mask configuration
if not synthetic:
try:
config['mapper'].pop('mask-synthetic')
except KeyError:
pass
else:
config['mapper']['mask-synthetic'] = {}
if sparsity is not None:
config['mapper']['mask-synthetic']['target-sparsity'] = sparsity
if save is not None:
config['mapper']['mask-synthetic']['synthetic-mask-path'] = save
if dense:
opt_metrics = []
for opt in config['mapper']['optimization-metrics']:
opt_metrics.append(opt.split('-')[-1])
config['mapper']['optimization-metrics'] = opt_metrics
with open(dst, "w") as f:
if "cfg" in src:
f.write(libconf.dumps(config))
elif "yaml" in src:
f.write(yaml.dump(config))
return env_list
def convert_dataflow(dataflow):
pre_convert_dataflow = copy.copy(dataflow)
converted_dataflow = []
converted_dataflow.append(wu2fw[pre_convert_dataflow[0]])
converted_dataflow.append(wu2fw[pre_convert_dataflow[1]])
converted = ''
converted = converted.join(converted_dataflow)
print(f'convert from {dataflow} to {converted}')
return converted
def remove_block_constraint(config): # or possibily remove
for constraint in config['mapspace']['constraints']:
if constraint['type'] == 'temporal' and constraint['target'] == 'RegFile':
try:
constraint.pop('factors')
except KeyError:
pass
def rewrite_dataflow(config, dataflow, replication, array_width):
# loop through constaints, and make sure there is only 1 spatial type constraint
# dingqing FIXME: not general for more spatial level architecture config
num_spatial = 0
for constraint in config['mapspace']['constraints']:
if num_spatial > 1:
raise Exception("More than one spatial level! Check the config and the scripts.")
if constraint['type'] == 'spatial':
num_spatial += 1
# determine if it is possible to replicate
possible2replicate = replication and (not config['problem'][dataflow[0]] > array_width / 2 or not config['problem'][dataflow[1]] > array_width / 2)
print('possible2replicate?', possible2replicate)
factors = constraint['factors'].split(' ')
new_factor = []
for factor in factors:
if factor[0] in dataflow:
# look at problem size
new_factor.append(factor[0] + f'{array_width}')
elif not possible2replicate:
new_factor.append(factor[0] + '1')
constraint['factors'] = ' '.join(new_factor)
# rewrite permutation
# emmmm ugly
non_spatial_dims = constraint['permutation'].replace(dataflow[0], '').replace(dataflow[1], '')
constraint['permutation'] = dataflow[0] + non_spatial_dims + dataflow[1]
def rewrite_mesh(config, array_width):
# honestly, the structure is kinda unnatural...
pe_subtree = config['architecture']['subtree'][0]['subtree'][0] # FIXME: this is not generic enough
pe_name = pe_subtree['name']
num_pe_prev = re.findall(r'\d+', pe_name)[-1]
num_pe_new = array_width * array_width - 1
pe_subtree['name'] = pe_name.replace(num_pe_prev, f'{num_pe_new}')
# iterate over RF and PE
for component in pe_subtree['local']:
component['attributes']['meshX'] = array_width
def rewrite_glb_size(config, array_width):
scaling_factor = array_width / 16
# honestly, the structure is kinda unnatural...
sys_subtree = config['architecture']['subtree'][0] # FIXME: this is not generic enough
for comp in sys_subtree['local']:
if comp['name'] == 'GlobalBuffer':
comp['attributes']['depth'] = int(comp['attributes']['depth'] * scaling_factor)
comp['attributes']['n_banks'] = int(comp['attributes']['n_banks'] * scaling_factor)
def adapt_depthwise_config(config):
config['problem']['shape'] += '-depthwise.yaml'
try:
config['problem'].pop('K')
except KeyError:
pass
for constraint in config['mapspace']['constraints']:
if 'factors' in constraint:
factors = constraint['factors'].split(' ')
new_factor = [x for x in factors if x[0] != 'K']
constraint['factors'] = ' '.join(new_factor)
if 'permutation' in constraint:
constraint['permutation'] = ''.join([x for x in constraint['permutation'] if x != 'K'])
def run_timeloop(dirname, configfile, logfile='timeloop.log', env_list={}, dense=False, dense_dirname='dense-timeloop'):
configfile_path = os.path.join(dirname, os.path.basename(configfile))
logfile_path = os.path.join(dirname, logfile)
print('Running timeloop to get mapping')
def stmt():
with open(logfile_path, "w") as outfile:
this_file_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
if not dense:
timeloop_executable_location = os.path.join(
os.path.dirname(this_file_path), '..', 'build', 'timeloop-mapper')
else:
timeloop_executable_location = os.path.join(
os.path.dirname(this_file_path), '..', '..', dense_dirname, 'build', 'timeloop-mapper')
status = subprocess.call([timeloop_executable_location, configfile_path], stdout=outfile, stderr=outfile, env=dict(os.environ, **env_list))
# status = subprocess.call([timeloop_executable_location, configfile_path, 'ERT.yaml'], stdout=outfile, stderr=outfile)
if status != 0:
subprocess.check_call(['cat', logfile_path])
print('Did you remember to build timeloop and set up your environment properly?')
sys.exit(1)
t = timeit.Timer(stmt)
time = t.timeit(1)
print('Time to run timeloop = ', time)
# Move timeloop output files to the right directory
for f in output_file_names:
if os.path.exists(f):
os.rename(f, dirname + '/' + f)
| prod | identifier_name |
ops.py | """""
GBDX Notebook: "Identifying Destroyed Buildings with Multispectral Imagery"
Link: https://notebooks.geobigdata.io/hub/notebooks/5b47cfb82486966ea89b75fd?tab=code
Author: Ai-Linh Alten
Date created: 7/5/2018
Date last modified: 7/13/2018
Python Version: 2.7.15
"""
import cPickle
import folium
from functools import partial
from gbdxtools import CatalogImage, IdahoImage
import geojson
from IPython.display import HTML, display
import jinja2
import json
from matplotlib import pyplot as plt, colors
import numpy as np
import os
from past.utils import old_div
import pickle
import pyproj
from rasterio import features
import requests
from scipy import ndimage as ndi
from shapely import geometry, ops
from shapely.geometry import shape, geo, box
from skimage import filters, morphology, measure, color, segmentation, exposure
from skimage.measure import label, regionprops
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score
#CONSTANTS
buildings_geojson_link = 'https://s3.amazonaws.com/gbdx-training/burnt_areas/Nuns_SonomaCounty_Glenn_selected_labelled.geojson'
RF_model_link = 'https://s3.amazonaws.com/gbdx-training/burnt_areas/rf_allseg_model.pkl'
"""Helper functions for the GBDX Notebook."""
def pixels_as_features(image, include_gabors=True):
"""Calculates remote sensing indices and gabor filters(optional).
Returns image features of image bands, remote sensing indices, and gabor filters."""
# roll axes to conventional row,col,depth
img = np.rollaxis(image, 0, 3)
rsi = calc_rsi(image)
if include_gabors is True:
gabors = calc_gabors(image)
stack = np.dstack([img, rsi, gabors])
else:
stack = np.dstack([img, rsi])
feats = stack.ravel().reshape(stack.shape[0] * stack.shape[1], stack.shape[2])
return feats
def calc_rsi(image):
"""Remote sensing indices for vegetation, built-up, and bare soil."""
# roll axes to conventional row,col,depth
img = np.rollaxis(image, 0, 3)
# bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral
COAST = img[:, :, 0]
B = img[:, :, 1]
G = img[:, :, 2]
Y = img[:, :, 3]
R = img[:, :, 4]
RE = img[:, :, 5]
NIR1 = img[:, :, 6]
NIR2 = img[:, :, 7]
arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))
dd = (2 * NIR1 - R) - (G - B)
gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5
gndvi = old_div((NIR1 - G), (NIR1 + G))
ndre = old_div((NIR1 - RE), (NIR1 + RE))
ndvi = old_div((NIR1 - R), (NIR1 + R))
ndvi35 = old_div((G - R), (G + R))
ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))
nirry = old_div((NIR1), (R + Y))
normnir = old_div(NIR1, (NIR1 + R + G))
psri = old_div((R - B), RE)
rey = old_div((RE - Y), (RE + Y))
rvi = old_div(NIR1, R)
sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69
vi1 = old_div((10000 * NIR1), (RE) ** 2)
vire = old_div(NIR1, RE)
br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))
gr = old_div(G, R)
rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))
###Built-Up indices
wvbi = old_div((COAST - RE), (COAST + RE))
wvnhfd = old_div((RE - COAST), (RE + COAST))
###SIs
evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))
L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES
savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))
msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)
bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))
rgi = old_div(R, G)
bri = old_div(B, R)
rsi = np.stack(
[arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,
wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],
axis=2)
return rsi
def power(image, kernel):
"""Normalize images for better comparison."""
image = old_div((image - image.mean()), image.std())
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap') ** 2 +
ndi.convolve(image, np.imag(kernel), mode='wrap') ** 2)
def calc_gabors(image, frequency=1, theta_vals=[0, 1, 2, 3]):
"""Calculate gabor."""
# convert to gray scale
img = exposure.equalize_hist(color.rgb2gray(image.rgb(blm=True)))
results_list = []
for theta in theta_vals:
theta = theta / 4. * np.pi
kernel = filters.gabor_kernel(frequency, theta=theta)
# Save kernel and the power image for each image
results_list.append(power(img, kernel))
gabors = np.rollaxis(np.dstack([results_list]), 0, 3)
return gabors
def get_link(model_url):
"""Fetch the RF model pickle file or the building footprints geojson."""
response = requests.get(model_url)
return response.content
#partials
get_model = partial(get_link, model_url=RF_model_link) #gets RF model response content
get_geojson = partial(get_link, model_url=buildings_geojson_link) #gets building geojson response content
def reproject(geom, from_proj='EPSG:4326', to_proj='EPSG:26942'):
"""Project from ESPG:4326 to ESPG:26942."""
tfm = partial(pyproj.transform, pyproj.Proj(init=from_proj), pyproj.Proj(init=to_proj))
return ops.transform(tfm, geom)
def km2_area(polygons):
"""Get area in km^2 after reprojection."""
reprojected_polygons = [reproject(p) for p in polygons]
return ops.cascaded_union(reprojected_polygons).area * 1e-6
def clean(img):
"""Clean the binary image by removing small holes and objects."""
label_img = label(img, connectivity=2)
props = sorted(regionprops(label_img), key=lambda x: x.area)
clean = morphology.binary_closing(img)
clean = morphology.remove_small_holes(clean)
return morphology.remove_small_objects(clean,
int(np.floor(props[-1].area) / 10), connectivity=2)
def to_geojson(shapes, buildings):
"""Converts the shapes into geojson.
This function will combine the burn scar region and buildings into geojson.
Burn scar polygon in red, buildings polygon all in blue."""
#append burn scar region polygons to geojson
if type(shapes) == list:
results = ({
'type': 'Feature',
'properties': {'raster_val': v, 'color': 'red'},
'geometry': s.__geo_interface__}
for i, (s, v)
in enumerate(shapes))
else:
results = ({
'type': 'Feature',
'properties': {'raster_val': v, 'color': 'red'},
'geometry': s}
for i, (s, v)
in enumerate(shapes))
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b['properties']['BuildingID'], 'color': 'blue'},
'geometry': b['geometry']}
for i, b
in enumerate(buildings['features']))
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def geojson_to_polygons(js_):
"""Convert the geojson into Shapely Polygons.
Keep burn scar polygons as red.
Mark all building polygons labelled as ('yellow', False) and will be changed later."""
burnt_polys = []
building_polys = []
for i, feat in enumerate(js_['features']):
o = {
"coordinates": feat['geometry']['coordinates'],
"type": feat['geometry']['type']
}
s = json.dumps(o)
# convert to geojson.geometry.Polygon
g1 = geojson.loads(s)
# covert to shapely.geometry.polygon.Polygon
g2 = shape(g1)
if feat['properties']['color'] == 'red': # red for the burnt region
burnt_polys.append(g2)
else: # for the building poly
building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',
False]]) # mark building polygons as 'yellow' for non-burnt for now
return burnt_polys, building_polys
def label_building_polys(burnt_polys, building_polys):
"""Labels the building polygons as ('blue', True) if the building is destroyed."""
for b in building_polys:
for r in burnt_polys:
if b[0].intersects(r):
b[1] = [b[1][0], 'blue', True] # mark building polygon as 'blue' if found in burnt region
continue
def to_geojson_burnt(burnt_polys, building_polys):
"""Convert shapes into geojson with new labelled building footprints. """
results = ({
'type': 'Feature',
'properties': {'color': 'red'},
'geometry': geo.mapping(r)}
for r in burnt_polys)
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b[1][0], 'color': b[1][1]},
'geometry': geo.mapping(b[0])}
for b in building_polys)
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def to_geojson_groundtruth(burnt_polys, data_labelled):
"""Convert shapes into geojson for the groundtruth."""
results = ({
'type': 'Feature',
'properties': {'color': 'red'},
'geometry': geo.mapping(r)}
for r in burnt_polys)
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b['properties']['BuildingID'], 'color': b['properties']['color'],
'Burnt_Label': b['properties']['Burnt_Label']},
'geometry': b['geometry']}
for b in data_labelled['features'])
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def geojson_to_polygons_groundtruth(js_):
"""Convert geojson to polygons for the groundtruth map."""
burnt_polys = []
building_polys = []
for i, feat in enumerate(js_['features']):
o = {
"coordinates": feat['geometry']['coordinates'],
"type": feat['geometry']['type']
}
s = json.dumps(o)
# convert to geojson.geometry.Polygon
g1 = geojson.loads(s)
# covert to shapely.geometry.polygon.Polygon
g2 = shape(g1)
if feat['properties']['color'] == 'red': # red for the burnt region
burnt_polys.append(g2)
else: # for the building poly
if feat['properties']['Burnt_Label']:
building_polys.append([g2, [feat['properties']['BuildingID'], 'blue',
True]]) # mark building polygons as 'blue' for burnt for now
else:
building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',
False]]) # mark building polygons as 'yellow' for non-burnt for now
return burnt_polys, building_polys
def accuracy_measures(predictions, trues):
"""Accuracy measures for the predictions of the method vs the groundtruth.
Prints a confusion matrix, accuracy, misclassifcation rate, true positieve rate, false positive rate, specificity, precision, prevalence.
Returns the accuracy score, precision score, and recall score."""
tn, fp, fn, tp = confusion_matrix(trues, predictions).ravel()
print "\t(tn, fp, fn, tp) =", (tn, fp, fn, tp)
# how often is classifier correct?
print "\tAccuracy = {:.2%}".format(float(tp + tn) / len(trues))
# how often is it wrong?
print "\tMisclassification Rate = {:.2%}".format(float(fp + fn) / len(trues))
# when actually yes, how often does it predict yes?
print "\tTrue Positive Rate = {:.2%}".format(float(tp) / trues.count(True))
# when actually no, how often does it predict yes?
print "\tFalse Positive Rate = {:.2%}".format(float(fp) / trues.count(False))
# when actually no, how often does it predict no?
print "\tSpecificity = {:.2%}".format(float(tn) / trues.count(False))
# when it predicts yes, how often is it correct?
print "\tPrecision = {:.2%}".format(float(tp) / predictions.count(True))
# how often does yes condition occur in our sample?
print "\tPrevalence = {:.2%}\n".format(float(trues.count(True)) / len(trues))
# return accuracy, precision, and recall score
return accuracy_score(trues, predictions), precision_score(trues, predictions, average='binary'), recall_score(
trues, predictions, average='binary')
def create_mask(predictions_2d, sizeX, sizeY, chip_shape):
"""Create a new binary mask of burn scar with the tiles."""
# reshape predictions_2d
predictions_2d_res = np.array(predictions_2d)
predictions_2d_res = predictions_2d_res.reshape(sizeX, sizeY)
# create new mask of area of interest
new_mask = np.zeros((chip_shape[1], chip_shape[2]))
for x in range(0, chip_shape[1], 256):
for y in range(0, chip_shape[2], 256):
new_mask[x:x + 256, y:y + 256] = predictions_2d_res[x / 256][y / 256]
return new_mask
"""Functions for plots."""
def folium_map(geojson_to_overlay, layer_name, location, style_function=None, tiles='Stamen Terrain', zoom_start=16,
show_layer_control=True, width='100%', height='75%', attr=None, map_zoom=18, max_zoom=20, tms=False,
zoom_beyond_max=None, base_tiles='OpenStreetMap', opacity=1):
"""Folium map with Geojson layer and TMS tiles layer.
This function requires geojson_to_overlay (geojson), layer_name (String), and location (map center tuple).
You can also set tiles to the TMS URL and control map zoom."""
m = folium.Map(location=location, zoom_start=zoom_start, width=width, height=height, max_zoom=map_zoom,
tiles=base_tiles)
tiles = folium.TileLayer(tiles=tiles, attr=attr, name=attr, max_zoom=max_zoom)
if tms is True:
options = json.loads(tiles.options)
options.update({'tms': True})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
if zoom_beyond_max is not None:
options = json.loads(tiles.options)
options.update({'maxNativeZoom': zoom_beyond_max, 'maxZoom': max_zoom})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
if opacity < 1:
options = json.loads(tiles.options)
options.update({'opacity': opacity})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
tiles.add_to(m)
if style_function is not None:
gj = folium.GeoJson(geojson_to_overlay, overlay=True, name=layer_name, style_function=style_function)
else:
gj = folium.GeoJson(geojson_to_overlay, overlay=True, name=layer_name)
gj.add_to(m)
if show_layer_control is True:
folium.LayerControl().add_to(m)
return m
def | (array, subplot_ijk, title="", font_size=18, cmap=None):
"""Plot image with subplot.
Requires image and subplot location (ie. (1,2,1)).
You can also set title."""
sp = plt.subplot(*subplot_ijk)
sp.set_title(title, fontsize=font_size)
plt.axis('off')
plt.imshow(array, cmap=cmap)
def displayHTMLtable(acc_sent2, acc_wv03, acc, prec_sent2, prec_wv03, prec, recall_sent2, recall_wv03, recall):
"""Display accuracy scores in a table."""
methods = ['Sent2 NBR', 'WV03 NBR', 'WV03 RF']
accuracies = ["{:.2%}".format(acc_sent2), "{:.2%}".format(acc_wv03), "{:.2%}".format(acc)]
precisions = ["{:.2%}".format(prec_sent2), "{:.2%}".format(prec_wv03), "{:.2%}".format(prec)]
recalls = ["{:.2%}".format(recall_sent2), "{:.2%}".format(recall_wv03), "{:.2%}".format(recall)]
data = methods + accuracies + precisions + recalls
data = np.reshape(data, (4, 3)).T
display(HTML(
'<table style="width:100%;"><th>Method</th><th>Accuracy</th><th>Precision</th><th>Recall</th><tr>{}</tr></table>'.format(
'</tr><tr>'.join(
'<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)
)
))
| plot_array | identifier_name |
ops.py | """""
GBDX Notebook: "Identifying Destroyed Buildings with Multispectral Imagery"
Link: https://notebooks.geobigdata.io/hub/notebooks/5b47cfb82486966ea89b75fd?tab=code
Author: Ai-Linh Alten
Date created: 7/5/2018
Date last modified: 7/13/2018
Python Version: 2.7.15
"""
import cPickle
import folium
from functools import partial
from gbdxtools import CatalogImage, IdahoImage
import geojson
from IPython.display import HTML, display
import jinja2
import json
from matplotlib import pyplot as plt, colors
import numpy as np
import os
from past.utils import old_div
import pickle
import pyproj
from rasterio import features
import requests
from scipy import ndimage as ndi
from shapely import geometry, ops
from shapely.geometry import shape, geo, box
from skimage import filters, morphology, measure, color, segmentation, exposure
from skimage.measure import label, regionprops
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score
#CONSTANTS
buildings_geojson_link = 'https://s3.amazonaws.com/gbdx-training/burnt_areas/Nuns_SonomaCounty_Glenn_selected_labelled.geojson'
RF_model_link = 'https://s3.amazonaws.com/gbdx-training/burnt_areas/rf_allseg_model.pkl'
"""Helper functions for the GBDX Notebook."""
def pixels_as_features(image, include_gabors=True):
"""Calculates remote sensing indices and gabor filters(optional).
Returns image features of image bands, remote sensing indices, and gabor filters."""
# roll axes to conventional row,col,depth
img = np.rollaxis(image, 0, 3)
rsi = calc_rsi(image)
if include_gabors is True:
gabors = calc_gabors(image)
stack = np.dstack([img, rsi, gabors])
else:
|
feats = stack.ravel().reshape(stack.shape[0] * stack.shape[1], stack.shape[2])
return feats
def calc_rsi(image):
"""Remote sensing indices for vegetation, built-up, and bare soil."""
# roll axes to conventional row,col,depth
img = np.rollaxis(image, 0, 3)
# bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral
COAST = img[:, :, 0]
B = img[:, :, 1]
G = img[:, :, 2]
Y = img[:, :, 3]
R = img[:, :, 4]
RE = img[:, :, 5]
NIR1 = img[:, :, 6]
NIR2 = img[:, :, 7]
arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))
dd = (2 * NIR1 - R) - (G - B)
gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5
gndvi = old_div((NIR1 - G), (NIR1 + G))
ndre = old_div((NIR1 - RE), (NIR1 + RE))
ndvi = old_div((NIR1 - R), (NIR1 + R))
ndvi35 = old_div((G - R), (G + R))
ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))
nirry = old_div((NIR1), (R + Y))
normnir = old_div(NIR1, (NIR1 + R + G))
psri = old_div((R - B), RE)
rey = old_div((RE - Y), (RE + Y))
rvi = old_div(NIR1, R)
sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69
vi1 = old_div((10000 * NIR1), (RE) ** 2)
vire = old_div(NIR1, RE)
br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))
gr = old_div(G, R)
rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))
###Built-Up indices
wvbi = old_div((COAST - RE), (COAST + RE))
wvnhfd = old_div((RE - COAST), (RE + COAST))
###SIs
evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))
L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES
savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))
msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)
bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))
rgi = old_div(R, G)
bri = old_div(B, R)
rsi = np.stack(
[arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,
wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],
axis=2)
return rsi
def power(image, kernel):
"""Normalize images for better comparison."""
image = old_div((image - image.mean()), image.std())
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap') ** 2 +
ndi.convolve(image, np.imag(kernel), mode='wrap') ** 2)
def calc_gabors(image, frequency=1, theta_vals=[0, 1, 2, 3]):
"""Calculate gabor."""
# convert to gray scale
img = exposure.equalize_hist(color.rgb2gray(image.rgb(blm=True)))
results_list = []
for theta in theta_vals:
theta = theta / 4. * np.pi
kernel = filters.gabor_kernel(frequency, theta=theta)
# Save kernel and the power image for each image
results_list.append(power(img, kernel))
gabors = np.rollaxis(np.dstack([results_list]), 0, 3)
return gabors
def get_link(model_url):
"""Fetch the RF model pickle file or the building footprints geojson."""
response = requests.get(model_url)
return response.content
#partials
get_model = partial(get_link, model_url=RF_model_link) #gets RF model response content
get_geojson = partial(get_link, model_url=buildings_geojson_link) #gets building geojson response content
def reproject(geom, from_proj='EPSG:4326', to_proj='EPSG:26942'):
"""Project from ESPG:4326 to ESPG:26942."""
tfm = partial(pyproj.transform, pyproj.Proj(init=from_proj), pyproj.Proj(init=to_proj))
return ops.transform(tfm, geom)
def km2_area(polygons):
"""Get area in km^2 after reprojection."""
reprojected_polygons = [reproject(p) for p in polygons]
return ops.cascaded_union(reprojected_polygons).area * 1e-6
def clean(img):
"""Clean the binary image by removing small holes and objects."""
label_img = label(img, connectivity=2)
props = sorted(regionprops(label_img), key=lambda x: x.area)
clean = morphology.binary_closing(img)
clean = morphology.remove_small_holes(clean)
return morphology.remove_small_objects(clean,
int(np.floor(props[-1].area) / 10), connectivity=2)
def to_geojson(shapes, buildings):
"""Converts the shapes into geojson.
This function will combine the burn scar region and buildings into geojson.
Burn scar polygon in red, buildings polygon all in blue."""
#append burn scar region polygons to geojson
if type(shapes) == list:
results = ({
'type': 'Feature',
'properties': {'raster_val': v, 'color': 'red'},
'geometry': s.__geo_interface__}
for i, (s, v)
in enumerate(shapes))
else:
results = ({
'type': 'Feature',
'properties': {'raster_val': v, 'color': 'red'},
'geometry': s}
for i, (s, v)
in enumerate(shapes))
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b['properties']['BuildingID'], 'color': 'blue'},
'geometry': b['geometry']}
for i, b
in enumerate(buildings['features']))
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def geojson_to_polygons(js_):
"""Convert the geojson into Shapely Polygons.
Keep burn scar polygons as red.
Mark all building polygons labelled as ('yellow', False) and will be changed later."""
burnt_polys = []
building_polys = []
for i, feat in enumerate(js_['features']):
o = {
"coordinates": feat['geometry']['coordinates'],
"type": feat['geometry']['type']
}
s = json.dumps(o)
# convert to geojson.geometry.Polygon
g1 = geojson.loads(s)
# covert to shapely.geometry.polygon.Polygon
g2 = shape(g1)
if feat['properties']['color'] == 'red': # red for the burnt region
burnt_polys.append(g2)
else: # for the building poly
building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',
False]]) # mark building polygons as 'yellow' for non-burnt for now
return burnt_polys, building_polys
def label_building_polys(burnt_polys, building_polys):
"""Labels the building polygons as ('blue', True) if the building is destroyed."""
for b in building_polys:
for r in burnt_polys:
if b[0].intersects(r):
b[1] = [b[1][0], 'blue', True] # mark building polygon as 'blue' if found in burnt region
continue
def to_geojson_burnt(burnt_polys, building_polys):
"""Convert shapes into geojson with new labelled building footprints. """
results = ({
'type': 'Feature',
'properties': {'color': 'red'},
'geometry': geo.mapping(r)}
for r in burnt_polys)
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b[1][0], 'color': b[1][1]},
'geometry': geo.mapping(b[0])}
for b in building_polys)
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def to_geojson_groundtruth(burnt_polys, data_labelled):
"""Convert shapes into geojson for the groundtruth."""
results = ({
'type': 'Feature',
'properties': {'color': 'red'},
'geometry': geo.mapping(r)}
for r in burnt_polys)
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b['properties']['BuildingID'], 'color': b['properties']['color'],
'Burnt_Label': b['properties']['Burnt_Label']},
'geometry': b['geometry']}
for b in data_labelled['features'])
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def geojson_to_polygons_groundtruth(js_):
"""Convert geojson to polygons for the groundtruth map."""
burnt_polys = []
building_polys = []
for i, feat in enumerate(js_['features']):
o = {
"coordinates": feat['geometry']['coordinates'],
"type": feat['geometry']['type']
}
s = json.dumps(o)
# convert to geojson.geometry.Polygon
g1 = geojson.loads(s)
# covert to shapely.geometry.polygon.Polygon
g2 = shape(g1)
if feat['properties']['color'] == 'red': # red for the burnt region
burnt_polys.append(g2)
else: # for the building poly
if feat['properties']['Burnt_Label']:
building_polys.append([g2, [feat['properties']['BuildingID'], 'blue',
True]]) # mark building polygons as 'blue' for burnt for now
else:
building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',
False]]) # mark building polygons as 'yellow' for non-burnt for now
return burnt_polys, building_polys
def accuracy_measures(predictions, trues):
"""Accuracy measures for the predictions of the method vs the groundtruth.
Prints a confusion matrix, accuracy, misclassifcation rate, true positieve rate, false positive rate, specificity, precision, prevalence.
Returns the accuracy score, precision score, and recall score."""
tn, fp, fn, tp = confusion_matrix(trues, predictions).ravel()
print "\t(tn, fp, fn, tp) =", (tn, fp, fn, tp)
# how often is classifier correct?
print "\tAccuracy = {:.2%}".format(float(tp + tn) / len(trues))
# how often is it wrong?
print "\tMisclassification Rate = {:.2%}".format(float(fp + fn) / len(trues))
# when actually yes, how often does it predict yes?
print "\tTrue Positive Rate = {:.2%}".format(float(tp) / trues.count(True))
# when actually no, how often does it predict yes?
print "\tFalse Positive Rate = {:.2%}".format(float(fp) / trues.count(False))
# when actually no, how often does it predict no?
print "\tSpecificity = {:.2%}".format(float(tn) / trues.count(False))
# when it predicts yes, how often is it correct?
print "\tPrecision = {:.2%}".format(float(tp) / predictions.count(True))
# how often does yes condition occur in our sample?
print "\tPrevalence = {:.2%}\n".format(float(trues.count(True)) / len(trues))
# return accuracy, precision, and recall score
return accuracy_score(trues, predictions), precision_score(trues, predictions, average='binary'), recall_score(
trues, predictions, average='binary')
def create_mask(predictions_2d, sizeX, sizeY, chip_shape):
"""Create a new binary mask of burn scar with the tiles."""
# reshape predictions_2d
predictions_2d_res = np.array(predictions_2d)
predictions_2d_res = predictions_2d_res.reshape(sizeX, sizeY)
# create new mask of area of interest
new_mask = np.zeros((chip_shape[1], chip_shape[2]))
for x in range(0, chip_shape[1], 256):
for y in range(0, chip_shape[2], 256):
new_mask[x:x + 256, y:y + 256] = predictions_2d_res[x / 256][y / 256]
return new_mask
"""Functions for plots."""
def folium_map(geojson_to_overlay, layer_name, location, style_function=None, tiles='Stamen Terrain', zoom_start=16,
show_layer_control=True, width='100%', height='75%', attr=None, map_zoom=18, max_zoom=20, tms=False,
zoom_beyond_max=None, base_tiles='OpenStreetMap', opacity=1):
"""Folium map with Geojson layer and TMS tiles layer.
This function requires geojson_to_overlay (geojson), layer_name (String), and location (map center tuple).
You can also set tiles to the TMS URL and control map zoom."""
m = folium.Map(location=location, zoom_start=zoom_start, width=width, height=height, max_zoom=map_zoom,
tiles=base_tiles)
tiles = folium.TileLayer(tiles=tiles, attr=attr, name=attr, max_zoom=max_zoom)
if tms is True:
options = json.loads(tiles.options)
options.update({'tms': True})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
if zoom_beyond_max is not None:
options = json.loads(tiles.options)
options.update({'maxNativeZoom': zoom_beyond_max, 'maxZoom': max_zoom})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
if opacity < 1:
options = json.loads(tiles.options)
options.update({'opacity': opacity})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
tiles.add_to(m)
if style_function is not None:
gj = folium.GeoJson(geojson_to_overlay, overlay=True, name=layer_name, style_function=style_function)
else:
gj = folium.GeoJson(geojson_to_overlay, overlay=True, name=layer_name)
gj.add_to(m)
if show_layer_control is True:
folium.LayerControl().add_to(m)
return m
def plot_array(array, subplot_ijk, title="", font_size=18, cmap=None):
"""Plot image with subplot.
Requires image and subplot location (ie. (1,2,1)).
You can also set title."""
sp = plt.subplot(*subplot_ijk)
sp.set_title(title, fontsize=font_size)
plt.axis('off')
plt.imshow(array, cmap=cmap)
def displayHTMLtable(acc_sent2, acc_wv03, acc, prec_sent2, prec_wv03, prec, recall_sent2, recall_wv03, recall):
"""Display accuracy scores in a table."""
methods = ['Sent2 NBR', 'WV03 NBR', 'WV03 RF']
accuracies = ["{:.2%}".format(acc_sent2), "{:.2%}".format(acc_wv03), "{:.2%}".format(acc)]
precisions = ["{:.2%}".format(prec_sent2), "{:.2%}".format(prec_wv03), "{:.2%}".format(prec)]
recalls = ["{:.2%}".format(recall_sent2), "{:.2%}".format(recall_wv03), "{:.2%}".format(recall)]
data = methods + accuracies + precisions + recalls
data = np.reshape(data, (4, 3)).T
display(HTML(
'<table style="width:100%;"><th>Method</th><th>Accuracy</th><th>Precision</th><th>Recall</th><tr>{}</tr></table>'.format(
'</tr><tr>'.join(
'<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)
)
))
| stack = np.dstack([img, rsi]) | conditional_block |
ops.py | """""
GBDX Notebook: "Identifying Destroyed Buildings with Multispectral Imagery"
Link: https://notebooks.geobigdata.io/hub/notebooks/5b47cfb82486966ea89b75fd?tab=code
Author: Ai-Linh Alten
Date created: 7/5/2018
Date last modified: 7/13/2018
Python Version: 2.7.15
"""
import cPickle
import folium
from functools import partial
from gbdxtools import CatalogImage, IdahoImage
import geojson
from IPython.display import HTML, display
import jinja2
import json
from matplotlib import pyplot as plt, colors
import numpy as np
import os
from past.utils import old_div
import pickle
import pyproj
from rasterio import features
import requests
from scipy import ndimage as ndi
from shapely import geometry, ops
from shapely.geometry import shape, geo, box
from skimage import filters, morphology, measure, color, segmentation, exposure
from skimage.measure import label, regionprops
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score
#CONSTANTS
buildings_geojson_link = 'https://s3.amazonaws.com/gbdx-training/burnt_areas/Nuns_SonomaCounty_Glenn_selected_labelled.geojson'
RF_model_link = 'https://s3.amazonaws.com/gbdx-training/burnt_areas/rf_allseg_model.pkl'
"""Helper functions for the GBDX Notebook."""
def pixels_as_features(image, include_gabors=True):
"""Calculates remote sensing indices and gabor filters(optional).
Returns image features of image bands, remote sensing indices, and gabor filters."""
# roll axes to conventional row,col,depth
img = np.rollaxis(image, 0, 3)
rsi = calc_rsi(image)
if include_gabors is True:
gabors = calc_gabors(image)
stack = np.dstack([img, rsi, gabors])
else:
stack = np.dstack([img, rsi])
feats = stack.ravel().reshape(stack.shape[0] * stack.shape[1], stack.shape[2])
return feats
def calc_rsi(image):
"""Remote sensing indices for vegetation, built-up, and bare soil."""
# roll axes to conventional row,col,depth
img = np.rollaxis(image, 0, 3)
# bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral
COAST = img[:, :, 0]
B = img[:, :, 1]
G = img[:, :, 2]
Y = img[:, :, 3]
R = img[:, :, 4]
RE = img[:, :, 5]
NIR1 = img[:, :, 6]
NIR2 = img[:, :, 7]
arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))
dd = (2 * NIR1 - R) - (G - B)
gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5
gndvi = old_div((NIR1 - G), (NIR1 + G))
ndre = old_div((NIR1 - RE), (NIR1 + RE))
ndvi = old_div((NIR1 - R), (NIR1 + R))
ndvi35 = old_div((G - R), (G + R))
ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))
nirry = old_div((NIR1), (R + Y))
normnir = old_div(NIR1, (NIR1 + R + G))
psri = old_div((R - B), RE)
rey = old_div((RE - Y), (RE + Y))
rvi = old_div(NIR1, R)
sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69
vi1 = old_div((10000 * NIR1), (RE) ** 2)
vire = old_div(NIR1, RE)
br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))
gr = old_div(G, R)
rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))
###Built-Up indices
wvbi = old_div((COAST - RE), (COAST + RE))
wvnhfd = old_div((RE - COAST), (RE + COAST))
###SIs
evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))
L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES
savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))
msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)
bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))
rgi = old_div(R, G)
bri = old_div(B, R)
rsi = np.stack(
[arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,
wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],
axis=2)
return rsi
def power(image, kernel):
"""Normalize images for better comparison."""
image = old_div((image - image.mean()), image.std())
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap') ** 2 +
ndi.convolve(image, np.imag(kernel), mode='wrap') ** 2)
def calc_gabors(image, frequency=1, theta_vals=[0, 1, 2, 3]):
"""Calculate gabor."""
# convert to gray scale
img = exposure.equalize_hist(color.rgb2gray(image.rgb(blm=True)))
results_list = []
for theta in theta_vals:
theta = theta / 4. * np.pi
kernel = filters.gabor_kernel(frequency, theta=theta)
# Save kernel and the power image for each image
results_list.append(power(img, kernel))
gabors = np.rollaxis(np.dstack([results_list]), 0, 3)
return gabors
def get_link(model_url):
"""Fetch the RF model pickle file or the building footprints geojson."""
response = requests.get(model_url)
return response.content
#partials
get_model = partial(get_link, model_url=RF_model_link) #gets RF model response content
get_geojson = partial(get_link, model_url=buildings_geojson_link) #gets building geojson response content
def reproject(geom, from_proj='EPSG:4326', to_proj='EPSG:26942'):
"""Project from ESPG:4326 to ESPG:26942."""
tfm = partial(pyproj.transform, pyproj.Proj(init=from_proj), pyproj.Proj(init=to_proj))
return ops.transform(tfm, geom)
def km2_area(polygons):
"""Get area in km^2 after reprojection."""
reprojected_polygons = [reproject(p) for p in polygons]
return ops.cascaded_union(reprojected_polygons).area * 1e-6
def clean(img):
"""Clean the binary image by removing small holes and objects."""
label_img = label(img, connectivity=2)
props = sorted(regionprops(label_img), key=lambda x: x.area)
clean = morphology.binary_closing(img)
clean = morphology.remove_small_holes(clean)
return morphology.remove_small_objects(clean,
int(np.floor(props[-1].area) / 10), connectivity=2)
def to_geojson(shapes, buildings):
"""Converts the shapes into geojson.
This function will combine the burn scar region and buildings into geojson.
Burn scar polygon in red, buildings polygon all in blue."""
#append burn scar region polygons to geojson
if type(shapes) == list:
results = ({
'type': 'Feature',
'properties': {'raster_val': v, 'color': 'red'},
'geometry': s.__geo_interface__}
for i, (s, v)
in enumerate(shapes))
else:
results = ({
'type': 'Feature',
'properties': {'raster_val': v, 'color': 'red'},
'geometry': s}
for i, (s, v)
in enumerate(shapes))
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b['properties']['BuildingID'], 'color': 'blue'},
'geometry': b['geometry']}
for i, b
in enumerate(buildings['features']))
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def geojson_to_polygons(js_):
"""Convert the geojson into Shapely Polygons.
Keep burn scar polygons as red.
Mark all building polygons labelled as ('yellow', False) and will be changed later."""
burnt_polys = []
building_polys = []
for i, feat in enumerate(js_['features']):
o = {
"coordinates": feat['geometry']['coordinates'],
"type": feat['geometry']['type']
}
s = json.dumps(o)
# convert to geojson.geometry.Polygon
g1 = geojson.loads(s)
# covert to shapely.geometry.polygon.Polygon
g2 = shape(g1)
if feat['properties']['color'] == 'red': # red for the burnt region
burnt_polys.append(g2)
else: # for the building poly
building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',
False]]) # mark building polygons as 'yellow' for non-burnt for now
return burnt_polys, building_polys
def label_building_polys(burnt_polys, building_polys):
"""Labels the building polygons as ('blue', True) if the building is destroyed."""
for b in building_polys:
for r in burnt_polys:
if b[0].intersects(r):
b[1] = [b[1][0], 'blue', True] # mark building polygon as 'blue' if found in burnt region
continue
def to_geojson_burnt(burnt_polys, building_polys):
"""Convert shapes into geojson with new labelled building footprints. """
results = ({
'type': 'Feature',
'properties': {'color': 'red'},
'geometry': geo.mapping(r)}
for r in burnt_polys)
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b[1][0], 'color': b[1][1]},
'geometry': geo.mapping(b[0])}
for b in building_polys)
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def to_geojson_groundtruth(burnt_polys, data_labelled):
|
def geojson_to_polygons_groundtruth(js_):
"""Convert geojson to polygons for the groundtruth map."""
burnt_polys = []
building_polys = []
for i, feat in enumerate(js_['features']):
o = {
"coordinates": feat['geometry']['coordinates'],
"type": feat['geometry']['type']
}
s = json.dumps(o)
# convert to geojson.geometry.Polygon
g1 = geojson.loads(s)
# covert to shapely.geometry.polygon.Polygon
g2 = shape(g1)
if feat['properties']['color'] == 'red': # red for the burnt region
burnt_polys.append(g2)
else: # for the building poly
if feat['properties']['Burnt_Label']:
building_polys.append([g2, [feat['properties']['BuildingID'], 'blue',
True]]) # mark building polygons as 'blue' for burnt for now
else:
building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',
False]]) # mark building polygons as 'yellow' for non-burnt for now
return burnt_polys, building_polys
def accuracy_measures(predictions, trues):
"""Accuracy measures for the predictions of the method vs the groundtruth.
Prints a confusion matrix, accuracy, misclassifcation rate, true positieve rate, false positive rate, specificity, precision, prevalence.
Returns the accuracy score, precision score, and recall score."""
tn, fp, fn, tp = confusion_matrix(trues, predictions).ravel()
print "\t(tn, fp, fn, tp) =", (tn, fp, fn, tp)
# how often is classifier correct?
print "\tAccuracy = {:.2%}".format(float(tp + tn) / len(trues))
# how often is it wrong?
print "\tMisclassification Rate = {:.2%}".format(float(fp + fn) / len(trues))
# when actually yes, how often does it predict yes?
print "\tTrue Positive Rate = {:.2%}".format(float(tp) / trues.count(True))
# when actually no, how often does it predict yes?
print "\tFalse Positive Rate = {:.2%}".format(float(fp) / trues.count(False))
# when actually no, how often does it predict no?
print "\tSpecificity = {:.2%}".format(float(tn) / trues.count(False))
# when it predicts yes, how often is it correct?
print "\tPrecision = {:.2%}".format(float(tp) / predictions.count(True))
# how often does yes condition occur in our sample?
print "\tPrevalence = {:.2%}\n".format(float(trues.count(True)) / len(trues))
# return accuracy, precision, and recall score
return accuracy_score(trues, predictions), precision_score(trues, predictions, average='binary'), recall_score(
trues, predictions, average='binary')
def create_mask(predictions_2d, sizeX, sizeY, chip_shape):
"""Create a new binary mask of burn scar with the tiles."""
# reshape predictions_2d
predictions_2d_res = np.array(predictions_2d)
predictions_2d_res = predictions_2d_res.reshape(sizeX, sizeY)
# create new mask of area of interest
new_mask = np.zeros((chip_shape[1], chip_shape[2]))
for x in range(0, chip_shape[1], 256):
for y in range(0, chip_shape[2], 256):
new_mask[x:x + 256, y:y + 256] = predictions_2d_res[x / 256][y / 256]
return new_mask
"""Functions for plots."""
def folium_map(geojson_to_overlay, layer_name, location, style_function=None, tiles='Stamen Terrain', zoom_start=16,
show_layer_control=True, width='100%', height='75%', attr=None, map_zoom=18, max_zoom=20, tms=False,
zoom_beyond_max=None, base_tiles='OpenStreetMap', opacity=1):
"""Folium map with Geojson layer and TMS tiles layer.
This function requires geojson_to_overlay (geojson), layer_name (String), and location (map center tuple).
You can also set tiles to the TMS URL and control map zoom."""
m = folium.Map(location=location, zoom_start=zoom_start, width=width, height=height, max_zoom=map_zoom,
tiles=base_tiles)
tiles = folium.TileLayer(tiles=tiles, attr=attr, name=attr, max_zoom=max_zoom)
if tms is True:
options = json.loads(tiles.options)
options.update({'tms': True})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
if zoom_beyond_max is not None:
options = json.loads(tiles.options)
options.update({'maxNativeZoom': zoom_beyond_max, 'maxZoom': max_zoom})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
if opacity < 1:
options = json.loads(tiles.options)
options.update({'opacity': opacity})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
tiles.add_to(m)
if style_function is not None:
gj = folium.GeoJson(geojson_to_overlay, overlay=True, name=layer_name, style_function=style_function)
else:
gj = folium.GeoJson(geojson_to_overlay, overlay=True, name=layer_name)
gj.add_to(m)
if show_layer_control is True:
folium.LayerControl().add_to(m)
return m
def plot_array(array, subplot_ijk, title="", font_size=18, cmap=None):
"""Plot image with subplot.
Requires image and subplot location (ie. (1,2,1)).
You can also set title."""
sp = plt.subplot(*subplot_ijk)
sp.set_title(title, fontsize=font_size)
plt.axis('off')
plt.imshow(array, cmap=cmap)
def displayHTMLtable(acc_sent2, acc_wv03, acc, prec_sent2, prec_wv03, prec, recall_sent2, recall_wv03, recall):
"""Display accuracy scores in a table."""
methods = ['Sent2 NBR', 'WV03 NBR', 'WV03 RF']
accuracies = ["{:.2%}".format(acc_sent2), "{:.2%}".format(acc_wv03), "{:.2%}".format(acc)]
precisions = ["{:.2%}".format(prec_sent2), "{:.2%}".format(prec_wv03), "{:.2%}".format(prec)]
recalls = ["{:.2%}".format(recall_sent2), "{:.2%}".format(recall_wv03), "{:.2%}".format(recall)]
data = methods + accuracies + precisions + recalls
data = np.reshape(data, (4, 3)).T
display(HTML(
'<table style="width:100%;"><th>Method</th><th>Accuracy</th><th>Precision</th><th>Recall</th><tr>{}</tr></table>'.format(
'</tr><tr>'.join(
'<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)
)
))
| """Convert shapes into geojson for the groundtruth."""
results = ({
'type': 'Feature',
'properties': {'color': 'red'},
'geometry': geo.mapping(r)}
for r in burnt_polys)
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b['properties']['BuildingID'], 'color': b['properties']['color'],
'Burnt_Label': b['properties']['Burnt_Label']},
'geometry': b['geometry']}
for b in data_labelled['features'])
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection | identifier_body |
ops.py | """""
GBDX Notebook: "Identifying Destroyed Buildings with Multispectral Imagery"
Link: https://notebooks.geobigdata.io/hub/notebooks/5b47cfb82486966ea89b75fd?tab=code
Author: Ai-Linh Alten
Date created: 7/5/2018
Date last modified: 7/13/2018
Python Version: 2.7.15
"""
import cPickle
import folium
from functools import partial
from gbdxtools import CatalogImage, IdahoImage
import geojson
from IPython.display import HTML, display
import jinja2
import json
from matplotlib import pyplot as plt, colors
import numpy as np
import os
from past.utils import old_div
import pickle
import pyproj
from rasterio import features
import requests
from scipy import ndimage as ndi
from shapely import geometry, ops
from shapely.geometry import shape, geo, box
from skimage import filters, morphology, measure, color, segmentation, exposure
from skimage.measure import label, regionprops
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score
#CONSTANTS
buildings_geojson_link = 'https://s3.amazonaws.com/gbdx-training/burnt_areas/Nuns_SonomaCounty_Glenn_selected_labelled.geojson'
RF_model_link = 'https://s3.amazonaws.com/gbdx-training/burnt_areas/rf_allseg_model.pkl'
"""Helper functions for the GBDX Notebook."""
def pixels_as_features(image, include_gabors=True):
"""Calculates remote sensing indices and gabor filters(optional).
Returns image features of image bands, remote sensing indices, and gabor filters."""
# roll axes to conventional row,col,depth
img = np.rollaxis(image, 0, 3)
rsi = calc_rsi(image)
if include_gabors is True:
gabors = calc_gabors(image)
stack = np.dstack([img, rsi, gabors])
else:
stack = np.dstack([img, rsi])
feats = stack.ravel().reshape(stack.shape[0] * stack.shape[1], stack.shape[2])
return feats
def calc_rsi(image):
"""Remote sensing indices for vegetation, built-up, and bare soil."""
# roll axes to conventional row,col,depth
img = np.rollaxis(image, 0, 3)
# bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral
COAST = img[:, :, 0]
B = img[:, :, 1]
G = img[:, :, 2]
Y = img[:, :, 3]
R = img[:, :, 4]
RE = img[:, :, 5]
NIR1 = img[:, :, 6]
NIR2 = img[:, :, 7]
arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))
dd = (2 * NIR1 - R) - (G - B)
gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5
gndvi = old_div((NIR1 - G), (NIR1 + G))
ndre = old_div((NIR1 - RE), (NIR1 + RE))
ndvi = old_div((NIR1 - R), (NIR1 + R))
ndvi35 = old_div((G - R), (G + R))
ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))
nirry = old_div((NIR1), (R + Y))
normnir = old_div(NIR1, (NIR1 + R + G))
psri = old_div((R - B), RE)
rey = old_div((RE - Y), (RE + Y))
rvi = old_div(NIR1, R)
sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69
vi1 = old_div((10000 * NIR1), (RE) ** 2)
vire = old_div(NIR1, RE)
br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))
gr = old_div(G, R)
rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))
###Built-Up indices
wvbi = old_div((COAST - RE), (COAST + RE))
wvnhfd = old_div((RE - COAST), (RE + COAST))
###SIs
evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))
L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES
savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))
msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)
bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))
rgi = old_div(R, G)
bri = old_div(B, R)
rsi = np.stack(
[arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,
wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],
axis=2)
return rsi
def power(image, kernel):
"""Normalize images for better comparison."""
image = old_div((image - image.mean()), image.std())
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap') ** 2 +
ndi.convolve(image, np.imag(kernel), mode='wrap') ** 2)
def calc_gabors(image, frequency=1, theta_vals=[0, 1, 2, 3]):
"""Calculate gabor."""
# convert to gray scale
img = exposure.equalize_hist(color.rgb2gray(image.rgb(blm=True)))
results_list = []
for theta in theta_vals:
theta = theta / 4. * np.pi
kernel = filters.gabor_kernel(frequency, theta=theta)
# Save kernel and the power image for each image
results_list.append(power(img, kernel))
gabors = np.rollaxis(np.dstack([results_list]), 0, 3)
return gabors
def get_link(model_url):
"""Fetch the RF model pickle file or the building footprints geojson."""
response = requests.get(model_url)
| return response.content
#partials
get_model = partial(get_link, model_url=RF_model_link) #gets RF model response content
get_geojson = partial(get_link, model_url=buildings_geojson_link) #gets building geojson response content
def reproject(geom, from_proj='EPSG:4326', to_proj='EPSG:26942'):
"""Project from ESPG:4326 to ESPG:26942."""
tfm = partial(pyproj.transform, pyproj.Proj(init=from_proj), pyproj.Proj(init=to_proj))
return ops.transform(tfm, geom)
def km2_area(polygons):
"""Get area in km^2 after reprojection."""
reprojected_polygons = [reproject(p) for p in polygons]
return ops.cascaded_union(reprojected_polygons).area * 1e-6
def clean(img):
"""Clean the binary image by removing small holes and objects."""
label_img = label(img, connectivity=2)
props = sorted(regionprops(label_img), key=lambda x: x.area)
clean = morphology.binary_closing(img)
clean = morphology.remove_small_holes(clean)
return morphology.remove_small_objects(clean,
int(np.floor(props[-1].area) / 10), connectivity=2)
def to_geojson(shapes, buildings):
"""Converts the shapes into geojson.
This function will combine the burn scar region and buildings into geojson.
Burn scar polygon in red, buildings polygon all in blue."""
#append burn scar region polygons to geojson
if type(shapes) == list:
results = ({
'type': 'Feature',
'properties': {'raster_val': v, 'color': 'red'},
'geometry': s.__geo_interface__}
for i, (s, v)
in enumerate(shapes))
else:
results = ({
'type': 'Feature',
'properties': {'raster_val': v, 'color': 'red'},
'geometry': s}
for i, (s, v)
in enumerate(shapes))
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b['properties']['BuildingID'], 'color': 'blue'},
'geometry': b['geometry']}
for i, b
in enumerate(buildings['features']))
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def geojson_to_polygons(js_):
"""Convert the geojson into Shapely Polygons.
Keep burn scar polygons as red.
Mark all building polygons labelled as ('yellow', False) and will be changed later."""
burnt_polys = []
building_polys = []
for i, feat in enumerate(js_['features']):
o = {
"coordinates": feat['geometry']['coordinates'],
"type": feat['geometry']['type']
}
s = json.dumps(o)
# convert to geojson.geometry.Polygon
g1 = geojson.loads(s)
# covert to shapely.geometry.polygon.Polygon
g2 = shape(g1)
if feat['properties']['color'] == 'red': # red for the burnt region
burnt_polys.append(g2)
else: # for the building poly
building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',
False]]) # mark building polygons as 'yellow' for non-burnt for now
return burnt_polys, building_polys
def label_building_polys(burnt_polys, building_polys):
"""Labels the building polygons as ('blue', True) if the building is destroyed."""
for b in building_polys:
for r in burnt_polys:
if b[0].intersects(r):
b[1] = [b[1][0], 'blue', True] # mark building polygon as 'blue' if found in burnt region
continue
def to_geojson_burnt(burnt_polys, building_polys):
"""Convert shapes into geojson with new labelled building footprints. """
results = ({
'type': 'Feature',
'properties': {'color': 'red'},
'geometry': geo.mapping(r)}
for r in burnt_polys)
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b[1][0], 'color': b[1][1]},
'geometry': geo.mapping(b[0])}
for b in building_polys)
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def to_geojson_groundtruth(burnt_polys, data_labelled):
"""Convert shapes into geojson for the groundtruth."""
results = ({
'type': 'Feature',
'properties': {'color': 'red'},
'geometry': geo.mapping(r)}
for r in burnt_polys)
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b['properties']['BuildingID'], 'color': b['properties']['color'],
'Burnt_Label': b['properties']['Burnt_Label']},
'geometry': b['geometry']}
for b in data_labelled['features'])
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
def geojson_to_polygons_groundtruth(js_):
"""Convert geojson to polygons for the groundtruth map."""
burnt_polys = []
building_polys = []
for i, feat in enumerate(js_['features']):
o = {
"coordinates": feat['geometry']['coordinates'],
"type": feat['geometry']['type']
}
s = json.dumps(o)
# convert to geojson.geometry.Polygon
g1 = geojson.loads(s)
# covert to shapely.geometry.polygon.Polygon
g2 = shape(g1)
if feat['properties']['color'] == 'red': # red for the burnt region
burnt_polys.append(g2)
else: # for the building poly
if feat['properties']['Burnt_Label']:
building_polys.append([g2, [feat['properties']['BuildingID'], 'blue',
True]]) # mark building polygons as 'blue' for burnt for now
else:
building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',
False]]) # mark building polygons as 'yellow' for non-burnt for now
return burnt_polys, building_polys
def accuracy_measures(predictions, trues):
"""Accuracy measures for the predictions of the method vs the groundtruth.
Prints a confusion matrix, accuracy, misclassifcation rate, true positieve rate, false positive rate, specificity, precision, prevalence.
Returns the accuracy score, precision score, and recall score."""
tn, fp, fn, tp = confusion_matrix(trues, predictions).ravel()
print "\t(tn, fp, fn, tp) =", (tn, fp, fn, tp)
# how often is classifier correct?
print "\tAccuracy = {:.2%}".format(float(tp + tn) / len(trues))
# how often is it wrong?
print "\tMisclassification Rate = {:.2%}".format(float(fp + fn) / len(trues))
# when actually yes, how often does it predict yes?
print "\tTrue Positive Rate = {:.2%}".format(float(tp) / trues.count(True))
# when actually no, how often does it predict yes?
print "\tFalse Positive Rate = {:.2%}".format(float(fp) / trues.count(False))
# when actually no, how often does it predict no?
print "\tSpecificity = {:.2%}".format(float(tn) / trues.count(False))
# when it predicts yes, how often is it correct?
print "\tPrecision = {:.2%}".format(float(tp) / predictions.count(True))
# how often does yes condition occur in our sample?
print "\tPrevalence = {:.2%}\n".format(float(trues.count(True)) / len(trues))
# return accuracy, precision, and recall score
return accuracy_score(trues, predictions), precision_score(trues, predictions, average='binary'), recall_score(
trues, predictions, average='binary')
def create_mask(predictions_2d, sizeX, sizeY, chip_shape):
"""Create a new binary mask of burn scar with the tiles."""
# reshape predictions_2d
predictions_2d_res = np.array(predictions_2d)
predictions_2d_res = predictions_2d_res.reshape(sizeX, sizeY)
# create new mask of area of interest
new_mask = np.zeros((chip_shape[1], chip_shape[2]))
for x in range(0, chip_shape[1], 256):
for y in range(0, chip_shape[2], 256):
new_mask[x:x + 256, y:y + 256] = predictions_2d_res[x / 256][y / 256]
return new_mask
"""Functions for plots."""
def folium_map(geojson_to_overlay, layer_name, location, style_function=None, tiles='Stamen Terrain', zoom_start=16,
show_layer_control=True, width='100%', height='75%', attr=None, map_zoom=18, max_zoom=20, tms=False,
zoom_beyond_max=None, base_tiles='OpenStreetMap', opacity=1):
"""Folium map with Geojson layer and TMS tiles layer.
This function requires geojson_to_overlay (geojson), layer_name (String), and location (map center tuple).
You can also set tiles to the TMS URL and control map zoom."""
m = folium.Map(location=location, zoom_start=zoom_start, width=width, height=height, max_zoom=map_zoom,
tiles=base_tiles)
tiles = folium.TileLayer(tiles=tiles, attr=attr, name=attr, max_zoom=max_zoom)
if tms is True:
options = json.loads(tiles.options)
options.update({'tms': True})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
if zoom_beyond_max is not None:
options = json.loads(tiles.options)
options.update({'maxNativeZoom': zoom_beyond_max, 'maxZoom': max_zoom})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
if opacity < 1:
options = json.loads(tiles.options)
options.update({'opacity': opacity})
tiles.options = json.dumps(options, sort_keys=True, indent=2)
tiles._template = jinja2.Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
tiles.add_to(m)
if style_function is not None:
gj = folium.GeoJson(geojson_to_overlay, overlay=True, name=layer_name, style_function=style_function)
else:
gj = folium.GeoJson(geojson_to_overlay, overlay=True, name=layer_name)
gj.add_to(m)
if show_layer_control is True:
folium.LayerControl().add_to(m)
return m
def plot_array(array, subplot_ijk, title="", font_size=18, cmap=None):
"""Plot image with subplot.
Requires image and subplot location (ie. (1,2,1)).
You can also set title."""
sp = plt.subplot(*subplot_ijk)
sp.set_title(title, fontsize=font_size)
plt.axis('off')
plt.imshow(array, cmap=cmap)
def displayHTMLtable(acc_sent2, acc_wv03, acc, prec_sent2, prec_wv03, prec, recall_sent2, recall_wv03, recall):
"""Display accuracy scores in a table."""
methods = ['Sent2 NBR', 'WV03 NBR', 'WV03 RF']
accuracies = ["{:.2%}".format(acc_sent2), "{:.2%}".format(acc_wv03), "{:.2%}".format(acc)]
precisions = ["{:.2%}".format(prec_sent2), "{:.2%}".format(prec_wv03), "{:.2%}".format(prec)]
recalls = ["{:.2%}".format(recall_sent2), "{:.2%}".format(recall_wv03), "{:.2%}".format(recall)]
data = methods + accuracies + precisions + recalls
data = np.reshape(data, (4, 3)).T
display(HTML(
'<table style="width:100%;"><th>Method</th><th>Accuracy</th><th>Precision</th><th>Recall</th><tr>{}</tr></table>'.format(
'</tr><tr>'.join(
'<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)
)
)) | random_line_split | |
dwi_corr_util.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 13:13:53 2018
@author: zhang
"""
'''
Warp Commands use during diffusion-weighted images preprocessing
================================================================
dwidenoise & mrdegibbs from MRTrix3.0; eddy-openmp from FSL
-------------------------------------------------------------------------
for unkonwn reason they are not included after loading relavant interface
'''
from nipype.interfaces.base import (CommandLine,
CommandLineInputSpec,
File,
TraitedSpec,
traits,
isdefined,
InputMultiPath)
import os
# warp the dwidenoise function from MRtrix
class DWIdenoiseInputSpec(CommandLineInputSpec):
in_file = InputMultiPath(
File(exists=True),
mandatory=True,
position=0,
argstr="%s",
desc="input DWI image")
noise = File(
argstr='-noise %s',
desc='noise map')
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_denoised',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output denoised DWI image")
class DWIdenoiseOutputSpec(TraitedSpec):
out_file = File(desc = "the output denoised DWI image", exists = True)
class DWIdenoise(CommandLine):
"""Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the
noise level based on the optimal threshold for PCA.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'
input_spec = DWIdenoiseInputSpec
output_spec = DWIdenoiseOutputSpec
# warp the unring function from MRtrix
class MRdegibbsInputSpec(CommandLineInputSpec):
in_file = File(
desc="input DWI image",
exists=True,
mandatory=True,
position=0,
argstr="%s")
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_unringed',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output unringed DWI image")
class MRdegibbsOutputSpec(TraitedSpec):
out_file = File(desc = "the output unringed DWI image", exists = True)
class MRdegibbs(CommandLine):
"""Use MRTrix3 degibbs command for removing the gibbs ringing artefact.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
input_spec = MRdegibbsInputSpec
output_spec = MRdegibbsOutputSpec
# Wrap FSL eddy (copy from nipype interface)
class EddyInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
mandatory=True,
argstr='--imain=%s',
desc=('File containing all the images to estimate '
'distortions for'))
in_mask = File(
exists=True,
mandatory=True,
argstr='--mask=%s',
desc='Mask to indicate brain')
in_index = File(
exists=True,
mandatory=True,
argstr='--index=%s',
desc=('File containing indices for all volumes in --imain '
'into --acqp and --topup'))
in_acqp = File(
exists=True,
mandatory=True,
argstr='--acqp=%s',
desc='File containing acquisition parameters')
in_bvec = File(
exists=True,
mandatory=True,
argstr='--bvecs=%s',
desc=('File containing the b-vectors for all volumes in '
'--imain'))
in_bval = File(
exists=True,
mandatory=True,
argstr='--bvals=%s',
desc=('File containing the b-values for all volumes in '
'--imain'))
out_base = traits.Str(
'eddy_corrected',
argstr='--out=%s',
usedefault=True,
desc=('basename for output (warped) image'))
session = File(
exists=True,
argstr='--session=%s',
desc=('File containing session indices for all volumes in '
'--imain'))
in_topup_fieldcoef = File(
exists=True,
argstr="--topup=%s",
requires=['in_topup_movpar'],
desc=('topup file containing the field '
'coefficients'))
in_topup_movpar = File(
exists=True,
requires=['in_topup_fieldcoef'],
desc='topup movpar.txt file')
flm = traits.Enum(
'linear',
'quadratic',
'cubic',
argstr='--flm=%s',
desc='First level EC model')
slm = traits.Enum(
'none',
'linear',
'quadratic',
argstr='--slm=%s',
desc='Second level EC model')
fep = traits.Bool(
False, argstr='--fep', desc='Fill empty planes in x- or y-directions')
interp = traits.Enum(
'spline',
'trilinear',
argstr='--interp=%s',
desc='Interpolation model for estimation step')
nvoxhp = traits.Int(
1000, usedefault=True,
argstr='--nvoxhp=%s',
desc=('# of voxels used to estimate the '
'hyperparameters'))
fudge_factor = traits.Float(
10.0, usedefault=True,
argstr='--ff=%s',
desc=('Fudge factor for hyperparameter '
'error variance'))
dont_sep_offs_move = traits.Bool(
False,
argstr='--dont_sep_offs_move',
desc=('Do NOT attempt to separate '
'field offset from subject '
'movement'))
dont_peas = traits.Bool(
False,
argstr='--dont_peas',
desc="Do NOT perform a post-eddy alignment of "
"shells")
fwhm = traits.Float(
desc=('FWHM for conditioning filter when estimating '
'the parameters'),
argstr='--fwhm=%s')
niter = traits.Int(5, usedefault=True,
argstr='--niter=%s', desc='Number of iterations')
method = traits.Enum(
'jac',
'lsr',
argstr='--resamp=%s',
desc=('Final resampling method (jacobian/least '
'squares)'))
repol = traits.Bool(
False, argstr='--repol', desc='Detect and replace outlier slices')
num_threads = traits.Int(
1,
usedefault=True,
nohash=True,
desc="Number of openmp threads to use")
is_shelled = traits.Bool(
False,
argstr='--data_is_shelled',
desc="Override internal check to ensure that "
"date are acquired on a set of b-value "
"shells")
field = traits.Str(
argstr='--field=%s',
desc="NonTOPUP fieldmap scaled in Hz - filename has "
"to be provided without an extension. TOPUP is "
"strongly recommended")
field_mat = File(
exists=True,
argstr='--field_mat=%s',
desc="Matrix that specifies the relative locations of "
"the field specified by --field and first volume "
"in file --imain")
use_cuda = traits.Bool(False, desc="Run eddy using cuda gpu")
class EddyOutputSpec(TraitedSpec):
out_corrected = File(
exists=True, desc='4D image file containing all the corrected volumes')
out_parameter = File(
exists=True,
desc=('text file with parameters definining the field and'
'movement for each scan'))
out_rotated_bvecs = File(
exists=True, desc='File containing rotated b-values for all volumes')
out_movement_rms = File(
exists=True, desc='Summary of the "total movement" in each volume')
out_restricted_movement_rms = File(
exists=True,
desc=('Summary of the "total movement" in each volume '
'disregarding translation in the PE direction'))
out_shell_alignment_parameters = File(
exists=True,
desc=('File containing rigid body movement parameters '
'between the different shells as estimated by a '
'post-hoc mutual information based registration'))
out_outlier_report = File(
exists=True,
desc=('Text-file with a plain language report on what '
'outlier slices eddy has found'))
class Eddy(CommandLine):
"""
Interface for FSL eddy, a tool for estimating and correcting eddy
currents induced distortions. `User guide
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
`more info regarding acqp file
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
Examples
--------
>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'epi.nii'
>>> eddy.inputs.in_mask = 'epi_mask.nii'
>>> eddy.inputs.in_index = 'epi_index.txt'
>>> eddy.inputs.in_acqp = 'epi_acqp.txt'
>>> eddy.inputs.in_bvec = 'bvecs.scheme'
>>> eddy.inputs.in_bval = 'bvals.scheme'
>>> eddy.inputs.use_cuda = True
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> eddy.inputs.use_cuda = False
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> res = eddy.run() # doctest: +SKIP
"""
_cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
input_spec = EddyInputSpec
output_spec = EddyOutputSpec
_num_threads = 1
def __init__(self, **inputs):
|
def _num_threads_update(self):
self._num_threads = self.inputs.num_threads
if not isdefined(self.inputs.num_threads):
if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
else:
self.inputs.environ['OMP_NUM_THREADS'] = str(
self.inputs.num_threads)
def _use_cuda(self):
self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
def _run_interface(self, runtime):
# If 'eddy_openmp' is missing, use 'eddy'
FSLDIR = os.getenv('FSLDIR', '')
cmd = self._cmd
if all((FSLDIR != '', cmd == 'eddy_openmp',
not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))):
self._cmd = 'eddy'
runtime = super(Eddy, self)._run_interface(runtime)
# Restore command to avoid side-effects
self._cmd = cmd
return runtime
def _format_arg(self, name, spec, value):
if name == 'in_topup_fieldcoef':
return spec.argstr % value.split('_fieldcoef')[0]
if name == 'out_base':
return spec.argstr % os.path.abspath(value)
return super(Eddy, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_corrected'] = os.path.abspath(
'%s.nii.gz' % self.inputs.out_base)
outputs['out_parameter'] = os.path.abspath(
'%s.eddy_parameters' % self.inputs.out_base)
# File generation might depend on the version of EDDY
out_rotated_bvecs = os.path.abspath(
'%s.eddy_rotated_bvecs' % self.inputs.out_base)
out_movement_rms = os.path.abspath(
'%s.eddy_movement_rms' % self.inputs.out_base)
out_restricted_movement_rms = os.path.abspath(
'%s.eddy_restricted_movement_rms' % self.inputs.out_base)
out_shell_alignment_parameters = os.path.abspath(
'%s.eddy_post_eddy_shell_alignment_parameters' %
self.inputs.out_base)
out_outlier_report = os.path.abspath(
'%s.eddy_outlier_report' % self.inputs.out_base)
if os.path.exists(out_rotated_bvecs):
outputs['out_rotated_bvecs'] = out_rotated_bvecs
if os.path.exists(out_movement_rms):
outputs['out_movement_rms'] = out_movement_rms
if os.path.exists(out_restricted_movement_rms):
outputs['out_restricted_movement_rms'] = \
out_restricted_movement_rms
if os.path.exists(out_shell_alignment_parameters):
outputs['out_shell_alignment_parameters'] = \
out_shell_alignment_parameters
if os.path.exists(out_outlier_report):
outputs['out_outlier_report'] = out_outlier_report
return outputs
| super(Eddy, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
if isdefined(self.inputs.use_cuda):
self._use_cuda() | identifier_body |
dwi_corr_util.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 13:13:53 2018
@author: zhang
"""
'''
Warp Commands use during diffusion-weighted images preprocessing
================================================================
dwidenoise & mrdegibbs from MRTrix3.0; eddy-openmp from FSL
-------------------------------------------------------------------------
for unkonwn reason they are not included after loading relavant interface
'''
from nipype.interfaces.base import (CommandLine,
CommandLineInputSpec,
File,
TraitedSpec,
traits,
isdefined,
InputMultiPath)
import os
# warp the dwidenoise function from MRtrix
class DWIdenoiseInputSpec(CommandLineInputSpec):
in_file = InputMultiPath(
File(exists=True),
mandatory=True,
position=0,
argstr="%s",
desc="input DWI image")
noise = File(
argstr='-noise %s',
desc='noise map')
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_denoised',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output denoised DWI image")
class DWIdenoiseOutputSpec(TraitedSpec):
out_file = File(desc = "the output denoised DWI image", exists = True)
class DWIdenoise(CommandLine):
"""Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the
noise level based on the optimal threshold for PCA.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'
input_spec = DWIdenoiseInputSpec
output_spec = DWIdenoiseOutputSpec
# warp the unring function from MRtrix
class MRdegibbsInputSpec(CommandLineInputSpec):
in_file = File(
desc="input DWI image",
exists=True,
mandatory=True,
position=0,
argstr="%s")
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_unringed',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output unringed DWI image")
class MRdegibbsOutputSpec(TraitedSpec):
out_file = File(desc = "the output unringed DWI image", exists = True)
class MRdegibbs(CommandLine):
"""Use MRTrix3 degibbs command for removing the gibbs ringing artefact.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
input_spec = MRdegibbsInputSpec
output_spec = MRdegibbsOutputSpec
# Wrap FSL eddy (copy from nipype interface)
class EddyInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
mandatory=True,
argstr='--imain=%s',
desc=('File containing all the images to estimate '
'distortions for'))
in_mask = File(
exists=True,
mandatory=True,
argstr='--mask=%s',
desc='Mask to indicate brain')
in_index = File(
exists=True,
mandatory=True,
argstr='--index=%s',
desc=('File containing indices for all volumes in --imain '
'into --acqp and --topup'))
in_acqp = File(
exists=True,
mandatory=True,
argstr='--acqp=%s',
desc='File containing acquisition parameters')
in_bvec = File(
exists=True,
mandatory=True,
argstr='--bvecs=%s',
desc=('File containing the b-vectors for all volumes in '
'--imain'))
in_bval = File(
exists=True,
mandatory=True,
argstr='--bvals=%s',
desc=('File containing the b-values for all volumes in '
'--imain'))
out_base = traits.Str(
'eddy_corrected',
argstr='--out=%s',
usedefault=True,
desc=('basename for output (warped) image'))
session = File(
exists=True,
argstr='--session=%s',
desc=('File containing session indices for all volumes in '
'--imain'))
in_topup_fieldcoef = File(
exists=True,
argstr="--topup=%s",
requires=['in_topup_movpar'],
desc=('topup file containing the field '
'coefficients'))
in_topup_movpar = File(
exists=True,
requires=['in_topup_fieldcoef'],
desc='topup movpar.txt file')
flm = traits.Enum(
'linear',
'quadratic',
'cubic',
argstr='--flm=%s',
desc='First level EC model')
slm = traits.Enum(
'none',
'linear',
'quadratic',
argstr='--slm=%s',
desc='Second level EC model')
fep = traits.Bool(
False, argstr='--fep', desc='Fill empty planes in x- or y-directions')
interp = traits.Enum(
'spline',
'trilinear',
argstr='--interp=%s',
desc='Interpolation model for estimation step')
nvoxhp = traits.Int(
1000, usedefault=True,
argstr='--nvoxhp=%s',
desc=('# of voxels used to estimate the '
'hyperparameters'))
fudge_factor = traits.Float(
10.0, usedefault=True,
argstr='--ff=%s',
desc=('Fudge factor for hyperparameter '
'error variance'))
dont_sep_offs_move = traits.Bool(
False,
argstr='--dont_sep_offs_move',
desc=('Do NOT attempt to separate '
'field offset from subject '
'movement'))
dont_peas = traits.Bool(
False,
argstr='--dont_peas',
desc="Do NOT perform a post-eddy alignment of "
"shells")
fwhm = traits.Float(
desc=('FWHM for conditioning filter when estimating '
'the parameters'),
argstr='--fwhm=%s')
niter = traits.Int(5, usedefault=True,
argstr='--niter=%s', desc='Number of iterations')
method = traits.Enum(
'jac',
'lsr',
argstr='--resamp=%s',
desc=('Final resampling method (jacobian/least '
'squares)'))
repol = traits.Bool(
False, argstr='--repol', desc='Detect and replace outlier slices')
num_threads = traits.Int(
1,
usedefault=True,
nohash=True,
desc="Number of openmp threads to use")
is_shelled = traits.Bool(
False,
argstr='--data_is_shelled',
desc="Override internal check to ensure that "
"date are acquired on a set of b-value "
"shells")
field = traits.Str(
argstr='--field=%s',
desc="NonTOPUP fieldmap scaled in Hz - filename has "
"to be provided without an extension. TOPUP is "
"strongly recommended")
field_mat = File(
exists=True,
argstr='--field_mat=%s',
desc="Matrix that specifies the relative locations of "
"the field specified by --field and first volume "
"in file --imain")
use_cuda = traits.Bool(False, desc="Run eddy using cuda gpu")
class EddyOutputSpec(TraitedSpec):
out_corrected = File(
exists=True, desc='4D image file containing all the corrected volumes')
out_parameter = File(
exists=True,
desc=('text file with parameters definining the field and'
'movement for each scan'))
out_rotated_bvecs = File(
exists=True, desc='File containing rotated b-values for all volumes')
out_movement_rms = File(
exists=True, desc='Summary of the "total movement" in each volume')
out_restricted_movement_rms = File(
exists=True,
desc=('Summary of the "total movement" in each volume '
'disregarding translation in the PE direction'))
out_shell_alignment_parameters = File(
exists=True,
desc=('File containing rigid body movement parameters '
'between the different shells as estimated by a '
'post-hoc mutual information based registration'))
out_outlier_report = File(
exists=True,
desc=('Text-file with a plain language report on what '
'outlier slices eddy has found'))
class Eddy(CommandLine):
"""
Interface for FSL eddy, a tool for estimating and correcting eddy
currents induced distortions. `User guide
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
`more info regarding acqp file
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
Examples
--------
>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'epi.nii'
>>> eddy.inputs.in_mask = 'epi_mask.nii'
>>> eddy.inputs.in_index = 'epi_index.txt'
>>> eddy.inputs.in_acqp = 'epi_acqp.txt'
>>> eddy.inputs.in_bvec = 'bvecs.scheme'
>>> eddy.inputs.in_bval = 'bvals.scheme'
>>> eddy.inputs.use_cuda = True
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> eddy.inputs.use_cuda = False
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> res = eddy.run() # doctest: +SKIP
"""
_cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
input_spec = EddyInputSpec
output_spec = EddyOutputSpec
_num_threads = 1
def __init__(self, **inputs):
super(Eddy, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
if isdefined(self.inputs.use_cuda):
self._use_cuda()
def _num_threads_update(self):
self._num_threads = self.inputs.num_threads
if not isdefined(self.inputs.num_threads):
if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
else:
self.inputs.environ['OMP_NUM_THREADS'] = str(
self.inputs.num_threads)
def _use_cuda(self):
self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
def _run_interface(self, runtime):
# If 'eddy_openmp' is missing, use 'eddy'
FSLDIR = os.getenv('FSLDIR', '')
cmd = self._cmd
if all((FSLDIR != '', cmd == 'eddy_openmp',
not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))):
self._cmd = 'eddy'
runtime = super(Eddy, self)._run_interface(runtime)
# Restore command to avoid side-effects
self._cmd = cmd
return runtime
def _format_arg(self, name, spec, value):
if name == 'in_topup_fieldcoef':
return spec.argstr % value.split('_fieldcoef')[0]
if name == 'out_base':
return spec.argstr % os.path.abspath(value)
return super(Eddy, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_corrected'] = os.path.abspath(
'%s.nii.gz' % self.inputs.out_base)
outputs['out_parameter'] = os.path.abspath(
'%s.eddy_parameters' % self.inputs.out_base)
# File generation might depend on the version of EDDY
out_rotated_bvecs = os.path.abspath(
'%s.eddy_rotated_bvecs' % self.inputs.out_base)
out_movement_rms = os.path.abspath(
'%s.eddy_movement_rms' % self.inputs.out_base)
out_restricted_movement_rms = os.path.abspath(
'%s.eddy_restricted_movement_rms' % self.inputs.out_base)
out_shell_alignment_parameters = os.path.abspath(
'%s.eddy_post_eddy_shell_alignment_parameters' %
self.inputs.out_base)
out_outlier_report = os.path.abspath(
'%s.eddy_outlier_report' % self.inputs.out_base)
if os.path.exists(out_rotated_bvecs):
outputs['out_rotated_bvecs'] = out_rotated_bvecs
if os.path.exists(out_movement_rms):
outputs['out_movement_rms'] = out_movement_rms
if os.path.exists(out_restricted_movement_rms):
outputs['out_restricted_movement_rms'] = \
out_restricted_movement_rms
if os.path.exists(out_shell_alignment_parameters):
|
if os.path.exists(out_outlier_report):
outputs['out_outlier_report'] = out_outlier_report
return outputs
| outputs['out_shell_alignment_parameters'] = \
out_shell_alignment_parameters | conditional_block |
dwi_corr_util.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 13:13:53 2018
@author: zhang
"""
'''
Warp Commands use during diffusion-weighted images preprocessing
================================================================
dwidenoise & mrdegibbs from MRTrix3.0; eddy-openmp from FSL
-------------------------------------------------------------------------
for unkonwn reason they are not included after loading relavant interface
'''
from nipype.interfaces.base import (CommandLine,
CommandLineInputSpec,
File,
TraitedSpec,
traits,
isdefined,
InputMultiPath)
import os
# warp the dwidenoise function from MRtrix
class DWIdenoiseInputSpec(CommandLineInputSpec):
in_file = InputMultiPath(
File(exists=True),
mandatory=True,
position=0,
argstr="%s",
desc="input DWI image")
noise = File(
argstr='-noise %s',
desc='noise map')
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_denoised',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output denoised DWI image")
class DWIdenoiseOutputSpec(TraitedSpec):
out_file = File(desc = "the output denoised DWI image", exists = True)
class DWIdenoise(CommandLine):
"""Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the
noise level based on the optimal threshold for PCA.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'
input_spec = DWIdenoiseInputSpec
output_spec = DWIdenoiseOutputSpec
# warp the unring function from MRtrix
class MRdegibbsInputSpec(CommandLineInputSpec):
in_file = File(
desc="input DWI image",
exists=True,
mandatory=True,
position=0,
argstr="%s")
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_unringed',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output unringed DWI image")
class MRdegibbsOutputSpec(TraitedSpec):
out_file = File(desc = "the output unringed DWI image", exists = True)
class MRdegibbs(CommandLine):
"""Use MRTrix3 degibbs command for removing the gibbs ringing artefact.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
input_spec = MRdegibbsInputSpec
output_spec = MRdegibbsOutputSpec
# Wrap FSL eddy (copy from nipype interface)
class EddyInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
mandatory=True,
argstr='--imain=%s',
desc=('File containing all the images to estimate '
'distortions for'))
in_mask = File(
exists=True,
mandatory=True,
argstr='--mask=%s',
desc='Mask to indicate brain')
in_index = File(
exists=True,
mandatory=True,
argstr='--index=%s',
desc=('File containing indices for all volumes in --imain '
'into --acqp and --topup'))
in_acqp = File(
exists=True,
mandatory=True,
argstr='--acqp=%s',
desc='File containing acquisition parameters')
in_bvec = File(
exists=True,
mandatory=True,
argstr='--bvecs=%s',
desc=('File containing the b-vectors for all volumes in '
'--imain'))
in_bval = File(
exists=True,
mandatory=True,
argstr='--bvals=%s',
desc=('File containing the b-values for all volumes in '
'--imain'))
out_base = traits.Str(
'eddy_corrected',
argstr='--out=%s',
usedefault=True,
desc=('basename for output (warped) image'))
session = File(
exists=True,
argstr='--session=%s',
desc=('File containing session indices for all volumes in '
'--imain'))
in_topup_fieldcoef = File(
exists=True,
argstr="--topup=%s",
requires=['in_topup_movpar'],
desc=('topup file containing the field '
'coefficients'))
in_topup_movpar = File(
exists=True,
requires=['in_topup_fieldcoef'],
desc='topup movpar.txt file')
flm = traits.Enum(
'linear',
'quadratic',
'cubic',
argstr='--flm=%s',
desc='First level EC model')
slm = traits.Enum(
'none',
'linear',
'quadratic',
argstr='--slm=%s',
desc='Second level EC model')
fep = traits.Bool(
False, argstr='--fep', desc='Fill empty planes in x- or y-directions')
interp = traits.Enum(
'spline',
'trilinear',
argstr='--interp=%s',
desc='Interpolation model for estimation step')
nvoxhp = traits.Int(
1000, usedefault=True,
argstr='--nvoxhp=%s',
desc=('# of voxels used to estimate the '
'hyperparameters'))
fudge_factor = traits.Float(
10.0, usedefault=True,
argstr='--ff=%s',
desc=('Fudge factor for hyperparameter '
'error variance'))
dont_sep_offs_move = traits.Bool(
False,
argstr='--dont_sep_offs_move',
desc=('Do NOT attempt to separate '
'field offset from subject '
'movement'))
dont_peas = traits.Bool(
False,
argstr='--dont_peas',
desc="Do NOT perform a post-eddy alignment of "
"shells")
fwhm = traits.Float(
desc=('FWHM for conditioning filter when estimating '
'the parameters'),
argstr='--fwhm=%s')
niter = traits.Int(5, usedefault=True,
argstr='--niter=%s', desc='Number of iterations')
method = traits.Enum(
'jac',
'lsr',
argstr='--resamp=%s',
desc=('Final resampling method (jacobian/least '
'squares)'))
repol = traits.Bool(
False, argstr='--repol', desc='Detect and replace outlier slices')
num_threads = traits.Int(
1,
usedefault=True,
nohash=True,
desc="Number of openmp threads to use")
is_shelled = traits.Bool(
False,
argstr='--data_is_shelled',
desc="Override internal check to ensure that "
"date are acquired on a set of b-value "
"shells")
field = traits.Str(
argstr='--field=%s',
desc="NonTOPUP fieldmap scaled in Hz - filename has "
"to be provided without an extension. TOPUP is "
"strongly recommended")
field_mat = File(
exists=True,
argstr='--field_mat=%s',
desc="Matrix that specifies the relative locations of "
"the field specified by --field and first volume "
"in file --imain")
use_cuda = traits.Bool(False, desc="Run eddy using cuda gpu")
class EddyOutputSpec(TraitedSpec):
out_corrected = File(
exists=True, desc='4D image file containing all the corrected volumes')
out_parameter = File(
exists=True,
desc=('text file with parameters definining the field and'
'movement for each scan'))
out_rotated_bvecs = File(
exists=True, desc='File containing rotated b-values for all volumes')
out_movement_rms = File(
exists=True, desc='Summary of the "total movement" in each volume')
out_restricted_movement_rms = File(
exists=True,
desc=('Summary of the "total movement" in each volume '
'disregarding translation in the PE direction'))
out_shell_alignment_parameters = File(
exists=True,
desc=('File containing rigid body movement parameters '
'between the different shells as estimated by a '
'post-hoc mutual information based registration'))
out_outlier_report = File(
exists=True,
desc=('Text-file with a plain language report on what '
'outlier slices eddy has found'))
class Eddy(CommandLine):
"""
Interface for FSL eddy, a tool for estimating and correcting eddy
currents induced distortions. `User guide
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
`more info regarding acqp file
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
Examples
--------
>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'epi.nii'
>>> eddy.inputs.in_mask = 'epi_mask.nii'
>>> eddy.inputs.in_index = 'epi_index.txt'
>>> eddy.inputs.in_acqp = 'epi_acqp.txt'
>>> eddy.inputs.in_bvec = 'bvecs.scheme'
>>> eddy.inputs.in_bval = 'bvals.scheme'
>>> eddy.inputs.use_cuda = True
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> eddy.inputs.use_cuda = False
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> res = eddy.run() # doctest: +SKIP
"""
_cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
input_spec = EddyInputSpec
output_spec = EddyOutputSpec
_num_threads = 1
def __init__(self, **inputs):
super(Eddy, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
if isdefined(self.inputs.use_cuda):
self._use_cuda()
def _num_threads_update(self): | if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
else:
self.inputs.environ['OMP_NUM_THREADS'] = str(
self.inputs.num_threads)
def _use_cuda(self):
self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
def _run_interface(self, runtime):
# If 'eddy_openmp' is missing, use 'eddy'
FSLDIR = os.getenv('FSLDIR', '')
cmd = self._cmd
if all((FSLDIR != '', cmd == 'eddy_openmp',
not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))):
self._cmd = 'eddy'
runtime = super(Eddy, self)._run_interface(runtime)
# Restore command to avoid side-effects
self._cmd = cmd
return runtime
def _format_arg(self, name, spec, value):
if name == 'in_topup_fieldcoef':
return spec.argstr % value.split('_fieldcoef')[0]
if name == 'out_base':
return spec.argstr % os.path.abspath(value)
return super(Eddy, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_corrected'] = os.path.abspath(
'%s.nii.gz' % self.inputs.out_base)
outputs['out_parameter'] = os.path.abspath(
'%s.eddy_parameters' % self.inputs.out_base)
# File generation might depend on the version of EDDY
out_rotated_bvecs = os.path.abspath(
'%s.eddy_rotated_bvecs' % self.inputs.out_base)
out_movement_rms = os.path.abspath(
'%s.eddy_movement_rms' % self.inputs.out_base)
out_restricted_movement_rms = os.path.abspath(
'%s.eddy_restricted_movement_rms' % self.inputs.out_base)
out_shell_alignment_parameters = os.path.abspath(
'%s.eddy_post_eddy_shell_alignment_parameters' %
self.inputs.out_base)
out_outlier_report = os.path.abspath(
'%s.eddy_outlier_report' % self.inputs.out_base)
if os.path.exists(out_rotated_bvecs):
outputs['out_rotated_bvecs'] = out_rotated_bvecs
if os.path.exists(out_movement_rms):
outputs['out_movement_rms'] = out_movement_rms
if os.path.exists(out_restricted_movement_rms):
outputs['out_restricted_movement_rms'] = \
out_restricted_movement_rms
if os.path.exists(out_shell_alignment_parameters):
outputs['out_shell_alignment_parameters'] = \
out_shell_alignment_parameters
if os.path.exists(out_outlier_report):
outputs['out_outlier_report'] = out_outlier_report
return outputs | self._num_threads = self.inputs.num_threads
if not isdefined(self.inputs.num_threads): | random_line_split |
dwi_corr_util.py | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 13:13:53 2018
@author: zhang
"""
'''
Warp Commands use during diffusion-weighted images preprocessing
================================================================
dwidenoise & mrdegibbs from MRTrix3.0; eddy-openmp from FSL
-------------------------------------------------------------------------
for unkonwn reason they are not included after loading relavant interface
'''
from nipype.interfaces.base import (CommandLine,
CommandLineInputSpec,
File,
TraitedSpec,
traits,
isdefined,
InputMultiPath)
import os
# warp the dwidenoise function from MRtrix
class DWIdenoiseInputSpec(CommandLineInputSpec):
in_file = InputMultiPath(
File(exists=True),
mandatory=True,
position=0,
argstr="%s",
desc="input DWI image")
noise = File(
argstr='-noise %s',
desc='noise map')
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_denoised',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output denoised DWI image")
class DWIdenoiseOutputSpec(TraitedSpec):
out_file = File(desc = "the output denoised DWI image", exists = True)
class DWIdenoise(CommandLine):
"""Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the
noise level based on the optimal threshold for PCA.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'
input_spec = DWIdenoiseInputSpec
output_spec = DWIdenoiseOutputSpec
# warp the unring function from MRtrix
class MRdegibbsInputSpec(CommandLineInputSpec):
in_file = File(
desc="input DWI image",
exists=True,
mandatory=True,
position=0,
argstr="%s")
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_unringed',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output unringed DWI image")
class MRdegibbsOutputSpec(TraitedSpec):
out_file = File(desc = "the output unringed DWI image", exists = True)
class MRdegibbs(CommandLine):
"""Use MRTrix3 degibbs command for removing the gibbs ringing artefact.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
input_spec = MRdegibbsInputSpec
output_spec = MRdegibbsOutputSpec
# Wrap FSL eddy (copy from nipype interface)
class EddyInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
mandatory=True,
argstr='--imain=%s',
desc=('File containing all the images to estimate '
'distortions for'))
in_mask = File(
exists=True,
mandatory=True,
argstr='--mask=%s',
desc='Mask to indicate brain')
in_index = File(
exists=True,
mandatory=True,
argstr='--index=%s',
desc=('File containing indices for all volumes in --imain '
'into --acqp and --topup'))
in_acqp = File(
exists=True,
mandatory=True,
argstr='--acqp=%s',
desc='File containing acquisition parameters')
in_bvec = File(
exists=True,
mandatory=True,
argstr='--bvecs=%s',
desc=('File containing the b-vectors for all volumes in '
'--imain'))
in_bval = File(
exists=True,
mandatory=True,
argstr='--bvals=%s',
desc=('File containing the b-values for all volumes in '
'--imain'))
out_base = traits.Str(
'eddy_corrected',
argstr='--out=%s',
usedefault=True,
desc=('basename for output (warped) image'))
session = File(
exists=True,
argstr='--session=%s',
desc=('File containing session indices for all volumes in '
'--imain'))
in_topup_fieldcoef = File(
exists=True,
argstr="--topup=%s",
requires=['in_topup_movpar'],
desc=('topup file containing the field '
'coefficients'))
in_topup_movpar = File(
exists=True,
requires=['in_topup_fieldcoef'],
desc='topup movpar.txt file')
flm = traits.Enum(
'linear',
'quadratic',
'cubic',
argstr='--flm=%s',
desc='First level EC model')
slm = traits.Enum(
'none',
'linear',
'quadratic',
argstr='--slm=%s',
desc='Second level EC model')
fep = traits.Bool(
False, argstr='--fep', desc='Fill empty planes in x- or y-directions')
interp = traits.Enum(
'spline',
'trilinear',
argstr='--interp=%s',
desc='Interpolation model for estimation step')
nvoxhp = traits.Int(
1000, usedefault=True,
argstr='--nvoxhp=%s',
desc=('# of voxels used to estimate the '
'hyperparameters'))
fudge_factor = traits.Float(
10.0, usedefault=True,
argstr='--ff=%s',
desc=('Fudge factor for hyperparameter '
'error variance'))
dont_sep_offs_move = traits.Bool(
False,
argstr='--dont_sep_offs_move',
desc=('Do NOT attempt to separate '
'field offset from subject '
'movement'))
dont_peas = traits.Bool(
False,
argstr='--dont_peas',
desc="Do NOT perform a post-eddy alignment of "
"shells")
fwhm = traits.Float(
desc=('FWHM for conditioning filter when estimating '
'the parameters'),
argstr='--fwhm=%s')
niter = traits.Int(5, usedefault=True,
argstr='--niter=%s', desc='Number of iterations')
method = traits.Enum(
'jac',
'lsr',
argstr='--resamp=%s',
desc=('Final resampling method (jacobian/least '
'squares)'))
repol = traits.Bool(
False, argstr='--repol', desc='Detect and replace outlier slices')
num_threads = traits.Int(
1,
usedefault=True,
nohash=True,
desc="Number of openmp threads to use")
is_shelled = traits.Bool(
False,
argstr='--data_is_shelled',
desc="Override internal check to ensure that "
"date are acquired on a set of b-value "
"shells")
field = traits.Str(
argstr='--field=%s',
desc="NonTOPUP fieldmap scaled in Hz - filename has "
"to be provided without an extension. TOPUP is "
"strongly recommended")
field_mat = File(
exists=True,
argstr='--field_mat=%s',
desc="Matrix that specifies the relative locations of "
"the field specified by --field and first volume "
"in file --imain")
use_cuda = traits.Bool(False, desc="Run eddy using cuda gpu")
class EddyOutputSpec(TraitedSpec):
out_corrected = File(
exists=True, desc='4D image file containing all the corrected volumes')
out_parameter = File(
exists=True,
desc=('text file with parameters definining the field and'
'movement for each scan'))
out_rotated_bvecs = File(
exists=True, desc='File containing rotated b-values for all volumes')
out_movement_rms = File(
exists=True, desc='Summary of the "total movement" in each volume')
out_restricted_movement_rms = File(
exists=True,
desc=('Summary of the "total movement" in each volume '
'disregarding translation in the PE direction'))
out_shell_alignment_parameters = File(
exists=True,
desc=('File containing rigid body movement parameters '
'between the different shells as estimated by a '
'post-hoc mutual information based registration'))
out_outlier_report = File(
exists=True,
desc=('Text-file with a plain language report on what '
'outlier slices eddy has found'))
class Eddy(CommandLine):
"""
Interface for FSL eddy, a tool for estimating and correcting eddy
currents induced distortions. `User guide
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
`more info regarding acqp file
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
Examples
--------
>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'epi.nii'
>>> eddy.inputs.in_mask = 'epi_mask.nii'
>>> eddy.inputs.in_index = 'epi_index.txt'
>>> eddy.inputs.in_acqp = 'epi_acqp.txt'
>>> eddy.inputs.in_bvec = 'bvecs.scheme'
>>> eddy.inputs.in_bval = 'bvals.scheme'
>>> eddy.inputs.use_cuda = True
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> eddy.inputs.use_cuda = False
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> res = eddy.run() # doctest: +SKIP
"""
_cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
input_spec = EddyInputSpec
output_spec = EddyOutputSpec
_num_threads = 1
def __init__(self, **inputs):
super(Eddy, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
if isdefined(self.inputs.use_cuda):
self._use_cuda()
def | (self):
self._num_threads = self.inputs.num_threads
if not isdefined(self.inputs.num_threads):
if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
else:
self.inputs.environ['OMP_NUM_THREADS'] = str(
self.inputs.num_threads)
def _use_cuda(self):
self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
def _run_interface(self, runtime):
# If 'eddy_openmp' is missing, use 'eddy'
FSLDIR = os.getenv('FSLDIR', '')
cmd = self._cmd
if all((FSLDIR != '', cmd == 'eddy_openmp',
not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))):
self._cmd = 'eddy'
runtime = super(Eddy, self)._run_interface(runtime)
# Restore command to avoid side-effects
self._cmd = cmd
return runtime
def _format_arg(self, name, spec, value):
if name == 'in_topup_fieldcoef':
return spec.argstr % value.split('_fieldcoef')[0]
if name == 'out_base':
return spec.argstr % os.path.abspath(value)
return super(Eddy, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_corrected'] = os.path.abspath(
'%s.nii.gz' % self.inputs.out_base)
outputs['out_parameter'] = os.path.abspath(
'%s.eddy_parameters' % self.inputs.out_base)
# File generation might depend on the version of EDDY
out_rotated_bvecs = os.path.abspath(
'%s.eddy_rotated_bvecs' % self.inputs.out_base)
out_movement_rms = os.path.abspath(
'%s.eddy_movement_rms' % self.inputs.out_base)
out_restricted_movement_rms = os.path.abspath(
'%s.eddy_restricted_movement_rms' % self.inputs.out_base)
out_shell_alignment_parameters = os.path.abspath(
'%s.eddy_post_eddy_shell_alignment_parameters' %
self.inputs.out_base)
out_outlier_report = os.path.abspath(
'%s.eddy_outlier_report' % self.inputs.out_base)
if os.path.exists(out_rotated_bvecs):
outputs['out_rotated_bvecs'] = out_rotated_bvecs
if os.path.exists(out_movement_rms):
outputs['out_movement_rms'] = out_movement_rms
if os.path.exists(out_restricted_movement_rms):
outputs['out_restricted_movement_rms'] = \
out_restricted_movement_rms
if os.path.exists(out_shell_alignment_parameters):
outputs['out_shell_alignment_parameters'] = \
out_shell_alignment_parameters
if os.path.exists(out_outlier_report):
outputs['out_outlier_report'] = out_outlier_report
return outputs
| _num_threads_update | identifier_name |
lib.rs | //! [](https://github.com/time-rs/time)
//! 
//! [](https://www.whatrustisit.com)
//!
//! # Feature flags
//!
//! This crate exposes a number of features. These can be enabled or disabled as
//! shown [in Cargo's documentation](https://doc.rust-lang.org/cargo/reference/features.html).
//! Features are _disabled_ by default unless otherwise noted.
//!
//! Reliance on a given feature is always indicated alongside the item
//! definition.
//!
//! - `std` (_enabled by default, implicitly enables `alloc`_)
//!
//! This enables a number of features that depend on the standard library.
//! [`Instant`] is the primary item that requires this feature, though some
//! others methods may rely on [`Instant`] internally.
//!
//! - `alloc` (_enabled by default via `std`_)
//!
//! Enables a number of features that require the ability to dynamically
//! allocate memory.
//!
//! - `macros`
//!
//! Enables macros that provide compile-time verification of values and
//! intuitive syntax.
//!
//! - `local-offset` (_implicitly enables `std`_)
//!
//! This feature enables a number of methods that allow obtaining the system's
//! UTC offset.
//!
//! - `large-dates`
//!
//! By default, only years within the ±9999 range (inclusive) are supported.
//! If you need support for years outside this range, consider enabling this
//! feature; the supported range will be increased to ±999,999.
//!
//! Note that enabling this feature has some costs, as it means forgoing some
//! optimizations. Ambiguities may be introduced when parsing that would not
//! otherwise exist.
//!
//! - `serde`
//!
//! Enables [serde](https://docs.rs/serde) support for all types.
//!
//! - `rand`
//!
//! Enables [rand](https://docs.rs/rand) support for all types.
//!
//! - `quickcheck` (_implicitly enables `rand`_)
//!
//! Enables [quickcheck](https://docs.rs/quickcheck) support for all types except [`Instant`].
#![cfg_attr(__time_03_docs, feature(doc_cfg))]
#![cfg_attr(__time_03_docs, deny(broken_intra_doc_links))]
#![cfg_attr(not(feature = "std"), no_std)]
#![deny(
anonymous_parameters,
clippy::all,
const_err,
illegal_floating_point_literal_pattern,
late_bound_lifetime_arguments,
path_statements,
patterns_in_fns_without_body,
rust_2018_idioms,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unsafe_code,
unused_extern_crates
)]
#![warn(
clippy::dbg_macro,
clippy::decimal_literal_representation,
clippy::get_unwrap,
clippy::missing_docs_in_private_items,
clippy::nursery,
clippy::pedantic,
clippy::print_stdout,
clippy::todo,
clippy::unimplemented,
clippy::unwrap_used,
clippy::use_debug,
missing_copy_implementations,
missing_debug_implementations,
unused_qualifications,
variant_size_differences
)]
#![allow(
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::enum_glob_use,
clippy::map_err_ignore,
clippy::missing_errors_doc,
clippy::must_use_candidate,
clippy::redundant_pub_crate,
clippy::wildcard_imports
)]
#![doc(html_favicon_url = "https://avatars0.githubusercontent.com/u/55999857")]
#![doc(html_logo_url = "https://avatars0.githubusercontent.com/u/55999857")]
#![doc(test(attr(deny(warnings))))]
#[cfg(feature = "alloc")]
extern crate alloc;
/// Returns `Err(error::ComponentRange)` if the value is not in range.
macro_rules! ensure_value_in_range {
($value:ident in $start:expr => $end:expr) => {{
#![allow(trivial_numeric_casts, unused_comparisons)]
if $value < $start || $value > $end {
return Err(crate::error::ComponentRange {
name: stringify!($value),
minimum: $start as _,
maximum: $end as _,
value: $value as _,
conditional_range: false,
});
}
}};
($value:ident conditionally in $start:expr => $end:expr) => {{
#![allow(trivial_numeric_casts, unused_comparisons)]
if $value < $start || $value > $end {
return Err(crate::error::ComponentRange {
name: stringify!($value),
minimum: $start as _,
maximum: $end as _,
value: $value as _,
conditional_range: true,
});
}
}};
}
/// Try to unwrap an expression, returning if not possible.
///
/// This is similar to the `?` operator, but does not perform `.into()`. Because
/// of this, it is usable in `const` contexts. | Err(error) => return Err(error),
}
};
}
/// Try to unwrap an expression, returning if not possible.
///
/// This is similar to the `?` operator, but is usable in `const` contexts.
macro_rules! const_try_opt {
($e:expr) => {
match $e {
Some(value) => value,
None => return None,
}
};
}
/// The [`Date`] struct and its associated `impl`s.
mod date;
/// The [`Duration`] struct and its associated `impl`s.
mod duration;
/// Various error types returned by methods in the time crate.
pub mod error;
/// Extension traits.
pub mod ext;
pub mod format_description;
mod formatting;
mod hack;
/// The [`Instant`] struct and its associated `impl`s.
#[cfg(feature = "std")]
#[cfg_attr(__time_03_docs, doc(cfg(feature = "std")))]
mod instant;
/// The [`OffsetDateTime`] struct and its associated `impl`s.
mod offset_date_time;
/// The [`PrimitiveDateTime`] struct and its associated `impl`s.
mod primitive_date_time;
#[cfg(feature = "quickcheck")]
#[cfg_attr(__time_03_docs, doc(cfg(feature = "quickcheck")))]
mod quickcheck;
#[cfg(feature = "rand")]
#[cfg_attr(__time_03_docs, doc(cfg(feature = "rand")))]
mod rand;
#[cfg(feature = "serde")]
#[cfg_attr(__time_03_docs, doc(cfg(feature = "serde")))]
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub mod serde;
/// The [`Time`] struct and its associated `impl`s.
mod time;
/// The [`UtcOffset`] struct and its associated `impl`s.
mod utc_offset;
pub mod util;
/// Days of the week.
mod weekday;
/// Macros to construct statically known values.
#[cfg(feature = "macros")]
#[cfg_attr(__time_03_docs, doc(cfg(feature = "macros")))]
pub mod macros {
/// Construct a [`Date`](crate::Date) with a statically known value.
///
/// The resulting expression can be used in `const` or `static` declarations.
///
/// Three formats are supported: year-week-weekday, year-ordinal, and
/// year-month-day.
///
/// ```rust
/// # use time::{Date, Weekday::*};
/// # use time_macros::date;
/// assert_eq!(
/// date!("2020-W01-3"),
/// Date::from_iso_week_date(2020, 1, Wednesday)?
/// );
/// assert_eq!(date!("2020-001"), Date::from_ordinal_date(2020, 1)?);
/// assert_eq!(date!("2020-01-01"), Date::from_calendar_date(2020, 1, 1)?);
/// # Ok::<_, time::Error>(())
/// ```
pub use time_macros::date;
/// Construct a [`PrimitiveDateTime`] or [`OffsetDateTime`] with a
/// statically known value.
///
/// The resulting expression can be used in `const` or `static` declarations.
///
/// The syntax accepted by this macro is the same as [`date!`] and
/// [`time!`], with an optional [`offset!`], all space-separated. If an
/// [`offset!`] is provided, the resulting value will be an
/// [`OffsetDateTime`]; otherwise it will be a [`PrimitiveDateTime`].
///
/// [`date!`]: crate::macros::date
/// [`time!`]: crate::macros::time
/// [`offset!`]: crate::macros::offset
/// [`OffsetDateTime`]: crate::OffsetDateTime
/// [`PrimitiveDateTime`]: crate::PrimitiveDateTime
pub use time_macros::datetime;
/// Construct a [`UtcOffset`](crate::UtcOffset) with a statically known value.
///
/// The resulting expression can be used in `const` or `static` declarations.
///
/// A sign and the hour must be provided; minutes and seconds default to zero.
/// `UTC` (both uppercase and lowercase) is also allowed.
///
/// ```rust
/// # use time::UtcOffset;
/// # use time_macros::offset;
/// assert_eq!(offset!("UTC"), UtcOffset::from_hms(0, 0, 0)?);
/// assert_eq!(offset!("utc"), UtcOffset::from_hms(0, 0, 0)?);
/// assert_eq!(offset!("+0"), UtcOffset::from_hms(0, 0, 0)?);
/// assert_eq!(offset!("+1"), UtcOffset::from_hms(1, 0, 0)?);
/// assert_eq!(offset!("-1"), UtcOffset::from_hms(-1, 0, 0)?);
/// assert_eq!(offset!("+1:30"), UtcOffset::from_hms(1, 30, 0)?);
/// assert_eq!(offset!("-1:30"), UtcOffset::from_hms(-1, -30, 0)?);
/// assert_eq!(offset!("+1:30:59"), UtcOffset::from_hms(1, 30, 59)?);
/// assert_eq!(offset!("-1:30:59"), UtcOffset::from_hms(-1, -30, -59)?);
/// assert_eq!(offset!("+23:59:59"), UtcOffset::from_hms(23, 59, 59)?);
/// assert_eq!(offset!("-23:59:59"), UtcOffset::from_hms(-23, -59, -59)?);
/// # Ok::<_, time::Error>(())
/// ```
pub use time_macros::offset;
/// Construct a [`Time`](crate::Time) with a statically known value.
///
/// The resulting expression can be used in `const` or `static` declarations.
///
/// Hours and minutes must be provided, while seconds defaults to zero. AM/PM is
/// allowed (either uppercase or lowercase). Any number of subsecond digits may
/// be provided (though any past nine will be discarded).
///
/// All components are validated at compile-time. An error will be raised if any
/// value is invalid.
///
/// ```rust
/// # use time::Time;
/// # use time_macros::time;
/// assert_eq!(time!("0:00"), Time::from_hms(0, 0, 0)?);
/// assert_eq!(time!("1:02:03"), Time::from_hms(1, 2, 3)?);
/// assert_eq!(
/// time!("1:02:03.004_005_006"),
/// Time::from_hms_nano(1, 2, 3, 4_005_006)?
/// );
/// assert_eq!(time!("12:00 am"), Time::from_hms(0, 0, 0)?);
/// assert_eq!(time!("1:02:03 am"), Time::from_hms(1, 2, 3)?);
/// assert_eq!(
/// time!("1:02:03.004_005_006 am"),
/// Time::from_hms_nano(1, 2, 3, 4_005_006)?
/// );
/// assert_eq!(time!("12:00 pm"), Time::from_hms(12, 0, 0)?);
/// assert_eq!(time!("1:02:03 pm"), Time::from_hms(13, 2, 3)?);
/// assert_eq!(
/// time!("1:02:03.004_005_006 pm"),
/// Time::from_hms_nano(13, 2, 3, 4_005_006)?
/// );
/// # Ok::<_, time::Error>(())
/// ```
pub use time_macros::time;
}
pub use crate::time::Time;
pub use date::Date;
pub use duration::Duration;
pub use error::Error;
#[cfg(feature = "std")]
pub use instant::Instant;
pub use offset_date_time::OffsetDateTime;
pub use primitive_date_time::PrimitiveDateTime;
pub use utc_offset::UtcOffset;
pub use weekday::Weekday;
/// An alias for [`std::result::Result`] with a generic error from the time
/// crate.
pub type Result<T> = core::result::Result<T, Error>; | macro_rules! const_try {
($e:expr) => {
match $e {
Ok(value) => value, | random_line_split |
durconn.go | package stanmsg
import (
"context"
"fmt"
"sync"
"time"
"github.com/huangjunwen/golibs/logr"
"github.com/huangjunwen/golibs/taskrunner"
"github.com/huangjunwen/golibs/taskrunner/limitedrunner"
"github.com/nats-io/nats.go"
"github.com/nats-io/stan.go"
"github.com/rs/xid"
"google.golang.org/protobuf/proto"
npenc "github.com/huangjunwen/nproto/v2/enc"
npmd "github.com/huangjunwen/nproto/v2/md"
. "github.com/huangjunwen/nproto/v2/msg"
nppbmd "github.com/huangjunwen/nproto/v2/pb/md"
nppbmsg "github.com/huangjunwen/nproto/v2/pb/msg"
)
// DurConn is 'durable connection' to nats-streaming-server which handles reconnect and resubscription automatically.
type DurConn struct {
// Immutable fields.
nc *nats.Conn
clusterID string
ctx context.Context
subjectPrefix string
runner taskrunner.TaskRunner // runner for handlers.
logger logr.Logger
reconnectWait time.Duration
subRetryWait time.Duration
stanOptPingInterval int
stanOptPingMaxOut int
stanOptPubAckWait time.Duration
connectCb func(stan.Conn)
disconnectCb func(stan.Conn)
subscribeCb func(sc stan.Conn, spec MsgSpec)
wg sync.WaitGroup // wait for go routines
connectMu sync.Mutex // at most on connect can be run at any time
mu sync.RWMutex // to protect mutable fields
// Mutable fields.
closed bool
subs map[[2]string]*subscription // (subject, queue) -> subscriptioin
sc stan.Conn // nil if DurConn has not connected or is reconnecting
scStaleCh chan struct{} // pair with sc, it will closed when sc is stale
}
type subscription struct {
spec MsgSpec
queue string
handler MsgHandler
stanOptions []stan.SubscriptionOption
decoder npenc.Decoder
}
// DurConnOption is option in creating DurConn.
type DurConnOption func(*DurConn) error
// SubOption is option in subscription.
type SubOption func(*subscription) error
// NewDurConn creates a new DurConn. `nc` must have MaxReconnect < 0
// (e.g. never give up trying to reconnect).
// `clusterID` is the nats-streaming-server's cluster id.
func NewDurConn(nc *nats.Conn, clusterID string, opts ...DurConnOption) (durConn *DurConn, err error) {
if nc.Opts.MaxReconnect >= 0 {
return nil, ErrNCMaxReconnect
}
dc := &DurConn{
nc: nc,
clusterID: clusterID,
ctx: context.Background(),
subjectPrefix: DefaultSubjectPrefix,
runner: limitedrunner.Must(),
logger: logr.Nop,
reconnectWait: DefaultReconnectWait,
subRetryWait: DefaultSubRetryWait,
stanOptPingInterval: DefaultStanPingInterval,
stanOptPingMaxOut: DefaultStanPingMaxOut,
stanOptPubAckWait: DefaultStanPubAckWait,
connectCb: func(_ stan.Conn) {},
disconnectCb: func(_ stan.Conn) {},
subscribeCb: func(_ stan.Conn, _ MsgSpec) {},
subs: make(map[[2]string]*subscription),
}
defer func() {
if err != nil {
dc.runner.Close()
}
}()
for _, opt := range opts {
if err = opt(dc); err != nil {
return nil, err
}
}
dc.goConnect(false)
return dc, nil
}
// NewPublisher creates a publisher using specified encoder.
func (dc *DurConn) NewPublisher(encoder npenc.Encoder) MsgAsyncPublisherFunc {
return func(ctx context.Context, spec MsgSpec, msg interface{}, cb func(error)) error {
return dc.publishAsync(ctx, spec, msg, encoder, cb)
}
}
// NewSubscriber creates a subscriber using specified decoder.
func (dc *DurConn) NewSubscriber(decoder npenc.Decoder) MsgSubscriberFunc {
return func(spec MsgSpec, queue string, handler MsgHandler, opts ...interface{}) error {
sub := &subscription{
spec: spec,
queue: queue,
handler: handler,
stanOptions: []stan.SubscriptionOption{},
decoder: decoder,
}
for _, opt := range opts {
option, ok := opt.(SubOption)
if !ok {
return fmt.Errorf("Expect SubOption but got %v", opt)
}
if err := option(sub); err != nil {
return err
}
}
return dc.subscribeOne(sub)
}
}
func (dc *DurConn) goConnect(wait bool) {
dc.wg.Add(1)
go func() {
defer dc.wg.Done()
dc.connect(wait)
}()
}
func (dc *DurConn) connect(wait bool) {
if wait |
dc.connectMu.Lock()
defer dc.connectMu.Unlock()
// Reset connection: release old connection
{
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
dc.logger.Info("closed when reseting connection")
return
}
sc := dc.sc
scStaleCh := dc.scStaleCh
dc.sc = nil
dc.scStaleCh = nil
dc.mu.Unlock()
if sc != nil {
sc.Close()
close(scStaleCh)
}
}
// Connect
var sc stan.Conn
{
opts := []stan.Option{
stan.Pings(dc.stanOptPingInterval, dc.stanOptPingMaxOut),
stan.PubAckWait(dc.stanOptPubAckWait),
stan.NatsConn(dc.nc),
// NOTE: ConnectionLostHandler is used to be notified if the Streaming connection
// is closed due to unexpected errors.
// The callback will not be invoked on normal Conn.Close().
stan.SetConnectionLostHandler(func(sc stan.Conn, err error) {
dc.logger.Error(err, "connection lost")
dc.disconnectCb(sc)
// reconnect after a while
dc.goConnect(true)
}),
}
// NOTE: Use a UUID-like id as client id sine we only use durable queue subscription.
// See: https://groups.google.com/d/msg/natsio/SkWAdSU1AgU/tCX9f3ONBQAJ
var err error
sc, err = stan.Connect(dc.clusterID, xid.New().String(), opts...)
if err != nil {
// reconnect immediately.
dc.logger.Error(err, "connect failed")
dc.goConnect(true)
return
}
dc.logger.Info("connect successfully")
dc.connectCb(sc)
}
// Update new connection
var subs []*subscription
scStaleCh := make(chan struct{})
{
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
sc.Close()
close(scStaleCh)
dc.logger.Info("closed when updating connection")
return
}
dc.sc = sc
dc.scStaleCh = scStaleCh
for _, sub := range dc.subs {
subs = append(subs, sub)
}
dc.mu.Unlock()
}
// Re-subscribe
dc.goSubscribeAll(subs, sc, scStaleCh)
}
func (dc *DurConn) publishAsync(ctx context.Context, spec MsgSpec, msg interface{}, encoder npenc.Encoder, cb func(error)) error {
if err := AssertMsgType(spec, msg); err != nil {
return err
}
m := &nppbmsg.MessageWithMD{
MetaData: nppbmd.NewMetaData(npmd.MDFromOutgoingContext(ctx)),
}
if err := encoder.EncodeData(msg, &m.MsgFormat, &m.MsgBytes); err != nil {
return err
}
mData, err := proto.Marshal(m)
if err != nil {
return err
}
dc.mu.RLock()
closed := dc.closed
sc := dc.sc
dc.mu.RUnlock()
if closed {
return ErrClosed
}
if sc == nil {
return ErrNotConnected
}
// Publish.
// TODO: sc.PublishAsync maybe block in some rare condition:
// see https://github.com/nats-io/stan.go/issues/210
_, err = sc.PublishAsync(
subjectFormat(dc.subjectPrefix, spec.SubjectName()),
mData,
func(_ string, err error) { cb(err) },
)
return err
}
func (dc *DurConn) subscribeOne(sub *subscription) error {
key := [2]string{sub.spec.SubjectName(), sub.queue}
dc.mu.Lock()
defer dc.mu.Unlock()
if dc.closed {
return ErrClosed
}
if _, ok := dc.subs[key]; ok {
return ErrDupSubscription
}
// subscribe if sc is not nil
if dc.sc != nil {
if err := dc.subscribe(sub, dc.sc); err != nil {
return err
}
}
dc.subs[key] = sub
return nil
}
func (dc *DurConn) goSubscribeAll(subs []*subscription, sc stan.Conn, scStaleCh chan struct{}) {
dc.wg.Add(1)
go func() {
defer dc.wg.Done()
dc.subscribeAll(subs, sc, scStaleCh)
}()
}
// NOTE: subscribe until all success or scStaleCh closed.
func (dc *DurConn) subscribeAll(subs []*subscription, sc stan.Conn, scStaleCh chan struct{}) {
success := make([]bool, len(subs))
for {
n := 0
for i, sub := range subs {
if success[i] {
// Already success.
n++
continue
}
if err := dc.subscribe(sub, sc); err != nil {
continue
}
success[i] = true
n++
select {
case <-scStaleCh:
dc.logger.Info("subscribe stale")
return
default:
}
}
if n >= len(subs) {
// All success.
return
}
select {
case <-scStaleCh:
dc.logger.Info("subscribe stale during retry wait")
return
case <-time.After(dc.subRetryWait):
}
}
}
func (dc *DurConn) subscribe(sub *subscription, sc stan.Conn) error {
fullSubject := subjectFormat(dc.subjectPrefix, sub.spec.SubjectName())
opts := []stan.SubscriptionOption{}
opts = append(opts, sub.stanOptions...)
opts = append(opts, stan.SetManualAckMode()) // Use manual ack mode.
opts = append(opts, stan.DurableName(sub.queue)) // Queue as durable name.
_, err := sc.QueueSubscribe(fullSubject, sub.queue, dc.msgHandler(sub), opts...)
if err != nil {
dc.logger.Error(err, "subscribe error", "subject", sub.spec.SubjectName(), "queue", sub.queue)
return err
}
dc.logger.Info("subscribe successfully", "subject", sub.spec.SubjectName(), "queue", sub.queue)
dc.subscribeCb(sc, sub.spec)
return nil
}
func (dc *DurConn) msgHandler(sub *subscription) stan.MsgHandler {
logger := dc.logger.WithValues("subject", sub.spec.SubjectName(), "queue", sub.queue)
return func(stanMsg *stan.Msg) {
if err := dc.runner.Submit(func() {
defer func() {
if e := recover(); e != nil {
err, ok := e.(error)
if !ok {
err = fmt.Errorf("%+v", e)
}
logger.Error(err, "handler panic")
}
}()
m := &nppbmsg.MessageWithMD{}
if err := proto.Unmarshal(stanMsg.Data, m); err != nil {
logger.Error(err, "unmarshal msg error", "data", stanMsg.Data)
return
}
msg := sub.spec.NewMsg()
if err := sub.decoder.DecodeData(m.MsgFormat, m.MsgBytes, msg); err != nil {
logger.Error(err, "decode msg error")
return
}
ctx := dc.ctx
if len(m.MetaData) != 0 {
ctx = npmd.NewIncomingContextWithMD(ctx, nppbmd.MetaData(m.MetaData))
}
if err := sub.handler(ctx, msg); err != nil {
// NOTE: do not print handle's error log. Let the handler do it itself.
return
}
// Ack if no error.
stanMsg.Ack()
return
}); err != nil {
logger.Error(err, "submit task error")
}
}
}
// Close shutdowns the DurConn: closes handler runner and disconnects from nats-streaming-server.
func (dc *DurConn) Close() {
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
return
}
sc := dc.sc
scStaleCh := dc.scStaleCh
dc.sc = nil
dc.scStaleCh = nil
dc.closed = true
dc.mu.Unlock()
if sc != nil {
sc.Close()
close(scStaleCh)
}
dc.runner.Close()
dc.wg.Wait()
}
| {
time.Sleep(dc.reconnectWait)
} | conditional_block |
durconn.go | package stanmsg
import (
"context"
"fmt"
"sync"
"time"
"github.com/huangjunwen/golibs/logr"
"github.com/huangjunwen/golibs/taskrunner"
"github.com/huangjunwen/golibs/taskrunner/limitedrunner"
"github.com/nats-io/nats.go"
"github.com/nats-io/stan.go"
"github.com/rs/xid"
"google.golang.org/protobuf/proto"
npenc "github.com/huangjunwen/nproto/v2/enc"
npmd "github.com/huangjunwen/nproto/v2/md"
. "github.com/huangjunwen/nproto/v2/msg"
nppbmd "github.com/huangjunwen/nproto/v2/pb/md"
nppbmsg "github.com/huangjunwen/nproto/v2/pb/msg"
)
// DurConn is 'durable connection' to nats-streaming-server which handles reconnect and resubscription automatically.
type DurConn struct {
// Immutable fields.
nc *nats.Conn
clusterID string
ctx context.Context
subjectPrefix string
runner taskrunner.TaskRunner // runner for handlers.
logger logr.Logger
reconnectWait time.Duration
subRetryWait time.Duration
stanOptPingInterval int
stanOptPingMaxOut int
stanOptPubAckWait time.Duration
connectCb func(stan.Conn)
disconnectCb func(stan.Conn)
subscribeCb func(sc stan.Conn, spec MsgSpec)
wg sync.WaitGroup // wait for go routines
connectMu sync.Mutex // at most on connect can be run at any time
mu sync.RWMutex // to protect mutable fields
// Mutable fields.
closed bool
subs map[[2]string]*subscription // (subject, queue) -> subscriptioin
sc stan.Conn // nil if DurConn has not connected or is reconnecting
scStaleCh chan struct{} // pair with sc, it will closed when sc is stale
}
type subscription struct {
spec MsgSpec
queue string
handler MsgHandler
stanOptions []stan.SubscriptionOption
decoder npenc.Decoder
}
// DurConnOption is option in creating DurConn.
type DurConnOption func(*DurConn) error
// SubOption is option in subscription.
type SubOption func(*subscription) error
// NewDurConn creates a new DurConn. `nc` must have MaxReconnect < 0
// (e.g. never give up trying to reconnect).
// `clusterID` is the nats-streaming-server's cluster id.
func NewDurConn(nc *nats.Conn, clusterID string, opts ...DurConnOption) (durConn *DurConn, err error) {
if nc.Opts.MaxReconnect >= 0 {
return nil, ErrNCMaxReconnect
}
dc := &DurConn{
nc: nc,
clusterID: clusterID,
ctx: context.Background(),
subjectPrefix: DefaultSubjectPrefix,
runner: limitedrunner.Must(),
logger: logr.Nop,
reconnectWait: DefaultReconnectWait,
subRetryWait: DefaultSubRetryWait,
stanOptPingInterval: DefaultStanPingInterval, | subs: make(map[[2]string]*subscription),
}
defer func() {
if err != nil {
dc.runner.Close()
}
}()
for _, opt := range opts {
if err = opt(dc); err != nil {
return nil, err
}
}
dc.goConnect(false)
return dc, nil
}
// NewPublisher creates a publisher using specified encoder.
func (dc *DurConn) NewPublisher(encoder npenc.Encoder) MsgAsyncPublisherFunc {
return func(ctx context.Context, spec MsgSpec, msg interface{}, cb func(error)) error {
return dc.publishAsync(ctx, spec, msg, encoder, cb)
}
}
// NewSubscriber creates a subscriber using specified decoder.
func (dc *DurConn) NewSubscriber(decoder npenc.Decoder) MsgSubscriberFunc {
return func(spec MsgSpec, queue string, handler MsgHandler, opts ...interface{}) error {
sub := &subscription{
spec: spec,
queue: queue,
handler: handler,
stanOptions: []stan.SubscriptionOption{},
decoder: decoder,
}
for _, opt := range opts {
option, ok := opt.(SubOption)
if !ok {
return fmt.Errorf("Expect SubOption but got %v", opt)
}
if err := option(sub); err != nil {
return err
}
}
return dc.subscribeOne(sub)
}
}
func (dc *DurConn) goConnect(wait bool) {
dc.wg.Add(1)
go func() {
defer dc.wg.Done()
dc.connect(wait)
}()
}
func (dc *DurConn) connect(wait bool) {
if wait {
time.Sleep(dc.reconnectWait)
}
dc.connectMu.Lock()
defer dc.connectMu.Unlock()
// Reset connection: release old connection
{
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
dc.logger.Info("closed when reseting connection")
return
}
sc := dc.sc
scStaleCh := dc.scStaleCh
dc.sc = nil
dc.scStaleCh = nil
dc.mu.Unlock()
if sc != nil {
sc.Close()
close(scStaleCh)
}
}
// Connect
var sc stan.Conn
{
opts := []stan.Option{
stan.Pings(dc.stanOptPingInterval, dc.stanOptPingMaxOut),
stan.PubAckWait(dc.stanOptPubAckWait),
stan.NatsConn(dc.nc),
// NOTE: ConnectionLostHandler is used to be notified if the Streaming connection
// is closed due to unexpected errors.
// The callback will not be invoked on normal Conn.Close().
stan.SetConnectionLostHandler(func(sc stan.Conn, err error) {
dc.logger.Error(err, "connection lost")
dc.disconnectCb(sc)
// reconnect after a while
dc.goConnect(true)
}),
}
// NOTE: Use a UUID-like id as client id sine we only use durable queue subscription.
// See: https://groups.google.com/d/msg/natsio/SkWAdSU1AgU/tCX9f3ONBQAJ
var err error
sc, err = stan.Connect(dc.clusterID, xid.New().String(), opts...)
if err != nil {
// reconnect immediately.
dc.logger.Error(err, "connect failed")
dc.goConnect(true)
return
}
dc.logger.Info("connect successfully")
dc.connectCb(sc)
}
// Update new connection
var subs []*subscription
scStaleCh := make(chan struct{})
{
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
sc.Close()
close(scStaleCh)
dc.logger.Info("closed when updating connection")
return
}
dc.sc = sc
dc.scStaleCh = scStaleCh
for _, sub := range dc.subs {
subs = append(subs, sub)
}
dc.mu.Unlock()
}
// Re-subscribe
dc.goSubscribeAll(subs, sc, scStaleCh)
}
func (dc *DurConn) publishAsync(ctx context.Context, spec MsgSpec, msg interface{}, encoder npenc.Encoder, cb func(error)) error {
if err := AssertMsgType(spec, msg); err != nil {
return err
}
m := &nppbmsg.MessageWithMD{
MetaData: nppbmd.NewMetaData(npmd.MDFromOutgoingContext(ctx)),
}
if err := encoder.EncodeData(msg, &m.MsgFormat, &m.MsgBytes); err != nil {
return err
}
mData, err := proto.Marshal(m)
if err != nil {
return err
}
dc.mu.RLock()
closed := dc.closed
sc := dc.sc
dc.mu.RUnlock()
if closed {
return ErrClosed
}
if sc == nil {
return ErrNotConnected
}
// Publish.
// TODO: sc.PublishAsync maybe block in some rare condition:
// see https://github.com/nats-io/stan.go/issues/210
_, err = sc.PublishAsync(
subjectFormat(dc.subjectPrefix, spec.SubjectName()),
mData,
func(_ string, err error) { cb(err) },
)
return err
}
func (dc *DurConn) subscribeOne(sub *subscription) error {
key := [2]string{sub.spec.SubjectName(), sub.queue}
dc.mu.Lock()
defer dc.mu.Unlock()
if dc.closed {
return ErrClosed
}
if _, ok := dc.subs[key]; ok {
return ErrDupSubscription
}
// subscribe if sc is not nil
if dc.sc != nil {
if err := dc.subscribe(sub, dc.sc); err != nil {
return err
}
}
dc.subs[key] = sub
return nil
}
func (dc *DurConn) goSubscribeAll(subs []*subscription, sc stan.Conn, scStaleCh chan struct{}) {
dc.wg.Add(1)
go func() {
defer dc.wg.Done()
dc.subscribeAll(subs, sc, scStaleCh)
}()
}
// NOTE: subscribe until all success or scStaleCh closed.
func (dc *DurConn) subscribeAll(subs []*subscription, sc stan.Conn, scStaleCh chan struct{}) {
success := make([]bool, len(subs))
for {
n := 0
for i, sub := range subs {
if success[i] {
// Already success.
n++
continue
}
if err := dc.subscribe(sub, sc); err != nil {
continue
}
success[i] = true
n++
select {
case <-scStaleCh:
dc.logger.Info("subscribe stale")
return
default:
}
}
if n >= len(subs) {
// All success.
return
}
select {
case <-scStaleCh:
dc.logger.Info("subscribe stale during retry wait")
return
case <-time.After(dc.subRetryWait):
}
}
}
func (dc *DurConn) subscribe(sub *subscription, sc stan.Conn) error {
fullSubject := subjectFormat(dc.subjectPrefix, sub.spec.SubjectName())
opts := []stan.SubscriptionOption{}
opts = append(opts, sub.stanOptions...)
opts = append(opts, stan.SetManualAckMode()) // Use manual ack mode.
opts = append(opts, stan.DurableName(sub.queue)) // Queue as durable name.
_, err := sc.QueueSubscribe(fullSubject, sub.queue, dc.msgHandler(sub), opts...)
if err != nil {
dc.logger.Error(err, "subscribe error", "subject", sub.spec.SubjectName(), "queue", sub.queue)
return err
}
dc.logger.Info("subscribe successfully", "subject", sub.spec.SubjectName(), "queue", sub.queue)
dc.subscribeCb(sc, sub.spec)
return nil
}
func (dc *DurConn) msgHandler(sub *subscription) stan.MsgHandler {
logger := dc.logger.WithValues("subject", sub.spec.SubjectName(), "queue", sub.queue)
return func(stanMsg *stan.Msg) {
if err := dc.runner.Submit(func() {
defer func() {
if e := recover(); e != nil {
err, ok := e.(error)
if !ok {
err = fmt.Errorf("%+v", e)
}
logger.Error(err, "handler panic")
}
}()
m := &nppbmsg.MessageWithMD{}
if err := proto.Unmarshal(stanMsg.Data, m); err != nil {
logger.Error(err, "unmarshal msg error", "data", stanMsg.Data)
return
}
msg := sub.spec.NewMsg()
if err := sub.decoder.DecodeData(m.MsgFormat, m.MsgBytes, msg); err != nil {
logger.Error(err, "decode msg error")
return
}
ctx := dc.ctx
if len(m.MetaData) != 0 {
ctx = npmd.NewIncomingContextWithMD(ctx, nppbmd.MetaData(m.MetaData))
}
if err := sub.handler(ctx, msg); err != nil {
// NOTE: do not print handle's error log. Let the handler do it itself.
return
}
// Ack if no error.
stanMsg.Ack()
return
}); err != nil {
logger.Error(err, "submit task error")
}
}
}
// Close shutdowns the DurConn: closes handler runner and disconnects from nats-streaming-server.
func (dc *DurConn) Close() {
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
return
}
sc := dc.sc
scStaleCh := dc.scStaleCh
dc.sc = nil
dc.scStaleCh = nil
dc.closed = true
dc.mu.Unlock()
if sc != nil {
sc.Close()
close(scStaleCh)
}
dc.runner.Close()
dc.wg.Wait()
} | stanOptPingMaxOut: DefaultStanPingMaxOut,
stanOptPubAckWait: DefaultStanPubAckWait,
connectCb: func(_ stan.Conn) {},
disconnectCb: func(_ stan.Conn) {},
subscribeCb: func(_ stan.Conn, _ MsgSpec) {}, | random_line_split |
durconn.go | package stanmsg
import (
"context"
"fmt"
"sync"
"time"
"github.com/huangjunwen/golibs/logr"
"github.com/huangjunwen/golibs/taskrunner"
"github.com/huangjunwen/golibs/taskrunner/limitedrunner"
"github.com/nats-io/nats.go"
"github.com/nats-io/stan.go"
"github.com/rs/xid"
"google.golang.org/protobuf/proto"
npenc "github.com/huangjunwen/nproto/v2/enc"
npmd "github.com/huangjunwen/nproto/v2/md"
. "github.com/huangjunwen/nproto/v2/msg"
nppbmd "github.com/huangjunwen/nproto/v2/pb/md"
nppbmsg "github.com/huangjunwen/nproto/v2/pb/msg"
)
// DurConn is 'durable connection' to nats-streaming-server which handles reconnect and resubscription automatically.
type DurConn struct {
// Immutable fields.
nc *nats.Conn
clusterID string
ctx context.Context
subjectPrefix string
runner taskrunner.TaskRunner // runner for handlers.
logger logr.Logger
reconnectWait time.Duration
subRetryWait time.Duration
stanOptPingInterval int
stanOptPingMaxOut int
stanOptPubAckWait time.Duration
connectCb func(stan.Conn)
disconnectCb func(stan.Conn)
subscribeCb func(sc stan.Conn, spec MsgSpec)
wg sync.WaitGroup // wait for go routines
connectMu sync.Mutex // at most on connect can be run at any time
mu sync.RWMutex // to protect mutable fields
// Mutable fields.
closed bool
subs map[[2]string]*subscription // (subject, queue) -> subscriptioin
sc stan.Conn // nil if DurConn has not connected or is reconnecting
scStaleCh chan struct{} // pair with sc, it will closed when sc is stale
}
type subscription struct {
spec MsgSpec
queue string
handler MsgHandler
stanOptions []stan.SubscriptionOption
decoder npenc.Decoder
}
// DurConnOption is option in creating DurConn.
type DurConnOption func(*DurConn) error
// SubOption is option in subscription.
type SubOption func(*subscription) error
// NewDurConn creates a new DurConn. `nc` must have MaxReconnect < 0
// (e.g. never give up trying to reconnect).
// `clusterID` is the nats-streaming-server's cluster id.
func NewDurConn(nc *nats.Conn, clusterID string, opts ...DurConnOption) (durConn *DurConn, err error) {
if nc.Opts.MaxReconnect >= 0 {
return nil, ErrNCMaxReconnect
}
dc := &DurConn{
nc: nc,
clusterID: clusterID,
ctx: context.Background(),
subjectPrefix: DefaultSubjectPrefix,
runner: limitedrunner.Must(),
logger: logr.Nop,
reconnectWait: DefaultReconnectWait,
subRetryWait: DefaultSubRetryWait,
stanOptPingInterval: DefaultStanPingInterval,
stanOptPingMaxOut: DefaultStanPingMaxOut,
stanOptPubAckWait: DefaultStanPubAckWait,
connectCb: func(_ stan.Conn) {},
disconnectCb: func(_ stan.Conn) {},
subscribeCb: func(_ stan.Conn, _ MsgSpec) {},
subs: make(map[[2]string]*subscription),
}
defer func() {
if err != nil {
dc.runner.Close()
}
}()
for _, opt := range opts {
if err = opt(dc); err != nil {
return nil, err
}
}
dc.goConnect(false)
return dc, nil
}
// NewPublisher creates a publisher using specified encoder.
func (dc *DurConn) NewPublisher(encoder npenc.Encoder) MsgAsyncPublisherFunc {
return func(ctx context.Context, spec MsgSpec, msg interface{}, cb func(error)) error {
return dc.publishAsync(ctx, spec, msg, encoder, cb)
}
}
// NewSubscriber creates a subscriber using specified decoder.
func (dc *DurConn) NewSubscriber(decoder npenc.Decoder) MsgSubscriberFunc {
return func(spec MsgSpec, queue string, handler MsgHandler, opts ...interface{}) error {
sub := &subscription{
spec: spec,
queue: queue,
handler: handler,
stanOptions: []stan.SubscriptionOption{},
decoder: decoder,
}
for _, opt := range opts {
option, ok := opt.(SubOption)
if !ok {
return fmt.Errorf("Expect SubOption but got %v", opt)
}
if err := option(sub); err != nil {
return err
}
}
return dc.subscribeOne(sub)
}
}
func (dc *DurConn) goConnect(wait bool) {
dc.wg.Add(1)
go func() {
defer dc.wg.Done()
dc.connect(wait)
}()
}
func (dc *DurConn) connect(wait bool) {
if wait {
time.Sleep(dc.reconnectWait)
}
dc.connectMu.Lock()
defer dc.connectMu.Unlock()
// Reset connection: release old connection
{
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
dc.logger.Info("closed when reseting connection")
return
}
sc := dc.sc
scStaleCh := dc.scStaleCh
dc.sc = nil
dc.scStaleCh = nil
dc.mu.Unlock()
if sc != nil {
sc.Close()
close(scStaleCh)
}
}
// Connect
var sc stan.Conn
{
opts := []stan.Option{
stan.Pings(dc.stanOptPingInterval, dc.stanOptPingMaxOut),
stan.PubAckWait(dc.stanOptPubAckWait),
stan.NatsConn(dc.nc),
// NOTE: ConnectionLostHandler is used to be notified if the Streaming connection
// is closed due to unexpected errors.
// The callback will not be invoked on normal Conn.Close().
stan.SetConnectionLostHandler(func(sc stan.Conn, err error) {
dc.logger.Error(err, "connection lost")
dc.disconnectCb(sc)
// reconnect after a while
dc.goConnect(true)
}),
}
// NOTE: Use a UUID-like id as client id sine we only use durable queue subscription.
// See: https://groups.google.com/d/msg/natsio/SkWAdSU1AgU/tCX9f3ONBQAJ
var err error
sc, err = stan.Connect(dc.clusterID, xid.New().String(), opts...)
if err != nil {
// reconnect immediately.
dc.logger.Error(err, "connect failed")
dc.goConnect(true)
return
}
dc.logger.Info("connect successfully")
dc.connectCb(sc)
}
// Update new connection
var subs []*subscription
scStaleCh := make(chan struct{})
{
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
sc.Close()
close(scStaleCh)
dc.logger.Info("closed when updating connection")
return
}
dc.sc = sc
dc.scStaleCh = scStaleCh
for _, sub := range dc.subs {
subs = append(subs, sub)
}
dc.mu.Unlock()
}
// Re-subscribe
dc.goSubscribeAll(subs, sc, scStaleCh)
}
func (dc *DurConn) publishAsync(ctx context.Context, spec MsgSpec, msg interface{}, encoder npenc.Encoder, cb func(error)) error {
if err := AssertMsgType(spec, msg); err != nil {
return err
}
m := &nppbmsg.MessageWithMD{
MetaData: nppbmd.NewMetaData(npmd.MDFromOutgoingContext(ctx)),
}
if err := encoder.EncodeData(msg, &m.MsgFormat, &m.MsgBytes); err != nil {
return err
}
mData, err := proto.Marshal(m)
if err != nil {
return err
}
dc.mu.RLock()
closed := dc.closed
sc := dc.sc
dc.mu.RUnlock()
if closed {
return ErrClosed
}
if sc == nil {
return ErrNotConnected
}
// Publish.
// TODO: sc.PublishAsync maybe block in some rare condition:
// see https://github.com/nats-io/stan.go/issues/210
_, err = sc.PublishAsync(
subjectFormat(dc.subjectPrefix, spec.SubjectName()),
mData,
func(_ string, err error) { cb(err) },
)
return err
}
func (dc *DurConn) subscribeOne(sub *subscription) error {
key := [2]string{sub.spec.SubjectName(), sub.queue}
dc.mu.Lock()
defer dc.mu.Unlock()
if dc.closed {
return ErrClosed
}
if _, ok := dc.subs[key]; ok {
return ErrDupSubscription
}
// subscribe if sc is not nil
if dc.sc != nil {
if err := dc.subscribe(sub, dc.sc); err != nil {
return err
}
}
dc.subs[key] = sub
return nil
}
func (dc *DurConn) goSubscribeAll(subs []*subscription, sc stan.Conn, scStaleCh chan struct{}) {
dc.wg.Add(1)
go func() {
defer dc.wg.Done()
dc.subscribeAll(subs, sc, scStaleCh)
}()
}
// NOTE: subscribe until all success or scStaleCh closed.
func (dc *DurConn) subscribeAll(subs []*subscription, sc stan.Conn, scStaleCh chan struct{}) {
success := make([]bool, len(subs))
for {
n := 0
for i, sub := range subs {
if success[i] {
// Already success.
n++
continue
}
if err := dc.subscribe(sub, sc); err != nil {
continue
}
success[i] = true
n++
select {
case <-scStaleCh:
dc.logger.Info("subscribe stale")
return
default:
}
}
if n >= len(subs) {
// All success.
return
}
select {
case <-scStaleCh:
dc.logger.Info("subscribe stale during retry wait")
return
case <-time.After(dc.subRetryWait):
}
}
}
func (dc *DurConn) subscribe(sub *subscription, sc stan.Conn) error {
fullSubject := subjectFormat(dc.subjectPrefix, sub.spec.SubjectName())
opts := []stan.SubscriptionOption{}
opts = append(opts, sub.stanOptions...)
opts = append(opts, stan.SetManualAckMode()) // Use manual ack mode.
opts = append(opts, stan.DurableName(sub.queue)) // Queue as durable name.
_, err := sc.QueueSubscribe(fullSubject, sub.queue, dc.msgHandler(sub), opts...)
if err != nil {
dc.logger.Error(err, "subscribe error", "subject", sub.spec.SubjectName(), "queue", sub.queue)
return err
}
dc.logger.Info("subscribe successfully", "subject", sub.spec.SubjectName(), "queue", sub.queue)
dc.subscribeCb(sc, sub.spec)
return nil
}
func (dc *DurConn) msgHandler(sub *subscription) stan.MsgHandler {
logger := dc.logger.WithValues("subject", sub.spec.SubjectName(), "queue", sub.queue)
return func(stanMsg *stan.Msg) {
if err := dc.runner.Submit(func() {
defer func() {
if e := recover(); e != nil {
err, ok := e.(error)
if !ok {
err = fmt.Errorf("%+v", e)
}
logger.Error(err, "handler panic")
}
}()
m := &nppbmsg.MessageWithMD{}
if err := proto.Unmarshal(stanMsg.Data, m); err != nil {
logger.Error(err, "unmarshal msg error", "data", stanMsg.Data)
return
}
msg := sub.spec.NewMsg()
if err := sub.decoder.DecodeData(m.MsgFormat, m.MsgBytes, msg); err != nil {
logger.Error(err, "decode msg error")
return
}
ctx := dc.ctx
if len(m.MetaData) != 0 {
ctx = npmd.NewIncomingContextWithMD(ctx, nppbmd.MetaData(m.MetaData))
}
if err := sub.handler(ctx, msg); err != nil {
// NOTE: do not print handle's error log. Let the handler do it itself.
return
}
// Ack if no error.
stanMsg.Ack()
return
}); err != nil {
logger.Error(err, "submit task error")
}
}
}
// Close shutdowns the DurConn: closes handler runner and disconnects from nats-streaming-server.
func (dc *DurConn) Close() | {
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
return
}
sc := dc.sc
scStaleCh := dc.scStaleCh
dc.sc = nil
dc.scStaleCh = nil
dc.closed = true
dc.mu.Unlock()
if sc != nil {
sc.Close()
close(scStaleCh)
}
dc.runner.Close()
dc.wg.Wait()
} | identifier_body | |
durconn.go | package stanmsg
import (
"context"
"fmt"
"sync"
"time"
"github.com/huangjunwen/golibs/logr"
"github.com/huangjunwen/golibs/taskrunner"
"github.com/huangjunwen/golibs/taskrunner/limitedrunner"
"github.com/nats-io/nats.go"
"github.com/nats-io/stan.go"
"github.com/rs/xid"
"google.golang.org/protobuf/proto"
npenc "github.com/huangjunwen/nproto/v2/enc"
npmd "github.com/huangjunwen/nproto/v2/md"
. "github.com/huangjunwen/nproto/v2/msg"
nppbmd "github.com/huangjunwen/nproto/v2/pb/md"
nppbmsg "github.com/huangjunwen/nproto/v2/pb/msg"
)
// DurConn is 'durable connection' to nats-streaming-server which handles reconnect and resubscription automatically.
type DurConn struct {
// Immutable fields.
nc *nats.Conn
clusterID string
ctx context.Context
subjectPrefix string
runner taskrunner.TaskRunner // runner for handlers.
logger logr.Logger
reconnectWait time.Duration
subRetryWait time.Duration
stanOptPingInterval int
stanOptPingMaxOut int
stanOptPubAckWait time.Duration
connectCb func(stan.Conn)
disconnectCb func(stan.Conn)
subscribeCb func(sc stan.Conn, spec MsgSpec)
wg sync.WaitGroup // wait for go routines
connectMu sync.Mutex // at most on connect can be run at any time
mu sync.RWMutex // to protect mutable fields
// Mutable fields.
closed bool
subs map[[2]string]*subscription // (subject, queue) -> subscriptioin
sc stan.Conn // nil if DurConn has not connected or is reconnecting
scStaleCh chan struct{} // pair with sc, it will closed when sc is stale
}
type subscription struct {
spec MsgSpec
queue string
handler MsgHandler
stanOptions []stan.SubscriptionOption
decoder npenc.Decoder
}
// DurConnOption is option in creating DurConn.
type DurConnOption func(*DurConn) error
// SubOption is option in subscription.
type SubOption func(*subscription) error
// NewDurConn creates a new DurConn. `nc` must have MaxReconnect < 0
// (e.g. never give up trying to reconnect).
// `clusterID` is the nats-streaming-server's cluster id.
func NewDurConn(nc *nats.Conn, clusterID string, opts ...DurConnOption) (durConn *DurConn, err error) {
if nc.Opts.MaxReconnect >= 0 {
return nil, ErrNCMaxReconnect
}
dc := &DurConn{
nc: nc,
clusterID: clusterID,
ctx: context.Background(),
subjectPrefix: DefaultSubjectPrefix,
runner: limitedrunner.Must(),
logger: logr.Nop,
reconnectWait: DefaultReconnectWait,
subRetryWait: DefaultSubRetryWait,
stanOptPingInterval: DefaultStanPingInterval,
stanOptPingMaxOut: DefaultStanPingMaxOut,
stanOptPubAckWait: DefaultStanPubAckWait,
connectCb: func(_ stan.Conn) {},
disconnectCb: func(_ stan.Conn) {},
subscribeCb: func(_ stan.Conn, _ MsgSpec) {},
subs: make(map[[2]string]*subscription),
}
defer func() {
if err != nil {
dc.runner.Close()
}
}()
for _, opt := range opts {
if err = opt(dc); err != nil {
return nil, err
}
}
dc.goConnect(false)
return dc, nil
}
// NewPublisher creates a publisher using specified encoder.
func (dc *DurConn) NewPublisher(encoder npenc.Encoder) MsgAsyncPublisherFunc {
return func(ctx context.Context, spec MsgSpec, msg interface{}, cb func(error)) error {
return dc.publishAsync(ctx, spec, msg, encoder, cb)
}
}
// NewSubscriber creates a subscriber using specified decoder.
func (dc *DurConn) NewSubscriber(decoder npenc.Decoder) MsgSubscriberFunc {
return func(spec MsgSpec, queue string, handler MsgHandler, opts ...interface{}) error {
sub := &subscription{
spec: spec,
queue: queue,
handler: handler,
stanOptions: []stan.SubscriptionOption{},
decoder: decoder,
}
for _, opt := range opts {
option, ok := opt.(SubOption)
if !ok {
return fmt.Errorf("Expect SubOption but got %v", opt)
}
if err := option(sub); err != nil {
return err
}
}
return dc.subscribeOne(sub)
}
}
func (dc *DurConn) goConnect(wait bool) {
dc.wg.Add(1)
go func() {
defer dc.wg.Done()
dc.connect(wait)
}()
}
func (dc *DurConn) connect(wait bool) {
if wait {
time.Sleep(dc.reconnectWait)
}
dc.connectMu.Lock()
defer dc.connectMu.Unlock()
// Reset connection: release old connection
{
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
dc.logger.Info("closed when reseting connection")
return
}
sc := dc.sc
scStaleCh := dc.scStaleCh
dc.sc = nil
dc.scStaleCh = nil
dc.mu.Unlock()
if sc != nil {
sc.Close()
close(scStaleCh)
}
}
// Connect
var sc stan.Conn
{
opts := []stan.Option{
stan.Pings(dc.stanOptPingInterval, dc.stanOptPingMaxOut),
stan.PubAckWait(dc.stanOptPubAckWait),
stan.NatsConn(dc.nc),
// NOTE: ConnectionLostHandler is used to be notified if the Streaming connection
// is closed due to unexpected errors.
// The callback will not be invoked on normal Conn.Close().
stan.SetConnectionLostHandler(func(sc stan.Conn, err error) {
dc.logger.Error(err, "connection lost")
dc.disconnectCb(sc)
// reconnect after a while
dc.goConnect(true)
}),
}
// NOTE: Use a UUID-like id as client id sine we only use durable queue subscription.
// See: https://groups.google.com/d/msg/natsio/SkWAdSU1AgU/tCX9f3ONBQAJ
var err error
sc, err = stan.Connect(dc.clusterID, xid.New().String(), opts...)
if err != nil {
// reconnect immediately.
dc.logger.Error(err, "connect failed")
dc.goConnect(true)
return
}
dc.logger.Info("connect successfully")
dc.connectCb(sc)
}
// Update new connection
var subs []*subscription
scStaleCh := make(chan struct{})
{
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
sc.Close()
close(scStaleCh)
dc.logger.Info("closed when updating connection")
return
}
dc.sc = sc
dc.scStaleCh = scStaleCh
for _, sub := range dc.subs {
subs = append(subs, sub)
}
dc.mu.Unlock()
}
// Re-subscribe
dc.goSubscribeAll(subs, sc, scStaleCh)
}
func (dc *DurConn) publishAsync(ctx context.Context, spec MsgSpec, msg interface{}, encoder npenc.Encoder, cb func(error)) error {
if err := AssertMsgType(spec, msg); err != nil {
return err
}
m := &nppbmsg.MessageWithMD{
MetaData: nppbmd.NewMetaData(npmd.MDFromOutgoingContext(ctx)),
}
if err := encoder.EncodeData(msg, &m.MsgFormat, &m.MsgBytes); err != nil {
return err
}
mData, err := proto.Marshal(m)
if err != nil {
return err
}
dc.mu.RLock()
closed := dc.closed
sc := dc.sc
dc.mu.RUnlock()
if closed {
return ErrClosed
}
if sc == nil {
return ErrNotConnected
}
// Publish.
// TODO: sc.PublishAsync maybe block in some rare condition:
// see https://github.com/nats-io/stan.go/issues/210
_, err = sc.PublishAsync(
subjectFormat(dc.subjectPrefix, spec.SubjectName()),
mData,
func(_ string, err error) { cb(err) },
)
return err
}
func (dc *DurConn) subscribeOne(sub *subscription) error {
key := [2]string{sub.spec.SubjectName(), sub.queue}
dc.mu.Lock()
defer dc.mu.Unlock()
if dc.closed {
return ErrClosed
}
if _, ok := dc.subs[key]; ok {
return ErrDupSubscription
}
// subscribe if sc is not nil
if dc.sc != nil {
if err := dc.subscribe(sub, dc.sc); err != nil {
return err
}
}
dc.subs[key] = sub
return nil
}
func (dc *DurConn) goSubscribeAll(subs []*subscription, sc stan.Conn, scStaleCh chan struct{}) {
dc.wg.Add(1)
go func() {
defer dc.wg.Done()
dc.subscribeAll(subs, sc, scStaleCh)
}()
}
// NOTE: subscribe until all success or scStaleCh closed.
func (dc *DurConn) | (subs []*subscription, sc stan.Conn, scStaleCh chan struct{}) {
success := make([]bool, len(subs))
for {
n := 0
for i, sub := range subs {
if success[i] {
// Already success.
n++
continue
}
if err := dc.subscribe(sub, sc); err != nil {
continue
}
success[i] = true
n++
select {
case <-scStaleCh:
dc.logger.Info("subscribe stale")
return
default:
}
}
if n >= len(subs) {
// All success.
return
}
select {
case <-scStaleCh:
dc.logger.Info("subscribe stale during retry wait")
return
case <-time.After(dc.subRetryWait):
}
}
}
func (dc *DurConn) subscribe(sub *subscription, sc stan.Conn) error {
fullSubject := subjectFormat(dc.subjectPrefix, sub.spec.SubjectName())
opts := []stan.SubscriptionOption{}
opts = append(opts, sub.stanOptions...)
opts = append(opts, stan.SetManualAckMode()) // Use manual ack mode.
opts = append(opts, stan.DurableName(sub.queue)) // Queue as durable name.
_, err := sc.QueueSubscribe(fullSubject, sub.queue, dc.msgHandler(sub), opts...)
if err != nil {
dc.logger.Error(err, "subscribe error", "subject", sub.spec.SubjectName(), "queue", sub.queue)
return err
}
dc.logger.Info("subscribe successfully", "subject", sub.spec.SubjectName(), "queue", sub.queue)
dc.subscribeCb(sc, sub.spec)
return nil
}
func (dc *DurConn) msgHandler(sub *subscription) stan.MsgHandler {
logger := dc.logger.WithValues("subject", sub.spec.SubjectName(), "queue", sub.queue)
return func(stanMsg *stan.Msg) {
if err := dc.runner.Submit(func() {
defer func() {
if e := recover(); e != nil {
err, ok := e.(error)
if !ok {
err = fmt.Errorf("%+v", e)
}
logger.Error(err, "handler panic")
}
}()
m := &nppbmsg.MessageWithMD{}
if err := proto.Unmarshal(stanMsg.Data, m); err != nil {
logger.Error(err, "unmarshal msg error", "data", stanMsg.Data)
return
}
msg := sub.spec.NewMsg()
if err := sub.decoder.DecodeData(m.MsgFormat, m.MsgBytes, msg); err != nil {
logger.Error(err, "decode msg error")
return
}
ctx := dc.ctx
if len(m.MetaData) != 0 {
ctx = npmd.NewIncomingContextWithMD(ctx, nppbmd.MetaData(m.MetaData))
}
if err := sub.handler(ctx, msg); err != nil {
// NOTE: do not print handle's error log. Let the handler do it itself.
return
}
// Ack if no error.
stanMsg.Ack()
return
}); err != nil {
logger.Error(err, "submit task error")
}
}
}
// Close shutdowns the DurConn: closes handler runner and disconnects from nats-streaming-server.
func (dc *DurConn) Close() {
dc.mu.Lock()
if dc.closed {
dc.mu.Unlock()
return
}
sc := dc.sc
scStaleCh := dc.scStaleCh
dc.sc = nil
dc.scStaleCh = nil
dc.closed = true
dc.mu.Unlock()
if sc != nil {
sc.Close()
close(scStaleCh)
}
dc.runner.Close()
dc.wg.Wait()
}
| subscribeAll | identifier_name |
circuitpusher.py | #! /usr/bin/python
"""
circuitpusher utilizes floodlight rest APIs to create a bidirectional circuit,
i.e., permanent flow entry, on all switches in route between two devices based
on IP addresses with specified priority.
Notes:
1. The circuit pusher currently only creates circuit with two IP end points
2. Prior to sending restAPI requests to the circuit pusher, the specified end
points must already been known to the controller (i.e., already have sent
packets on the network, easy way to assure this is to do a ping (to any
target) from the two hosts.
3. The current supported command syntax format is:
a) circuitpusher.py --controller={IP}:{rest port} --type ip --src {IP} --dst {IP} --add --name {circuit-name}
adds a new circuit between src and dst devices Currently ip circuit is supported. ARP is automatically supported.
Currently a simple circuit record storage is provided in a text file circuits.json in the working directory.
The file is not protected and does not clean itself between controller restarts. The file is needed for correct operation
and the user should make sure deleting the file when floodlight controller is restarted.
b) circuitpusher.py --controller={IP}:{rest port} --delete --name {circuit-name}
deletes a created circuit (as recorded in circuits.json) using the previously given name
@author kcwang
"""
import os
import sys
import subprocess
import json
import argparse
import io
import time
# parse circuit options. Currently supports add and delete actions.
# Syntax:
# circuitpusher --controller {IP:REST_PORT} --add --name {CIRCUIT_NAME} --type ip --src {IP} --dst {IP}
# circuitpusher --controller {IP:REST_PORT} --delete --name {CIRCUIT_NAME}
parser = argparse.ArgumentParser(description='Circuit Pusher')
parser.add_argument('--controller', dest='controllerRestIp', action='store', default='localhost:8080', help='controller IP:RESTport, e.g., localhost:8080 or A.B.C.D:8080')
parser.add_argument('--add', dest='action', action='store_const', const='add', default='add', help='action: add, delete')
parser.add_argument('--delete', dest='action', action='store_const', const='delete', default='add', help='action: add, delete')
parser.add_argument('--type', dest='type', action='store', default='ip', help='valid types: ip')
parser.add_argument('--src', dest='srcAddress', action='store', default='0.0.0.0', help='source address: if type=ip, A.B.C.D')
parser.add_argument('--dst', dest='dstAddress', action='store', default='0.0.0.0', help='destination address: if type=ip, A.B.C.D')
parser.add_argument('--name', dest='circuitName', action='store', default='circuit-1', help='name for circuit, e.g., circuit-1')
#user catches
if len(sys.argv) == 1:
command = './circuitpusher.py -h'
instruct = os.popen(command).read()
print instruct
exit(1)
elif sys.argv[1] == "help":
command = './circuitpusher.py -h'
instruct = os.popen(command).read()
print instruct
exit(1)
#parse arguments
args = parser.parse_args()
print args
controllerRestIp = args.controllerRestIp
# first check if a local file exists, which needs to be updated after add/delete
if os.path.exists('./circuits.json'):
|
else:
lines={}
if args.action=='add':
circuitDb = open('./circuits.json','a')
for line in lines:
data = json.loads(line)
if data['name']==(args.circuitName):
print "Circuit %s exists already. Use new name to create." % args.circuitName
sys.exit()
else:
circuitExists = False
# retrieve source and destination device attachment points
# using DeviceManager rest API
command = "curl -s http://%s/wm/device/?ipv4=%s" % (args.controllerRestIp, args.srcAddress)
result = os.popen(command).read()
parsedResult = json.loads(result)
print command+"\n"
sourceSwitch = parsedResult[0]['attachmentPoint'][0]['switchDPID']
sourcePort = parsedResult[0]['attachmentPoint'][0]['port']
command = "curl -s http://%s/wm/device/?ipv4=%s" % (args.controllerRestIp, args.dstAddress)
result = os.popen(command).read()
parsedResult = json.loads(result)
print command+"\n"
destSwitch = parsedResult[0]['attachmentPoint'][0]['switchDPID']
destPort = parsedResult[0]['attachmentPoint'][0]['port']
print "Creating circuit:"
print "from source device at switch %s port %s" % (sourceSwitch,sourcePort)
print "to destination device at switch %s port %s"% (destSwitch,destPort)
# retrieving route from source to destination
# using Routing rest API
command = "curl -s http://%s/wm/topology/route/%s/%s/%s/%s/json" % (controllerRestIp, sourceSwitch, sourcePort, destSwitch, destPort)
result = os.popen(command).read()
parsedResult = json.loads(result)
print command+"\n"
print result+"\n"
for i in range(len(parsedResult)):
if i % 2 == 0:
ap1Dpid = parsedResult[i]['switch']
ap1Port = parsedResult[i]['port']
print ap1Dpid, ap1Port
else:
ap2Dpid = parsedResult[i]['switch']
ap2Port = parsedResult[i]['port']
print ap2Dpid, ap2Port
# send one flow mod per pair of APs in route
# using StaticFlowPusher rest API
# IMPORTANT NOTE: current Floodlight StaticflowEntryPusher
# assumes all flow entries to have unique name across all switches
# this will most possibly be relaxed later, but for now we
# encode each flow entry's name with both switch dpid, user
# specified name, and flow type (f: forward, r: reverse, farp/rarp: arp)
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"src-ip\":\"%s\", \"dst-ip\":\"%s\", \"ether-type\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".f", args.srcAddress, args.dstAddress, "0x800", ap1Port, ap2Port, controllerRestIp)
result = os.popen(command).read()
print command
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"ether-type\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".farp", "0x806", ap1Port, ap2Port, controllerRestIp)
result = os.popen(command).read()
print command
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"src-ip\":\"%s\", \"dst-ip\":\"%s\", \"ether-type\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".r", args.dstAddress, args.srcAddress, "0x800", ap2Port, ap1Port, controllerRestIp)
result = os.popen(command).read()
print command
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"ether-type\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".rarp", "0x806", ap2Port, ap1Port, controllerRestIp)
result = os.popen(command).read()
print command
# store created circuit attributes in local ./circuits.json
datetime = time.asctime()
circuitParams = {'name':args.circuitName, 'Dpid':ap1Dpid, 'inPort':ap1Port, 'outPort':ap2Port, 'datetime':datetime}
str = json.dumps(circuitParams)
circuitDb.write(str+"\n")
# confirm successful circuit creation
# using controller rest API
command="curl -s http://%s/wm/core/switch/all/flow/json| python -mjson.tool" % (controllerRestIp)
result = os.popen(command).read()
print command + "\n" + result
elif args.action=='delete':
circuitDb = open('./circuits.json','w')
# removing previously created flow from switches
# using StaticFlowPusher rest API
# currently, circuitpusher records created circuits in local file ./circuits.db
# with circuit name and list of switches
circuitExists = False
for line in lines:
data = json.loads(line)
if data['name']==(args.circuitName):
circuitExists = True
sw = data['Dpid']
print data, sw
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".f", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".farp", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".r", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".rarp", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
else:
circuitDb.write(line)
circuitDb.close()
if not circuitExists:
print "specified circuit does not exist"
sys.exit()
| circuitDb = open('./circuits.json','r')
lines = circuitDb.readlines()
circuitDb.close() | conditional_block |
circuitpusher.py | #! /usr/bin/python
"""
circuitpusher utilizes floodlight rest APIs to create a bidirectional circuit,
i.e., permanent flow entry, on all switches in route between two devices based
on IP addresses with specified priority.
Notes:
1. The circuit pusher currently only creates circuit with two IP end points
2. Prior to sending restAPI requests to the circuit pusher, the specified end
points must already been known to the controller (i.e., already have sent
packets on the network, easy way to assure this is to do a ping (to any
target) from the two hosts.
3. The current supported command syntax format is:
a) circuitpusher.py --controller={IP}:{rest port} --type ip --src {IP} --dst {IP} --add --name {circuit-name}
adds a new circuit between src and dst devices Currently ip circuit is supported. ARP is automatically supported.
Currently a simple circuit record storage is provided in a text file circuits.json in the working directory.
The file is not protected and does not clean itself between controller restarts. The file is needed for correct operation
and the user should make sure deleting the file when floodlight controller is restarted.
b) circuitpusher.py --controller={IP}:{rest port} --delete --name {circuit-name}
deletes a created circuit (as recorded in circuits.json) using the previously given name
@author kcwang
"""
import os
import sys
import subprocess
import json
import argparse
import io
import time
# parse circuit options. Currently supports add and delete actions.
# Syntax:
# circuitpusher --controller {IP:REST_PORT} --add --name {CIRCUIT_NAME} --type ip --src {IP} --dst {IP}
# circuitpusher --controller {IP:REST_PORT} --delete --name {CIRCUIT_NAME}
parser = argparse.ArgumentParser(description='Circuit Pusher')
parser.add_argument('--controller', dest='controllerRestIp', action='store', default='localhost:8080', help='controller IP:RESTport, e.g., localhost:8080 or A.B.C.D:8080')
parser.add_argument('--add', dest='action', action='store_const', const='add', default='add', help='action: add, delete')
parser.add_argument('--delete', dest='action', action='store_const', const='delete', default='add', help='action: add, delete')
parser.add_argument('--type', dest='type', action='store', default='ip', help='valid types: ip')
parser.add_argument('--src', dest='srcAddress', action='store', default='0.0.0.0', help='source address: if type=ip, A.B.C.D')
parser.add_argument('--dst', dest='dstAddress', action='store', default='0.0.0.0', help='destination address: if type=ip, A.B.C.D')
parser.add_argument('--name', dest='circuitName', action='store', default='circuit-1', help='name for circuit, e.g., circuit-1')
#user catches
if len(sys.argv) == 1:
command = './circuitpusher.py -h'
instruct = os.popen(command).read()
print instruct
exit(1)
elif sys.argv[1] == "help":
command = './circuitpusher.py -h'
instruct = os.popen(command).read()
print instruct
exit(1)
#parse arguments | controllerRestIp = args.controllerRestIp
# first check if a local file exists, which needs to be updated after add/delete
if os.path.exists('./circuits.json'):
circuitDb = open('./circuits.json','r')
lines = circuitDb.readlines()
circuitDb.close()
else:
lines={}
if args.action=='add':
circuitDb = open('./circuits.json','a')
for line in lines:
data = json.loads(line)
if data['name']==(args.circuitName):
print "Circuit %s exists already. Use new name to create." % args.circuitName
sys.exit()
else:
circuitExists = False
# retrieve source and destination device attachment points
# using DeviceManager rest API
command = "curl -s http://%s/wm/device/?ipv4=%s" % (args.controllerRestIp, args.srcAddress)
result = os.popen(command).read()
parsedResult = json.loads(result)
print command+"\n"
sourceSwitch = parsedResult[0]['attachmentPoint'][0]['switchDPID']
sourcePort = parsedResult[0]['attachmentPoint'][0]['port']
command = "curl -s http://%s/wm/device/?ipv4=%s" % (args.controllerRestIp, args.dstAddress)
result = os.popen(command).read()
parsedResult = json.loads(result)
print command+"\n"
destSwitch = parsedResult[0]['attachmentPoint'][0]['switchDPID']
destPort = parsedResult[0]['attachmentPoint'][0]['port']
print "Creating circuit:"
print "from source device at switch %s port %s" % (sourceSwitch,sourcePort)
print "to destination device at switch %s port %s"% (destSwitch,destPort)
# retrieving route from source to destination
# using Routing rest API
command = "curl -s http://%s/wm/topology/route/%s/%s/%s/%s/json" % (controllerRestIp, sourceSwitch, sourcePort, destSwitch, destPort)
result = os.popen(command).read()
parsedResult = json.loads(result)
print command+"\n"
print result+"\n"
for i in range(len(parsedResult)):
if i % 2 == 0:
ap1Dpid = parsedResult[i]['switch']
ap1Port = parsedResult[i]['port']
print ap1Dpid, ap1Port
else:
ap2Dpid = parsedResult[i]['switch']
ap2Port = parsedResult[i]['port']
print ap2Dpid, ap2Port
# send one flow mod per pair of APs in route
# using StaticFlowPusher rest API
# IMPORTANT NOTE: current Floodlight StaticflowEntryPusher
# assumes all flow entries to have unique name across all switches
# this will most possibly be relaxed later, but for now we
# encode each flow entry's name with both switch dpid, user
# specified name, and flow type (f: forward, r: reverse, farp/rarp: arp)
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"src-ip\":\"%s\", \"dst-ip\":\"%s\", \"ether-type\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".f", args.srcAddress, args.dstAddress, "0x800", ap1Port, ap2Port, controllerRestIp)
result = os.popen(command).read()
print command
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"ether-type\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".farp", "0x806", ap1Port, ap2Port, controllerRestIp)
result = os.popen(command).read()
print command
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"src-ip\":\"%s\", \"dst-ip\":\"%s\", \"ether-type\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".r", args.dstAddress, args.srcAddress, "0x800", ap2Port, ap1Port, controllerRestIp)
result = os.popen(command).read()
print command
command = "curl -s -d '{\"switch\": \"%s\", \"name\":\"%s\", \"ether-type\":\"%s\", \"cookie\":\"0\", \"priority\":\"32768\", \"ingress-port\":\"%s\",\"active\":\"true\", \"actions\":\"output=%s\"}' http://%s/wm/staticflowentrypusher/json" % (ap1Dpid, ap1Dpid+"."+args.circuitName+".rarp", "0x806", ap2Port, ap1Port, controllerRestIp)
result = os.popen(command).read()
print command
# store created circuit attributes in local ./circuits.json
datetime = time.asctime()
circuitParams = {'name':args.circuitName, 'Dpid':ap1Dpid, 'inPort':ap1Port, 'outPort':ap2Port, 'datetime':datetime}
str = json.dumps(circuitParams)
circuitDb.write(str+"\n")
# confirm successful circuit creation
# using controller rest API
command="curl -s http://%s/wm/core/switch/all/flow/json| python -mjson.tool" % (controllerRestIp)
result = os.popen(command).read()
print command + "\n" + result
elif args.action=='delete':
circuitDb = open('./circuits.json','w')
# removing previously created flow from switches
# using StaticFlowPusher rest API
# currently, circuitpusher records created circuits in local file ./circuits.db
# with circuit name and list of switches
circuitExists = False
for line in lines:
data = json.loads(line)
if data['name']==(args.circuitName):
circuitExists = True
sw = data['Dpid']
print data, sw
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".f", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".farp", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".r", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
command = "curl -X DELETE -d '{\"name\":\"%s\", \"switch\":\"%s\"}' http://%s/wm/staticflowentrypusher/json" % (sw+"."+args.circuitName+".rarp", sw, controllerRestIp)
result = os.popen(command).read()
print command, result
else:
circuitDb.write(line)
circuitDb.close()
if not circuitExists:
print "specified circuit does not exist"
sys.exit() | args = parser.parse_args()
print args
| random_line_split |
play15old2.rs | use ndarray::Array2;
use rand::Rng;
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
use std::fmt::{Display, Formatter};
pub const WIDTH: usize = 4;
pub const HEIGHT: usize = 4;
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub struct Board([[u8; WIDTH]; HEIGHT]);
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq)]
pub struct | ;
impl Board {
pub fn new() -> Self {
let mut arr = [[0u8; WIDTH]; HEIGHT];
for y in 0..WIDTH {
for x in 0..HEIGHT {
arr[y][x] = ((y * WIDTH + x + 1) % (WIDTH * HEIGHT)) as u8
}
}
Board(arr)
}
pub fn from_array(arr: [[u8; WIDTH]; HEIGHT]) -> Result<Self, BoardCreateError> {
let w = WIDTH;
let h = HEIGHT;
let mut tile_count = vec![0; w * h];
for y in 0..HEIGHT {
for x in 0..WIDTH {
tile_count.get_mut(arr[y][x] as usize).map(|x| *x += 1);
}
}
let has_one_of_all = tile_count.iter().all(|x| *x == 1);
if has_one_of_all {
Ok(Board(arr))
} else {
Err(BoardCreateError)
}
}
pub fn size(&self) -> (usize, usize) {
(WIDTH, HEIGHT)
}
pub fn empty_at(&self) -> (usize, usize) {
for y in 0..HEIGHT {
for x in 0..WIDTH {
if self.0[y][x] == 0 {
return (x, y);
}
}
}
panic!()
}
#[inline(always)]
pub fn swap(&mut self, p1: (usize, usize), p2: (usize, usize)) {
let arr = &mut self.0;
let t1 = arr[p1.1][p1.0];
let t2 = arr[p2.1][p2.0];
arr[p1.1][p1.0] = t2;
arr[p2.1][p2.0] = t1;
}
pub fn apply(&mut self, dir: Dir) -> Result<(), ()> {
let (zx, zy) = self.empty_at();
let (w, h) = self.size();
match dir {
Dir::Right if zx < w - 1 => {
self.swap((zx, zy), (zx + 1, zy));
Ok(())
}
Dir::Down if zy < h - 1 => {
self.swap((zx, zy), (zx, zy + 1));
Ok(())
}
Dir::Left if zx > 0 => {
self.swap((zx, zy), (zx - 1, zy));
Ok(())
}
Dir::Up if zy > 0 => {
self.swap((zx, zy), (zx, zy - 1));
Ok(())
}
_ => Err(()),
}
}
//
// pub fn possible_steps_with<F: FnMut(Board, u8)>(&self, mut f: F) {
// let (zx, zy) = self.empty_at();
// let w = self.0.shape()[0];
// let h = self.0.shape()[1];
// if zx < w - 1 {
// // Направо
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx + 1, zy));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// if zy < h - 1 {
// // Вниз
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx, zy + 1));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// if zx > 0 {
// // Налево
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx - 1, zy));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// if zy > 0 {
// // Вверх
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx, zy - 1));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// }
pub fn is_solved(&self) -> bool {
let (w, h) = self.size();
for y in 0..h {
for x in 0..w {
if ((y * w + x + 1) % (w * h)) as u8 != self.0[y][x] {
return false;
}
}
}
true
}
pub fn can_solve(&self) -> bool {
// let (w, h) = self.size();
// let mut flat = Vec::<u8>::with_capacity(w * h);
// for y in 0..h {
// for x in 0..w {
// flat.push(self.0[(x, y)]);
// }
// }
// let (_zx, zy) = self.empty_at();
// let sum: usize = (0..flat.len())
// .map(|i| {
// let c = flat[i] as usize;
// let c = if c == 0 { w * h } else { c };
// let k = flat[i..]
// .iter()
// .map(|x| if *x == 0 { (w * h) } else { *x as usize })
// .filter(|x| *x < c)
// .count();
// k
// })
// .sum();
// let n = sum + zy;
// n % 2 == 0
true
}
pub fn wrong_tiles(&self) -> usize {
let (w, h) = self.size();
let mut c = 0;
for y in 0..h {
for x in 0..w {
if ((y * w + x + 1) % (w * h)) as u8 != self.0[y][x] {
c += 1;
}
}
}
c
}
pub fn solve(&self) -> Result<Path, ()> {
if !self.can_solve() {
return Err(());
}
let mut checked_position_length = HashMap::new();
let mut heap = BinaryHeap::with_capacity(1000);
heap.push(QPath(Path::new(self.clone())));
let mut i = 0;
loop {
i += 1;
let current = heap.pop().unwrap();
let last = checked_position_length.get_mut(¤t.0.current_board);
let remove_longer = |heap: &mut BinaryHeap<QPath>, to_remove: Board| {
heap.retain(|qpath| qpath.0.current_board != to_remove);
};
if i % 10_000 == 0 {
println!(
"iter = {}e4, path len = {}, euristic = {}, in heap {} el",
i / 10_000,
current.0.path().len(),
current.0.current_board().wrong_tiles(),
heap.len()
);
}
match last {
Some(last) if *last <= current.0.path.len() => continue,
Some(last) => {
*last = current.0.path.len();
//remove_longer(&mut heap, current.0.current_board);
}
_ => {
checked_position_length.insert(current.0.current_board, current.0.path.len());
//remove_longer(&mut heap, current.0.current_board);
}
}
// println!("Current board with {}", current.cost());
if current.0.current_board().is_solved() {
return Ok(current.0);
}
let mut push_or_ignore = |dir| {
// Oh... Remove?
if heap.len() > 1_000_000 {
let mut replacement = BinaryHeap::with_capacity(1_000_005);
for _i in 0..10_000 {
replacement.push(heap.pop().unwrap());
}
heap = replacement;
}
// ^^^^^^^
let mut c = ¤t;
let path = c.0.push_step_cloned(dir);
if let Ok(path) = path {
if !checked_position_length.contains_key(path.current_board()) {
heap.push(QPath::new(path));
}
}
}; // 15 2 1 12 8 5 6 11 4 9 10 7 3 14 13 0
push_or_ignore(Dir::Up);
push_or_ignore(Dir::Right);
push_or_ignore(Dir::Down);
push_or_ignore(Dir::Left);
}
}
pub fn inner(&self) -> &[[u8; WIDTH]; HEIGHT] {
&self.0
}
}
impl Display for Board {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let (w, h) = self.size();
for y in 0..h {
for x in 0..w {
match w * h {
0..=9 => write!(f, "{:1} ", self.0[y][x])?,
10..=99 => write!(f, "{:2} ", self.0[y][x])?,
100..=999 => write!(f, "{:3} ", self.0[y][x])?,
_ => panic!(""),
};
}
writeln!(f)?;
}
Ok(())
}
}
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub enum Dir {
Up,
Right,
Down,
Left,
}
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct Path {
current_board: Board,
path: Vec<Dir>,
}
impl Path {
pub fn current_board(&self) -> &Board {
&self.current_board
}
pub fn path(&self) -> &Vec<Dir> {
&self.path
}
pub fn len(&self) -> usize {
self.path.len()
}
}
impl Path {
pub fn new(start_board: Board) -> Self {
Self {
current_board: start_board,
path: Vec::new(),
}
}
pub fn push_step(&mut self, dir: Dir) -> Result<(), ()> {
self.current_board.apply(dir).map(|_| self.path.push(dir))
}
pub fn push_step_cloned(&self, dir: Dir) -> Result<Self, ()> {
let mut board_clone = self.current_board.clone();
board_clone.apply(dir)?;
let mut path_clone = self.path.clone();
path_clone.push(dir);
Ok(Self {
current_board: board_clone,
path: path_clone,
})
}
}
#[derive(Clone)]
struct QPath(Path);
impl QPath {
fn new(p: Path) -> Self {
Self(p)
}
pub fn cost(&self) -> usize {
let g = self.0.len();
let f = self.0.current_board.wrong_tiles();
// let f: usize = self
// .0
// .current_board()
// .inner()
// .indexed_iter()
// .map(|((x, y), v)| {
// let (w, h) = self.0.current_board().size();
// let (ox, oy) = if *v == 0 {
// (w - 1, h - 1)
// } else {
// let v = (*v - 1) as usize;
// (v % w, v / h)
// };
// (ox.max(x) - ox.min(x)) + (oy.max(y) - oy.min(y))
// })
// .sum();
g + f
}
}
impl Ord for QPath {
fn cmp(&self, other: &Self) -> Ordering {
(other.cost()).cmp(&self.cost())
}
}
impl PartialOrd for QPath {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for QPath {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl Eq for QPath {}
| BoardCreateError | identifier_name |
play15old2.rs | use ndarray::Array2;
use rand::Rng;
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
use std::fmt::{Display, Formatter};
pub const WIDTH: usize = 4;
pub const HEIGHT: usize = 4;
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub struct Board([[u8; WIDTH]; HEIGHT]);
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq)]
pub struct BoardCreateError;
impl Board {
pub fn new() -> Self {
let mut arr = [[0u8; WIDTH]; HEIGHT];
for y in 0..WIDTH {
for x in 0..HEIGHT {
arr[y][x] = ((y * WIDTH + x + 1) % (WIDTH * HEIGHT)) as u8
}
}
Board(arr)
}
pub fn from_array(arr: [[u8; WIDTH]; HEIGHT]) -> Result<Self, BoardCreateError> {
let w = WIDTH;
let h = HEIGHT;
let mut tile_count = vec![0; w * h];
for y in 0..HEIGHT {
for x in 0..WIDTH {
tile_count.get_mut(arr[y][x] as usize).map(|x| *x += 1);
}
}
let has_one_of_all = tile_count.iter().all(|x| *x == 1);
if has_one_of_all {
Ok(Board(arr))
} else {
Err(BoardCreateError)
}
}
pub fn size(&self) -> (usize, usize) {
(WIDTH, HEIGHT)
}
pub fn empty_at(&self) -> (usize, usize) |
#[inline(always)]
pub fn swap(&mut self, p1: (usize, usize), p2: (usize, usize)) {
let arr = &mut self.0;
let t1 = arr[p1.1][p1.0];
let t2 = arr[p2.1][p2.0];
arr[p1.1][p1.0] = t2;
arr[p2.1][p2.0] = t1;
}
pub fn apply(&mut self, dir: Dir) -> Result<(), ()> {
let (zx, zy) = self.empty_at();
let (w, h) = self.size();
match dir {
Dir::Right if zx < w - 1 => {
self.swap((zx, zy), (zx + 1, zy));
Ok(())
}
Dir::Down if zy < h - 1 => {
self.swap((zx, zy), (zx, zy + 1));
Ok(())
}
Dir::Left if zx > 0 => {
self.swap((zx, zy), (zx - 1, zy));
Ok(())
}
Dir::Up if zy > 0 => {
self.swap((zx, zy), (zx, zy - 1));
Ok(())
}
_ => Err(()),
}
}
//
// pub fn possible_steps_with<F: FnMut(Board, u8)>(&self, mut f: F) {
// let (zx, zy) = self.empty_at();
// let w = self.0.shape()[0];
// let h = self.0.shape()[1];
// if zx < w - 1 {
// // Направо
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx + 1, zy));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// if zy < h - 1 {
// // Вниз
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx, zy + 1));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// if zx > 0 {
// // Налево
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx - 1, zy));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// if zy > 0 {
// // Вверх
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx, zy - 1));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// }
pub fn is_solved(&self) -> bool {
let (w, h) = self.size();
for y in 0..h {
for x in 0..w {
if ((y * w + x + 1) % (w * h)) as u8 != self.0[y][x] {
return false;
}
}
}
true
}
pub fn can_solve(&self) -> bool {
// let (w, h) = self.size();
// let mut flat = Vec::<u8>::with_capacity(w * h);
// for y in 0..h {
// for x in 0..w {
// flat.push(self.0[(x, y)]);
// }
// }
// let (_zx, zy) = self.empty_at();
// let sum: usize = (0..flat.len())
// .map(|i| {
// let c = flat[i] as usize;
// let c = if c == 0 { w * h } else { c };
// let k = flat[i..]
// .iter()
// .map(|x| if *x == 0 { (w * h) } else { *x as usize })
// .filter(|x| *x < c)
// .count();
// k
// })
// .sum();
// let n = sum + zy;
// n % 2 == 0
true
}
pub fn wrong_tiles(&self) -> usize {
let (w, h) = self.size();
let mut c = 0;
for y in 0..h {
for x in 0..w {
if ((y * w + x + 1) % (w * h)) as u8 != self.0[y][x] {
c += 1;
}
}
}
c
}
pub fn solve(&self) -> Result<Path, ()> {
if !self.can_solve() {
return Err(());
}
let mut checked_position_length = HashMap::new();
let mut heap = BinaryHeap::with_capacity(1000);
heap.push(QPath(Path::new(self.clone())));
let mut i = 0;
loop {
i += 1;
let current = heap.pop().unwrap();
let last = checked_position_length.get_mut(¤t.0.current_board);
let remove_longer = |heap: &mut BinaryHeap<QPath>, to_remove: Board| {
heap.retain(|qpath| qpath.0.current_board != to_remove);
};
if i % 10_000 == 0 {
println!(
"iter = {}e4, path len = {}, euristic = {}, in heap {} el",
i / 10_000,
current.0.path().len(),
current.0.current_board().wrong_tiles(),
heap.len()
);
}
match last {
Some(last) if *last <= current.0.path.len() => continue,
Some(last) => {
*last = current.0.path.len();
//remove_longer(&mut heap, current.0.current_board);
}
_ => {
checked_position_length.insert(current.0.current_board, current.0.path.len());
//remove_longer(&mut heap, current.0.current_board);
}
}
// println!("Current board with {}", current.cost());
if current.0.current_board().is_solved() {
return Ok(current.0);
}
let mut push_or_ignore = |dir| {
// Oh... Remove?
if heap.len() > 1_000_000 {
let mut replacement = BinaryHeap::with_capacity(1_000_005);
for _i in 0..10_000 {
replacement.push(heap.pop().unwrap());
}
heap = replacement;
}
// ^^^^^^^
let mut c = ¤t;
let path = c.0.push_step_cloned(dir);
if let Ok(path) = path {
if !checked_position_length.contains_key(path.current_board()) {
heap.push(QPath::new(path));
}
}
}; // 15 2 1 12 8 5 6 11 4 9 10 7 3 14 13 0
push_or_ignore(Dir::Up);
push_or_ignore(Dir::Right);
push_or_ignore(Dir::Down);
push_or_ignore(Dir::Left);
}
}
pub fn inner(&self) -> &[[u8; WIDTH]; HEIGHT] {
&self.0
}
}
impl Display for Board {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let (w, h) = self.size();
for y in 0..h {
for x in 0..w {
match w * h {
0..=9 => write!(f, "{:1} ", self.0[y][x])?,
10..=99 => write!(f, "{:2} ", self.0[y][x])?,
100..=999 => write!(f, "{:3} ", self.0[y][x])?,
_ => panic!(""),
};
}
writeln!(f)?;
}
Ok(())
}
}
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub enum Dir {
Up,
Right,
Down,
Left,
}
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct Path {
current_board: Board,
path: Vec<Dir>,
}
impl Path {
pub fn current_board(&self) -> &Board {
&self.current_board
}
pub fn path(&self) -> &Vec<Dir> {
&self.path
}
pub fn len(&self) -> usize {
self.path.len()
}
}
impl Path {
pub fn new(start_board: Board) -> Self {
Self {
current_board: start_board,
path: Vec::new(),
}
}
pub fn push_step(&mut self, dir: Dir) -> Result<(), ()> {
self.current_board.apply(dir).map(|_| self.path.push(dir))
}
pub fn push_step_cloned(&self, dir: Dir) -> Result<Self, ()> {
let mut board_clone = self.current_board.clone();
board_clone.apply(dir)?;
let mut path_clone = self.path.clone();
path_clone.push(dir);
Ok(Self {
current_board: board_clone,
path: path_clone,
})
}
}
#[derive(Clone)]
struct QPath(Path);
impl QPath {
fn new(p: Path) -> Self {
Self(p)
}
pub fn cost(&self) -> usize {
let g = self.0.len();
let f = self.0.current_board.wrong_tiles();
// let f: usize = self
// .0
// .current_board()
// .inner()
// .indexed_iter()
// .map(|((x, y), v)| {
// let (w, h) = self.0.current_board().size();
// let (ox, oy) = if *v == 0 {
// (w - 1, h - 1)
// } else {
// let v = (*v - 1) as usize;
// (v % w, v / h)
// };
// (ox.max(x) - ox.min(x)) + (oy.max(y) - oy.min(y))
// })
// .sum();
g + f
}
}
impl Ord for QPath {
fn cmp(&self, other: &Self) -> Ordering {
(other.cost()).cmp(&self.cost())
}
}
impl PartialOrd for QPath {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for QPath {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl Eq for QPath {}
| {
for y in 0..HEIGHT {
for x in 0..WIDTH {
if self.0[y][x] == 0 {
return (x, y);
}
}
}
panic!()
} | identifier_body |
play15old2.rs | use ndarray::Array2;
use rand::Rng;
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
use std::fmt::{Display, Formatter};
pub const WIDTH: usize = 4;
pub const HEIGHT: usize = 4;
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub struct Board([[u8; WIDTH]; HEIGHT]);
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq)]
pub struct BoardCreateError;
impl Board {
pub fn new() -> Self {
let mut arr = [[0u8; WIDTH]; HEIGHT];
for y in 0..WIDTH {
for x in 0..HEIGHT {
arr[y][x] = ((y * WIDTH + x + 1) % (WIDTH * HEIGHT)) as u8
}
}
Board(arr)
}
pub fn from_array(arr: [[u8; WIDTH]; HEIGHT]) -> Result<Self, BoardCreateError> {
let w = WIDTH;
let h = HEIGHT;
let mut tile_count = vec![0; w * h];
for y in 0..HEIGHT {
for x in 0..WIDTH {
tile_count.get_mut(arr[y][x] as usize).map(|x| *x += 1);
}
}
let has_one_of_all = tile_count.iter().all(|x| *x == 1);
if has_one_of_all {
Ok(Board(arr))
} else {
Err(BoardCreateError)
}
}
pub fn size(&self) -> (usize, usize) {
(WIDTH, HEIGHT)
}
pub fn empty_at(&self) -> (usize, usize) {
for y in 0..HEIGHT {
for x in 0..WIDTH {
if self.0[y][x] == 0 {
return (x, y);
}
}
}
panic!()
}
#[inline(always)]
pub fn swap(&mut self, p1: (usize, usize), p2: (usize, usize)) {
let arr = &mut self.0;
let t1 = arr[p1.1][p1.0];
let t2 = arr[p2.1][p2.0];
arr[p1.1][p1.0] = t2;
arr[p2.1][p2.0] = t1;
}
pub fn apply(&mut self, dir: Dir) -> Result<(), ()> {
let (zx, zy) = self.empty_at();
let (w, h) = self.size();
match dir {
Dir::Right if zx < w - 1 => {
self.swap((zx, zy), (zx + 1, zy));
Ok(())
}
Dir::Down if zy < h - 1 => {
self.swap((zx, zy), (zx, zy + 1));
Ok(())
}
Dir::Left if zx > 0 => {
self.swap((zx, zy), (zx - 1, zy));
Ok(())
}
Dir::Up if zy > 0 => {
self.swap((zx, zy), (zx, zy - 1));
Ok(())
}
_ => Err(()),
}
}
//
// pub fn possible_steps_with<F: FnMut(Board, u8)>(&self, mut f: F) {
// let (zx, zy) = self.empty_at();
// let w = self.0.shape()[0];
// let h = self.0.shape()[1];
// if zx < w - 1 {
// // Направо
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx + 1, zy));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// if zy < h - 1 {
// // Вниз
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx, zy + 1));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// if zx > 0 {
// // Налево
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx - 1, zy));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// if zy > 0 {
// // Вверх
// let mut b = self.clone();
// b.0.swap((zx, zy), (zx, zy - 1));
// let moved = b.0[(zx, zy)];
// f(b, moved)
// }
// }
pub fn is_solved(&self) -> bool {
let (w, h) = self.size();
for y in 0..h {
for x in 0..w {
if ((y * w + x + 1) % (w * h)) as u8 != self.0[y][x] {
return false;
}
}
}
true
}
pub fn can_solve(&self) -> bool {
// let (w, h) = self.size();
// let mut flat = Vec::<u8>::with_capacity(w * h);
// for y in 0..h {
// for x in 0..w {
// flat.push(self.0[(x, y)]);
// }
// }
// let (_zx, zy) = self.empty_at();
// let sum: usize = (0..flat.len())
// .map(|i| {
// let c = flat[i] as usize;
// let c = if c == 0 { w * h } else { c };
// let k = flat[i..]
// .iter()
// .map(|x| if *x == 0 { (w * h) } else { *x as usize })
// .filter(|x| *x < c)
// .count();
// k
// })
// .sum();
// let n = sum + zy;
// n % 2 == 0
true
}
pub fn wrong_tiles(&self) -> usize {
let (w, h) = self.size();
let mut c = 0;
for y in 0..h {
for x in 0..w {
if ((y * w + x + 1) % (w * h)) as u8 != self.0[y][x] {
c += 1;
}
}
}
c
}
pub fn solve(&self) -> Result<Path, ()> {
if !self.can_solve() {
return Err(());
}
let mut checked_position_length = HashMap::new();
let mut heap = BinaryHeap::with_capacity(1000);
heap.push(QPath(Path::new(self.clone())));
let mut i = 0;
loop {
i += 1;
let current = heap.pop().unwrap();
let last = checked_position_length.get_mut(¤t.0.current_board);
let remove_longer = |heap: &mut BinaryHeap<QPath>, to_remove: Board| {
heap.retain(|qpath| qpath.0.current_board != to_remove);
};
if i % 10_000 == 0 {
println!(
"iter = {}e4, path len = {}, euristic = {}, in heap {} el",
i / 10_000,
current.0.path().len(),
current.0.current_board().wrong_tiles(),
heap.len()
);
}
match last {
Some(last) if *last <= current.0.path.len() => continue,
Some(last) => {
*last = current.0.path.len();
//remove_longer(&mut heap, current.0.current_board);
}
_ => {
checked_position_length.insert(current.0.current_board, current.0.path.len());
//remove_longer(&mut heap, current.0.current_board);
}
}
// println!("Current board with {}", current.cost());
if current.0.current_board().is_solved() {
return Ok(current.0);
}
let mut push_or_ignore = |dir| {
// Oh... Remove?
if heap.len() > 1_000_000 {
let mut replacement = BinaryHeap::with_capacity(1_000_005);
for _i in 0..10_000 {
replacement.push(heap.pop().unwrap());
}
heap = replacement;
}
// ^^^^^^^
let mut c = ¤t;
let path = c.0.push_step_cloned(dir);
if let Ok(path) = path {
if !checked_position_length.contains_key(path.current_board()) {
heap.push(QPath::new(path));
}
}
}; // 15 2 1 12 8 5 6 11 4 9 10 7 3 14 13 0
push_or_ignore(Dir::Up);
push_or_ignore(Dir::Right);
push_or_ignore(Dir::Down);
push_or_ignore(Dir::Left);
}
}
pub fn inner(&self) -> &[[u8; WIDTH]; HEIGHT] {
&self.0
}
}
impl Display for Board {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let (w, h) = self.size();
for y in 0..h {
for x in 0..w {
match w * h {
0..=9 => write!(f, "{:1} ", self.0[y][x])?,
10..=99 => write!(f, "{:2} ", self.0[y][x])?,
100..=999 => write!(f, "{:3} ", self.0[y][x])?,
_ => panic!(""),
};
}
writeln!(f)?;
}
Ok(())
}
}
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
pub enum Dir {
Up,
Right,
Down,
Left,
}
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct Path {
current_board: Board,
path: Vec<Dir>,
}
impl Path {
pub fn current_board(&self) -> &Board {
&self.current_board
}
pub fn path(&self) -> &Vec<Dir> {
&self.path
}
pub fn len(&self) -> usize {
self.path.len()
}
}
impl Path {
pub fn new(start_board: Board) -> Self {
Self {
current_board: start_board,
path: Vec::new(),
}
}
pub fn push_step(&mut self, dir: Dir) -> Result<(), ()> {
self.current_board.apply(dir).map(|_| self.path.push(dir))
}
pub fn push_step_cloned(&self, dir: Dir) -> Result<Self, ()> {
let mut board_clone = self.current_board.clone();
board_clone.apply(dir)?;
let mut path_clone = self.path.clone();
path_clone.push(dir);
Ok(Self {
current_board: board_clone,
path: path_clone,
})
}
}
#[derive(Clone)]
struct QPath(Path);
impl QPath {
fn new(p: Path) -> Self {
Self(p)
}
pub fn cost(&self) -> usize {
let g = self.0.len();
let f = self.0.current_board.wrong_tiles();
// let f: usize = self
// .0
// .current_board()
// .inner() | // .indexed_iter()
// .map(|((x, y), v)| {
// let (w, h) = self.0.current_board().size();
// let (ox, oy) = if *v == 0 {
// (w - 1, h - 1)
// } else {
// let v = (*v - 1) as usize;
// (v % w, v / h)
// };
// (ox.max(x) - ox.min(x)) + (oy.max(y) - oy.min(y))
// })
// .sum();
g + f
}
}
impl Ord for QPath {
fn cmp(&self, other: &Self) -> Ordering {
(other.cost()).cmp(&self.cost())
}
}
impl PartialOrd for QPath {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for QPath {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl Eq for QPath {} | random_line_split | |
code_quality_eval.py | import sys
import os
sys.path.insert(0, '../oscar.py')
import re
from oscar import Project
from oscar import Time_project_info as Proj
import subprocess
from time import time as current_time
start_time = current_time()
def bash(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
return out
def | (hash, type):
"""
Method used to search for a specific blob, commit or tree.
If a tree is searched for, the result is splitted into its components (blobs and directories),
which are again splitted into their mode, hash and name.
In the case of a commit, we split the information string and the tree hash and
parent's commit hash are returned
"""
out = bash('echo ' + hash + ' | ~/lookup/showCnt ' + type)
if type == 'tree':
return [blob.split(';') for blob in out.strip().split('\n')]
if type == 'commit':
splitted = out.split(';')
# the tree and parent commit hashes are the second and third word, respectively
# the commit time is the last word, from which we discard the timezone and cast it to int
return splitted[1], splitted[2], int(splitted[-1].split()[0])
return out
# files used in continuous integration
ci_files = [
'\.gitlab\-ci\.yml', '\.travis\.yml', 'Jenkinsfile', 'buddy\.yml', '\.drone\.yml',
'circle\.yml', '\.circleci', 'bamboo\.yaml', 'codeship\-steps\.yml', '\.teamcity',
'wercker\.yml', 'appveyor\.yml', 'bitrise\.yml', 'codefresh\.yml', 'solano\.yml',
'shippable\.yml', 'phpci\.yml', 'cloudbuild\.yaml'
]
def ci_lookup(tree_hash):
"""
Method used to check the usage of Continuous Integration in a tree, given its hash.
"""
query = 'echo ' + tree_hash + ' | ~/lookup/showCnt tree | egrep "' + '|'.join(ci_files) +'"'
out = bash(query)
"""
# alternate method
blobs = search(tree_hash, 'tree')
index = {'mode':1, 'hash':1, 'name':2}
ci = False
for blob in blobs:
name = blob[index['name']]
hash = blob[index['hash']]
if ((name in ci_files) or
(name in ci_config_dir and ';'+ci_config_dir[name] in search(hash, 'tree'))):
ci = True
break
"""
return bool(out)
def calc_CI_introductions(commits, author):
"""
Alternative way to check_if_introduction, to compare performance.
"""
# using a dictionary that has the commits' hashes as keys,
# so as to not search multiple times for the same commit
CI_checked = {}
# delete contents
open('introductions.csv', 'w').close()
# for every commit, we look up whether the author included a CI file,
# that did not exist in the parent commit
for count, commit in enumerate(commits):
# status update
if (count + 1) % 50 == 0:
print count + 1, ' / ', len(commits)
tree_hash, parent_commit_hash, time = search(commit, 'commit')
if tree_hash not in CI_checked:
CI_checked[tree_hash] = ci_lookup(tree_hash)
# controlling for the case of multiple parent commits
all_parent_CI = False
for parent in parent_commit_hash.split(':'):
# controlling for the case of no parent commits
if parent == '':
break
parent_tree_hash = search(parent, 'commit')[0]
if parent_tree_hash not in CI_checked:
parent_CI = ci_lookup(parent_tree_hash)
CI_checked[parent_tree_hash] = parent_CI
else:
parent_CI = CI_checked[parent_tree_hash]
# checking all the parent commits for the usage of CI
all_parent_CI = all_parent_CI or parent_CI
# if the tree has a CI file, while the parent tree does not, increase the CI score
if CI_checked[tree_hash] and not all_parent_CI:
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
f = open("introductions.csv", "a")
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote'
print (current_time()-start_time)/len(commits), 'seconds per commit'
def check_if_introduction(commit, result):
"""
We check the parent commit to see if its child commit introduced or modified a CI config file.
"""
tree_hash, parent_commit_hash, time = search(commit, 'commit')
# controlling for the case of no parent commits
if parent_commit_hash == '':
return True
# controlling for the case of multiple parent commits
all_parent_CI = False
for parent in parent_commit_hash.split(':'):
parent_tree_hash = search(parent, 'commit')[0]
parent_CI = ci_lookup(parent_tree_hash)
# checking all the parent commits for the usage of CI
all_parent_CI = all_parent_CI or parent_CI
# if the tree has a CI file, while the parent tree does not, it is an introduction
return not all_parent_CI
def calc_CI(commits, author):
"""
Used to investigate how many commits, from a user, modified a CI configuration file.
Unix commands are used for a better performance.
"""
# delete contents
open('modifications.csv', 'w').close()
open('introductions.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 50 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# c2f does seems to result in a tie error, so c2b and b2f is used instead
#getting the blobs
query = ("for x in $(echo " + commit + " | ~/lookup/getValues c2b |" +
# splitting on the semicolon and discarding the newlines
" awk -v RS='[;\\n]' 1 |" +
# discarding the commit's hash (it appears before the blobs' hashes)
" tail -n+2); do" +
# for each blob, we look up it's filename
" echo $x | ~/lookup/getValues b2f;" +
" done |" +
# we discard the first field of the results (blobs' hash)
" cut -d ';' -f2 |" +
# we check whether one of the modified files is a CI configuration file
" egrep '" + "|".join(ci_files) + "'")
result = bash(query)
if result:
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
if check_if_introduction(commit, result):
f = open("introductions.csv", "a")
print 'introduction'
else:
f = open("modifications.csv", "a")
print 'modification'
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def calc_CI_diff(commits, author):
"""
Method written as a faster alternative to calc_CI. It seems to be 30 times faster.
"""
# delete contents
open('modifications.csv', 'w').close()
open('introductions.csv', 'w').close()
for count, commit in enumerate(commits):
#status update
if (count + 1) % 50 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# cmputeDiff2.perl seems to produce junk to the stdout occasionally
diff = bash("echo " + commit + " | ssh da4 ~/lookup/cmputeDiff2.perl")
# if a CI configuration file is in the diff
if re.search("|".join(ci_files), diff):
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
for blob in diff.split():
# looking for the CI config blob and checking if parent blob exists
if re.search("|".join(ci_files), blob):
# if we have both an introduction and a modification
# in the same commit, we count it as an introduction
if blob.endswith(';'):
# if we don't have the parent blob, after the last semicolon,
# it is an introduction
f = open("introductions.csv", "a")
print 'introduction'
else:
f = open("modifications.csv", "a")
print 'modification'
break
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def find_links(author, end_time, method='sh'):
"""
Method used to find the neighbours of a given author, i.e. the authors that
affected the given author's use of good coding practices.
A timestamp is also given to define the time till which we find the connections.
"""
out = bash('echo "'+ author + '" | ~/lookup/getValues a2P')
pr = [x for x in out.strip().split(';')[1:]]
if method == 'pr_timeline':
p = Proj()
for project in pr:
rows = p.project_timeline(['time','repo', 'author'], project)
for row in rows:
print row
#### Start building the regular expression that will be used to search for unit testing libraries,
#### in the commit's blobs ####
# Java
java_lib = ['io.restassured', 'org.openqa.selenium', 'org.spockframework', 'jtest',
'org.springframework.test', 'org.dbunit', 'org.jwalk', 'org.mockito', 'org.junit']
java_regex = (['import\s+'+s.replace('.', '\.') for s in java_lib])
java_all_reg = '|'.join(java_regex)
# Perl
perl_all_reg = 'use\s+Test::'
# Javascript
js = ['assert', 'mocha', 'jasmine', 'ava', 'jest', 'karma', 'storybook', 'tape',
'cypress', 'puppeteer', 'chai', 'qunit', 'sinon', 'casper', 'buster']
js_regex = (["require\([\\\'\\\"]" + s + "[\\\'\\\"]\)" for s in js])
js_all_reg = '|'.join(js_regex)
# C#
c_sharp = ['NUnit', 'Microsoft\.VisualStudio\.TestTools\.UnitTesting',
'Xunit', 'csUnit', 'MbUnit']
c_sharp_regex = (["using\s+" + s for s in c_sharp])
c_sharp_all_reg = '|'.join(c_sharp_regex)
# C and C++
c = ['cmocka', 'unity', 'CppuTest', 'embUnit', 'CUnit', 'CuTest', 'check',
'gtest', 'uCUnit', 'munit', 'minunit', 'acutest', 'boost/test',
'UnitTest\+\+', 'cpptest', 'cppunit', 'catch', 'bandit', 'tut']
c_regex = (['#include\s+[<\\\"]' + s + '\.h[>\\\"]'for s in c])
c_all_reg = '|'.join(c_regex)
# PHP
php = ['PHPUnit', 'Codeception', 'Behat', 'PhpSpec', 'Storyplayer', 'Peridot',
'atoum', 'Kahlan', 'vendor/EnhanceTestFramework']
php_regex = (['(include|require|use).+' + s for s in php])
php_all_reg = '|'.join(php_regex)
# Python
python = ['pytest', 'unittest', 'doctest', 'testify', 'nose', 'hypothesis']
python_regex = (['import\s+'+lib+'|from\s+'+lib+'\s+import' for lib in python])
python_all_reg = '|'.join(python_regex)
all_reg = [java_all_reg, perl_all_reg, js_all_reg, c_sharp_all_reg, c_all_reg, php_all_reg, python_all_reg]
final_reg = '|'.join(all_reg)
#### End of regex building ####
def calc_test(commits, author):
"""
Used to investigate how many commits, from a user, modified a unit testing file.
Unix commands are used to achieve a better performance.
The blobs are parsed, looking for unit testing library imports. An alternative would
be using the thruMaps directories or the ClickHouse API, but those options seem slower.
"""
open('modifications.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 5 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# getting every blob from a given commit
query = ('for x in $(echo ' + commit + ' | ~/lookup/getValues c2b | ' +
# splitting it and discarding the newlines and the commit's hash
'awk -v RS="[;\\n]" 1 | tail -n+2); do ' +
# We look up the content's of each blob, and discard the STDERR,
# in the case of trying to look up a blob that does not exist in the database
'echo $x | ~/lookup/showCnt blob 2> /dev/null; done | ' +
# We search for the use of a unit testing library, using the above regex, and
# keeping the first result only, since that is enough to know that the commit contains
# a unit testing file, to make the execution faster
'egrep -m 1 "' + final_reg + '"')
if bash(query): # if contains unit testing lib
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
# at this point we could search the parent's tree for the existence of tests, but this
# would require recursively looking at every directory and parsing every file in the tree, so, due
# to the complexity, we skip it and consider it a modification instead of a possible introduction
f = open("modifications.csv", "a")
print 'modification'
f.write(author + ', ' + 'TEST' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def calc_lang_features(commits, author):
"""
Method used to count the usage of certain languages' good practices and modern approaches.
We parse the diff of a modified file and the content of an introduced file, in order to find those
practices, and we count the extent of the usage. Then, we write to a file, for each commit that
included these features.
"""
lang_features = ['/\*\*', '\\"\\"\\"', '///', # documentation
'^\s*@', 'def.+:.+->', 'using\s+System\.ComponentModel\.DataAnnotations', # assertion
'assert', 'TODO', 'lambda']
# delete contents
open('lang_features.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 5 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# for each blob modified
query = ("for x in $(echo " + commit + " | ssh da4 ~/lookup/cmputeDiff2.perl); do " +
# get the chold and parent blob
"diff_blobs=$(echo $x | awk -v RS=';' 1 | sed -n '3,4 p');" +
# if a parent blob does not exist, the author authored all of the content of the file
"if [ $(echo $diff_blobs|wc -w) -eq 1 ]; then " +
"echo $diff_blobs | ~/lookup/showCnt blob 2> /dev/null; " +
# if a parent blob exists, find the diff, in order to search only the modified lines
"elif [ $(echo $diff_blobs|wc -w) -eq 2 ]; then " +
"vars=( $diff_blobs );" +
# using bash instead of sh in order to use the process substitution,
# to get the modified lines
"/bin/bash -c \"diff <(echo ${vars[0]} | ~/lookup/showCnt blob)" +
" <(echo ${vars[1]} | ~/lookup/showCnt blob)\";" +
"fi;" +
# grep the above practices and discard the lines that were deleted from the parent blob
# (they start with ">" in diff)
"done | egrep \"" + "|".join(lang_features) + "\" | grep -v '^>' | wc -l ")
count_uses = int(bash(query).strip())
if count_uses > 0: # good practice feature is used
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
f = open("lang_features.csv", "a")
print 'lang_f'
f.write(author + ', ' + 'LANG_F' + ', ' + str(time) + ', ' + main_proj + ', ' + str(count_uses) + '\n')
f.close()
print 'wrote: -->', commit
def calculate_metrics(author):
# getting the author's commits
out = bash('echo "'+ author + '" | ~/lookup/getValues a2c')
commits = [x for x in out.strip().split(';')[1:]]
#time1 = current_time()
#calc_CI(commits, author)
#time2 = current_time()
#print 'without diff time is ' + str(time2 - time1)
#calc_CI_diff(commits, author)
#print 'with is ' + str(current_time() - time2)
#calc_test(commits, author)
calc_lang_features(commits, author)
# checking whether the user provided the author
if len(sys.argv) == 1:
sys.exit('No author provided')
calculate_metrics(sys.argv[1])
| search | identifier_name |
code_quality_eval.py | import sys
import os
sys.path.insert(0, '../oscar.py')
import re
from oscar import Project
from oscar import Time_project_info as Proj
import subprocess
from time import time as current_time
start_time = current_time()
def bash(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
return out
def search(hash, type):
"""
Method used to search for a specific blob, commit or tree.
If a tree is searched for, the result is splitted into its components (blobs and directories),
which are again splitted into their mode, hash and name.
In the case of a commit, we split the information string and the tree hash and
parent's commit hash are returned
"""
out = bash('echo ' + hash + ' | ~/lookup/showCnt ' + type)
if type == 'tree':
return [blob.split(';') for blob in out.strip().split('\n')]
if type == 'commit':
splitted = out.split(';')
# the tree and parent commit hashes are the second and third word, respectively
# the commit time is the last word, from which we discard the timezone and cast it to int
return splitted[1], splitted[2], int(splitted[-1].split()[0])
return out
# files used in continuous integration
ci_files = [
'\.gitlab\-ci\.yml', '\.travis\.yml', 'Jenkinsfile', 'buddy\.yml', '\.drone\.yml',
'circle\.yml', '\.circleci', 'bamboo\.yaml', 'codeship\-steps\.yml', '\.teamcity',
'wercker\.yml', 'appveyor\.yml', 'bitrise\.yml', 'codefresh\.yml', 'solano\.yml',
'shippable\.yml', 'phpci\.yml', 'cloudbuild\.yaml'
]
def ci_lookup(tree_hash):
"""
Method used to check the usage of Continuous Integration in a tree, given its hash.
"""
query = 'echo ' + tree_hash + ' | ~/lookup/showCnt tree | egrep "' + '|'.join(ci_files) +'"'
out = bash(query)
"""
# alternate method
blobs = search(tree_hash, 'tree')
index = {'mode':1, 'hash':1, 'name':2}
ci = False
for blob in blobs:
name = blob[index['name']]
hash = blob[index['hash']]
if ((name in ci_files) or
(name in ci_config_dir and ';'+ci_config_dir[name] in search(hash, 'tree'))):
ci = True
break
"""
return bool(out)
def calc_CI_introductions(commits, author):
"""
Alternative way to check_if_introduction, to compare performance.
"""
# using a dictionary that has the commits' hashes as keys,
# so as to not search multiple times for the same commit
CI_checked = {}
# delete contents
open('introductions.csv', 'w').close()
# for every commit, we look up whether the author included a CI file,
# that did not exist in the parent commit
for count, commit in enumerate(commits):
# status update
if (count + 1) % 50 == 0:
print count + 1, ' / ', len(commits)
tree_hash, parent_commit_hash, time = search(commit, 'commit')
if tree_hash not in CI_checked:
CI_checked[tree_hash] = ci_lookup(tree_hash)
# controlling for the case of multiple parent commits
all_parent_CI = False
for parent in parent_commit_hash.split(':'):
# controlling for the case of no parent commits
if parent == '':
break
parent_tree_hash = search(parent, 'commit')[0]
if parent_tree_hash not in CI_checked:
parent_CI = ci_lookup(parent_tree_hash)
CI_checked[parent_tree_hash] = parent_CI
else:
parent_CI = CI_checked[parent_tree_hash]
# checking all the parent commits for the usage of CI
all_parent_CI = all_parent_CI or parent_CI
# if the tree has a CI file, while the parent tree does not, increase the CI score
if CI_checked[tree_hash] and not all_parent_CI:
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
f = open("introductions.csv", "a")
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote'
print (current_time()-start_time)/len(commits), 'seconds per commit'
def check_if_introduction(commit, result):
"""
We check the parent commit to see if its child commit introduced or modified a CI config file.
"""
tree_hash, parent_commit_hash, time = search(commit, 'commit')
# controlling for the case of no parent commits
if parent_commit_hash == '':
return True
# controlling for the case of multiple parent commits
all_parent_CI = False
for parent in parent_commit_hash.split(':'):
parent_tree_hash = search(parent, 'commit')[0]
parent_CI = ci_lookup(parent_tree_hash)
# checking all the parent commits for the usage of CI
all_parent_CI = all_parent_CI or parent_CI
# if the tree has a CI file, while the parent tree does not, it is an introduction
return not all_parent_CI
def calc_CI(commits, author):
"""
Used to investigate how many commits, from a user, modified a CI configuration file.
Unix commands are used for a better performance.
"""
# delete contents
open('modifications.csv', 'w').close()
open('introductions.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 50 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# c2f does seems to result in a tie error, so c2b and b2f is used instead
#getting the blobs
query = ("for x in $(echo " + commit + " | ~/lookup/getValues c2b |" +
# splitting on the semicolon and discarding the newlines
" awk -v RS='[;\\n]' 1 |" +
# discarding the commit's hash (it appears before the blobs' hashes)
" tail -n+2); do" +
# for each blob, we look up it's filename
" echo $x | ~/lookup/getValues b2f;" +
" done |" +
# we discard the first field of the results (blobs' hash)
" cut -d ';' -f2 |" +
# we check whether one of the modified files is a CI configuration file
" egrep '" + "|".join(ci_files) + "'")
result = bash(query)
if result:
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
if check_if_introduction(commit, result):
f = open("introductions.csv", "a")
print 'introduction'
else:
f = open("modifications.csv", "a")
print 'modification'
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def calc_CI_diff(commits, author):
"""
Method written as a faster alternative to calc_CI. It seems to be 30 times faster.
"""
# delete contents
open('modifications.csv', 'w').close()
open('introductions.csv', 'w').close()
for count, commit in enumerate(commits):
#status update
if (count + 1) % 50 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# cmputeDiff2.perl seems to produce junk to the stdout occasionally
diff = bash("echo " + commit + " | ssh da4 ~/lookup/cmputeDiff2.perl")
# if a CI configuration file is in the diff
if re.search("|".join(ci_files), diff):
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
for blob in diff.split():
# looking for the CI config blob and checking if parent blob exists
if re.search("|".join(ci_files), blob):
# if we have both an introduction and a modification
# in the same commit, we count it as an introduction
if blob.endswith(';'):
# if we don't have the parent blob, after the last semicolon,
# it is an introduction
f = open("introductions.csv", "a")
print 'introduction'
else:
f = open("modifications.csv", "a")
print 'modification'
break
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def find_links(author, end_time, method='sh'):
|
#### Start building the regular expression that will be used to search for unit testing libraries,
#### in the commit's blobs ####
# Java
java_lib = ['io.restassured', 'org.openqa.selenium', 'org.spockframework', 'jtest',
'org.springframework.test', 'org.dbunit', 'org.jwalk', 'org.mockito', 'org.junit']
java_regex = (['import\s+'+s.replace('.', '\.') for s in java_lib])
java_all_reg = '|'.join(java_regex)
# Perl
perl_all_reg = 'use\s+Test::'
# Javascript
js = ['assert', 'mocha', 'jasmine', 'ava', 'jest', 'karma', 'storybook', 'tape',
'cypress', 'puppeteer', 'chai', 'qunit', 'sinon', 'casper', 'buster']
js_regex = (["require\([\\\'\\\"]" + s + "[\\\'\\\"]\)" for s in js])
js_all_reg = '|'.join(js_regex)
# C#
c_sharp = ['NUnit', 'Microsoft\.VisualStudio\.TestTools\.UnitTesting',
'Xunit', 'csUnit', 'MbUnit']
c_sharp_regex = (["using\s+" + s for s in c_sharp])
c_sharp_all_reg = '|'.join(c_sharp_regex)
# C and C++
c = ['cmocka', 'unity', 'CppuTest', 'embUnit', 'CUnit', 'CuTest', 'check',
'gtest', 'uCUnit', 'munit', 'minunit', 'acutest', 'boost/test',
'UnitTest\+\+', 'cpptest', 'cppunit', 'catch', 'bandit', 'tut']
c_regex = (['#include\s+[<\\\"]' + s + '\.h[>\\\"]'for s in c])
c_all_reg = '|'.join(c_regex)
# PHP
php = ['PHPUnit', 'Codeception', 'Behat', 'PhpSpec', 'Storyplayer', 'Peridot',
'atoum', 'Kahlan', 'vendor/EnhanceTestFramework']
php_regex = (['(include|require|use).+' + s for s in php])
php_all_reg = '|'.join(php_regex)
# Python
python = ['pytest', 'unittest', 'doctest', 'testify', 'nose', 'hypothesis']
python_regex = (['import\s+'+lib+'|from\s+'+lib+'\s+import' for lib in python])
python_all_reg = '|'.join(python_regex)
all_reg = [java_all_reg, perl_all_reg, js_all_reg, c_sharp_all_reg, c_all_reg, php_all_reg, python_all_reg]
final_reg = '|'.join(all_reg)
#### End of regex building ####
def calc_test(commits, author):
"""
Used to investigate how many commits, from a user, modified a unit testing file.
Unix commands are used to achieve a better performance.
The blobs are parsed, looking for unit testing library imports. An alternative would
be using the thruMaps directories or the ClickHouse API, but those options seem slower.
"""
open('modifications.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 5 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# getting every blob from a given commit
query = ('for x in $(echo ' + commit + ' | ~/lookup/getValues c2b | ' +
# splitting it and discarding the newlines and the commit's hash
'awk -v RS="[;\\n]" 1 | tail -n+2); do ' +
# We look up the content's of each blob, and discard the STDERR,
# in the case of trying to look up a blob that does not exist in the database
'echo $x | ~/lookup/showCnt blob 2> /dev/null; done | ' +
# We search for the use of a unit testing library, using the above regex, and
# keeping the first result only, since that is enough to know that the commit contains
# a unit testing file, to make the execution faster
'egrep -m 1 "' + final_reg + '"')
if bash(query): # if contains unit testing lib
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
# at this point we could search the parent's tree for the existence of tests, but this
# would require recursively looking at every directory and parsing every file in the tree, so, due
# to the complexity, we skip it and consider it a modification instead of a possible introduction
f = open("modifications.csv", "a")
print 'modification'
f.write(author + ', ' + 'TEST' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def calc_lang_features(commits, author):
"""
Method used to count the usage of certain languages' good practices and modern approaches.
We parse the diff of a modified file and the content of an introduced file, in order to find those
practices, and we count the extent of the usage. Then, we write to a file, for each commit that
included these features.
"""
lang_features = ['/\*\*', '\\"\\"\\"', '///', # documentation
'^\s*@', 'def.+:.+->', 'using\s+System\.ComponentModel\.DataAnnotations', # assertion
'assert', 'TODO', 'lambda']
# delete contents
open('lang_features.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 5 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# for each blob modified
query = ("for x in $(echo " + commit + " | ssh da4 ~/lookup/cmputeDiff2.perl); do " +
# get the chold and parent blob
"diff_blobs=$(echo $x | awk -v RS=';' 1 | sed -n '3,4 p');" +
# if a parent blob does not exist, the author authored all of the content of the file
"if [ $(echo $diff_blobs|wc -w) -eq 1 ]; then " +
"echo $diff_blobs | ~/lookup/showCnt blob 2> /dev/null; " +
# if a parent blob exists, find the diff, in order to search only the modified lines
"elif [ $(echo $diff_blobs|wc -w) -eq 2 ]; then " +
"vars=( $diff_blobs );" +
# using bash instead of sh in order to use the process substitution,
# to get the modified lines
"/bin/bash -c \"diff <(echo ${vars[0]} | ~/lookup/showCnt blob)" +
" <(echo ${vars[1]} | ~/lookup/showCnt blob)\";" +
"fi;" +
# grep the above practices and discard the lines that were deleted from the parent blob
# (they start with ">" in diff)
"done | egrep \"" + "|".join(lang_features) + "\" | grep -v '^>' | wc -l ")
count_uses = int(bash(query).strip())
if count_uses > 0: # good practice feature is used
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
f = open("lang_features.csv", "a")
print 'lang_f'
f.write(author + ', ' + 'LANG_F' + ', ' + str(time) + ', ' + main_proj + ', ' + str(count_uses) + '\n')
f.close()
print 'wrote: -->', commit
def calculate_metrics(author):
# getting the author's commits
out = bash('echo "'+ author + '" | ~/lookup/getValues a2c')
commits = [x for x in out.strip().split(';')[1:]]
#time1 = current_time()
#calc_CI(commits, author)
#time2 = current_time()
#print 'without diff time is ' + str(time2 - time1)
#calc_CI_diff(commits, author)
#print 'with is ' + str(current_time() - time2)
#calc_test(commits, author)
calc_lang_features(commits, author)
# checking whether the user provided the author
if len(sys.argv) == 1:
sys.exit('No author provided')
calculate_metrics(sys.argv[1])
| """
Method used to find the neighbours of a given author, i.e. the authors that
affected the given author's use of good coding practices.
A timestamp is also given to define the time till which we find the connections.
"""
out = bash('echo "'+ author + '" | ~/lookup/getValues a2P')
pr = [x for x in out.strip().split(';')[1:]]
if method == 'pr_timeline':
p = Proj()
for project in pr:
rows = p.project_timeline(['time','repo', 'author'], project)
for row in rows:
print row | identifier_body |
code_quality_eval.py | import sys
import os
sys.path.insert(0, '../oscar.py')
import re
from oscar import Project
from oscar import Time_project_info as Proj
import subprocess
from time import time as current_time
start_time = current_time()
def bash(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
return out
def search(hash, type):
"""
Method used to search for a specific blob, commit or tree. | which are again splitted into their mode, hash and name.
In the case of a commit, we split the information string and the tree hash and
parent's commit hash are returned
"""
out = bash('echo ' + hash + ' | ~/lookup/showCnt ' + type)
if type == 'tree':
return [blob.split(';') for blob in out.strip().split('\n')]
if type == 'commit':
splitted = out.split(';')
# the tree and parent commit hashes are the second and third word, respectively
# the commit time is the last word, from which we discard the timezone and cast it to int
return splitted[1], splitted[2], int(splitted[-1].split()[0])
return out
# files used in continuous integration
ci_files = [
'\.gitlab\-ci\.yml', '\.travis\.yml', 'Jenkinsfile', 'buddy\.yml', '\.drone\.yml',
'circle\.yml', '\.circleci', 'bamboo\.yaml', 'codeship\-steps\.yml', '\.teamcity',
'wercker\.yml', 'appveyor\.yml', 'bitrise\.yml', 'codefresh\.yml', 'solano\.yml',
'shippable\.yml', 'phpci\.yml', 'cloudbuild\.yaml'
]
def ci_lookup(tree_hash):
"""
Method used to check the usage of Continuous Integration in a tree, given its hash.
"""
query = 'echo ' + tree_hash + ' | ~/lookup/showCnt tree | egrep "' + '|'.join(ci_files) +'"'
out = bash(query)
"""
# alternate method
blobs = search(tree_hash, 'tree')
index = {'mode':1, 'hash':1, 'name':2}
ci = False
for blob in blobs:
name = blob[index['name']]
hash = blob[index['hash']]
if ((name in ci_files) or
(name in ci_config_dir and ';'+ci_config_dir[name] in search(hash, 'tree'))):
ci = True
break
"""
return bool(out)
def calc_CI_introductions(commits, author):
"""
Alternative way to check_if_introduction, to compare performance.
"""
# using a dictionary that has the commits' hashes as keys,
# so as to not search multiple times for the same commit
CI_checked = {}
# delete contents
open('introductions.csv', 'w').close()
# for every commit, we look up whether the author included a CI file,
# that did not exist in the parent commit
for count, commit in enumerate(commits):
# status update
if (count + 1) % 50 == 0:
print count + 1, ' / ', len(commits)
tree_hash, parent_commit_hash, time = search(commit, 'commit')
if tree_hash not in CI_checked:
CI_checked[tree_hash] = ci_lookup(tree_hash)
# controlling for the case of multiple parent commits
all_parent_CI = False
for parent in parent_commit_hash.split(':'):
# controlling for the case of no parent commits
if parent == '':
break
parent_tree_hash = search(parent, 'commit')[0]
if parent_tree_hash not in CI_checked:
parent_CI = ci_lookup(parent_tree_hash)
CI_checked[parent_tree_hash] = parent_CI
else:
parent_CI = CI_checked[parent_tree_hash]
# checking all the parent commits for the usage of CI
all_parent_CI = all_parent_CI or parent_CI
# if the tree has a CI file, while the parent tree does not, increase the CI score
if CI_checked[tree_hash] and not all_parent_CI:
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
f = open("introductions.csv", "a")
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote'
print (current_time()-start_time)/len(commits), 'seconds per commit'
def check_if_introduction(commit, result):
"""
We check the parent commit to see if its child commit introduced or modified a CI config file.
"""
tree_hash, parent_commit_hash, time = search(commit, 'commit')
# controlling for the case of no parent commits
if parent_commit_hash == '':
return True
# controlling for the case of multiple parent commits
all_parent_CI = False
for parent in parent_commit_hash.split(':'):
parent_tree_hash = search(parent, 'commit')[0]
parent_CI = ci_lookup(parent_tree_hash)
# checking all the parent commits for the usage of CI
all_parent_CI = all_parent_CI or parent_CI
# if the tree has a CI file, while the parent tree does not, it is an introduction
return not all_parent_CI
def calc_CI(commits, author):
"""
Used to investigate how many commits, from a user, modified a CI configuration file.
Unix commands are used for a better performance.
"""
# delete contents
open('modifications.csv', 'w').close()
open('introductions.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 50 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# c2f does seems to result in a tie error, so c2b and b2f is used instead
#getting the blobs
query = ("for x in $(echo " + commit + " | ~/lookup/getValues c2b |" +
# splitting on the semicolon and discarding the newlines
" awk -v RS='[;\\n]' 1 |" +
# discarding the commit's hash (it appears before the blobs' hashes)
" tail -n+2); do" +
# for each blob, we look up it's filename
" echo $x | ~/lookup/getValues b2f;" +
" done |" +
# we discard the first field of the results (blobs' hash)
" cut -d ';' -f2 |" +
# we check whether one of the modified files is a CI configuration file
" egrep '" + "|".join(ci_files) + "'")
result = bash(query)
if result:
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
if check_if_introduction(commit, result):
f = open("introductions.csv", "a")
print 'introduction'
else:
f = open("modifications.csv", "a")
print 'modification'
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def calc_CI_diff(commits, author):
"""
Method written as a faster alternative to calc_CI. It seems to be 30 times faster.
"""
# delete contents
open('modifications.csv', 'w').close()
open('introductions.csv', 'w').close()
for count, commit in enumerate(commits):
#status update
if (count + 1) % 50 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# cmputeDiff2.perl seems to produce junk to the stdout occasionally
diff = bash("echo " + commit + " | ssh da4 ~/lookup/cmputeDiff2.perl")
# if a CI configuration file is in the diff
if re.search("|".join(ci_files), diff):
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
for blob in diff.split():
# looking for the CI config blob and checking if parent blob exists
if re.search("|".join(ci_files), blob):
# if we have both an introduction and a modification
# in the same commit, we count it as an introduction
if blob.endswith(';'):
# if we don't have the parent blob, after the last semicolon,
# it is an introduction
f = open("introductions.csv", "a")
print 'introduction'
else:
f = open("modifications.csv", "a")
print 'modification'
break
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def find_links(author, end_time, method='sh'):
"""
Method used to find the neighbours of a given author, i.e. the authors that
affected the given author's use of good coding practices.
A timestamp is also given to define the time till which we find the connections.
"""
out = bash('echo "'+ author + '" | ~/lookup/getValues a2P')
pr = [x for x in out.strip().split(';')[1:]]
if method == 'pr_timeline':
p = Proj()
for project in pr:
rows = p.project_timeline(['time','repo', 'author'], project)
for row in rows:
print row
#### Start building the regular expression that will be used to search for unit testing libraries,
#### in the commit's blobs ####
# Java
java_lib = ['io.restassured', 'org.openqa.selenium', 'org.spockframework', 'jtest',
'org.springframework.test', 'org.dbunit', 'org.jwalk', 'org.mockito', 'org.junit']
java_regex = (['import\s+'+s.replace('.', '\.') for s in java_lib])
java_all_reg = '|'.join(java_regex)
# Perl
perl_all_reg = 'use\s+Test::'
# Javascript
js = ['assert', 'mocha', 'jasmine', 'ava', 'jest', 'karma', 'storybook', 'tape',
'cypress', 'puppeteer', 'chai', 'qunit', 'sinon', 'casper', 'buster']
js_regex = (["require\([\\\'\\\"]" + s + "[\\\'\\\"]\)" for s in js])
js_all_reg = '|'.join(js_regex)
# C#
c_sharp = ['NUnit', 'Microsoft\.VisualStudio\.TestTools\.UnitTesting',
'Xunit', 'csUnit', 'MbUnit']
c_sharp_regex = (["using\s+" + s for s in c_sharp])
c_sharp_all_reg = '|'.join(c_sharp_regex)
# C and C++
c = ['cmocka', 'unity', 'CppuTest', 'embUnit', 'CUnit', 'CuTest', 'check',
'gtest', 'uCUnit', 'munit', 'minunit', 'acutest', 'boost/test',
'UnitTest\+\+', 'cpptest', 'cppunit', 'catch', 'bandit', 'tut']
c_regex = (['#include\s+[<\\\"]' + s + '\.h[>\\\"]'for s in c])
c_all_reg = '|'.join(c_regex)
# PHP
php = ['PHPUnit', 'Codeception', 'Behat', 'PhpSpec', 'Storyplayer', 'Peridot',
'atoum', 'Kahlan', 'vendor/EnhanceTestFramework']
php_regex = (['(include|require|use).+' + s for s in php])
php_all_reg = '|'.join(php_regex)
# Python
python = ['pytest', 'unittest', 'doctest', 'testify', 'nose', 'hypothesis']
python_regex = (['import\s+'+lib+'|from\s+'+lib+'\s+import' for lib in python])
python_all_reg = '|'.join(python_regex)
all_reg = [java_all_reg, perl_all_reg, js_all_reg, c_sharp_all_reg, c_all_reg, php_all_reg, python_all_reg]
final_reg = '|'.join(all_reg)
#### End of regex building ####
def calc_test(commits, author):
"""
Used to investigate how many commits, from a user, modified a unit testing file.
Unix commands are used to achieve a better performance.
The blobs are parsed, looking for unit testing library imports. An alternative would
be using the thruMaps directories or the ClickHouse API, but those options seem slower.
"""
open('modifications.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 5 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# getting every blob from a given commit
query = ('for x in $(echo ' + commit + ' | ~/lookup/getValues c2b | ' +
# splitting it and discarding the newlines and the commit's hash
'awk -v RS="[;\\n]" 1 | tail -n+2); do ' +
# We look up the content's of each blob, and discard the STDERR,
# in the case of trying to look up a blob that does not exist in the database
'echo $x | ~/lookup/showCnt blob 2> /dev/null; done | ' +
# We search for the use of a unit testing library, using the above regex, and
# keeping the first result only, since that is enough to know that the commit contains
# a unit testing file, to make the execution faster
'egrep -m 1 "' + final_reg + '"')
if bash(query): # if contains unit testing lib
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
# at this point we could search the parent's tree for the existence of tests, but this
# would require recursively looking at every directory and parsing every file in the tree, so, due
# to the complexity, we skip it and consider it a modification instead of a possible introduction
f = open("modifications.csv", "a")
print 'modification'
f.write(author + ', ' + 'TEST' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def calc_lang_features(commits, author):
"""
Method used to count the usage of certain languages' good practices and modern approaches.
We parse the diff of a modified file and the content of an introduced file, in order to find those
practices, and we count the extent of the usage. Then, we write to a file, for each commit that
included these features.
"""
lang_features = ['/\*\*', '\\"\\"\\"', '///', # documentation
'^\s*@', 'def.+:.+->', 'using\s+System\.ComponentModel\.DataAnnotations', # assertion
'assert', 'TODO', 'lambda']
# delete contents
open('lang_features.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 5 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# for each blob modified
query = ("for x in $(echo " + commit + " | ssh da4 ~/lookup/cmputeDiff2.perl); do " +
# get the chold and parent blob
"diff_blobs=$(echo $x | awk -v RS=';' 1 | sed -n '3,4 p');" +
# if a parent blob does not exist, the author authored all of the content of the file
"if [ $(echo $diff_blobs|wc -w) -eq 1 ]; then " +
"echo $diff_blobs | ~/lookup/showCnt blob 2> /dev/null; " +
# if a parent blob exists, find the diff, in order to search only the modified lines
"elif [ $(echo $diff_blobs|wc -w) -eq 2 ]; then " +
"vars=( $diff_blobs );" +
# using bash instead of sh in order to use the process substitution,
# to get the modified lines
"/bin/bash -c \"diff <(echo ${vars[0]} | ~/lookup/showCnt blob)" +
" <(echo ${vars[1]} | ~/lookup/showCnt blob)\";" +
"fi;" +
# grep the above practices and discard the lines that were deleted from the parent blob
# (they start with ">" in diff)
"done | egrep \"" + "|".join(lang_features) + "\" | grep -v '^>' | wc -l ")
count_uses = int(bash(query).strip())
if count_uses > 0: # good practice feature is used
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
f = open("lang_features.csv", "a")
print 'lang_f'
f.write(author + ', ' + 'LANG_F' + ', ' + str(time) + ', ' + main_proj + ', ' + str(count_uses) + '\n')
f.close()
print 'wrote: -->', commit
def calculate_metrics(author):
# getting the author's commits
out = bash('echo "'+ author + '" | ~/lookup/getValues a2c')
commits = [x for x in out.strip().split(';')[1:]]
#time1 = current_time()
#calc_CI(commits, author)
#time2 = current_time()
#print 'without diff time is ' + str(time2 - time1)
#calc_CI_diff(commits, author)
#print 'with is ' + str(current_time() - time2)
#calc_test(commits, author)
calc_lang_features(commits, author)
# checking whether the user provided the author
if len(sys.argv) == 1:
sys.exit('No author provided')
calculate_metrics(sys.argv[1]) |
If a tree is searched for, the result is splitted into its components (blobs and directories), | random_line_split |
code_quality_eval.py | import sys
import os
sys.path.insert(0, '../oscar.py')
import re
from oscar import Project
from oscar import Time_project_info as Proj
import subprocess
from time import time as current_time
start_time = current_time()
def bash(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
return out
def search(hash, type):
"""
Method used to search for a specific blob, commit or tree.
If a tree is searched for, the result is splitted into its components (blobs and directories),
which are again splitted into their mode, hash and name.
In the case of a commit, we split the information string and the tree hash and
parent's commit hash are returned
"""
out = bash('echo ' + hash + ' | ~/lookup/showCnt ' + type)
if type == 'tree':
return [blob.split(';') for blob in out.strip().split('\n')]
if type == 'commit':
splitted = out.split(';')
# the tree and parent commit hashes are the second and third word, respectively
# the commit time is the last word, from which we discard the timezone and cast it to int
return splitted[1], splitted[2], int(splitted[-1].split()[0])
return out
# files used in continuous integration
ci_files = [
'\.gitlab\-ci\.yml', '\.travis\.yml', 'Jenkinsfile', 'buddy\.yml', '\.drone\.yml',
'circle\.yml', '\.circleci', 'bamboo\.yaml', 'codeship\-steps\.yml', '\.teamcity',
'wercker\.yml', 'appveyor\.yml', 'bitrise\.yml', 'codefresh\.yml', 'solano\.yml',
'shippable\.yml', 'phpci\.yml', 'cloudbuild\.yaml'
]
def ci_lookup(tree_hash):
"""
Method used to check the usage of Continuous Integration in a tree, given its hash.
"""
query = 'echo ' + tree_hash + ' | ~/lookup/showCnt tree | egrep "' + '|'.join(ci_files) +'"'
out = bash(query)
"""
# alternate method
blobs = search(tree_hash, 'tree')
index = {'mode':1, 'hash':1, 'name':2}
ci = False
for blob in blobs:
name = blob[index['name']]
hash = blob[index['hash']]
if ((name in ci_files) or
(name in ci_config_dir and ';'+ci_config_dir[name] in search(hash, 'tree'))):
ci = True
break
"""
return bool(out)
def calc_CI_introductions(commits, author):
"""
Alternative way to check_if_introduction, to compare performance.
"""
# using a dictionary that has the commits' hashes as keys,
# so as to not search multiple times for the same commit
CI_checked = {}
# delete contents
open('introductions.csv', 'w').close()
# for every commit, we look up whether the author included a CI file,
# that did not exist in the parent commit
for count, commit in enumerate(commits):
# status update
if (count + 1) % 50 == 0:
print count + 1, ' / ', len(commits)
tree_hash, parent_commit_hash, time = search(commit, 'commit')
if tree_hash not in CI_checked:
|
# controlling for the case of multiple parent commits
all_parent_CI = False
for parent in parent_commit_hash.split(':'):
# controlling for the case of no parent commits
if parent == '':
break
parent_tree_hash = search(parent, 'commit')[0]
if parent_tree_hash not in CI_checked:
parent_CI = ci_lookup(parent_tree_hash)
CI_checked[parent_tree_hash] = parent_CI
else:
parent_CI = CI_checked[parent_tree_hash]
# checking all the parent commits for the usage of CI
all_parent_CI = all_parent_CI or parent_CI
# if the tree has a CI file, while the parent tree does not, increase the CI score
if CI_checked[tree_hash] and not all_parent_CI:
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
f = open("introductions.csv", "a")
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote'
print (current_time()-start_time)/len(commits), 'seconds per commit'
def check_if_introduction(commit, result):
"""
We check the parent commit to see if its child commit introduced or modified a CI config file.
"""
tree_hash, parent_commit_hash, time = search(commit, 'commit')
# controlling for the case of no parent commits
if parent_commit_hash == '':
return True
# controlling for the case of multiple parent commits
all_parent_CI = False
for parent in parent_commit_hash.split(':'):
parent_tree_hash = search(parent, 'commit')[0]
parent_CI = ci_lookup(parent_tree_hash)
# checking all the parent commits for the usage of CI
all_parent_CI = all_parent_CI or parent_CI
# if the tree has a CI file, while the parent tree does not, it is an introduction
return not all_parent_CI
def calc_CI(commits, author):
"""
Used to investigate how many commits, from a user, modified a CI configuration file.
Unix commands are used for a better performance.
"""
# delete contents
open('modifications.csv', 'w').close()
open('introductions.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 50 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# c2f does seems to result in a tie error, so c2b and b2f is used instead
#getting the blobs
query = ("for x in $(echo " + commit + " | ~/lookup/getValues c2b |" +
# splitting on the semicolon and discarding the newlines
" awk -v RS='[;\\n]' 1 |" +
# discarding the commit's hash (it appears before the blobs' hashes)
" tail -n+2); do" +
# for each blob, we look up it's filename
" echo $x | ~/lookup/getValues b2f;" +
" done |" +
# we discard the first field of the results (blobs' hash)
" cut -d ';' -f2 |" +
# we check whether one of the modified files is a CI configuration file
" egrep '" + "|".join(ci_files) + "'")
result = bash(query)
if result:
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
if check_if_introduction(commit, result):
f = open("introductions.csv", "a")
print 'introduction'
else:
f = open("modifications.csv", "a")
print 'modification'
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def calc_CI_diff(commits, author):
"""
Method written as a faster alternative to calc_CI. It seems to be 30 times faster.
"""
# delete contents
open('modifications.csv', 'w').close()
open('introductions.csv', 'w').close()
for count, commit in enumerate(commits):
#status update
if (count + 1) % 50 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# cmputeDiff2.perl seems to produce junk to the stdout occasionally
diff = bash("echo " + commit + " | ssh da4 ~/lookup/cmputeDiff2.perl")
# if a CI configuration file is in the diff
if re.search("|".join(ci_files), diff):
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
for blob in diff.split():
# looking for the CI config blob and checking if parent blob exists
if re.search("|".join(ci_files), blob):
# if we have both an introduction and a modification
# in the same commit, we count it as an introduction
if blob.endswith(';'):
# if we don't have the parent blob, after the last semicolon,
# it is an introduction
f = open("introductions.csv", "a")
print 'introduction'
else:
f = open("modifications.csv", "a")
print 'modification'
break
f.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def find_links(author, end_time, method='sh'):
"""
Method used to find the neighbours of a given author, i.e. the authors that
affected the given author's use of good coding practices.
A timestamp is also given to define the time till which we find the connections.
"""
out = bash('echo "'+ author + '" | ~/lookup/getValues a2P')
pr = [x for x in out.strip().split(';')[1:]]
if method == 'pr_timeline':
p = Proj()
for project in pr:
rows = p.project_timeline(['time','repo', 'author'], project)
for row in rows:
print row
#### Start building the regular expression that will be used to search for unit testing libraries,
#### in the commit's blobs ####
# Java
java_lib = ['io.restassured', 'org.openqa.selenium', 'org.spockframework', 'jtest',
'org.springframework.test', 'org.dbunit', 'org.jwalk', 'org.mockito', 'org.junit']
java_regex = (['import\s+'+s.replace('.', '\.') for s in java_lib])
java_all_reg = '|'.join(java_regex)
# Perl
perl_all_reg = 'use\s+Test::'
# Javascript
js = ['assert', 'mocha', 'jasmine', 'ava', 'jest', 'karma', 'storybook', 'tape',
'cypress', 'puppeteer', 'chai', 'qunit', 'sinon', 'casper', 'buster']
js_regex = (["require\([\\\'\\\"]" + s + "[\\\'\\\"]\)" for s in js])
js_all_reg = '|'.join(js_regex)
# C#
c_sharp = ['NUnit', 'Microsoft\.VisualStudio\.TestTools\.UnitTesting',
'Xunit', 'csUnit', 'MbUnit']
c_sharp_regex = (["using\s+" + s for s in c_sharp])
c_sharp_all_reg = '|'.join(c_sharp_regex)
# C and C++
c = ['cmocka', 'unity', 'CppuTest', 'embUnit', 'CUnit', 'CuTest', 'check',
'gtest', 'uCUnit', 'munit', 'minunit', 'acutest', 'boost/test',
'UnitTest\+\+', 'cpptest', 'cppunit', 'catch', 'bandit', 'tut']
c_regex = (['#include\s+[<\\\"]' + s + '\.h[>\\\"]'for s in c])
c_all_reg = '|'.join(c_regex)
# PHP
php = ['PHPUnit', 'Codeception', 'Behat', 'PhpSpec', 'Storyplayer', 'Peridot',
'atoum', 'Kahlan', 'vendor/EnhanceTestFramework']
php_regex = (['(include|require|use).+' + s for s in php])
php_all_reg = '|'.join(php_regex)
# Python
python = ['pytest', 'unittest', 'doctest', 'testify', 'nose', 'hypothesis']
python_regex = (['import\s+'+lib+'|from\s+'+lib+'\s+import' for lib in python])
python_all_reg = '|'.join(python_regex)
all_reg = [java_all_reg, perl_all_reg, js_all_reg, c_sharp_all_reg, c_all_reg, php_all_reg, python_all_reg]
final_reg = '|'.join(all_reg)
#### End of regex building ####
def calc_test(commits, author):
"""
Used to investigate how many commits, from a user, modified a unit testing file.
Unix commands are used to achieve a better performance.
The blobs are parsed, looking for unit testing library imports. An alternative would
be using the thruMaps directories or the ClickHouse API, but those options seem slower.
"""
open('modifications.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 5 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# getting every blob from a given commit
query = ('for x in $(echo ' + commit + ' | ~/lookup/getValues c2b | ' +
# splitting it and discarding the newlines and the commit's hash
'awk -v RS="[;\\n]" 1 | tail -n+2); do ' +
# We look up the content's of each blob, and discard the STDERR,
# in the case of trying to look up a blob that does not exist in the database
'echo $x | ~/lookup/showCnt blob 2> /dev/null; done | ' +
# We search for the use of a unit testing library, using the above regex, and
# keeping the first result only, since that is enough to know that the commit contains
# a unit testing file, to make the execution faster
'egrep -m 1 "' + final_reg + '"')
if bash(query): # if contains unit testing lib
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
# at this point we could search the parent's tree for the existence of tests, but this
# would require recursively looking at every directory and parsing every file in the tree, so, due
# to the complexity, we skip it and consider it a modification instead of a possible introduction
f = open("modifications.csv", "a")
print 'modification'
f.write(author + ', ' + 'TEST' + ', ' + str(time) + ', ' + main_proj + '\n')
f.close()
print 'wrote: -->', commit
def calc_lang_features(commits, author):
"""
Method used to count the usage of certain languages' good practices and modern approaches.
We parse the diff of a modified file and the content of an introduced file, in order to find those
practices, and we count the extent of the usage. Then, we write to a file, for each commit that
included these features.
"""
lang_features = ['/\*\*', '\\"\\"\\"', '///', # documentation
'^\s*@', 'def.+:.+->', 'using\s+System\.ComponentModel\.DataAnnotations', # assertion
'assert', 'TODO', 'lambda']
# delete contents
open('lang_features.csv', 'w').close()
for count, commit in enumerate(commits):
# status update
if (count + 1) % 5 == 0:
print commit, '.. ..', count + 1, ' / ', len(commits)
# for each blob modified
query = ("for x in $(echo " + commit + " | ssh da4 ~/lookup/cmputeDiff2.perl); do " +
# get the chold and parent blob
"diff_blobs=$(echo $x | awk -v RS=';' 1 | sed -n '3,4 p');" +
# if a parent blob does not exist, the author authored all of the content of the file
"if [ $(echo $diff_blobs|wc -w) -eq 1 ]; then " +
"echo $diff_blobs | ~/lookup/showCnt blob 2> /dev/null; " +
# if a parent blob exists, find the diff, in order to search only the modified lines
"elif [ $(echo $diff_blobs|wc -w) -eq 2 ]; then " +
"vars=( $diff_blobs );" +
# using bash instead of sh in order to use the process substitution,
# to get the modified lines
"/bin/bash -c \"diff <(echo ${vars[0]} | ~/lookup/showCnt blob)" +
" <(echo ${vars[1]} | ~/lookup/showCnt blob)\";" +
"fi;" +
# grep the above practices and discard the lines that were deleted from the parent blob
# (they start with ">" in diff)
"done | egrep \"" + "|".join(lang_features) + "\" | grep -v '^>' | wc -l ")
count_uses = int(bash(query).strip())
if count_uses > 0: # good practice feature is used
out = bash('echo ' + commit + ' | ~/lookup/getValues c2P')
main_proj = out.strip().split(';')[1]
time = search(commit, 'commit')[2]
f = open("lang_features.csv", "a")
print 'lang_f'
f.write(author + ', ' + 'LANG_F' + ', ' + str(time) + ', ' + main_proj + ', ' + str(count_uses) + '\n')
f.close()
print 'wrote: -->', commit
def calculate_metrics(author):
# getting the author's commits
out = bash('echo "'+ author + '" | ~/lookup/getValues a2c')
commits = [x for x in out.strip().split(';')[1:]]
#time1 = current_time()
#calc_CI(commits, author)
#time2 = current_time()
#print 'without diff time is ' + str(time2 - time1)
#calc_CI_diff(commits, author)
#print 'with is ' + str(current_time() - time2)
#calc_test(commits, author)
calc_lang_features(commits, author)
# checking whether the user provided the author
if len(sys.argv) == 1:
sys.exit('No author provided')
calculate_metrics(sys.argv[1])
| CI_checked[tree_hash] = ci_lookup(tree_hash) | conditional_block |
aas220_poster.py | # coding: utf-8
"""
Generate figures that will go on my AAS poster
"""
from __future__ import division
# Standard library
import sys
import os
import cPickle as pickle
# Third-party
#import apwlib.convert as c
import apwlib.geometry as g
import matplotlib
matplotlib.use("WxAgg")
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import pyfits as pf
from sqlalchemy import func
# PTF
from ptf.parameters import *
from ptf.db.DatabaseConnection import *
import ptf.simulation.util as simu
from ptf import PTFLightCurve
import coverageplots
import detectionefficiency as de
title_font_size = 38
label_font_size = 34
tick_font_size = 24
parameter_to_label = {"j" : "J", "k" : "K", "sigma_mu" : r"$\sigma/\mu$", "eta" : r"$\eta$", "delta_chi_squared" : r"$\Delta \chi^2$"}
def survey_coverage():
# PTF:
raw_field_data = pf.open("data/exposureData.fits")[1].data
unq_field_ids = np.unique(raw_field_data.field_id)
ptf_fields = []
for field_id in unq_field_ids:
one_field_data = raw_field_data[raw_field_data.field_id == field_id]
mean_ra = np.mean(one_field_data.ra) / 15.
mean_dec = np.mean(one_field_data.dec)
observations = len(one_field_data) / len(np.unique(one_field_data.ccd_id))
ptf_fields.append(coverageplots.PTFField(mean_ra, mean_dec, id=field_id, number_of_observations=observations))
# OGLE:
high_cadence = np.genfromtxt("data/ogle4_common.txt", names=["ra","dec","l","b"], usecols=[6,7,8,9]).view(np.recarray)
low_cadence = np.genfromtxt("data/ogle4_less_frequent.txt", names=["ra","dec","l","b"], usecols=[6,7,8,9]).view(np.recarray)
ogle_high_cadence_fields = []
for row in high_cadence: ogle_high_cadence_fields.append(coverageplots.OGLEField(row["ra"], row["dec"]))
ogle_low_cadence_fields = []
for row in low_cadence: ogle_low_cadence_fields.append(coverageplots.OGLEField(row["ra"], row["dec"]))
coverage_plot = coverageplots.PTFCoveragePlot(figsize=(30,15), projection="aitoff")
coverage_plot.addFields(ptf_fields, label="PTF", color_by_observations=True)
coverage_plot.addFields(ogle_low_cadence_fields + ogle_high_cadence_fields, label="OGLE-IV", color="c", alpha=0.15)
#coverage_plot.addFields(ogle_high_cadence_fields, label="OGLE-IV - high cadence", color="r", alpha=0.15)
# Now I need to add globular and open clusters to the plot!
open_clusters = np.genfromtxt("data/open_clusters.csv", usecols=[0,1,2,11], dtype=[("name","|S20"),("ra","|S8"), ("dec","|S9"), ("diameter", float)], delimiter=",").view(np.recarray)
for ii,cluster in enumerate(open_clusters):
ra_deg = g.RA(cluster["ra"]).degrees
dec_rad = g.Dec(cluster["dec"]).radians
if ii == 0:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(cluster["diameter"]/60./np.cos(dec_rad)), height=np.radians(cluster["diameter"]/60.), alpha=.4, edgecolor="r", facecolor="r", label="Open Clusters")
else:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(cluster["diameter"]/60./np.cos(dec_rad)), height=np.radians(cluster["diameter"]/60.), alpha=.4, edgecolor="r", facecolor="r")
coverage_plot.axis.add_patch(circle)
"""
globular_clusters = np.genfromtxt("data/allGlobularClusters.txt", dtype=[("r_h",float),("ra","|S11"), ("dec","|S12")], delimiter=",").view(np.recarray)
for ii,cluster in enumerate(globular_clusters):
ra_deg = g.RA(cluster["ra"]).degrees
dec_rad = g.Dec(cluster["dec"]).radians
diameter = 2.*cluster["r_h"]*10
if ii == 0:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(diameter/60./np.cos(dec_rad)), height=np.radians(diameter/60.), alpha=.4, edgecolor="g", facecolor="g", label="Globular Clusters")
else:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(diameter/60./np.cos(dec_rad)), height=np.radians(diameter/60.), alpha=.4, edgecolor="g", facecolor="g")
coverage_plot.axis.add_patch(circle)
"""
coverage_plot.addLegend()
coverage_plot.title.set_fontsize(title_font_size)
legendtext = coverage_plot.legend.get_texts()
plt.setp(legendtext, fontsize=label_font_size) # the legend text fontsize
#plt.show()
coverage_plot.figure.savefig("plots/aas_ptf_coverage.png")
# To be used by the Praesepe timescale distribution plot and the
# detection efficiency
timescale_bins = np.logspace(np.log10(1), np.log10(1000), 100) # from 1 day to 1000 days
def praesepe_timescale_distribution():
filename = "data/praesepeTimeScales.npy"
timescales = np.load(filename)
plt.figure(figsize=(15,15))
plt.hist(timescales, bins=timescale_bins, normed=True)
plt.xscale("log")
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
t = plt.title("Normalized timescale distribution for Praesepe field", size=title_font_size)
t.set_y(1.04)
ax = plt.gca()
for label in ax.get_xticklabels():
label.set_fontsize(tick_font_size)
ax.set_yticklabels([])
plt.savefig("plots/aas_praesepe_timescale.png")
def praesepe_event_rate():
filename = "data/praesepeTimeScales.npy"
timescales = np.load(filename)
global_event_rate = 0.0081 #
# To get the event rate distribution, I have to normalize the timescale dist. so
# the integral from 0 to infinity = global event rate
timescale_pdf, bin_edges = np.histogram(timescales, bins=timescale_bins, density=True)
event_rate_distribution = timescale_pdf*global_event_rate
# Get the Praesepe detection efficiency
filename = "data/praesepe_detection_efficiency.npy"
# Load the simulation results
sim_results = np.load(filename)
# Get the RMS scatter of delta chi-squared for the vanilla light curves
dcs = [x[0] for x in session.query(VariabilityIndices.delta_chi_squared).join(LightCurve).filter(LightCurve.objid < 100000).all()]
sigma = np.std(dcs)
# 2*sigma ~ 300
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[sim_results["delta_chi_squared"] > 2.*sigma]
detections = detections[detections["event_added"] == True]
#detections = sim_results[(sim_results["delta_chi_squared"] > 2.*sigma)]
#detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
detection_efficiency_distribution = tE_counts / total_counts
bin_widths = bin_edges[1:] - bin_edges[:-1]
# Compute number of events!
# - detection_efficiency_distribution is dE/dt_E
# - event_rate_distribution is dN/dt_E
# - bin_widths give us dt_E
N_exp = np.sum(detection_efficiency_distribution * event_rate_distribution / 365. * bin_widths) * 102. # days of Praesepe obs.
# Number of events if we had observed it consistently for 3 years
N_exp_all_survey = np.sum(detection_efficiency_distribution * event_rate_distribution / 365. * bin_widths) * 1095. # days of Praesepe obs.
print "Number of events in our Praesepe sample (102 days): {} +/- {}".format(N_exp, np.sqrt(N_exp))
print "Number of events if we had observed it consistently for 3 years: {} +/- {}".format(N_exp_all_survey, np.sqrt(N_exp_all_survey))
def praesepe_detection_efficiency():
filename = "data/praesepe_detection_efficiency.npy"
if not os.path.exists(filename):
# Select out just the Praesepe light curves (objid < 100000)
light_curve_generator = de.AllPraesepeLightCurves(limit=10000, random=True)
sim_results = de.run_simulation(light_curve_generator, N=100)
np.save(filename, sim_results)
# Load the simulation results
sim_results = np.load(filename)
# Get the RMS scatter of delta chi-squared for the vanilla light curves
dcs = [x[0] for x in session.query(VariabilityIndices.delta_chi_squared).join(LightCurve).filter(LightCurve.objid < 100000).all()]
sigma = np.std(dcs)
# 2*sigma ~ 300
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[sim_results["delta_chi_squared"] > 2.*sigma]
detections = detections[detections["event_added"] == True]
#detections = sim_results[(sim_results["delta_chi_squared"] > 2.*sigma)]
#detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, total_bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
detection_efficiency = tE_counts / total_counts
bin_widths = total_bin_edges[1:] - total_bin_edges[:-1]
#print np.sum(bin_widths*detection_efficiency)
#return
plt.figure(figsize=(15,15))
# Multiply by 2 because we only put events in 50% of the cases
plt.semilogx((total_bin_edges[1:]+total_bin_edges[:-1])/2, tE_counts / total_counts, 'k-', lw=3)
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.ylim(0., 0.75)
t = plt.title("PTF Detection Efficiency for Praesepe Light Curves", size=title_font_size)
t.set_y(1.04)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
plt.tight_layout()
plt.savefig("plots/aas_praesepe_detection_efficiency.png")
def random_praesepe_light_curve():
objid = np.random.randint(82415)
try:
lc = session.query(LightCurve).filter(LightCurve.objid == objid).one()
except:
lc = session.query(LightCurve).filter(LightCurve.objid == 101).one()
return lc
def survey_detection_effieciency():
""" Here I want a figure that shows how the detection efficiency
changes for uniform, random, and clumpy observations
"""
baseline = 365 #days
# TODO: Rerun with 1024, add praesepe line
max_num_observations = 1024
min_num_observations = 16
num_clumps = 4
num_iterations = 10000
if not os.path.exists("data/aas_survey_detection_efficiency.pickle"):
data_dict = {"clumpy" : {1. : [], 10. : [], 100 : []}, "uniform" : {1. : [], 10. : [], 100 : []}}
for timescale in [1., 10., 100.]:
for sampling in ["clumpy", "uniform"]: #, "random"]:
if sampling == "random":
mjd = np.random.random(max_num_observations)*baseline
elif sampling == "clumpy":
sparse_samples = np.random.random(max_num_observations/2)*baseline
clumps = []
days = []
sum = 0.
pts_per_clump = max_num_observations / 2 / num_clumps
for ii in range(num_clumps):
day = np.random.randint(365)
if day in days: continue
days.append(day)
clumpy_samples = np.linspace(day+0.1, day+0.6, pts_per_clump)
clumps.append(clumpy_samples)
clumps.append(sparse_samples)
mjd = np.concatenate(tuple(clumps))
plt.plot(mjd, [1.]*len(mjd), 'ro', alpha=0.4)
plt.show()
elif sampling == "uniform":
mjd = np.linspace(0., baseline, max_num_observations)
for jj in range(num_iterations):
lc = random_praesepe_light_curve()
if len(lc.mag) < 100: continue
dupe_mags = np.array(lc.mag*15)
dupe_err = np.array(list(lc.error)*15)
shuffled_idx = np.arange(0, len(dupe_mags))
np.random.shuffle(shuffled_idx)
mags = dupe_mags[shuffled_idx]
err = dupe_err[shuffled_idx]
sim_light_curve = simu.SimulatedLightCurve(mjd=mjd, mag=mags[:len(mjd)], error=err[:len(mjd)])
sim_light_curve.addMicrolensingEvent(tE=timescale)
#sim_light_curve.plot()
delta_chi_squareds = []
sim_mjd = sim_light_curve.mjd
sim_mag = sim_light_curve.mag
sim_err = sim_light_curve.error
while True:
if len(sim_mjd) < min_num_observations: break
dcs = simu.compute_delta_chi_squared((sim_mjd, sim_mag, sim_err), force_fit=True)
delta_chi_squareds.append(dcs)
prune = np.arange(len(sim_mjd))
np.random.shuffle(prune)
prune = prune[::2]
sim_mjd = sim_mjd[prune]
sim_mag = sim_mag[prune]
sim_err = sim_err[prune]
data_dict[sampling][timescale].append(delta_chi_squareds)
f = open("data/aas_survey_detection_efficiency.pickle", "w")
pickle.dump(data_dict, f)
f.close()
f = open("data/aas_survey_detection_efficiency.pickle", "r")
data_dict = pickle.load(f)
# Plotting stuff
plt.figure(figsize=(15,15))
dcs_cutoff = 300.
num_observations = [2**x for x in range(int(np.log2(max_num_observations)), int(np.log2(min_num_observations))-1, -1)]
linestyles = {"uniform" : "--", "clumpy" : "-"}
linecolors = {1. : "k", 10. : "r", 100. : "c"}
for sampling in data_dict.keys():
for timescale in data_dict[sampling].keys():
data = np.array(data_dict[sampling][timescale])
efficiencies = []
for col,num_obs in enumerate(num_observations):
efficiencies.append(np.sum(data[:,col] > dcs_cutoff) / len(data[:,col]))
plt.plot(np.log2(num_observations), efficiencies, ls=linestyles[sampling], color=linecolors[timescale], label=r"$t_E={}$ day, {} sampling".format(int(timescale), sampling), lw=3)
#plt.axvline(np.log2(625.), c="g", ls="--", lw=2, label="PTF Praesepe fields")
plt.xlabel("Number of Observations / 1 year", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.title("Simulated Detection Efficiency for\nDifferent Sampling Patterns", size=title_font_size)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
ax.set_xticklabels(num_observations[::-1])
legend = plt.legend(loc="upper left", shadow=True, fancybox=True)
legendtext = legend.get_texts()
plt.setp(legendtext, fontsize=tick_font_size) # the legend text fontsize
plt.tight_layout()
#plt.show()
plt.savefig("plots/aas_survey_detection_efficiency.png")
def variability_indices():
import PraesepeLightCurves as plc
# TODO: Sample timescale from the distribution that Amanda will send me
# TODO: Fix legend in post-editing
plc.aas_figure()
def variability_indices_detection_efficiency():
""" This figure should show the detection efficiency curve for the Praesepe
data for each variability index (cut at 2-sigma)
"""
filename = "data/praesepe_detection_efficiency.npy"
# Load the simulation results
sim_results = np.load(filename)
var_indices = session.query(VariabilityIndices).join(LightCurve).filter(LightCurve.objid < 100000).all()
styles = [(3,"-."), (3,":"), (3,"--"), (1.5,"--"), (2,"-")]
colors = ["c", "m", "g", "y", "k"]
plt.figure(figsize=(15,15))
for ii,idx in enumerate(["j", "k", "eta", "sigma_mu", "delta_chi_squared"]):
values = [getattr(x, idx) for x in var_indices]
sigma = np.std(values)
mu = np.mean(values)
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[(np.fabs(sim_results[idx]) > (mu + 2.*sigma)) | (np.fabs(sim_results[idx]) < (mu - 2.*sigma))]
detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, total_bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
lw,ls = styles[ii]
plt.semilogx((total_bin_edges[1:]+total_bin_edges[:-1])/2, tE_counts / total_counts, c=colors[ii], lw=lw, label=r"{}".format(parameter_to_label[idx]), ls=ls)
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.ylim(0., 1.0)
t = plt.title("PTF Detection Efficiency for Praesepe Light Curves", size=title_font_size)
t.set_y(1.04)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
leg = plt.legend(shadow=True, fancybox=True)
legendtext = leg.get_texts()
plt.setp(legendtext, fontsize=label_font_size)
plt.tight_layout()
plt.savefig("plots/aas_var_indices_detection_efficiency.png")
#plt.show()
def systematics_552():
# Bad1 http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/17/f2/c6/p13/v1/PTF_201004172696_i_p_scie_t062817_u011575385_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
# Bad2 http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/17/f2/c6/p13/v1/PTF_201004173180_i_p_scie_t073759_u011575280_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
# Good http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/07/f2/c6/p13/v1/PTF_201004071439_i_p_scie_t032714_u011539562_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
mjd_offset = 54832
lc1 = session.query(LightCurve).filter(LightCurve.objid == 552).one()
bad_light_curve = PTFLightCurve.fromDBLightCurve(lc1)
light_curves = session.query(LightCurve).filter(func.q3c_radial_query(LightCurve.ra, LightCurve.dec, lc1.ra, lc1.dec,30/3600.)).all()
bad_obs1 = 55303.26964
bad_obs2 = 55303.31804
good_obs = 55293.14391
# seeing, airmass, filename, mjd
imlist = np.genfromtxt("data/aas_552_imagelist.txt", skiprows=4, usecols=[11,12,20,25], dtype=[("seeing", float), ("airmass", float), ("filename", "|S100"), ("mjd", float)])
idx_sort = np.argsort(imlist["mjd"])
imlist = imlist[idx_sort]
print "Bad1:", imlist["filename"][imlist["mjd"] == 55303.26964]
print "Bad2:", imlist["filename"][imlist["mjd"] == 55303.31804]
print "Good:", imlist["filename"][imlist["mjd"] == 55293.14391]
return
plt.plot(imlist["mjd"], imlist["seeing"], "r.")
plt.show() | # http://kanaloa.ipac.caltech.edu/ibe/search/ptf/dev/process?POS=129.568,19.6232
# one http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/05/15/f2/c6/p13/v1/PTF_201005152355_i_p_scie_t053906_u011486277_f02_p110004_c06.fits?center=129.568,19.6232deg&size=150px
# two http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/25/f2/c6/p13/v1/PTF_201004251929_i_p_scie_t043750_u011578017_f02_p110004_c06.fits?center=129.568,19.6232deg&size=150px
mjd_offset = 54832
lc1 = session.query(LightCurve).filter(LightCurve.objid == 9347).one()
bad_light_curve = PTFLightCurve.fromDBLightCurve(lc1)
light_curves = session.query(LightCurve).filter(func.q3c_radial_query(LightCurve.ra, LightCurve.dec, lc1.ra, lc1.dec,30/3600.)).all()
bad_light_curve.plot()
print [x.ra for x in light_curves]
print [x.dec for x in light_curves]
print lc1.ra, lc1.dec
return
if __name__ == "__main__":
#survey_coverage()
#praesepe_detection_efficiency()
#survey_detection_effieciency()
#variability_indices()
#variability_indices_detection_efficiency()
praesepe_timescale_distribution()
#praesepe_event_rate() |
def systematics_9347(): | random_line_split |
aas220_poster.py | # coding: utf-8
"""
Generate figures that will go on my AAS poster
"""
from __future__ import division
# Standard library
import sys
import os
import cPickle as pickle
# Third-party
#import apwlib.convert as c
import apwlib.geometry as g
import matplotlib
matplotlib.use("WxAgg")
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import pyfits as pf
from sqlalchemy import func
# PTF
from ptf.parameters import *
from ptf.db.DatabaseConnection import *
import ptf.simulation.util as simu
from ptf import PTFLightCurve
import coverageplots
import detectionefficiency as de
title_font_size = 38
label_font_size = 34
tick_font_size = 24
parameter_to_label = {"j" : "J", "k" : "K", "sigma_mu" : r"$\sigma/\mu$", "eta" : r"$\eta$", "delta_chi_squared" : r"$\Delta \chi^2$"}
def survey_coverage():
# PTF:
|
# To be used by the Praesepe timescale distribution plot and the
# detection efficiency
timescale_bins = np.logspace(np.log10(1), np.log10(1000), 100) # from 1 day to 1000 days
def praesepe_timescale_distribution():
filename = "data/praesepeTimeScales.npy"
timescales = np.load(filename)
plt.figure(figsize=(15,15))
plt.hist(timescales, bins=timescale_bins, normed=True)
plt.xscale("log")
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
t = plt.title("Normalized timescale distribution for Praesepe field", size=title_font_size)
t.set_y(1.04)
ax = plt.gca()
for label in ax.get_xticklabels():
label.set_fontsize(tick_font_size)
ax.set_yticklabels([])
plt.savefig("plots/aas_praesepe_timescale.png")
def praesepe_event_rate():
filename = "data/praesepeTimeScales.npy"
timescales = np.load(filename)
global_event_rate = 0.0081 #
# To get the event rate distribution, I have to normalize the timescale dist. so
# the integral from 0 to infinity = global event rate
timescale_pdf, bin_edges = np.histogram(timescales, bins=timescale_bins, density=True)
event_rate_distribution = timescale_pdf*global_event_rate
# Get the Praesepe detection efficiency
filename = "data/praesepe_detection_efficiency.npy"
# Load the simulation results
sim_results = np.load(filename)
# Get the RMS scatter of delta chi-squared for the vanilla light curves
dcs = [x[0] for x in session.query(VariabilityIndices.delta_chi_squared).join(LightCurve).filter(LightCurve.objid < 100000).all()]
sigma = np.std(dcs)
# 2*sigma ~ 300
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[sim_results["delta_chi_squared"] > 2.*sigma]
detections = detections[detections["event_added"] == True]
#detections = sim_results[(sim_results["delta_chi_squared"] > 2.*sigma)]
#detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
detection_efficiency_distribution = tE_counts / total_counts
bin_widths = bin_edges[1:] - bin_edges[:-1]
# Compute number of events!
# - detection_efficiency_distribution is dE/dt_E
# - event_rate_distribution is dN/dt_E
# - bin_widths give us dt_E
N_exp = np.sum(detection_efficiency_distribution * event_rate_distribution / 365. * bin_widths) * 102. # days of Praesepe obs.
# Number of events if we had observed it consistently for 3 years
N_exp_all_survey = np.sum(detection_efficiency_distribution * event_rate_distribution / 365. * bin_widths) * 1095. # days of Praesepe obs.
print "Number of events in our Praesepe sample (102 days): {} +/- {}".format(N_exp, np.sqrt(N_exp))
print "Number of events if we had observed it consistently for 3 years: {} +/- {}".format(N_exp_all_survey, np.sqrt(N_exp_all_survey))
def praesepe_detection_efficiency():
filename = "data/praesepe_detection_efficiency.npy"
if not os.path.exists(filename):
# Select out just the Praesepe light curves (objid < 100000)
light_curve_generator = de.AllPraesepeLightCurves(limit=10000, random=True)
sim_results = de.run_simulation(light_curve_generator, N=100)
np.save(filename, sim_results)
# Load the simulation results
sim_results = np.load(filename)
# Get the RMS scatter of delta chi-squared for the vanilla light curves
dcs = [x[0] for x in session.query(VariabilityIndices.delta_chi_squared).join(LightCurve).filter(LightCurve.objid < 100000).all()]
sigma = np.std(dcs)
# 2*sigma ~ 300
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[sim_results["delta_chi_squared"] > 2.*sigma]
detections = detections[detections["event_added"] == True]
#detections = sim_results[(sim_results["delta_chi_squared"] > 2.*sigma)]
#detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, total_bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
detection_efficiency = tE_counts / total_counts
bin_widths = total_bin_edges[1:] - total_bin_edges[:-1]
#print np.sum(bin_widths*detection_efficiency)
#return
plt.figure(figsize=(15,15))
# Multiply by 2 because we only put events in 50% of the cases
plt.semilogx((total_bin_edges[1:]+total_bin_edges[:-1])/2, tE_counts / total_counts, 'k-', lw=3)
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.ylim(0., 0.75)
t = plt.title("PTF Detection Efficiency for Praesepe Light Curves", size=title_font_size)
t.set_y(1.04)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
plt.tight_layout()
plt.savefig("plots/aas_praesepe_detection_efficiency.png")
def random_praesepe_light_curve():
objid = np.random.randint(82415)
try:
lc = session.query(LightCurve).filter(LightCurve.objid == objid).one()
except:
lc = session.query(LightCurve).filter(LightCurve.objid == 101).one()
return lc
def survey_detection_effieciency():
""" Here I want a figure that shows how the detection efficiency
changes for uniform, random, and clumpy observations
"""
baseline = 365 #days
# TODO: Rerun with 1024, add praesepe line
max_num_observations = 1024
min_num_observations = 16
num_clumps = 4
num_iterations = 10000
if not os.path.exists("data/aas_survey_detection_efficiency.pickle"):
data_dict = {"clumpy" : {1. : [], 10. : [], 100 : []}, "uniform" : {1. : [], 10. : [], 100 : []}}
for timescale in [1., 10., 100.]:
for sampling in ["clumpy", "uniform"]: #, "random"]:
if sampling == "random":
mjd = np.random.random(max_num_observations)*baseline
elif sampling == "clumpy":
sparse_samples = np.random.random(max_num_observations/2)*baseline
clumps = []
days = []
sum = 0.
pts_per_clump = max_num_observations / 2 / num_clumps
for ii in range(num_clumps):
day = np.random.randint(365)
if day in days: continue
days.append(day)
clumpy_samples = np.linspace(day+0.1, day+0.6, pts_per_clump)
clumps.append(clumpy_samples)
clumps.append(sparse_samples)
mjd = np.concatenate(tuple(clumps))
plt.plot(mjd, [1.]*len(mjd), 'ro', alpha=0.4)
plt.show()
elif sampling == "uniform":
mjd = np.linspace(0., baseline, max_num_observations)
for jj in range(num_iterations):
lc = random_praesepe_light_curve()
if len(lc.mag) < 100: continue
dupe_mags = np.array(lc.mag*15)
dupe_err = np.array(list(lc.error)*15)
shuffled_idx = np.arange(0, len(dupe_mags))
np.random.shuffle(shuffled_idx)
mags = dupe_mags[shuffled_idx]
err = dupe_err[shuffled_idx]
sim_light_curve = simu.SimulatedLightCurve(mjd=mjd, mag=mags[:len(mjd)], error=err[:len(mjd)])
sim_light_curve.addMicrolensingEvent(tE=timescale)
#sim_light_curve.plot()
delta_chi_squareds = []
sim_mjd = sim_light_curve.mjd
sim_mag = sim_light_curve.mag
sim_err = sim_light_curve.error
while True:
if len(sim_mjd) < min_num_observations: break
dcs = simu.compute_delta_chi_squared((sim_mjd, sim_mag, sim_err), force_fit=True)
delta_chi_squareds.append(dcs)
prune = np.arange(len(sim_mjd))
np.random.shuffle(prune)
prune = prune[::2]
sim_mjd = sim_mjd[prune]
sim_mag = sim_mag[prune]
sim_err = sim_err[prune]
data_dict[sampling][timescale].append(delta_chi_squareds)
f = open("data/aas_survey_detection_efficiency.pickle", "w")
pickle.dump(data_dict, f)
f.close()
f = open("data/aas_survey_detection_efficiency.pickle", "r")
data_dict = pickle.load(f)
# Plotting stuff
plt.figure(figsize=(15,15))
dcs_cutoff = 300.
num_observations = [2**x for x in range(int(np.log2(max_num_observations)), int(np.log2(min_num_observations))-1, -1)]
linestyles = {"uniform" : "--", "clumpy" : "-"}
linecolors = {1. : "k", 10. : "r", 100. : "c"}
for sampling in data_dict.keys():
for timescale in data_dict[sampling].keys():
data = np.array(data_dict[sampling][timescale])
efficiencies = []
for col,num_obs in enumerate(num_observations):
efficiencies.append(np.sum(data[:,col] > dcs_cutoff) / len(data[:,col]))
plt.plot(np.log2(num_observations), efficiencies, ls=linestyles[sampling], color=linecolors[timescale], label=r"$t_E={}$ day, {} sampling".format(int(timescale), sampling), lw=3)
#plt.axvline(np.log2(625.), c="g", ls="--", lw=2, label="PTF Praesepe fields")
plt.xlabel("Number of Observations / 1 year", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.title("Simulated Detection Efficiency for\nDifferent Sampling Patterns", size=title_font_size)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
ax.set_xticklabels(num_observations[::-1])
legend = plt.legend(loc="upper left", shadow=True, fancybox=True)
legendtext = legend.get_texts()
plt.setp(legendtext, fontsize=tick_font_size) # the legend text fontsize
plt.tight_layout()
#plt.show()
plt.savefig("plots/aas_survey_detection_efficiency.png")
def variability_indices():
import PraesepeLightCurves as plc
# TODO: Sample timescale from the distribution that Amanda will send me
# TODO: Fix legend in post-editing
plc.aas_figure()
def variability_indices_detection_efficiency():
""" This figure should show the detection efficiency curve for the Praesepe
data for each variability index (cut at 2-sigma)
"""
filename = "data/praesepe_detection_efficiency.npy"
# Load the simulation results
sim_results = np.load(filename)
var_indices = session.query(VariabilityIndices).join(LightCurve).filter(LightCurve.objid < 100000).all()
styles = [(3,"-."), (3,":"), (3,"--"), (1.5,"--"), (2,"-")]
colors = ["c", "m", "g", "y", "k"]
plt.figure(figsize=(15,15))
for ii,idx in enumerate(["j", "k", "eta", "sigma_mu", "delta_chi_squared"]):
values = [getattr(x, idx) for x in var_indices]
sigma = np.std(values)
mu = np.mean(values)
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[(np.fabs(sim_results[idx]) > (mu + 2.*sigma)) | (np.fabs(sim_results[idx]) < (mu - 2.*sigma))]
detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, total_bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
lw,ls = styles[ii]
plt.semilogx((total_bin_edges[1:]+total_bin_edges[:-1])/2, tE_counts / total_counts, c=colors[ii], lw=lw, label=r"{}".format(parameter_to_label[idx]), ls=ls)
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.ylim(0., 1.0)
t = plt.title("PTF Detection Efficiency for Praesepe Light Curves", size=title_font_size)
t.set_y(1.04)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
leg = plt.legend(shadow=True, fancybox=True)
legendtext = leg.get_texts()
plt.setp(legendtext, fontsize=label_font_size)
plt.tight_layout()
plt.savefig("plots/aas_var_indices_detection_efficiency.png")
#plt.show()
def systematics_552():
# Bad1 http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/17/f2/c6/p13/v1/PTF_201004172696_i_p_scie_t062817_u011575385_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
# Bad2 http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/17/f2/c6/p13/v1/PTF_201004173180_i_p_scie_t073759_u011575280_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
# Good http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/07/f2/c6/p13/v1/PTF_201004071439_i_p_scie_t032714_u011539562_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
mjd_offset = 54832
lc1 = session.query(LightCurve).filter(LightCurve.objid == 552).one()
bad_light_curve = PTFLightCurve.fromDBLightCurve(lc1)
light_curves = session.query(LightCurve).filter(func.q3c_radial_query(LightCurve.ra, LightCurve.dec, lc1.ra, lc1.dec,30/3600.)).all()
bad_obs1 = 55303.26964
bad_obs2 = 55303.31804
good_obs = 55293.14391
# seeing, airmass, filename, mjd
imlist = np.genfromtxt("data/aas_552_imagelist.txt", skiprows=4, usecols=[11,12,20,25], dtype=[("seeing", float), ("airmass", float), ("filename", "|S100"), ("mjd", float)])
idx_sort = np.argsort(imlist["mjd"])
imlist = imlist[idx_sort]
print "Bad1:", imlist["filename"][imlist["mjd"] == 55303.26964]
print "Bad2:", imlist["filename"][imlist["mjd"] == 55303.31804]
print "Good:", imlist["filename"][imlist["mjd"] == 55293.14391]
return
plt.plot(imlist["mjd"], imlist["seeing"], "r.")
plt.show()
def systematics_9347():
# http://kanaloa.ipac.caltech.edu/ibe/search/ptf/dev/process?POS=129.568,19.6232
# one http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/05/15/f2/c6/p13/v1/PTF_201005152355_i_p_scie_t053906_u011486277_f02_p110004_c06.fits?center=129.568,19.6232deg&size=150px
# two http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/25/f2/c6/p13/v1/PTF_201004251929_i_p_scie_t043750_u011578017_f02_p110004_c06.fits?center=129.568,19.6232deg&size=150px
mjd_offset = 54832
lc1 = session.query(LightCurve).filter(LightCurve.objid == 9347).one()
bad_light_curve = PTFLightCurve.fromDBLightCurve(lc1)
light_curves = session.query(LightCurve).filter(func.q3c_radial_query(LightCurve.ra, LightCurve.dec, lc1.ra, lc1.dec,30/3600.)).all()
bad_light_curve.plot()
print [x.ra for x in light_curves]
print [x.dec for x in light_curves]
print lc1.ra, lc1.dec
return
if __name__ == "__main__":
#survey_coverage()
#praesepe_detection_efficiency()
#survey_detection_effieciency()
#variability_indices()
#variability_indices_detection_efficiency()
praesepe_timescale_distribution()
#praesepe_event_rate() | raw_field_data = pf.open("data/exposureData.fits")[1].data
unq_field_ids = np.unique(raw_field_data.field_id)
ptf_fields = []
for field_id in unq_field_ids:
one_field_data = raw_field_data[raw_field_data.field_id == field_id]
mean_ra = np.mean(one_field_data.ra) / 15.
mean_dec = np.mean(one_field_data.dec)
observations = len(one_field_data) / len(np.unique(one_field_data.ccd_id))
ptf_fields.append(coverageplots.PTFField(mean_ra, mean_dec, id=field_id, number_of_observations=observations))
# OGLE:
high_cadence = np.genfromtxt("data/ogle4_common.txt", names=["ra","dec","l","b"], usecols=[6,7,8,9]).view(np.recarray)
low_cadence = np.genfromtxt("data/ogle4_less_frequent.txt", names=["ra","dec","l","b"], usecols=[6,7,8,9]).view(np.recarray)
ogle_high_cadence_fields = []
for row in high_cadence: ogle_high_cadence_fields.append(coverageplots.OGLEField(row["ra"], row["dec"]))
ogle_low_cadence_fields = []
for row in low_cadence: ogle_low_cadence_fields.append(coverageplots.OGLEField(row["ra"], row["dec"]))
coverage_plot = coverageplots.PTFCoveragePlot(figsize=(30,15), projection="aitoff")
coverage_plot.addFields(ptf_fields, label="PTF", color_by_observations=True)
coverage_plot.addFields(ogle_low_cadence_fields + ogle_high_cadence_fields, label="OGLE-IV", color="c", alpha=0.15)
#coverage_plot.addFields(ogle_high_cadence_fields, label="OGLE-IV - high cadence", color="r", alpha=0.15)
# Now I need to add globular and open clusters to the plot!
open_clusters = np.genfromtxt("data/open_clusters.csv", usecols=[0,1,2,11], dtype=[("name","|S20"),("ra","|S8"), ("dec","|S9"), ("diameter", float)], delimiter=",").view(np.recarray)
for ii,cluster in enumerate(open_clusters):
ra_deg = g.RA(cluster["ra"]).degrees
dec_rad = g.Dec(cluster["dec"]).radians
if ii == 0:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(cluster["diameter"]/60./np.cos(dec_rad)), height=np.radians(cluster["diameter"]/60.), alpha=.4, edgecolor="r", facecolor="r", label="Open Clusters")
else:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(cluster["diameter"]/60./np.cos(dec_rad)), height=np.radians(cluster["diameter"]/60.), alpha=.4, edgecolor="r", facecolor="r")
coverage_plot.axis.add_patch(circle)
"""
globular_clusters = np.genfromtxt("data/allGlobularClusters.txt", dtype=[("r_h",float),("ra","|S11"), ("dec","|S12")], delimiter=",").view(np.recarray)
for ii,cluster in enumerate(globular_clusters):
ra_deg = g.RA(cluster["ra"]).degrees
dec_rad = g.Dec(cluster["dec"]).radians
diameter = 2.*cluster["r_h"]*10
if ii == 0:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(diameter/60./np.cos(dec_rad)), height=np.radians(diameter/60.), alpha=.4, edgecolor="g", facecolor="g", label="Globular Clusters")
else:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(diameter/60./np.cos(dec_rad)), height=np.radians(diameter/60.), alpha=.4, edgecolor="g", facecolor="g")
coverage_plot.axis.add_patch(circle)
"""
coverage_plot.addLegend()
coverage_plot.title.set_fontsize(title_font_size)
legendtext = coverage_plot.legend.get_texts()
plt.setp(legendtext, fontsize=label_font_size) # the legend text fontsize
#plt.show()
coverage_plot.figure.savefig("plots/aas_ptf_coverage.png") | identifier_body |
aas220_poster.py | # coding: utf-8
"""
Generate figures that will go on my AAS poster
"""
from __future__ import division
# Standard library
import sys
import os
import cPickle as pickle
# Third-party
#import apwlib.convert as c
import apwlib.geometry as g
import matplotlib
matplotlib.use("WxAgg")
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import pyfits as pf
from sqlalchemy import func
# PTF
from ptf.parameters import *
from ptf.db.DatabaseConnection import *
import ptf.simulation.util as simu
from ptf import PTFLightCurve
import coverageplots
import detectionefficiency as de
title_font_size = 38
label_font_size = 34
tick_font_size = 24
parameter_to_label = {"j" : "J", "k" : "K", "sigma_mu" : r"$\sigma/\mu$", "eta" : r"$\eta$", "delta_chi_squared" : r"$\Delta \chi^2$"}
def survey_coverage():
# PTF:
raw_field_data = pf.open("data/exposureData.fits")[1].data
unq_field_ids = np.unique(raw_field_data.field_id)
ptf_fields = []
for field_id in unq_field_ids:
one_field_data = raw_field_data[raw_field_data.field_id == field_id]
mean_ra = np.mean(one_field_data.ra) / 15.
mean_dec = np.mean(one_field_data.dec)
observations = len(one_field_data) / len(np.unique(one_field_data.ccd_id))
ptf_fields.append(coverageplots.PTFField(mean_ra, mean_dec, id=field_id, number_of_observations=observations))
# OGLE:
high_cadence = np.genfromtxt("data/ogle4_common.txt", names=["ra","dec","l","b"], usecols=[6,7,8,9]).view(np.recarray)
low_cadence = np.genfromtxt("data/ogle4_less_frequent.txt", names=["ra","dec","l","b"], usecols=[6,7,8,9]).view(np.recarray)
ogle_high_cadence_fields = []
for row in high_cadence: ogle_high_cadence_fields.append(coverageplots.OGLEField(row["ra"], row["dec"]))
ogle_low_cadence_fields = []
for row in low_cadence: ogle_low_cadence_fields.append(coverageplots.OGLEField(row["ra"], row["dec"]))
coverage_plot = coverageplots.PTFCoveragePlot(figsize=(30,15), projection="aitoff")
coverage_plot.addFields(ptf_fields, label="PTF", color_by_observations=True)
coverage_plot.addFields(ogle_low_cadence_fields + ogle_high_cadence_fields, label="OGLE-IV", color="c", alpha=0.15)
#coverage_plot.addFields(ogle_high_cadence_fields, label="OGLE-IV - high cadence", color="r", alpha=0.15)
# Now I need to add globular and open clusters to the plot!
open_clusters = np.genfromtxt("data/open_clusters.csv", usecols=[0,1,2,11], dtype=[("name","|S20"),("ra","|S8"), ("dec","|S9"), ("diameter", float)], delimiter=",").view(np.recarray)
for ii,cluster in enumerate(open_clusters):
ra_deg = g.RA(cluster["ra"]).degrees
dec_rad = g.Dec(cluster["dec"]).radians
if ii == 0:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(cluster["diameter"]/60./np.cos(dec_rad)), height=np.radians(cluster["diameter"]/60.), alpha=.4, edgecolor="r", facecolor="r", label="Open Clusters")
else:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(cluster["diameter"]/60./np.cos(dec_rad)), height=np.radians(cluster["diameter"]/60.), alpha=.4, edgecolor="r", facecolor="r")
coverage_plot.axis.add_patch(circle)
"""
globular_clusters = np.genfromtxt("data/allGlobularClusters.txt", dtype=[("r_h",float),("ra","|S11"), ("dec","|S12")], delimiter=",").view(np.recarray)
for ii,cluster in enumerate(globular_clusters):
ra_deg = g.RA(cluster["ra"]).degrees
dec_rad = g.Dec(cluster["dec"]).radians
diameter = 2.*cluster["r_h"]*10
if ii == 0:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(diameter/60./np.cos(dec_rad)), height=np.radians(diameter/60.), alpha=.4, edgecolor="g", facecolor="g", label="Globular Clusters")
else:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(diameter/60./np.cos(dec_rad)), height=np.radians(diameter/60.), alpha=.4, edgecolor="g", facecolor="g")
coverage_plot.axis.add_patch(circle)
"""
coverage_plot.addLegend()
coverage_plot.title.set_fontsize(title_font_size)
legendtext = coverage_plot.legend.get_texts()
plt.setp(legendtext, fontsize=label_font_size) # the legend text fontsize
#plt.show()
coverage_plot.figure.savefig("plots/aas_ptf_coverage.png")
# To be used by the Praesepe timescale distribution plot and the
# detection efficiency
timescale_bins = np.logspace(np.log10(1), np.log10(1000), 100) # from 1 day to 1000 days
def praesepe_timescale_distribution():
filename = "data/praesepeTimeScales.npy"
timescales = np.load(filename)
plt.figure(figsize=(15,15))
plt.hist(timescales, bins=timescale_bins, normed=True)
plt.xscale("log")
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
t = plt.title("Normalized timescale distribution for Praesepe field", size=title_font_size)
t.set_y(1.04)
ax = plt.gca()
for label in ax.get_xticklabels():
label.set_fontsize(tick_font_size)
ax.set_yticklabels([])
plt.savefig("plots/aas_praesepe_timescale.png")
def praesepe_event_rate():
filename = "data/praesepeTimeScales.npy"
timescales = np.load(filename)
global_event_rate = 0.0081 #
# To get the event rate distribution, I have to normalize the timescale dist. so
# the integral from 0 to infinity = global event rate
timescale_pdf, bin_edges = np.histogram(timescales, bins=timescale_bins, density=True)
event_rate_distribution = timescale_pdf*global_event_rate
# Get the Praesepe detection efficiency
filename = "data/praesepe_detection_efficiency.npy"
# Load the simulation results
sim_results = np.load(filename)
# Get the RMS scatter of delta chi-squared for the vanilla light curves
dcs = [x[0] for x in session.query(VariabilityIndices.delta_chi_squared).join(LightCurve).filter(LightCurve.objid < 100000).all()]
sigma = np.std(dcs)
# 2*sigma ~ 300
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[sim_results["delta_chi_squared"] > 2.*sigma]
detections = detections[detections["event_added"] == True]
#detections = sim_results[(sim_results["delta_chi_squared"] > 2.*sigma)]
#detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
detection_efficiency_distribution = tE_counts / total_counts
bin_widths = bin_edges[1:] - bin_edges[:-1]
# Compute number of events!
# - detection_efficiency_distribution is dE/dt_E
# - event_rate_distribution is dN/dt_E
# - bin_widths give us dt_E
N_exp = np.sum(detection_efficiency_distribution * event_rate_distribution / 365. * bin_widths) * 102. # days of Praesepe obs.
# Number of events if we had observed it consistently for 3 years
N_exp_all_survey = np.sum(detection_efficiency_distribution * event_rate_distribution / 365. * bin_widths) * 1095. # days of Praesepe obs.
print "Number of events in our Praesepe sample (102 days): {} +/- {}".format(N_exp, np.sqrt(N_exp))
print "Number of events if we had observed it consistently for 3 years: {} +/- {}".format(N_exp_all_survey, np.sqrt(N_exp_all_survey))
def praesepe_detection_efficiency():
filename = "data/praesepe_detection_efficiency.npy"
if not os.path.exists(filename):
# Select out just the Praesepe light curves (objid < 100000)
light_curve_generator = de.AllPraesepeLightCurves(limit=10000, random=True)
sim_results = de.run_simulation(light_curve_generator, N=100)
np.save(filename, sim_results)
# Load the simulation results
sim_results = np.load(filename)
# Get the RMS scatter of delta chi-squared for the vanilla light curves
dcs = [x[0] for x in session.query(VariabilityIndices.delta_chi_squared).join(LightCurve).filter(LightCurve.objid < 100000).all()]
sigma = np.std(dcs)
# 2*sigma ~ 300
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[sim_results["delta_chi_squared"] > 2.*sigma]
detections = detections[detections["event_added"] == True]
#detections = sim_results[(sim_results["delta_chi_squared"] > 2.*sigma)]
#detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, total_bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
detection_efficiency = tE_counts / total_counts
bin_widths = total_bin_edges[1:] - total_bin_edges[:-1]
#print np.sum(bin_widths*detection_efficiency)
#return
plt.figure(figsize=(15,15))
# Multiply by 2 because we only put events in 50% of the cases
plt.semilogx((total_bin_edges[1:]+total_bin_edges[:-1])/2, tE_counts / total_counts, 'k-', lw=3)
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.ylim(0., 0.75)
t = plt.title("PTF Detection Efficiency for Praesepe Light Curves", size=title_font_size)
t.set_y(1.04)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
plt.tight_layout()
plt.savefig("plots/aas_praesepe_detection_efficiency.png")
def random_praesepe_light_curve():
objid = np.random.randint(82415)
try:
lc = session.query(LightCurve).filter(LightCurve.objid == objid).one()
except:
lc = session.query(LightCurve).filter(LightCurve.objid == 101).one()
return lc
def survey_detection_effieciency():
""" Here I want a figure that shows how the detection efficiency
changes for uniform, random, and clumpy observations
"""
baseline = 365 #days
# TODO: Rerun with 1024, add praesepe line
max_num_observations = 1024
min_num_observations = 16
num_clumps = 4
num_iterations = 10000
if not os.path.exists("data/aas_survey_detection_efficiency.pickle"):
data_dict = {"clumpy" : {1. : [], 10. : [], 100 : []}, "uniform" : {1. : [], 10. : [], 100 : []}}
for timescale in [1., 10., 100.]:
for sampling in ["clumpy", "uniform"]: #, "random"]:
if sampling == "random":
mjd = np.random.random(max_num_observations)*baseline
elif sampling == "clumpy":
sparse_samples = np.random.random(max_num_observations/2)*baseline
clumps = []
days = []
sum = 0.
pts_per_clump = max_num_observations / 2 / num_clumps
for ii in range(num_clumps):
day = np.random.randint(365)
if day in days: continue
days.append(day)
clumpy_samples = np.linspace(day+0.1, day+0.6, pts_per_clump)
clumps.append(clumpy_samples)
clumps.append(sparse_samples)
mjd = np.concatenate(tuple(clumps))
plt.plot(mjd, [1.]*len(mjd), 'ro', alpha=0.4)
plt.show()
elif sampling == "uniform":
mjd = np.linspace(0., baseline, max_num_observations)
for jj in range(num_iterations):
lc = random_praesepe_light_curve()
if len(lc.mag) < 100: continue
dupe_mags = np.array(lc.mag*15)
dupe_err = np.array(list(lc.error)*15)
shuffled_idx = np.arange(0, len(dupe_mags))
np.random.shuffle(shuffled_idx)
mags = dupe_mags[shuffled_idx]
err = dupe_err[shuffled_idx]
sim_light_curve = simu.SimulatedLightCurve(mjd=mjd, mag=mags[:len(mjd)], error=err[:len(mjd)])
sim_light_curve.addMicrolensingEvent(tE=timescale)
#sim_light_curve.plot()
delta_chi_squareds = []
sim_mjd = sim_light_curve.mjd
sim_mag = sim_light_curve.mag
sim_err = sim_light_curve.error
while True:
if len(sim_mjd) < min_num_observations: break
dcs = simu.compute_delta_chi_squared((sim_mjd, sim_mag, sim_err), force_fit=True)
delta_chi_squareds.append(dcs)
prune = np.arange(len(sim_mjd))
np.random.shuffle(prune)
prune = prune[::2]
sim_mjd = sim_mjd[prune]
sim_mag = sim_mag[prune]
sim_err = sim_err[prune]
data_dict[sampling][timescale].append(delta_chi_squareds)
f = open("data/aas_survey_detection_efficiency.pickle", "w")
pickle.dump(data_dict, f)
f.close()
f = open("data/aas_survey_detection_efficiency.pickle", "r")
data_dict = pickle.load(f)
# Plotting stuff
plt.figure(figsize=(15,15))
dcs_cutoff = 300.
num_observations = [2**x for x in range(int(np.log2(max_num_observations)), int(np.log2(min_num_observations))-1, -1)]
linestyles = {"uniform" : "--", "clumpy" : "-"}
linecolors = {1. : "k", 10. : "r", 100. : "c"}
for sampling in data_dict.keys():
for timescale in data_dict[sampling].keys():
data = np.array(data_dict[sampling][timescale])
efficiencies = []
for col,num_obs in enumerate(num_observations):
efficiencies.append(np.sum(data[:,col] > dcs_cutoff) / len(data[:,col]))
plt.plot(np.log2(num_observations), efficiencies, ls=linestyles[sampling], color=linecolors[timescale], label=r"$t_E={}$ day, {} sampling".format(int(timescale), sampling), lw=3)
#plt.axvline(np.log2(625.), c="g", ls="--", lw=2, label="PTF Praesepe fields")
plt.xlabel("Number of Observations / 1 year", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.title("Simulated Detection Efficiency for\nDifferent Sampling Patterns", size=title_font_size)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
ax.set_xticklabels(num_observations[::-1])
legend = plt.legend(loc="upper left", shadow=True, fancybox=True)
legendtext = legend.get_texts()
plt.setp(legendtext, fontsize=tick_font_size) # the legend text fontsize
plt.tight_layout()
#plt.show()
plt.savefig("plots/aas_survey_detection_efficiency.png")
def | ():
import PraesepeLightCurves as plc
# TODO: Sample timescale from the distribution that Amanda will send me
# TODO: Fix legend in post-editing
plc.aas_figure()
def variability_indices_detection_efficiency():
""" This figure should show the detection efficiency curve for the Praesepe
data for each variability index (cut at 2-sigma)
"""
filename = "data/praesepe_detection_efficiency.npy"
# Load the simulation results
sim_results = np.load(filename)
var_indices = session.query(VariabilityIndices).join(LightCurve).filter(LightCurve.objid < 100000).all()
styles = [(3,"-."), (3,":"), (3,"--"), (1.5,"--"), (2,"-")]
colors = ["c", "m", "g", "y", "k"]
plt.figure(figsize=(15,15))
for ii,idx in enumerate(["j", "k", "eta", "sigma_mu", "delta_chi_squared"]):
values = [getattr(x, idx) for x in var_indices]
sigma = np.std(values)
mu = np.mean(values)
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[(np.fabs(sim_results[idx]) > (mu + 2.*sigma)) | (np.fabs(sim_results[idx]) < (mu - 2.*sigma))]
detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, total_bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
lw,ls = styles[ii]
plt.semilogx((total_bin_edges[1:]+total_bin_edges[:-1])/2, tE_counts / total_counts, c=colors[ii], lw=lw, label=r"{}".format(parameter_to_label[idx]), ls=ls)
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.ylim(0., 1.0)
t = plt.title("PTF Detection Efficiency for Praesepe Light Curves", size=title_font_size)
t.set_y(1.04)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
leg = plt.legend(shadow=True, fancybox=True)
legendtext = leg.get_texts()
plt.setp(legendtext, fontsize=label_font_size)
plt.tight_layout()
plt.savefig("plots/aas_var_indices_detection_efficiency.png")
#plt.show()
def systematics_552():
# Bad1 http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/17/f2/c6/p13/v1/PTF_201004172696_i_p_scie_t062817_u011575385_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
# Bad2 http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/17/f2/c6/p13/v1/PTF_201004173180_i_p_scie_t073759_u011575280_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
# Good http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/07/f2/c6/p13/v1/PTF_201004071439_i_p_scie_t032714_u011539562_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
mjd_offset = 54832
lc1 = session.query(LightCurve).filter(LightCurve.objid == 552).one()
bad_light_curve = PTFLightCurve.fromDBLightCurve(lc1)
light_curves = session.query(LightCurve).filter(func.q3c_radial_query(LightCurve.ra, LightCurve.dec, lc1.ra, lc1.dec,30/3600.)).all()
bad_obs1 = 55303.26964
bad_obs2 = 55303.31804
good_obs = 55293.14391
# seeing, airmass, filename, mjd
imlist = np.genfromtxt("data/aas_552_imagelist.txt", skiprows=4, usecols=[11,12,20,25], dtype=[("seeing", float), ("airmass", float), ("filename", "|S100"), ("mjd", float)])
idx_sort = np.argsort(imlist["mjd"])
imlist = imlist[idx_sort]
print "Bad1:", imlist["filename"][imlist["mjd"] == 55303.26964]
print "Bad2:", imlist["filename"][imlist["mjd"] == 55303.31804]
print "Good:", imlist["filename"][imlist["mjd"] == 55293.14391]
return
plt.plot(imlist["mjd"], imlist["seeing"], "r.")
plt.show()
def systematics_9347():
# http://kanaloa.ipac.caltech.edu/ibe/search/ptf/dev/process?POS=129.568,19.6232
# one http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/05/15/f2/c6/p13/v1/PTF_201005152355_i_p_scie_t053906_u011486277_f02_p110004_c06.fits?center=129.568,19.6232deg&size=150px
# two http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/25/f2/c6/p13/v1/PTF_201004251929_i_p_scie_t043750_u011578017_f02_p110004_c06.fits?center=129.568,19.6232deg&size=150px
mjd_offset = 54832
lc1 = session.query(LightCurve).filter(LightCurve.objid == 9347).one()
bad_light_curve = PTFLightCurve.fromDBLightCurve(lc1)
light_curves = session.query(LightCurve).filter(func.q3c_radial_query(LightCurve.ra, LightCurve.dec, lc1.ra, lc1.dec,30/3600.)).all()
bad_light_curve.plot()
print [x.ra for x in light_curves]
print [x.dec for x in light_curves]
print lc1.ra, lc1.dec
return
if __name__ == "__main__":
#survey_coverage()
#praesepe_detection_efficiency()
#survey_detection_effieciency()
#variability_indices()
#variability_indices_detection_efficiency()
praesepe_timescale_distribution()
#praesepe_event_rate() | variability_indices | identifier_name |
aas220_poster.py | # coding: utf-8
"""
Generate figures that will go on my AAS poster
"""
from __future__ import division
# Standard library
import sys
import os
import cPickle as pickle
# Third-party
#import apwlib.convert as c
import apwlib.geometry as g
import matplotlib
matplotlib.use("WxAgg")
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import pyfits as pf
from sqlalchemy import func
# PTF
from ptf.parameters import *
from ptf.db.DatabaseConnection import *
import ptf.simulation.util as simu
from ptf import PTFLightCurve
import coverageplots
import detectionefficiency as de
title_font_size = 38
label_font_size = 34
tick_font_size = 24
parameter_to_label = {"j" : "J", "k" : "K", "sigma_mu" : r"$\sigma/\mu$", "eta" : r"$\eta$", "delta_chi_squared" : r"$\Delta \chi^2$"}
def survey_coverage():
# PTF:
raw_field_data = pf.open("data/exposureData.fits")[1].data
unq_field_ids = np.unique(raw_field_data.field_id)
ptf_fields = []
for field_id in unq_field_ids:
one_field_data = raw_field_data[raw_field_data.field_id == field_id]
mean_ra = np.mean(one_field_data.ra) / 15.
mean_dec = np.mean(one_field_data.dec)
observations = len(one_field_data) / len(np.unique(one_field_data.ccd_id))
ptf_fields.append(coverageplots.PTFField(mean_ra, mean_dec, id=field_id, number_of_observations=observations))
# OGLE:
high_cadence = np.genfromtxt("data/ogle4_common.txt", names=["ra","dec","l","b"], usecols=[6,7,8,9]).view(np.recarray)
low_cadence = np.genfromtxt("data/ogle4_less_frequent.txt", names=["ra","dec","l","b"], usecols=[6,7,8,9]).view(np.recarray)
ogle_high_cadence_fields = []
for row in high_cadence: ogle_high_cadence_fields.append(coverageplots.OGLEField(row["ra"], row["dec"]))
ogle_low_cadence_fields = []
for row in low_cadence: ogle_low_cadence_fields.append(coverageplots.OGLEField(row["ra"], row["dec"]))
coverage_plot = coverageplots.PTFCoveragePlot(figsize=(30,15), projection="aitoff")
coverage_plot.addFields(ptf_fields, label="PTF", color_by_observations=True)
coverage_plot.addFields(ogle_low_cadence_fields + ogle_high_cadence_fields, label="OGLE-IV", color="c", alpha=0.15)
#coverage_plot.addFields(ogle_high_cadence_fields, label="OGLE-IV - high cadence", color="r", alpha=0.15)
# Now I need to add globular and open clusters to the plot!
open_clusters = np.genfromtxt("data/open_clusters.csv", usecols=[0,1,2,11], dtype=[("name","|S20"),("ra","|S8"), ("dec","|S9"), ("diameter", float)], delimiter=",").view(np.recarray)
for ii,cluster in enumerate(open_clusters):
ra_deg = g.RA(cluster["ra"]).degrees
dec_rad = g.Dec(cluster["dec"]).radians
if ii == 0:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(cluster["diameter"]/60./np.cos(dec_rad)), height=np.radians(cluster["diameter"]/60.), alpha=.4, edgecolor="r", facecolor="r", label="Open Clusters")
else:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(cluster["diameter"]/60./np.cos(dec_rad)), height=np.radians(cluster["diameter"]/60.), alpha=.4, edgecolor="r", facecolor="r")
coverage_plot.axis.add_patch(circle)
"""
globular_clusters = np.genfromtxt("data/allGlobularClusters.txt", dtype=[("r_h",float),("ra","|S11"), ("dec","|S12")], delimiter=",").view(np.recarray)
for ii,cluster in enumerate(globular_clusters):
ra_deg = g.RA(cluster["ra"]).degrees
dec_rad = g.Dec(cluster["dec"]).radians
diameter = 2.*cluster["r_h"]*10
if ii == 0:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(diameter/60./np.cos(dec_rad)), height=np.radians(diameter/60.), alpha=.4, edgecolor="g", facecolor="g", label="Globular Clusters")
else:
circle = matplotlib.patches.Ellipse((np.radians(-ra_deg+180), dec_rad), width=np.radians(diameter/60./np.cos(dec_rad)), height=np.radians(diameter/60.), alpha=.4, edgecolor="g", facecolor="g")
coverage_plot.axis.add_patch(circle)
"""
coverage_plot.addLegend()
coverage_plot.title.set_fontsize(title_font_size)
legendtext = coverage_plot.legend.get_texts()
plt.setp(legendtext, fontsize=label_font_size) # the legend text fontsize
#plt.show()
coverage_plot.figure.savefig("plots/aas_ptf_coverage.png")
# To be used by the Praesepe timescale distribution plot and the
# detection efficiency
timescale_bins = np.logspace(np.log10(1), np.log10(1000), 100) # from 1 day to 1000 days
def praesepe_timescale_distribution():
filename = "data/praesepeTimeScales.npy"
timescales = np.load(filename)
plt.figure(figsize=(15,15))
plt.hist(timescales, bins=timescale_bins, normed=True)
plt.xscale("log")
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
t = plt.title("Normalized timescale distribution for Praesepe field", size=title_font_size)
t.set_y(1.04)
ax = plt.gca()
for label in ax.get_xticklabels():
label.set_fontsize(tick_font_size)
ax.set_yticklabels([])
plt.savefig("plots/aas_praesepe_timescale.png")
def praesepe_event_rate():
filename = "data/praesepeTimeScales.npy"
timescales = np.load(filename)
global_event_rate = 0.0081 #
# To get the event rate distribution, I have to normalize the timescale dist. so
# the integral from 0 to infinity = global event rate
timescale_pdf, bin_edges = np.histogram(timescales, bins=timescale_bins, density=True)
event_rate_distribution = timescale_pdf*global_event_rate
# Get the Praesepe detection efficiency
filename = "data/praesepe_detection_efficiency.npy"
# Load the simulation results
sim_results = np.load(filename)
# Get the RMS scatter of delta chi-squared for the vanilla light curves
dcs = [x[0] for x in session.query(VariabilityIndices.delta_chi_squared).join(LightCurve).filter(LightCurve.objid < 100000).all()]
sigma = np.std(dcs)
# 2*sigma ~ 300
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[sim_results["delta_chi_squared"] > 2.*sigma]
detections = detections[detections["event_added"] == True]
#detections = sim_results[(sim_results["delta_chi_squared"] > 2.*sigma)]
#detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
detection_efficiency_distribution = tE_counts / total_counts
bin_widths = bin_edges[1:] - bin_edges[:-1]
# Compute number of events!
# - detection_efficiency_distribution is dE/dt_E
# - event_rate_distribution is dN/dt_E
# - bin_widths give us dt_E
N_exp = np.sum(detection_efficiency_distribution * event_rate_distribution / 365. * bin_widths) * 102. # days of Praesepe obs.
# Number of events if we had observed it consistently for 3 years
N_exp_all_survey = np.sum(detection_efficiency_distribution * event_rate_distribution / 365. * bin_widths) * 1095. # days of Praesepe obs.
print "Number of events in our Praesepe sample (102 days): {} +/- {}".format(N_exp, np.sqrt(N_exp))
print "Number of events if we had observed it consistently for 3 years: {} +/- {}".format(N_exp_all_survey, np.sqrt(N_exp_all_survey))
def praesepe_detection_efficiency():
filename = "data/praesepe_detection_efficiency.npy"
if not os.path.exists(filename):
# Select out just the Praesepe light curves (objid < 100000)
light_curve_generator = de.AllPraesepeLightCurves(limit=10000, random=True)
sim_results = de.run_simulation(light_curve_generator, N=100)
np.save(filename, sim_results)
# Load the simulation results
sim_results = np.load(filename)
# Get the RMS scatter of delta chi-squared for the vanilla light curves
dcs = [x[0] for x in session.query(VariabilityIndices.delta_chi_squared).join(LightCurve).filter(LightCurve.objid < 100000).all()]
sigma = np.std(dcs)
# 2*sigma ~ 300
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[sim_results["delta_chi_squared"] > 2.*sigma]
detections = detections[detections["event_added"] == True]
#detections = sim_results[(sim_results["delta_chi_squared"] > 2.*sigma)]
#detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, total_bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
detection_efficiency = tE_counts / total_counts
bin_widths = total_bin_edges[1:] - total_bin_edges[:-1]
#print np.sum(bin_widths*detection_efficiency)
#return
plt.figure(figsize=(15,15))
# Multiply by 2 because we only put events in 50% of the cases
plt.semilogx((total_bin_edges[1:]+total_bin_edges[:-1])/2, tE_counts / total_counts, 'k-', lw=3)
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.ylim(0., 0.75)
t = plt.title("PTF Detection Efficiency for Praesepe Light Curves", size=title_font_size)
t.set_y(1.04)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
plt.tight_layout()
plt.savefig("plots/aas_praesepe_detection_efficiency.png")
def random_praesepe_light_curve():
objid = np.random.randint(82415)
try:
lc = session.query(LightCurve).filter(LightCurve.objid == objid).one()
except:
lc = session.query(LightCurve).filter(LightCurve.objid == 101).one()
return lc
def survey_detection_effieciency():
""" Here I want a figure that shows how the detection efficiency
changes for uniform, random, and clumpy observations
"""
baseline = 365 #days
# TODO: Rerun with 1024, add praesepe line
max_num_observations = 1024
min_num_observations = 16
num_clumps = 4
num_iterations = 10000
if not os.path.exists("data/aas_survey_detection_efficiency.pickle"):
|
f = open("data/aas_survey_detection_efficiency.pickle", "r")
data_dict = pickle.load(f)
# Plotting stuff
plt.figure(figsize=(15,15))
dcs_cutoff = 300.
num_observations = [2**x for x in range(int(np.log2(max_num_observations)), int(np.log2(min_num_observations))-1, -1)]
linestyles = {"uniform" : "--", "clumpy" : "-"}
linecolors = {1. : "k", 10. : "r", 100. : "c"}
for sampling in data_dict.keys():
for timescale in data_dict[sampling].keys():
data = np.array(data_dict[sampling][timescale])
efficiencies = []
for col,num_obs in enumerate(num_observations):
efficiencies.append(np.sum(data[:,col] > dcs_cutoff) / len(data[:,col]))
plt.plot(np.log2(num_observations), efficiencies, ls=linestyles[sampling], color=linecolors[timescale], label=r"$t_E={}$ day, {} sampling".format(int(timescale), sampling), lw=3)
#plt.axvline(np.log2(625.), c="g", ls="--", lw=2, label="PTF Praesepe fields")
plt.xlabel("Number of Observations / 1 year", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.title("Simulated Detection Efficiency for\nDifferent Sampling Patterns", size=title_font_size)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
ax.set_xticklabels(num_observations[::-1])
legend = plt.legend(loc="upper left", shadow=True, fancybox=True)
legendtext = legend.get_texts()
plt.setp(legendtext, fontsize=tick_font_size) # the legend text fontsize
plt.tight_layout()
#plt.show()
plt.savefig("plots/aas_survey_detection_efficiency.png")
def variability_indices():
import PraesepeLightCurves as plc
# TODO: Sample timescale from the distribution that Amanda will send me
# TODO: Fix legend in post-editing
plc.aas_figure()
def variability_indices_detection_efficiency():
""" This figure should show the detection efficiency curve for the Praesepe
data for each variability index (cut at 2-sigma)
"""
filename = "data/praesepe_detection_efficiency.npy"
# Load the simulation results
sim_results = np.load(filename)
var_indices = session.query(VariabilityIndices).join(LightCurve).filter(LightCurve.objid < 100000).all()
styles = [(3,"-."), (3,":"), (3,"--"), (1.5,"--"), (2,"-")]
colors = ["c", "m", "g", "y", "k"]
plt.figure(figsize=(15,15))
for ii,idx in enumerate(["j", "k", "eta", "sigma_mu", "delta_chi_squared"]):
values = [getattr(x, idx) for x in var_indices]
sigma = np.std(values)
mu = np.mean(values)
sim_results[np.isnan(sim_results["tE"])] = 0.
detections = sim_results[(np.fabs(sim_results[idx]) > (mu + 2.*sigma)) | (np.fabs(sim_results[idx]) < (mu - 2.*sigma))]
detections = detections[detections["event_added"] == True]
tE_counts, tE_bin_edges = np.histogram(detections["tE"], bins=timescale_bins)
total_counts, total_bin_edges = np.histogram(sim_results[sim_results["event_added"] == True]["tE"], bins=timescale_bins)
lw,ls = styles[ii]
plt.semilogx((total_bin_edges[1:]+total_bin_edges[:-1])/2, tE_counts / total_counts, c=colors[ii], lw=lw, label=r"{}".format(parameter_to_label[idx]), ls=ls)
plt.xlabel(r"$t_E$ [days]", size=label_font_size)
plt.ylabel(r"Detection Efficiency $\mathcal{E}(t_E)$", size=label_font_size)
plt.ylim(0., 1.0)
t = plt.title("PTF Detection Efficiency for Praesepe Light Curves", size=title_font_size)
t.set_y(1.04)
# Change tick label size
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(tick_font_size)
leg = plt.legend(shadow=True, fancybox=True)
legendtext = leg.get_texts()
plt.setp(legendtext, fontsize=label_font_size)
plt.tight_layout()
plt.savefig("plots/aas_var_indices_detection_efficiency.png")
#plt.show()
def systematics_552():
# Bad1 http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/17/f2/c6/p13/v1/PTF_201004172696_i_p_scie_t062817_u011575385_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
# Bad2 http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/17/f2/c6/p13/v1/PTF_201004173180_i_p_scie_t073759_u011575280_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
# Good http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/07/f2/c6/p13/v1/PTF_201004071439_i_p_scie_t032714_u011539562_f02_p110002_c06.fits?center=127.403,19.6476deg&size=100px
mjd_offset = 54832
lc1 = session.query(LightCurve).filter(LightCurve.objid == 552).one()
bad_light_curve = PTFLightCurve.fromDBLightCurve(lc1)
light_curves = session.query(LightCurve).filter(func.q3c_radial_query(LightCurve.ra, LightCurve.dec, lc1.ra, lc1.dec,30/3600.)).all()
bad_obs1 = 55303.26964
bad_obs2 = 55303.31804
good_obs = 55293.14391
# seeing, airmass, filename, mjd
imlist = np.genfromtxt("data/aas_552_imagelist.txt", skiprows=4, usecols=[11,12,20,25], dtype=[("seeing", float), ("airmass", float), ("filename", "|S100"), ("mjd", float)])
idx_sort = np.argsort(imlist["mjd"])
imlist = imlist[idx_sort]
print "Bad1:", imlist["filename"][imlist["mjd"] == 55303.26964]
print "Bad2:", imlist["filename"][imlist["mjd"] == 55303.31804]
print "Good:", imlist["filename"][imlist["mjd"] == 55293.14391]
return
plt.plot(imlist["mjd"], imlist["seeing"], "r.")
plt.show()
def systematics_9347():
# http://kanaloa.ipac.caltech.edu/ibe/search/ptf/dev/process?POS=129.568,19.6232
# one http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/05/15/f2/c6/p13/v1/PTF_201005152355_i_p_scie_t053906_u011486277_f02_p110004_c06.fits?center=129.568,19.6232deg&size=150px
# two http://kanaloa.ipac.caltech.edu/ibe/data/ptf/dev/process/proc/2010/04/25/f2/c6/p13/v1/PTF_201004251929_i_p_scie_t043750_u011578017_f02_p110004_c06.fits?center=129.568,19.6232deg&size=150px
mjd_offset = 54832
lc1 = session.query(LightCurve).filter(LightCurve.objid == 9347).one()
bad_light_curve = PTFLightCurve.fromDBLightCurve(lc1)
light_curves = session.query(LightCurve).filter(func.q3c_radial_query(LightCurve.ra, LightCurve.dec, lc1.ra, lc1.dec,30/3600.)).all()
bad_light_curve.plot()
print [x.ra for x in light_curves]
print [x.dec for x in light_curves]
print lc1.ra, lc1.dec
return
if __name__ == "__main__":
#survey_coverage()
#praesepe_detection_efficiency()
#survey_detection_effieciency()
#variability_indices()
#variability_indices_detection_efficiency()
praesepe_timescale_distribution()
#praesepe_event_rate() | data_dict = {"clumpy" : {1. : [], 10. : [], 100 : []}, "uniform" : {1. : [], 10. : [], 100 : []}}
for timescale in [1., 10., 100.]:
for sampling in ["clumpy", "uniform"]: #, "random"]:
if sampling == "random":
mjd = np.random.random(max_num_observations)*baseline
elif sampling == "clumpy":
sparse_samples = np.random.random(max_num_observations/2)*baseline
clumps = []
days = []
sum = 0.
pts_per_clump = max_num_observations / 2 / num_clumps
for ii in range(num_clumps):
day = np.random.randint(365)
if day in days: continue
days.append(day)
clumpy_samples = np.linspace(day+0.1, day+0.6, pts_per_clump)
clumps.append(clumpy_samples)
clumps.append(sparse_samples)
mjd = np.concatenate(tuple(clumps))
plt.plot(mjd, [1.]*len(mjd), 'ro', alpha=0.4)
plt.show()
elif sampling == "uniform":
mjd = np.linspace(0., baseline, max_num_observations)
for jj in range(num_iterations):
lc = random_praesepe_light_curve()
if len(lc.mag) < 100: continue
dupe_mags = np.array(lc.mag*15)
dupe_err = np.array(list(lc.error)*15)
shuffled_idx = np.arange(0, len(dupe_mags))
np.random.shuffle(shuffled_idx)
mags = dupe_mags[shuffled_idx]
err = dupe_err[shuffled_idx]
sim_light_curve = simu.SimulatedLightCurve(mjd=mjd, mag=mags[:len(mjd)], error=err[:len(mjd)])
sim_light_curve.addMicrolensingEvent(tE=timescale)
#sim_light_curve.plot()
delta_chi_squareds = []
sim_mjd = sim_light_curve.mjd
sim_mag = sim_light_curve.mag
sim_err = sim_light_curve.error
while True:
if len(sim_mjd) < min_num_observations: break
dcs = simu.compute_delta_chi_squared((sim_mjd, sim_mag, sim_err), force_fit=True)
delta_chi_squareds.append(dcs)
prune = np.arange(len(sim_mjd))
np.random.shuffle(prune)
prune = prune[::2]
sim_mjd = sim_mjd[prune]
sim_mag = sim_mag[prune]
sim_err = sim_err[prune]
data_dict[sampling][timescale].append(delta_chi_squareds)
f = open("data/aas_survey_detection_efficiency.pickle", "w")
pickle.dump(data_dict, f)
f.close() | conditional_block |
get_models.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#extract models info from unifi javascript
# N Waterton 4th July 2019 V1.0: initial release
# N Waterton 13th July 2019 V1.0.2 minor fixes.
# N Waterton 10th Sep 2019 V1.0.3 minor fixes
import time, os, sys, json, re
from datetime import timedelta
from collections import OrderedDict
import hjson #pip3 install hjson
import signal
import logging
from logging.handlers import RotatingFileHandler
supported_devices=['UGW','USW','UAP','UDM']
__VERSION__ = __version__ = '1.0.3'
class progress_bar():
'''
create terminal progress bar
@params:
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
'''
def __init__(self,total=100, prefix='', suffix='', decimals=1, bar_length=100):
self.total = total
self.prefix = prefix
self.suffix = suffix
self.decimals = decimals
self.bar_length = bar_length
self.prev_output_len = 0
def update(self,iteration):
iteration = max(min(iteration, self.total), 0)
str_format = "{0:." + str(self.decimals) + "f}"
percents = str_format.format(100 * (iteration / float(self.total)))
filled_length = int(round(self.bar_length * iteration / float(self.total)))
#bar = b'█'.decode('utf8') * filled_length + '-' * (self.bar_length - filled_length)
bar = '█' * filled_length + '-' * (self.bar_length - filled_length)
output = '\r%s |%s| %s%s %s' % (self.prefix, bar, percents, '%', self.suffix)
current_output_len = len(output)
diff = self.prev_output_len - current_output_len
if diff > 0: #if output is shorter than previously
output += ' ' * diff #pad output with spaces
self.prev_output_len = current_output_len
sys.stdout.write(output)
sys.stdout.flush()
def newline():
output = '\n'
sys.stdout.write(output)
sys.stdout.flush()
def pprint(obj):
"""Pretty JSON dump of an object."""
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
def deduplicate_list(input_list):
return list(dict.fromkeys(input_list))
def get_js_web_urls(url):
global requests
import requests #pip3 install requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup #pip3 install bs4
r = requests.get(url, verify=False, timeout=10)
soup = BeautifulSoup(r.content,features="html.parser")
src = [sc["src"] for sc in soup.select("script[src]")]
return src
def get_js_from_web(base_url,url,tempdir = 'tempDir'+os.sep):
log.info('retrieving js from: %s' % base_url+url)
if not os.path.exists(tempdir):
os.mkdir(tempdir)
try:
data = None
r = requests.get(base_url+url, verify=False, timeout=10)
assert r.status_code == 200
data = r.text
if data:
os.makedirs(os.path.dirname(tempdir+url), exist_ok=True)
with open(tempdir+url,'w+') as f:
f.write(data)
return tempdir+url
except (AssertionError, requests.ConnectionError, requests.Timeout) as e:
log.error("Connection failed error: %s" % e)
except Exception as e:
log.exception("unknown exception: %s" % e)
return None
def get_js_files(unifi_dir):
js_files = []
for root, dir, files in os.walk(unifi_dir):
for file in files:
if file.endswith('.js'):
js_files.append(root+os.sep+file)
return js_files
def find_models_file(files, pattern):
models_files = []
pattern = re.compile(pattern)
for file in files:
for i, line in enumerate(open(file)):
for match in re.finditer(pattern, line):
#log.info('Found in file %s on line %s: %s' % (file, i+1, match.group()))
models_files.append(file)
continue
return models_files
def find_json(files, pattern_match, all=False):
pattern = re.compile(r".*\.exports=(\{.*?\}\}\}\})\}.*")
json_obj = []
for file in files:
size = os.path.getsize(file)
pos = 0
progress = progress_bar(100, decimals=0, bar_length=40)
progress.prefix='%s%s' % ('...' if len(file) > 30 else '', file[max(0,len(file)-27):])
with open(file) as dataFile:
for line, data in enumerate(dataFile,1):
pos += len(data)
#log.info("searching line: %d, %d%%" % (line, int(pos*100//size)))
progress.update(pos*100.0/size)
match_iter = pattern.finditer(data)
for match in match_iter:
if pattern_match in match.group(1):
json_string = match.group(1)
json_string = json_string.replace("!0", "1").replace("!1", "0").strip()
try:
jsonObj = hjson.loads(json_string)
json_obj.append(jsonObj)
progress.suffix="matches: %d" % len(json_obj)
log.debug('Found json: %s' % pprint(jsonObj))
if not all:
newline()
log.info("Total Json matches found: %d" % len(json_obj))
return json_obj
except json.decoder.JSONDecodeError as e:
newline()
log.error('Json Error: %s' % e)
newline()
log.info("Total Json matches found: %d" % len(json_obj))
return json_obj
def merge_dicts(a, b):
c = a.copy()
for k,v in b.items():
if isinstance(v, dict):
d = a.get(k, None)
if isinstance(d,dict):
c[k] = merge_dicts(v, d)
else:
c[k]=v
else:
c[k]=v
return c
def consolidate_json(json_list):
json_obj = {}
for json in json_list:
json_obj.update(json)
json_dict = OrderedDict(sorted(json_obj.items(), key=lambda t: t[1]['type']))
return json_dict
def get_summary(data):
summary = {}
for device, info in data.items():
if device in supported_devices: #models.json format data
summary[device]=len(info)
else: #unifi devices format data
if info['type'] not in summary:
summary[info['type']] = 1
else:
summary[info['type']] += 1
return summary
def update_models(file, data):
if os.path.exists(file):
log.warn('Updating file: %s, press ^C if you want to exit!' % file)
with open(file) as f:
models = json.loads(f.read(), object_pairs_hook=OrderedDict)
new_models = OrderedDict()
#ensure we have an entry for each supported type (even if it's blank)
for type in supported_devices:
if not models.get(type):
models[type] = {}
new_models[type] = {}
log.info('NOTE! currently supported devices are: %s' % supported_devices)
for device, info in data.items():
type = info['type'].upper()
result = True
if type in supported_devices and device not in models[type]:
#check for UDN
if type == 'UDM':
new_models[type][device]=info #add to models database (even though UDM section isn't currently used)
elif type == 'UAP':
#all that is needed for AP's
new_models[type][device]={}
new_models[type][device]['name']=info['name']
log.info('Added %s device: %s - %s' % (type, device, info['name']))
continue
else:
for existing_device, existing_info in models[type].items():
if info['name'].upper() in existing_info['name'].upper():
log.info('========New Device %s =========' % device)
log.info('looks like %s - %s is similar to %s - %s' % (device, info['name'], existing_device, existing_info['name']))
if query_yes_no("Do you want to copy it into the database? (if You select No, you can add it as a new device)"):
new_models[type][device]=existing_info
log.info('Device: %s copied' % new_models[type][device])
result = False
else:
result = query_yes_no("Do you want to add it as a new device to the database?")
if not result:
continue
#new device
log.info('========New Device %s =========' % device)
log.info('found new device: %s - %s, type: %s' % (device, info['name'], type))
if not query_yes_no("Do you want to add it to the database?"):
continue
else:
#UDM only
if type == 'UDM':
log.info('This is a %s device, you have to choose what type of device to add it as' % type)
while True:
try:
options = [{option: choice.upper()} for option, choice in enumerate(info['subtypes'])]
sel_option = query_number('please select one of the following options: %s' % options, 0)
type = info['subtypes'][sel_option].upper()
break
except exception as e:
log.error('error: %s' % e)
#add new device name
new_models[type][device]={}
new_models[type][device]['name']=info['name']
if type != 'UAP': #only way for UAP to get here is if a UDM was selected as a UAP, this will skip over in that case.
if info.get('features'):
poe = info['features'].get('poe',0)
new_models[type][device]['poe'] = True if poe == 1 else False
if info.get('diagram'):
diagram = info['diagram']
log.info('here is a diagram of the device: %s' % pprint(diagram))
rows = len(diagram)
standard, sfp, sfp_plus = extract_ports_list(info['ports'])
if len(standard) > 0:
log.info('ports %s are standard ports' % standard )
standard = len(standard)
if len(sfp) > 0:
log.info('ports %s are sfp ports' % sfp )
sfp = len(sfp)
if len(sfp_plus) > 0:
log.info('ports %s are sfp+ ports' % sfp_plus )
sfp_plus = len(sfp_plus)
new_models[type][device]['ports']={'number':standard,'rows':0 if standard==0 else 1 if standard <= 8 else 2}
new_models[type][device]['sfp']={'number':sfp,'rows': sfp if sfp < 2 else 2}
new_models[type][device]['sfp+']={'number':sfp_plus,'rows':sfp_plus if sfp_plus < 2 else 2}
if standard > 0:
rows = new_models[type][device]['ports']['rows']
new_models[type][device]['ports']['rows'] = query_number('how many ROWS of standard ports are there? (eg, 1,2)', rows)
if sfp > 0:
rows = new_models[type][device]['sfp']['rows']
new_models[type][device]['sfp']['rows'] = query_number('how many ROWS of sfp ports are there? (eg, 1,2)', rows)
if sfp_plus > 0:
rows = new_models[type][device]['sfp+']['rows']
new_models[type][device]['sfp+']['rows'] = query_number('how many ROWS of sfp+ ports are there? (eg, 1,2)', rows)
if sfp > 0 or sfp_plus > 0:
while not new_models[type][device].get('order'):
if query_yes_no("Are the first ports (from the left) standard ports?"):
new_models[type][device]['order'] = [0,1,2]
else:
if query_yes_no("Are the first ports (from the left) sfp ports?"):
new_models[type][device]['order'] = [1,0,2]
else:
if query_yes_no("Are the first ports (from the left) sfp+ ports?"):
new_models[type][device]['order'] = [2,0,1]
else:
log.error('OK, the first ports must be standard, sfp, or sfp+ ports. try again')
log.debug('Device: %s added' % new_models[type][device])
log.info('Device: %s added' % new_models[type][device]['name'])
log.debug('The following new devices have been added: %s' % pprint(new_models))
log.info('New devices: %s' % get_summary(new_models))
all_models = OrderedDict()
all_models.update(models)
if not any([value for value in get_summary(new_models).values()]):
log.info('No New Models Found')
#return
else:
if query_yes_no("Do you want to add them to the database?"):
all_models = merge_dicts(models, new_models)
log.info('database updated')
if query_yes_no("Do you want to add the full Unifi data to the database (recommended)?"):
for type, devices in all_models.copy().items():
for device in devices:
if device in data:
#log.info("adding : %s to models[%s][%s]['unifi']" % (data[device], type, device))
all_models[type][device]['unifi'] = data[device].copy()
log.debug('The following data will be written to the database: %s' % pprint(all_models))
log.info('total devices: %s' % get_summary(all_models))
if query_yes_no("Do you want to overwrite the %s file?" % file, None):
#backup original file
from shutil import copyfile
copyfile(file, file+'.org')
#write models file out
with open(file, 'w') as f:
f.write(pprint(all_models))
log.info('File: %s Updated' % file)
log.info('Total devices: %s' % get_summary(all_models))
else:
log.info('File: %s NOT updated' % file)
def extract_ports_list(ports):
'''
returns ports list from unifi data as tuple of lists of port number ints
eg ([0,1,2,3], [4,5],[])
(standard []. sfp[], sfp_plus[])
NOTE, USG's start at port 0, but switches start at port 1.
'''
standard = [] | standard = [x for x in range(len(ports))]
if ports.get('standard'):
standard = ports_list_decode(ports['standard'])
if ports.get('sfp'):
sfp = ports_list_decode(ports['sfp'])
if ports.get('plus'):
sfp_plus = ports_list_decode(ports['plus'])
return standard, sfp, sfp_plus
def ports_list_decode(ports):
ports_list = []
if isinstance(ports, int):
ports_list = [x for x in range(1,ports+1,1)]
if isinstance(ports, list):
ports_list = ports
if isinstance(ports, str):
#log.info('Ports is a string: %s' % ports)
ports_string_list = ports.split('-')
ports_list = [x for x in range(int(ports_string_list[0]),int(ports_string_list[-1])+1,1)]
return ports_list
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_number(question, default=1):
"""Ask a numerical question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be a number or None (meaning
an answer is required of the user).
The "answer" return value is an int.
"""
if default is None:
prompt = " [] "
else:
prompt = " [%d] " % default
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return int(default)
elif choice.isdigit():
return int(choice)
else:
sys.stdout.write("Please respond with a number\n")
def secondsToStr(elapsed=None):
if elapsed is None:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
else:
return str(timedelta(seconds=elapsed))
def sigterm_handler(signal, frame):
log.info('Received SIGTERM signal')
sys.exit(0)
def setup_logger(logger_name, log_file, level=logging.DEBUG, console=False):
try:
l = logging.getLogger(logger_name)
formatter = logging.Formatter('[%(levelname)1.1s %(asctime)s] (%(name)-5s) %(message)s')
if log_file is not None:
fileHandler = logging.handlers.RotatingFileHandler(log_file, mode='a', maxBytes=2000000, backupCount=5)
fileHandler.setFormatter(formatter)
if console == True:
formatter = logging.Formatter('[%(levelname)1.1s %(name)-5s] %(message)s')
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
if log_file is not None:
l.addHandler(fileHandler)
if console == True:
l.addHandler(streamHandler)
except Exception as e:
print("Error in Logging setup: %s - do you have permission to write the log file??" % e)
sys.exit(1)
def main():
'''
Main routine
'''
global log
import argparse
parser = argparse.ArgumentParser(description='extract model info from Unifi')
parser.add_argument('-f','--files', action="store", default='/usr/lib/unifi', help='unifi files base location (default: /usr/lib/unifi)')
parser.add_argument('-u','--url', action="store", default=None, help='unifi url base location eg https://192.168.1.1:8443 (default: None)')
parser.add_argument('-up','--update', action="store", default=None, help='models file to update eg models.json (default: None)')
parser.add_argument('-o','--out', action="store", default='models_tmp.json', help='output file name (default: models_tmp.json)')
parser.add_argument('-p','--pattern', action="store", default='U7HD', help='pattern to search for (default; U7HD)')
parser.add_argument('-a','--all', action='store_true', help='get all matches (not just first) default: False)', default = False)
parser.add_argument('-l','--log', action="store",default="None", help='log file. (default: None)')
#parser.add_argument('-d','--dryrun', action='store_true', help='dry run (no file written)', default = False)
parser.add_argument('-D','--debug', action='store_true', help='debug mode', default = False)
parser.add_argument('-V','--version', action='version',version='%(prog)s {version}'.format(version=__VERSION__))
arg = parser.parse_args()
if arg.debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
#setup logging
if arg.log == 'None':
log_file = None
else:
log_file=os.path.expanduser(arg.log)
setup_logger('Main',log_file,level=log_level,console=True)
log = logging.getLogger('Main')
log.debug('Debug mode')
log.info("Python Version: %s" % sys.version.replace('\n',''))
log.info("Unifi Models Extract Version: %s" % __version__)
#register signal handler
signal.signal(signal.SIGTERM, sigterm_handler)
try:
start = time.time()
js_files = []
if arg.url:
log.info("Downloading javascript files, this can take a while...")
tmpdir = 'tempDir'+os.sep
if arg.debug and os.path.exists(tmpdir):
arg.files = tmpdir
else:
base_url = arg.url+'/manage/'
src = get_js_web_urls(base_url)
for url in src:
file = get_js_from_web(base_url,url,tmpdir)
if file:
js_files.append(file)
#log.info('found js files : %s' % js_files)
if len(js_files) == 0:
if not os.path.exists(arg.files):
log.warn('This has to be run on the unifi controller, not your display device!')
lop.warn('please supply a URL for your controller to run from your display device (eg -u https://192.168.1.1:8443)')
os.exit(1)
log.warn("Searching for models data in %s, This can take quite a while to run on large files!" % arg.files)
js_files = get_js_files(arg.files)
models_files = find_models_file(js_files, arg.pattern)
models_files = deduplicate_list(models_files)
json_list = find_json(models_files, arg.pattern, arg.all)
json_models = consolidate_json(json_list)
if len(json_models) > 0:
with open(arg.out,'w+') as dataFile:
dataFile.write(pprint(json_models))
log.info('Got data for: %s' % get_summary(json_models))
log.info('Models Data written to: %s' % arg.out)
if arg.update:
update_models(arg.update, json_models)
else:
log.warn('No Models Data found')
except (KeyboardInterrupt, SystemExit):
log.info("System exit Received - Exiting program")
finally:
log.debug("Program Exited")
if arg.url and not arg.debug:
if os.path.exists(tmpdir):
import shutil
shutil.rmtree(tmpdir)
log.info("Elapsed time: %s" % secondsToStr(time.time()-start))
if __name__ == '__main__':
main() | sfp = []
sfp_plus = []
if isinstance(ports, (list, dict)):
#standard = [x for x in range(1,len(ports)+1,1)] | random_line_split |
get_models.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#extract models info from unifi javascript
# N Waterton 4th July 2019 V1.0: initial release
# N Waterton 13th July 2019 V1.0.2 minor fixes.
# N Waterton 10th Sep 2019 V1.0.3 minor fixes
import time, os, sys, json, re
from datetime import timedelta
from collections import OrderedDict
import hjson #pip3 install hjson
import signal
import logging
from logging.handlers import RotatingFileHandler
supported_devices=['UGW','USW','UAP','UDM']
__VERSION__ = __version__ = '1.0.3'
class progress_bar():
'''
create terminal progress bar
@params:
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
'''
def __init__(self,total=100, prefix='', suffix='', decimals=1, bar_length=100):
|
def update(self,iteration):
iteration = max(min(iteration, self.total), 0)
str_format = "{0:." + str(self.decimals) + "f}"
percents = str_format.format(100 * (iteration / float(self.total)))
filled_length = int(round(self.bar_length * iteration / float(self.total)))
#bar = b'█'.decode('utf8') * filled_length + '-' * (self.bar_length - filled_length)
bar = '█' * filled_length + '-' * (self.bar_length - filled_length)
output = '\r%s |%s| %s%s %s' % (self.prefix, bar, percents, '%', self.suffix)
current_output_len = len(output)
diff = self.prev_output_len - current_output_len
if diff > 0: #if output is shorter than previously
output += ' ' * diff #pad output with spaces
self.prev_output_len = current_output_len
sys.stdout.write(output)
sys.stdout.flush()
def newline():
output = '\n'
sys.stdout.write(output)
sys.stdout.flush()
def pprint(obj):
"""Pretty JSON dump of an object."""
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
def deduplicate_list(input_list):
return list(dict.fromkeys(input_list))
def get_js_web_urls(url):
global requests
import requests #pip3 install requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup #pip3 install bs4
r = requests.get(url, verify=False, timeout=10)
soup = BeautifulSoup(r.content,features="html.parser")
src = [sc["src"] for sc in soup.select("script[src]")]
return src
def get_js_from_web(base_url,url,tempdir = 'tempDir'+os.sep):
log.info('retrieving js from: %s' % base_url+url)
if not os.path.exists(tempdir):
os.mkdir(tempdir)
try:
data = None
r = requests.get(base_url+url, verify=False, timeout=10)
assert r.status_code == 200
data = r.text
if data:
os.makedirs(os.path.dirname(tempdir+url), exist_ok=True)
with open(tempdir+url,'w+') as f:
f.write(data)
return tempdir+url
except (AssertionError, requests.ConnectionError, requests.Timeout) as e:
log.error("Connection failed error: %s" % e)
except Exception as e:
log.exception("unknown exception: %s" % e)
return None
def get_js_files(unifi_dir):
js_files = []
for root, dir, files in os.walk(unifi_dir):
for file in files:
if file.endswith('.js'):
js_files.append(root+os.sep+file)
return js_files
def find_models_file(files, pattern):
models_files = []
pattern = re.compile(pattern)
for file in files:
for i, line in enumerate(open(file)):
for match in re.finditer(pattern, line):
#log.info('Found in file %s on line %s: %s' % (file, i+1, match.group()))
models_files.append(file)
continue
return models_files
def find_json(files, pattern_match, all=False):
pattern = re.compile(r".*\.exports=(\{.*?\}\}\}\})\}.*")
json_obj = []
for file in files:
size = os.path.getsize(file)
pos = 0
progress = progress_bar(100, decimals=0, bar_length=40)
progress.prefix='%s%s' % ('...' if len(file) > 30 else '', file[max(0,len(file)-27):])
with open(file) as dataFile:
for line, data in enumerate(dataFile,1):
pos += len(data)
#log.info("searching line: %d, %d%%" % (line, int(pos*100//size)))
progress.update(pos*100.0/size)
match_iter = pattern.finditer(data)
for match in match_iter:
if pattern_match in match.group(1):
json_string = match.group(1)
json_string = json_string.replace("!0", "1").replace("!1", "0").strip()
try:
jsonObj = hjson.loads(json_string)
json_obj.append(jsonObj)
progress.suffix="matches: %d" % len(json_obj)
log.debug('Found json: %s' % pprint(jsonObj))
if not all:
newline()
log.info("Total Json matches found: %d" % len(json_obj))
return json_obj
except json.decoder.JSONDecodeError as e:
newline()
log.error('Json Error: %s' % e)
newline()
log.info("Total Json matches found: %d" % len(json_obj))
return json_obj
def merge_dicts(a, b):
c = a.copy()
for k,v in b.items():
if isinstance(v, dict):
d = a.get(k, None)
if isinstance(d,dict):
c[k] = merge_dicts(v, d)
else:
c[k]=v
else:
c[k]=v
return c
def consolidate_json(json_list):
json_obj = {}
for json in json_list:
json_obj.update(json)
json_dict = OrderedDict(sorted(json_obj.items(), key=lambda t: t[1]['type']))
return json_dict
def get_summary(data):
summary = {}
for device, info in data.items():
if device in supported_devices: #models.json format data
summary[device]=len(info)
else: #unifi devices format data
if info['type'] not in summary:
summary[info['type']] = 1
else:
summary[info['type']] += 1
return summary
def update_models(file, data):
if os.path.exists(file):
log.warn('Updating file: %s, press ^C if you want to exit!' % file)
with open(file) as f:
models = json.loads(f.read(), object_pairs_hook=OrderedDict)
new_models = OrderedDict()
#ensure we have an entry for each supported type (even if it's blank)
for type in supported_devices:
if not models.get(type):
models[type] = {}
new_models[type] = {}
log.info('NOTE! currently supported devices are: %s' % supported_devices)
for device, info in data.items():
type = info['type'].upper()
result = True
if type in supported_devices and device not in models[type]:
#check for UDN
if type == 'UDM':
new_models[type][device]=info #add to models database (even though UDM section isn't currently used)
elif type == 'UAP':
#all that is needed for AP's
new_models[type][device]={}
new_models[type][device]['name']=info['name']
log.info('Added %s device: %s - %s' % (type, device, info['name']))
continue
else:
for existing_device, existing_info in models[type].items():
if info['name'].upper() in existing_info['name'].upper():
log.info('========New Device %s =========' % device)
log.info('looks like %s - %s is similar to %s - %s' % (device, info['name'], existing_device, existing_info['name']))
if query_yes_no("Do you want to copy it into the database? (if You select No, you can add it as a new device)"):
new_models[type][device]=existing_info
log.info('Device: %s copied' % new_models[type][device])
result = False
else:
result = query_yes_no("Do you want to add it as a new device to the database?")
if not result:
continue
#new device
log.info('========New Device %s =========' % device)
log.info('found new device: %s - %s, type: %s' % (device, info['name'], type))
if not query_yes_no("Do you want to add it to the database?"):
continue
else:
#UDM only
if type == 'UDM':
log.info('This is a %s device, you have to choose what type of device to add it as' % type)
while True:
try:
options = [{option: choice.upper()} for option, choice in enumerate(info['subtypes'])]
sel_option = query_number('please select one of the following options: %s' % options, 0)
type = info['subtypes'][sel_option].upper()
break
except exception as e:
log.error('error: %s' % e)
#add new device name
new_models[type][device]={}
new_models[type][device]['name']=info['name']
if type != 'UAP': #only way for UAP to get here is if a UDM was selected as a UAP, this will skip over in that case.
if info.get('features'):
poe = info['features'].get('poe',0)
new_models[type][device]['poe'] = True if poe == 1 else False
if info.get('diagram'):
diagram = info['diagram']
log.info('here is a diagram of the device: %s' % pprint(diagram))
rows = len(diagram)
standard, sfp, sfp_plus = extract_ports_list(info['ports'])
if len(standard) > 0:
log.info('ports %s are standard ports' % standard )
standard = len(standard)
if len(sfp) > 0:
log.info('ports %s are sfp ports' % sfp )
sfp = len(sfp)
if len(sfp_plus) > 0:
log.info('ports %s are sfp+ ports' % sfp_plus )
sfp_plus = len(sfp_plus)
new_models[type][device]['ports']={'number':standard,'rows':0 if standard==0 else 1 if standard <= 8 else 2}
new_models[type][device]['sfp']={'number':sfp,'rows': sfp if sfp < 2 else 2}
new_models[type][device]['sfp+']={'number':sfp_plus,'rows':sfp_plus if sfp_plus < 2 else 2}
if standard > 0:
rows = new_models[type][device]['ports']['rows']
new_models[type][device]['ports']['rows'] = query_number('how many ROWS of standard ports are there? (eg, 1,2)', rows)
if sfp > 0:
rows = new_models[type][device]['sfp']['rows']
new_models[type][device]['sfp']['rows'] = query_number('how many ROWS of sfp ports are there? (eg, 1,2)', rows)
if sfp_plus > 0:
rows = new_models[type][device]['sfp+']['rows']
new_models[type][device]['sfp+']['rows'] = query_number('how many ROWS of sfp+ ports are there? (eg, 1,2)', rows)
if sfp > 0 or sfp_plus > 0:
while not new_models[type][device].get('order'):
if query_yes_no("Are the first ports (from the left) standard ports?"):
new_models[type][device]['order'] = [0,1,2]
else:
if query_yes_no("Are the first ports (from the left) sfp ports?"):
new_models[type][device]['order'] = [1,0,2]
else:
if query_yes_no("Are the first ports (from the left) sfp+ ports?"):
new_models[type][device]['order'] = [2,0,1]
else:
log.error('OK, the first ports must be standard, sfp, or sfp+ ports. try again')
log.debug('Device: %s added' % new_models[type][device])
log.info('Device: %s added' % new_models[type][device]['name'])
log.debug('The following new devices have been added: %s' % pprint(new_models))
log.info('New devices: %s' % get_summary(new_models))
all_models = OrderedDict()
all_models.update(models)
if not any([value for value in get_summary(new_models).values()]):
log.info('No New Models Found')
#return
else:
if query_yes_no("Do you want to add them to the database?"):
all_models = merge_dicts(models, new_models)
log.info('database updated')
if query_yes_no("Do you want to add the full Unifi data to the database (recommended)?"):
for type, devices in all_models.copy().items():
for device in devices:
if device in data:
#log.info("adding : %s to models[%s][%s]['unifi']" % (data[device], type, device))
all_models[type][device]['unifi'] = data[device].copy()
log.debug('The following data will be written to the database: %s' % pprint(all_models))
log.info('total devices: %s' % get_summary(all_models))
if query_yes_no("Do you want to overwrite the %s file?" % file, None):
#backup original file
from shutil import copyfile
copyfile(file, file+'.org')
#write models file out
with open(file, 'w') as f:
f.write(pprint(all_models))
log.info('File: %s Updated' % file)
log.info('Total devices: %s' % get_summary(all_models))
else:
log.info('File: %s NOT updated' % file)
def extract_ports_list(ports):
'''
returns ports list from unifi data as tuple of lists of port number ints
eg ([0,1,2,3], [4,5],[])
(standard []. sfp[], sfp_plus[])
NOTE, USG's start at port 0, but switches start at port 1.
'''
standard = []
sfp = []
sfp_plus = []
if isinstance(ports, (list, dict)):
#standard = [x for x in range(1,len(ports)+1,1)]
standard = [x for x in range(len(ports))]
if ports.get('standard'):
standard = ports_list_decode(ports['standard'])
if ports.get('sfp'):
sfp = ports_list_decode(ports['sfp'])
if ports.get('plus'):
sfp_plus = ports_list_decode(ports['plus'])
return standard, sfp, sfp_plus
def ports_list_decode(ports):
ports_list = []
if isinstance(ports, int):
ports_list = [x for x in range(1,ports+1,1)]
if isinstance(ports, list):
ports_list = ports
if isinstance(ports, str):
#log.info('Ports is a string: %s' % ports)
ports_string_list = ports.split('-')
ports_list = [x for x in range(int(ports_string_list[0]),int(ports_string_list[-1])+1,1)]
return ports_list
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_number(question, default=1):
"""Ask a numerical question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be a number or None (meaning
an answer is required of the user).
The "answer" return value is an int.
"""
if default is None:
prompt = " [] "
else:
prompt = " [%d] " % default
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return int(default)
elif choice.isdigit():
return int(choice)
else:
sys.stdout.write("Please respond with a number\n")
def secondsToStr(elapsed=None):
if elapsed is None:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
else:
return str(timedelta(seconds=elapsed))
def sigterm_handler(signal, frame):
log.info('Received SIGTERM signal')
sys.exit(0)
def setup_logger(logger_name, log_file, level=logging.DEBUG, console=False):
try:
l = logging.getLogger(logger_name)
formatter = logging.Formatter('[%(levelname)1.1s %(asctime)s] (%(name)-5s) %(message)s')
if log_file is not None:
fileHandler = logging.handlers.RotatingFileHandler(log_file, mode='a', maxBytes=2000000, backupCount=5)
fileHandler.setFormatter(formatter)
if console == True:
formatter = logging.Formatter('[%(levelname)1.1s %(name)-5s] %(message)s')
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
if log_file is not None:
l.addHandler(fileHandler)
if console == True:
l.addHandler(streamHandler)
except Exception as e:
print("Error in Logging setup: %s - do you have permission to write the log file??" % e)
sys.exit(1)
def main():
'''
Main routine
'''
global log
import argparse
parser = argparse.ArgumentParser(description='extract model info from Unifi')
parser.add_argument('-f','--files', action="store", default='/usr/lib/unifi', help='unifi files base location (default: /usr/lib/unifi)')
parser.add_argument('-u','--url', action="store", default=None, help='unifi url base location eg https://192.168.1.1:8443 (default: None)')
parser.add_argument('-up','--update', action="store", default=None, help='models file to update eg models.json (default: None)')
parser.add_argument('-o','--out', action="store", default='models_tmp.json', help='output file name (default: models_tmp.json)')
parser.add_argument('-p','--pattern', action="store", default='U7HD', help='pattern to search for (default; U7HD)')
parser.add_argument('-a','--all', action='store_true', help='get all matches (not just first) default: False)', default = False)
parser.add_argument('-l','--log', action="store",default="None", help='log file. (default: None)')
#parser.add_argument('-d','--dryrun', action='store_true', help='dry run (no file written)', default = False)
parser.add_argument('-D','--debug', action='store_true', help='debug mode', default = False)
parser.add_argument('-V','--version', action='version',version='%(prog)s {version}'.format(version=__VERSION__))
arg = parser.parse_args()
if arg.debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
#setup logging
if arg.log == 'None':
log_file = None
else:
log_file=os.path.expanduser(arg.log)
setup_logger('Main',log_file,level=log_level,console=True)
log = logging.getLogger('Main')
log.debug('Debug mode')
log.info("Python Version: %s" % sys.version.replace('\n',''))
log.info("Unifi Models Extract Version: %s" % __version__)
#register signal handler
signal.signal(signal.SIGTERM, sigterm_handler)
try:
start = time.time()
js_files = []
if arg.url:
log.info("Downloading javascript files, this can take a while...")
tmpdir = 'tempDir'+os.sep
if arg.debug and os.path.exists(tmpdir):
arg.files = tmpdir
else:
base_url = arg.url+'/manage/'
src = get_js_web_urls(base_url)
for url in src:
file = get_js_from_web(base_url,url,tmpdir)
if file:
js_files.append(file)
#log.info('found js files : %s' % js_files)
if len(js_files) == 0:
if not os.path.exists(arg.files):
log.warn('This has to be run on the unifi controller, not your display device!')
lop.warn('please supply a URL for your controller to run from your display device (eg -u https://192.168.1.1:8443)')
os.exit(1)
log.warn("Searching for models data in %s, This can take quite a while to run on large files!" % arg.files)
js_files = get_js_files(arg.files)
models_files = find_models_file(js_files, arg.pattern)
models_files = deduplicate_list(models_files)
json_list = find_json(models_files, arg.pattern, arg.all)
json_models = consolidate_json(json_list)
if len(json_models) > 0:
with open(arg.out,'w+') as dataFile:
dataFile.write(pprint(json_models))
log.info('Got data for: %s' % get_summary(json_models))
log.info('Models Data written to: %s' % arg.out)
if arg.update:
update_models(arg.update, json_models)
else:
log.warn('No Models Data found')
except (KeyboardInterrupt, SystemExit):
log.info("System exit Received - Exiting program")
finally:
log.debug("Program Exited")
if arg.url and not arg.debug:
if os.path.exists(tmpdir):
import shutil
shutil.rmtree(tmpdir)
log.info("Elapsed time: %s" % secondsToStr(time.time()-start))
if __name__ == '__main__':
main()
| self.total = total
self.prefix = prefix
self.suffix = suffix
self.decimals = decimals
self.bar_length = bar_length
self.prev_output_len = 0 | identifier_body |
get_models.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#extract models info from unifi javascript
# N Waterton 4th July 2019 V1.0: initial release
# N Waterton 13th July 2019 V1.0.2 minor fixes.
# N Waterton 10th Sep 2019 V1.0.3 minor fixes
import time, os, sys, json, re
from datetime import timedelta
from collections import OrderedDict
import hjson #pip3 install hjson
import signal
import logging
from logging.handlers import RotatingFileHandler
supported_devices=['UGW','USW','UAP','UDM']
__VERSION__ = __version__ = '1.0.3'
class progress_bar():
'''
create terminal progress bar
@params:
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
'''
def __init__(self,total=100, prefix='', suffix='', decimals=1, bar_length=100):
self.total = total
self.prefix = prefix
self.suffix = suffix
self.decimals = decimals
self.bar_length = bar_length
self.prev_output_len = 0
def update(self,iteration):
iteration = max(min(iteration, self.total), 0)
str_format = "{0:." + str(self.decimals) + "f}"
percents = str_format.format(100 * (iteration / float(self.total)))
filled_length = int(round(self.bar_length * iteration / float(self.total)))
#bar = b'█'.decode('utf8') * filled_length + '-' * (self.bar_length - filled_length)
bar = '█' * filled_length + '-' * (self.bar_length - filled_length)
output = '\r%s |%s| %s%s %s' % (self.prefix, bar, percents, '%', self.suffix)
current_output_len = len(output)
diff = self.prev_output_len - current_output_len
if diff > 0: #if output is shorter than previously
output += ' ' * diff #pad output with spaces
self.prev_output_len = current_output_len
sys.stdout.write(output)
sys.stdout.flush()
def newline():
output = '\n'
sys.stdout.write(output)
sys.stdout.flush()
def pprint(obj):
"""Pretty JSON dump of an object."""
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
def deduplicate_list(input_list):
return list(dict.fromkeys(input_list))
def get_js_web_urls(url):
global requests
import requests #pip3 install requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup #pip3 install bs4
r = requests.get(url, verify=False, timeout=10)
soup = BeautifulSoup(r.content,features="html.parser")
src = [sc["src"] for sc in soup.select("script[src]")]
return src
def get_js_from_web(base_url,url,tempdir = 'tempDir'+os.sep):
log.info('retrieving js from: %s' % base_url+url)
if not os.path.exists(tempdir):
os.mkdir(tempdir)
try:
data = None
r = requests.get(base_url+url, verify=False, timeout=10)
assert r.status_code == 200
data = r.text
if data:
os.makedirs(os.path.dirname(tempdir+url), exist_ok=True)
with open(tempdir+url,'w+') as f:
f.write(data)
return tempdir+url
except (AssertionError, requests.ConnectionError, requests.Timeout) as e:
log.error("Connection failed error: %s" % e)
except Exception as e:
log.exception("unknown exception: %s" % e)
return None
def get_js_files(unifi_dir):
js_files = []
for root, dir, files in os.walk(unifi_dir):
for file in files:
if file.endswith('.js'):
js_files.append(root+os.sep+file)
return js_files
def find_models_file(files, pattern):
models_files = []
pattern = re.compile(pattern)
for file in files:
for i, line in enumerate(open(file)):
for match in re.finditer(pattern, line):
#log.info('Found in file %s on line %s: %s' % (file, i+1, match.group()))
models_files.append(file)
continue
return models_files
def find_json(files, pattern_match, all=False):
pattern = re.compile(r".*\.exports=(\{.*?\}\}\}\})\}.*")
json_obj = []
for file in files:
size = os.path.getsize(file)
pos = 0
progress = progress_bar(100, decimals=0, bar_length=40)
progress.prefix='%s%s' % ('...' if len(file) > 30 else '', file[max(0,len(file)-27):])
with open(file) as dataFile:
for line, data in enumerate(dataFile,1):
pos += len(data)
#log.info("searching line: %d, %d%%" % (line, int(pos*100//size)))
progress.update(pos*100.0/size)
match_iter = pattern.finditer(data)
for match in match_iter:
if pattern_match in match.group(1):
json_string = match.group(1)
json_string = json_string.replace("!0", "1").replace("!1", "0").strip()
try:
jsonObj = hjson.loads(json_string)
json_obj.append(jsonObj)
progress.suffix="matches: %d" % len(json_obj)
log.debug('Found json: %s' % pprint(jsonObj))
if not all:
newline()
log.info("Total Json matches found: %d" % len(json_obj))
return json_obj
except json.decoder.JSONDecodeError as e:
newline()
log.error('Json Error: %s' % e)
newline()
log.info("Total Json matches found: %d" % len(json_obj))
return json_obj
def merge_dicts(a, b):
c = a.copy()
for k,v in b.items():
if isinstance(v, dict):
d = a.get(k, None)
if isinstance(d,dict):
c[k] = merge_dicts(v, d)
else:
c[k]=v
else:
c[k]=v
return c
def consolidate_json(json_list):
json_obj = {}
for json in json_list:
json_obj.update(json)
json_dict = OrderedDict(sorted(json_obj.items(), key=lambda t: t[1]['type']))
return json_dict
def get_summary(data):
summary = {}
for device, info in data.items():
if device in supported_devices: #models.json format data
summary[device]=len(info)
else: #unifi devices format data
if info['type'] not in summary:
summary[info['type']] = 1
else:
summary[info['type']] += 1
return summary
def update_models(file, data):
if os.path.exists(file):
log.warn('Updating file: %s, press ^C if you want to exit!' % file)
with open(file) as f:
models = json.loads(f.read(), object_pairs_hook=OrderedDict)
new_models = OrderedDict()
#ensure we have an entry for each supported type (even if it's blank)
for type in supported_devices:
if not models.get(type):
models[type] = {}
new_models[type] = {}
log.info('NOTE! currently supported devices are: %s' % supported_devices)
for device, info in data.items():
type = info['type'].upper()
result = True
if type in supported_devices and device not in models[type]:
#check for UDN
if type == 'UDM':
new_models[type][device]=info #add to models database (even though UDM section isn't currently used)
elif type == 'UAP':
#all that is needed for AP's
new_models[type][device]={}
new_models[type][device]['name']=info['name']
log.info('Added %s device: %s - %s' % (type, device, info['name']))
continue
else:
for existing_device, existing_info in models[type].items():
if info['name'].upper() in existing_info['name'].upper():
log.info('========New Device %s =========' % device)
log.info('looks like %s - %s is similar to %s - %s' % (device, info['name'], existing_device, existing_info['name']))
if query_yes_no("Do you want to copy it into the database? (if You select No, you can add it as a new device)"):
new_models[type][device]=existing_info
log.info('Device: %s copied' % new_models[type][device])
result = False
else:
result = query_yes_no("Do you want to add it as a new device to the database?")
if not result:
continue
#new device
log.info('========New Device %s =========' % device)
log.info('found new device: %s - %s, type: %s' % (device, info['name'], type))
if not query_yes_no("Do you want to add it to the database?"):
continue
else:
#UDM only
if type == 'UDM':
log.info('This is a %s device, you have to choose what type of device to add it as' % type)
while True:
try:
options = [{option: choice.upper()} for option, choice in enumerate(info['subtypes'])]
sel_option = query_number('please select one of the following options: %s' % options, 0)
type = info['subtypes'][sel_option].upper()
break
except exception as e:
log.error('error: %s' % e)
#add new device name
new_models[type][device]={}
new_models[type][device]['name']=info['name']
if type != 'UAP': #only way for UAP to get here is if a UDM was selected as a UAP, this will skip over in that case.
if info.get('features'):
poe = info['features'].get('poe',0)
new_models[type][device]['poe'] = True if poe == 1 else False
if info.get('diagram'):
diagram = info['diagram']
log.info('here is a diagram of the device: %s' % pprint(diagram))
rows = len(diagram)
standard, sfp, sfp_plus = extract_ports_list(info['ports'])
if len(standard) > 0:
log.info('ports %s are standard ports' % standard )
standard = len(standard)
if len(sfp) > 0:
log.info('ports %s are sfp ports' % sfp )
sfp = len(sfp)
if len(sfp_plus) > 0:
log.info('ports %s are sfp+ ports' % sfp_plus )
sfp_plus = len(sfp_plus)
new_models[type][device]['ports']={'number':standard,'rows':0 if standard==0 else 1 if standard <= 8 else 2}
new_models[type][device]['sfp']={'number':sfp,'rows': sfp if sfp < 2 else 2}
new_models[type][device]['sfp+']={'number':sfp_plus,'rows':sfp_plus if sfp_plus < 2 else 2}
if standard > 0:
rows = new_models[type][device]['ports']['rows']
new_models[type][device]['ports']['rows'] = query_number('how many ROWS of standard ports are there? (eg, 1,2)', rows)
if sfp > 0:
rows = new_models[type][device]['sfp']['rows']
new_models[type][device]['sfp']['rows'] = query_number('how many ROWS of sfp ports are there? (eg, 1,2)', rows)
if sfp_plus > 0:
rows = new_models[type][device]['sfp+']['rows']
new_models[type][device]['sfp+']['rows'] = query_number('how many ROWS of sfp+ ports are there? (eg, 1,2)', rows)
if sfp > 0 or sfp_plus > 0:
while not new_models[type][device].get('order'):
if query_yes_no("Are the first ports (from the left) standard ports?"):
new_models[type][device]['order'] = [0,1,2]
else:
if query_yes_no("Are the first ports (from the left) sfp ports?"):
new_models[type][device]['order'] = [1,0,2]
else:
if query_yes_no("Are the first ports (from the left) sfp+ ports?"):
new_models[type][device]['order'] = [2,0,1]
else:
log.error('OK, the first ports must be standard, sfp, or sfp+ ports. try again')
log.debug('Device: %s added' % new_models[type][device])
log.info('Device: %s added' % new_models[type][device]['name'])
log.debug('The following new devices have been added: %s' % pprint(new_models))
log.info('New devices: %s' % get_summary(new_models))
all_models = OrderedDict()
all_models.update(models)
if not any([value for value in get_summary(new_models).values()]):
log.info('No New Models Found')
#return
else:
if query_yes_no("Do you want to add them to the database?"):
all_models = merge_dicts(models, new_models)
log.info('database updated')
if query_yes_no("Do you want to add the full Unifi data to the database (recommended)?"):
for type, devices in all_models.copy().items():
for device in devices:
if device in data:
#log.info("adding : %s to models[%s][%s]['unifi']" % (data[device], type, device))
all_models[type][device]['unifi'] = data[device].copy()
log.debug('The following data will be written to the database: %s' % pprint(all_models))
log.info('total devices: %s' % get_summary(all_models))
if query_yes_no("Do you want to overwrite the %s file?" % file, None):
#backup original file
from shutil import copyfile
copyfile(file, file+'.org')
#write models file out
with open(file, 'w') as f:
f.write(pprint(all_models))
log.info('File: %s Updated' % file)
log.info('Total devices: %s' % get_summary(all_models))
else:
log.info('File: %s NOT updated' % file)
def extract_ports_list(ports):
'''
returns ports list from unifi data as tuple of lists of port number ints
eg ([0,1,2,3], [4,5],[])
(standard []. sfp[], sfp_plus[])
NOTE, USG's start at port 0, but switches start at port 1.
'''
standard = []
sfp = []
sfp_plus = []
if isinstance(ports, (list, dict)):
#standard = [x for x in range(1,len(ports)+1,1)]
standard = [x for x in range(len(ports))]
if ports.get('standard'):
standard = ports_list_decode(ports['standard'])
if ports.get('sfp'):
sfp = ports_list_decode(ports['sfp'])
if ports.get('plus'):
sfp_plus = ports_list_decode(ports['plus'])
return standard, sfp, sfp_plus
def ports_list_decode(ports):
ports_list = []
if isinstance(ports, int):
ports_list = [x for x in range(1,ports+1,1)]
if isinstance(ports, list):
ports_list = ports
if isinstance(ports, str):
#log.info('Ports is a string: %s' % ports)
ports_string_list = ports.split('-')
ports_list = [x for x in range(int(ports_string_list[0]),int(ports_string_list[-1])+1,1)]
return ports_list
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def query_number(question, default=1):
"""Ask a numerical question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be a number or None (meaning
an answer is required of the user).
The "answer" return value is an int.
"""
if default is None:
prompt = " [] "
else:
prompt = " [%d] " % default
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return int(default)
elif choice.isdigit():
return int(choice)
else:
sys.stdout.write("Please respond with a number\n")
def secondsToStr(elapsed=None):
if elapsed is None:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
else:
return str(timedelta(seconds=elapsed))
def sigterm_handler(signal, frame):
log.info('Received SIGTERM signal')
sys.exit(0)
def setup_logger(logger_name, log_file, level=logging.DEBUG, console=False):
try:
l = logging.getLogger(logger_name)
formatter = logging.Formatter('[%(levelname)1.1s %(asctime)s] (%(name)-5s) %(message)s')
if log_file is not None:
fileHandler = logging.handlers.RotatingFileHandler(log_file, mode='a', maxBytes=2000000, backupCount=5)
fileHandler.setFormatter(formatter)
if console == True:
formatter = logging.Formatter('[%(levelname)1.1s %(name)-5s] %(message)s')
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
if log_file is not None:
l.addHandler(fileHandler)
if console == True:
l.addHandler(streamHandler)
except Exception as e:
print("Error in Logging setup: %s - do you have permission to write the log file??" % e)
sys.exit(1)
def main | '''
Main routine
'''
global log
import argparse
parser = argparse.ArgumentParser(description='extract model info from Unifi')
parser.add_argument('-f','--files', action="store", default='/usr/lib/unifi', help='unifi files base location (default: /usr/lib/unifi)')
parser.add_argument('-u','--url', action="store", default=None, help='unifi url base location eg https://192.168.1.1:8443 (default: None)')
parser.add_argument('-up','--update', action="store", default=None, help='models file to update eg models.json (default: None)')
parser.add_argument('-o','--out', action="store", default='models_tmp.json', help='output file name (default: models_tmp.json)')
parser.add_argument('-p','--pattern', action="store", default='U7HD', help='pattern to search for (default; U7HD)')
parser.add_argument('-a','--all', action='store_true', help='get all matches (not just first) default: False)', default = False)
parser.add_argument('-l','--log', action="store",default="None", help='log file. (default: None)')
#parser.add_argument('-d','--dryrun', action='store_true', help='dry run (no file written)', default = False)
parser.add_argument('-D','--debug', action='store_true', help='debug mode', default = False)
parser.add_argument('-V','--version', action='version',version='%(prog)s {version}'.format(version=__VERSION__))
arg = parser.parse_args()
if arg.debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
#setup logging
if arg.log == 'None':
log_file = None
else:
log_file=os.path.expanduser(arg.log)
setup_logger('Main',log_file,level=log_level,console=True)
log = logging.getLogger('Main')
log.debug('Debug mode')
log.info("Python Version: %s" % sys.version.replace('\n',''))
log.info("Unifi Models Extract Version: %s" % __version__)
#register signal handler
signal.signal(signal.SIGTERM, sigterm_handler)
try:
start = time.time()
js_files = []
if arg.url:
log.info("Downloading javascript files, this can take a while...")
tmpdir = 'tempDir'+os.sep
if arg.debug and os.path.exists(tmpdir):
arg.files = tmpdir
else:
base_url = arg.url+'/manage/'
src = get_js_web_urls(base_url)
for url in src:
file = get_js_from_web(base_url,url,tmpdir)
if file:
js_files.append(file)
#log.info('found js files : %s' % js_files)
if len(js_files) == 0:
if not os.path.exists(arg.files):
log.warn('This has to be run on the unifi controller, not your display device!')
lop.warn('please supply a URL for your controller to run from your display device (eg -u https://192.168.1.1:8443)')
os.exit(1)
log.warn("Searching for models data in %s, This can take quite a while to run on large files!" % arg.files)
js_files = get_js_files(arg.files)
models_files = find_models_file(js_files, arg.pattern)
models_files = deduplicate_list(models_files)
json_list = find_json(models_files, arg.pattern, arg.all)
json_models = consolidate_json(json_list)
if len(json_models) > 0:
with open(arg.out,'w+') as dataFile:
dataFile.write(pprint(json_models))
log.info('Got data for: %s' % get_summary(json_models))
log.info('Models Data written to: %s' % arg.out)
if arg.update:
update_models(arg.update, json_models)
else:
log.warn('No Models Data found')
except (KeyboardInterrupt, SystemExit):
log.info("System exit Received - Exiting program")
finally:
log.debug("Program Exited")
if arg.url and not arg.debug:
if os.path.exists(tmpdir):
import shutil
shutil.rmtree(tmpdir)
log.info("Elapsed time: %s" % secondsToStr(time.time()-start))
if __name__ == '__main__':
main()
| ():
| identifier_name |
get_models.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#extract models info from unifi javascript
# N Waterton 4th July 2019 V1.0: initial release
# N Waterton 13th July 2019 V1.0.2 minor fixes.
# N Waterton 10th Sep 2019 V1.0.3 minor fixes
import time, os, sys, json, re
from datetime import timedelta
from collections import OrderedDict
import hjson #pip3 install hjson
import signal
import logging
from logging.handlers import RotatingFileHandler
supported_devices=['UGW','USW','UAP','UDM']
__VERSION__ = __version__ = '1.0.3'
class progress_bar():
    '''
    create terminal progress bar
    @params:
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        bar_length  - Optional  : character length of bar (Int)
    '''
    def __init__(self, total=100, prefix='', suffix='', decimals=1, bar_length=100):
        self.total = total
        self.prefix = prefix
        self.suffix = suffix
        self.decimals = decimals
        self.bar_length = bar_length
        # length of the previous line, used to blank leftover characters
        self.prev_output_len = 0
    def update(self, iteration):
        # Redraw the bar in place (leading \r, no newline) for `iteration`
        # out of self.total; callers print a newline when finished.
        iteration = max(min(iteration, self.total), 0)  # clamp to [0, total]
        str_format = "{0:." + str(self.decimals) + "f}"
        percents = str_format.format(100 * (iteration / float(self.total)))
        filled_length = int(round(self.bar_length * iteration / float(self.total)))
        #bar = b'█'.decode('utf8') * filled_length + '-' * (self.bar_length - filled_length)
        bar = '█' * filled_length + '-' * (self.bar_length - filled_length)
        output = '\r%s |%s| %s%s %s' % (self.prefix, bar, percents, '%', self.suffix)
        current_output_len = len(output)
        diff = self.prev_output_len - current_output_len
        if diff > 0: #if output is shorter than previously
            output += ' ' * diff #pad output with spaces
        self.prev_output_len = current_output_len
        sys.stdout.write(output)
        sys.stdout.flush()
def newline():
    """Emit a bare newline (terminates a progress-bar line) and flush."""
    sys.stdout.write('\n')
    sys.stdout.flush()
def pprint(obj):
    """Return obj serialized as pretty-printed, key-sorted JSON text."""
    return json.dumps(
        obj,
        sort_keys=True,
        indent=2,
        separators=(',', ': '),
    )
def deduplicate_list(input_list):
    # dict keys preserve insertion order (Python 3.7+), so this drops
    # duplicates while keeping the first occurrence of each item.
    seen = dict.fromkeys(input_list)
    return [*seen]
def get_js_web_urls(url):
    '''
    Fetch the controller page at `url` and return the src attributes of
    all <script src=...> tags found in it.

    Imports `requests` lazily and publishes it as a module global so
    get_js_from_web() can reuse it (so this must be called first).
    TLS verification is disabled — controllers typically use self-signed
    certificates; the matching urllib3 warning is suppressed.
    '''
    global requests
    import requests #pip3 install requests
    from requests.packages.urllib3.exceptions import InsecureRequestWarning
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    from bs4 import BeautifulSoup #pip3 install bs4
    r = requests.get(url, verify=False, timeout=10)
    soup = BeautifulSoup(r.content, features="html.parser")
    src = [sc["src"] for sc in soup.select("script[src]")]
    return src
def get_js_from_web(base_url, url, tempdir = 'tempDir'+os.sep):
    '''
    Download one javascript file from base_url+url into tempdir (mirroring
    the URL's directory structure) and return the local path, or None on
    any download failure.

    Relies on the `requests` module made global by get_js_web_urls().
    '''
    # NOTE(review): % binds tighter than +, so this formats base_url and then
    # appends url — the printed text is still the full URL, but only by luck.
    log.info('retrieving js from: %s' % base_url+url)
    if not os.path.exists(tempdir):
        os.mkdir(tempdir)
    try:
        data = None
        r = requests.get(base_url+url, verify=False, timeout=10)
        assert r.status_code == 200
        data = r.text
        if data:
            # create any intermediate directories implied by the URL path
            os.makedirs(os.path.dirname(tempdir+url), exist_ok=True)
            with open(tempdir+url, 'w+') as f:
                f.write(data)
            return tempdir+url
    except (AssertionError, requests.ConnectionError, requests.Timeout) as e:
        log.error("Connection failed error: %s" % e)
    except Exception as e:
        log.exception("unknown exception: %s" % e)
    return None
def get_js_files(unifi_dir):
    '''Recursively collect the paths of all *.js files under unifi_dir.'''
    js_files = []
    for root, _dirs, names in os.walk(unifi_dir):
        js_files.extend(root + os.sep + name for name in names if name.endswith('.js'))
    return js_files
def find_models_file(files, pattern):
    '''
    Return the subset of `files` whose text matches the regex `pattern`.

    A file is appended once per match occurrence (callers deduplicate the
    result with deduplicate_list()).

    Fix: the original iterated `open(file)` directly and never closed the
    handle, leaking one file descriptor per scanned file; a `with` block
    now releases each handle. (The original inner `continue` was a no-op
    and has been dropped.)
    '''
    models_files = []
    pattern = re.compile(pattern)
    for file in files:
        with open(file) as f:
            for line in f:
                for match in pattern.finditer(line):
                    models_files.append(file)
    return models_files
def find_json(files, pattern_match, all=False):
    '''
    Scan each file line by line for minified `module.exports={...}` blobs,
    parse the ones containing `pattern_match` with hjson, and return the
    parsed objects.

    files         - file paths to scan (a progress bar is drawn per file)
    pattern_match - substring that must appear inside a candidate blob
    all           - if False, return after the first successful parse;
                    if True, keep collecting across all files

    Returns a list of parsed objects (possibly empty).
    '''
    pattern = re.compile(r".*\.exports=(\{.*?\}\}\}\})\}.*")
    json_obj = []
    for file in files:
        size = os.path.getsize(file)
        pos = 0
        progress = progress_bar(100, decimals=0, bar_length=40)
        # show at most the last 27 chars of the path, prefixed with '...'
        progress.prefix='%s%s' % ('...' if len(file) > 30 else '', file[max(0,len(file)-27):])
        with open(file) as dataFile:
            for line, data in enumerate(dataFile,1):
                pos += len(data)
                #log.info("searching line: %d, %d%%" % (line, int(pos*100//size)))
                progress.update(pos*100.0/size)
                match_iter = pattern.finditer(data)
                for match in match_iter:
                    if pattern_match in match.group(1):
                        json_string = match.group(1)
                        # minified JS encodes true/false as !0/!1; map to 1/0
                        json_string = json_string.replace("!0", "1").replace("!1", "0").strip()
                        try:
                            jsonObj = hjson.loads(json_string)
                            json_obj.append(jsonObj)
                            progress.suffix="matches: %d" % len(json_obj)
                            log.debug('Found json: %s' % pprint(jsonObj))
                            if not all:
                                # first hit wins unless --all was requested
                                newline()
                                log.info("Total Json matches found: %d" % len(json_obj))
                                return json_obj
                        # NOTE(review): hjson can raise its own error types
                        # too; only json's decoder error is caught — confirm
                        except json.decoder.JSONDecodeError as e:
                            newline()
                            log.error('Json Error: %s' % e)
    newline()
    log.info("Total Json matches found: %d" % len(json_obj))
    return json_obj
def merge_dicts(a, b):
    '''
    Recursively merge dict b into a copy of dict a and return the result.

    Scalar conflicts at the top level resolve in favour of b; when both
    sides hold a dict for the same key the two are merged recursively
    (with the arguments swapped, as in the original implementation).
    '''
    merged = a.copy()
    for key, value in b.items():
        existing = a.get(key)
        if isinstance(value, dict) and isinstance(existing, dict):
            merged[key] = merge_dicts(value, existing)
        else:
            merged[key] = value
    return merged
def consolidate_json(json_list):
    '''
    Merge a list of device dicts into one OrderedDict sorted by each
    device's 'type' value. Later entries overwrite earlier duplicates.

    Fix: the original loop variable was named `json`, shadowing the
    imported json module inside this function; renamed to `item`.
    '''
    json_obj = {}
    for item in json_list:
        json_obj.update(item)
    json_dict = OrderedDict(sorted(json_obj.items(), key=lambda t: t[1]['type']))
    return json_dict
def get_summary(data):
    '''
    Count devices per category and return {category: count}.

    Keys listed in the module-level `supported_devices` are treated as
    models.json-format sections (value is a dict of devices, counted by
    length); any other key is treated as a raw unifi device entry and
    counted under its 'type' field.
    '''
    summary = {}
    for device, info in data.items():
        if device in supported_devices:
            # models.json format: section -> {device: info}
            summary[device] = len(info)
        else:
            # unifi format: one entry per device, grouped by its type
            dev_type = info['type']
            summary[dev_type] = summary.get(dev_type, 0) + 1
    return summary
def update_models(file, data):
if os.path.exists(file):
log.warn('Updating file: %s, press ^C if you want to exit!' % file)
with open(file) as f:
models = json.loads(f.read(), object_pairs_hook=OrderedDict)
new_models = OrderedDict()
#ensure we have an entry for each supported type (even if it's blank)
for type in supported_devices:
if not models.get(type):
models[type] = {}
new_models[type] = {}
log.info('NOTE! currently supported devices are: %s' % supported_devices)
for device, info in data.items():
type = info['type'].upper()
result = True
if type in supported_devices and device not in models[type]:
#check for UDN
if type == 'UDM':
new_models[type][device]=info #add to models database (even though UDM section isn't currently used)
elif type == 'UAP':
#all that is needed for AP's
new_models[type][device]={}
new_models[type][device]['name']=info['name']
log.info('Added %s device: %s - %s' % (type, device, info['name']))
continue
else:
for existing_device, existing_info in models[type].items():
if info['name'].upper() in existing_info['name'].upper():
log.info('========New Device %s =========' % device)
log.info('looks like %s - %s is similar to %s - %s' % (device, info['name'], existing_device, existing_info['name']))
if query_yes_no("Do you want to copy it into the database? (if You select No, you can add it as a new device)"):
new_models[type][device]=existing_info
log.info('Device: %s copied' % new_models[type][device])
result = False
else:
result = query_yes_no("Do you want to add it as a new device to the database?")
if not result:
continue
#new device
log.info('========New Device %s =========' % device)
log.info('found new device: %s - %s, type: %s' % (device, info['name'], type))
if not query_yes_no("Do you want to add it to the database?"):
continue
else:
#UDM only
if type == 'UDM':
log.info('This is a %s device, you have to choose what type of device to add it as' % type)
while True:
try:
options = [{option: choice.upper()} for option, choice in enumerate(info['subtypes'])]
sel_option = query_number('please select one of the following options: %s' % options, 0)
type = info['subtypes'][sel_option].upper()
break
except exception as e:
log.error('error: %s' % e)
#add new device name
new_models[type][device]={}
new_models[type][device]['name']=info['name']
if type != 'UAP': #only way for UAP to get here is if a UDM was selected as a UAP, this will skip over in that case.
if info.get('features'):
poe = info['features'].get('poe',0)
new_models[type][device]['poe'] = True if poe == 1 else False
if info.get('diagram'):
diagram = info['diagram']
log.info('here is a diagram of the device: %s' % pprint(diagram))
rows = len(diagram)
standard, sfp, sfp_plus = extract_ports_list(info['ports'])
if len(standard) > 0:
log.info('ports %s are standard ports' % standard )
standard = len(standard)
if len(sfp) > 0:
log.info('ports %s are sfp ports' % sfp )
sfp = len(sfp)
if len(sfp_plus) > 0:
log.info('ports %s are sfp+ ports' % sfp_plus )
sfp_plus = len(sfp_plus)
new_models[type][device]['ports']={'number':standard,'rows':0 if standard==0 else 1 if standard <= 8 else 2}
new_models[type][device]['sfp']={'number':sfp,'rows': sfp if sfp < 2 else 2}
new_models[type][device]['sfp+']={'number':sfp_plus,'rows':sfp_plus if sfp_plus < 2 else 2}
if standard > 0:
rows = new_models[type][device]['ports']['rows']
new_models[type][device]['ports']['rows'] = query_number('how many ROWS of standard ports are there? (eg, 1,2)', rows)
if sfp > 0:
rows = new_models[type][device]['sfp']['rows']
new_models[type][device]['sfp']['rows'] = query_number('how many ROWS of sfp ports are there? (eg, 1,2)', rows)
if sfp_plus > 0:
rows = new_models[type][device]['sfp+']['rows']
new_models[type][device]['sfp+']['rows'] = query_number('how many ROWS of sfp+ ports are there? (eg, 1,2)', rows)
if sfp > 0 or sfp_plus > 0:
while not new_models[type][device].get('order'):
if query_yes_no("Are the first ports (from the left) standard ports?"):
new_models[type][device]['order'] = [0,1,2]
else:
if query_yes_no("Are the first ports (from the left) sfp ports?"):
new_models[type][device]['order'] = [1,0,2]
else:
if query_yes_no("Are the first ports (from the left) sfp+ ports?"):
new_ | else:
log.error('OK, the first ports must be standard, sfp, or sfp+ ports. try again')
log.debug('Device: %s added' % new_models[type][device])
log.info('Device: %s added' % new_models[type][device]['name'])
log.debug('The following new devices have been added: %s' % pprint(new_models))
log.info('New devices: %s' % get_summary(new_models))
all_models = OrderedDict()
all_models.update(models)
if not any([value for value in get_summary(new_models).values()]):
log.info('No New Models Found')
#return
else:
if query_yes_no("Do you want to add them to the database?"):
all_models = merge_dicts(models, new_models)
log.info('database updated')
if query_yes_no("Do you want to add the full Unifi data to the database (recommended)?"):
for type, devices in all_models.copy().items():
for device in devices:
if device in data:
#log.info("adding : %s to models[%s][%s]['unifi']" % (data[device], type, device))
all_models[type][device]['unifi'] = data[device].copy()
log.debug('The following data will be written to the database: %s' % pprint(all_models))
log.info('total devices: %s' % get_summary(all_models))
if query_yes_no("Do you want to overwrite the %s file?" % file, None):
#backup original file
from shutil import copyfile
copyfile(file, file+'.org')
#write models file out
with open(file, 'w') as f:
f.write(pprint(all_models))
log.info('File: %s Updated' % file)
log.info('Total devices: %s' % get_summary(all_models))
else:
log.info('File: %s NOT updated' % file)
def extract_ports_list(ports):
    '''
    returns ports list from unifi data as tuple of lists of port number ints
    eg ([0,1,2,3], [4,5],[])
    (standard [], sfp[], sfp_plus[])
    NOTE, USG's start at port 0, but switches start at port 1.

    Fix: the original called ports.get() even when `ports` was a list,
    which raised AttributeError; lists now keep the default 0-based
    numbering and only dicts are probed for the per-class overrides.
    Dict behaviour is unchanged.
    '''
    standard = []
    sfp = []
    sfp_plus = []
    if isinstance(ports, (list, dict)):
        # default: one standard port per entry/key, numbered from 0
        #standard = [x for x in range(1,len(ports)+1,1)]
        standard = [x for x in range(len(ports))]
    if isinstance(ports, dict):
        if ports.get('standard'):
            standard = ports_list_decode(ports['standard'])
        if ports.get('sfp'):
            sfp = ports_list_decode(ports['sfp'])
        if ports.get('plus'):
            sfp_plus = ports_list_decode(ports['plus'])
    return standard, sfp, sfp_plus
def ports_list_decode(ports):
    '''
    Normalize a unifi port specification into a list of port-number ints.

    Accepts an int count (expanded to 1..n), an explicit list (returned
    as-is), or a range string such as "1-8" (inclusive; a bare "7" yields
    [7]). Any other type yields [].
    '''
    if isinstance(ports, int):
        return list(range(1, ports + 1))
    if isinstance(ports, list):
        return ports
    if isinstance(ports, str):
        bounds = ports.split('-')
        low, high = int(bounds[0]), int(bounds[-1])
        return list(range(low, high + 1))
    return []
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no" or None (meaning
        an answer is required of the user).

    The "answer" return value is True for "yes" or False for "no".
    Loops until a valid answer is given.
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    # uppercase letter in the prompt marks the default choice
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            # bare <Enter> accepts the default
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def query_number(question, default=1):
    """Ask a numerical question via input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be a number or None (meaning
        an answer is required of the user).

    The "answer" return value is an int. Loops until the user enters
    digits (or hits <Enter> when a default exists).
    """
    if default is None:
        prompt = " [] "
    else:
        prompt = " [%d] " % default
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            # bare <Enter> accepts the default
            return int(default)
        elif choice.isdigit():
            return int(choice)
        else:
            sys.stdout.write("Please respond with a number\n")
def secondsToStr(elapsed=None):
    '''Format `elapsed` seconds as H:MM:SS; with no argument, return the
    current local time as "YYYY-MM-DD HH:MM:SS".'''
    if elapsed is not None:
        return str(timedelta(seconds=elapsed))
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def sigterm_handler(signal, frame):
    # SIGTERM handler registered in main(); logs the signal and exits.
    # sys.exit raises SystemExit, which main()'s except clause catches
    # so cleanup in its finally block still runs.
    log.info('Received SIGTERM signal')
    sys.exit(0)
def setup_logger(logger_name, log_file, level=logging.DEBUG, console=False):
    '''
    Configure the named logger: an optional rotating file handler
    (2 MB x 5 backups) and an optional console handler with a shorter
    format. Exits the process if setup fails (e.g. unwritable log file).
    '''
    try:
        logger = logging.getLogger(logger_name)
        logger.setLevel(level)
        if log_file is not None:
            file_fmt = logging.Formatter('[%(levelname)1.1s %(asctime)s] (%(name)-5s) %(message)s')
            file_handler = logging.handlers.RotatingFileHandler(
                log_file, mode='a', maxBytes=2000000, backupCount=5)
            file_handler.setFormatter(file_fmt)
            logger.addHandler(file_handler)
        if console == True:
            # console output omits the timestamp
            console_fmt = logging.Formatter('[%(levelname)1.1s %(name)-5s] %(message)s')
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(console_fmt)
            logger.addHandler(console_handler)
    except Exception as e:
        print("Error in Logging setup: %s - do you have permission to write the log file??" % e)
        sys.exit(1)
def main():
    '''
    Main routine: parse arguments, configure logging, locate the unifi
    javascript files (on disk or downloaded from the controller), extract
    the embedded models JSON and optionally merge it into an existing
    models file.

    Fixes: 'lop.warn(...)' (NameError) corrected to log.warn, and the
    nonexistent os.exit(1) replaced with sys.exit(1).
    '''
    global log
    import argparse
    parser = argparse.ArgumentParser(description='extract model info from Unifi')
    parser.add_argument('-f','--files', action="store", default='/usr/lib/unifi', help='unifi files base location (default: /usr/lib/unifi)')
    parser.add_argument('-u','--url', action="store", default=None, help='unifi url base location eg https://192.168.1.1:8443 (default: None)')
    parser.add_argument('-up','--update', action="store", default=None, help='models file to update eg models.json (default: None)')
    parser.add_argument('-o','--out', action="store", default='models_tmp.json', help='output file name (default: models_tmp.json)')
    parser.add_argument('-p','--pattern', action="store", default='U7HD', help='pattern to search for (default; U7HD)')
    parser.add_argument('-a','--all', action='store_true', help='get all matches (not just first) default: False)', default = False)
    parser.add_argument('-l','--log', action="store",default="None", help='log file. (default: None)')
    #parser.add_argument('-d','--dryrun', action='store_true', help='dry run (no file written)', default = False)
    parser.add_argument('-D','--debug', action='store_true', help='debug mode', default = False)
    parser.add_argument('-V','--version', action='version',version='%(prog)s {version}'.format(version=__VERSION__))
    arg = parser.parse_args()
    if arg.debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    #setup logging
    if arg.log == 'None':
        log_file = None
    else:
        log_file = os.path.expanduser(arg.log)
    setup_logger('Main', log_file, level=log_level, console=True)
    log = logging.getLogger('Main')
    log.debug('Debug mode')
    log.info("Python Version: %s" % sys.version.replace('\n',''))
    log.info("Unifi Models Extract Version: %s" % __version__)
    #register signal handler
    signal.signal(signal.SIGTERM, sigterm_handler)
    try:
        start = time.time()
        js_files = []
        if arg.url:
            log.info("Downloading javascript files, this can take a while...")
            tmpdir = 'tempDir'+os.sep
            if arg.debug and os.path.exists(tmpdir):
                # debug runs reuse a previous download instead of re-fetching
                arg.files = tmpdir
            else:
                base_url = arg.url+'/manage/'
                src = get_js_web_urls(base_url)
                for url in src:
                    file = get_js_from_web(base_url, url, tmpdir)
                    if file:
                        js_files.append(file)
                #log.info('found js files : %s' % js_files)
        if len(js_files) == 0:
            if not os.path.exists(arg.files):
                log.warn('This has to be run on the unifi controller, not your display device!')
                # BUGFIX: was 'lop.warn(...)' — a NameError hid this message
                log.warn('please supply a URL for your controller to run from your display device (eg -u https://192.168.1.1:8443)')
                # BUGFIX: os.exit() does not exist; sys.exit() is the intended call
                sys.exit(1)
            log.warn("Searching for models data in %s, This can take quite a while to run on large files!" % arg.files)
            js_files = get_js_files(arg.files)
        models_files = find_models_file(js_files, arg.pattern)
        models_files = deduplicate_list(models_files)
        json_list = find_json(models_files, arg.pattern, arg.all)
        json_models = consolidate_json(json_list)
        if len(json_models) > 0:
            with open(arg.out, 'w+') as dataFile:
                dataFile.write(pprint(json_models))
            log.info('Got data for: %s' % get_summary(json_models))
            log.info('Models Data written to: %s' % arg.out)
            if arg.update:
                update_models(arg.update, json_models)
        else:
            log.warn('No Models Data found')
    except (KeyboardInterrupt, SystemExit):
        log.info("System exit Received - Exiting program")
    finally:
        log.debug("Program Exited")
        if arg.url and not arg.debug:
            # remove downloaded javascript unless debugging (reused next run)
            if os.path.exists(tmpdir):
                import shutil
                shutil.rmtree(tmpdir)
        log.info("Elapsed time: %s" % secondsToStr(time.time()-start))
if __name__ == '__main__':
main()
| models[type][device]['order'] = [2,0,1]
| conditional_block |
webhdfs.rs | use webhdfs::*;
/// Entry point: parse the command line into a client + operation, then
/// run the requested download(s).
fn main() {
    use std::fs::File;
    use std::path::Path;
    use std::fs::create_dir_all;
    use commandline::*;
    let (mut client, op) = parse_command_line();
    match op {
        Operation::Get(mut fs) => {
            // Dispatch on how many path arguments were given.
            match &fs[..] {
                // <remote>: local name is taken from the remote file name
                &[ref input] => {
                    let input_path = Path::new(input);
                    let output = input_path.file_name().expect2("file name must be specified if no output file is given");
                    let mut out = File::create(&output).expect2("Could not create output file");
                    client.get_file(&input, &mut out).expect2("get error")
                }
                // <remote> <local>: download to the explicit local path
                &[ref input, ref output] => {
                    let mut out = File::create(&output).expect2("Could not create output file");
                    client.get_file(&input, &mut out).expect2("get error")
                }
                // <remote>... <local-dir>: the last argument is the target
                // directory; each remote file keeps its own file name.
                // NOTE(review): an empty list would also reach this arm and
                // panic on pop(); parse_command_line appears to guard
                // against empty file lists — confirm.
                _ => {
                    let target_dir_ = fs.pop().unwrap();
                    let target_dir = Path::new(&target_dir_);
                    create_dir_all(&target_dir).expect2("Could not create output dir");
                    for input in fs {
                        let input_path = Path::new(&input);
                        let output_file = input_path.file_name().expect2("file name must be specified if no output file is given");
                        let output = target_dir.join(&Path::new(output_file));
                        let mut out = File::create(&output).expect2("Could not create output file");
                        client.get_file(&input, &mut out).expect2("get error")
                    }
                }
            }
        }
    }
}
/// Print "<description> (<name>) version <version>" — all three baked in
/// at compile time from Cargo environment variables — and exit with 0.
fn version() -> ! {
    println!(
        "{} ({}) version {}",
        env!("CARGO_PKG_DESCRIPTION"),
        env!("CARGO_PKG_NAME"),
        env!("CARGO_PKG_VERSION")
    );
    std::process::exit(0);
}
/// Print the usage/help screen and exit with status 1.
/// Fix: corrected the user-facing typo "thelp" -> "help".
fn usage() -> ! {
    println!("USAGE:
webhdfs <options>... <command> <files>...
webhdfs -h|--help
webhdfs -v|--version
options:
-U|--uri|--url <url>        API entrypoint
-u|--user <string>          User name
-d|--doas <string>          DoAs username
-T|--dt <string>            Delegation token
-t|--timeout <unsigned>     Default timeout in seconds
-N|--natmap-file <filepath> Path to NAT mappings file
-n|--natmap-entry <k=v>     NAT mapping (multiple options are Ok)
command and files:
-v|--version
    Print version and exit
-h|--help
    Print this help screen and exit
--save-config <filepath>
    Save the effective configuration to the file
-g|--get <remote-filepath> <local-path>
-g|--get <remote-filepath>
-g|--get <remote-filepath>.. <local-dirpath>
    Get files from HDFS
");
    std::process::exit(1);
}
/// Command selected on the command line; currently only file download.
enum Operation {
    /// Get files from HDFS: remote path(s), optionally followed by a local target.
    Get(Vec<String>)
}
fn parse_command_line() -> (SyncHdfsClient, Operation) {
use std::time::Duration;
use std::collections::HashMap;
use commandline::*;
enum Sw {
Uri, User, Doas, DToken, Timeout, NMFile, NMEntry, SaveConfig
}
enum Op {
Get
}
struct S {
sw: Option<Sw>,
op: Option<Op>,
files: Vec<String>,
uri: Option<String>,
user: Option<String>,
doas: Option<String>,
dtoken: Option<String>,
timeout: Option<Duration>,
natmap: Option<HashMap<String, String>>,
save_config: Option<String>,
}
let s0 = S {
sw: None, op: None, files: vec![],
uri: None, user: None, doas:None, timeout: None, dtoken: None, natmap: None,
save_config: None
};
let result = commandline::parse_cmdln(s0, |mut s, arg| if let Some(sw) = s.sw.take() {
match sw {
Sw::Uri => S { uri: Some(arg.arg()), ..s },
Sw::User => S { user: Some(arg.arg()), ..s },
Sw::Doas => S { doas: Some(arg.arg()), ..s },
Sw::DToken => S { dtoken: Some(arg.arg()), ..s },
Sw::SaveConfig => S { save_config: Some(arg.arg()), ..s },
Sw::Timeout => S { timeout: Some(Duration::from_secs(arg.arg().parse().expect2("Invalid timeout duration"))), ..s },
Sw::NMFile => S { natmap: Some(config::read_kv_file(&arg.arg()).expect2("malformed natmap file")), ..s },
Sw::NMEntry => {
let mut nm = if let Some(nm) = s.natmap { nm } else { HashMap::new() };
let (k, v) = config::split_kv(arg.arg()).expect2("invalid natmap entry");
nm.insert(k, v);
S { natmap: Some(nm), ..s }
}
}
} else {
match arg.switch_ref() {
"-v"|"--version" => version(),
"-h"|"--help" => usage(),
"-g"|"--get" => S { op: Some(Op::Get), ..s },
"-U"|"--uri"|"--url" => S { sw: Some(Sw::Uri), ..s },
"-u"|"--user" => S { sw: Some(Sw::User), ..s },
"-d"|"--doas" => S { sw: Some(Sw::Doas), ..s },
"-T"|"--dt" => S { sw: Some(Sw::DToken), ..s },
"-t"|"--timeout" => S { sw: Some(Sw::Timeout), ..s },
"-N"|"--natmap-file" => S { sw: Some(Sw::NMFile), ..s },
"-n"|"--natmap-entry" => S { sw: Some(Sw::NMEntry), ..s },
"--save-config" => S { sw: Some(Sw::SaveConfig), ..s },
_ => { s.files.push(arg.arg()); s}
}
});
if result.sw.is_some() {
error_exit("invalid command line at the end", "")
}
if let Some(f) = result.save_config {
if result.op.is_some() {
error_exit("--save-config must be used alone", "")
}
let uri = result.uri.expect2("must specify --uri when saving config");
let cfg = config::Config::new(uri.parse().expect2("Cannot parse URI"));
config::write_config(&std::path::Path::new(&f), &cfg, true);
std::process::exit(0);
} else {
let operation = if let Some(op) = result.op {
op
} else {
error_exit("must specify operation", "")
};
//build context
let mut cx = if let Some(uri) = result.uri {
SyncHdfsClientBuilder::new(uri.parse().expect2("Cannot parse URI"))
} else {
SyncHdfsClientBuilder::from_config_opt().expect2("No configuration files were found, and no mandatory options (--uri) were specified")
};
if let Some(user) = result.user { cx = cx.user_name(user) }
if let Some(doas) = result.doas { cx = cx.doas(doas) }
if let Some(timeout) = result.timeout { cx = cx.default_timeout(timeout) }
if let Some(natmap) = result.natmap { cx = cx.natmap(NatMap::new(natmap.into_iter()).expect2("Invalid natmap")) }
if let Some(dtoken) = result.dtoken { cx = cx.delegation_token(dtoken) }
let client = cx.build().expect2("Cannot build SyncHdfsClient");
let operation = match operation {
Op::Get =>
if result.files.len() > 0 { Operation::Get(result.files) } else |
};
(client, operation)
}
}
//-------------------------
mod commandline {
/// Prints a two-part "Error: msg (detail)" message to stderr and exits
/// with status 1; the parenthesized detail is omitted when empty.
pub fn error_exit(msg: &str, detail: &str) -> ! {
    if detail.is_empty() {
        eprintln!("Error: {}", msg);
    } else {
        eprintln!("Error: {} ({})", msg, detail);
    }
    std::process::exit(1)
}
/// Expect2: like `expect`, but reports through `error_exit` (brief
/// message, no panic backtrace) instead of panicking.
pub trait Expect2<T> {
    /// Same as Result::expect but the error message is brief and not intimidating
    fn expect2(self, msg: &str) -> T;
}
impl<T, E: std::error::Error> Expect2<T> for std::result::Result<T, E> {
    fn expect2(self, msg: &str) -> T {
        match self {
            Ok(v) => v,
            // the detail part comes from the error's Display impl
            Err(e) => error_exit(msg, &e.to_string())
        }
    }
}
impl<T> Expect2<T> for Option<T> {
    fn expect2(self, msg: &str) -> T {
        match self {
            Some(v) => v,
            // Option carries no detail; print the message alone
            None => error_exit(msg, "")
        }
    }
}
/// One pre-split command-line token (see `CmdLn::convert_arg`).
#[derive(Debug)]
pub enum CmdLn {
    /// The switch half of a `--switch=arg` form.
    Switch(String),
    /// The argument half of a `--switch=arg` form.
    Arg(String),
    /// Any other token: plain item, short option, or post-`--` operand.
    Item(String)
}
// Human-readable token description, used in syntax-error diagnostics
// (see `CmdLn::raise`).
impl std::fmt::Display for CmdLn {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            CmdLn::Switch(s) => write!(fmt, "Switch '{}'", s),
            CmdLn::Arg(s) => write!(fmt, "Arg '{}'", s),
            CmdLn::Item(s) => write!(fmt, "Item '{}'", s)
        }
    }
}
impl CmdLn {
    /// Splits command line arguments if needed
    /// - _ if bypass => Item(_)
    /// - '--sw=arg' => Switch('--sw') Arg('arg')
    /// - '-abc' => Item('-a') Item('-b') Item('-c')
    /// - '--' => *bypass = true; []
    /// - _ => Item(_)
    fn convert_arg(bypass: &mut bool, v: String) -> Vec<CmdLn> {
        use std::iter::FromIterator;
        if *bypass {
            // after a literal `--`, every token is a plain operand
            vec![CmdLn::Item(v)]
        } else if v == "--" {
            *bypass = true;
            vec![]
        } else if v.starts_with("--") {
            // split at the first '=' only; the two pops reverse the order
            // so (a, b) is (last piece, first piece)
            let mut s: Vec<String> = v.splitn(2, "=").map(|r| r.to_string()).collect();
            let a = s.pop();
            let b = s.pop();
            match (a, b) {
                (Some(a), None) => vec![CmdLn::Item(a)],
                (Some(b), Some(a)) => vec![CmdLn::Switch(a), CmdLn::Arg(b)],
                _ => unreachable!()
            }
        } else if v.starts_with("-") && v != "-" {
            // bundled short options: '-abc' -> '-a' '-b' '-c'
            v.chars().skip(1).map(|c| CmdLn::Item(String::from_iter(vec!['-', c]))).collect()
        } else {
            vec![CmdLn::Item(v)]
        }
    }
    /// Abort with a diagnostic naming what kind of token was expected.
    fn raise(&self, w: &str) -> ! {
        error_exit(&format!("we wanted {}, but got {:?}", w, self), "command line syntax error")
    }
    /*pub fn switch(self) -> String {
        match self {
            CmdLn::Switch(v) | CmdLn::Item(v) => v,
            other => other.raise("Switch")
        }
    }*/
    /// Borrow this token as a switch name; aborts if it is an `Arg`.
    pub fn switch_ref(&self) -> &str {
        match self {
            CmdLn::Switch(v) | CmdLn::Item(v) => v,
            other => other.raise("Switch")
        }
    }
    /// Consume this token as an argument value; aborts if it is a `Switch`.
    pub fn arg(self) -> String {
        match self {
            CmdLn::Arg(v) | CmdLn::Item(v) => v,
            other => other.raise("Arg")
        }
    }
}
/// Parses command line for 0- and 1-argument options.
/// `f` consumes the current state and a command line item, and produces the new state.
/// (`scan(false, …)` threads the `--` bypass flag through `convert_arg`;
/// argv[0] is skipped.)
pub fn parse_cmdln<S, F>(s0: S, f: F) -> S where F: FnMut(S, CmdLn) -> S {
    std::env::args().skip(1).scan(false, |s, a| Some(CmdLn::convert_arg(s, a))).flatten().fold(s0, f)
}
/*
pub fn bool_opt(s: String) -> bool {
match s.as_ref() {
"true"|"+"|"yes" => true,
"false"|"-"|"no" => false,
v => panic!("invalid bool value '{}'", v)
}
}
*/
} | { error_exit("must specify at least one input file for --get", "") } | conditional_block |
webhdfs.rs | use webhdfs::*;
/// Entry point: parse the command line into a client + operation, then
/// run the requested download(s).
fn main() {
    use std::fs::File;
    use std::path::Path;
    use std::fs::create_dir_all;
    use commandline::*;
    let (mut client, op) = parse_command_line();
    match op {
        Operation::Get(mut fs) => {
            // Dispatch on how many path arguments were given.
            match &fs[..] {
                // <remote>: local name is taken from the remote file name
                &[ref input] => {
                    let input_path = Path::new(input);
                    let output = input_path.file_name().expect2("file name must be specified if no output file is given");
                    let mut out = File::create(&output).expect2("Could not create output file");
                    client.get_file(&input, &mut out).expect2("get error")
                }
                // <remote> <local>: download to the explicit local path
                &[ref input, ref output] => {
                    let mut out = File::create(&output).expect2("Could not create output file");
                    client.get_file(&input, &mut out).expect2("get error")
                }
                // <remote>... <local-dir>: the last argument is the target
                // directory; each remote file keeps its own file name.
                // NOTE(review): an empty list would also reach this arm and
                // panic on pop(); parse_command_line appears to guard
                // against empty file lists — confirm.
                _ => {
                    let target_dir_ = fs.pop().unwrap();
                    let target_dir = Path::new(&target_dir_);
                    create_dir_all(&target_dir).expect2("Could not create output dir");
                    for input in fs {
                        let input_path = Path::new(&input);
                        let output_file = input_path.file_name().expect2("file name must be specified if no output file is given");
                        let output = target_dir.join(&Path::new(output_file));
                        let mut out = File::create(&output).expect2("Could not create output file");
                        client.get_file(&input, &mut out).expect2("get error")
                    }
                }
            }
        }
    }
}
/// Print "<description> (<name>) version <version>" — all three baked in
/// at compile time from Cargo environment variables — and exit with 0.
fn version() -> ! {
    println!(
        "{} ({}) version {}",
        env!("CARGO_PKG_DESCRIPTION"),
        env!("CARGO_PKG_NAME"),
        env!("CARGO_PKG_VERSION")
    );
    std::process::exit(0);
}
/// Print the usage/help screen and exit with status 1.
/// Fix: corrected the user-facing typo "thelp" -> "help".
fn usage() -> ! {
    println!("USAGE:
webhdfs <options>... <command> <files>...
webhdfs -h|--help
webhdfs -v|--version
options:
-U|--uri|--url <url>        API entrypoint
-u|--user <string>          User name
-d|--doas <string>          DoAs username
-T|--dt <string>            Delegation token
-t|--timeout <unsigned>     Default timeout in seconds
-N|--natmap-file <filepath> Path to NAT mappings file
-n|--natmap-entry <k=v>     NAT mapping (multiple options are Ok)
command and files:
-v|--version
    Print version and exit
-h|--help
    Print this help screen and exit
--save-config <filepath>
    Save the effective configuration to the file
-g|--get <remote-filepath> <local-path>
-g|--get <remote-filepath>
-g|--get <remote-filepath>.. <local-dirpath>
    Get files from HDFS
");
    std::process::exit(1);
}
/// Command selected on the command line; currently only file download.
enum Operation {
    /// Get files from HDFS: remote path(s), optionally followed by a local target.
    Get(Vec<String>)
}
fn parse_command_line() -> (SyncHdfsClient, Operation) {
use std::time::Duration;
use std::collections::HashMap;
use commandline::*;
enum Sw {
Uri, User, Doas, DToken, Timeout, NMFile, NMEntry, SaveConfig
}
enum Op {
Get
}
struct S {
sw: Option<Sw>,
op: Option<Op>,
files: Vec<String>,
uri: Option<String>,
user: Option<String>,
doas: Option<String>,
dtoken: Option<String>,
timeout: Option<Duration>,
natmap: Option<HashMap<String, String>>,
save_config: Option<String>,
}
let s0 = S {
sw: None, op: None, files: vec![],
uri: None, user: None, doas:None, timeout: None, dtoken: None, natmap: None,
save_config: None
};
let result = commandline::parse_cmdln(s0, |mut s, arg| if let Some(sw) = s.sw.take() {
match sw {
Sw::Uri => S { uri: Some(arg.arg()), ..s },
Sw::User => S { user: Some(arg.arg()), ..s },
Sw::Doas => S { doas: Some(arg.arg()), ..s },
Sw::DToken => S { dtoken: Some(arg.arg()), ..s },
Sw::SaveConfig => S { save_config: Some(arg.arg()), ..s },
Sw::Timeout => S { timeout: Some(Duration::from_secs(arg.arg().parse().expect2("Invalid timeout duration"))), ..s },
Sw::NMFile => S { natmap: Some(config::read_kv_file(&arg.arg()).expect2("malformed natmap file")), ..s },
Sw::NMEntry => {
let mut nm = if let Some(nm) = s.natmap { nm } else { HashMap::new() };
let (k, v) = config::split_kv(arg.arg()).expect2("invalid natmap entry");
nm.insert(k, v);
S { natmap: Some(nm), ..s }
}
}
} else {
match arg.switch_ref() {
"-v"|"--version" => version(),
"-h"|"--help" => usage(),
"-g"|"--get" => S { op: Some(Op::Get), ..s },
"-U"|"--uri"|"--url" => S { sw: Some(Sw::Uri), ..s },
"-u"|"--user" => S { sw: Some(Sw::User), ..s },
"-d"|"--doas" => S { sw: Some(Sw::Doas), ..s },
"-T"|"--dt" => S { sw: Some(Sw::DToken), ..s },
"-t"|"--timeout" => S { sw: Some(Sw::Timeout), ..s },
"-N"|"--natmap-file" => S { sw: Some(Sw::NMFile), ..s },
"-n"|"--natmap-entry" => S { sw: Some(Sw::NMEntry), ..s },
"--save-config" => S { sw: Some(Sw::SaveConfig), ..s },
_ => { s.files.push(arg.arg()); s}
}
});
if result.sw.is_some() {
error_exit("invalid command line at the end", "")
}
if let Some(f) = result.save_config {
if result.op.is_some() {
error_exit("--save-config must be used alone", "")
}
let uri = result.uri.expect2("must specify --uri when saving config");
let cfg = config::Config::new(uri.parse().expect2("Cannot parse URI"));
config::write_config(&std::path::Path::new(&f), &cfg, true);
std::process::exit(0);
} else {
let operation = if let Some(op) = result.op {
op
} else {
error_exit("must specify operation", "")
};
//build context
let mut cx = if let Some(uri) = result.uri {
SyncHdfsClientBuilder::new(uri.parse().expect2("Cannot parse URI"))
} else {
SyncHdfsClientBuilder::from_config_opt().expect2("No configuration files were found, and no mandatory options (--uri) were specified")
};
if let Some(user) = result.user { cx = cx.user_name(user) }
if let Some(doas) = result.doas { cx = cx.doas(doas) }
if let Some(timeout) = result.timeout { cx = cx.default_timeout(timeout) }
if let Some(natmap) = result.natmap { cx = cx.natmap(NatMap::new(natmap.into_iter()).expect2("Invalid natmap")) }
if let Some(dtoken) = result.dtoken { cx = cx.delegation_token(dtoken) }
let client = cx.build().expect2("Cannot build SyncHdfsClient");
let operation = match operation {
Op::Get =>
if result.files.len() > 0 { Operation::Get(result.files) } else { error_exit("must specify at least one input file for --get", "") }
};
(client, operation)
}
}
//-------------------------
mod commandline {
/// Prints two-part message to stderr and exits
pub fn error_exit(msg: &str, detail: &str) -> ! {
eprint!("Error: {}", msg);
if detail.is_empty() {
eprintln!()
} else {
eprintln!(" ({})", detail);
}
std::process::exit(1)
}
/// Expect2 function
pub trait Expect2<T> {
/// Same as Result::expect but the error message is brief and not intimidating
fn expect2(self, msg: &str) -> T;
}
impl<T, E: std::error::Error> Expect2<T> for std::result::Result<T, E> {
fn expect2(self, msg: &str) -> T {
match self {
Ok(v) => v,
Err(e) => error_exit(msg, &e.to_string())
}
}
}
impl<T> Expect2<T> for Option<T> {
fn expect2(self, msg: &str) -> T |
}
#[derive(Debug)]
pub enum CmdLn {
Switch(String),
Arg(String),
Item(String)
}
impl std::fmt::Display for CmdLn {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
CmdLn::Switch(s) => write!(fmt, "Switch '{}'", s),
CmdLn::Arg(s) => write!(fmt, "Arg '{}'", s),
CmdLn::Item(s) => write!(fmt, "Item '{}'", s)
}
}
}
impl CmdLn {
/// Splits command line argruments if needed
/// - _ if bypass => Item(_)
/// - '--sw=arg' => Switch('--sw') Arg('arg')
/// - '-abc' => Item('-a') Item('-b') Item('-c')
/// - '--' => *bypass = true; []
/// - _ => Item(_)
fn convert_arg(bypass: &mut bool, v: String) -> Vec<CmdLn> {
use std::iter::FromIterator;
if *bypass {
vec![CmdLn::Item(v)]
} else if v == "--" {
*bypass = true;
vec![]
} else if v.starts_with("--") {
let mut s: Vec<String> = v.splitn(2, "=").map(|r| r.to_string()).collect();
let a = s.pop();
let b = s.pop();
match (a, b) {
(Some(a), None) => vec![CmdLn::Item(a)],
(Some(b), Some(a)) => vec![CmdLn::Switch(a), CmdLn::Arg(b)],
_ => unreachable!()
}
} else if v.starts_with("-") && v != "-" {
v.chars().skip(1).map(|c| CmdLn::Item(String::from_iter(vec!['-', c]))).collect()
} else {
vec![CmdLn::Item(v)]
}
}
fn raise(&self, w: &str) -> ! {
error_exit(&format!("we wanted {}, but got {:?}", w, self), "command line syntax error")
}
/*pub fn switch(self) -> String {
match self {
CmdLn::Switch(v) | CmdLn::Item(v) => v,
other => other.raise("Switch")
}
}*/
pub fn switch_ref(&self) -> &str {
match self {
CmdLn::Switch(v) | CmdLn::Item(v) => v,
other => other.raise("Switch")
}
}
pub fn arg(self) -> String {
match self {
CmdLn::Arg(v) | CmdLn::Item(v) => v,
other => other.raise("Arg")
}
}
}
/// Parses command line for 0- and 1-argument options.
/// `f` consumes the current state and a command line item, and produces the new state.
pub fn parse_cmdln<S, F>(s0: S, f: F) -> S where F: FnMut(S, CmdLn) -> S {
std::env::args().skip(1).scan(false, |s, a| Some(CmdLn::convert_arg(s, a))).flatten().fold(s0, f)
}
/*
pub fn bool_opt(s: String) -> bool {
match s.as_ref() {
"true"|"+"|"yes" => true,
"false"|"-"|"no" => false,
v => panic!("invalid bool value '{}'", v)
}
}
*/
} | {
match self {
Some(v) => v,
None => error_exit(msg, "")
}
} | identifier_body |
webhdfs.rs | use webhdfs::*;
fn main() {
use std::fs::File;
use std::path::Path;
use std::fs::create_dir_all;
use commandline::*;
let (mut client, op) = parse_command_line();
match op {
Operation::Get(mut fs) => {
match &fs[..] {
&[ref input] => {
let input_path = Path::new(input);
let output = input_path.file_name().expect2("file name must be specified if no output file is given");
let mut out = File::create(&output).expect2("Could not create output file");
client.get_file(&input, &mut out).expect2("get error")
}
&[ref input, ref output] => {
let mut out = File::create(&output).expect2("Could not create output file");
client.get_file(&input, &mut out).expect2("get error")
}
_ => {
let target_dir_ = fs.pop().unwrap();
let target_dir = Path::new(&target_dir_);
create_dir_all(&target_dir).expect2("Could not create output dir");
for input in fs {
let input_path = Path::new(&input);
let output_file = input_path.file_name().expect2("file name must be specified if no output file is given");
let output = target_dir.join(&Path::new(output_file));
let mut out = File::create(&output).expect2("Could not create output file");
client.get_file(&input, &mut out).expect2("get error")
}
}
}
}
}
}
fn version() -> ! {
println!(
"{} ({}) version {}",
env!("CARGO_PKG_DESCRIPTION"),
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION")
);
std::process::exit(0);
}
fn usage() -> ! {
println!("USAGE:
webhdfs <options>... <command> <files>...
webhdfs -h|--help
webhdfs -v|--version
options:
-U|--uri|--url <url> API entrypoint
-u|--user <string> User name
-d|--doas <string> DoAs username
-T|--dt <string> Delegation token
-t|--timeout <unsigned> Default timeout in seconds
-N|--natmap-file <filepath> Path to NAT mappings file
-n|--natmap-entry <k=v> NAT mapping (multiple options are Ok)
command and files:
-v|--version
Print version and exit
-h|--help
Print this thelp screen and exit
--save-config <filepath>
Save the effective configuration to the file
-g|--get <remote-filepath> <local-path>
-g|--get <remote-filepath>
-g|--get <remote-filepath>.. <local-dirpath>
Get files from HDFS
");
std::process::exit(1);
}
enum Operation {
Get(Vec<String>)
}
fn parse_command_line() -> (SyncHdfsClient, Operation) {
use std::time::Duration;
use std::collections::HashMap;
use commandline::*;
enum Sw {
Uri, User, Doas, DToken, Timeout, NMFile, NMEntry, SaveConfig
}
enum Op {
Get
}
struct S {
sw: Option<Sw>,
op: Option<Op>,
files: Vec<String>,
uri: Option<String>,
user: Option<String>,
doas: Option<String>,
dtoken: Option<String>,
timeout: Option<Duration>,
natmap: Option<HashMap<String, String>>,
save_config: Option<String>,
}
let s0 = S {
sw: None, op: None, files: vec![],
uri: None, user: None, doas:None, timeout: None, dtoken: None, natmap: None,
save_config: None
};
let result = commandline::parse_cmdln(s0, |mut s, arg| if let Some(sw) = s.sw.take() {
match sw {
Sw::Uri => S { uri: Some(arg.arg()), ..s },
Sw::User => S { user: Some(arg.arg()), ..s },
Sw::Doas => S { doas: Some(arg.arg()), ..s },
Sw::DToken => S { dtoken: Some(arg.arg()), ..s },
Sw::SaveConfig => S { save_config: Some(arg.arg()), ..s },
Sw::Timeout => S { timeout: Some(Duration::from_secs(arg.arg().parse().expect2("Invalid timeout duration"))), ..s },
Sw::NMFile => S { natmap: Some(config::read_kv_file(&arg.arg()).expect2("malformed natmap file")), ..s },
Sw::NMEntry => {
let mut nm = if let Some(nm) = s.natmap { nm } else { HashMap::new() };
let (k, v) = config::split_kv(arg.arg()).expect2("invalid natmap entry");
nm.insert(k, v);
S { natmap: Some(nm), ..s }
}
}
} else {
match arg.switch_ref() {
"-v"|"--version" => version(),
"-h"|"--help" => usage(),
"-g"|"--get" => S { op: Some(Op::Get), ..s },
"-U"|"--uri"|"--url" => S { sw: Some(Sw::Uri), ..s },
"-u"|"--user" => S { sw: Some(Sw::User), ..s },
"-d"|"--doas" => S { sw: Some(Sw::Doas), ..s },
"-T"|"--dt" => S { sw: Some(Sw::DToken), ..s },
"-t"|"--timeout" => S { sw: Some(Sw::Timeout), ..s },
"-N"|"--natmap-file" => S { sw: Some(Sw::NMFile), ..s },
"-n"|"--natmap-entry" => S { sw: Some(Sw::NMEntry), ..s },
"--save-config" => S { sw: Some(Sw::SaveConfig), ..s },
_ => { s.files.push(arg.arg()); s}
}
});
if result.sw.is_some() {
error_exit("invalid command line at the end", "")
}
if let Some(f) = result.save_config {
if result.op.is_some() {
error_exit("--save-config must be used alone", "")
}
let uri = result.uri.expect2("must specify --uri when saving config");
let cfg = config::Config::new(uri.parse().expect2("Cannot parse URI"));
config::write_config(&std::path::Path::new(&f), &cfg, true);
std::process::exit(0);
} else {
let operation = if let Some(op) = result.op {
op
} else {
error_exit("must specify operation", "")
};
//build context
let mut cx = if let Some(uri) = result.uri {
SyncHdfsClientBuilder::new(uri.parse().expect2("Cannot parse URI"))
} else {
SyncHdfsClientBuilder::from_config_opt().expect2("No configuration files were found, and no mandatory options (--uri) were specified")
};
if let Some(user) = result.user { cx = cx.user_name(user) }
if let Some(doas) = result.doas { cx = cx.doas(doas) }
if let Some(timeout) = result.timeout { cx = cx.default_timeout(timeout) }
if let Some(natmap) = result.natmap { cx = cx.natmap(NatMap::new(natmap.into_iter()).expect2("Invalid natmap")) }
if let Some(dtoken) = result.dtoken { cx = cx.delegation_token(dtoken) }
let client = cx.build().expect2("Cannot build SyncHdfsClient");
let operation = match operation {
Op::Get =>
if result.files.len() > 0 { Operation::Get(result.files) } else { error_exit("must specify at least one input file for --get", "") }
};
(client, operation)
}
}
//-------------------------
mod commandline {
/// Prints two-part message to stderr and exits
pub fn error_exit(msg: &str, detail: &str) -> ! {
eprint!("Error: {}", msg);
if detail.is_empty() {
eprintln!()
} else {
eprintln!(" ({})", detail);
}
std::process::exit(1)
}
/// Expect2 function
pub trait Expect2<T> {
/// Same as Result::expect but the error message is brief and not intimidating
fn expect2(self, msg: &str) -> T;
}
impl<T, E: std::error::Error> Expect2<T> for std::result::Result<T, E> {
fn expect2(self, msg: &str) -> T {
match self {
Ok(v) => v,
Err(e) => error_exit(msg, &e.to_string())
}
}
}
impl<T> Expect2<T> for Option<T> {
fn expect2(self, msg: &str) -> T {
match self {
Some(v) => v,
None => error_exit(msg, "")
}
}
}
#[derive(Debug)]
pub enum CmdLn { | impl std::fmt::Display for CmdLn {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
CmdLn::Switch(s) => write!(fmt, "Switch '{}'", s),
CmdLn::Arg(s) => write!(fmt, "Arg '{}'", s),
CmdLn::Item(s) => write!(fmt, "Item '{}'", s)
}
}
}
impl CmdLn {
/// Splits command line argruments if needed
/// - _ if bypass => Item(_)
/// - '--sw=arg' => Switch('--sw') Arg('arg')
/// - '-abc' => Item('-a') Item('-b') Item('-c')
/// - '--' => *bypass = true; []
/// - _ => Item(_)
fn convert_arg(bypass: &mut bool, v: String) -> Vec<CmdLn> {
use std::iter::FromIterator;
if *bypass {
vec![CmdLn::Item(v)]
} else if v == "--" {
*bypass = true;
vec![]
} else if v.starts_with("--") {
let mut s: Vec<String> = v.splitn(2, "=").map(|r| r.to_string()).collect();
let a = s.pop();
let b = s.pop();
match (a, b) {
(Some(a), None) => vec![CmdLn::Item(a)],
(Some(b), Some(a)) => vec![CmdLn::Switch(a), CmdLn::Arg(b)],
_ => unreachable!()
}
} else if v.starts_with("-") && v != "-" {
v.chars().skip(1).map(|c| CmdLn::Item(String::from_iter(vec!['-', c]))).collect()
} else {
vec![CmdLn::Item(v)]
}
}
fn raise(&self, w: &str) -> ! {
error_exit(&format!("we wanted {}, but got {:?}", w, self), "command line syntax error")
}
/*pub fn switch(self) -> String {
match self {
CmdLn::Switch(v) | CmdLn::Item(v) => v,
other => other.raise("Switch")
}
}*/
pub fn switch_ref(&self) -> &str {
match self {
CmdLn::Switch(v) | CmdLn::Item(v) => v,
other => other.raise("Switch")
}
}
pub fn arg(self) -> String {
match self {
CmdLn::Arg(v) | CmdLn::Item(v) => v,
other => other.raise("Arg")
}
}
}
/// Parses command line for 0- and 1-argument options.
/// `f` consumes the current state and a command line item, and produces the new state.
pub fn parse_cmdln<S, F>(s0: S, f: F) -> S where F: FnMut(S, CmdLn) -> S {
std::env::args().skip(1).scan(false, |s, a| Some(CmdLn::convert_arg(s, a))).flatten().fold(s0, f)
}
/*
pub fn bool_opt(s: String) -> bool {
match s.as_ref() {
"true"|"+"|"yes" => true,
"false"|"-"|"no" => false,
v => panic!("invalid bool value '{}'", v)
}
}
*/
} | Switch(String),
Arg(String),
Item(String)
}
| random_line_split |
webhdfs.rs | use webhdfs::*;
fn main() {
use std::fs::File;
use std::path::Path;
use std::fs::create_dir_all;
use commandline::*;
let (mut client, op) = parse_command_line();
match op {
Operation::Get(mut fs) => {
match &fs[..] {
&[ref input] => {
let input_path = Path::new(input);
let output = input_path.file_name().expect2("file name must be specified if no output file is given");
let mut out = File::create(&output).expect2("Could not create output file");
client.get_file(&input, &mut out).expect2("get error")
}
&[ref input, ref output] => {
let mut out = File::create(&output).expect2("Could not create output file");
client.get_file(&input, &mut out).expect2("get error")
}
_ => {
let target_dir_ = fs.pop().unwrap();
let target_dir = Path::new(&target_dir_);
create_dir_all(&target_dir).expect2("Could not create output dir");
for input in fs {
let input_path = Path::new(&input);
let output_file = input_path.file_name().expect2("file name must be specified if no output file is given");
let output = target_dir.join(&Path::new(output_file));
let mut out = File::create(&output).expect2("Could not create output file");
client.get_file(&input, &mut out).expect2("get error")
}
}
}
}
}
}
fn | () -> ! {
println!(
"{} ({}) version {}",
env!("CARGO_PKG_DESCRIPTION"),
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION")
);
std::process::exit(0);
}
fn usage() -> ! {
println!("USAGE:
webhdfs <options>... <command> <files>...
webhdfs -h|--help
webhdfs -v|--version
options:
-U|--uri|--url <url> API entrypoint
-u|--user <string> User name
-d|--doas <string> DoAs username
-T|--dt <string> Delegation token
-t|--timeout <unsigned> Default timeout in seconds
-N|--natmap-file <filepath> Path to NAT mappings file
-n|--natmap-entry <k=v> NAT mapping (multiple options are Ok)
command and files:
-v|--version
Print version and exit
-h|--help
Print this thelp screen and exit
--save-config <filepath>
Save the effective configuration to the file
-g|--get <remote-filepath> <local-path>
-g|--get <remote-filepath>
-g|--get <remote-filepath>.. <local-dirpath>
Get files from HDFS
");
std::process::exit(1);
}
enum Operation {
Get(Vec<String>)
}
fn parse_command_line() -> (SyncHdfsClient, Operation) {
use std::time::Duration;
use std::collections::HashMap;
use commandline::*;
enum Sw {
Uri, User, Doas, DToken, Timeout, NMFile, NMEntry, SaveConfig
}
enum Op {
Get
}
struct S {
sw: Option<Sw>,
op: Option<Op>,
files: Vec<String>,
uri: Option<String>,
user: Option<String>,
doas: Option<String>,
dtoken: Option<String>,
timeout: Option<Duration>,
natmap: Option<HashMap<String, String>>,
save_config: Option<String>,
}
let s0 = S {
sw: None, op: None, files: vec![],
uri: None, user: None, doas:None, timeout: None, dtoken: None, natmap: None,
save_config: None
};
let result = commandline::parse_cmdln(s0, |mut s, arg| if let Some(sw) = s.sw.take() {
match sw {
Sw::Uri => S { uri: Some(arg.arg()), ..s },
Sw::User => S { user: Some(arg.arg()), ..s },
Sw::Doas => S { doas: Some(arg.arg()), ..s },
Sw::DToken => S { dtoken: Some(arg.arg()), ..s },
Sw::SaveConfig => S { save_config: Some(arg.arg()), ..s },
Sw::Timeout => S { timeout: Some(Duration::from_secs(arg.arg().parse().expect2("Invalid timeout duration"))), ..s },
Sw::NMFile => S { natmap: Some(config::read_kv_file(&arg.arg()).expect2("malformed natmap file")), ..s },
Sw::NMEntry => {
let mut nm = if let Some(nm) = s.natmap { nm } else { HashMap::new() };
let (k, v) = config::split_kv(arg.arg()).expect2("invalid natmap entry");
nm.insert(k, v);
S { natmap: Some(nm), ..s }
}
}
} else {
match arg.switch_ref() {
"-v"|"--version" => version(),
"-h"|"--help" => usage(),
"-g"|"--get" => S { op: Some(Op::Get), ..s },
"-U"|"--uri"|"--url" => S { sw: Some(Sw::Uri), ..s },
"-u"|"--user" => S { sw: Some(Sw::User), ..s },
"-d"|"--doas" => S { sw: Some(Sw::Doas), ..s },
"-T"|"--dt" => S { sw: Some(Sw::DToken), ..s },
"-t"|"--timeout" => S { sw: Some(Sw::Timeout), ..s },
"-N"|"--natmap-file" => S { sw: Some(Sw::NMFile), ..s },
"-n"|"--natmap-entry" => S { sw: Some(Sw::NMEntry), ..s },
"--save-config" => S { sw: Some(Sw::SaveConfig), ..s },
_ => { s.files.push(arg.arg()); s}
}
});
if result.sw.is_some() {
error_exit("invalid command line at the end", "")
}
if let Some(f) = result.save_config {
if result.op.is_some() {
error_exit("--save-config must be used alone", "")
}
let uri = result.uri.expect2("must specify --uri when saving config");
let cfg = config::Config::new(uri.parse().expect2("Cannot parse URI"));
config::write_config(&std::path::Path::new(&f), &cfg, true);
std::process::exit(0);
} else {
let operation = if let Some(op) = result.op {
op
} else {
error_exit("must specify operation", "")
};
//build context
let mut cx = if let Some(uri) = result.uri {
SyncHdfsClientBuilder::new(uri.parse().expect2("Cannot parse URI"))
} else {
SyncHdfsClientBuilder::from_config_opt().expect2("No configuration files were found, and no mandatory options (--uri) were specified")
};
if let Some(user) = result.user { cx = cx.user_name(user) }
if let Some(doas) = result.doas { cx = cx.doas(doas) }
if let Some(timeout) = result.timeout { cx = cx.default_timeout(timeout) }
if let Some(natmap) = result.natmap { cx = cx.natmap(NatMap::new(natmap.into_iter()).expect2("Invalid natmap")) }
if let Some(dtoken) = result.dtoken { cx = cx.delegation_token(dtoken) }
let client = cx.build().expect2("Cannot build SyncHdfsClient");
let operation = match operation {
Op::Get =>
if result.files.len() > 0 { Operation::Get(result.files) } else { error_exit("must specify at least one input file for --get", "") }
};
(client, operation)
}
}
//-------------------------
mod commandline {
/// Prints two-part message to stderr and exits
pub fn error_exit(msg: &str, detail: &str) -> ! {
eprint!("Error: {}", msg);
if detail.is_empty() {
eprintln!()
} else {
eprintln!(" ({})", detail);
}
std::process::exit(1)
}
/// Expect2 function
pub trait Expect2<T> {
/// Same as Result::expect but the error message is brief and not intimidating
fn expect2(self, msg: &str) -> T;
}
impl<T, E: std::error::Error> Expect2<T> for std::result::Result<T, E> {
fn expect2(self, msg: &str) -> T {
match self {
Ok(v) => v,
Err(e) => error_exit(msg, &e.to_string())
}
}
}
impl<T> Expect2<T> for Option<T> {
fn expect2(self, msg: &str) -> T {
match self {
Some(v) => v,
None => error_exit(msg, "")
}
}
}
#[derive(Debug)]
pub enum CmdLn {
Switch(String),
Arg(String),
Item(String)
}
impl std::fmt::Display for CmdLn {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
CmdLn::Switch(s) => write!(fmt, "Switch '{}'", s),
CmdLn::Arg(s) => write!(fmt, "Arg '{}'", s),
CmdLn::Item(s) => write!(fmt, "Item '{}'", s)
}
}
}
impl CmdLn {
/// Splits command line argruments if needed
/// - _ if bypass => Item(_)
/// - '--sw=arg' => Switch('--sw') Arg('arg')
/// - '-abc' => Item('-a') Item('-b') Item('-c')
/// - '--' => *bypass = true; []
/// - _ => Item(_)
fn convert_arg(bypass: &mut bool, v: String) -> Vec<CmdLn> {
use std::iter::FromIterator;
if *bypass {
vec![CmdLn::Item(v)]
} else if v == "--" {
*bypass = true;
vec![]
} else if v.starts_with("--") {
let mut s: Vec<String> = v.splitn(2, "=").map(|r| r.to_string()).collect();
let a = s.pop();
let b = s.pop();
match (a, b) {
(Some(a), None) => vec![CmdLn::Item(a)],
(Some(b), Some(a)) => vec![CmdLn::Switch(a), CmdLn::Arg(b)],
_ => unreachable!()
}
} else if v.starts_with("-") && v != "-" {
v.chars().skip(1).map(|c| CmdLn::Item(String::from_iter(vec!['-', c]))).collect()
} else {
vec![CmdLn::Item(v)]
}
}
fn raise(&self, w: &str) -> ! {
error_exit(&format!("we wanted {}, but got {:?}", w, self), "command line syntax error")
}
/*pub fn switch(self) -> String {
match self {
CmdLn::Switch(v) | CmdLn::Item(v) => v,
other => other.raise("Switch")
}
}*/
pub fn switch_ref(&self) -> &str {
match self {
CmdLn::Switch(v) | CmdLn::Item(v) => v,
other => other.raise("Switch")
}
}
pub fn arg(self) -> String {
match self {
CmdLn::Arg(v) | CmdLn::Item(v) => v,
other => other.raise("Arg")
}
}
}
/// Parses command line for 0- and 1-argument options.
/// `f` consumes the current state and a command line item, and produces the new state.
pub fn parse_cmdln<S, F>(s0: S, f: F) -> S where F: FnMut(S, CmdLn) -> S {
std::env::args().skip(1).scan(false, |s, a| Some(CmdLn::convert_arg(s, a))).flatten().fold(s0, f)
}
/*
pub fn bool_opt(s: String) -> bool {
match s.as_ref() {
"true"|"+"|"yes" => true,
"false"|"-"|"no" => false,
v => panic!("invalid bool value '{}'", v)
}
}
*/
} | version | identifier_name |
deepobject.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"encoding/json"
"fmt"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/deepmap/oapi-codegen/pkg/types"
)
func marshalDeepObject(in interface{}, path []string) ([]string, error) {
var result []string
switch t := in.(type) {
case []interface{}:
// For the array, we will use numerical subscripts of the form [x],
// in the same order as the array.
for i, iface := range t {
newPath := append(path, strconv.Itoa(i))
fields, err := marshalDeepObject(iface, newPath)
if err != nil {
return nil, errors.Wrap(err, "error traversing array")
}
result = append(result, fields...)
}
case map[string]interface{}:
// For a map, each key (field name) becomes a member of the path, and
// we recurse. First, sort the keys.
keys := make([]string, len(t))
i := 0
for k := range t {
keys[i] = k
i++
}
sort.Strings(keys)
// Now, for each key, we recursively marshal it.
for _, k := range keys {
newPath := append(path, k)
fields, err := marshalDeepObject(t[k], newPath)
if err != nil {
return nil, errors.Wrap(err, "error traversing map")
}
result = append(result, fields...)
}
default:
// Now, for a concrete value, we will turn the path elements
// into a deepObject style set of subscripts. [a, b, c] turns into
// [a][b][c]
prefix := "[" + strings.Join(path, "][") + "]"
result = []string{
prefix + fmt.Sprintf("=%v", t),
}
}
return result, nil
}
func MarshalDeepObject(i interface{}, paramName string) (string, error) {
// We're going to marshal to JSON and unmarshal into an interface{},
// which will use the json pkg to deal with all the field annotations. We
// can then walk the generic object structure to produce a deepObject. This
// isn't efficient and it would be more efficient to reflect on our own,
// but it's complicated, error-prone code.
buf, err := json.Marshal(i)
if err != nil {
return "", errors.Wrap(err, "failed to marshal input to JSON")
}
var i2 interface{}
err = json.Unmarshal(buf, &i2)
if err != nil {
return "", errors.Wrap(err, "failed to unmarshal JSON")
}
fields, err := marshalDeepObject(i2, nil)
if err != nil {
return "", errors.Wrap(err, "error traversing JSON structure")
}
// Prefix the param name to each subscripted field.
for i := range fields {
fields[i] = paramName + fields[i]
}
return strings.Join(fields, "&"), nil
}
type fieldOrValue struct {
fields map[string]fieldOrValue
value string
}
func (f *fieldOrValue) appendPathValue(path []string, value string) {
fieldName := path[0]
if len(path) == 1 {
f.fields[fieldName] = fieldOrValue{value: value}
return
}
pv, found := f.fields[fieldName]
if !found {
pv = fieldOrValue{
fields: make(map[string]fieldOrValue),
}
f.fields[fieldName] = pv
}
pv.appendPathValue(path[1:], value)
}
func makeFieldOrValue(paths [][]string, values []string) fieldOrValue {
f := fieldOrValue{
fields: make(map[string]fieldOrValue),
}
for i := range paths {
path := paths[i]
value := values[i]
f.appendPathValue(path, value)
}
return f
}
func | (dst interface{}, paramName string, params url.Values) error {
// Params are all the query args, so we need those that look like
// "paramName["...
var fieldNames []string
var fieldValues []string
searchStr := paramName + "["
for pName, pValues := range params {
if strings.HasPrefix(pName, searchStr) {
// trim the parameter name from the full name.
pName = pName[len(paramName):]
fieldNames = append(fieldNames, pName)
if len(pValues) != 1 {
return fmt.Errorf("%s has multiple values", pName)
}
fieldValues = append(fieldValues, pValues[0])
}
}
// Now, for each field, reconstruct its subscript path and value
paths := make([][]string, len(fieldNames))
for i, path := range fieldNames {
path = strings.TrimLeft(path, "[")
path = strings.TrimRight(path, "]")
paths[i] = strings.Split(path, "][")
}
fieldPaths := makeFieldOrValue(paths, fieldValues)
err := assignPathValues(dst, fieldPaths)
if err != nil {
return errors.Wrap(err, "error assigning value to destination")
}
return nil
}
// This returns a field name, either using the variable name, or the json
// annotation if that exists.
func getFieldName(f reflect.StructField) string {
n := f.Name
tag, found := f.Tag.Lookup("json")
if found {
// If we have a json field, and the first part of it before the
// first comma is non-empty, that's our field name.
parts := strings.Split(tag, ",")
if parts[0] != "" {
n = parts[0]
}
}
return n
}
// Create a map of field names that we'll see in the deepObject to reflect
// field indices on the given type.
func fieldIndicesByJsonTag(i interface{}) (map[string]int, error) {
t := reflect.TypeOf(i)
if t.Kind() != reflect.Struct {
return nil, errors.New("expected a struct as input")
}
n := t.NumField()
fieldMap := make(map[string]int)
for i := 0; i < n; i++ {
field := t.Field(i)
fieldName := getFieldName(field)
fieldMap[fieldName] = i
}
return fieldMap, nil
}
func assignPathValues(dst interface{}, pathValues fieldOrValue) error {
//t := reflect.TypeOf(dst)
v := reflect.ValueOf(dst)
iv := reflect.Indirect(v)
it := iv.Type()
switch it.Kind() {
case reflect.Slice:
sliceLength := len(pathValues.fields)
dstSlice := reflect.MakeSlice(it, sliceLength, sliceLength)
err := assignSlice(dstSlice, pathValues)
if err != nil {
return errors.Wrap(err, "error assigning slice")
}
iv.Set(dstSlice)
return nil
case reflect.Struct:
// Some special types we care about are structs. Handle them
// here. They may be redefined, so we need to do some hoop
// jumping. If the types are aliased, we need to type convert
// the pointer, then set the value of the dereference pointer.
// We check to see if the object implements the Binder interface first.
if dst, isBinder := v.Interface().(Binder); isBinder {
return dst.Bind(pathValues.value)
}
// Then check the legacy types
if it.ConvertibleTo(reflect.TypeOf(types.Date{})) {
var date types.Date
var err error
date.Time, err = time.Parse(types.DateFormat, pathValues.value)
if err != nil {
return errors.Wrap(err, "invalid date format")
}
dst := iv
if it != reflect.TypeOf(types.Date{}) {
// Types are aliased, convert the pointers.
ivPtr := iv.Addr()
aPtr := ivPtr.Convert(reflect.TypeOf(&types.Date{}))
dst = reflect.Indirect(aPtr)
}
dst.Set(reflect.ValueOf(date))
}
if it.ConvertibleTo(reflect.TypeOf(time.Time{})) {
var tm time.Time
var err error
tm, err = time.Parse(time.RFC3339Nano, pathValues.value)
if err != nil {
// Fall back to parsing it as a date.
tm, err = time.Parse(types.DateFormat, pathValues.value)
if err != nil {
return fmt.Errorf("error parsing tim as RFC3339 or 2006-01-02 time: %s", err)
}
return errors.Wrap(err, "invalid date format")
}
dst := iv
if it != reflect.TypeOf(time.Time{}) {
// Types are aliased, convert the pointers.
ivPtr := iv.Addr()
aPtr := ivPtr.Convert(reflect.TypeOf(&time.Time{}))
dst = reflect.Indirect(aPtr)
}
dst.Set(reflect.ValueOf(tm))
}
fieldMap, err := fieldIndicesByJsonTag(iv.Interface())
if err != nil {
return errors.Wrap(err, "failed enumerating fields")
}
for _, fieldName := range sortedFieldOrValueKeys(pathValues.fields) {
fieldValue := pathValues.fields[fieldName]
fieldIndex, found := fieldMap[fieldName]
if !found {
return fmt.Errorf("field [%s] is not present in destination object", fieldName)
}
field := iv.Field(fieldIndex)
err = assignPathValues(field.Addr().Interface(), fieldValue)
if err != nil {
return errors.Wrapf(err, "error assigning field [%s]", fieldName)
}
}
return nil
case reflect.Ptr:
// If we have a pointer after redirecting, it means we're dealing with
// an optional field, such as *string, which was passed in as &foo. We
// will allocate it if necessary, and call ourselves with a different
// interface.
dstVal := reflect.New(it.Elem())
dstPtr := dstVal.Interface()
err := assignPathValues(dstPtr, pathValues)
iv.Set(dstVal)
return err
case reflect.Bool:
val, err := strconv.ParseBool(pathValues.value)
if err != nil {
return fmt.Errorf("expected a valid bool, got %s", pathValues.value)
}
iv.SetBool(val)
return nil
case reflect.Float32:
val, err := strconv.ParseFloat(pathValues.value, 32)
if err != nil {
return fmt.Errorf("expected a valid float, got %s", pathValues.value)
}
iv.SetFloat(val)
return nil
case reflect.Float64:
val, err := strconv.ParseFloat(pathValues.value, 64)
if err != nil {
return fmt.Errorf("expected a valid float, got %s", pathValues.value)
}
iv.SetFloat(val)
return nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
val, err := strconv.ParseInt(pathValues.value, 10, 64)
if err != nil {
return fmt.Errorf("expected a valid int, got %s", pathValues.value)
}
iv.SetInt(val)
return nil
case reflect.String:
iv.SetString(pathValues.value)
return nil
default:
return errors.New("unhandled type: " + it.String())
}
}
func assignSlice(dst reflect.Value, pathValues fieldOrValue) error {
// Gather up the values
nValues := len(pathValues.fields)
values := make([]string, nValues)
// We expect to have consecutive array indices in the map
for i := 0; i < nValues; i++ {
indexStr := strconv.Itoa(i)
fv, found := pathValues.fields[indexStr]
if !found {
return errors.New("array deepObjects must have consecutive indices")
}
values[i] = fv.value
}
// This could be cleaner, but we can call into assignPathValues to
// avoid recreating this logic.
for i := 0; i < nValues; i++ {
dstElem := dst.Index(i).Addr()
err := assignPathValues(dstElem.Interface(), fieldOrValue{value: values[i]})
if err != nil {
return errors.Wrap(err, "error binding array")
}
}
return nil
}
func sortedFieldOrValueKeys(m map[string]fieldOrValue) []string {
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
| UnmarshalDeepObject | identifier_name |
deepobject.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"encoding/json"
"fmt"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/deepmap/oapi-codegen/pkg/types"
)
func marshalDeepObject(in interface{}, path []string) ([]string, error) {
var result []string
switch t := in.(type) {
case []interface{}:
// For the array, we will use numerical subscripts of the form [x],
// in the same order as the array.
for i, iface := range t {
newPath := append(path, strconv.Itoa(i))
fields, err := marshalDeepObject(iface, newPath)
if err != nil {
return nil, errors.Wrap(err, "error traversing array")
}
result = append(result, fields...)
}
case map[string]interface{}:
// For a map, each key (field name) becomes a member of the path, and
// we recurse. First, sort the keys.
keys := make([]string, len(t))
i := 0
for k := range t {
keys[i] = k
i++
}
sort.Strings(keys)
// Now, for each key, we recursively marshal it.
for _, k := range keys {
newPath := append(path, k)
fields, err := marshalDeepObject(t[k], newPath)
if err != nil {
return nil, errors.Wrap(err, "error traversing map")
}
result = append(result, fields...)
}
default:
// Now, for a concrete value, we will turn the path elements
// into a deepObject style set of subscripts. [a, b, c] turns into
// [a][b][c]
prefix := "[" + strings.Join(path, "][") + "]"
result = []string{
prefix + fmt.Sprintf("=%v", t),
}
}
return result, nil
}
func MarshalDeepObject(i interface{}, paramName string) (string, error) {
// We're going to marshal to JSON and unmarshal into an interface{},
// which will use the json pkg to deal with all the field annotations. We
// can then walk the generic object structure to produce a deepObject. This
// isn't efficient and it would be more efficient to reflect on our own,
// but it's complicated, error-prone code.
buf, err := json.Marshal(i)
if err != nil {
return "", errors.Wrap(err, "failed to marshal input to JSON")
}
var i2 interface{}
err = json.Unmarshal(buf, &i2)
if err != nil {
return "", errors.Wrap(err, "failed to unmarshal JSON")
}
fields, err := marshalDeepObject(i2, nil)
if err != nil {
return "", errors.Wrap(err, "error traversing JSON structure")
}
// Prefix the param name to each subscripted field.
for i := range fields {
fields[i] = paramName + fields[i]
}
return strings.Join(fields, "&"), nil
}
type fieldOrValue struct {
fields map[string]fieldOrValue
value string
}
func (f *fieldOrValue) appendPathValue(path []string, value string) {
fieldName := path[0]
if len(path) == 1 |
pv, found := f.fields[fieldName]
if !found {
pv = fieldOrValue{
fields: make(map[string]fieldOrValue),
}
f.fields[fieldName] = pv
}
pv.appendPathValue(path[1:], value)
}
func makeFieldOrValue(paths [][]string, values []string) fieldOrValue {
f := fieldOrValue{
fields: make(map[string]fieldOrValue),
}
for i := range paths {
path := paths[i]
value := values[i]
f.appendPathValue(path, value)
}
return f
}
func UnmarshalDeepObject(dst interface{}, paramName string, params url.Values) error {
// Params are all the query args, so we need those that look like
// "paramName["...
var fieldNames []string
var fieldValues []string
searchStr := paramName + "["
for pName, pValues := range params {
if strings.HasPrefix(pName, searchStr) {
// trim the parameter name from the full name.
pName = pName[len(paramName):]
fieldNames = append(fieldNames, pName)
if len(pValues) != 1 {
return fmt.Errorf("%s has multiple values", pName)
}
fieldValues = append(fieldValues, pValues[0])
}
}
// Now, for each field, reconstruct its subscript path and value
paths := make([][]string, len(fieldNames))
for i, path := range fieldNames {
path = strings.TrimLeft(path, "[")
path = strings.TrimRight(path, "]")
paths[i] = strings.Split(path, "][")
}
fieldPaths := makeFieldOrValue(paths, fieldValues)
err := assignPathValues(dst, fieldPaths)
if err != nil {
return errors.Wrap(err, "error assigning value to destination")
}
return nil
}
// This returns a field name, either using the variable name, or the json
// annotation if that exists.
func getFieldName(f reflect.StructField) string {
n := f.Name
tag, found := f.Tag.Lookup("json")
if found {
// If we have a json field, and the first part of it before the
// first comma is non-empty, that's our field name.
parts := strings.Split(tag, ",")
if parts[0] != "" {
n = parts[0]
}
}
return n
}
// Create a map of field names that we'll see in the deepObject to reflect
// field indices on the given type.
func fieldIndicesByJsonTag(i interface{}) (map[string]int, error) {
t := reflect.TypeOf(i)
if t.Kind() != reflect.Struct {
return nil, errors.New("expected a struct as input")
}
n := t.NumField()
fieldMap := make(map[string]int)
for i := 0; i < n; i++ {
field := t.Field(i)
fieldName := getFieldName(field)
fieldMap[fieldName] = i
}
return fieldMap, nil
}
func assignPathValues(dst interface{}, pathValues fieldOrValue) error {
//t := reflect.TypeOf(dst)
v := reflect.ValueOf(dst)
iv := reflect.Indirect(v)
it := iv.Type()
switch it.Kind() {
case reflect.Slice:
sliceLength := len(pathValues.fields)
dstSlice := reflect.MakeSlice(it, sliceLength, sliceLength)
err := assignSlice(dstSlice, pathValues)
if err != nil {
return errors.Wrap(err, "error assigning slice")
}
iv.Set(dstSlice)
return nil
case reflect.Struct:
// Some special types we care about are structs. Handle them
// here. They may be redefined, so we need to do some hoop
// jumping. If the types are aliased, we need to type convert
// the pointer, then set the value of the dereference pointer.
// We check to see if the object implements the Binder interface first.
if dst, isBinder := v.Interface().(Binder); isBinder {
return dst.Bind(pathValues.value)
}
// Then check the legacy types
if it.ConvertibleTo(reflect.TypeOf(types.Date{})) {
var date types.Date
var err error
date.Time, err = time.Parse(types.DateFormat, pathValues.value)
if err != nil {
return errors.Wrap(err, "invalid date format")
}
dst := iv
if it != reflect.TypeOf(types.Date{}) {
// Types are aliased, convert the pointers.
ivPtr := iv.Addr()
aPtr := ivPtr.Convert(reflect.TypeOf(&types.Date{}))
dst = reflect.Indirect(aPtr)
}
dst.Set(reflect.ValueOf(date))
}
if it.ConvertibleTo(reflect.TypeOf(time.Time{})) {
var tm time.Time
var err error
tm, err = time.Parse(time.RFC3339Nano, pathValues.value)
if err != nil {
// Fall back to parsing it as a date.
tm, err = time.Parse(types.DateFormat, pathValues.value)
if err != nil {
return fmt.Errorf("error parsing tim as RFC3339 or 2006-01-02 time: %s", err)
}
return errors.Wrap(err, "invalid date format")
}
dst := iv
if it != reflect.TypeOf(time.Time{}) {
// Types are aliased, convert the pointers.
ivPtr := iv.Addr()
aPtr := ivPtr.Convert(reflect.TypeOf(&time.Time{}))
dst = reflect.Indirect(aPtr)
}
dst.Set(reflect.ValueOf(tm))
}
fieldMap, err := fieldIndicesByJsonTag(iv.Interface())
if err != nil {
return errors.Wrap(err, "failed enumerating fields")
}
for _, fieldName := range sortedFieldOrValueKeys(pathValues.fields) {
fieldValue := pathValues.fields[fieldName]
fieldIndex, found := fieldMap[fieldName]
if !found {
return fmt.Errorf("field [%s] is not present in destination object", fieldName)
}
field := iv.Field(fieldIndex)
err = assignPathValues(field.Addr().Interface(), fieldValue)
if err != nil {
return errors.Wrapf(err, "error assigning field [%s]", fieldName)
}
}
return nil
case reflect.Ptr:
// If we have a pointer after redirecting, it means we're dealing with
// an optional field, such as *string, which was passed in as &foo. We
// will allocate it if necessary, and call ourselves with a different
// interface.
dstVal := reflect.New(it.Elem())
dstPtr := dstVal.Interface()
err := assignPathValues(dstPtr, pathValues)
iv.Set(dstVal)
return err
case reflect.Bool:
val, err := strconv.ParseBool(pathValues.value)
if err != nil {
return fmt.Errorf("expected a valid bool, got %s", pathValues.value)
}
iv.SetBool(val)
return nil
case reflect.Float32:
val, err := strconv.ParseFloat(pathValues.value, 32)
if err != nil {
return fmt.Errorf("expected a valid float, got %s", pathValues.value)
}
iv.SetFloat(val)
return nil
case reflect.Float64:
val, err := strconv.ParseFloat(pathValues.value, 64)
if err != nil {
return fmt.Errorf("expected a valid float, got %s", pathValues.value)
}
iv.SetFloat(val)
return nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
val, err := strconv.ParseInt(pathValues.value, 10, 64)
if err != nil {
return fmt.Errorf("expected a valid int, got %s", pathValues.value)
}
iv.SetInt(val)
return nil
case reflect.String:
iv.SetString(pathValues.value)
return nil
default:
return errors.New("unhandled type: " + it.String())
}
}
func assignSlice(dst reflect.Value, pathValues fieldOrValue) error {
// Gather up the values
nValues := len(pathValues.fields)
values := make([]string, nValues)
// We expect to have consecutive array indices in the map
for i := 0; i < nValues; i++ {
indexStr := strconv.Itoa(i)
fv, found := pathValues.fields[indexStr]
if !found {
return errors.New("array deepObjects must have consecutive indices")
}
values[i] = fv.value
}
// This could be cleaner, but we can call into assignPathValues to
// avoid recreating this logic.
for i := 0; i < nValues; i++ {
dstElem := dst.Index(i).Addr()
err := assignPathValues(dstElem.Interface(), fieldOrValue{value: values[i]})
if err != nil {
return errors.Wrap(err, "error binding array")
}
}
return nil
}
func sortedFieldOrValueKeys(m map[string]fieldOrValue) []string {
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
| {
f.fields[fieldName] = fieldOrValue{value: value}
return
} | conditional_block |
deepobject.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"encoding/json"
"fmt"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/deepmap/oapi-codegen/pkg/types"
)
func marshalDeepObject(in interface{}, path []string) ([]string, error) {
var result []string
switch t := in.(type) {
case []interface{}:
// For the array, we will use numerical subscripts of the form [x],
// in the same order as the array.
for i, iface := range t {
newPath := append(path, strconv.Itoa(i))
fields, err := marshalDeepObject(iface, newPath)
if err != nil {
return nil, errors.Wrap(err, "error traversing array")
}
result = append(result, fields...)
}
case map[string]interface{}:
// For a map, each key (field name) becomes a member of the path, and
// we recurse. First, sort the keys.
keys := make([]string, len(t))
i := 0
for k := range t {
keys[i] = k
i++
}
sort.Strings(keys)
// Now, for each key, we recursively marshal it.
for _, k := range keys {
newPath := append(path, k)
fields, err := marshalDeepObject(t[k], newPath)
if err != nil {
return nil, errors.Wrap(err, "error traversing map")
}
result = append(result, fields...)
}
default:
// Now, for a concrete value, we will turn the path elements
// into a deepObject style set of subscripts. [a, b, c] turns into
// [a][b][c]
prefix := "[" + strings.Join(path, "][") + "]"
result = []string{
prefix + fmt.Sprintf("=%v", t),
}
}
return result, nil
}
func MarshalDeepObject(i interface{}, paramName string) (string, error) {
// We're going to marshal to JSON and unmarshal into an interface{},
// which will use the json pkg to deal with all the field annotations. We
// can then walk the generic object structure to produce a deepObject. This
// isn't efficient and it would be more efficient to reflect on our own,
// but it's complicated, error-prone code.
buf, err := json.Marshal(i)
if err != nil {
return "", errors.Wrap(err, "failed to marshal input to JSON")
}
var i2 interface{}
err = json.Unmarshal(buf, &i2)
if err != nil {
return "", errors.Wrap(err, "failed to unmarshal JSON")
}
fields, err := marshalDeepObject(i2, nil)
if err != nil {
return "", errors.Wrap(err, "error traversing JSON structure")
}
// Prefix the param name to each subscripted field.
for i := range fields {
fields[i] = paramName + fields[i]
}
return strings.Join(fields, "&"), nil | fields map[string]fieldOrValue
value string
}
func (f *fieldOrValue) appendPathValue(path []string, value string) {
fieldName := path[0]
if len(path) == 1 {
f.fields[fieldName] = fieldOrValue{value: value}
return
}
pv, found := f.fields[fieldName]
if !found {
pv = fieldOrValue{
fields: make(map[string]fieldOrValue),
}
f.fields[fieldName] = pv
}
pv.appendPathValue(path[1:], value)
}
func makeFieldOrValue(paths [][]string, values []string) fieldOrValue {
f := fieldOrValue{
fields: make(map[string]fieldOrValue),
}
for i := range paths {
path := paths[i]
value := values[i]
f.appendPathValue(path, value)
}
return f
}
func UnmarshalDeepObject(dst interface{}, paramName string, params url.Values) error {
// Params are all the query args, so we need those that look like
// "paramName["...
var fieldNames []string
var fieldValues []string
searchStr := paramName + "["
for pName, pValues := range params {
if strings.HasPrefix(pName, searchStr) {
// trim the parameter name from the full name.
pName = pName[len(paramName):]
fieldNames = append(fieldNames, pName)
if len(pValues) != 1 {
return fmt.Errorf("%s has multiple values", pName)
}
fieldValues = append(fieldValues, pValues[0])
}
}
// Now, for each field, reconstruct its subscript path and value
paths := make([][]string, len(fieldNames))
for i, path := range fieldNames {
path = strings.TrimLeft(path, "[")
path = strings.TrimRight(path, "]")
paths[i] = strings.Split(path, "][")
}
fieldPaths := makeFieldOrValue(paths, fieldValues)
err := assignPathValues(dst, fieldPaths)
if err != nil {
return errors.Wrap(err, "error assigning value to destination")
}
return nil
}
// This returns a field name, either using the variable name, or the json
// annotation if that exists.
func getFieldName(f reflect.StructField) string {
n := f.Name
tag, found := f.Tag.Lookup("json")
if found {
// If we have a json field, and the first part of it before the
// first comma is non-empty, that's our field name.
parts := strings.Split(tag, ",")
if parts[0] != "" {
n = parts[0]
}
}
return n
}
// Create a map of field names that we'll see in the deepObject to reflect
// field indices on the given type.
func fieldIndicesByJsonTag(i interface{}) (map[string]int, error) {
t := reflect.TypeOf(i)
if t.Kind() != reflect.Struct {
return nil, errors.New("expected a struct as input")
}
n := t.NumField()
fieldMap := make(map[string]int)
for i := 0; i < n; i++ {
field := t.Field(i)
fieldName := getFieldName(field)
fieldMap[fieldName] = i
}
return fieldMap, nil
}
func assignPathValues(dst interface{}, pathValues fieldOrValue) error {
//t := reflect.TypeOf(dst)
v := reflect.ValueOf(dst)
iv := reflect.Indirect(v)
it := iv.Type()
switch it.Kind() {
case reflect.Slice:
sliceLength := len(pathValues.fields)
dstSlice := reflect.MakeSlice(it, sliceLength, sliceLength)
err := assignSlice(dstSlice, pathValues)
if err != nil {
return errors.Wrap(err, "error assigning slice")
}
iv.Set(dstSlice)
return nil
case reflect.Struct:
// Some special types we care about are structs. Handle them
// here. They may be redefined, so we need to do some hoop
// jumping. If the types are aliased, we need to type convert
// the pointer, then set the value of the dereference pointer.
// We check to see if the object implements the Binder interface first.
if dst, isBinder := v.Interface().(Binder); isBinder {
return dst.Bind(pathValues.value)
}
// Then check the legacy types
if it.ConvertibleTo(reflect.TypeOf(types.Date{})) {
var date types.Date
var err error
date.Time, err = time.Parse(types.DateFormat, pathValues.value)
if err != nil {
return errors.Wrap(err, "invalid date format")
}
dst := iv
if it != reflect.TypeOf(types.Date{}) {
// Types are aliased, convert the pointers.
ivPtr := iv.Addr()
aPtr := ivPtr.Convert(reflect.TypeOf(&types.Date{}))
dst = reflect.Indirect(aPtr)
}
dst.Set(reflect.ValueOf(date))
}
if it.ConvertibleTo(reflect.TypeOf(time.Time{})) {
var tm time.Time
var err error
tm, err = time.Parse(time.RFC3339Nano, pathValues.value)
if err != nil {
// Fall back to parsing it as a date.
tm, err = time.Parse(types.DateFormat, pathValues.value)
if err != nil {
return fmt.Errorf("error parsing tim as RFC3339 or 2006-01-02 time: %s", err)
}
return errors.Wrap(err, "invalid date format")
}
dst := iv
if it != reflect.TypeOf(time.Time{}) {
// Types are aliased, convert the pointers.
ivPtr := iv.Addr()
aPtr := ivPtr.Convert(reflect.TypeOf(&time.Time{}))
dst = reflect.Indirect(aPtr)
}
dst.Set(reflect.ValueOf(tm))
}
fieldMap, err := fieldIndicesByJsonTag(iv.Interface())
if err != nil {
return errors.Wrap(err, "failed enumerating fields")
}
for _, fieldName := range sortedFieldOrValueKeys(pathValues.fields) {
fieldValue := pathValues.fields[fieldName]
fieldIndex, found := fieldMap[fieldName]
if !found {
return fmt.Errorf("field [%s] is not present in destination object", fieldName)
}
field := iv.Field(fieldIndex)
err = assignPathValues(field.Addr().Interface(), fieldValue)
if err != nil {
return errors.Wrapf(err, "error assigning field [%s]", fieldName)
}
}
return nil
case reflect.Ptr:
// If we have a pointer after redirecting, it means we're dealing with
// an optional field, such as *string, which was passed in as &foo. We
// will allocate it if necessary, and call ourselves with a different
// interface.
dstVal := reflect.New(it.Elem())
dstPtr := dstVal.Interface()
err := assignPathValues(dstPtr, pathValues)
iv.Set(dstVal)
return err
case reflect.Bool:
val, err := strconv.ParseBool(pathValues.value)
if err != nil {
return fmt.Errorf("expected a valid bool, got %s", pathValues.value)
}
iv.SetBool(val)
return nil
case reflect.Float32:
val, err := strconv.ParseFloat(pathValues.value, 32)
if err != nil {
return fmt.Errorf("expected a valid float, got %s", pathValues.value)
}
iv.SetFloat(val)
return nil
case reflect.Float64:
val, err := strconv.ParseFloat(pathValues.value, 64)
if err != nil {
return fmt.Errorf("expected a valid float, got %s", pathValues.value)
}
iv.SetFloat(val)
return nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
val, err := strconv.ParseInt(pathValues.value, 10, 64)
if err != nil {
return fmt.Errorf("expected a valid int, got %s", pathValues.value)
}
iv.SetInt(val)
return nil
case reflect.String:
iv.SetString(pathValues.value)
return nil
default:
return errors.New("unhandled type: " + it.String())
}
}
func assignSlice(dst reflect.Value, pathValues fieldOrValue) error {
// Gather up the values
nValues := len(pathValues.fields)
values := make([]string, nValues)
// We expect to have consecutive array indices in the map
for i := 0; i < nValues; i++ {
indexStr := strconv.Itoa(i)
fv, found := pathValues.fields[indexStr]
if !found {
return errors.New("array deepObjects must have consecutive indices")
}
values[i] = fv.value
}
// This could be cleaner, but we can call into assignPathValues to
// avoid recreating this logic.
for i := 0; i < nValues; i++ {
dstElem := dst.Index(i).Addr()
err := assignPathValues(dstElem.Interface(), fieldOrValue{value: values[i]})
if err != nil {
return errors.Wrap(err, "error binding array")
}
}
return nil
}
func sortedFieldOrValueKeys(m map[string]fieldOrValue) []string {
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
} | }
type fieldOrValue struct { | random_line_split |
deepobject.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"encoding/json"
"fmt"
"net/url"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/exoscale/internal/github.com/deepmap/oapi-codegen/pkg/types"
)
func marshalDeepObject(in interface{}, path []string) ([]string, error) {
var result []string
switch t := in.(type) {
case []interface{}:
// For the array, we will use numerical subscripts of the form [x],
// in the same order as the array.
for i, iface := range t {
newPath := append(path, strconv.Itoa(i))
fields, err := marshalDeepObject(iface, newPath)
if err != nil {
return nil, errors.Wrap(err, "error traversing array")
}
result = append(result, fields...)
}
case map[string]interface{}:
// For a map, each key (field name) becomes a member of the path, and
// we recurse. First, sort the keys.
keys := make([]string, len(t))
i := 0
for k := range t {
keys[i] = k
i++
}
sort.Strings(keys)
// Now, for each key, we recursively marshal it.
for _, k := range keys {
newPath := append(path, k)
fields, err := marshalDeepObject(t[k], newPath)
if err != nil {
return nil, errors.Wrap(err, "error traversing map")
}
result = append(result, fields...)
}
default:
// Now, for a concrete value, we will turn the path elements
// into a deepObject style set of subscripts. [a, b, c] turns into
// [a][b][c]
prefix := "[" + strings.Join(path, "][") + "]"
result = []string{
prefix + fmt.Sprintf("=%v", t),
}
}
return result, nil
}
func MarshalDeepObject(i interface{}, paramName string) (string, error) {
// We're going to marshal to JSON and unmarshal into an interface{},
// which will use the json pkg to deal with all the field annotations. We
// can then walk the generic object structure to produce a deepObject. This
// isn't efficient and it would be more efficient to reflect on our own,
// but it's complicated, error-prone code.
buf, err := json.Marshal(i)
if err != nil {
return "", errors.Wrap(err, "failed to marshal input to JSON")
}
var i2 interface{}
err = json.Unmarshal(buf, &i2)
if err != nil {
return "", errors.Wrap(err, "failed to unmarshal JSON")
}
fields, err := marshalDeepObject(i2, nil)
if err != nil {
return "", errors.Wrap(err, "error traversing JSON structure")
}
// Prefix the param name to each subscripted field.
for i := range fields {
fields[i] = paramName + fields[i]
}
return strings.Join(fields, "&"), nil
}
type fieldOrValue struct {
fields map[string]fieldOrValue
value string
}
func (f *fieldOrValue) appendPathValue(path []string, value string) |
func makeFieldOrValue(paths [][]string, values []string) fieldOrValue {
f := fieldOrValue{
fields: make(map[string]fieldOrValue),
}
for i := range paths {
path := paths[i]
value := values[i]
f.appendPathValue(path, value)
}
return f
}
func UnmarshalDeepObject(dst interface{}, paramName string, params url.Values) error {
// Params are all the query args, so we need those that look like
// "paramName["...
var fieldNames []string
var fieldValues []string
searchStr := paramName + "["
for pName, pValues := range params {
if strings.HasPrefix(pName, searchStr) {
// trim the parameter name from the full name.
pName = pName[len(paramName):]
fieldNames = append(fieldNames, pName)
if len(pValues) != 1 {
return fmt.Errorf("%s has multiple values", pName)
}
fieldValues = append(fieldValues, pValues[0])
}
}
// Now, for each field, reconstruct its subscript path and value
paths := make([][]string, len(fieldNames))
for i, path := range fieldNames {
path = strings.TrimLeft(path, "[")
path = strings.TrimRight(path, "]")
paths[i] = strings.Split(path, "][")
}
fieldPaths := makeFieldOrValue(paths, fieldValues)
err := assignPathValues(dst, fieldPaths)
if err != nil {
return errors.Wrap(err, "error assigning value to destination")
}
return nil
}
// This returns a field name, either using the variable name, or the json
// annotation if that exists.
func getFieldName(f reflect.StructField) string {
n := f.Name
tag, found := f.Tag.Lookup("json")
if found {
// If we have a json field, and the first part of it before the
// first comma is non-empty, that's our field name.
parts := strings.Split(tag, ",")
if parts[0] != "" {
n = parts[0]
}
}
return n
}
// Create a map of field names that we'll see in the deepObject to reflect
// field indices on the given type.
func fieldIndicesByJsonTag(i interface{}) (map[string]int, error) {
t := reflect.TypeOf(i)
if t.Kind() != reflect.Struct {
return nil, errors.New("expected a struct as input")
}
n := t.NumField()
fieldMap := make(map[string]int)
for i := 0; i < n; i++ {
field := t.Field(i)
fieldName := getFieldName(field)
fieldMap[fieldName] = i
}
return fieldMap, nil
}
func assignPathValues(dst interface{}, pathValues fieldOrValue) error {
//t := reflect.TypeOf(dst)
v := reflect.ValueOf(dst)
iv := reflect.Indirect(v)
it := iv.Type()
switch it.Kind() {
case reflect.Slice:
sliceLength := len(pathValues.fields)
dstSlice := reflect.MakeSlice(it, sliceLength, sliceLength)
err := assignSlice(dstSlice, pathValues)
if err != nil {
return errors.Wrap(err, "error assigning slice")
}
iv.Set(dstSlice)
return nil
case reflect.Struct:
// Some special types we care about are structs. Handle them
// here. They may be redefined, so we need to do some hoop
// jumping. If the types are aliased, we need to type convert
// the pointer, then set the value of the dereference pointer.
// We check to see if the object implements the Binder interface first.
if dst, isBinder := v.Interface().(Binder); isBinder {
return dst.Bind(pathValues.value)
}
// Then check the legacy types
if it.ConvertibleTo(reflect.TypeOf(types.Date{})) {
var date types.Date
var err error
date.Time, err = time.Parse(types.DateFormat, pathValues.value)
if err != nil {
return errors.Wrap(err, "invalid date format")
}
dst := iv
if it != reflect.TypeOf(types.Date{}) {
// Types are aliased, convert the pointers.
ivPtr := iv.Addr()
aPtr := ivPtr.Convert(reflect.TypeOf(&types.Date{}))
dst = reflect.Indirect(aPtr)
}
dst.Set(reflect.ValueOf(date))
}
if it.ConvertibleTo(reflect.TypeOf(time.Time{})) {
var tm time.Time
var err error
tm, err = time.Parse(time.RFC3339Nano, pathValues.value)
if err != nil {
// Fall back to parsing it as a date.
tm, err = time.Parse(types.DateFormat, pathValues.value)
if err != nil {
return fmt.Errorf("error parsing tim as RFC3339 or 2006-01-02 time: %s", err)
}
return errors.Wrap(err, "invalid date format")
}
dst := iv
if it != reflect.TypeOf(time.Time{}) {
// Types are aliased, convert the pointers.
ivPtr := iv.Addr()
aPtr := ivPtr.Convert(reflect.TypeOf(&time.Time{}))
dst = reflect.Indirect(aPtr)
}
dst.Set(reflect.ValueOf(tm))
}
fieldMap, err := fieldIndicesByJsonTag(iv.Interface())
if err != nil {
return errors.Wrap(err, "failed enumerating fields")
}
for _, fieldName := range sortedFieldOrValueKeys(pathValues.fields) {
fieldValue := pathValues.fields[fieldName]
fieldIndex, found := fieldMap[fieldName]
if !found {
return fmt.Errorf("field [%s] is not present in destination object", fieldName)
}
field := iv.Field(fieldIndex)
err = assignPathValues(field.Addr().Interface(), fieldValue)
if err != nil {
return errors.Wrapf(err, "error assigning field [%s]", fieldName)
}
}
return nil
case reflect.Ptr:
// If we have a pointer after redirecting, it means we're dealing with
// an optional field, such as *string, which was passed in as &foo. We
// will allocate it if necessary, and call ourselves with a different
// interface.
dstVal := reflect.New(it.Elem())
dstPtr := dstVal.Interface()
err := assignPathValues(dstPtr, pathValues)
iv.Set(dstVal)
return err
case reflect.Bool:
val, err := strconv.ParseBool(pathValues.value)
if err != nil {
return fmt.Errorf("expected a valid bool, got %s", pathValues.value)
}
iv.SetBool(val)
return nil
case reflect.Float32:
val, err := strconv.ParseFloat(pathValues.value, 32)
if err != nil {
return fmt.Errorf("expected a valid float, got %s", pathValues.value)
}
iv.SetFloat(val)
return nil
case reflect.Float64:
val, err := strconv.ParseFloat(pathValues.value, 64)
if err != nil {
return fmt.Errorf("expected a valid float, got %s", pathValues.value)
}
iv.SetFloat(val)
return nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
val, err := strconv.ParseInt(pathValues.value, 10, 64)
if err != nil {
return fmt.Errorf("expected a valid int, got %s", pathValues.value)
}
iv.SetInt(val)
return nil
case reflect.String:
iv.SetString(pathValues.value)
return nil
default:
return errors.New("unhandled type: " + it.String())
}
}
// assignSlice binds the indexed entries of a deepObject-style parameter into
// the slice value dst. pathValues.fields is expected to map "0", "1", ... to
// element values; any gap in the indices is an error.
// NOTE(review): dst.Index(i) assumes dst already has at least nValues
// elements — presumably the caller sized the slice; confirm at the call site.
func assignSlice(dst reflect.Value, pathValues fieldOrValue) error {
	// Gather up the values
	nValues := len(pathValues.fields)
	values := make([]string, nValues)
	// We expect to have consecutive array indices in the map
	for i := 0; i < nValues; i++ {
		indexStr := strconv.Itoa(i)
		fv, found := pathValues.fields[indexStr]
		if !found {
			return errors.New("array deepObjects must have consecutive indices")
		}
		values[i] = fv.value
	}
	// This could be cleaner, but we can call into assignPathValues to
	// avoid recreating this logic.
	for i := 0; i < nValues; i++ {
		// Each element is assigned through its address so assignPathValues
		// can dispatch on the element's concrete kind.
		dstElem := dst.Index(i).Addr()
		err := assignPathValues(dstElem.Interface(), fieldOrValue{value: values[i]})
		if err != nil {
			return errors.Wrap(err, "error binding array")
		}
	}
	return nil
}
// sortedFieldOrValueKeys returns m's keys in ascending lexical order so
// that iteration over the map is deterministic.
func sortedFieldOrValueKeys(m map[string]fieldOrValue) []string {
	out := make([]string, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}
| {
fieldName := path[0]
if len(path) == 1 {
f.fields[fieldName] = fieldOrValue{value: value}
return
}
pv, found := f.fields[fieldName]
if !found {
pv = fieldOrValue{
fields: make(map[string]fieldOrValue),
}
f.fields[fieldName] = pv
}
pv.appendPathValue(path[1:], value)
} | identifier_body |
server.go | package server
import (
"avrilko-rpc/log"
"avrilko-rpc/protocol"
"avrilko-rpc/share"
"bufio"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"os/signal"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
var ErrServerClosed = errors.New("主服务已经关闭")
const (
ReadBuffSize = 1024 // 读取消息时候缓冲区大小
)
type contextKey struct {
name string
}
// String returns the key's name. contextKey values are used as distinct
// context keys so they can never collide with plain string keys.
func (c *contextKey) String() string {
	return c.name
}
var (
RemoteConnContextKey = &contextKey{"remote_conn"}
StartRequestContextKey = &contextKey{"start-parse-request"}
)
// 核心服务类
// Server is the core RPC server: it owns the listener, the registered
// service providers, the set of live connections, and the plugin container
// that hooks every stage of request handling.
type Server struct {
	ln net.Listener // the single listener (may be multiplexed to serve different protocols)
	readTimeout time.Duration // read deadline applied per read
	writeTimeout time.Duration // write deadline applied per write
	gatewayHttpServer *http.Server // set when the HTTP gateway is enabled
	disableHTTPGateway bool // disable the HTTP gateway (enabling it helps test/debug the rpc service)
	disableJSONRPCGateway bool // disable the JSON-RPC gateway
	serviceMapMu sync.RWMutex // guards serviceMap
	serviceMap map[string]*service // registered service providers
	connMu sync.RWMutex // guards activeConn
	activeConn map[net.Conn]struct{} // every live connection; map form prevents duplicates
	doneChan chan struct{} // closed when the server stops
	inShutdown int32 // 1 = shut down, 0 = running
	onShutdown []func(s *Server) // hooks executed on shutdown
	tlsConfig *tls.Config // TLS certificate configuration
	Plugins PluginContainer // plugin container (the core extension point)
	AuthFunc func(ctx context.Context, request *protocol.Message, token string) error // authentication hook
	handlerMsgNum int32 // number of messages currently being handled
}
// 初始化服务
// NewServer creates a Server with its maps, done channel and default plugin
// container initialized, then applies the given option functions in order.
func NewServer(opts ...OptionFunc) *Server {
	server := &Server{
		serviceMapMu: sync.RWMutex{},
		serviceMap:   make(map[string]*service),
		activeConn:   make(map[net.Conn]struct{}),
		doneChan:     make(chan struct{}),
		Plugins:      &pluginContainer{},
	}
	// range over an empty slice is a no-op, so the original
	// `if len(opts) > 0` guard was redundant.
	for _, opt := range opts {
		opt(server)
	}
	return server
}
// 开启服务
// Serve builds a listener for network/address and then serves on it,
// blocking until the listener fails or the server is closed.
func (s *Server) Serve(network, address string) error {
	ln, err := s.makeListener(network, address)
	if err != nil {
		return err
	}
	return s.ServeListener(network, ln)
}
// 开启服务
// ServeListener installs the shutdown signal handler, starts the protocol
// gateway on the listener, and then blocks accepting connections.
func (s *Server) ServeListener(network string, ln net.Listener) error {
	// Start watching for the termination signal.
	s.startShutdownServe()
	// Start the gateway front-ends (HTTP / JSON-RPC).
	s.startGateway(network, ln)
	return s.serveListener(ln)
}
// 循环监听conn 并发给severConn处理
// serveListener accepts connections in a loop, applies TCP keep-alive
// settings and the accept plugin hook, and hands each accepted connection
// to serveConn on its own goroutine. It returns ErrServerClosed when the
// server shuts down, or the first unrecoverable Accept error.
func (s *Server) serveListener(ln net.Listener) error {
	// Back-off delay used when Accept returns a temporary network error.
	var tempDelay time.Duration
	s.connMu.Lock()
	s.ln = ln
	s.connMu.Unlock()
	for {
		conn, err := ln.Accept()
		if err != nil {
			select {
			case <-s.doneChan:
				return ErrServerClosed
			default:
			}
			// BUG FIX: the temporary-error check must inspect err, not conn.
			// On a failed Accept conn is nil, so the original assertion
			// conn.(net.Error) could never succeed and this retry path was
			// dead code. Mirrors net/http: wait 5ms, doubling up to 1s.
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay = tempDelay * 2
				}
				if tempDelay > time.Second { // over 1s of back-off: give up
					return err
				}
				time.Sleep(tempDelay)
				log.ErrorF("rpc服务接受conn异常,正在重试, 原因%v, sleep %d", err, tempDelay)
				continue
			}
			if strings.Contains(err.Error(), "listener closed") { // server shut down
				return ErrServerClosed
			}
			return err
		}
		// Successful accept: reset the back-off delay.
		tempDelay = 0
		if tc, ok := conn.(*net.TCPConn); ok { // keep TCP links stable
			tc.SetKeepAlive(true)
			tc.SetKeepAlivePeriod(time.Minute * 5) // probe after 5 minutes of silence
			tc.SetLinger(10)                       // flush buffered data in background on close
		}
		conn, ok := s.Plugins.DoPostConnAccept(conn)
		if !ok { // plugin rejected the connection (rate limit, auth, etc.)
			// BUG FIX: the original fell through after closing the rejected
			// connection, registering the closed conn in activeConn and
			// serving it anyway.
			s.closeChannel(conn)
			continue
		}
		s.connMu.Lock()
		s.activeConn[conn] = struct{}{}
		s.connMu.Unlock()
		go s.serveConn(conn)
	}
}
// 开始处理消息
// serveConn drives one client connection: it performs the optional TLS
// handshake, then loops reading requests, authenticating them, and
// dispatching each request on its own goroutine until the connection or
// the server goes away. Panics from this connection are contained here so
// they cannot take down the process.
func (s *Server) serveConn(conn net.Conn) {
	// Recover any panic, log it with a bounded stack trace, then
	// unregister the connection and notify the close plugins.
	defer func() {
		if err := recover(); err != nil { // a panic occurred
			buf := make([]byte, 65536)
			size := runtime.Stack(buf, false)
			if size > 65536 {
				size = 65536
			}
			buf = buf[:size]
			log.ErrorF("conn 发生panic,原因%s, 客户端地址%s, 堆栈信息 %s", err, conn.RemoteAddr(), buf)
		}
		s.connMu.Lock()
		delete(s.activeConn, conn)
		s.connMu.Unlock()
		s.Plugins.DoPostConnClose(conn)
	}()
	// Bail out immediately if the server is already shutting down.
	if s.isShutdown() {
		s.closeChannel(conn)
		return
	}
	now := time.Now()
	// NOTE(review): `now` is captured once, so the read/write deadlines set
	// inside the loop below never advance; a long-lived connection will hit
	// the initial deadline. Confirm whether per-iteration time.Now() was
	// intended.
	// A TLS connection must complete its handshake before any reads.
	if tlsL, ok := conn.(*tls.Conn); ok {
		if s.readTimeout != 0 {
			tlsL.SetReadDeadline(now.Add(s.readTimeout))
		}
		if s.writeTimeout != 0 {
			tlsL.SetWriteDeadline(now.Add(s.writeTimeout))
		}
		if err := tlsL.Handshake(); err != nil {
			log.ErrorF("tls尝试握手失败,原因:%s,addr:", err, tlsL.RemoteAddr())
			return
		}
	}
	// Buffered reader shared by all requests on this connection.
	rBuff := bufio.NewReaderSize(conn, ReadBuffSize)
	for {
		// Re-check shutdown before every request.
		if s.isShutdown() {
			s.closeChannel(conn)
			return
		}
		if s.readTimeout != 0 { // apply the read deadline
			conn.SetReadDeadline(now.Add(s.readTimeout))
		}
		ctx := share.WithValue(context.Background(), RemoteConnContextKey, conn)
		request, err := s.readRequest(ctx, rBuff)
		if err != nil {
			if err == io.EOF {
				log.InfoF("客户端已经关闭链接c%s", conn.RemoteAddr())
			} else if strings.Contains(err.Error(), "use of closed network connection") {
				log.InfoF("连接已经被关闭%s", conn.RemoteAddr())
			} else {
				log.WarnF("rpc 读取数据失败,错误原因%v", err)
			}
			return
		}
		// About to write: apply the write deadline.
		if s.writeTimeout != 0 {
			conn.SetWriteDeadline(now.Add(s.writeTimeout))
		}
		// Record the request start time in the context.
		ctx = share.WithLocalValue(ctx, StartRequestContextKey, time.Now().UnixNano())
		if !request.IsHeartbeat() { // heartbeats skip authentication
			err := s.auth(ctx, request)
			if err != nil { // authentication failed
				if !request.IsOneway() { // client expects a reply: send the error
					response := request.Clone() // clone the request as the reply
					response.SetMessageType(protocol.Response)
					handleError(response, err)
					data := response.EncodeSlicePointer()
					_, err = conn.Write(*data)
					protocol.PutData(data)
					s.Plugins.DoPostWriteResponse(ctx, request, response, err)
					protocol.FreeMsg(response)
				} else { // one-way request: no reply needed
					s.Plugins.DoPreWriteResponse(ctx, request, nil)
				}
				protocol.FreeMsg(request)
				log.InfoF("连接鉴权失败,%s,错误原因%v", conn.RemoteAddr(), err)
				return
			}
		}
		// Handle the message on its own goroutine.
		go func() {
			// Track in-flight messages so Shutdown can drain them.
			atomic.AddInt32(&s.handlerMsgNum, 1)
			defer atomic.AddInt32(&s.handlerMsgNum, - 1)
			if request.IsHeartbeat() { // heartbeat: echo it back as a response
				request.SetMessageType(protocol.Response)
				data := request.EncodeSlicePointer()
				conn.Write(*data)
				protocol.PutData(data)
				return
			}
			// Not a heartbeat: prepare the metadata returned to the client.
			responseMetadata := make(map[string]string)
			// Expose the client's metadata to handlers...
			ctx = share.WithLocalValue(ctx, share.ReqMetaDataKey, request.Metadata)
			// ...and the server's response metadata map.
			ctx = share.WithLocalValue(ctx, share.ResMetaDataKey, responseMetadata)
			s.Plugins.DoPreHandleRequest(ctx, request) // request handling begins
			response, err := s.handleRequest(ctx, request)
			if err != nil {
				log.WarnF("处理请求错误: %v", err)
			}
			s.Plugins.DoPreWriteResponse(ctx, request, response)
			if !request.IsOneway() { // client expects a reply
				// Pull the (possibly handler-modified) metadata out of ctx.
				responseMetadataCtx := ctx.Value(share.ResMetaDataKey).(map[string]string)
				if len(responseMetadata) > 0 {
					meta := response.Metadata
					if meta == nil {
						// NOTE(review): this assigns the map to a local only;
						// response.Metadata is never set, so the merged
						// metadata appears to be dropped when the response
						// had none — confirm intent.
						meta = responseMetadataCtx
					} else {
						for k, v := range responseMetadataCtx {
							if meta[k] == "" {
								meta[k] = v
							}
						}
					}
				}
				// Compress large payloads with the client's requested codec.
				if len(response.Payload) > 1024 && request.CompressType() != protocol.None {
					response.SetCompressType(request.CompressType())
				}
				data := response.EncodeSlicePointer()
				conn.Write(*data)
				protocol.PutData(data)
			}
			s.Plugins.DoPostWriteResponse(ctx, request, response, err)
			protocol.FreeMsg(response)
			protocol.FreeMsg(request)
		}()
	}
}
// 处理单个请求
// handleRequest dispatches one decoded request to the matching service
// method: it resolves the service and method, decodes the payload with the
// request's codec, runs the DoPreCall/DoPostCall plugin hooks around the
// invocation, and encodes the reply unless the request is one-way. Any
// failure is converted into an error response via handleError. Requests
// registered as plain functions are delegated to handleRequestForFunction.
func (s *Server) handleRequest(ctx context.Context, request *protocol.Message) (*protocol.Message, error) {
	var err error
	serviceName := request.ServicePath
	methodName := request.ServiceMethod
	response := request.Clone()
	response.SetMessageType(protocol.Response)
	s.serviceMapMu.RLock()
	service, ok := s.serviceMap[serviceName]
	s.serviceMapMu.RUnlock()
	if !ok { // service path was never registered
		err = errors.New(fmt.Sprintf("不能找到服务发现者为%s的服务", serviceName))
		return handleError(response, err)
	}
	methodType, ok := service.method[methodName]
	if !ok { // not a method; maybe it was registered as a plain function
		if _, ok := service.function[methodName]; ok {
			protocol.FreeMsg(response) // response allocated here must be recycled
			return s.handleRequestForFunction(ctx, request)
		}
		err = errors.New(fmt.Sprintf("不能找到服务提供者%s下方法名为%s的方法", serviceName, methodName))
		return handleError(response, err)
	}
	// Argument/reply values come from an object pool; return them on exit.
	requestType := ObjectPool.Get(methodType.requestType)
	defer ObjectPool.Put(methodType.requestType, requestType)
	codec := share.Codecs[request.SerializeType()]
	if codec == nil {
		err = fmt.Errorf("不能找到对应的的序列化方式:%T", request.SerializeType())
		return handleError(response, err)
	}
	err = codec.Decode(request.Payload, requestType)
	if err != nil {
		return handleError(response, err)
	}
	responseType := ObjectPool.Get(methodType.responseType)
	defer ObjectPool.Put(methodType.responseType, responseType)
	requestType, err = s.Plugins.DoPreCall(ctx, serviceName, methodName, requestType)
	if err != nil {
		return handleError(response, err)
	}
	if methodType.requestType.Kind() != reflect.Ptr { // value argument: dereference the pooled pointer
		err = service.call(ctx, methodType, reflect.ValueOf(requestType).Elem(), reflect.ValueOf(responseType))
	} else {
		err = service.call(ctx, methodType, reflect.ValueOf(requestType), reflect.ValueOf(responseType))
	}
	if err != nil {
		return handleError(response, err)
	}
	responseType, err = s.Plugins.DoPostCall(ctx, serviceName, methodName, requestType, responseType)
	if err != nil {
		return handleError(response, err)
	}
	if !request.IsOneway() { // one-way requests carry no reply payload
		data, err := codec.Encode(responseType)
		if err != nil {
			return handleError(response, err)
		}
		response.Payload = data
	}
	return response, nil
}
// 处理函数类型 |
var err error
response := request.Clone()
response.SetMessageType(protocol.Response)
serviceName := request.ServicePath
methodName := request.ServiceMethod
s.serviceMapMu.RLock()
service, ok := s.serviceMap[serviceName]
s.serviceMapMu.RUnlock()
if !ok { // 都没注册直接返回错误
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s的服务", serviceName))
return handleError(response, err)
}
funcType := service.function[serviceName]
if funcType == nil {
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s对应的函数调用%s", serviceName, methodName))
return handleError(response, err)
}
requestType := ObjectPool.Get(funcType.requestType)
defer ObjectPool.Put(funcType.requestType, requestType)
codec := share.Codecs[request.SerializeType()]
if codec == nil {
err = fmt.Errorf("不能找到对应的的序列化方式:%T", request.SerializeType())
return handleError(response, err)
}
err = codec.Decode(request.Payload, requestType)
if err != nil {
return handleError(response, err)
}
responseType := ObjectPool.Get(funcType.responseType)
defer ObjectPool.Put(funcType.responseType, responseType)
requestType, err = s.Plugins.DoPreCall(ctx, serviceName, methodName, requestType)
if err != nil {
return handleError(response, err)
}
if funcType.requestType.Kind() != reflect.Ptr { // 不是指针
err = service.callForFunc(ctx, funcType, reflect.ValueOf(requestType).Elem(), reflect.ValueOf(responseType))
} else {
err = service.callForFunc(ctx, funcType, reflect.ValueOf(requestType), reflect.ValueOf(responseType))
}
if err != nil {
return handleError(response, err)
}
responseType, err = s.Plugins.DoPostCall(ctx, serviceName, methodName, requestType, responseType)
if err != nil {
return handleError(response, err)
}
if !request.IsOneway() {
data, err := codec.Encode(responseType)
if err != nil {
return handleError(response, err)
}
response.Payload = data
}
return response, nil
}
// 鉴权
// auth validates a request with the configured AuthFunc, passing it the
// token stored under share.AuthKey in the request metadata. When no
// AuthFunc is installed, every request is accepted.
func (s *Server) auth(ctx context.Context, request *protocol.Message) error {
	if fn := s.AuthFunc; fn != nil {
		return fn(ctx, request, request.Metadata[share.AuthKey])
	}
	return nil
}
// 暴力关闭服务(生产环境不建议使用,建议使用Shutdown)
// Close force-closes the listener and every active connection without
// waiting for in-flight requests (prefer Shutdown in production). The
// error from the last Close call is returned.
func (s *Server) Close() error {
	s.serviceMapMu.Lock()
	defer s.serviceMapMu.Unlock()

	var lastErr error
	if s.ln != nil {
		lastErr = s.ln.Close()
	}
	for conn := range s.activeConn {
		lastErr = conn.Close()
		delete(s.activeConn, conn)
		s.Plugins.DoPostConnClose(conn)
	}
	return lastErr
}
// 优雅的关闭服务,
// 先关闭tcp监听,使得不再有conn连接进来
// 关闭每个conn的读端,使其不再收客户端数据
// 循环等待服务正在处理的消息数量变为0 (使得所有正在处理的消息都能处理完成)
// 关闭网关服务
// 依次关闭conn的读和写
// Shutdown stops the server gracefully:
//  1. close the listener so no new connections arrive,
//  2. close the read side of every TCP connection so no new requests come in,
//  3. poll once per second until all in-flight messages are handled
//     (or ctx is done),
//  4. close the HTTP gateway if one is running,
//  5. fully close every connection and signal doneChan.
// It runs at most once; subsequent calls return immediately with nil.
func (s *Server) Shutdown(ctx context.Context) error {
	var err error
	if atomic.CompareAndSwapInt32(&s.inShutdown, 0, 1) { // ensure shutdown runs only once
		log.Info("服务开始关闭...")
		// Close the read side first; the write side must stay open until
		// all pending requests have been answered.
		s.connMu.Lock()
		if s.ln != nil {
			s.ln.Close() // stop accepting new connections
		}
		for conn, _ := range s.activeConn {
			if lConn, ok := conn.(*net.TCPConn); ok {
				lConn.CloseRead()
			}
		}
		s.connMu.Unlock()
		ticker := time.NewTicker(time.Second) // drain-polling interval
		defer ticker.Stop()
	outer:
		for {
			if s.checkMsgHandlerFinish() {
				break
			}
			select {
			case <-ctx.Done(): // caller's deadline expired: stop waiting
				break outer
			case <-ticker.C:
			}
		}
		if s.gatewayHttpServer != nil {
			if err = s.closeHTTP1APIGateway(ctx); err != nil {
				log.WarnF("关闭http网关时出错:%v", err)
			} else {
				log.Info("http网关服务已经关闭")
			}
		}
		// Now fully close each connection and notify the close plugins.
		s.connMu.Lock()
		for conn, _ := range s.activeConn {
			conn.Close()
			delete(s.activeConn, conn)
			s.Plugins.DoPostConnClose(conn)
		}
		s.closeDoneChanLocked()
		s.connMu.Unlock()
	}
	return err
}
// 检测服务处理的消息是否处理完成
// checkMsgHandlerFinish reports whether the in-flight message counter has
// drained to zero, logging how many messages remain.
func (s *Server) checkMsgHandlerFinish() bool {
	remaining := atomic.LoadInt32(&s.handlerMsgNum)
	log.InfoF("还需要处理%d条消息", remaining)
	return remaining == 0
}
// 关闭结束通道(如果别的协程已经关闭,则直接返回)
// closeDoneChanLocked closes doneChan exactly once: if another goroutine
// already closed it, the receive case fires and we return without a
// double-close panic.
func (s *Server) closeDoneChanLocked() {
	select {
	case <-s.doneChan:
		return
	default:
		close(s.doneChan)
	}
}
// 监听结束服务事件(terminated)
// startShutdownServe runs a goroutine that waits for SIGTERM and then
// performs a graceful Shutdown, invoking any registered onShutdown hooks
// first.
// NOTE(review): this is called before serveListener assigns s.ln, so
// s.ln.Addr() below may panic on a nil listener — confirm ordering.
func (s *Server) startShutdownServe() {
	go func(s *Server) {
		log.InfoF("rpc listen at %s", s.ln.Addr().String())
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGTERM)
		sg := <-c
		// NOTE(review): matching the signal by its string form ("terminated")
		// is fragile; comparing sg == syscall.SIGTERM would be more robust —
		// confirm before changing.
		if sg.String() == "terminated" {
			if s.onShutdown != nil && len(s.onShutdown) > 0 {
				for _, shutdown := range s.onShutdown {
					shutdown(s)
				}
			}
			err := s.Shutdown(context.Background())
			if err != nil {
				log.Error(err.Error())
			}
		}
	}(s)
}
// 关闭链接
// closeChannel removes conn from the active set and closes it, all under
// connMu so the bookkeeping stays consistent with concurrent accepts.
func (s *Server) closeChannel(conn net.Conn) {
	s.connMu.Lock()
	defer s.connMu.Unlock()
	delete(s.activeConn, conn)
	conn.Close()
}
// 判断服务是否已经关闭了
// isShutdown reports whether Shutdown has been initiated (inShutdown == 1).
func (s *Server) isShutdown() bool {
	return atomic.LoadInt32(&s.inShutdown) == 1
}
// readRequest runs the pre-read plugin hook, takes a pooled message,
// decodes one request from rBuff into it, then runs the post-read hook.
// io.EOF is returned as-is so the caller can treat it as a clean close.
func (s *Server) readRequest(ctx context.Context, rBuff io.Reader) (*protocol.Message, error) {
	// BUG FIX: DoPreReadRequest was invoked twice in a row; plugins with
	// side effects (metrics, rate limiting) fired double per request, and
	// a failure on the second call leaked the pooled message.
	err := s.Plugins.DoPreReadRequest(ctx)
	if err != nil {
		return nil, err
	}
	request := protocol.GetPooledMsg()
	// Decode a single message from the buffered reader.
	err = request.Decode(rBuff)
	if err == io.EOF { // io.EOF means the peer closed the connection
		return request, err
	}
	pErr := s.Plugins.DoPostReadRequest(ctx, request, err)
	if err == nil { // surface a plugin error if decoding itself succeeded
		err = pErr
	}
	return request, err
}
// 处理错误
// handleError marks response as an error message and records err's text in
// the metadata under protocol.ServiceError, returning both so callers can
// write the response to the client and still propagate the error.
func handleError(response *protocol.Message, err error) (*protocol.Message, error) {
	response.SetMessageStatusType(protocol.Error)
	if response.Metadata == nil {
		response.Metadata = make(map[string]string, 10)
	}
	response.Metadata[protocol.ServiceError] = err.Error()
	return response, err
}
| 的
func (s *Server) handleRequestForFunction(ctx context.Context, request *protocol.Message) (*protocol.Message, error) { | conditional_block |
server.go | package server
import (
"avrilko-rpc/log"
"avrilko-rpc/protocol"
"avrilko-rpc/share"
"bufio"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"os/signal"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
var ErrServerClosed = errors.New("主服务已经关闭")
const (
ReadBuffSize = 1024 // 读取消息时候缓冲区大小
)
type contextKey struct {
name string
}
func (c *contextKey) String() string {
return c.name
}
var (
RemoteConnContextKey = &contextKey{"remote_conn"}
StartRequestContextKey = &contextKey{"start-parse-request"}
)
// 核心服务类
type Server struct {
ln net.Listener // 全局唯一的监听(可以多路复用实现不同协议的转发)
readTimeout time.Duration // 读超时
writeTimeout time.Duration // 写超时
gatewayHttpServer *http.Server // 当启用http网关时候被挂载
disableHTTPGateway bool // 是否禁用http网关服务(开启时候方便测试和调试rpc服务)
disableJSONRPCGateway bool // 是否禁用json rpc网关服务
serviceMapMu sync.RWMutex // 服务提供者map读写锁
serviceMap map[string]*service // 服务提供者集合map
connMu sync.RWMutex // 各个活跃连接读写锁
activeConn map[net.Conn]struct{} // 每个活跃的连接,map结构防止重复
doneChan chan struct{} // 服务结束chan
inShutdown int32 //服务是否关闭 1为关闭 0为正在运行
onShutdown []func(s *Server) // 服务结束后执行的钩子函数
tlsConfig *tls.Config // tls证书配置
Plugins PluginContainer // 插件容器(设计核心)
AuthFunc func(ctx context.Context, request *protocol.Message, token string) error // 认证函数
handlerMsgNum int32 // 正在处理的消息数量
}
// 初始化服务
// NewServer creates a Server with its maps, done channel and default plugin
// container initialized, then applies the given option functions in order.
func NewServer(opts ...OptionFunc) *Server {
	server := &Server{
		serviceMapMu: sync.RWMutex{},
		serviceMap:   make(map[string]*service),
		activeConn:   make(map[net.Conn]struct{}),
		doneChan:     make(chan struct{}),
		Plugins:      &pluginContainer{},
	}
	// range over an empty slice is a no-op, so the original
	// `if len(opts) > 0` guard was redundant.
	for _, opt := range opts {
		opt(server)
	}
	return server
}
// 开启服务
func (s *Server) Serve(network, address string) error {
var ln net.Listener
var err error
ln, err = s.makeListener(network, address)
if err != nil {
return err
}
return s.ServeListener(network, ln)
}
// 开启服务
func (s *Server) ServeListener(network string, ln net.Listener) error {
// 开启信号量监听
s.startShutdownServe()
// 开启网关
s.startGateway(network, ln)
return s.serveListener(ln)
}
// 循环监听conn 并发给severConn处理
// serveListener accepts connections in a loop, applies TCP keep-alive
// settings and the accept plugin hook, and hands each accepted connection
// to serveConn on its own goroutine. It returns ErrServerClosed when the
// server shuts down, or the first unrecoverable Accept error.
func (s *Server) serveListener(ln net.Listener) error {
	// Back-off delay used when Accept returns a temporary network error.
	var tempDelay time.Duration
	s.connMu.Lock()
	s.ln = ln
	s.connMu.Unlock()
	for {
		conn, err := ln.Accept()
		if err != nil {
			select {
			case <-s.doneChan:
				return ErrServerClosed
			default:
			}
			// BUG FIX: the temporary-error check must inspect err, not conn.
			// On a failed Accept conn is nil, so the original assertion
			// conn.(net.Error) could never succeed and this retry path was
			// dead code. Mirrors net/http: wait 5ms, doubling up to 1s.
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay = tempDelay * 2
				}
				if tempDelay > time.Second { // over 1s of back-off: give up
					return err
				}
				time.Sleep(tempDelay)
				log.ErrorF("rpc服务接受conn异常,正在重试, 原因%v, sleep %d", err, tempDelay)
				continue
			}
			if strings.Contains(err.Error(), "listener closed") { // server shut down
				return ErrServerClosed
			}
			return err
		}
		// Successful accept: reset the back-off delay.
		tempDelay = 0
		if tc, ok := conn.(*net.TCPConn); ok { // keep TCP links stable
			tc.SetKeepAlive(true)
			tc.SetKeepAlivePeriod(time.Minute * 5) // probe after 5 minutes of silence
			tc.SetLinger(10)                       // flush buffered data in background on close
		}
		conn, ok := s.Plugins.DoPostConnAccept(conn)
		if !ok { // plugin rejected the connection (rate limit, auth, etc.)
			// BUG FIX: the original fell through after closing the rejected
			// connection, registering the closed conn in activeConn and
			// serving it anyway.
			s.closeChannel(conn)
			continue
		}
		s.connMu.Lock()
		s.activeConn[conn] = struct{}{}
		s.connMu.Unlock()
		go s.serveConn(conn)
	}
}
// 开始处理消息
func (s *Server) serveConn(conn net.Conn) {
// 单个conn协程中没有权限影响主进程panic,所有panic会这一层处理
defer func() {
if err := recover(); err != nil { // 发生panic
buf := make([]byte, 65536)
size := runtime.Stack(buf, false)
if size > 65536 {
size = 65536
}
buf = buf[:size]
log.ErrorF("conn 发生panic,原因%s, 客户端地址%s, 堆栈信息 %s", err, conn.RemoteAddr(), buf)
}
s.connMu.Lock()
delete(s.activeConn, conn)
s.connMu.Unlock()
s.Plugins.DoPostConnClose(conn)
}()
// 判断此时服务是否已经关闭
if s.isShutdown() {
s.closeChannel(conn)
return
}
now := time.Now()
// tls连接需要先握手
if tlsL, ok := conn.(*tls.Conn); ok {
if s.readTimeout != 0 {
tlsL.SetReadDeadline(now.Add(s.readTimeout))
}
if s.writeTimeout != 0 {
tlsL.SetWriteDeadline(now.Add(s.writeTimeout))
}
if err := tlsL.Handshake(); err != nil {
log.ErrorF("tls尝试握手失败,原因:%s,addr:", err, tlsL.RemoteAddr())
return
}
}
// 初始化读取缓冲区
rBuff := bufio.NewReaderSize(conn, ReadBuffSize)
for {
// 判断此时服务是否已经关闭
if s.isShutdown() {
s.closeChannel(conn)
return
}
if s.readTimeout != 0 { // 设置读取的超时时间
conn.SetReadDeadline(now.Add(s.readTimeout))
}
ctx := share.WithValue(context.Background(), RemoteConnContextKey, conn)
request, err := s.readRequest(ctx, rBuff)
if err != nil {
if err == io.EOF {
log.InfoF("客户端已经关闭链接c%s", conn.RemoteAddr())
} else if strings.Contains(err.Error(), "use of closed network connection") {
log.InfoF("连接已经被关闭%s", conn.RemoteAddr())
} else {
log.WarnF("rpc 读取数据失败,错误原因%v", err)
}
return
}
// 要开始写入了
if s.writeTimeout != 0 {
conn.SetWriteDeadline(now.Add(s.writeTimeout))
}
// 将开始时间写上下文中
ctx = share.WithLocalValue(ctx, StartRequestContextKey, time.Now().UnixNano())
if !request.IsHeartbeat() { // auth鉴权
err := s.auth(ctx, request)
if err != nil { // 鉴权失败
if !request.IsOneway() { // 需要回复客户端鉴权失败
response := request.Clone() // 复制一个请求出来
response.SetMessageType(protocol.Response) // 设置为response消息
handleError(response, err)
data := response.EncodeSlicePointer()
_, err = conn.Write(*data)
protocol.PutData(data)
s.Plugins.DoPostWriteResponse(ctx, request, response, err)
protocol.FreeMsg(response)
} else { // 不需要回复
s.Plugins.DoPreWriteResponse(ctx, request, nil)
}
protocol.FreeMsg(request)
log.InfoF("连接鉴权失败,%s,错误原因%v", conn.RemoteAddr(), err)
return
}
}
// 下面需要处理消息了噢
go func() {
// 正在处理的消息数量+1
atomic.AddInt32(&s.handlerMsgNum, 1)
// 正在处理消息的数量-1
defer atomic.AddInt32(&s.handlerMsgNum, - 1)
if request.IsHeartbeat() { // 如果是客户端心跳
request.SetMessageType(protocol.Response)
data := request.EncodeSlicePointer()
conn.Write(*data)
protocol.PutData(data)
return
}
// 不是心跳初始化返给客户端的meta
responseMetadata := make(map[string]string)
// 先将服务端的metadata方法进去
ctx = share.WithLocalValue(ctx, share.ReqMetaDataKey, request.Metadata)
// 再将客户端的metadata放进去
ctx = share.WithLocalValue(ctx, share.ResMetaDataKey, responseMetadata)
s.Plugins.DoPreHandleRequest(ctx, request) // 开始处理请求了
response, err := s.handleRequest(ctx, request)
if err != nil {
log.WarnF("处理请求错误: %v", err)
}
s.Plugins.DoPreWriteResponse(ctx, request, response)
if !request.IsOneway() { // 需要回复客户端
// 从ctx中拿出meta信息
responseMetadataCtx := ctx.Value(share.ResMetaDataKey).(map[string]string)
if len(responseMetadata) > 0 {
meta := response.Metadata
if meta == nil {
meta = responseMetadataCtx
} else {
for k, v := range responseMetadataCtx {
if meta[k] == "" {
meta[k] = v
}
}
}
}
if len(response.Payload) > 1024 && request.CompressType() != protocol.None {
response.SetCompressType(request.CompressType())
}
data := response.EncodeSlicePointer()
conn.Write(*data)
protocol.PutData(data)
}
s.Plugins.DoPostWriteResponse(ctx, request, response, err)
protocol.FreeMsg(response)
protocol.FreeMsg(request)
}()
}
}
// 处理单个请求
func (s *Server) handleRequest(ctx context.Context, request *protocol.Message) (*protocol.Message, error) {
var err error
serviceName := request.ServicePath
methodName := request.ServiceMethod
response := request.Clone()
response.SetMessageType(protocol.Response)
s.serviceMapMu.RLock()
service, ok := s.serviceMap[serviceName]
s.serviceMapMu.RUnlock()
if !ok { // 都没注册直接返回错误
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s的服务", serviceName))
return handleError(response, err)
}
methodType, ok := service.method[methodName]
if !ok { // 看看是否注册了函数的调用
if _, ok := service.function[methodName]; ok {
protocol.FreeMsg(response) // 这里创建对象要回收的
return s.handleRequestForFunction(ctx, request)
}
err = errors.New(fmt.Sprintf("不能找到服务提供者%s下方法名为%s的方法", serviceName, methodName))
return handleError(response, err)
}
requestType := ObjectPool.Get(methodType.requestType)
defer ObjectPool.Put(methodType.requestType, requestType)
codec := share.Codecs[request.SerializeType()]
if codec == nil {
err = fmt.Errorf("不能找到对应的的序列化方式:%T", request.SerializeType())
return handleError(response, err)
}
err = codec.Decode(request.Payload, requestType)
if err != nil {
return handleError(response, err)
}
responseType := ObjectPool.Get(methodType.responseType)
defer ObjectPool.Put(methodType.responseType, responseType)
requestType, err = s.Plugins.DoPreCall(ctx, serviceName, methodName, requestType)
if err != nil {
return handleError(response, err)
}
if methodType.requestType.Kind() != reflect.Ptr { // 不是指针
err = service.call(ctx, methodType, reflect.ValueOf(requestType).Elem(), reflect.ValueOf(responseType))
} else {
err = service.call(ctx, methodType, reflect.ValueOf(requestType), reflect.ValueOf(responseType))
}
if err != nil {
return handleError(response, err)
}
responseType, err = s.Plugins.DoPostCall(ctx, serviceName, methodName, requestType, responseType)
if err != nil {
return handleError(response, err)
}
if !request.IsOneway() {
data, err := codec.Encode(responseType)
if err != nil {
return handleError(response, err)
}
response.Payload = data
}
return response, nil
}
// 处理函数类型的
func (s *Server) handleRequestForFunction(ctx context.Context, request *protocol.Message) (*protocol.Message, error) {
var err error
response := request.Clone()
response.SetMessageType(protocol.Response)
serviceName := request.ServicePath
methodName := request.ServiceMethod
s.serviceMapMu.RLock()
service, ok := s.serviceMap[serviceName]
s.serviceMapMu.RUnlock()
if !ok { // 都没注册直接返回错误
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s的服务", serviceName))
return handleError(response, err)
}
funcType := service.function[serviceName]
if funcType == nil {
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s对应的函数调用%s", serviceName, methodName))
return handleError(response, err)
}
requestType := ObjectPool.Get(funcType.requestType)
defer ObjectPool.Put(funcType.requestType, requestType)
codec := share.Codecs[request.SerializeType()]
if codec == nil {
err = fmt.Errorf("不能找到对应的的序列化方式:%T", request.SerializeType())
return handleError(response, err)
}
err = codec.Decode(request.Payload, requestType)
if err != nil {
return handleError(response, err)
}
responseType := ObjectPool.Get(funcType.responseType) |
requestType, err = s.Plugins.DoPreCall(ctx, serviceName, methodName, requestType)
if err != nil {
return handleError(response, err)
}
if funcType.requestType.Kind() != reflect.Ptr { // 不是指针
err = service.callForFunc(ctx, funcType, reflect.ValueOf(requestType).Elem(), reflect.ValueOf(responseType))
} else {
err = service.callForFunc(ctx, funcType, reflect.ValueOf(requestType), reflect.ValueOf(responseType))
}
if err != nil {
return handleError(response, err)
}
responseType, err = s.Plugins.DoPostCall(ctx, serviceName, methodName, requestType, responseType)
if err != nil {
return handleError(response, err)
}
if !request.IsOneway() {
data, err := codec.Encode(responseType)
if err != nil {
return handleError(response, err)
}
response.Payload = data
}
return response, nil
}
// 鉴权
func (s *Server) auth(ctx context.Context, request *protocol.Message) error {
if s.AuthFunc == nil {
return nil
}
token := request.Metadata[share.AuthKey]
return s.AuthFunc(ctx, request, token)
}
// 暴力关闭服务(生产环境不建议使用,建议使用Shutdown)
func (s *Server) Close() error {
s.serviceMapMu.Lock()
defer s.serviceMapMu.Unlock()
var err error
if s.ln != nil {
err = s.ln.Close()
}
for conn, _ := range s.activeConn {
err = conn.Close()
delete(s.activeConn, conn)
s.Plugins.DoPostConnClose(conn)
}
return err
}
// 优雅的关闭服务,
// 先关闭tcp监听,使得不再有conn连接进来
// 关闭每个conn的读端,使其不再收客户端数据
// 循环等待服务正在处理的消息数量变为0 (使得所有正在处理的消息都能处理完成)
// 关闭网关服务
// 依次关闭conn的读和写
func (s *Server) Shutdown(ctx context.Context) error {
var err error
if atomic.CompareAndSwapInt32(&s.inShutdown, 0, 1) { // 保证结束进程只执行一次
log.Info("服务开始关闭...")
// 先关闭tcp链接的读端(写端要等所有请求都结束后才能关闭)
s.connMu.Lock()
if s.ln != nil {
s.ln.Close() // 关闭监听
}
for conn, _ := range s.activeConn {
if lConn, ok := conn.(*net.TCPConn); ok {
lConn.CloseRead()
}
}
s.connMu.Unlock()
ticker := time.NewTicker(time.Second) // 监听间隔
defer ticker.Stop()
outer:
for {
if s.checkMsgHandlerFinish() {
break
}
select {
case <-ctx.Done():
break outer
case <-ticker.C:
}
}
if s.gatewayHttpServer != nil {
if err = s.closeHTTP1APIGateway(ctx); err != nil {
log.WarnF("关闭http网关时出错:%v", err)
} else {
log.Info("http网关服务已经关闭")
}
}
s.connMu.Lock()
for conn, _ := range s.activeConn {
conn.Close()
delete(s.activeConn, conn)
s.Plugins.DoPostConnClose(conn)
}
s.closeDoneChanLocked()
s.connMu.Unlock()
}
return err
}
// 检测服务处理的消息是否处理完成
func (s *Server) checkMsgHandlerFinish() bool {
size := atomic.LoadInt32(&s.handlerMsgNum)
log.InfoF("还需要处理%d条消息", size)
return size == 0
}
// 关闭结束通道(如果别的协程已经关闭,则直接返回)
func (s *Server) closeDoneChanLocked() {
select {
case <-s.doneChan:
return
default:
close(s.doneChan)
}
}
// 监听结束服务事件(terminated)
func (s *Server) startShutdownServe() {
go func(s *Server) {
log.InfoF("rpc listen at %s", s.ln.Addr().String())
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM)
sg := <-c
if sg.String() == "terminated" {
if s.onShutdown != nil && len(s.onShutdown) > 0 {
for _, shutdown := range s.onShutdown {
shutdown(s)
}
}
err := s.Shutdown(context.Background())
if err != nil {
log.Error(err.Error())
}
}
}(s)
}
// 关闭链接
func (s *Server) closeChannel(conn net.Conn) {
s.connMu.Lock()
defer s.connMu.Unlock()
delete(s.activeConn, conn)
conn.Close()
}
// 判断服务是否已经关闭了
func (s *Server) isShutdown() bool {
return atomic.LoadInt32(&s.inShutdown) == 1
}
// readRequest runs the pre-read plugin hook, takes a pooled message,
// decodes one request from rBuff into it, then runs the post-read hook.
// io.EOF is returned as-is so the caller can treat it as a clean close.
func (s *Server) readRequest(ctx context.Context, rBuff io.Reader) (*protocol.Message, error) {
	// BUG FIX: DoPreReadRequest was invoked twice in a row; plugins with
	// side effects (metrics, rate limiting) fired double per request, and
	// a failure on the second call leaked the pooled message.
	err := s.Plugins.DoPreReadRequest(ctx)
	if err != nil {
		return nil, err
	}
	request := protocol.GetPooledMsg()
	// Decode a single message from the buffered reader.
	err = request.Decode(rBuff)
	if err == io.EOF { // io.EOF means the peer closed the connection
		return request, err
	}
	pErr := s.Plugins.DoPostReadRequest(ctx, request, err)
	if err == nil { // surface a plugin error if decoding itself succeeded
		err = pErr
	}
	return request, err
}
// 处理错误
func handleError(response *protocol.Message, err error) (*protocol.Message, error) {
response.SetMessageStatusType(protocol.Error)
if response.Metadata == nil {
response.Metadata = make(map[string]string, 10)
}
response.Metadata[protocol.ServiceError] = err.Error()
return response, err
} | defer ObjectPool.Put(funcType.responseType, responseType) | random_line_split |
server.go | package server
import (
"avrilko-rpc/log"
"avrilko-rpc/protocol"
"avrilko-rpc/share"
"bufio"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"os/signal"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
var ErrServerClosed = errors.New("主服务已经关闭")
const (
ReadBuffSize = 1024 // 读取消息时候缓冲区大小
)
type contextKey struct {
name string
}
func (c *contextKey) String() string {
return c.name
}
var (
RemoteConnContextKey = &contextKey{"remote_conn"}
StartRequestContextKey = &contextKey{"start-parse-request"}
)
// 核心服务类
type Server struct {
ln net.Listener // 全局唯一的监听(可以多路复用实现不同协议的转发)
readTimeout time.Duration // 读超时
writeTimeout time.Duration // 写超时
gatewayHttpServer *http.Server // 当启用http网关时候被挂载
disableHTTPGateway bool // 是否禁用http网关服务(开启时候方便测试和调试rpc服务)
disableJSONRPCGateway bool // 是否禁用json rpc网关服务
serviceMapMu sync.RWMutex // 服务提供者map读写锁
serviceMap map[string]*service // 服务提供者集合map
connMu sync.RWMutex // 各个活跃连接读写锁
activeConn map[net.Conn]struct{} // 每个活跃的连接,map结构防止重复
doneChan chan struct{} // 服务结束chan
inShutdown int32 //服务是否关闭 1为关闭 0为正在运行
onShutdown []func(s *Server) // 服务结束后执行的钩子函数
tlsConfig *tls.Config // tls证书配置
Plugins PluginContainer // 插件容器(设计核心)
AuthFunc func(ctx context.Context, request *protocol.Message, token string) error // 认证函数
handlerMsgNum int32 // 正在处理的消息数量
}
// 初始化服务
func NewServer(opts ...OptionFunc) *Server {
server := &Server{
serviceMapMu: sync.RWMutex{},
serviceMap: make(map[string]*service),
activeConn: make(map[net.Conn]struct{}),
doneChan: make(chan struct{}),
Plugins: &pluginContainer{},
}
if len(opts) > 0 {
for _, opt := range opts {
opt(server)
}
}
return server
}
// 开启服务
func (s *Server) Serve(network, address string) error {
var ln net.Listener
var err error
ln, err = s.makeListener(network, address)
if err != nil {
return err
}
return s.ServeListener(network, ln)
}
// 开启服务
func (s *Server) ServeListener(network string, ln net.Listener) error {
// 开启信号量监听
s.startShutdownServe()
// 开启网关
s.startGateway(network, ln)
return s.serveListener(ln)
}
// 循环监听conn 并发给sev | n处理
func (s *Server) serveListener(ln net.Listener) error {
// 定义临时错误的延迟时间
var tempDelay time.Duration
s.connMu.Lock()
s.ln = ln
s.connMu.Unlock()
for {
conn, err := ln.Accept()
if err != nil {
select {
case <-s.doneChan:
return ErrServerClosed
default:
}
// 如果错误断言为网络错误,且是一个临时的(比如当时网络环境差,dns服务器不稳定引起的),稍后可能会自动恢复的
// 不能直接返回错误,应该等待一段时间才返回错误
// 等待的时间为上一次的两倍最大等待1s
// 参考官方http包实现的
if ne, ok := conn.(net.Error); ok && ne.Temporary() {
if tempDelay == 0 {
tempDelay = 5 * time.Millisecond
} else {
tempDelay = tempDelay * 2
}
if tempDelay > time.Second { // 大于1秒直接返回错误
return err
}
time.Sleep(tempDelay)
log.ErrorF("rpc服务接受conn异常,正在重试, 原因%v, sleep %d", err, tempDelay)
continue
}
if strings.Contains(err.Error(), "listener closed") { // 服务关闭
return ErrServerClosed
}
return err
}
// 成功请求延迟时间置为0
tempDelay = 0
if tc, ok := conn.(*net.TCPConn); ok { // tcp请求需要设置keepAlive保证链接的稳定性能
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(time.Minute * 5) // 5分钟没有响应报错
tc.SetLinger(10) // 关闭连接的行为 设置数据在断开时候也能在后台发送
}
conn, ok := s.Plugins.DoPostConnAccept(conn)
if !ok { // 不允许链接则关闭(可能是限流没通过,验证没通过,业务方面的自己用插件扩展...)
s.closeChannel(conn)
}
s.connMu.Lock()
s.activeConn[conn] = struct{}{}
s.connMu.Unlock()
go s.serveConn(conn)
}
}
// 开始处理消息
func (s *Server) serveConn(conn net.Conn) {
// 单个conn协程中没有权限影响主进程panic,所有panic会这一层处理
defer func() {
if err := recover(); err != nil { // 发生panic
buf := make([]byte, 65536)
size := runtime.Stack(buf, false)
if size > 65536 {
size = 65536
}
buf = buf[:size]
log.ErrorF("conn 发生panic,原因%s, 客户端地址%s, 堆栈信息 %s", err, conn.RemoteAddr(), buf)
}
s.connMu.Lock()
delete(s.activeConn, conn)
s.connMu.Unlock()
s.Plugins.DoPostConnClose(conn)
}()
// 判断此时服务是否已经关闭
if s.isShutdown() {
s.closeChannel(conn)
return
}
now := time.Now()
// tls连接需要先握手
if tlsL, ok := conn.(*tls.Conn); ok {
if s.readTimeout != 0 {
tlsL.SetReadDeadline(now.Add(s.readTimeout))
}
if s.writeTimeout != 0 {
tlsL.SetWriteDeadline(now.Add(s.writeTimeout))
}
if err := tlsL.Handshake(); err != nil {
log.ErrorF("tls尝试握手失败,原因:%s,addr:", err, tlsL.RemoteAddr())
return
}
}
// 初始化读取缓冲区
rBuff := bufio.NewReaderSize(conn, ReadBuffSize)
for {
// 判断此时服务是否已经关闭
if s.isShutdown() {
s.closeChannel(conn)
return
}
if s.readTimeout != 0 { // 设置读取的超时时间
conn.SetReadDeadline(now.Add(s.readTimeout))
}
ctx := share.WithValue(context.Background(), RemoteConnContextKey, conn)
request, err := s.readRequest(ctx, rBuff)
if err != nil {
if err == io.EOF {
log.InfoF("客户端已经关闭链接c%s", conn.RemoteAddr())
} else if strings.Contains(err.Error(), "use of closed network connection") {
log.InfoF("连接已经被关闭%s", conn.RemoteAddr())
} else {
log.WarnF("rpc 读取数据失败,错误原因%v", err)
}
return
}
// 要开始写入了
if s.writeTimeout != 0 {
conn.SetWriteDeadline(now.Add(s.writeTimeout))
}
// 将开始时间写上下文中
ctx = share.WithLocalValue(ctx, StartRequestContextKey, time.Now().UnixNano())
if !request.IsHeartbeat() { // auth鉴权
err := s.auth(ctx, request)
if err != nil { // 鉴权失败
if !request.IsOneway() { // 需要回复客户端鉴权失败
response := request.Clone() // 复制一个请求出来
response.SetMessageType(protocol.Response) // 设置为response消息
handleError(response, err)
data := response.EncodeSlicePointer()
_, err = conn.Write(*data)
protocol.PutData(data)
s.Plugins.DoPostWriteResponse(ctx, request, response, err)
protocol.FreeMsg(response)
} else { // 不需要回复
s.Plugins.DoPreWriteResponse(ctx, request, nil)
}
protocol.FreeMsg(request)
log.InfoF("连接鉴权失败,%s,错误原因%v", conn.RemoteAddr(), err)
return
}
}
// 下面需要处理消息了噢
go func() {
// 正在处理的消息数量+1
atomic.AddInt32(&s.handlerMsgNum, 1)
// 正在处理消息的数量-1
defer atomic.AddInt32(&s.handlerMsgNum, - 1)
if request.IsHeartbeat() { // 如果是客户端心跳
request.SetMessageType(protocol.Response)
data := request.EncodeSlicePointer()
conn.Write(*data)
protocol.PutData(data)
return
}
// 不是心跳初始化返给客户端的meta
responseMetadata := make(map[string]string)
// 先将服务端的metadata方法进去
ctx = share.WithLocalValue(ctx, share.ReqMetaDataKey, request.Metadata)
// 再将客户端的metadata放进去
ctx = share.WithLocalValue(ctx, share.ResMetaDataKey, responseMetadata)
s.Plugins.DoPreHandleRequest(ctx, request) // 开始处理请求了
response, err := s.handleRequest(ctx, request)
if err != nil {
log.WarnF("处理请求错误: %v", err)
}
s.Plugins.DoPreWriteResponse(ctx, request, response)
if !request.IsOneway() { // 需要回复客户端
// 从ctx中拿出meta信息
responseMetadataCtx := ctx.Value(share.ResMetaDataKey).(map[string]string)
if len(responseMetadata) > 0 {
meta := response.Metadata
if meta == nil {
meta = responseMetadataCtx
} else {
for k, v := range responseMetadataCtx {
if meta[k] == "" {
meta[k] = v
}
}
}
}
if len(response.Payload) > 1024 && request.CompressType() != protocol.None {
response.SetCompressType(request.CompressType())
}
data := response.EncodeSlicePointer()
conn.Write(*data)
protocol.PutData(data)
}
s.Plugins.DoPostWriteResponse(ctx, request, response, err)
protocol.FreeMsg(response)
protocol.FreeMsg(request)
}()
}
}
// 处理单个请求
func (s *Server) handleRequest(ctx context.Context, request *protocol.Message) (*protocol.Message, error) {
var err error
serviceName := request.ServicePath
methodName := request.ServiceMethod
response := request.Clone()
response.SetMessageType(protocol.Response)
s.serviceMapMu.RLock()
service, ok := s.serviceMap[serviceName]
s.serviceMapMu.RUnlock()
if !ok { // 都没注册直接返回错误
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s的服务", serviceName))
return handleError(response, err)
}
methodType, ok := service.method[methodName]
if !ok { // 看看是否注册了函数的调用
if _, ok := service.function[methodName]; ok {
protocol.FreeMsg(response) // 这里创建对象要回收的
return s.handleRequestForFunction(ctx, request)
}
err = errors.New(fmt.Sprintf("不能找到服务提供者%s下方法名为%s的方法", serviceName, methodName))
return handleError(response, err)
}
requestType := ObjectPool.Get(methodType.requestType)
defer ObjectPool.Put(methodType.requestType, requestType)
codec := share.Codecs[request.SerializeType()]
if codec == nil {
err = fmt.Errorf("不能找到对应的的序列化方式:%T", request.SerializeType())
return handleError(response, err)
}
err = codec.Decode(request.Payload, requestType)
if err != nil {
return handleError(response, err)
}
responseType := ObjectPool.Get(methodType.responseType)
defer ObjectPool.Put(methodType.responseType, responseType)
requestType, err = s.Plugins.DoPreCall(ctx, serviceName, methodName, requestType)
if err != nil {
return handleError(response, err)
}
if methodType.requestType.Kind() != reflect.Ptr { // 不是指针
err = service.call(ctx, methodType, reflect.ValueOf(requestType).Elem(), reflect.ValueOf(responseType))
} else {
err = service.call(ctx, methodType, reflect.ValueOf(requestType), reflect.ValueOf(responseType))
}
if err != nil {
return handleError(response, err)
}
responseType, err = s.Plugins.DoPostCall(ctx, serviceName, methodName, requestType, responseType)
if err != nil {
return handleError(response, err)
}
if !request.IsOneway() {
data, err := codec.Encode(responseType)
if err != nil {
return handleError(response, err)
}
response.Payload = data
}
return response, nil
}
// 处理函数类型的
func (s *Server) handleRequestForFunction(ctx context.Context, request *protocol.Message) (*protocol.Message, error) {
var err error
response := request.Clone()
response.SetMessageType(protocol.Response)
serviceName := request.ServicePath
methodName := request.ServiceMethod
s.serviceMapMu.RLock()
service, ok := s.serviceMap[serviceName]
s.serviceMapMu.RUnlock()
if !ok { // 都没注册直接返回错误
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s的服务", serviceName))
return handleError(response, err)
}
funcType := service.function[serviceName]
if funcType == nil {
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s对应的函数调用%s", serviceName, methodName))
return handleError(response, err)
}
requestType := ObjectPool.Get(funcType.requestType)
defer ObjectPool.Put(funcType.requestType, requestType)
codec := share.Codecs[request.SerializeType()]
if codec == nil {
err = fmt.Errorf("不能找到对应的的序列化方式:%T", request.SerializeType())
return handleError(response, err)
}
err = codec.Decode(request.Payload, requestType)
if err != nil {
return handleError(response, err)
}
responseType := ObjectPool.Get(funcType.responseType)
defer ObjectPool.Put(funcType.responseType, responseType)
requestType, err = s.Plugins.DoPreCall(ctx, serviceName, methodName, requestType)
if err != nil {
return handleError(response, err)
}
if funcType.requestType.Kind() != reflect.Ptr { // 不是指针
err = service.callForFunc(ctx, funcType, reflect.ValueOf(requestType).Elem(), reflect.ValueOf(responseType))
} else {
err = service.callForFunc(ctx, funcType, reflect.ValueOf(requestType), reflect.ValueOf(responseType))
}
if err != nil {
return handleError(response, err)
}
responseType, err = s.Plugins.DoPostCall(ctx, serviceName, methodName, requestType, responseType)
if err != nil {
return handleError(response, err)
}
if !request.IsOneway() {
data, err := codec.Encode(responseType)
if err != nil {
return handleError(response, err)
}
response.Payload = data
}
return response, nil
}
// 鉴权
func (s *Server) auth(ctx context.Context, request *protocol.Message) error {
if s.AuthFunc == nil {
return nil
}
token := request.Metadata[share.AuthKey]
return s.AuthFunc(ctx, request, token)
}
// 暴力关闭服务(生产环境不建议使用,建议使用Shutdown)
func (s *Server) Close() error {
s.serviceMapMu.Lock()
defer s.serviceMapMu.Unlock()
var err error
if s.ln != nil {
err = s.ln.Close()
}
for conn, _ := range s.activeConn {
err = conn.Close()
delete(s.activeConn, conn)
s.Plugins.DoPostConnClose(conn)
}
return err
}
// 优雅的关闭服务,
// 先关闭tcp监听,使得不再有conn连接进来
// 关闭每个conn的读端,使其不再收客户端数据
// 循环等待服务正在处理的消息数量变为0 (使得所有正在处理的消息都能处理完成)
// 关闭网关服务
// 依次关闭conn的读和写
func (s *Server) Shutdown(ctx context.Context) error {
var err error
if atomic.CompareAndSwapInt32(&s.inShutdown, 0, 1) { // 保证结束进程只执行一次
log.Info("服务开始关闭...")
// 先关闭tcp链接的读端(写端要等所有请求都结束后才能关闭)
s.connMu.Lock()
if s.ln != nil {
s.ln.Close() // 关闭监听
}
for conn, _ := range s.activeConn {
if lConn, ok := conn.(*net.TCPConn); ok {
lConn.CloseRead()
}
}
s.connMu.Unlock()
ticker := time.NewTicker(time.Second) // 监听间隔
defer ticker.Stop()
outer:
for {
if s.checkMsgHandlerFinish() {
break
}
select {
case <-ctx.Done():
break outer
case <-ticker.C:
}
}
if s.gatewayHttpServer != nil {
if err = s.closeHTTP1APIGateway(ctx); err != nil {
log.WarnF("关闭http网关时出错:%v", err)
} else {
log.Info("http网关服务已经关闭")
}
}
s.connMu.Lock()
for conn, _ := range s.activeConn {
conn.Close()
delete(s.activeConn, conn)
s.Plugins.DoPostConnClose(conn)
}
s.closeDoneChanLocked()
s.connMu.Unlock()
}
return err
}
// 检测服务处理的消息是否处理完成
func (s *Server) checkMsgHandlerFinish() bool {
size := atomic.LoadInt32(&s.handlerMsgNum)
log.InfoF("还需要处理%d条消息", size)
return size == 0
}
// 关闭结束通道(如果别的协程已经关闭,则直接返回)
func (s *Server) closeDoneChanLocked() {
select {
case <-s.doneChan:
return
default:
close(s.doneChan)
}
}
// 监听结束服务事件(terminated)
func (s *Server) startShutdownServe() {
go func(s *Server) {
log.InfoF("rpc listen at %s", s.ln.Addr().String())
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM)
sg := <-c
if sg.String() == "terminated" {
if s.onShutdown != nil && len(s.onShutdown) > 0 {
for _, shutdown := range s.onShutdown {
shutdown(s)
}
}
err := s.Shutdown(context.Background())
if err != nil {
log.Error(err.Error())
}
}
}(s)
}
// 关闭链接
func (s *Server) closeChannel(conn net.Conn) {
s.connMu.Lock()
defer s.connMu.Unlock()
delete(s.activeConn, conn)
conn.Close()
}
// 判断服务是否已经关闭了
func (s *Server) isShutdown() bool {
return atomic.LoadInt32(&s.inShutdown) == 1
}
func (s *Server) readRequest(ctx context.Context, rBuff io.Reader) (*protocol.Message, error) {
var err error
err = s.Plugins.DoPreReadRequest(ctx)
if err != nil {
return nil, err
}
request := protocol.GetPooledMsg()
err = s.Plugins.DoPreReadRequest(ctx)
if err != nil {
return nil, err
}
// 开始解码
err = request.Decode(rBuff)
if err == io.EOF { // io.EOF代表读完了
return request, err
}
pErr := s.Plugins.DoPostReadRequest(ctx, request, err)
if err == nil { // 看看插件里面的调用会报什么错误
err = pErr
}
return request, err
}
// 处理错误
func handleError(response *protocol.Message, err error) (*protocol.Message, error) {
response.SetMessageStatusType(protocol.Error)
if response.Metadata == nil {
response.Metadata = make(map[string]string, 10)
}
response.Metadata[protocol.ServiceError] = err.Error()
return response, err
}
| erCon | identifier_name |
server.go | package server
import (
"avrilko-rpc/log"
"avrilko-rpc/protocol"
"avrilko-rpc/share"
"bufio"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"os/signal"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
var ErrServerClosed = errors.New("主服务已经关闭")
const (
ReadBuffSize = 1024 // 读取消息时候缓冲区大小
)
type contextKey struct {
name string
}
func (c *contextKey) String() string {
return c.name
}
var (
RemoteConnContextKey = &contextKey{"remote_conn"}
StartRequestContextKey = &contextKey{"start-parse-request"}
)
// 核心服务类
type Server struct {
ln net.Listener // 全局唯一的监听(可以多路复用实现不同协议的转发)
readTimeout time.Duration // 读超时
writeTimeout time.Duration // 写超时
gatewayHttpServer *http.Server // 当启用http网关时候被挂载
disableHTTPGateway bool // 是否禁用http网关服务(开启时候方便测试和调试rpc服务)
disableJSONRPCGateway bool // 是否禁用json rpc网关服务
serviceMapMu sync.RWMutex // 服务提供者map读写锁
serviceMap map[string]*service // 服务提供者集合map
connMu sync.RWMutex // 各个活跃连接读写锁
activeConn map[net.Conn]struct{} // 每个活跃的连接,map结构防止重复
doneChan chan struct{} // 服务结束chan
inShutdown int32 //服务是否关闭 1为关闭 0为正在运行
onShutdown []func(s *Server) // 服务结束后执行的钩子函数
tlsConfig *tls.Config // tls证书配置
Plugins PluginContainer // 插件容器(设计核心)
AuthFunc func(ctx context.Context, request *protocol.Message, token string) error // 认证函数
handlerMsgNum int32 // 正在处理的消息数量
}
// 初始化服务
func NewServer(opts ...OptionFunc) *Server {
server := &Server{
serviceMapMu: sync.RWMutex{},
serviceMap: make(map[string]*service),
activeConn: make(map[net.Conn]struct{}),
doneChan: make(chan struct{}),
Plugins: &pluginContainer{},
}
if len(opts) > 0 {
for _, opt := range opts {
opt(server)
}
}
return server
}
// 开启服务
func (s *Server) Serve(network, address string) error {
var ln net.Listener
var err error
ln, err = s.makeListener(network, address)
if err != nil {
return err
}
return s.ServeListener(network, ln)
}
// 开启服务
func (s *Server) ServeListener(network string, ln net.Listener) error {
// 开启信号量监听
s.startShutdownServe()
// 开启网关
s.startGateway(network, ln)
return s.serveListener(ln)
}
// 循环监听conn 并发给severConn处理
func (s *Server) serveListener(ln net.Listener) error {
// 定义临时错误的延迟时间
var tempDelay time.Duration
s.connMu.Lock()
s.ln = ln
s.connMu.Unlock()
for {
conn, err := ln.Accept()
if err != nil {
select {
case <-s.doneChan:
return ErrServerClosed
default:
}
// 如果错误断言为网络错误,且是一个临时的(比如当时网络环境差,dns服务器不稳定引起的),稍后可能会自动恢复的
// 不能直接返回错误,应该等待一段时间才返回错误
// 等待的时间为上一次的两倍最大等待1s
// 参考官方http包实现的
if ne, ok := conn.(net.Error); ok && ne.Temporary() {
if tempDelay == 0 {
tempDelay = 5 * time.Millisecond
} else {
tempDelay = tempDelay * 2
}
if tempDelay > time.Second { // 大于1秒直接返回错误
return err
}
time.Sleep(tempDelay)
log.ErrorF("rpc服务接受conn异常,正在重试, 原因%v, sleep %d", err, tempDelay)
continue
}
if strings.Contains(err.Error(), "listener closed") { // 服务关闭
return ErrServerClosed
}
return err
}
// 成功请求延迟时间置为0
tempDelay = 0
if tc, ok := conn.(*net.TCPConn); ok { // tcp请求需要设置keepAlive保证链接的稳定性能
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(time.Minute * 5) // 5分钟没有响应报错
tc.SetLinger(10) // 关闭连接的行为 设置数据在断开时候也能在后台发送
}
conn, ok := s.Plugins.DoPostConnAccept(conn)
if !ok { // 不允许链接则关闭(可能是限流没通过,验证没通过,业务方面的自己用插件扩展...)
s.closeChannel(conn)
}
s.connMu.Lock()
s.activeConn[conn] = struct{}{}
s.connMu.Unlock()
go s.serveConn(conn)
}
}
// 开始处理消息
func (s *Server) serveConn(conn net.Conn) {
// 单个conn协程中没有权限影响主进程panic,所有panic会这一层处理
defer func() {
if err := recover(); err != nil { // 发生panic
buf := make([]byte, 65536)
size := runtime.Stack(buf, false)
if size > 65536 {
size = 65536
}
buf = buf[:size]
log.ErrorF("conn 发生panic,原因%s, 客户端地址%s, 堆栈信息 %s", err, conn.RemoteAddr(), buf)
}
s.connMu.Lock()
delete(s.activeConn, conn)
s.connMu.Unlock()
s.Plugins.DoPostConnClose(conn)
}()
// 判断此时服务是否已经关闭
if s.isShutdown() {
s.closeChannel(conn)
return
}
now := time.Now()
// tls连接需要先握手
if tlsL, ok := conn.(*tls.Conn); ok {
if s.readTimeout != 0 {
tlsL.SetReadDeadline(now.Add(s.readTimeout))
}
if s.writeTimeout != 0 {
tlsL.SetWriteDeadline(now.Add(s.writeTimeout))
}
if err := tlsL.Handshake(); err != nil {
log.ErrorF("tls尝试握手失败,原因:%s,addr:", err, tlsL.RemoteAddr())
return
}
}
// 初始化读取缓冲区
rBuff := bufio.NewReaderSize(conn, ReadBuffSize)
for {
// 判断此时服务是否已经关闭
if s.isShutdown() {
s.closeChannel(conn)
return
}
if s.readTimeout != 0 { // 设置读取的超时时间
conn.SetReadDeadline(now.Add(s.readTimeout))
}
ctx := share.WithValue(context.Background(), RemoteConnContextKey, conn)
request, err := s.readRequest(ctx, rBuff)
if err != nil {
if err == io.EOF {
log.InfoF("客户端已经关闭链接c%s", conn.RemoteAddr())
} else if strings.Contains(err.Error(), "use of closed network connection") {
log.InfoF("连接已经被关闭%s", conn.RemoteAddr())
} else {
log.WarnF("rpc 读取数据失败,错误原因%v", err)
}
return
}
// 要开始写入了
if s.writeTimeout != 0 {
conn.SetWriteDeadline(now.Add(s.writeTimeout))
}
// 将开始时间写上下文中
ctx = share.WithLocalValue(ctx, StartRequestContextKey, time.Now().UnixNano())
if !request.IsHeartbeat() { // auth鉴权
err := s.auth(ctx, request)
if err != nil { // 鉴权失败
if !request.IsOneway() { // 需要回复客户端鉴权失败
response := request.Clone() // 复制一个请求出来
response.SetMessageType(protocol.Response) // 设置为response消息
handleError(response, err)
data := response.EncodeSlicePointer()
_, err = conn.Write(*data)
protocol.PutData(data)
s.Plugins.DoPostWriteResponse(ctx, request, response, err)
protocol.FreeMsg(response)
} else { // 不需要回复
s.Plugins.DoPreWriteResponse(ctx, request, nil)
}
protocol.FreeMsg(request)
log.InfoF("连接鉴权失败,%s,错误原因%v", conn.RemoteAddr(), err)
return
}
}
// 下面需要处理消息了噢
go func() {
// 正在处理的消息数量+1
atomic.AddInt32(&s.handlerMsgNum, 1)
// 正在处理消息的数量-1
defer atomic.AddInt32(&s.handlerMsgNum, - 1)
if request.IsHeartbeat() { // 如果是客户端心跳
request.SetMessageType(protocol.Response)
data := request.EncodeSlicePointer()
conn.Write(*data)
protocol.PutData(data)
return
}
// 不是心跳初始化返给客户端的meta
responseMetadata := make(map[string]string)
// 先将服务端的metadata方法进去
ctx = share.WithLocalValue(ctx, share.ReqMetaDataKey, request.Metadata)
// 再将客户端的metadata放进去
ctx = share.WithLocalValue(ctx, share.ResMetaDataKey, responseMetadata)
s.Plugins.DoPreHandleRequest(ctx, request) // 开始处理请求了
response, err := s.handleRequest(ctx, request)
if err != nil {
log.WarnF("处理请求错误: %v", err)
}
s.Plugins.DoPreWriteResponse(ctx, request, response)
if !request.IsOneway() { // 需要回复客户端
// 从ctx中拿出meta信息
responseMetadataCtx := ctx.Value(share.ResMetaDataKey).(map[string]string)
if len(responseMetadata) > 0 {
meta := response.Metadata
if meta == nil {
meta = responseMetadataCtx
} else {
for k, v := range responseMetadataCtx {
if meta[k] == "" {
meta[k] = v
}
}
}
}
if len(response.Payload) > 1024 && request.CompressType() != protocol.None {
response.SetCompressType(request.CompressType())
}
data := response.EncodeSlicePointer()
conn.Write(*data)
protocol.PutData(data)
}
s.Plugins.DoPostWriteResponse(ctx, request, response, err)
protocol.FreeMsg(response)
protocol.FreeMsg(request)
}()
}
}
// 处理单个请求
func (s *Server) handleRequest(ctx context.Context, request *protocol.Message) (*protocol.Message, error) {
var err error
serviceName := request.ServicePath
methodName := request.ServiceMethod
response := request.Clone()
response.SetMessageType(protocol.Response)
s.serviceMapMu.RLock()
service, ok := s.serviceMap[serviceName]
s.serviceMapMu.RUnlock()
if !ok { // 都没注册直接返回错误
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s的服务", serviceName))
return handleError(response, err)
}
methodType, ok := service.method[methodName]
if !ok { // 看看是否注册了函数的调用
if _, ok := service.function[methodName]; ok {
protocol.FreeMsg(response) // 这里创建对象要回收的
return s.handleRequestForFunction(ctx, request)
}
err = errors.New(fmt.Sprintf("不能找到服务提供者%s下方法名为%s的方法", serviceName, methodName))
return handleError(response, err)
}
requestType := ObjectPool.Get(methodType.requestType)
defer ObjectPool.Put(methodType.requestType, requestType)
codec := share.Codecs[request.SerializeType()]
if codec == nil {
err = fmt.Errorf("不能找到对应的的序列化方式:%T", request.SerializeType())
return handleError(response, err)
}
err = codec.Decode(request.Payload, requestType)
if err != nil {
return handleError(response, err)
}
responseType := ObjectPool.Get(methodType.responseType)
defer ObjectPool.Put(methodType.responseType, responseType)
requestType, err = s.Plugins.DoPreCall(ctx, serviceName, methodName, requestType)
if err != nil {
return handleError(response, err)
}
if methodType.requestType.Kind() != reflect.Ptr { // 不是指针
err = service.call(ctx, methodType, reflect.ValueOf(requestType).Elem(), reflect.ValueOf(responseType))
} else {
err = service.call(ctx, methodType, reflect.ValueOf(requestType), reflect.ValueOf(responseType))
}
if err != nil {
return handleError(response, err)
}
responseType, err = s.Plugins.DoPostCall(ctx, serviceName, methodName, requestType, responseType)
if err != nil {
return handleError(response, err)
}
if !request.IsOneway() {
data, err := codec.Encode(responseType)
if err != nil {
return handleError(response, err)
}
response.Payload = data
}
return response, nil
}
// 处理函数类型的
func (s *Server) handleRequestForFunction(ctx context.Context, request *protocol.Message) (*protocol.Message, error) {
var err error
response := request.Clone()
response.SetMessageType(protocol.Response)
serviceName := request.ServicePath
methodName := request.ServiceMethod
s.serviceMapMu.RLock()
service, ok := s.serviceMap[serviceName]
s.serviceMapMu.RUnlock()
if !ok { // 都没注册直接返回错误
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s的服务", serviceName))
return handleError(response, err)
}
funcType := service.function[serviceName]
if funcType == nil {
err = errors.New(fmt.Sprintf("不能找到服务发现者为%s对应的函数调用%s", serviceName, methodName))
return handleError(response, err)
}
requestType := ObjectPool.Get(funcType.requestType)
defer ObjectPool.Put(funcType.requestType, requestType)
codec := share.Codecs[request.SerializeType()]
if codec == nil {
err = fmt.Errorf("不能找到对应的的序列化方式:%T", request.SerializeType())
return handleError(response, err)
}
err = codec.Decode(request.Payload, requestType)
if err != nil {
return handleError(response, err)
}
responseType := ObjectPool.Get(funcType.responseType)
defer ObjectPool.Put(funcType.responseType, responseType)
requestType, err = s.Plugins.DoPreCall(ctx, serviceName, methodName, requestType)
if err != nil {
return handleError(response, err)
}
if funcType.requestType.Kind() != reflect.Ptr { // 不是指针
err = service.callForFunc(ctx, funcType, reflect.ValueOf(requestType).Elem(), reflect.ValueOf(responseType))
} else {
err = service.callForFunc(ctx, funcType, reflect.ValueOf(requestType), reflect.ValueOf(responseType))
}
if err != nil {
return handleError(response, err)
}
responseType, err = s.Plugins.DoPostCall(ctx, serviceName, methodName, requestType, responseType)
if err != nil {
return handleError(response, err)
}
if !request.IsOneway() {
data, err := codec.Encode(responseType)
if err != nil {
return handleError(response, err)
}
response.Payload = data
}
return response, nil
}
// 鉴权
func (s *Server) auth(ctx context.Context, request *protocol.Message) error {
if s.AuthFunc == nil {
return nil
}
token := request.Metadata[share.AuthKey]
return s.AuthFunc(ctx, request, token)
}
// 暴力关闭服务(生产环境不建议使用,建议使用Shutdown)
func (s *Server) Close() error {
s.serviceMapMu.Lock()
defer s.serviceMapMu.Unlock()
var err error
if s.ln != nil {
err = s.ln.Close()
}
for conn, _ := range s.activeConn {
err = conn.Close()
delete(s.activeConn, conn)
s.Plugins.DoPostConnClose(conn)
}
return err
}
// 优雅的关闭服务,
// 先关闭tcp监听,使得不再有conn连接进来
// 关闭每个conn的读端,使其不再收客户端数据
// 循环等待服务正在处理的消息数量变为0 (使得所有正在处理的消息都能处理完成)
// 关闭网关服务
// 依次关闭conn的读和写
func (s *Server) Shutdown(ctx context.Context) error {
var err error
if atomic.CompareAndSwapInt32(&s.inShutdown, 0, 1) { // 保证结束进程只执行一次
log.Info("服务开始关闭...")
// 先关闭tcp链接的读端(写端要等所有请求都结束后才能关闭)
s.connMu.Lock()
if s.ln != nil {
s.ln.Close() // 关闭监听
}
for conn, _ := range s.activeConn {
if lConn, ok := conn.(*net.TCPConn); ok {
lConn.CloseRead()
}
}
s.connMu.Unlock()
ticker := time.NewTicker(time.Second) // 监听间隔
defer ticker.Stop()
outer:
for {
if s.checkMsgHandlerFinish() {
break
}
select {
case <-ctx.Done():
break outer
case <-ticker.C:
}
}
if s.gatewayHttpServer != nil {
if err = s.closeHTTP1APIGateway(ctx); err != nil {
log.WarnF("关闭http网关时出错:%v", err)
} else {
log.Info("http网关服务已经关闭")
}
}
s.connMu.Lock()
for conn, _ := range s.activeConn {
conn.Close()
delete(s.activeConn, conn)
s.Plugins.DoPostConnClose(conn)
}
s.closeDoneChanLocked()
s.connMu.Unlock()
}
return err
}
// 检测服务处理的消息是否处理完成
func (s *Server) checkMsgHandlerFinish() bool {
size := atomic.LoadInt32(&s.handlerMsgNum)
log.InfoF("还需要处理%d条消息", size)
return size == 0
}
// 关闭结束通道(如果别的协程已经关闭,则直接返回)
func (s *Server) closeDoneChanLocked() {
select {
case <-s.doneChan:
return
default:
close(s.doneChan)
}
}
// 监听结束服务事件(terminated)
func (s *Server) startShutdownServe() {
go func(s *Server) {
log.InfoF("rpc listen at %s", s.ln.Addr().String())
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM)
sg := <-c
if sg.String() == "terminated" {
if s.onShutdown != nil && len(s.onShutdown) > 0 {
for _, shutdown := range s.onShutdown {
shutdown(s)
}
}
err := s.Shutdown(context.Background())
if err != nil {
log.Error(err.Error())
}
}
}(s)
}
// 关闭链接
func (s *Server) closeChannel(conn net.Conn) {
s.connMu.Lock()
defer s.connMu.Unlock()
delete(s.activeConn, conn)
conn.Close()
}
// 判断服务是否已经关闭了
func (s *Server) isShutdown() bool {
return atomic.LoadInt32(&s.inShutdown) == 1
}
func (s *Server) readRequest(ctx context.Context, rBuff io.Reader) (*protocol.Message, error) {
var err error
err = s.Plugins.DoPreReadRequest(ctx)
if err != nil {
return nil, err
}
request := protocol.GetPooledMsg()
err = s.Plugins.DoPreReadRequest(ctx)
if err != nil {
return nil, err
}
// 开始解码
err = request.Decode(rBuff)
if err == io.EOF { // io.EOF代表读完了
return request, err
}
pErr := s.Plugins.DoPostReadRequest(ctx, request, err)
if err == nil { // 看看插件里面的调用会报什么错误
err = pErr
}
return request, err
}
// 处理错误
func handleError(response *protocol.Message, err error) (*protocol.Message, error) {
response.SetMessageStatusType(protocol.Error)
if response.Metadata == nil {
response.Metadata = make(map[string]string, 10)
}
response.Metadata[protocol.ServiceError] = err.Error()
return response, err
}
| identifier_body | ||
prefilter.rs | use core::{
cmp,
fmt::Debug,
panic::{RefUnwindSafe, UnwindSafe},
u8,
};
use alloc::{sync::Arc, vec, vec::Vec};
use crate::{
packed,
util::{
alphabet::ByteSet,
search::{Match, MatchKind, Span},
},
};
/// A prefilter for accelerating a search.
///
/// This crate uses prefilters in the core search implementations to accelerate
/// common cases. They typically only apply to cases where there are a small
/// number of patterns (less than 100 or so), but when they do, thoughput can
/// be boosted considerably, perhaps by an order of magnitude. When a prefilter
/// is active, it is used whenever a search enters an automaton's start state.
///
/// Currently, prefilters cannot be constructed by
/// callers. A `Prefilter` can only be accessed via the
/// [`Automaton::prefilter`](crate::automaton::Automaton::prefilter)
/// method and used to execute a search. In other words, a prefilter can be
/// used to optimize your own search implementation if necessary, but cannot do
/// much else. If you have a use case for more APIs, please submit an issue.
#[derive(Clone, Debug)]
pub struct Prefilter {
finder: Arc<dyn PrefilterI>,
memory_usage: usize,
}
impl Prefilter {
/// Execute a search in the haystack within the span given. If a match or
/// a possible match is returned, then it is guaranteed to occur within
/// the bounds of the span.
///
/// If the span provided is invalid for the given haystack, then behavior
/// is unspecified.
#[inline]
pub fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
self.finder.find_in(haystack, span)
}
#[inline]
pub(crate) fn memory_usage(&self) -> usize {
self.memory_usage
}
}
/// A candidate is the result of running a prefilter on a haystack at a
/// particular position.
///
/// The result is either no match, a confirmed match or a possible match.
///
/// When no match is returned, the prefilter is guaranteeing that no possible
/// match can be found in the haystack, and the caller may trust this. That is,
/// all correct prefilters must never report false negatives.
///
/// In some cases, a prefilter can confirm a match very quickly, in which case,
/// the caller may use this to stop what it's doing and report the match. In
/// this case, prefilter implementations must never report a false positive.
/// In other cases, the prefilter can only report a potential match, in which
/// case the callers must attempt to confirm the match. In this case, prefilter
/// implementations are permitted to return false positives.
#[derive(Clone, Debug)]
pub enum Candidate {
/// No match was found. Since false negatives are not possible, this means
/// the search can quit as it is guaranteed not to find another match.
None,
/// A confirmed match was found. Callers do not need to confirm it.
Match(Match),
/// The start of a possible match was found. Callers must confirm it before
/// reporting it as a match.
PossibleStartOfMatch(usize),
}
impl Candidate {
/// Convert this candidate into an option. This is useful when callers
/// do not distinguish between true positives and false positives (i.e.,
/// the caller must always confirm the match).
pub fn into_option(self) -> Option<usize> {
match self {
Candidate::None => None,
Candidate::Match(ref m) => Some(m.start()),
Candidate::PossibleStartOfMatch(start) => Some(start),
}
}
}
/// A prefilter describes the behavior of fast literal scanners for quickly
/// skipping past bytes in the haystack that we know cannot possibly
/// participate in a match.
trait PrefilterI:
Send + Sync + RefUnwindSafe + UnwindSafe + Debug + 'static
{
/// Returns the next possible match candidate. This may yield false
/// positives, so callers must confirm a match starting at the position
/// returned. This, however, must never produce false negatives. That is,
/// this must, at minimum, return the starting position of the next match
/// in the given haystack after or at the given position.
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate;
}
impl<P: PrefilterI + ?Sized> PrefilterI for Arc<P> {
#[inline(always)]
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
(**self).find_in(haystack, span)
}
}
/// A builder for constructing the best possible prefilter. When constructed,
/// this builder will heuristically select the best prefilter it can build,
/// if any, and discard the rest.
#[derive(Debug)]
pub(crate) struct Builder {
count: usize,
ascii_case_insensitive: bool,
start_bytes: StartBytesBuilder,
rare_bytes: RareBytesBuilder,
memmem: MemmemBuilder,
packed: Option<packed::Builder>,
// If we run across a condition that suggests we shouldn't use a prefilter
// at all (like an empty pattern), then disable prefilters entirely.
enabled: bool,
}
impl Builder {
    /// Create a new builder for constructing the best possible prefilter.
    pub(crate) fn new(kind: MatchKind) -> Builder {
        // Packed search only supports certain match kinds; `as_packed`
        // yields `None` otherwise, in which case no packed builder is made.
        let pbuilder = kind
            .as_packed()
            .map(|kind| packed::Config::new().match_kind(kind).builder());
        Builder {
            count: 0,
            ascii_case_insensitive: false,
            start_bytes: StartBytesBuilder::new(),
            rare_bytes: RareBytesBuilder::new(),
            memmem: MemmemBuilder::default(),
            packed: pbuilder,
            enabled: true,
        }
    }
    /// Enable ASCII case insensitivity. When set, byte strings added to this
    /// builder will be interpreted without respect to ASCII case.
    pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder {
        self.ascii_case_insensitive = yes;
        // Propagate the setting to the case-aware sub-builders.
        self.start_bytes = self.start_bytes.ascii_case_insensitive(yes);
        self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes);
        self
    }
    /// Return a prefilter suitable for quickly finding potential matches.
    ///
    /// All patterns added to an Aho-Corasick automaton should be added to this
    /// builder before attempting to construct the prefilter.
    pub(crate) fn build(&self) -> Option<Prefilter> {
        if !self.enabled {
            return None;
        }
        // If we only have one pattern, then deferring to memmem is always
        // the best choice. This is kind of a weird case, because, well, why
        // use Aho-Corasick if you only have one pattern? But maybe you don't
        // know exactly how many patterns you'll get up front, and you need to
        // support the option of multiple patterns. So instead of relying on
        // the caller to branch and use memmem explicitly, we just do it for
        // them.
        if !self.ascii_case_insensitive {
            if let Some(pre) = self.memmem.build() {
                return Some(pre);
            }
        }
        match (self.start_bytes.build(), self.rare_bytes.build()) {
            // If we could build both start and rare prefilters, then there are
            // a few cases in which we'd want to use the start-byte prefilter
            // over the rare-byte prefilter, since the former has lower
            // overhead.
            (prestart @ Some(_), prerare @ Some(_)) => {
                // If the start-byte prefilter can scan for a smaller number
                // of bytes than the rare-byte prefilter, then it's probably
                // faster.
                let has_fewer_bytes =
                    self.start_bytes.count < self.rare_bytes.count;
                // Otherwise, if the combined frequency rank of the detected
                // bytes in the start-byte prefilter is "close" to the combined
                // frequency rank of the rare-byte prefilter, then we pick
                // the start-byte prefilter even if the rare-byte prefilter
                // heuristically searches for rare bytes. This is because the
                // rare-byte prefilter has higher constant costs, so we tend to
                // prefer the start-byte prefilter when we can.
                let has_rarer_bytes =
                    self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50;
                if has_fewer_bytes || has_rarer_bytes {
                    prestart
                } else {
                    prerare
                }
            }
            (prestart @ Some(_), None) => prestart,
            (None, prerare @ Some(_)) => prerare,
            (None, None) if self.ascii_case_insensitive => None,
            (None, None) => {
                // Last resort: packed multi-pattern search, if it was
                // configured for this match kind and can be built.
                self.packed.as_ref().and_then(|b| b.build()).map(|s| {
                    let memory_usage = s.memory_usage();
                    Prefilter { finder: Arc::new(Packed(s)), memory_usage }
                })
            }
        }
    }
    /// Add a literal string to this prefilter builder.
    pub(crate) fn add(&mut self, bytes: &[u8]) {
        // An empty pattern matches at every position, so no prefilter could
        // ever safely skip anything. Disable prefilters entirely.
        if bytes.is_empty() {
            self.enabled = false;
        }
        if !self.enabled {
            return;
        }
        self.count += 1;
        // Feed the pattern to every candidate strategy; `build` chooses the
        // best one at the end.
        self.start_bytes.add(bytes);
        self.rare_bytes.add(bytes);
        self.memmem.add(bytes);
        if let Some(ref mut pbuilder) = self.packed {
            pbuilder.add(bytes);
        }
    }
}
/// A type that wraps a packed searcher and implements the `Prefilter`
/// interface.
///
/// Every hit from a packed searcher is a confirmed match (see `find_in`
/// below, which reports `Candidate::Match`), so callers need no confirmation
/// step.
#[derive(Clone, Debug)]
struct Packed(packed::Searcher);
impl PrefilterI for Packed {
    /// Delegate to the packed searcher. A hit from a packed search is a real
    /// match, so it is reported as `Candidate::Match` (no confirmation
    /// needed by the caller).
    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
        // `haystack` is already a `&[u8]`; pass it through directly instead
        // of taking a redundant `&` borrow (clippy::needless_borrow).
        self.0
            .find_in(haystack, span)
            .map_or(Candidate::None, Candidate::Match)
    }
}
/// A builder for constructing a prefilter that uses memmem.
///
/// This builder only produces a prefilter when exactly one pattern has been
/// added; otherwise `build` returns `None` (see `add`, which clears `one`
/// as soon as a second pattern arrives).
#[derive(Debug, Default)]
struct MemmemBuilder {
    /// The number of patterns that have been added.
    count: usize,
    /// The singular pattern to search for. This is only set when count==1.
    one: Option<Vec<u8>>,
}
impl MemmemBuilder {
    /// Build the memmem prefilter, which is only available when exactly one
    /// pattern was added and the required crate features are enabled.
    fn build(&self) -> Option<Prefilter> {
        #[cfg(all(feature = "std", feature = "perf-literal"))]
        fn imp(builder: &MemmemBuilder) -> Option<Prefilter> {
            // Bail unless there is a singular pattern to search for.
            let pattern = builder.one.as_ref()?;
            assert_eq!(1, builder.count);
            let finder = Arc::new(Memmem(
                memchr::memmem::Finder::new(pattern).into_owned(),
            ));
            Some(Prefilter { finder, memory_usage: pattern.len() })
        }
        #[cfg(not(all(feature = "std", feature = "perf-literal")))]
        fn imp(_: &MemmemBuilder) -> Option<Prefilter> {
            None
        }
        imp(self)
    }
    /// Record a pattern. The stored pattern survives only while it remains
    /// the one and only pattern added.
    fn add(&mut self, bytes: &[u8]) {
        self.count += 1;
        self.one =
            if self.count == 1 { Some(bytes.to_vec()) } else { None };
    }
}
/// A type that wraps a SIMD accelerated single substring search from the
/// `memchr` crate for use as a prefilter.
///
/// Currently, this prefilter is only active for Aho-Corasick searchers with
/// a single pattern. In theory, this could be extended to support searchers
/// that have a common prefix of more than one byte (for one byte, we would use
/// memchr), but it's not clear if it's worth it or not.
///
/// Also, unfortunately, this currently also requires the 'std' feature to
/// be enabled. That's because memchr doesn't have a no-std-but-with-alloc
/// mode, and so APIs like Finder::into_owned aren't available when 'std' is
/// disabled. But there should be an 'alloc' feature that brings in APIs like
/// Finder::into_owned but doesn't use std-only features like runtime CPU
/// feature detection.
///
/// The wrapped `Finder<'static>` owns its needle (built via `into_owned` in
/// `MemmemBuilder::build`), so this type carries no borrowed data.
#[cfg(all(feature = "std", feature = "perf-literal"))]
#[derive(Clone, Debug)]
struct Memmem(memchr::memmem::Finder<'static>);
#[cfg(all(feature = "std", feature = "perf-literal"))]
impl PrefilterI for Memmem {
    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
        use crate::util::primitives::PatternID;
        // `find` returns an index relative to `&haystack[span]`, so translate
        // it back into an absolute haystack offset.
        self.0.find(&haystack[span]).map_or(Candidate::None, |i| {
            let start = span.start + i;
            let end = start + self.0.needle().len();
            // N.B. We can declare a match and use a fixed pattern ID here
            // because a Memmem prefilter is only ever created for searchers
            // with exactly one pattern. Thus, every match is always a match
            // and it is always for the first and only pattern.
            Candidate::Match(Match::new(PatternID::ZERO, start..end))
        })
    }
}
/// A builder for constructing a rare byte prefilter.
///
/// A rare byte prefilter attempts to pick out a small set of rare bytes that
/// occurr in the patterns, and then quickly scan to matches of those rare
/// bytes.
#[derive(Clone, Debug)]
struct | {
/// Whether this prefilter should account for ASCII case insensitivity or
/// not.
ascii_case_insensitive: bool,
/// A set of rare bytes, indexed by byte value.
rare_set: ByteSet,
/// A set of byte offsets associated with bytes in a pattern. An entry
/// corresponds to a particular bytes (its index) and is only non-zero if
/// the byte occurred at an offset greater than 0 in at least one pattern.
///
/// If a byte's offset is not representable in 8 bits, then the rare bytes
/// prefilter becomes inert.
byte_offsets: RareByteOffsets,
/// Whether this is available as a prefilter or not. This can be set to
/// false during construction if a condition is seen that invalidates the
/// use of the rare-byte prefilter.
available: bool,
/// The number of bytes set to an active value in `byte_offsets`.
count: usize,
/// The sum of frequency ranks for the rare bytes detected. This is
/// intended to give a heuristic notion of how rare the bytes are.
rank_sum: u16,
}
/// A set of byte offsets, keyed by byte.
#[derive(Clone, Copy)]
struct RareByteOffsets {
    /// Each entry corresponds to the maximum offset of the corresponding
    /// byte across all patterns seen. Indexed by byte value; always 256
    /// entries.
    set: [RareByteOffset; 256],
}
impl RareByteOffsets {
    /// Create a new empty set of rare byte offsets.
    pub(crate) fn empty() -> RareByteOffsets {
        RareByteOffsets { set: [RareByteOffset::default(); 256] }
    }
    /// Record the given offset for the given byte. The stored offset only
    /// ever grows: it becomes the maximum of the existing offset and
    /// `off.max`. (Note: unlike what an older comment here claimed, this
    /// returns nothing.)
    pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) {
        self.set[byte as usize].max =
            cmp::max(self.set[byte as usize].max, off.max);
    }
}
impl core::fmt::Debug for RareByteOffsets {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let mut offsets = vec![];
for off in self.set.iter() {
if off.max > 0 {
offsets.push(off);
}
}
f.debug_struct("RareByteOffsets").field("set", &offsets).finish()
}
}
/// Offsets associated with an occurrence of a "rare" byte in any of the
/// patterns used to construct a single Aho-Corasick automaton.
#[derive(Clone, Copy, Debug)]
struct RareByteOffset {
    /// The maximum offset at which a particular byte occurs from the start
    /// of any pattern. This is used as a shift amount. That is, when an
    /// occurrence of this byte is found, the candidate position reported by
    /// the prefilter is `position_of_byte - max`, such that the automaton
    /// will begin its search at a position that is guaranteed to observe a
    /// match.
    ///
    /// To avoid accidentally quadratic behavior, a prefilter is considered
    /// ineffective when it is asked to start scanning from a position that it
    /// has already scanned past.
    ///
    /// Using a `u8` here means that if we ever see a pattern that's longer
    /// than 255 bytes, then the entire rare byte prefilter is disabled.
    ///
    /// The default of `0` means "no shift": a candidate is reported at the
    /// position of the matching byte itself.
    max: u8,
}
impl Default for RareByteOffset {
    fn default() -> RareByteOffset {
        // A zero shift: candidates are reported at the matching byte itself.
        RareByteOffset { max: 0 }
    }
}
impl RareByteOffset {
    /// Create a new rare byte offset. If the given offset is too big, then
    /// None is returned. In that case, callers should render the rare bytes
    /// prefilter inert.
    fn new(max: usize) -> Option<RareByteOffset> {
        // `u8::try_from` fails exactly when `max > u8::MAX`, matching the
        // documented contract of this constructor.
        u8::try_from(max).ok().map(|max| RareByteOffset { max })
    }
}
impl RareBytesBuilder {
    /// Create a new builder for constructing a rare byte prefilter.
    fn new() -> RareBytesBuilder {
        RareBytesBuilder {
            ascii_case_insensitive: false,
            rare_set: ByteSet::empty(),
            byte_offsets: RareByteOffsets::empty(),
            available: true,
            count: 0,
            rank_sum: 0,
        }
    }
    /// Enable ASCII case insensitivity. When set, byte strings added to this
    /// builder will be interpreted without respect to ASCII case.
    fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder {
        self.ascii_case_insensitive = yes;
        self
    }
    /// Build the rare bytes prefilter.
    ///
    /// If there are more than 3 distinct rare bytes found, or if heuristics
    /// otherwise determine that this prefilter should not be used, then `None`
    /// is returned.
    fn build(&self) -> Option<Prefilter> {
        #[cfg(feature = "perf-literal")]
        fn imp(builder: &RareBytesBuilder) -> Option<Prefilter> {
            if !builder.available || builder.count > 3 {
                return None;
            }
            // Gather the (at most 3) rare bytes from the byte set into a
            // small array.
            let (mut bytes, mut len) = ([0; 3], 0);
            for b in 0..=255 {
                if builder.rare_set.contains(b) {
                    bytes[len] = b as u8;
                    len += 1;
                }
            }
            // Choose the memchr/memchr2/memchr3 based searcher matching the
            // number of rare bytes found.
            let finder: Arc<dyn PrefilterI> = match len {
                0 => return None,
                1 => Arc::new(RareBytesOne {
                    byte1: bytes[0],
                    offset: builder.byte_offsets.set[bytes[0] as usize],
                }),
                2 => Arc::new(RareBytesTwo {
                    offsets: builder.byte_offsets,
                    byte1: bytes[0],
                    byte2: bytes[1],
                }),
                3 => Arc::new(RareBytesThree {
                    offsets: builder.byte_offsets,
                    byte1: bytes[0],
                    byte2: bytes[1],
                    byte3: bytes[2],
                }),
                _ => unreachable!(),
            };
            Some(Prefilter { finder, memory_usage: 0 })
        }
        #[cfg(not(feature = "perf-literal"))]
        fn imp(_: &RareBytesBuilder) -> Option<Prefilter> {
            None
        }
        imp(self)
    }
    /// Add a byte string to this builder.
    ///
    /// All patterns added to an Aho-Corasick automaton should be added to this
    /// builder before attempting to construct the prefilter.
    fn add(&mut self, bytes: &[u8]) {
        // If we've already given up, then do nothing.
        if !self.available {
            return;
        }
        // If we've already blown our budget, then don't waste time looking
        // for more rare bytes.
        if self.count > 3 {
            self.available = false;
            return;
        }
        // If the pattern is too long, then our offset table is bunk, so
        // give up.
        if bytes.len() >= 256 {
            self.available = false;
            return;
        }
        // Seed the "rarest byte so far" with the first byte; an empty
        // pattern contributes nothing.
        let mut rarest = match bytes.get(0) {
            None => return,
            Some(&b) => (b, freq_rank(b)),
        };
        // The idea here is to look for the rarest byte in each pattern, and
        // add that to our set. As a special exception, if we see a byte that
        // we've already added, then we immediately stop and choose that byte,
        // even if there's another rare byte in the pattern. This helps us
        // apply the rare byte optimization in more cases by attempting to pick
        // bytes that are in common between patterns. So for example, if we
        // were searching for `Sherlock` and `lockjaw`, then this would pick
        // `k` for both patterns, resulting in the use of `memchr` instead of
        // `memchr2` for `k` and `j`.
        let mut found = false;
        for (pos, &b) in bytes.iter().enumerate() {
            self.set_offset(pos, b);
            if found {
                continue;
            }
            if self.rare_set.contains(b) {
                found = true;
                continue;
            }
            let rank = freq_rank(b);
            if rank < rarest.1 {
                rarest = (b, rank);
            }
        }
        if !found {
            self.add_rare_byte(rarest.0);
        }
    }
    /// Record `pos` as an observed offset for `byte` (and, when ASCII case
    /// insensitivity is enabled, for its opposite-case counterpart too).
    fn set_offset(&mut self, pos: usize, byte: u8) {
        // This unwrap is OK because pos is never bigger than our max.
        let offset = RareByteOffset::new(pos).unwrap();
        self.byte_offsets.set(byte, offset);
        if self.ascii_case_insensitive {
            self.byte_offsets.set(opposite_ascii_case(byte), offset);
        }
    }
    /// Add `byte` to the rare byte set (and its opposite-case counterpart
    /// when ASCII case insensitivity is enabled).
    fn add_rare_byte(&mut self, byte: u8) {
        self.add_one_rare_byte(byte);
        if self.ascii_case_insensitive {
            self.add_one_rare_byte(opposite_ascii_case(byte));
        }
    }
    /// Add a single byte to the rare byte set, updating the count and the
    /// frequency rank sum, but only if the byte isn't already present.
    fn add_one_rare_byte(&mut self, byte: u8) {
        if !self.rare_set.contains(byte) {
            self.rare_set.add(byte);
            self.count += 1;
            self.rank_sum += freq_rank(byte) as u16;
        }
    }
}
/// A prefilter for scanning for a single "rare" byte.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesOne {
    /// The single rare byte to scan for.
    byte1: u8,
    /// The maximum offset at which `byte1` was observed in any pattern; used
    /// to shift a hit back to a possible match start.
    offset: RareByteOffset,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesOne {
    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
        // `memchr` returns an index relative to the span, so translate it
        // back into an absolute haystack offset before shifting.
        memchr::memchr(self.byte1, &haystack[span])
            .map(|i| {
                let pos = span.start + i;
                // Shift back by the byte's maximum pattern offset so the
                // automaton starts early enough to observe a whole match,
                // but never before the start of the span.
                cmp::max(
                    span.start,
                    pos.saturating_sub(usize::from(self.offset.max)),
                )
            })
            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
    }
}
/// A prefilter for scanning for two "rare" bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesTwo {
    /// Per-byte maximum offsets, indexed by the byte that actually matched.
    offsets: RareByteOffsets,
    /// The first rare byte to scan for.
    byte1: u8,
    /// The second rare byte to scan for.
    byte2: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesTwo {
    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
        memchr::memchr2(self.byte1, self.byte2, &haystack[span])
            .map(|i| {
                let pos = span.start + i;
                // The shift depends on which of the two bytes matched, so
                // look up the offset for the byte found at `pos`.
                let offset = self.offsets.set[usize::from(haystack[pos])].max;
                cmp::max(span.start, pos.saturating_sub(usize::from(offset)))
            })
            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
    }
}
/// A prefilter for scanning for three "rare" bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesThree {
    /// Per-byte maximum offsets, indexed by the byte that actually matched.
    offsets: RareByteOffsets,
    /// The first rare byte to scan for.
    byte1: u8,
    /// The second rare byte to scan for.
    byte2: u8,
    /// The third rare byte to scan for.
    byte3: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesThree {
    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
        memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span])
            .map(|i| {
                let pos = span.start + i;
                // The shift depends on which of the three bytes matched, so
                // look up the offset for the byte found at `pos`.
                let offset = self.offsets.set[usize::from(haystack[pos])].max;
                cmp::max(span.start, pos.saturating_sub(usize::from(offset)))
            })
            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
    }
}
/// A builder for constructing a starting byte prefilter.
///
/// A starting byte prefilter is a simplistic prefilter that looks for possible
/// matches by reporting all positions corresponding to a particular byte. This
/// generally only takes effect when there are at most 3 distinct possible
/// starting bytes. e.g., the patterns `foo`, `bar`, and `baz` have two
/// distinct starting bytes (`f` and `b`), and this prefilter returns all
/// occurrences of either `f` or `b`.
///
/// In some cases, a heuristic frequency analysis may determine that it would
/// be better not to use this prefilter even when there are 3 or fewer distinct
/// starting bytes.
#[derive(Clone, Debug)]
struct StartBytesBuilder {
    /// Whether this prefilter should account for ASCII case insensitivity or
    /// not.
    ascii_case_insensitive: bool,
    /// The set of starting bytes observed. Indexed by byte value; always 256
    /// entries.
    byteset: Vec<bool>,
    /// The number of bytes set to true in `byteset`.
    count: usize,
    /// The sum of frequency ranks for the rare bytes detected. This is
    /// intended to give a heuristic notion of how rare the bytes are.
    rank_sum: u16,
}
impl StartBytesBuilder {
    /// Create a new builder for constructing a start byte prefilter.
    fn new() -> StartBytesBuilder {
        StartBytesBuilder {
            ascii_case_insensitive: false,
            byteset: vec![false; 256],
            count: 0,
            rank_sum: 0,
        }
    }
    /// Enable ASCII case insensitivity. When set, byte strings added to this
    /// builder will be interpreted without respect to ASCII case.
    fn ascii_case_insensitive(mut self, yes: bool) -> StartBytesBuilder {
        self.ascii_case_insensitive = yes;
        self
    }
    /// Build the starting bytes prefilter.
    ///
    /// If there are more than 3 distinct starting bytes, or if heuristics
    /// otherwise determine that this prefilter should not be used, then `None`
    /// is returned.
    fn build(&self) -> Option<Prefilter> {
        #[cfg(feature = "perf-literal")]
        fn imp(builder: &StartBytesBuilder) -> Option<Prefilter> {
            if builder.count > 3 {
                return None;
            }
            // Gather the (at most 3) observed starting bytes into a small
            // array.
            let (mut bytes, mut len) = ([0; 3], 0);
            for b in 0..256 {
                if !builder.byteset[b] {
                    continue;
                }
                // We don't handle non-ASCII bytes for now. Getting non-ASCII
                // bytes right is trickier, since we generally don't want to
                // put a leading UTF-8 code unit into a prefilter that isn't
                // ASCII, since such bytes tend to occur too frequently in
                // UTF-8 text for the prefilter to be effective. Instead, it
                // would be better to use a continuation byte, but this
                // requires more sophisticated analysis of the automaton and
                // a richer prefilter API.
                if b > 0x7F {
                    return None;
                }
                bytes[len] = b as u8;
                len += 1;
            }
            // Choose the memchr/memchr2/memchr3 based searcher matching the
            // number of distinct starting bytes.
            let finder: Arc<dyn PrefilterI> = match len {
                0 => return None,
                1 => Arc::new(StartBytesOne { byte1: bytes[0] }),
                2 => Arc::new(StartBytesTwo {
                    byte1: bytes[0],
                    byte2: bytes[1],
                }),
                3 => Arc::new(StartBytesThree {
                    byte1: bytes[0],
                    byte2: bytes[1],
                    byte3: bytes[2],
                }),
                _ => unreachable!(),
            };
            Some(Prefilter { finder, memory_usage: 0 })
        }
        #[cfg(not(feature = "perf-literal"))]
        fn imp(_: &StartBytesBuilder) -> Option<Prefilter> {
            None
        }
        imp(self)
    }
    /// Add a byte string to this builder.
    ///
    /// All patterns added to an Aho-Corasick automaton should be added to this
    /// builder before attempting to construct the prefilter.
    fn add(&mut self, bytes: &[u8]) {
        // Once we're over budget there's no point in tracking more bytes.
        if self.count > 3 {
            return;
        }
        if let Some(&byte) = bytes.get(0) {
            self.add_one_byte(byte);
            if self.ascii_case_insensitive {
                self.add_one_byte(opposite_ascii_case(byte));
            }
        }
    }
    /// Mark `byte` as a possible starting byte, updating the count and the
    /// frequency rank sum, but only if it wasn't already marked.
    fn add_one_byte(&mut self, byte: u8) {
        if !self.byteset[byte as usize] {
            self.byteset[byte as usize] = true;
            self.count += 1;
            self.rank_sum += freq_rank(byte) as u16;
        }
    }
}
/// A prefilter for scanning for a single starting byte.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesOne {
    /// The single starting byte to scan for.
    byte1: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesOne {
    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
        // A hit is only a possible match start; callers must confirm it.
        memchr::memchr(self.byte1, &haystack[span])
            .map(|i| span.start + i)
            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
    }
}
/// A prefilter for scanning for two starting bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesTwo {
    /// The first starting byte to scan for.
    byte1: u8,
    /// The second starting byte to scan for.
    byte2: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesTwo {
    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
        // A hit is only a possible match start; callers must confirm it.
        memchr::memchr2(self.byte1, self.byte2, &haystack[span])
            .map(|i| span.start + i)
            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
    }
}
/// A prefilter for scanning for three starting bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesThree {
    /// The first starting byte to scan for.
    byte1: u8,
    /// The second starting byte to scan for.
    byte2: u8,
    /// The third starting byte to scan for.
    byte3: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesThree {
    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
        // A hit is only a possible match start; callers must confirm it.
        memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span])
            .map(|i| span.start + i)
            .map_or(Candidate::None, Candidate::PossibleStartOfMatch)
    }
}
/// If the given byte is an ASCII letter, then return it in the opposite case.
/// e.g., Given `b'A'`, this returns `b'a'`, and given `b'a'`, this returns
/// `b'A'`. If a non-ASCII-letter byte is given, then it is returned
/// unchanged.
pub(crate) fn opposite_ascii_case(b: u8) -> u8 {
    match b {
        b'A'..=b'Z' => b.to_ascii_lowercase(),
        b'a'..=b'z' => b.to_ascii_uppercase(),
        _ => b,
    }
}
/// Return the frequency rank of the given byte. The higher the rank, the more
/// common the byte (heuristically speaking).
fn freq_rank(b: u8) -> u8 {
    use crate::util::byte_frequencies::BYTE_FREQUENCIES;
    // Simple table lookup; the table is indexed by byte value (0..=255).
    BYTE_FREQUENCIES[b as usize]
}
use core::{
cmp,
fmt::Debug,
panic::{RefUnwindSafe, UnwindSafe},
u8,
};
use alloc::{sync::Arc, vec, vec::Vec};
use crate::{
packed,
util::{
alphabet::ByteSet,
search::{Match, MatchKind, Span},
},
};
/// A prefilter for accelerating a search.
///
/// This crate uses prefilters in the core search implementations to accelerate
/// common cases. They typically only apply to cases where there are a small
/// number of patterns (less than 100 or so), but when they do, thoughput can
/// be boosted considerably, perhaps by an order of magnitude. When a prefilter
/// is active, it is used whenever a search enters an automaton's start state.
///
/// Currently, prefilters cannot be constructed by
/// callers. A `Prefilter` can only be accessed via the
/// [`Automaton::prefilter`](crate::automaton::Automaton::prefilter)
/// method and used to execute a search. In other words, a prefilter can be
/// used to optimize your own search implementation if necessary, but cannot do
/// much else. If you have a use case for more APIs, please submit an issue.
#[derive(Clone, Debug)]
pub struct Prefilter {
finder: Arc<dyn PrefilterI>,
memory_usage: usize,
}
impl Prefilter {
/// Execute a search in the haystack within the span given. If a match or
/// a possible match is returned, then it is guaranteed to occur within
/// the bounds of the span.
///
/// If the span provided is invalid for the given haystack, then behavior
/// is unspecified.
#[inline]
pub fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
self.finder.find_in(haystack, span)
}
#[inline]
pub(crate) fn memory_usage(&self) -> usize {
self.memory_usage
}
}
/// A candidate is the result of running a prefilter on a haystack at a
/// particular position.
///
/// The result is either no match, a confirmed match or a possible match.
///
/// When no match is returned, the prefilter is guaranteeing that no possible
/// match can be found in the haystack, and the caller may trust this. That is,
/// all correct prefilters must never report false negatives.
///
/// In some cases, a prefilter can confirm a match very quickly, in which case,
/// the caller may use this to stop what it's doing and report the match. In
/// this case, prefilter implementations must never report a false positive.
/// In other cases, the prefilter can only report a potential match, in which
/// case the callers must attempt to confirm the match. In this case, prefilter
/// implementations are permitted to return false positives.
#[derive(Clone, Debug)]
pub enum Candidate {
/// No match was found. Since false negatives are not possible, this means
/// the search can quit as it is guaranteed not to find another match.
None,
/// A confirmed match was found. Callers do not need to confirm it.
Match(Match),
/// The start of a possible match was found. Callers must confirm it before
/// reporting it as a match.
PossibleStartOfMatch(usize),
}
impl Candidate {
/// Convert this candidate into an option. This is useful when callers
/// do not distinguish between true positives and false positives (i.e.,
/// the caller must always confirm the match).
pub fn into_option(self) -> Option<usize> {
match self {
Candidate::None => None,
Candidate::Match(ref m) => Some(m.start()),
Candidate::PossibleStartOfMatch(start) => Some(start),
}
}
}
/// A prefilter describes the behavior of fast literal scanners for quickly
/// skipping past bytes in the haystack that we know cannot possibly
/// participate in a match.
trait PrefilterI:
Send + Sync + RefUnwindSafe + UnwindSafe + Debug + 'static
{
/// Returns the next possible match candidate. This may yield false
/// positives, so callers must confirm a match starting at the position
/// returned. This, however, must never produce false negatives. That is,
/// this must, at minimum, return the starting position of the next match
/// in the given haystack after or at the given position.
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate;
}
impl<P: PrefilterI + ?Sized> PrefilterI for Arc<P> {
#[inline(always)]
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
(**self).find_in(haystack, span)
}
}
/// A builder for constructing the best possible prefilter. When constructed,
/// this builder will heuristically select the best prefilter it can build,
/// if any, and discard the rest.
#[derive(Debug)]
pub(crate) struct Builder {
count: usize,
ascii_case_insensitive: bool,
start_bytes: StartBytesBuilder,
rare_bytes: RareBytesBuilder,
memmem: MemmemBuilder,
packed: Option<packed::Builder>,
// If we run across a condition that suggests we shouldn't use a prefilter
// at all (like an empty pattern), then disable prefilters entirely.
enabled: bool,
}
impl Builder {
/// Create a new builder for constructing the best possible prefilter.
pub(crate) fn new(kind: MatchKind) -> Builder {
let pbuilder = kind
.as_packed()
.map(|kind| packed::Config::new().match_kind(kind).builder());
Builder {
count: 0,
ascii_case_insensitive: false,
start_bytes: StartBytesBuilder::new(),
rare_bytes: RareBytesBuilder::new(),
memmem: MemmemBuilder::default(),
packed: pbuilder,
enabled: true,
}
}
/// Enable ASCII case insensitivity. When set, byte strings added to this
/// builder will be interpreted without respect to ASCII case.
pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder {
self.ascii_case_insensitive = yes;
self.start_bytes = self.start_bytes.ascii_case_insensitive(yes);
self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes);
self
}
/// Return a prefilter suitable for quickly finding potential matches.
///
/// All patterns added to an Aho-Corasick automaton should be added to this
/// builder before attempting to construct the prefilter.
pub(crate) fn build(&self) -> Option<Prefilter> {
if !self.enabled {
return None;
}
// If we only have one pattern, then deferring to memmem is always
// the best choice. This is kind of a weird case, because, well, why
// use Aho-Corasick if you only have one pattern? But maybe you don't
// know exactly how many patterns you'll get up front, and you need to
// support the option of multiple patterns. So instead of relying on
// the caller to branch and use memmem explicitly, we just do it for
// them.
if !self.ascii_case_insensitive {
if let Some(pre) = self.memmem.build() {
return Some(pre);
}
}
match (self.start_bytes.build(), self.rare_bytes.build()) {
// If we could build both start and rare prefilters, then there are
// a few cases in which we'd want to use the start-byte prefilter
// over the rare-byte prefilter, since the former has lower
// overhead.
(prestart @ Some(_), prerare @ Some(_)) => {
// If the start-byte prefilter can scan for a smaller number
// of bytes than the rare-byte prefilter, then it's probably
// faster.
let has_fewer_bytes =
self.start_bytes.count < self.rare_bytes.count;
// Otherwise, if the combined frequency rank of the detected
// bytes in the start-byte prefilter is "close" to the combined
// frequency rank of the rare-byte prefilter, then we pick
// the start-byte prefilter even if the rare-byte prefilter
// heuristically searches for rare bytes. This is because the
// rare-byte prefilter has higher constant costs, so we tend to
// prefer the start-byte prefilter when we can.
let has_rarer_bytes =
self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50;
if has_fewer_bytes || has_rarer_bytes {
prestart
} else {
prerare
}
}
(prestart @ Some(_), None) => prestart,
(None, prerare @ Some(_)) => prerare,
(None, None) if self.ascii_case_insensitive => None,
(None, None) => {
self.packed.as_ref().and_then(|b| b.build()).map(|s| {
let memory_usage = s.memory_usage();
Prefilter { finder: Arc::new(Packed(s)), memory_usage }
})
}
}
}
/// Add a literal string to this prefilter builder.
pub(crate) fn add(&mut self, bytes: &[u8]) {
if bytes.is_empty() {
self.enabled = false;
}
if !self.enabled {
return;
}
self.count += 1;
self.start_bytes.add(bytes);
self.rare_bytes.add(bytes);
self.memmem.add(bytes);
if let Some(ref mut pbuilder) = self.packed {
pbuilder.add(bytes);
}
}
}
/// A type that wraps a packed searcher and implements the `Prefilter`
/// interface.
#[derive(Clone, Debug)]
struct Packed(packed::Searcher);
impl PrefilterI for Packed {
    /// Delegate to the packed searcher. A hit from a packed search is a real
    /// match, so it is reported as `Candidate::Match` (no confirmation
    /// needed by the caller).
    fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
        // `haystack` is already a `&[u8]`; pass it through directly instead
        // of taking a redundant `&` borrow (clippy::needless_borrow).
        self.0
            .find_in(haystack, span)
            .map_or(Candidate::None, Candidate::Match)
    }
}
/// A builder for constructing a prefilter that uses memmem.
#[derive(Debug, Default)]
struct MemmemBuilder {
/// The number of patterns that have been added.
count: usize,
/// The singular pattern to search for. This is only set when count==1.
one: Option<Vec<u8>>,
}
impl MemmemBuilder {
fn build(&self) -> Option<Prefilter> {
#[cfg(all(feature = "std", feature = "perf-literal"))]
fn imp(builder: &MemmemBuilder) -> Option<Prefilter> {
let pattern = builder.one.as_ref()?;
assert_eq!(1, builder.count);
let finder = Arc::new(Memmem(
memchr::memmem::Finder::new(pattern).into_owned(),
));
let memory_usage = pattern.len();
Some(Prefilter { finder, memory_usage })
}
#[cfg(not(all(feature = "std", feature = "perf-literal")))]
fn imp(_: &MemmemBuilder) -> Option<Prefilter> {
None
}
imp(self)
}
fn add(&mut self, bytes: &[u8]) {
self.count += 1;
if self.count == 1 {
self.one = Some(bytes.to_vec());
} else {
self.one = None;
}
}
}
/// A type that wraps a SIMD accelerated single substring search from the
/// `memchr` crate for use as a prefilter.
///
/// Currently, this prefilter is only active for Aho-Corasick searchers with
/// a single pattern. In theory, this could be extended to support searchers
/// that have a common prefix of more than one byte (for one byte, we would use
/// memchr), but it's not clear if it's worth it or not.
///
/// Also, unfortunately, this currently also requires the 'std' feature to
/// be enabled. That's because memchr doesn't have a no-std-but-with-alloc
/// mode, and so APIs like Finder::into_owned aren't available when 'std' is
/// disabled. But there should be an 'alloc' feature that brings in APIs like
/// Finder::into_owned but doesn't use std-only features like runtime CPU
/// feature detection.
#[cfg(all(feature = "std", feature = "perf-literal"))]
#[derive(Clone, Debug)]
struct Memmem(memchr::memmem::Finder<'static>);
#[cfg(all(feature = "std", feature = "perf-literal"))]
impl PrefilterI for Memmem {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
use crate::util::primitives::PatternID;
self.0.find(&haystack[span]).map_or(Candidate::None, |i| {
let start = span.start + i;
let end = start + self.0.needle().len();
// N.B. We can declare a match and use a fixed pattern ID here
// because a Memmem prefilter is only ever created for searchers
// with exactly one pattern. Thus, every match is always a match
// and it is always for the first and only pattern.
Candidate::Match(Match::new(PatternID::ZERO, start..end))
})
}
}
/// A builder for constructing a rare byte prefilter.
///
/// A rare byte prefilter attempts to pick out a small set of rare bytes that
/// occurr in the patterns, and then quickly scan to matches of those rare
/// bytes.
#[derive(Clone, Debug)]
struct RareBytesBuilder {
/// Whether this prefilter should account for ASCII case insensitivity or
/// not.
ascii_case_insensitive: bool,
/// A set of rare bytes, indexed by byte value.
rare_set: ByteSet,
/// A set of byte offsets associated with bytes in a pattern. An entry
/// corresponds to a particular bytes (its index) and is only non-zero if
/// the byte occurred at an offset greater than 0 in at least one pattern.
///
/// If a byte's offset is not representable in 8 bits, then the rare bytes
/// prefilter becomes inert.
byte_offsets: RareByteOffsets,
/// Whether this is available as a prefilter or not. This can be set to
/// false during construction if a condition is seen that invalidates the
/// use of the rare-byte prefilter.
available: bool,
/// The number of bytes set to an active value in `byte_offsets`.
count: usize,
/// The sum of frequency ranks for the rare bytes detected. This is
/// intended to give a heuristic notion of how rare the bytes are.
rank_sum: u16,
}
/// A set of byte offsets, keyed by byte.
#[derive(Clone, Copy)]
struct RareByteOffsets {
/// Each entry corresponds to the maximum offset of the corresponding
/// byte across all patterns seen.
set: [RareByteOffset; 256],
}
impl RareByteOffsets {
/// Create a new empty set of rare byte offsets.
pub(crate) fn empty() -> RareByteOffsets {
RareByteOffsets { set: [RareByteOffset::default(); 256] }
}
/// Add the given offset for the given byte to this set. If the offset is
/// greater than the existing offset, then it overwrites the previous
/// value and returns false. If there is no previous value set, then this
/// sets it and returns true.
pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) {
self.set[byte as usize].max =
cmp::max(self.set[byte as usize].max, off.max);
}
}
impl core::fmt::Debug for RareByteOffsets {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let mut offsets = vec![];
for off in self.set.iter() {
if off.max > 0 {
offsets.push(off);
}
}
f.debug_struct("RareByteOffsets").field("set", &offsets).finish()
}
}
/// Offsets associated with an occurrence of a "rare" byte in any of the
/// patterns used to construct a single Aho-Corasick automaton.
#[derive(Clone, Copy, Debug)]
struct RareByteOffset {
/// The maximum offset at which a particular byte occurs from the start
/// of any pattern. This is used as a shift amount. That is, when an
/// occurrence of this byte is found, the candidate position reported by
/// the prefilter is `position_of_byte - max`, such that the automaton
/// will begin its search at a position that is guaranteed to observe a
/// match.
///
/// To avoid accidentally quadratic behavior, a prefilter is considered
/// ineffective when it is asked to start scanning from a position that it
/// has already scanned past.
///
/// Using a `u8` here means that if we ever see a pattern that's longer
/// than 255 bytes, then the entire rare byte prefilter is disabled.
max: u8,
}
impl Default for RareByteOffset {
fn default() -> RareByteOffset {
RareByteOffset { max: 0 }
}
}
impl RareByteOffset {
/// Create a new rare byte offset. If the given offset is too big, then
/// None is returned. In that case, callers should render the rare bytes
/// prefilter inert.
fn new(max: usize) -> Option<RareByteOffset> {
if max > u8::MAX as usize {
None
} else {
Some(RareByteOffset { max: max as u8 })
}
}
}
impl RareBytesBuilder {
/// Create a new builder for constructing a rare byte prefilter.
fn new() -> RareBytesBuilder {
RareBytesBuilder {
ascii_case_insensitive: false,
rare_set: ByteSet::empty(),
byte_offsets: RareByteOffsets::empty(),
available: true,
count: 0,
rank_sum: 0,
}
}
/// Enable ASCII case insensitivity. When set, byte strings added to this
/// builder will be interpreted without respect to ASCII case.
fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder {
self.ascii_case_insensitive = yes;
self
}
/// Build the rare bytes prefilter.
///
/// If there are more than 3 distinct rare bytes found, or if heuristics
/// otherwise determine that this prefilter should not be used, then `None`
/// is returned.
fn build(&self) -> Option<Prefilter> {
#[cfg(feature = "perf-literal")]
fn imp(builder: &RareBytesBuilder) -> Option<Prefilter> {
if !builder.available || builder.count > 3 {
return None;
}
let (mut bytes, mut len) = ([0; 3], 0);
for b in 0..=255 {
if builder.rare_set.contains(b) {
bytes[len] = b as u8;
len += 1;
}
}
let finder: Arc<dyn PrefilterI> = match len {
0 => return None,
1 => Arc::new(RareBytesOne {
byte1: bytes[0],
offset: builder.byte_offsets.set[bytes[0] as usize],
}),
2 => Arc::new(RareBytesTwo {
offsets: builder.byte_offsets,
byte1: bytes[0],
byte2: bytes[1],
}),
3 => Arc::new(RareBytesThree {
offsets: builder.byte_offsets,
byte1: bytes[0],
byte2: bytes[1],
byte3: bytes[2],
}),
_ => unreachable!(),
};
Some(Prefilter { finder, memory_usage: 0 })
}
#[cfg(not(feature = "perf-literal"))]
fn imp(_: &RareBytesBuilder) -> Option<Prefilter> {
None
}
imp(self)
}
/// Add a byte string to this builder.
///
/// All patterns added to an Aho-Corasick automaton should be added to this
/// builder before attempting to construct the prefilter.
fn add(&mut self, bytes: &[u8]) {
// If we've already given up, then do nothing.
if !self.available {
return;
}
// If we've already blown our budget, then don't waste time looking
// for more rare bytes.
if self.count > 3 {
self.available = false;
return;
}
// If the pattern is too long, then our offset table is bunk, so
// give up.
if bytes.len() >= 256 {
self.available = false;
return;
}
let mut rarest = match bytes.get(0) {
None => return,
Some(&b) => (b, freq_rank(b)),
};
// The idea here is to look for the rarest byte in each pattern, and
// add that to our set. As a special exception, if we see a byte that
// we've already added, then we immediately stop and choose that byte,
// even if there's another rare byte in the pattern. This helps us
// apply the rare byte optimization in more cases by attempting to pick
// bytes that are in common between patterns. So for example, if we
// were searching for `Sherlock` and `lockjaw`, then this would pick
// `k` for both patterns, resulting in the use of `memchr` instead of
// `memchr2` for `k` and `j`.
let mut found = false;
for (pos, &b) in bytes.iter().enumerate() {
self.set_offset(pos, b);
if found {
continue;
}
if self.rare_set.contains(b) {
found = true;
continue;
}
let rank = freq_rank(b);
if rank < rarest.1 {
rarest = (b, rank);
}
}
if !found {
self.add_rare_byte(rarest.0);
}
}
fn set_offset(&mut self, pos: usize, byte: u8) {
// This unwrap is OK because pos is never bigger than our max.
let offset = RareByteOffset::new(pos).unwrap();
self.byte_offsets.set(byte, offset);
if self.ascii_case_insensitive {
self.byte_offsets.set(opposite_ascii_case(byte), offset);
}
}
fn add_rare_byte(&mut self, byte: u8) {
self.add_one_rare_byte(byte);
if self.ascii_case_insensitive {
self.add_one_rare_byte(opposite_ascii_case(byte));
}
}
fn add_one_rare_byte(&mut self, byte: u8) {
if !self.rare_set.contains(byte) |
}
}
/// A prefilter for scanning for a single "rare" byte.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesOne {
byte1: u8,
offset: RareByteOffset,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesOne {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr(self.byte1, &haystack[span])
.map(|i| {
let pos = span.start + i;
cmp::max(
span.start,
pos.saturating_sub(usize::from(self.offset.max)),
)
})
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for two "rare" bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesTwo {
offsets: RareByteOffsets,
byte1: u8,
byte2: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesTwo {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr2(self.byte1, self.byte2, &haystack[span])
.map(|i| {
let pos = span.start + i;
let offset = self.offsets.set[usize::from(haystack[pos])].max;
cmp::max(span.start, pos.saturating_sub(usize::from(offset)))
})
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for three "rare" bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesThree {
offsets: RareByteOffsets,
byte1: u8,
byte2: u8,
byte3: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesThree {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span])
.map(|i| {
let pos = span.start + i;
let offset = self.offsets.set[usize::from(haystack[pos])].max;
cmp::max(span.start, pos.saturating_sub(usize::from(offset)))
})
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A builder for constructing a starting byte prefilter.
///
/// A starting byte prefilter is a simplistic prefilter that looks for possible
/// matches by reporting all positions corresponding to a particular byte. This
/// generally only takes affect when there are at most 3 distinct possible
/// starting bytes. e.g., the patterns `foo`, `bar`, and `baz` have two
/// distinct starting bytes (`f` and `b`), and this prefilter returns all
/// occurrences of either `f` or `b`.
///
/// In some cases, a heuristic frequency analysis may determine that it would
/// be better not to use this prefilter even when there are 3 or fewer distinct
/// starting bytes.
#[derive(Clone, Debug)]
struct StartBytesBuilder {
/// Whether this prefilter should account for ASCII case insensitivity or
/// not.
ascii_case_insensitive: bool,
/// The set of starting bytes observed.
byteset: Vec<bool>,
/// The number of bytes set to true in `byteset`.
count: usize,
/// The sum of frequency ranks for the rare bytes detected. This is
/// intended to give a heuristic notion of how rare the bytes are.
rank_sum: u16,
}
impl StartBytesBuilder {
/// Create a new builder for constructing a start byte prefilter.
fn new() -> StartBytesBuilder {
StartBytesBuilder {
ascii_case_insensitive: false,
byteset: vec![false; 256],
count: 0,
rank_sum: 0,
}
}
/// Enable ASCII case insensitivity. When set, byte strings added to this
/// builder will be interpreted without respect to ASCII case.
fn ascii_case_insensitive(mut self, yes: bool) -> StartBytesBuilder {
self.ascii_case_insensitive = yes;
self
}
/// Build the starting bytes prefilter.
///
/// If there are more than 3 distinct starting bytes, or if heuristics
/// otherwise determine that this prefilter should not be used, then `None`
/// is returned.
fn build(&self) -> Option<Prefilter> {
#[cfg(feature = "perf-literal")]
fn imp(builder: &StartBytesBuilder) -> Option<Prefilter> {
if builder.count > 3 {
return None;
}
let (mut bytes, mut len) = ([0; 3], 0);
for b in 0..256 {
if !builder.byteset[b] {
continue;
}
// We don't handle non-ASCII bytes for now. Getting non-ASCII
// bytes right is trickier, since we generally don't want to put
// a leading UTF-8 code unit into a prefilter that isn't ASCII,
// since they can frequently. Instead, it would be better to use a
// continuation byte, but this requires more sophisticated analysis
// of the automaton and a richer prefilter API.
if b > 0x7F {
return None;
}
bytes[len] = b as u8;
len += 1;
}
let finder: Arc<dyn PrefilterI> = match len {
0 => return None,
1 => Arc::new(StartBytesOne { byte1: bytes[0] }),
2 => Arc::new(StartBytesTwo {
byte1: bytes[0],
byte2: bytes[1],
}),
3 => Arc::new(StartBytesThree {
byte1: bytes[0],
byte2: bytes[1],
byte3: bytes[2],
}),
_ => unreachable!(),
};
Some(Prefilter { finder, memory_usage: 0 })
}
#[cfg(not(feature = "perf-literal"))]
fn imp(_: &StartBytesBuilder) -> Option<Prefilter> {
None
}
imp(self)
}
/// Add a byte string to this builder.
///
/// All patterns added to an Aho-Corasick automaton should be added to this
/// builder before attempting to construct the prefilter.
fn add(&mut self, bytes: &[u8]) {
if self.count > 3 {
return;
}
if let Some(&byte) = bytes.get(0) {
self.add_one_byte(byte);
if self.ascii_case_insensitive {
self.add_one_byte(opposite_ascii_case(byte));
}
}
}
fn add_one_byte(&mut self, byte: u8) {
if !self.byteset[byte as usize] {
self.byteset[byte as usize] = true;
self.count += 1;
self.rank_sum += freq_rank(byte) as u16;
}
}
}
/// A prefilter for scanning for a single starting byte.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesOne {
byte1: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesOne {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr(self.byte1, &haystack[span])
.map(|i| span.start + i)
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for two starting bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesTwo {
byte1: u8,
byte2: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesTwo {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr2(self.byte1, self.byte2, &haystack[span])
.map(|i| span.start + i)
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for three starting bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesThree {
byte1: u8,
byte2: u8,
byte3: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesThree {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span])
.map(|i| span.start + i)
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// If the given byte is an ASCII letter, then return it in the opposite case.
/// e.g., Given `b'A'`, this returns `b'a'`, and given `b'a'`, this returns
/// `b'A'`. If a non-ASCII letter is given, then the given byte is returned.
pub(crate) fn opposite_ascii_case(b: u8) -> u8 {
if b'A' <= b && b <= b'Z' {
b.to_ascii_lowercase()
} else if b'a' <= b && b <= b'z' {
b.to_ascii_uppercase()
} else {
b
}
}
/// Return the frequency rank of the given byte. The higher the rank, the more
/// common the byte (heuristically speaking).
fn freq_rank(b: u8) -> u8 {
use crate::util::byte_frequencies::BYTE_FREQUENCIES;
BYTE_FREQUENCIES[b as usize]
}
| {
self.rare_set.add(byte);
self.count += 1;
self.rank_sum += freq_rank(byte) as u16;
} | conditional_block |
prefilter.rs | use core::{
cmp,
fmt::Debug,
panic::{RefUnwindSafe, UnwindSafe},
u8,
};
use alloc::{sync::Arc, vec, vec::Vec};
use crate::{
packed,
util::{
alphabet::ByteSet,
search::{Match, MatchKind, Span},
},
};
/// A prefilter for accelerating a search.
///
/// This crate uses prefilters in the core search implementations to accelerate
/// common cases. They typically only apply to cases where there are a small
/// number of patterns (less than 100 or so), but when they do, thoughput can
/// be boosted considerably, perhaps by an order of magnitude. When a prefilter
/// is active, it is used whenever a search enters an automaton's start state.
///
/// Currently, prefilters cannot be constructed by
/// callers. A `Prefilter` can only be accessed via the
/// [`Automaton::prefilter`](crate::automaton::Automaton::prefilter)
/// method and used to execute a search. In other words, a prefilter can be
/// used to optimize your own search implementation if necessary, but cannot do
/// much else. If you have a use case for more APIs, please submit an issue.
#[derive(Clone, Debug)]
pub struct Prefilter {
finder: Arc<dyn PrefilterI>,
memory_usage: usize,
}
impl Prefilter {
/// Execute a search in the haystack within the span given. If a match or
/// a possible match is returned, then it is guaranteed to occur within
/// the bounds of the span.
///
/// If the span provided is invalid for the given haystack, then behavior
/// is unspecified.
#[inline]
pub fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
self.finder.find_in(haystack, span)
}
#[inline]
pub(crate) fn memory_usage(&self) -> usize {
self.memory_usage
}
}
/// A candidate is the result of running a prefilter on a haystack at a
/// particular position.
///
/// The result is either no match, a confirmed match or a possible match.
///
/// When no match is returned, the prefilter is guaranteeing that no possible
/// match can be found in the haystack, and the caller may trust this. That is,
/// all correct prefilters must never report false negatives.
///
/// In some cases, a prefilter can confirm a match very quickly, in which case,
/// the caller may use this to stop what it's doing and report the match. In
/// this case, prefilter implementations must never report a false positive.
/// In other cases, the prefilter can only report a potential match, in which
/// case the callers must attempt to confirm the match. In this case, prefilter
/// implementations are permitted to return false positives.
#[derive(Clone, Debug)]
pub enum Candidate {
/// No match was found. Since false negatives are not possible, this means
/// the search can quit as it is guaranteed not to find another match.
None,
/// A confirmed match was found. Callers do not need to confirm it.
Match(Match),
/// The start of a possible match was found. Callers must confirm it before
/// reporting it as a match.
PossibleStartOfMatch(usize),
}
impl Candidate {
/// Convert this candidate into an option. This is useful when callers
/// do not distinguish between true positives and false positives (i.e.,
/// the caller must always confirm the match).
pub fn into_option(self) -> Option<usize> {
match self {
Candidate::None => None,
Candidate::Match(ref m) => Some(m.start()),
Candidate::PossibleStartOfMatch(start) => Some(start),
}
}
}
/// A prefilter describes the behavior of fast literal scanners for quickly
/// skipping past bytes in the haystack that we know cannot possibly
/// participate in a match.
trait PrefilterI:
Send + Sync + RefUnwindSafe + UnwindSafe + Debug + 'static
{
/// Returns the next possible match candidate. This may yield false
/// positives, so callers must confirm a match starting at the position
/// returned. This, however, must never produce false negatives. That is,
/// this must, at minimum, return the starting position of the next match
/// in the given haystack after or at the given position.
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate;
}
impl<P: PrefilterI + ?Sized> PrefilterI for Arc<P> {
#[inline(always)]
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
(**self).find_in(haystack, span)
}
}
/// A builder for constructing the best possible prefilter. When constructed,
/// this builder will heuristically select the best prefilter it can build,
/// if any, and discard the rest.
#[derive(Debug)]
pub(crate) struct Builder {
count: usize,
ascii_case_insensitive: bool,
start_bytes: StartBytesBuilder,
rare_bytes: RareBytesBuilder,
memmem: MemmemBuilder,
packed: Option<packed::Builder>,
// If we run across a condition that suggests we shouldn't use a prefilter
// at all (like an empty pattern), then disable prefilters entirely.
enabled: bool,
}
impl Builder {
/// Create a new builder for constructing the best possible prefilter.
pub(crate) fn new(kind: MatchKind) -> Builder {
let pbuilder = kind
.as_packed()
.map(|kind| packed::Config::new().match_kind(kind).builder());
Builder {
count: 0,
ascii_case_insensitive: false,
start_bytes: StartBytesBuilder::new(),
rare_bytes: RareBytesBuilder::new(),
memmem: MemmemBuilder::default(),
packed: pbuilder,
enabled: true,
}
}
/// Enable ASCII case insensitivity. When set, byte strings added to this
/// builder will be interpreted without respect to ASCII case.
pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder {
self.ascii_case_insensitive = yes;
self.start_bytes = self.start_bytes.ascii_case_insensitive(yes);
self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes);
self
}
/// Return a prefilter suitable for quickly finding potential matches.
///
/// All patterns added to an Aho-Corasick automaton should be added to this
/// builder before attempting to construct the prefilter.
pub(crate) fn build(&self) -> Option<Prefilter> {
if !self.enabled {
return None;
}
// If we only have one pattern, then deferring to memmem is always
// the best choice. This is kind of a weird case, because, well, why
// use Aho-Corasick if you only have one pattern? But maybe you don't
// know exactly how many patterns you'll get up front, and you need to
// support the option of multiple patterns. So instead of relying on
// the caller to branch and use memmem explicitly, we just do it for
// them.
if !self.ascii_case_insensitive {
if let Some(pre) = self.memmem.build() {
return Some(pre);
}
}
match (self.start_bytes.build(), self.rare_bytes.build()) {
// If we could build both start and rare prefilters, then there are
// a few cases in which we'd want to use the start-byte prefilter
// over the rare-byte prefilter, since the former has lower
// overhead.
(prestart @ Some(_), prerare @ Some(_)) => {
// If the start-byte prefilter can scan for a smaller number
// of bytes than the rare-byte prefilter, then it's probably
// faster.
let has_fewer_bytes =
self.start_bytes.count < self.rare_bytes.count;
// Otherwise, if the combined frequency rank of the detected
// bytes in the start-byte prefilter is "close" to the combined
// frequency rank of the rare-byte prefilter, then we pick
// the start-byte prefilter even if the rare-byte prefilter
// heuristically searches for rare bytes. This is because the
// rare-byte prefilter has higher constant costs, so we tend to
// prefer the start-byte prefilter when we can.
let has_rarer_bytes =
self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50;
if has_fewer_bytes || has_rarer_bytes {
prestart
} else {
prerare
}
}
(prestart @ Some(_), None) => prestart,
(None, prerare @ Some(_)) => prerare,
(None, None) if self.ascii_case_insensitive => None,
(None, None) => {
self.packed.as_ref().and_then(|b| b.build()).map(|s| {
let memory_usage = s.memory_usage();
Prefilter { finder: Arc::new(Packed(s)), memory_usage }
})
}
}
}
/// Add a literal string to this prefilter builder.
pub(crate) fn add(&mut self, bytes: &[u8]) {
if bytes.is_empty() {
self.enabled = false;
}
if !self.enabled {
return;
}
self.count += 1;
self.start_bytes.add(bytes);
self.rare_bytes.add(bytes);
self.memmem.add(bytes);
if let Some(ref mut pbuilder) = self.packed {
pbuilder.add(bytes);
}
}
}
/// A type that wraps a packed searcher and implements the `Prefilter`
/// interface.
#[derive(Clone, Debug)]
struct Packed(packed::Searcher);
impl PrefilterI for Packed {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
self.0
.find_in(&haystack, span)
.map_or(Candidate::None, Candidate::Match)
}
}
/// A builder for constructing a prefilter that uses memmem.
#[derive(Debug, Default)]
struct MemmemBuilder {
/// The number of patterns that have been added.
count: usize,
/// The singular pattern to search for. This is only set when count==1.
one: Option<Vec<u8>>,
}
impl MemmemBuilder {
fn build(&self) -> Option<Prefilter> {
#[cfg(all(feature = "std", feature = "perf-literal"))]
fn imp(builder: &MemmemBuilder) -> Option<Prefilter> {
let pattern = builder.one.as_ref()?;
assert_eq!(1, builder.count);
let finder = Arc::new(Memmem(
memchr::memmem::Finder::new(pattern).into_owned(),
));
let memory_usage = pattern.len();
Some(Prefilter { finder, memory_usage })
}
#[cfg(not(all(feature = "std", feature = "perf-literal")))]
fn imp(_: &MemmemBuilder) -> Option<Prefilter> {
None
}
imp(self)
}
fn add(&mut self, bytes: &[u8]) {
self.count += 1;
if self.count == 1 {
self.one = Some(bytes.to_vec());
} else {
self.one = None;
}
}
}
/// A type that wraps a SIMD accelerated single substring search from the
/// `memchr` crate for use as a prefilter.
///
/// Currently, this prefilter is only active for Aho-Corasick searchers with
/// a single pattern. In theory, this could be extended to support searchers
/// that have a common prefix of more than one byte (for one byte, we would use
/// memchr), but it's not clear if it's worth it or not.
///
/// Also, unfortunately, this currently also requires the 'std' feature to
/// be enabled. That's because memchr doesn't have a no-std-but-with-alloc
/// mode, and so APIs like Finder::into_owned aren't available when 'std' is
/// disabled. But there should be an 'alloc' feature that brings in APIs like
/// Finder::into_owned but doesn't use std-only features like runtime CPU
/// feature detection.
#[cfg(all(feature = "std", feature = "perf-literal"))]
#[derive(Clone, Debug)]
struct Memmem(memchr::memmem::Finder<'static>);
#[cfg(all(feature = "std", feature = "perf-literal"))]
impl PrefilterI for Memmem {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate |
}
/// A builder for constructing a rare byte prefilter.
///
/// A rare byte prefilter attempts to pick out a small set of rare bytes that
/// occurr in the patterns, and then quickly scan to matches of those rare
/// bytes.
#[derive(Clone, Debug)]
struct RareBytesBuilder {
/// Whether this prefilter should account for ASCII case insensitivity or
/// not.
ascii_case_insensitive: bool,
/// A set of rare bytes, indexed by byte value.
rare_set: ByteSet,
/// A set of byte offsets associated with bytes in a pattern. An entry
/// corresponds to a particular bytes (its index) and is only non-zero if
/// the byte occurred at an offset greater than 0 in at least one pattern.
///
/// If a byte's offset is not representable in 8 bits, then the rare bytes
/// prefilter becomes inert.
byte_offsets: RareByteOffsets,
/// Whether this is available as a prefilter or not. This can be set to
/// false during construction if a condition is seen that invalidates the
/// use of the rare-byte prefilter.
available: bool,
/// The number of bytes set to an active value in `byte_offsets`.
count: usize,
/// The sum of frequency ranks for the rare bytes detected. This is
/// intended to give a heuristic notion of how rare the bytes are.
rank_sum: u16,
}
/// A set of byte offsets, keyed by byte.
#[derive(Clone, Copy)]
struct RareByteOffsets {
/// Each entry corresponds to the maximum offset of the corresponding
/// byte across all patterns seen.
set: [RareByteOffset; 256],
}
impl RareByteOffsets {
/// Create a new empty set of rare byte offsets.
pub(crate) fn empty() -> RareByteOffsets {
RareByteOffsets { set: [RareByteOffset::default(); 256] }
}
/// Add the given offset for the given byte to this set. If the offset is
/// greater than the existing offset, then it overwrites the previous
/// value and returns false. If there is no previous value set, then this
/// sets it and returns true.
pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) {
self.set[byte as usize].max =
cmp::max(self.set[byte as usize].max, off.max);
}
}
impl core::fmt::Debug for RareByteOffsets {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let mut offsets = vec![];
for off in self.set.iter() {
if off.max > 0 {
offsets.push(off);
}
}
f.debug_struct("RareByteOffsets").field("set", &offsets).finish()
}
}
/// Offsets associated with an occurrence of a "rare" byte in any of the
/// patterns used to construct a single Aho-Corasick automaton.
#[derive(Clone, Copy, Debug)]
struct RareByteOffset {
/// The maximum offset at which a particular byte occurs from the start
/// of any pattern. This is used as a shift amount. That is, when an
/// occurrence of this byte is found, the candidate position reported by
/// the prefilter is `position_of_byte - max`, such that the automaton
/// will begin its search at a position that is guaranteed to observe a
/// match.
///
/// To avoid accidentally quadratic behavior, a prefilter is considered
/// ineffective when it is asked to start scanning from a position that it
/// has already scanned past.
///
/// Using a `u8` here means that if we ever see a pattern that's longer
/// than 255 bytes, then the entire rare byte prefilter is disabled.
max: u8,
}
impl Default for RareByteOffset {
fn default() -> RareByteOffset {
RareByteOffset { max: 0 }
}
}
impl RareByteOffset {
/// Create a new rare byte offset. If the given offset is too big, then
/// None is returned. In that case, callers should render the rare bytes
/// prefilter inert.
fn new(max: usize) -> Option<RareByteOffset> {
if max > u8::MAX as usize {
None
} else {
Some(RareByteOffset { max: max as u8 })
}
}
}
impl RareBytesBuilder {
/// Create a new builder for constructing a rare byte prefilter.
fn new() -> RareBytesBuilder {
RareBytesBuilder {
ascii_case_insensitive: false,
rare_set: ByteSet::empty(),
byte_offsets: RareByteOffsets::empty(),
available: true,
count: 0,
rank_sum: 0,
}
}
/// Enable ASCII case insensitivity. When set, byte strings added to this
/// builder will be interpreted without respect to ASCII case.
fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder {
self.ascii_case_insensitive = yes;
self
}
/// Build the rare bytes prefilter.
///
/// If there are more than 3 distinct rare bytes found, or if heuristics
/// otherwise determine that this prefilter should not be used, then `None`
/// is returned.
fn build(&self) -> Option<Prefilter> {
#[cfg(feature = "perf-literal")]
fn imp(builder: &RareBytesBuilder) -> Option<Prefilter> {
if !builder.available || builder.count > 3 {
return None;
}
let (mut bytes, mut len) = ([0; 3], 0);
for b in 0..=255 {
if builder.rare_set.contains(b) {
bytes[len] = b as u8;
len += 1;
}
}
let finder: Arc<dyn PrefilterI> = match len {
0 => return None,
1 => Arc::new(RareBytesOne {
byte1: bytes[0],
offset: builder.byte_offsets.set[bytes[0] as usize],
}),
2 => Arc::new(RareBytesTwo {
offsets: builder.byte_offsets,
byte1: bytes[0],
byte2: bytes[1],
}),
3 => Arc::new(RareBytesThree {
offsets: builder.byte_offsets,
byte1: bytes[0],
byte2: bytes[1],
byte3: bytes[2],
}),
_ => unreachable!(),
};
Some(Prefilter { finder, memory_usage: 0 })
}
#[cfg(not(feature = "perf-literal"))]
fn imp(_: &RareBytesBuilder) -> Option<Prefilter> {
None
}
imp(self)
}
/// Add a byte string to this builder.
///
/// All patterns added to an Aho-Corasick automaton should be added to this
/// builder before attempting to construct the prefilter.
fn add(&mut self, bytes: &[u8]) {
// If we've already given up, then do nothing.
if !self.available {
return;
}
// If we've already blown our budget, then don't waste time looking
// for more rare bytes.
if self.count > 3 {
self.available = false;
return;
}
// If the pattern is too long, then our offset table is bunk, so
// give up.
if bytes.len() >= 256 {
self.available = false;
return;
}
let mut rarest = match bytes.get(0) {
None => return,
Some(&b) => (b, freq_rank(b)),
};
// The idea here is to look for the rarest byte in each pattern, and
// add that to our set. As a special exception, if we see a byte that
// we've already added, then we immediately stop and choose that byte,
// even if there's another rare byte in the pattern. This helps us
// apply the rare byte optimization in more cases by attempting to pick
// bytes that are in common between patterns. So for example, if we
// were searching for `Sherlock` and `lockjaw`, then this would pick
// `k` for both patterns, resulting in the use of `memchr` instead of
// `memchr2` for `k` and `j`.
let mut found = false;
for (pos, &b) in bytes.iter().enumerate() {
self.set_offset(pos, b);
if found {
continue;
}
if self.rare_set.contains(b) {
found = true;
continue;
}
let rank = freq_rank(b);
if rank < rarest.1 {
rarest = (b, rank);
}
}
if !found {
self.add_rare_byte(rarest.0);
}
}
fn set_offset(&mut self, pos: usize, byte: u8) {
// This unwrap is OK because pos is never bigger than our max.
let offset = RareByteOffset::new(pos).unwrap();
self.byte_offsets.set(byte, offset);
if self.ascii_case_insensitive {
self.byte_offsets.set(opposite_ascii_case(byte), offset);
}
}
fn add_rare_byte(&mut self, byte: u8) {
self.add_one_rare_byte(byte);
if self.ascii_case_insensitive {
self.add_one_rare_byte(opposite_ascii_case(byte));
}
}
fn add_one_rare_byte(&mut self, byte: u8) {
if !self.rare_set.contains(byte) {
self.rare_set.add(byte);
self.count += 1;
self.rank_sum += freq_rank(byte) as u16;
}
}
}
/// A prefilter for scanning for a single "rare" byte.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesOne {
byte1: u8,
offset: RareByteOffset,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesOne {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr(self.byte1, &haystack[span])
.map(|i| {
let pos = span.start + i;
cmp::max(
span.start,
pos.saturating_sub(usize::from(self.offset.max)),
)
})
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for two "rare" bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesTwo {
offsets: RareByteOffsets,
byte1: u8,
byte2: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesTwo {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr2(self.byte1, self.byte2, &haystack[span])
.map(|i| {
let pos = span.start + i;
let offset = self.offsets.set[usize::from(haystack[pos])].max;
cmp::max(span.start, pos.saturating_sub(usize::from(offset)))
})
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for three "rare" bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesThree {
offsets: RareByteOffsets,
byte1: u8,
byte2: u8,
byte3: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesThree {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span])
.map(|i| {
let pos = span.start + i;
let offset = self.offsets.set[usize::from(haystack[pos])].max;
cmp::max(span.start, pos.saturating_sub(usize::from(offset)))
})
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A builder for constructing a starting byte prefilter.
///
/// A starting byte prefilter is a simplistic prefilter that looks for possible
/// matches by reporting all positions corresponding to a particular byte. This
/// generally only takes affect when there are at most 3 distinct possible
/// starting bytes. e.g., the patterns `foo`, `bar`, and `baz` have two
/// distinct starting bytes (`f` and `b`), and this prefilter returns all
/// occurrences of either `f` or `b`.
///
/// In some cases, a heuristic frequency analysis may determine that it would
/// be better not to use this prefilter even when there are 3 or fewer distinct
/// starting bytes.
#[derive(Clone, Debug)]
struct StartBytesBuilder {
/// Whether this prefilter should account for ASCII case insensitivity or
/// not.
ascii_case_insensitive: bool,
/// The set of starting bytes observed.
byteset: Vec<bool>,
/// The number of bytes set to true in `byteset`.
count: usize,
/// The sum of frequency ranks for the rare bytes detected. This is
/// intended to give a heuristic notion of how rare the bytes are.
rank_sum: u16,
}
impl StartBytesBuilder {
/// Create a new builder for constructing a start byte prefilter.
fn new() -> StartBytesBuilder {
StartBytesBuilder {
ascii_case_insensitive: false,
byteset: vec![false; 256],
count: 0,
rank_sum: 0,
}
}
/// Enable ASCII case insensitivity. When set, byte strings added to this
/// builder will be interpreted without respect to ASCII case.
fn ascii_case_insensitive(mut self, yes: bool) -> StartBytesBuilder {
self.ascii_case_insensitive = yes;
self
}
/// Build the starting bytes prefilter.
///
/// If there are more than 3 distinct starting bytes, or if heuristics
/// otherwise determine that this prefilter should not be used, then `None`
/// is returned.
fn build(&self) -> Option<Prefilter> {
#[cfg(feature = "perf-literal")]
fn imp(builder: &StartBytesBuilder) -> Option<Prefilter> {
if builder.count > 3 {
return None;
}
let (mut bytes, mut len) = ([0; 3], 0);
for b in 0..256 {
if !builder.byteset[b] {
continue;
}
// We don't handle non-ASCII bytes for now. Getting non-ASCII
// bytes right is trickier, since we generally don't want to put
// a leading UTF-8 code unit into a prefilter that isn't ASCII,
// since they can frequently. Instead, it would be better to use a
// continuation byte, but this requires more sophisticated analysis
// of the automaton and a richer prefilter API.
if b > 0x7F {
return None;
}
bytes[len] = b as u8;
len += 1;
}
let finder: Arc<dyn PrefilterI> = match len {
0 => return None,
1 => Arc::new(StartBytesOne { byte1: bytes[0] }),
2 => Arc::new(StartBytesTwo {
byte1: bytes[0],
byte2: bytes[1],
}),
3 => Arc::new(StartBytesThree {
byte1: bytes[0],
byte2: bytes[1],
byte3: bytes[2],
}),
_ => unreachable!(),
};
Some(Prefilter { finder, memory_usage: 0 })
}
#[cfg(not(feature = "perf-literal"))]
fn imp(_: &StartBytesBuilder) -> Option<Prefilter> {
None
}
imp(self)
}
/// Add a byte string to this builder.
///
/// All patterns added to an Aho-Corasick automaton should be added to this
/// builder before attempting to construct the prefilter.
fn add(&mut self, bytes: &[u8]) {
if self.count > 3 {
return;
}
if let Some(&byte) = bytes.get(0) {
self.add_one_byte(byte);
if self.ascii_case_insensitive {
self.add_one_byte(opposite_ascii_case(byte));
}
}
}
fn add_one_byte(&mut self, byte: u8) {
if !self.byteset[byte as usize] {
self.byteset[byte as usize] = true;
self.count += 1;
self.rank_sum += freq_rank(byte) as u16;
}
}
}
/// A prefilter for scanning for a single starting byte.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesOne {
byte1: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesOne {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr(self.byte1, &haystack[span])
.map(|i| span.start + i)
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for two starting bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesTwo {
byte1: u8,
byte2: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesTwo {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr2(self.byte1, self.byte2, &haystack[span])
.map(|i| span.start + i)
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for three starting bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesThree {
byte1: u8,
byte2: u8,
byte3: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesThree {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span])
.map(|i| span.start + i)
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// If the given byte is an ASCII letter, then return it in the opposite case.
/// e.g., Given `b'A'`, this returns `b'a'`, and given `b'a'`, this returns
/// `b'A'`. If a non-ASCII letter is given, then the given byte is returned.
pub(crate) fn opposite_ascii_case(b: u8) -> u8 {
if b'A' <= b && b <= b'Z' {
b.to_ascii_lowercase()
} else if b'a' <= b && b <= b'z' {
b.to_ascii_uppercase()
} else {
b
}
}
/// Return the frequency rank of the given byte. The higher the rank, the more
/// common the byte (heuristically speaking).
fn freq_rank(b: u8) -> u8 {
use crate::util::byte_frequencies::BYTE_FREQUENCIES;
BYTE_FREQUENCIES[b as usize]
}
| {
use crate::util::primitives::PatternID;
self.0.find(&haystack[span]).map_or(Candidate::None, |i| {
let start = span.start + i;
let end = start + self.0.needle().len();
// N.B. We can declare a match and use a fixed pattern ID here
// because a Memmem prefilter is only ever created for searchers
// with exactly one pattern. Thus, every match is always a match
// and it is always for the first and only pattern.
Candidate::Match(Match::new(PatternID::ZERO, start..end))
})
} | identifier_body |
prefilter.rs | use core::{
cmp,
fmt::Debug,
panic::{RefUnwindSafe, UnwindSafe},
u8,
};
use alloc::{sync::Arc, vec, vec::Vec};
use crate::{
packed,
util::{
alphabet::ByteSet,
search::{Match, MatchKind, Span},
},
};
/// A prefilter for accelerating a search.
///
/// This crate uses prefilters in the core search implementations to accelerate
/// common cases. They typically only apply to cases where there are a small
/// number of patterns (less than 100 or so), but when they do, thoughput can
/// be boosted considerably, perhaps by an order of magnitude. When a prefilter
/// is active, it is used whenever a search enters an automaton's start state.
///
/// Currently, prefilters cannot be constructed by
/// callers. A `Prefilter` can only be accessed via the
/// [`Automaton::prefilter`](crate::automaton::Automaton::prefilter)
/// method and used to execute a search. In other words, a prefilter can be
/// used to optimize your own search implementation if necessary, but cannot do
/// much else. If you have a use case for more APIs, please submit an issue.
#[derive(Clone, Debug)]
pub struct Prefilter {
finder: Arc<dyn PrefilterI>,
memory_usage: usize,
}
impl Prefilter {
/// Execute a search in the haystack within the span given. If a match or
/// a possible match is returned, then it is guaranteed to occur within
/// the bounds of the span.
///
/// If the span provided is invalid for the given haystack, then behavior
/// is unspecified.
#[inline]
pub fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
self.finder.find_in(haystack, span)
}
#[inline]
pub(crate) fn memory_usage(&self) -> usize {
self.memory_usage
}
}
/// A candidate is the result of running a prefilter on a haystack at a
/// particular position.
///
/// The result is either no match, a confirmed match or a possible match.
///
/// When no match is returned, the prefilter is guaranteeing that no possible
/// match can be found in the haystack, and the caller may trust this. That is,
/// all correct prefilters must never report false negatives.
///
/// In some cases, a prefilter can confirm a match very quickly, in which case,
/// the caller may use this to stop what it's doing and report the match. In
/// this case, prefilter implementations must never report a false positive.
/// In other cases, the prefilter can only report a potential match, in which
/// case the callers must attempt to confirm the match. In this case, prefilter
/// implementations are permitted to return false positives.
#[derive(Clone, Debug)]
pub enum Candidate {
/// No match was found. Since false negatives are not possible, this means
/// the search can quit as it is guaranteed not to find another match.
None,
/// A confirmed match was found. Callers do not need to confirm it.
Match(Match),
/// The start of a possible match was found. Callers must confirm it before
/// reporting it as a match.
PossibleStartOfMatch(usize),
}
impl Candidate {
/// Convert this candidate into an option. This is useful when callers
/// do not distinguish between true positives and false positives (i.e.,
/// the caller must always confirm the match).
pub fn into_option(self) -> Option<usize> {
match self {
Candidate::None => None,
Candidate::Match(ref m) => Some(m.start()),
Candidate::PossibleStartOfMatch(start) => Some(start),
}
}
}
/// A prefilter describes the behavior of fast literal scanners for quickly
/// skipping past bytes in the haystack that we know cannot possibly
/// participate in a match.
trait PrefilterI:
Send + Sync + RefUnwindSafe + UnwindSafe + Debug + 'static
{
/// Returns the next possible match candidate. This may yield false
/// positives, so callers must confirm a match starting at the position
/// returned. This, however, must never produce false negatives. That is,
/// this must, at minimum, return the starting position of the next match
/// in the given haystack after or at the given position.
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate;
}
impl<P: PrefilterI + ?Sized> PrefilterI for Arc<P> {
#[inline(always)]
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
(**self).find_in(haystack, span)
}
}
/// A builder for constructing the best possible prefilter. When constructed,
/// this builder will heuristically select the best prefilter it can build,
/// if any, and discard the rest.
#[derive(Debug)]
pub(crate) struct Builder {
count: usize,
ascii_case_insensitive: bool,
start_bytes: StartBytesBuilder,
rare_bytes: RareBytesBuilder,
memmem: MemmemBuilder,
packed: Option<packed::Builder>,
// If we run across a condition that suggests we shouldn't use a prefilter
// at all (like an empty pattern), then disable prefilters entirely.
enabled: bool,
}
impl Builder {
/// Create a new builder for constructing the best possible prefilter.
pub(crate) fn new(kind: MatchKind) -> Builder {
let pbuilder = kind
.as_packed()
.map(|kind| packed::Config::new().match_kind(kind).builder());
Builder {
count: 0,
ascii_case_insensitive: false,
start_bytes: StartBytesBuilder::new(),
rare_bytes: RareBytesBuilder::new(),
memmem: MemmemBuilder::default(),
packed: pbuilder,
enabled: true,
}
}
/// Enable ASCII case insensitivity. When set, byte strings added to this
/// builder will be interpreted without respect to ASCII case.
pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder {
self.ascii_case_insensitive = yes;
self.start_bytes = self.start_bytes.ascii_case_insensitive(yes);
self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes);
self
}
/// Return a prefilter suitable for quickly finding potential matches.
///
/// All patterns added to an Aho-Corasick automaton should be added to this
/// builder before attempting to construct the prefilter.
pub(crate) fn build(&self) -> Option<Prefilter> {
if !self.enabled {
return None;
}
// If we only have one pattern, then deferring to memmem is always
// the best choice. This is kind of a weird case, because, well, why
// use Aho-Corasick if you only have one pattern? But maybe you don't
// know exactly how many patterns you'll get up front, and you need to
// support the option of multiple patterns. So instead of relying on
// the caller to branch and use memmem explicitly, we just do it for
// them.
if !self.ascii_case_insensitive {
if let Some(pre) = self.memmem.build() {
return Some(pre);
}
}
match (self.start_bytes.build(), self.rare_bytes.build()) {
// If we could build both start and rare prefilters, then there are
// a few cases in which we'd want to use the start-byte prefilter
// over the rare-byte prefilter, since the former has lower
// overhead.
(prestart @ Some(_), prerare @ Some(_)) => {
// If the start-byte prefilter can scan for a smaller number
// of bytes than the rare-byte prefilter, then it's probably
// faster.
let has_fewer_bytes =
self.start_bytes.count < self.rare_bytes.count;
// Otherwise, if the combined frequency rank of the detected
// bytes in the start-byte prefilter is "close" to the combined
// frequency rank of the rare-byte prefilter, then we pick
// the start-byte prefilter even if the rare-byte prefilter
// heuristically searches for rare bytes. This is because the
// rare-byte prefilter has higher constant costs, so we tend to
// prefer the start-byte prefilter when we can.
let has_rarer_bytes =
self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50;
if has_fewer_bytes || has_rarer_bytes {
prestart
} else {
prerare
}
}
(prestart @ Some(_), None) => prestart,
(None, prerare @ Some(_)) => prerare,
(None, None) if self.ascii_case_insensitive => None,
(None, None) => {
self.packed.as_ref().and_then(|b| b.build()).map(|s| {
let memory_usage = s.memory_usage();
Prefilter { finder: Arc::new(Packed(s)), memory_usage }
})
}
}
}
/// Add a literal string to this prefilter builder.
pub(crate) fn add(&mut self, bytes: &[u8]) {
if bytes.is_empty() {
self.enabled = false;
}
if !self.enabled {
return;
}
self.count += 1;
self.start_bytes.add(bytes);
self.rare_bytes.add(bytes);
self.memmem.add(bytes);
if let Some(ref mut pbuilder) = self.packed {
pbuilder.add(bytes);
}
}
}
/// A type that wraps a packed searcher and implements the `Prefilter`
/// interface.
#[derive(Clone, Debug)]
struct Packed(packed::Searcher);
impl PrefilterI for Packed {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
self.0
.find_in(&haystack, span)
.map_or(Candidate::None, Candidate::Match)
}
}
/// A builder for constructing a prefilter that uses memmem.
#[derive(Debug, Default)]
struct MemmemBuilder {
/// The number of patterns that have been added.
count: usize,
/// The singular pattern to search for. This is only set when count==1.
one: Option<Vec<u8>>,
}
impl MemmemBuilder {
fn build(&self) -> Option<Prefilter> {
#[cfg(all(feature = "std", feature = "perf-literal"))]
fn imp(builder: &MemmemBuilder) -> Option<Prefilter> {
let pattern = builder.one.as_ref()?;
assert_eq!(1, builder.count);
let finder = Arc::new(Memmem(
memchr::memmem::Finder::new(pattern).into_owned(),
));
let memory_usage = pattern.len();
Some(Prefilter { finder, memory_usage })
}
#[cfg(not(all(feature = "std", feature = "perf-literal")))]
fn imp(_: &MemmemBuilder) -> Option<Prefilter> {
None
}
imp(self)
}
fn add(&mut self, bytes: &[u8]) {
self.count += 1;
if self.count == 1 {
self.one = Some(bytes.to_vec());
} else {
self.one = None;
}
}
}
/// A type that wraps a SIMD accelerated single substring search from the
/// `memchr` crate for use as a prefilter.
///
/// Currently, this prefilter is only active for Aho-Corasick searchers with
/// a single pattern. In theory, this could be extended to support searchers
/// that have a common prefix of more than one byte (for one byte, we would use
/// memchr), but it's not clear if it's worth it or not.
///
/// Also, unfortunately, this currently also requires the 'std' feature to
/// be enabled. That's because memchr doesn't have a no-std-but-with-alloc
/// mode, and so APIs like Finder::into_owned aren't available when 'std' is
/// disabled. But there should be an 'alloc' feature that brings in APIs like
/// Finder::into_owned but doesn't use std-only features like runtime CPU
/// feature detection.
#[cfg(all(feature = "std", feature = "perf-literal"))]
#[derive(Clone, Debug)]
struct Memmem(memchr::memmem::Finder<'static>);
#[cfg(all(feature = "std", feature = "perf-literal"))]
impl PrefilterI for Memmem {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
use crate::util::primitives::PatternID;
self.0.find(&haystack[span]).map_or(Candidate::None, |i| {
let start = span.start + i;
let end = start + self.0.needle().len();
// N.B. We can declare a match and use a fixed pattern ID here
// because a Memmem prefilter is only ever created for searchers
// with exactly one pattern. Thus, every match is always a match
// and it is always for the first and only pattern.
Candidate::Match(Match::new(PatternID::ZERO, start..end))
})
}
}
/// A builder for constructing a rare byte prefilter.
///
/// A rare byte prefilter attempts to pick out a small set of rare bytes that
/// occurr in the patterns, and then quickly scan to matches of those rare
/// bytes.
#[derive(Clone, Debug)]
struct RareBytesBuilder {
/// Whether this prefilter should account for ASCII case insensitivity or
/// not.
ascii_case_insensitive: bool,
/// A set of rare bytes, indexed by byte value.
rare_set: ByteSet,
/// A set of byte offsets associated with bytes in a pattern. An entry
/// corresponds to a particular bytes (its index) and is only non-zero if
/// the byte occurred at an offset greater than 0 in at least one pattern.
///
/// If a byte's offset is not representable in 8 bits, then the rare bytes
/// prefilter becomes inert.
byte_offsets: RareByteOffsets,
/// Whether this is available as a prefilter or not. This can be set to
/// false during construction if a condition is seen that invalidates the
/// use of the rare-byte prefilter.
available: bool,
/// The number of bytes set to an active value in `byte_offsets`.
count: usize,
/// The sum of frequency ranks for the rare bytes detected. This is
/// intended to give a heuristic notion of how rare the bytes are.
rank_sum: u16,
}
/// A set of byte offsets, keyed by byte.
#[derive(Clone, Copy)]
struct RareByteOffsets {
/// Each entry corresponds to the maximum offset of the corresponding
/// byte across all patterns seen.
set: [RareByteOffset; 256],
}
impl RareByteOffsets {
/// Create a new empty set of rare byte offsets.
pub(crate) fn empty() -> RareByteOffsets {
RareByteOffsets { set: [RareByteOffset::default(); 256] }
}
/// Add the given offset for the given byte to this set. If the offset is
/// greater than the existing offset, then it overwrites the previous
/// value and returns false. If there is no previous value set, then this
/// sets it and returns true.
pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) {
self.set[byte as usize].max =
cmp::max(self.set[byte as usize].max, off.max);
}
}
impl core::fmt::Debug for RareByteOffsets {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let mut offsets = vec![];
for off in self.set.iter() {
if off.max > 0 {
offsets.push(off);
}
}
f.debug_struct("RareByteOffsets").field("set", &offsets).finish()
}
}
/// Offsets associated with an occurrence of a "rare" byte in any of the
/// patterns used to construct a single Aho-Corasick automaton.
#[derive(Clone, Copy, Debug)]
struct RareByteOffset {
/// The maximum offset at which a particular byte occurs from the start
/// of any pattern. This is used as a shift amount. That is, when an
/// occurrence of this byte is found, the candidate position reported by
/// the prefilter is `position_of_byte - max`, such that the automaton
/// will begin its search at a position that is guaranteed to observe a
/// match.
///
/// To avoid accidentally quadratic behavior, a prefilter is considered
/// ineffective when it is asked to start scanning from a position that it
/// has already scanned past.
///
/// Using a `u8` here means that if we ever see a pattern that's longer
/// than 255 bytes, then the entire rare byte prefilter is disabled.
max: u8,
}
impl Default for RareByteOffset {
fn default() -> RareByteOffset {
RareByteOffset { max: 0 }
}
}
impl RareByteOffset {
/// Create a new rare byte offset. If the given offset is too big, then
/// None is returned. In that case, callers should render the rare bytes
/// prefilter inert.
fn new(max: usize) -> Option<RareByteOffset> {
if max > u8::MAX as usize {
None
} else {
Some(RareByteOffset { max: max as u8 })
}
}
}
impl RareBytesBuilder {
/// Create a new builder for constructing a rare byte prefilter.
fn new() -> RareBytesBuilder {
RareBytesBuilder {
ascii_case_insensitive: false,
rare_set: ByteSet::empty(),
byte_offsets: RareByteOffsets::empty(),
available: true,
count: 0,
rank_sum: 0,
}
}
/// Enable ASCII case insensitivity. When set, byte strings added to this
/// builder will be interpreted without respect to ASCII case.
fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder {
self.ascii_case_insensitive = yes;
self
}
/// Build the rare bytes prefilter.
///
/// If there are more than 3 distinct rare bytes found, or if heuristics
/// otherwise determine that this prefilter should not be used, then `None`
/// is returned.
fn build(&self) -> Option<Prefilter> {
#[cfg(feature = "perf-literal")]
fn imp(builder: &RareBytesBuilder) -> Option<Prefilter> {
if !builder.available || builder.count > 3 {
return None; | for b in 0..=255 {
if builder.rare_set.contains(b) {
bytes[len] = b as u8;
len += 1;
}
}
let finder: Arc<dyn PrefilterI> = match len {
0 => return None,
1 => Arc::new(RareBytesOne {
byte1: bytes[0],
offset: builder.byte_offsets.set[bytes[0] as usize],
}),
2 => Arc::new(RareBytesTwo {
offsets: builder.byte_offsets,
byte1: bytes[0],
byte2: bytes[1],
}),
3 => Arc::new(RareBytesThree {
offsets: builder.byte_offsets,
byte1: bytes[0],
byte2: bytes[1],
byte3: bytes[2],
}),
_ => unreachable!(),
};
Some(Prefilter { finder, memory_usage: 0 })
}
#[cfg(not(feature = "perf-literal"))]
fn imp(_: &RareBytesBuilder) -> Option<Prefilter> {
None
}
imp(self)
}
/// Add a byte string to this builder.
///
/// All patterns added to an Aho-Corasick automaton should be added to this
/// builder before attempting to construct the prefilter.
fn add(&mut self, bytes: &[u8]) {
// If we've already given up, then do nothing.
if !self.available {
return;
}
// If we've already blown our budget, then don't waste time looking
// for more rare bytes.
if self.count > 3 {
self.available = false;
return;
}
// If the pattern is too long, then our offset table is bunk, so
// give up.
if bytes.len() >= 256 {
self.available = false;
return;
}
let mut rarest = match bytes.get(0) {
None => return,
Some(&b) => (b, freq_rank(b)),
};
// The idea here is to look for the rarest byte in each pattern, and
// add that to our set. As a special exception, if we see a byte that
// we've already added, then we immediately stop and choose that byte,
// even if there's another rare byte in the pattern. This helps us
// apply the rare byte optimization in more cases by attempting to pick
// bytes that are in common between patterns. So for example, if we
// were searching for `Sherlock` and `lockjaw`, then this would pick
// `k` for both patterns, resulting in the use of `memchr` instead of
// `memchr2` for `k` and `j`.
let mut found = false;
for (pos, &b) in bytes.iter().enumerate() {
self.set_offset(pos, b);
if found {
continue;
}
if self.rare_set.contains(b) {
found = true;
continue;
}
let rank = freq_rank(b);
if rank < rarest.1 {
rarest = (b, rank);
}
}
if !found {
self.add_rare_byte(rarest.0);
}
}
fn set_offset(&mut self, pos: usize, byte: u8) {
// This unwrap is OK because pos is never bigger than our max.
let offset = RareByteOffset::new(pos).unwrap();
self.byte_offsets.set(byte, offset);
if self.ascii_case_insensitive {
self.byte_offsets.set(opposite_ascii_case(byte), offset);
}
}
fn add_rare_byte(&mut self, byte: u8) {
self.add_one_rare_byte(byte);
if self.ascii_case_insensitive {
self.add_one_rare_byte(opposite_ascii_case(byte));
}
}
fn add_one_rare_byte(&mut self, byte: u8) {
if !self.rare_set.contains(byte) {
self.rare_set.add(byte);
self.count += 1;
self.rank_sum += freq_rank(byte) as u16;
}
}
}
/// A prefilter for scanning for a single "rare" byte.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesOne {
byte1: u8,
offset: RareByteOffset,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesOne {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr(self.byte1, &haystack[span])
.map(|i| {
let pos = span.start + i;
cmp::max(
span.start,
pos.saturating_sub(usize::from(self.offset.max)),
)
})
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for two "rare" bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesTwo {
offsets: RareByteOffsets,
byte1: u8,
byte2: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesTwo {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr2(self.byte1, self.byte2, &haystack[span])
.map(|i| {
let pos = span.start + i;
let offset = self.offsets.set[usize::from(haystack[pos])].max;
cmp::max(span.start, pos.saturating_sub(usize::from(offset)))
})
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for three "rare" bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct RareBytesThree {
offsets: RareByteOffsets,
byte1: u8,
byte2: u8,
byte3: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for RareBytesThree {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span])
.map(|i| {
let pos = span.start + i;
let offset = self.offsets.set[usize::from(haystack[pos])].max;
cmp::max(span.start, pos.saturating_sub(usize::from(offset)))
})
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A builder for constructing a starting byte prefilter.
///
/// A starting byte prefilter is a simplistic prefilter that looks for possible
/// matches by reporting all positions corresponding to a particular byte. This
/// generally only takes affect when there are at most 3 distinct possible
/// starting bytes. e.g., the patterns `foo`, `bar`, and `baz` have two
/// distinct starting bytes (`f` and `b`), and this prefilter returns all
/// occurrences of either `f` or `b`.
///
/// In some cases, a heuristic frequency analysis may determine that it would
/// be better not to use this prefilter even when there are 3 or fewer distinct
/// starting bytes.
#[derive(Clone, Debug)]
struct StartBytesBuilder {
/// Whether this prefilter should account for ASCII case insensitivity or
/// not.
ascii_case_insensitive: bool,
/// The set of starting bytes observed.
byteset: Vec<bool>,
/// The number of bytes set to true in `byteset`.
count: usize,
/// The sum of frequency ranks for the rare bytes detected. This is
/// intended to give a heuristic notion of how rare the bytes are.
rank_sum: u16,
}
impl StartBytesBuilder {
/// Create a new builder for constructing a start byte prefilter.
fn new() -> StartBytesBuilder {
StartBytesBuilder {
ascii_case_insensitive: false,
byteset: vec![false; 256],
count: 0,
rank_sum: 0,
}
}
/// Enable ASCII case insensitivity. When set, byte strings added to this
/// builder will be interpreted without respect to ASCII case.
fn ascii_case_insensitive(mut self, yes: bool) -> StartBytesBuilder {
self.ascii_case_insensitive = yes;
self
}
/// Build the starting bytes prefilter.
///
/// If there are more than 3 distinct starting bytes, or if heuristics
/// otherwise determine that this prefilter should not be used, then `None`
/// is returned.
fn build(&self) -> Option<Prefilter> {
#[cfg(feature = "perf-literal")]
fn imp(builder: &StartBytesBuilder) -> Option<Prefilter> {
if builder.count > 3 {
return None;
}
let (mut bytes, mut len) = ([0; 3], 0);
for b in 0..256 {
if !builder.byteset[b] {
continue;
}
// We don't handle non-ASCII bytes for now. Getting non-ASCII
// bytes right is trickier, since we generally don't want to put
// a leading UTF-8 code unit into a prefilter that isn't ASCII,
// since they can frequently. Instead, it would be better to use a
// continuation byte, but this requires more sophisticated analysis
// of the automaton and a richer prefilter API.
if b > 0x7F {
return None;
}
bytes[len] = b as u8;
len += 1;
}
let finder: Arc<dyn PrefilterI> = match len {
0 => return None,
1 => Arc::new(StartBytesOne { byte1: bytes[0] }),
2 => Arc::new(StartBytesTwo {
byte1: bytes[0],
byte2: bytes[1],
}),
3 => Arc::new(StartBytesThree {
byte1: bytes[0],
byte2: bytes[1],
byte3: bytes[2],
}),
_ => unreachable!(),
};
Some(Prefilter { finder, memory_usage: 0 })
}
#[cfg(not(feature = "perf-literal"))]
fn imp(_: &StartBytesBuilder) -> Option<Prefilter> {
None
}
imp(self)
}
/// Add a byte string to this builder.
///
/// All patterns added to an Aho-Corasick automaton should be added to this
/// builder before attempting to construct the prefilter.
fn add(&mut self, bytes: &[u8]) {
if self.count > 3 {
return;
}
if let Some(&byte) = bytes.get(0) {
self.add_one_byte(byte);
if self.ascii_case_insensitive {
self.add_one_byte(opposite_ascii_case(byte));
}
}
}
fn add_one_byte(&mut self, byte: u8) {
if !self.byteset[byte as usize] {
self.byteset[byte as usize] = true;
self.count += 1;
self.rank_sum += freq_rank(byte) as u16;
}
}
}
/// A prefilter for scanning for a single starting byte.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesOne {
byte1: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesOne {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr(self.byte1, &haystack[span])
.map(|i| span.start + i)
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for two starting bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesTwo {
byte1: u8,
byte2: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesTwo {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr2(self.byte1, self.byte2, &haystack[span])
.map(|i| span.start + i)
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// A prefilter for scanning for three starting bytes.
#[cfg(feature = "perf-literal")]
#[derive(Clone, Debug)]
struct StartBytesThree {
byte1: u8,
byte2: u8,
byte3: u8,
}
#[cfg(feature = "perf-literal")]
impl PrefilterI for StartBytesThree {
fn find_in(&self, haystack: &[u8], span: Span) -> Candidate {
memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span])
.map(|i| span.start + i)
.map_or(Candidate::None, Candidate::PossibleStartOfMatch)
}
}
/// If the given byte is an ASCII letter, then return it in the opposite case.
/// e.g., Given `b'A'`, this returns `b'a'`, and given `b'a'`, this returns
/// `b'A'`. If a non-ASCII letter is given, then the given byte is returned.
pub(crate) fn opposite_ascii_case(b: u8) -> u8 {
if b'A' <= b && b <= b'Z' {
b.to_ascii_lowercase()
} else if b'a' <= b && b <= b'z' {
b.to_ascii_uppercase()
} else {
b
}
}
/// Return the frequency rank of the given byte. The higher the rank, the more
/// common the byte (heuristically speaking).
fn freq_rank(b: u8) -> u8 {
use crate::util::byte_frequencies::BYTE_FREQUENCIES;
BYTE_FREQUENCIES[b as usize]
} | }
let (mut bytes, mut len) = ([0; 3], 0); | random_line_split |
code.go | /*
Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vterrors
import (
"fmt"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
var (
VT03001 = errorWithState("VT03001", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "aggregate functions take a single argument '%s'", "This aggregation function only takes a single argument.")
VT03002 = errorWithState("VT03002", vtrpcpb.Code_INVALID_ARGUMENT, ForbidSchemaChange, "changing schema from '%s' to '%s' is not allowed", "This schema change is not allowed. You cannot change the keyspace of a table.")
VT03003 = errorWithState("VT03003", vtrpcpb.Code_INVALID_ARGUMENT, UnknownTable, "unknown table '%s' in MULTI DELETE", "The specified table in this DELETE statement is unknown.")
VT03004 = errorWithState("VT03004", vtrpcpb.Code_INVALID_ARGUMENT, NonUpdateableTable, "the target table %s of the DELETE is not updatable", "You cannot delete something that is not a real MySQL table.")
VT03005 = errorWithState("VT03005", vtrpcpb.Code_INVALID_ARGUMENT, WrongGroupField, "cannot group on '%s'", "The planner does not allow grouping on certain field. For instance, aggregation function.")
VT03006 = errorWithState("VT03006", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueCountOnRow, "column count does not match value count at row 1", "The number of columns you want to insert do not match the number of columns of your SELECT query.")
VT03007 = errorWithoutState("VT03007", vtrpcpb.Code_INVALID_ARGUMENT, "keyspace not specified", "You need to add a keyspace qualifier.")
VT03008 = errorWithState("VT03008", vtrpcpb.Code_INVALID_ARGUMENT, CantUseOptionHere, "incorrect usage/placement of '%s'", "The given token is not usable in this situation. Please refer to the MySQL documentation to learn more about your token's syntax.")
VT03009 = errorWithState("VT03009", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueForVar, "unexpected value type for '%s': %v", "You cannot assign this type to the given variable.")
VT03010 = errorWithState("VT03010", vtrpcpb.Code_INVALID_ARGUMENT, IncorrectGlobalLocalVar, "variable '%s' is a read only variable", "You cannot set the given variable as it is a read-only variable.")
VT03011 = errorWithoutState("VT03011", vtrpcpb.Code_INVALID_ARGUMENT, "invalid value type: %v", "The given value type is not accepted.")
VT03012 = errorWithoutState("VT03012", vtrpcpb.Code_INVALID_ARGUMENT, "invalid syntax: %s", "The syntax is invalid. Please refer to the MySQL documentation for the proper syntax.")
VT03013 = errorWithState("VT03013", vtrpcpb.Code_INVALID_ARGUMENT, NonUniqTable, "not unique table/alias: '%s'", "This table or alias name is already use. Please use another one that is unique.")
VT03014 = errorWithState("VT03014", vtrpcpb.Code_INVALID_ARGUMENT, BadFieldError, "unknown column '%d' in '%s'", "The given column is unknown.")
VT03015 = errorWithoutState("VT03015", vtrpcpb.Code_INVALID_ARGUMENT, "column has duplicate set values: '%v'", "Cannot assign multiple values to a column in an update statement.")
VT03016 = errorWithoutState("VT03016", vtrpcpb.Code_INVALID_ARGUMENT, "unknown vindex column: '%s'", "The given column is unknown in the vindex table.")
VT03017 = errorWithState("VT03017", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "where clause can only be of the type 'pos > <value>'", "This vstream where clause can only be a greater than filter.")
VT03018 = errorWithoutState("VT03018", vtrpcpb.Code_INVALID_ARGUMENT, "NEXT used on a non-sequence table", "You cannot use the NEXT syntax on a table that is not a sequence table.")
VT03019 = errorWithoutState("VT03019", vtrpcpb.Code_INVALID_ARGUMENT, "column %s not found", "The given column was not found or is not available.")
VT03020 = errorWithoutState("VT03020", vtrpcpb.Code_INVALID_ARGUMENT, "column %s not found in subquery", "The given column was not found in the subquery.") | VT03022 = errorWithoutState("VT03022", vtrpcpb.Code_INVALID_ARGUMENT, "column %v not found in %v", "The given column cannot be found.")
VT03023 = errorWithoutState("VT03023", vtrpcpb.Code_INVALID_ARGUMENT, "INSERT not supported when targeting a key range: %s", "When targeting a range of shards, Vitess does not know which shard to send the INSERT to.")
VT03024 = errorWithoutState("VT03024", vtrpcpb.Code_INVALID_ARGUMENT, "'%s' user defined variable does not exists", "The query cannot be prepared using the user defined variable as it does not exists for this session.")
VT03025 = errorWithState("VT03025", vtrpcpb.Code_INVALID_ARGUMENT, WrongArguments, "Incorrect arguments to %s", "The execute statement have wrong number of arguments")
VT05001 = errorWithState("VT05001", vtrpcpb.Code_NOT_FOUND, DbDropExists, "cannot drop database '%s'; database does not exists", "The given database does not exist; Vitess cannot drop it.")
VT05002 = errorWithState("VT05002", vtrpcpb.Code_NOT_FOUND, BadDb, "cannot alter database '%s'; unknown database", "The given database does not exist; Vitess cannot alter it.")
VT05003 = errorWithState("VT05003", vtrpcpb.Code_NOT_FOUND, BadDb, "unknown database '%s' in vschema", "The given database does not exist in the VSchema.")
VT05004 = errorWithState("VT05004", vtrpcpb.Code_NOT_FOUND, UnknownTable, "table '%s' does not exist", "The given table is unknown.")
VT05005 = errorWithState("VT05005", vtrpcpb.Code_NOT_FOUND, NoSuchTable, "table '%s' does not exist in keyspace '%s'", "The given table does not exist in this keyspace.")
VT05006 = errorWithState("VT05006", vtrpcpb.Code_NOT_FOUND, UnknownSystemVariable, "unknown system variable '%s'", "The given system variable is unknown.")
VT05007 = errorWithoutState("VT05007", vtrpcpb.Code_NOT_FOUND, "no table info", "Table information is not available.")
VT06001 = errorWithState("VT06001", vtrpcpb.Code_ALREADY_EXISTS, DbCreateExists, "cannot create database '%s'; database exists", "The given database name already exists.")
VT07001 = errorWithState("VT07001", vtrpcpb.Code_PERMISSION_DENIED, KillDeniedError, "%s", "Kill statement is not allowed. More in docs about how to enable it and its limitations.")
VT09001 = errorWithState("VT09001", vtrpcpb.Code_FAILED_PRECONDITION, RequiresPrimaryKey, PrimaryVindexNotSet, "the table does not have a primary vindex, the operation is impossible.")
VT09002 = errorWithState("VT09002", vtrpcpb.Code_FAILED_PRECONDITION, InnodbReadOnly, "%s statement with a replica target", "This type of DML statement is not allowed on a replica target.")
VT09003 = errorWithoutState("VT09003", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT query does not have primary vindex column '%v' in the column list", "A vindex column is mandatory for the insert, please provide one.")
VT09004 = errorWithoutState("VT09004", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT should contain column list or the table should have authoritative columns in vschema", "You need to provide the list of columns you want to insert, or provide a VSchema with authoritative columns. If schema tracking is disabled you can enable it to automatically have authoritative columns.")
VT09005 = errorWithState("VT09005", vtrpcpb.Code_FAILED_PRECONDITION, NoDB, "no database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)", "A database must be selected.")
VT09006 = errorWithoutState("VT09006", vtrpcpb.Code_FAILED_PRECONDITION, "%s VITESS_MIGRATION works only on primary tablet", "VITESS_MIGRATION commands work only on primary tablets, you must send such commands to a primary tablet.")
VT09007 = errorWithoutState("VT09007", vtrpcpb.Code_FAILED_PRECONDITION, "%s VITESS_THROTTLED_APPS works only on primary tablet", "VITESS_THROTTLED_APPS commands work only on primary tablet, you must send such commands to a primary tablet.")
VT09008 = errorWithoutState("VT09008", vtrpcpb.Code_FAILED_PRECONDITION, "vexplain queries/all will actually run queries", "vexplain queries/all will actually run queries. `/*vt+ EXECUTE_DML_QUERIES */` must be set to run DML queries in vtexplain. Example: `vexplain /*vt+ EXECUTE_DML_QUERIES */ queries delete from t1`")
VT09009 = errorWithoutState("VT09009", vtrpcpb.Code_FAILED_PRECONDITION, "stream is supported only for primary tablet type, current type: %v", "Stream is only supported for primary tablets, please use a stream on those tablets.")
VT09010 = errorWithoutState("VT09010", vtrpcpb.Code_FAILED_PRECONDITION, "SHOW VITESS_THROTTLER STATUS works only on primary tablet", "SHOW VITESS_THROTTLER STATUS works only on primary tablet.")
VT09011 = errorWithState("VT09011", vtrpcpb.Code_FAILED_PRECONDITION, UnknownStmtHandler, "Unknown prepared statement handler (%s) given to %s", "The prepared statement is not available")
VT09012 = errorWithoutState("VT09012", vtrpcpb.Code_FAILED_PRECONDITION, "%s statement with %s tablet not allowed", "This type of statement is not allowed on the given tablet.")
VT09013 = errorWithoutState("VT09013", vtrpcpb.Code_FAILED_PRECONDITION, "semi-sync plugins are not loaded", "Durability policy wants Vitess to use semi-sync, but the MySQL instances don't have the semi-sync plugin loaded.")
VT09014 = errorWithoutState("VT09014", vtrpcpb.Code_FAILED_PRECONDITION, "vindex cannot be modified", "The vindex cannot be used as table in DML statement")
VT09015 = errorWithoutState("VT09015", vtrpcpb.Code_FAILED_PRECONDITION, "schema tracking required", "This query cannot be planned without more information on the SQL schema. Please turn on schema tracking or add authoritative columns information to your VSchema.")
VT09016 = errorWithState("VT09016", vtrpcpb.Code_FAILED_PRECONDITION, RowIsReferenced2, "Cannot delete or update a parent row: a foreign key constraint fails", "SET DEFAULT is not supported by InnoDB")
VT10001 = errorWithoutState("VT10001", vtrpcpb.Code_ABORTED, "foreign key constraints are not allowed", "Foreign key constraints are not allowed, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/.")
VT12001 = errorWithoutState("VT12001", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: %s", "This statement is unsupported by Vitess. Please rewrite your query to use supported syntax.")
VT12002 = errorWithoutState("VT12002", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard foreign keys", "Vitess does not support cross shard foreign keys.")
VT12003 = errorWithoutState("VT12002", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: foreign keys management at vitess", "Vitess does not support managing foreign keys tables.")
// VT13001 General Error
VT13001 = errorWithoutState("VT13001", vtrpcpb.Code_INTERNAL, "[BUG] %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.")
VT13002 = errorWithoutState("VT13002", vtrpcpb.Code_INTERNAL, "unexpected AST struct for query: %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.")
VT14001 = errorWithoutState("VT14001", vtrpcpb.Code_UNAVAILABLE, "connection error", "The connection failed.")
VT14002 = errorWithoutState("VT14002", vtrpcpb.Code_UNAVAILABLE, "no available connection", "No available connection.")
VT14003 = errorWithoutState("VT14003", vtrpcpb.Code_UNAVAILABLE, "no connection for tablet %v", "No connection for the given tablet.")
VT14004 = errorWithoutState("VT14004", vtrpcpb.Code_UNAVAILABLE, "cannot find keyspace for: %s", "The specified keyspace could not be found.")
VT14005 = errorWithoutState("VT14005", vtrpcpb.Code_UNAVAILABLE, "cannot lookup sidecar database for keyspace: %s", "Failed to read sidecar database identifier.")
Errors = []func(args ...any) *VitessError{
VT03001,
VT03002,
VT03003,
VT03004,
VT03005,
VT03006,
VT03007,
VT03008,
VT03009,
VT03010,
VT03011,
VT03012,
VT03013,
VT03014,
VT03015,
VT03016,
VT03017,
VT03018,
VT03019,
VT03020,
VT03021,
VT03022,
VT03023,
VT03024,
VT03025,
VT05001,
VT05002,
VT05003,
VT05004,
VT05005,
VT05006,
VT05007,
VT06001,
VT07001,
VT09001,
VT09002,
VT09003,
VT09004,
VT09005,
VT09006,
VT09007,
VT09008,
VT09009,
VT09010,
VT09011,
VT09012,
VT09013,
VT09014,
VT09015,
VT09016,
VT10001,
VT12001,
VT12002,
VT12003,
VT13001,
VT13002,
VT14001,
VT14002,
VT14003,
VT14004,
VT14005,
}
)
type VitessError struct {
Err error
Description string
ID string
State State
}
func (o *VitessError) Error() string {
return o.Err.Error()
}
func (o *VitessError) Cause() error {
return o.Err
}
var _ error = (*VitessError)(nil)
func errorWithoutState(id string, code vtrpcpb.Code, short, long string) func(args ...any) *VitessError {
return func(args ...any) *VitessError {
s := short
if len(args) != 0 {
s = fmt.Sprintf(s, args...)
}
return &VitessError{
Err: New(code, id+": "+s),
Description: long,
ID: id,
}
}
}
func errorWithState(id string, code vtrpcpb.Code, state State, short, long string) func(args ...any) *VitessError {
return func(args ...any) *VitessError {
return &VitessError{
Err: NewErrorf(code, state, id+": "+short, args...),
Description: long,
ID: id,
State: state,
}
}
} | VT03021 = errorWithoutState("VT03021", vtrpcpb.Code_INVALID_ARGUMENT, "ambiguous column reference: %v", "The given column is ambiguous. You can use a table qualifier to make it unambiguous.") | random_line_split |
code.go | /*
Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vterrors
import (
"fmt"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
var (
VT03001 = errorWithState("VT03001", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "aggregate functions take a single argument '%s'", "This aggregation function only takes a single argument.")
VT03002 = errorWithState("VT03002", vtrpcpb.Code_INVALID_ARGUMENT, ForbidSchemaChange, "changing schema from '%s' to '%s' is not allowed", "This schema change is not allowed. You cannot change the keyspace of a table.")
VT03003 = errorWithState("VT03003", vtrpcpb.Code_INVALID_ARGUMENT, UnknownTable, "unknown table '%s' in MULTI DELETE", "The specified table in this DELETE statement is unknown.")
VT03004 = errorWithState("VT03004", vtrpcpb.Code_INVALID_ARGUMENT, NonUpdateableTable, "the target table %s of the DELETE is not updatable", "You cannot delete something that is not a real MySQL table.")
VT03005 = errorWithState("VT03005", vtrpcpb.Code_INVALID_ARGUMENT, WrongGroupField, "cannot group on '%s'", "The planner does not allow grouping on certain field. For instance, aggregation function.")
VT03006 = errorWithState("VT03006", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueCountOnRow, "column count does not match value count at row 1", "The number of columns you want to insert do not match the number of columns of your SELECT query.")
VT03007 = errorWithoutState("VT03007", vtrpcpb.Code_INVALID_ARGUMENT, "keyspace not specified", "You need to add a keyspace qualifier.")
VT03008 = errorWithState("VT03008", vtrpcpb.Code_INVALID_ARGUMENT, CantUseOptionHere, "incorrect usage/placement of '%s'", "The given token is not usable in this situation. Please refer to the MySQL documentation to learn more about your token's syntax.")
VT03009 = errorWithState("VT03009", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueForVar, "unexpected value type for '%s': %v", "You cannot assign this type to the given variable.")
VT03010 = errorWithState("VT03010", vtrpcpb.Code_INVALID_ARGUMENT, IncorrectGlobalLocalVar, "variable '%s' is a read only variable", "You cannot set the given variable as it is a read-only variable.")
VT03011 = errorWithoutState("VT03011", vtrpcpb.Code_INVALID_ARGUMENT, "invalid value type: %v", "The given value type is not accepted.")
VT03012 = errorWithoutState("VT03012", vtrpcpb.Code_INVALID_ARGUMENT, "invalid syntax: %s", "The syntax is invalid. Please refer to the MySQL documentation for the proper syntax.")
VT03013 = errorWithState("VT03013", vtrpcpb.Code_INVALID_ARGUMENT, NonUniqTable, "not unique table/alias: '%s'", "This table or alias name is already use. Please use another one that is unique.")
VT03014 = errorWithState("VT03014", vtrpcpb.Code_INVALID_ARGUMENT, BadFieldError, "unknown column '%d' in '%s'", "The given column is unknown.")
VT03015 = errorWithoutState("VT03015", vtrpcpb.Code_INVALID_ARGUMENT, "column has duplicate set values: '%v'", "Cannot assign multiple values to a column in an update statement.")
VT03016 = errorWithoutState("VT03016", vtrpcpb.Code_INVALID_ARGUMENT, "unknown vindex column: '%s'", "The given column is unknown in the vindex table.")
VT03017 = errorWithState("VT03017", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "where clause can only be of the type 'pos > <value>'", "This vstream where clause can only be a greater than filter.")
VT03018 = errorWithoutState("VT03018", vtrpcpb.Code_INVALID_ARGUMENT, "NEXT used on a non-sequence table", "You cannot use the NEXT syntax on a table that is not a sequence table.")
VT03019 = errorWithoutState("VT03019", vtrpcpb.Code_INVALID_ARGUMENT, "column %s not found", "The given column was not found or is not available.")
VT03020 = errorWithoutState("VT03020", vtrpcpb.Code_INVALID_ARGUMENT, "column %s not found in subquery", "The given column was not found in the subquery.")
VT03021 = errorWithoutState("VT03021", vtrpcpb.Code_INVALID_ARGUMENT, "ambiguous column reference: %v", "The given column is ambiguous. You can use a table qualifier to make it unambiguous.")
VT03022 = errorWithoutState("VT03022", vtrpcpb.Code_INVALID_ARGUMENT, "column %v not found in %v", "The given column cannot be found.")
VT03023 = errorWithoutState("VT03023", vtrpcpb.Code_INVALID_ARGUMENT, "INSERT not supported when targeting a key range: %s", "When targeting a range of shards, Vitess does not know which shard to send the INSERT to.")
VT03024 = errorWithoutState("VT03024", vtrpcpb.Code_INVALID_ARGUMENT, "'%s' user defined variable does not exists", "The query cannot be prepared using the user defined variable as it does not exists for this session.")
VT03025 = errorWithState("VT03025", vtrpcpb.Code_INVALID_ARGUMENT, WrongArguments, "Incorrect arguments to %s", "The execute statement have wrong number of arguments")
VT05001 = errorWithState("VT05001", vtrpcpb.Code_NOT_FOUND, DbDropExists, "cannot drop database '%s'; database does not exists", "The given database does not exist; Vitess cannot drop it.")
VT05002 = errorWithState("VT05002", vtrpcpb.Code_NOT_FOUND, BadDb, "cannot alter database '%s'; unknown database", "The given database does not exist; Vitess cannot alter it.")
VT05003 = errorWithState("VT05003", vtrpcpb.Code_NOT_FOUND, BadDb, "unknown database '%s' in vschema", "The given database does not exist in the VSchema.")
VT05004 = errorWithState("VT05004", vtrpcpb.Code_NOT_FOUND, UnknownTable, "table '%s' does not exist", "The given table is unknown.")
VT05005 = errorWithState("VT05005", vtrpcpb.Code_NOT_FOUND, NoSuchTable, "table '%s' does not exist in keyspace '%s'", "The given table does not exist in this keyspace.")
VT05006 = errorWithState("VT05006", vtrpcpb.Code_NOT_FOUND, UnknownSystemVariable, "unknown system variable '%s'", "The given system variable is unknown.")
VT05007 = errorWithoutState("VT05007", vtrpcpb.Code_NOT_FOUND, "no table info", "Table information is not available.")
VT06001 = errorWithState("VT06001", vtrpcpb.Code_ALREADY_EXISTS, DbCreateExists, "cannot create database '%s'; database exists", "The given database name already exists.")
VT07001 = errorWithState("VT07001", vtrpcpb.Code_PERMISSION_DENIED, KillDeniedError, "%s", "Kill statement is not allowed. More in docs about how to enable it and its limitations.")
VT09001 = errorWithState("VT09001", vtrpcpb.Code_FAILED_PRECONDITION, RequiresPrimaryKey, PrimaryVindexNotSet, "the table does not have a primary vindex, the operation is impossible.")
VT09002 = errorWithState("VT09002", vtrpcpb.Code_FAILED_PRECONDITION, InnodbReadOnly, "%s statement with a replica target", "This type of DML statement is not allowed on a replica target.")
VT09003 = errorWithoutState("VT09003", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT query does not have primary vindex column '%v' in the column list", "A vindex column is mandatory for the insert, please provide one.")
VT09004 = errorWithoutState("VT09004", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT should contain column list or the table should have authoritative columns in vschema", "You need to provide the list of columns you want to insert, or provide a VSchema with authoritative columns. If schema tracking is disabled you can enable it to automatically have authoritative columns.")
VT09005 = errorWithState("VT09005", vtrpcpb.Code_FAILED_PRECONDITION, NoDB, "no database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)", "A database must be selected.")
VT09006 = errorWithoutState("VT09006", vtrpcpb.Code_FAILED_PRECONDITION, "%s VITESS_MIGRATION works only on primary tablet", "VITESS_MIGRATION commands work only on primary tablets, you must send such commands to a primary tablet.")
VT09007 = errorWithoutState("VT09007", vtrpcpb.Code_FAILED_PRECONDITION, "%s VITESS_THROTTLED_APPS works only on primary tablet", "VITESS_THROTTLED_APPS commands work only on primary tablet, you must send such commands to a primary tablet.")
VT09008 = errorWithoutState("VT09008", vtrpcpb.Code_FAILED_PRECONDITION, "vexplain queries/all will actually run queries", "vexplain queries/all will actually run queries. `/*vt+ EXECUTE_DML_QUERIES */` must be set to run DML queries in vtexplain. Example: `vexplain /*vt+ EXECUTE_DML_QUERIES */ queries delete from t1`")
VT09009 = errorWithoutState("VT09009", vtrpcpb.Code_FAILED_PRECONDITION, "stream is supported only for primary tablet type, current type: %v", "Stream is only supported for primary tablets, please use a stream on those tablets.")
VT09010 = errorWithoutState("VT09010", vtrpcpb.Code_FAILED_PRECONDITION, "SHOW VITESS_THROTTLER STATUS works only on primary tablet", "SHOW VITESS_THROTTLER STATUS works only on primary tablet.")
VT09011 = errorWithState("VT09011", vtrpcpb.Code_FAILED_PRECONDITION, UnknownStmtHandler, "Unknown prepared statement handler (%s) given to %s", "The prepared statement is not available")
VT09012 = errorWithoutState("VT09012", vtrpcpb.Code_FAILED_PRECONDITION, "%s statement with %s tablet not allowed", "This type of statement is not allowed on the given tablet.")
VT09013 = errorWithoutState("VT09013", vtrpcpb.Code_FAILED_PRECONDITION, "semi-sync plugins are not loaded", "Durability policy wants Vitess to use semi-sync, but the MySQL instances don't have the semi-sync plugin loaded.")
VT09014 = errorWithoutState("VT09014", vtrpcpb.Code_FAILED_PRECONDITION, "vindex cannot be modified", "The vindex cannot be used as table in DML statement")
VT09015 = errorWithoutState("VT09015", vtrpcpb.Code_FAILED_PRECONDITION, "schema tracking required", "This query cannot be planned without more information on the SQL schema. Please turn on schema tracking or add authoritative columns information to your VSchema.")
VT09016 = errorWithState("VT09016", vtrpcpb.Code_FAILED_PRECONDITION, RowIsReferenced2, "Cannot delete or update a parent row: a foreign key constraint fails", "SET DEFAULT is not supported by InnoDB")
VT10001 = errorWithoutState("VT10001", vtrpcpb.Code_ABORTED, "foreign key constraints are not allowed", "Foreign key constraints are not allowed, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/.")
VT12001 = errorWithoutState("VT12001", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: %s", "This statement is unsupported by Vitess. Please rewrite your query to use supported syntax.")
VT12002 = errorWithoutState("VT12002", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard foreign keys", "Vitess does not support cross shard foreign keys.")
VT12003 = errorWithoutState("VT12002", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: foreign keys management at vitess", "Vitess does not support managing foreign keys tables.")
// VT13001 General Error
VT13001 = errorWithoutState("VT13001", vtrpcpb.Code_INTERNAL, "[BUG] %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.")
VT13002 = errorWithoutState("VT13002", vtrpcpb.Code_INTERNAL, "unexpected AST struct for query: %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.")
VT14001 = errorWithoutState("VT14001", vtrpcpb.Code_UNAVAILABLE, "connection error", "The connection failed.")
VT14002 = errorWithoutState("VT14002", vtrpcpb.Code_UNAVAILABLE, "no available connection", "No available connection.")
VT14003 = errorWithoutState("VT14003", vtrpcpb.Code_UNAVAILABLE, "no connection for tablet %v", "No connection for the given tablet.")
VT14004 = errorWithoutState("VT14004", vtrpcpb.Code_UNAVAILABLE, "cannot find keyspace for: %s", "The specified keyspace could not be found.")
VT14005 = errorWithoutState("VT14005", vtrpcpb.Code_UNAVAILABLE, "cannot lookup sidecar database for keyspace: %s", "Failed to read sidecar database identifier.")
Errors = []func(args ...any) *VitessError{
VT03001,
VT03002,
VT03003,
VT03004,
VT03005,
VT03006,
VT03007,
VT03008,
VT03009,
VT03010,
VT03011,
VT03012,
VT03013,
VT03014,
VT03015,
VT03016,
VT03017,
VT03018,
VT03019,
VT03020,
VT03021,
VT03022,
VT03023,
VT03024,
VT03025,
VT05001,
VT05002,
VT05003,
VT05004,
VT05005,
VT05006,
VT05007,
VT06001,
VT07001,
VT09001,
VT09002,
VT09003,
VT09004,
VT09005,
VT09006,
VT09007,
VT09008,
VT09009,
VT09010,
VT09011,
VT09012,
VT09013,
VT09014,
VT09015,
VT09016,
VT10001,
VT12001,
VT12002,
VT12003,
VT13001,
VT13002,
VT14001,
VT14002,
VT14003,
VT14004,
VT14005,
}
)
type VitessError struct {
Err error
Description string
ID string
State State
}
func (o *VitessError) Error() string {
return o.Err.Error()
}
func (o *VitessError) Cause() error {
return o.Err
}
var _ error = (*VitessError)(nil)
func errorWithoutState(id string, code vtrpcpb.Code, short, long string) func(args ...any) *VitessError |
func errorWithState(id string, code vtrpcpb.Code, state State, short, long string) func(args ...any) *VitessError {
return func(args ...any) *VitessError {
return &VitessError{
Err: NewErrorf(code, state, id+": "+short, args...),
Description: long,
ID: id,
State: state,
}
}
}
| {
return func(args ...any) *VitessError {
s := short
if len(args) != 0 {
s = fmt.Sprintf(s, args...)
}
return &VitessError{
Err: New(code, id+": "+s),
Description: long,
ID: id,
}
}
} | identifier_body |
code.go | /*
Copyright 2022 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vterrors
import (
"fmt"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
)
var (
VT03001 = errorWithState("VT03001", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "aggregate functions take a single argument '%s'", "This aggregation function only takes a single argument.")
VT03002 = errorWithState("VT03002", vtrpcpb.Code_INVALID_ARGUMENT, ForbidSchemaChange, "changing schema from '%s' to '%s' is not allowed", "This schema change is not allowed. You cannot change the keyspace of a table.")
VT03003 = errorWithState("VT03003", vtrpcpb.Code_INVALID_ARGUMENT, UnknownTable, "unknown table '%s' in MULTI DELETE", "The specified table in this DELETE statement is unknown.")
VT03004 = errorWithState("VT03004", vtrpcpb.Code_INVALID_ARGUMENT, NonUpdateableTable, "the target table %s of the DELETE is not updatable", "You cannot delete something that is not a real MySQL table.")
VT03005 = errorWithState("VT03005", vtrpcpb.Code_INVALID_ARGUMENT, WrongGroupField, "cannot group on '%s'", "The planner does not allow grouping on certain field. For instance, aggregation function.")
VT03006 = errorWithState("VT03006", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueCountOnRow, "column count does not match value count at row 1", "The number of columns you want to insert do not match the number of columns of your SELECT query.")
VT03007 = errorWithoutState("VT03007", vtrpcpb.Code_INVALID_ARGUMENT, "keyspace not specified", "You need to add a keyspace qualifier.")
VT03008 = errorWithState("VT03008", vtrpcpb.Code_INVALID_ARGUMENT, CantUseOptionHere, "incorrect usage/placement of '%s'", "The given token is not usable in this situation. Please refer to the MySQL documentation to learn more about your token's syntax.")
VT03009 = errorWithState("VT03009", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueForVar, "unexpected value type for '%s': %v", "You cannot assign this type to the given variable.")
VT03010 = errorWithState("VT03010", vtrpcpb.Code_INVALID_ARGUMENT, IncorrectGlobalLocalVar, "variable '%s' is a read only variable", "You cannot set the given variable as it is a read-only variable.")
VT03011 = errorWithoutState("VT03011", vtrpcpb.Code_INVALID_ARGUMENT, "invalid value type: %v", "The given value type is not accepted.")
VT03012 = errorWithoutState("VT03012", vtrpcpb.Code_INVALID_ARGUMENT, "invalid syntax: %s", "The syntax is invalid. Please refer to the MySQL documentation for the proper syntax.")
VT03013 = errorWithState("VT03013", vtrpcpb.Code_INVALID_ARGUMENT, NonUniqTable, "not unique table/alias: '%s'", "This table or alias name is already use. Please use another one that is unique.")
VT03014 = errorWithState("VT03014", vtrpcpb.Code_INVALID_ARGUMENT, BadFieldError, "unknown column '%d' in '%s'", "The given column is unknown.")
VT03015 = errorWithoutState("VT03015", vtrpcpb.Code_INVALID_ARGUMENT, "column has duplicate set values: '%v'", "Cannot assign multiple values to a column in an update statement.")
VT03016 = errorWithoutState("VT03016", vtrpcpb.Code_INVALID_ARGUMENT, "unknown vindex column: '%s'", "The given column is unknown in the vindex table.")
VT03017 = errorWithState("VT03017", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "where clause can only be of the type 'pos > <value>'", "This vstream where clause can only be a greater than filter.")
VT03018 = errorWithoutState("VT03018", vtrpcpb.Code_INVALID_ARGUMENT, "NEXT used on a non-sequence table", "You cannot use the NEXT syntax on a table that is not a sequence table.")
VT03019 = errorWithoutState("VT03019", vtrpcpb.Code_INVALID_ARGUMENT, "column %s not found", "The given column was not found or is not available.")
VT03020 = errorWithoutState("VT03020", vtrpcpb.Code_INVALID_ARGUMENT, "column %s not found in subquery", "The given column was not found in the subquery.")
VT03021 = errorWithoutState("VT03021", vtrpcpb.Code_INVALID_ARGUMENT, "ambiguous column reference: %v", "The given column is ambiguous. You can use a table qualifier to make it unambiguous.")
VT03022 = errorWithoutState("VT03022", vtrpcpb.Code_INVALID_ARGUMENT, "column %v not found in %v", "The given column cannot be found.")
VT03023 = errorWithoutState("VT03023", vtrpcpb.Code_INVALID_ARGUMENT, "INSERT not supported when targeting a key range: %s", "When targeting a range of shards, Vitess does not know which shard to send the INSERT to.")
VT03024 = errorWithoutState("VT03024", vtrpcpb.Code_INVALID_ARGUMENT, "'%s' user defined variable does not exists", "The query cannot be prepared using the user defined variable as it does not exists for this session.")
VT03025 = errorWithState("VT03025", vtrpcpb.Code_INVALID_ARGUMENT, WrongArguments, "Incorrect arguments to %s", "The execute statement have wrong number of arguments")
VT05001 = errorWithState("VT05001", vtrpcpb.Code_NOT_FOUND, DbDropExists, "cannot drop database '%s'; database does not exists", "The given database does not exist; Vitess cannot drop it.")
VT05002 = errorWithState("VT05002", vtrpcpb.Code_NOT_FOUND, BadDb, "cannot alter database '%s'; unknown database", "The given database does not exist; Vitess cannot alter it.")
VT05003 = errorWithState("VT05003", vtrpcpb.Code_NOT_FOUND, BadDb, "unknown database '%s' in vschema", "The given database does not exist in the VSchema.")
VT05004 = errorWithState("VT05004", vtrpcpb.Code_NOT_FOUND, UnknownTable, "table '%s' does not exist", "The given table is unknown.")
VT05005 = errorWithState("VT05005", vtrpcpb.Code_NOT_FOUND, NoSuchTable, "table '%s' does not exist in keyspace '%s'", "The given table does not exist in this keyspace.")
VT05006 = errorWithState("VT05006", vtrpcpb.Code_NOT_FOUND, UnknownSystemVariable, "unknown system variable '%s'", "The given system variable is unknown.")
VT05007 = errorWithoutState("VT05007", vtrpcpb.Code_NOT_FOUND, "no table info", "Table information is not available.")
VT06001 = errorWithState("VT06001", vtrpcpb.Code_ALREADY_EXISTS, DbCreateExists, "cannot create database '%s'; database exists", "The given database name already exists.")
VT07001 = errorWithState("VT07001", vtrpcpb.Code_PERMISSION_DENIED, KillDeniedError, "%s", "Kill statement is not allowed. More in docs about how to enable it and its limitations.")
VT09001 = errorWithState("VT09001", vtrpcpb.Code_FAILED_PRECONDITION, RequiresPrimaryKey, PrimaryVindexNotSet, "the table does not have a primary vindex, the operation is impossible.")
VT09002 = errorWithState("VT09002", vtrpcpb.Code_FAILED_PRECONDITION, InnodbReadOnly, "%s statement with a replica target", "This type of DML statement is not allowed on a replica target.")
VT09003 = errorWithoutState("VT09003", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT query does not have primary vindex column '%v' in the column list", "A vindex column is mandatory for the insert, please provide one.")
VT09004 = errorWithoutState("VT09004", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT should contain column list or the table should have authoritative columns in vschema", "You need to provide the list of columns you want to insert, or provide a VSchema with authoritative columns. If schema tracking is disabled you can enable it to automatically have authoritative columns.")
VT09005 = errorWithState("VT09005", vtrpcpb.Code_FAILED_PRECONDITION, NoDB, "no database selected: use keyspace<:shard><@type> or keyspace<[range]><@type> (<> are optional)", "A database must be selected.")
VT09006 = errorWithoutState("VT09006", vtrpcpb.Code_FAILED_PRECONDITION, "%s VITESS_MIGRATION works only on primary tablet", "VITESS_MIGRATION commands work only on primary tablets, you must send such commands to a primary tablet.")
VT09007 = errorWithoutState("VT09007", vtrpcpb.Code_FAILED_PRECONDITION, "%s VITESS_THROTTLED_APPS works only on primary tablet", "VITESS_THROTTLED_APPS commands work only on primary tablet, you must send such commands to a primary tablet.")
VT09008 = errorWithoutState("VT09008", vtrpcpb.Code_FAILED_PRECONDITION, "vexplain queries/all will actually run queries", "vexplain queries/all will actually run queries. `/*vt+ EXECUTE_DML_QUERIES */` must be set to run DML queries in vtexplain. Example: `vexplain /*vt+ EXECUTE_DML_QUERIES */ queries delete from t1`")
VT09009 = errorWithoutState("VT09009", vtrpcpb.Code_FAILED_PRECONDITION, "stream is supported only for primary tablet type, current type: %v", "Stream is only supported for primary tablets, please use a stream on those tablets.")
VT09010 = errorWithoutState("VT09010", vtrpcpb.Code_FAILED_PRECONDITION, "SHOW VITESS_THROTTLER STATUS works only on primary tablet", "SHOW VITESS_THROTTLER STATUS works only on primary tablet.")
VT09011 = errorWithState("VT09011", vtrpcpb.Code_FAILED_PRECONDITION, UnknownStmtHandler, "Unknown prepared statement handler (%s) given to %s", "The prepared statement is not available")
VT09012 = errorWithoutState("VT09012", vtrpcpb.Code_FAILED_PRECONDITION, "%s statement with %s tablet not allowed", "This type of statement is not allowed on the given tablet.")
VT09013 = errorWithoutState("VT09013", vtrpcpb.Code_FAILED_PRECONDITION, "semi-sync plugins are not loaded", "Durability policy wants Vitess to use semi-sync, but the MySQL instances don't have the semi-sync plugin loaded.")
VT09014 = errorWithoutState("VT09014", vtrpcpb.Code_FAILED_PRECONDITION, "vindex cannot be modified", "The vindex cannot be used as table in DML statement")
VT09015 = errorWithoutState("VT09015", vtrpcpb.Code_FAILED_PRECONDITION, "schema tracking required", "This query cannot be planned without more information on the SQL schema. Please turn on schema tracking or add authoritative columns information to your VSchema.")
VT09016 = errorWithState("VT09016", vtrpcpb.Code_FAILED_PRECONDITION, RowIsReferenced2, "Cannot delete or update a parent row: a foreign key constraint fails", "SET DEFAULT is not supported by InnoDB")
VT10001 = errorWithoutState("VT10001", vtrpcpb.Code_ABORTED, "foreign key constraints are not allowed", "Foreign key constraints are not allowed, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/.")
VT12001 = errorWithoutState("VT12001", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: %s", "This statement is unsupported by Vitess. Please rewrite your query to use supported syntax.")
VT12002 = errorWithoutState("VT12002", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard foreign keys", "Vitess does not support cross shard foreign keys.")
VT12003 = errorWithoutState("VT12002", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: foreign keys management at vitess", "Vitess does not support managing foreign keys tables.")
// VT13001 General Error
VT13001 = errorWithoutState("VT13001", vtrpcpb.Code_INTERNAL, "[BUG] %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.")
VT13002 = errorWithoutState("VT13002", vtrpcpb.Code_INTERNAL, "unexpected AST struct for query: %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.")
VT14001 = errorWithoutState("VT14001", vtrpcpb.Code_UNAVAILABLE, "connection error", "The connection failed.")
VT14002 = errorWithoutState("VT14002", vtrpcpb.Code_UNAVAILABLE, "no available connection", "No available connection.")
VT14003 = errorWithoutState("VT14003", vtrpcpb.Code_UNAVAILABLE, "no connection for tablet %v", "No connection for the given tablet.")
VT14004 = errorWithoutState("VT14004", vtrpcpb.Code_UNAVAILABLE, "cannot find keyspace for: %s", "The specified keyspace could not be found.")
VT14005 = errorWithoutState("VT14005", vtrpcpb.Code_UNAVAILABLE, "cannot lookup sidecar database for keyspace: %s", "Failed to read sidecar database identifier.")
Errors = []func(args ...any) *VitessError{
VT03001,
VT03002,
VT03003,
VT03004,
VT03005,
VT03006,
VT03007,
VT03008,
VT03009,
VT03010,
VT03011,
VT03012,
VT03013,
VT03014,
VT03015,
VT03016,
VT03017,
VT03018,
VT03019,
VT03020,
VT03021,
VT03022,
VT03023,
VT03024,
VT03025,
VT05001,
VT05002,
VT05003,
VT05004,
VT05005,
VT05006,
VT05007,
VT06001,
VT07001,
VT09001,
VT09002,
VT09003,
VT09004,
VT09005,
VT09006,
VT09007,
VT09008,
VT09009,
VT09010,
VT09011,
VT09012,
VT09013,
VT09014,
VT09015,
VT09016,
VT10001,
VT12001,
VT12002,
VT12003,
VT13001,
VT13002,
VT14001,
VT14002,
VT14003,
VT14004,
VT14005,
}
)
type VitessError struct {
Err error
Description string
ID string
State State
}
func (o *VitessError) Error() string {
return o.Err.Error()
}
func (o *VitessError) Cause() error {
return o.Err
}
var _ error = (*VitessError)(nil)
func errorWithoutState(id string, code vtrpcpb.Code, short, long string) func(args ...any) *VitessError {
return func(args ...any) *VitessError {
s := short
if len(args) != 0 |
return &VitessError{
Err: New(code, id+": "+s),
Description: long,
ID: id,
}
}
}
func errorWithState(id string, code vtrpcpb.Code, state State, short, long string) func(args ...any) *VitessError {
return func(args ...any) *VitessError {
return &VitessError{
Err: NewErrorf(code, state, id+": "+short, args...),
Description: long,
ID: id,
State: state,
}
}
}
| {
s = fmt.Sprintf(s, args...)
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.