file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
EncryptDecryptTextFile.py | import os
import pprint
import math
import sys
import datetime as dt
from pathlib import Path
import RotateCipher
import ShiftCipher
import TranspositionCipher
def process_textfile(
string_path: str,
encryption_algorithm: str,
algorithm_key: float,
output_folderpath: str = str(
Path(os.path.expandvars("$HOME")).anchor
) + r"/EncryptDecrypt/",
output_filename: str = r"EncryptDecrypt.txt",
to_decrypt=False,
**kwargs
):
encryption_algorithm = encryption_algorithm.lower()
available_algorithms = ["rotate", "transposition"]
if encryption_algorithm not in available_algorithms:
pprint.pprint(
["Enter an algorithm from the list. Not case-sensitive.",
available_algorithms]
)
return None
# A single dictionary may be passed as a **kwarg if it is the
# ONLY KEY-WORD ARGUMENT. Else, error is thrown.
lst_kwargs = list(kwargs.values())
if len(lst_kwargs) == 1 and (isinstance(lst_kwargs[0], dict)):
kwargs = lst_kwargs[0]
# Key in **kwargs overwrites `algorithm_key` function parameter.
if "algorithm_key" in kwargs:
algorithm_key = float(kwargs["algorithm_key"])
# Convert strings saying "True" or "False" to booleans.
for key, value in kwargs.items():
str_value = str(value)
if str_value.lower() == "False":
kwargs[key] = False
elif str_value.lower() == "True":
kwargs[key] = True
output_filename = ('/' + output_filename)
if not (output_filename.endswith(".txt")):
output_filename += ".txt"
full_outputpath = output_folderpath + output_filename
path_input = Path(string_path)
# fileobj_target = open(path_input, 'r') # Only for Python 3.6 and later.
fileobj_target = open(str(path_input), 'r')
lst_input = fileobj_target.readlines()
# str_input = '\n'.join(lst_input)
str_input = "".join(lst_input)
output_string = "None"
print(
"""Started processing.
Key-word arguments for %s algorithm:""" % encryption_algorithm
)
pprint.pprint(kwargs)
if (encryption_algorithm == "transposition") and to_decrypt is True:
output_string = ''.join(
TranspositionCipher.decrypt_transposition(
str_input, int(algorithm_key)
)
)
elif encryption_algorithm == "transposition" and not to_decrypt:
output_string = ''.join(
TranspositionCipher.encrypt_transposition(
str_input, int(algorithm_key)
)
)
elif encryption_algorithm == "rotate":
warning = """
When the algorithm is set to rotate, the "to_decrypt" parameter
is ignored. To decrypt, set the key-word argument shift left
so that it reverses the shift direction during encryption.
Ex: If the text was shifted left, i.e. values were swapped
with those "higher" up on the list read from left to right, pass
the key-word argument shift_left=False to decrypt.
RotateCipher's methods can return a list. However, it is
forced to always return a string. Passing return_list=True as
a key-word argument will have no effect. The argument is not
passed to RotateCipher.
"""
# pprint.pprint(warning) # Included literl \n and single quotes.
print(warning)
to_shiftleft = True
if "shift_left" in kwargs:
to_shiftleft = kwargs["shift_left"]
process_numbers = False
if "shift_numbers" in kwargs:
process_numbers = kwargs["shift_numbers"]
output_string = RotateCipher.rot13_e(
string=str_input,
shift_left=to_shiftleft,
rotations=int(algorithm_key),
# return_list=kwargs["return_list"], # Removed for safety.
shift_numbers=process_numbers
)
if not (os.path.exists(output_folderpath)):
os.mkdir(output_folderpath)
fileobj_output = open(
full_outputpath,
'a' # Create a file and open it for writing. Append if exists.
)
fileobj_output.write(
"\n=====\nEncryptDecrypt Output on\n%s\n=====\n" %
dt.datetime.now()
)
fileobj_output.write(output_string)
fileobj_output.close()
print("Done processing. Output folder:\n{}".format(
Path(full_outputpath)
)
)
return {
"output_file": Path(full_outputpath).resolve(),
"output_text": output_string
}
def manual_test():
dict_processedtext = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 1.txt",
encryption_algorithm="rotate",
algorithm_key=1,
shift_left=True
)
print("Encrypt ROT1 with default values.")
# pprint.pprint(
# dict_processedtext
# )
print(dict_processedtext["output_file"])
dict_processedtext2 = process_textfile(
string_path=dict_processedtext["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 1 Decrypted",
shift_left=False
)
print("Decrypt ROT1 with all values user-supplied.")
print(dict_processedtext["output_file"])
for i in range(2):
dict_processedtext3a = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 2.txt",
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Encryptions"
)
print(dict_processedtext3a["output_file"])
dict_processedtext3b = process_textfile(
string_path=dict_processedtext3a["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 2 Decrypted",
shift_left=False
)
print(dict_processedtext3b["output_file"])
return None
def main():
while True:
print("Press Enter or New Line to skip entering any input.\t")
task = input("Encrypt or decrypt? Encrypts by default. Press E/D.\t")
algo = input("Algorithm? Uses Rotate by default.\t")
algorithm_key = float(input("Key? Uses 1 by default.\t"))
input_filepath = input(
"""Mandatory / Required.
Full path of target file. Includes file name and extension.\n""")
output_folder = input(
"Optional. Give the path of the output folder.\n"
)
output_file = input(
"Optional. Default output file name is EncryptDecrypt.txt.\n")
keyword_arguments = input(
"""Last question. Depends on algorithm.
Format: "key=value,key2,value2,...".
Use comma with no space as separator for two or more items.\n"""
)
while len(input_filepath) == 0:
input_filepath = input(
"""Mandatory / Required.
Full path of target file.
Includes file name and extension.\n"""
)
dict_kwargs = dict()
for pair in keyword_arguments.split(','):
try:
key, pair = tuple(pair.split('='))
dict_kwargs[key] = pair
except ValueError:
break
to_decrypt = False
if task.lower().startswith('d'):
to_decrypt = True
if len(output_folder) == 0:
output_folder = str(Path.cwd().parent / r"/EncryptDecrypt/")
if len(output_file) == 0:
|
if len(algo) == 0:
algo = "rotate"
pprint.pprint(
process_textfile(
string_path=input_filepath,
encryption_algorithm=algo,
algorithm_key=algorithm_key,
output_folderpath=output_folder,
output_filename=output_file,
to_decrypt=to_decrypt,
kwargs_dict=dict_kwargs
)
)
print(
"""Done Running.
Press Q to quit, any other key to process another file.""")
to_quit = input()
if to_quit.lower().startswith("q"):
sys.exit()
else:
continue
# manual_test()
return None
if __name__ == "__main__":
main()
"""
Notes:
*
The declared parameter data types in python functions are not enforced as of
version 3.4.
*
For some reason, even if the name "key" was a parameter for process_textfile,
it was being passed to rot13_e as a string. In the function process_textfile,
Visual Basic also listed "key" as a string when passed to rot13_e even though
the function definition specified its data type as a float and the user input
for "key" was also converted to a float in the main function. This was caused
by a for-loop. When VS Code followed the definition of key (F12) when it
was passed to rot13_e, VS Code pointed to the temporary variable "key" in a
for-loop. The parameter name was changed as a quick fix.
- Adding an else clause to the for-loop did not fix it.
- The for-loop declaration was funciton-level code while the call to rot13_e
that bugged was inside an else-clause. The else-clause holding the call to
rot13_e was also function-level, same as the for-loop declaration. The call
to RotateCipher.rot13_e was assigned to output_string.
"""
| output_file = "EncryptDecrypt.txt" | conditional_block |
row.rs | //! This module contains definition of table rows stuff
use std::io::{Error, Write};
use std::iter::FromIterator;
use std::slice::{Iter, IterMut};
// use std::vec::IntoIter;
use std::ops::{Index, IndexMut};
use super::Terminal;
use super::format::{ColumnPosition, TableFormat};
use super::utils::NEWLINE;
use super::Cell;
/// Represent a table row made of cells
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct Row {
cells: Vec<Cell>,
}
impl Row {
/// Create a new `Row` backed with `cells` vector
pub fn new(cells: Vec<Cell>) -> Row {
Row { cells }
}
/// Create an row of length `size`, with empty strings stored
pub fn empty() -> Row {
Self::new(vec![Cell::default(); 0])
}
/// Count the number of column required in the table grid.
/// It takes into account horizontal spanning of cells. For
/// example, a cell with an hspan of 3 will add 3 column to the grid
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn column_count(&self) -> usize {
self.cells.iter().map(|c| c.get_hspan()).sum()
}
/// Get the number of cells in this row
pub fn len(&self) -> usize {
self.cells.len()
// self.cells.iter().map(|c| c.get_hspan()).sum()
}
/// Check if the row is empty (has no cell)
pub fn is_empty(&self) -> bool {
self.cells.is_empty()
}
/// Get the height of this row
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
fn get_height(&self) -> usize {
let mut height = 1; // Minimum height must be 1 to print empty rows
for cell in &self.cells {
let h = cell.get_height();
if h > height {
height = h;
}
}
height
}
/// Get the minimum width required by the cell in the column `column`.
/// Return 0 if the cell does not exist in this row
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn get_column_width(&self, column: usize, format: &TableFormat) -> usize {
let mut i = 0;
for c in &self.cells {
if i + c.get_hspan() > column {
if c.get_hspan() == 1 {
return c.get_width();
}
let (lp, rp) = format.get_padding();
let sep = format
.get_column_separator(ColumnPosition::Intern)
.map(|_| 1)
.unwrap_or_default();
let rem = lp + rp + sep;
let mut w = c.get_width();
if w > rem {
w -= rem;
} else {
w = 0;
}
return (w as f64 / c.get_hspan() as f64).ceil() as usize;
}
i += c.get_hspan();
}
0
}
/// Get the cell at index `idx`
pub fn get_cell(&self, idx: usize) -> Option<&Cell> {
self.cells.get(idx)
}
/// Get the mutable cell at index `idx`
pub fn get_mut_cell(&mut self, idx: usize) -> Option<&mut Cell> {
self.cells.get_mut(idx)
}
/// Set the `cell` in the row at the given `idx` index
pub fn set_cell(&mut self, cell: Cell, idx: usize) -> Result<(), &str> {
if idx >= self.len() {
return Err("Cannot find cell");
}
self.cells[idx] = cell;
Ok(())
}
/// Append a `cell` at the end of the row
pub fn add_cell(&mut self, cell: Cell) {
self.cells.push(cell);
}
/// Insert `cell` at position `index`. If `index` is higher than the row length,
/// the cell will be appended at the end
pub fn insert_cell(&mut self, index: usize, cell: Cell) {
if index < self.cells.len() {
self.cells.insert(index, cell);
} else {
self.add_cell(cell);
}
}
/// Remove the cell at position `index`. Silently skip if this cell does not exist
pub fn remove_cell(&mut self, index: usize) {
if index < self.cells.len() {
self.cells.remove(index);
}
}
/// Returns an immutable iterator over cells
pub fn iter(&self) -> Iter<Cell> {
self.cells.iter()
}
/// Returns an mutable iterator over cells
pub fn iter_mut(&mut self) -> IterMut<Cell> {
self.cells.iter_mut()
}
/// Internal only
fn __print<T: Write + ?Sized, F>(
&self,
out: &mut T,
format: &TableFormat,
col_width: &[usize],
f: F,
) -> Result<usize, Error>
where
F: Fn(&Cell, &mut T, usize, usize, bool) -> Result<(), Error>,
{
let height = self.get_height();
for i in 0..height {
//TODO: Wrap this into dedicated function one day
out.write_all(&vec![b' '; format.get_indent()])?;
format.print_column_separator(out, ColumnPosition::Left)?;
let (lp, rp) = format.get_padding();
let mut j = 0;
let mut hspan = 0; // The additional offset caused by cell's horizontal spanning
while j + hspan < col_width.len() {
out.write_all(&vec![b' '; lp])?; // Left padding
// skip_r_fill skip filling the end of the last cell if there's no character
// delimiting the end of the table
let skip_r_fill = (j == col_width.len() - 1)
&& format.get_column_separator(ColumnPosition::Right).is_none();
match self.get_cell(j) {
Some(c) => {
// In case of horizontal spanning, width is the sum of all spanned columns' width
let mut w = col_width[j + hspan..j + hspan + c.get_hspan()].iter().sum();
let real_span = c.get_hspan() - 1;
w += real_span * (lp + rp)
+ real_span
* format
.get_column_separator(ColumnPosition::Intern)
.map(|_| 1)
.unwrap_or_default();
// Print cell content
f(c, out, i, w, skip_r_fill)?;
hspan += real_span; // Add span to offset
}
None => f(&Cell::default(), out, i, col_width[j + hspan], skip_r_fill)?,
};
out.write_all(&vec![b' '; rp])?; // Right padding
if j + hspan < col_width.len() - 1 {
format.print_column_separator(out, ColumnPosition::Intern)?;
}
j += 1;
}
format.print_column_separator(out, ColumnPosition::Right)?;
out.write_all(NEWLINE)?;
}
Ok(height)
}
/// Print the row to `out`, with `separator` as column separator, and `col_width`
/// specifying the width of each columns. Returns the number of printed lines
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn print<T: Write + ?Sized>(
&self,
out: &mut T,
format: &TableFormat,
col_width: &[usize],
) -> Result<usize, Error> {
self.__print(out, format, col_width, Cell::print)
}
/// Print the row to terminal `out`, with `separator` as column separator, and `col_width`
/// specifying the width of each columns. Apply style when needed. returns the number of printed lines
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn print_term<T: Terminal + ?Sized>(
&self,
out: &mut T,
format: &TableFormat,
col_width: &[usize],
) -> Result<usize, Error> {
self.__print(out, format, col_width, Cell::print_term)
}
/// Print the row in HTML format to `out`.
///
/// If the row is has fewer columns than `col_num`, the row is padded with empty cells.
pub fn print_html<T: Write + ?Sized>(&self, out: &mut T, col_num: usize) -> Result<(), Error> |
}
impl Default for Row {
fn default() -> Row {
Row::empty()
}
}
impl Index<usize> for Row {
type Output = Cell;
fn index(&self, idx: usize) -> &Self::Output {
&self.cells[idx]
}
}
impl IndexMut<usize> for Row {
fn index_mut(&mut self, idx: usize) -> &mut Self::Output {
&mut self.cells[idx]
}
}
impl<A: ToString> FromIterator<A> for Row {
fn from_iter<T>(iterator: T) -> Row
where
T: IntoIterator<Item = A>,
{
Self::new(iterator.into_iter().map(|ref e| Cell::from(e)).collect())
}
}
impl<T, A> From<T> for Row
where
A: ToString,
T: IntoIterator<Item = A>,
{
fn from(it: T) -> Row {
Self::from_iter(it)
}
}
impl<'a> IntoIterator for &'a Row {
type Item = &'a Cell;
type IntoIter = Iter<'a, Cell>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
// impl IntoIterator for Row {
// type Item = Cell;
// type IntoIter = IntoIter<Cell>;
// fn into_iter(self) -> Self::IntoIter {
// self.cells.into_iter()
// }
// }
impl<'a> IntoIterator for &'a mut Row {
type Item = &'a mut Cell;
type IntoIter = IterMut<'a, Cell>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
impl<S: ToString> Extend<S> for Row {
fn extend<T: IntoIterator<Item = S>>(&mut self, iter: T) {
self.cells
.extend(iter.into_iter().map(|s| Cell::new(&s.to_string())));
}
}
// impl <S: Into<Cell>> Extend<S> for Row {
// fn extend<T: IntoIterator<Item=S>>(&mut self, iter: T) {
// self.cells.extend(iter.into_iter().map(|s| s.into()));
// }
// }
/// This macro simplifies `Row` creation
///
/// The syntax support style spec
/// # Example
/// ```
/// # #[macro_use] extern crate prettytable;
/// # fn main() {
/// // Create a normal row
/// let row1 = row!["Element 1", "Element 2", "Element 3"];
/// // Create a row with all cells formatted with red foreground color, yellow background color
/// // bold, italic, align in the center of the cell
/// let row2 = row![FrBybic => "Element 1", "Element 2", "Element 3"];
/// // Create a row with first cell in blue, second one in red, and last one with default style
/// let row3 = row![Fb->"blue", Fr->"red", "normal"];
/// // Do something with rows
/// # drop(row1);
/// # drop(row2);
/// # drop(row3);
/// # }
/// ```
///
/// For details about style specifier syntax, check doc for [`Cell::style_spec`](cell/struct.Cell.html#method.style_spec) method
#[macro_export]
macro_rules! row {
(($($out:tt)*);) => (vec![$($out)*]);
(($($out:tt)*); $value:expr) => (vec![$($out)* $crate::cell!($value)]);
(($($out:tt)*); $value:expr, $($n:tt)*) => ($crate::row!(($($out)* $crate::cell!($value),); $($n)*));
(($($out:tt)*); $style:ident -> $value:expr) => (vec![$($out)* $crate::cell!($style -> $value)]);
(($($out:tt)*); $style:ident -> $value:expr, $($n: tt)*) => ($crate::row!(($($out)* $crate::cell!($style -> $value),); $($n)*));
($($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($content)), *])); // This line may not be needed starting from Rust 1.20
($style:ident => $($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *]));
($style:ident => $($content:expr,) *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *]));
($($content:tt)*) => ($crate::Row::new($crate::row!((); $($content)*)));
}
#[cfg(test)]
mod tests {
use super::*;
use Cell;
#[test]
fn row_default_empty() {
let row1 = Row::default();
assert_eq!(row1.len(), 0);
assert!(row1.is_empty());
}
#[test]
fn get_add_set_cell() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
assert_eq!(row.len(), 3);
assert!(row.get_mut_cell(12).is_none());
let c1 = row.get_mut_cell(0).unwrap().clone();
assert_eq!(c1.get_content(), "foo");
let c1 = Cell::from(&"baz");
assert!(row.set_cell(c1.clone(), 1000).is_err());
assert!(row.set_cell(c1.clone(), 0).is_ok());
assert_eq!(row.get_cell(0).unwrap().get_content(), "baz");
row.add_cell(c1.clone());
assert_eq!(row.len(), 4);
assert_eq!(row.get_cell(3).unwrap().get_content(), "baz");
}
#[test]
fn insert_cell() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
assert_eq!(row.len(), 3);
let cell = Cell::new("baz");
row.insert_cell(1000, cell.clone());
assert_eq!(row.len(), 4);
assert_eq!(row.get_cell(3).unwrap().get_content(), "baz");
row.insert_cell(1, cell.clone());
assert_eq!(row.len(), 5);
assert_eq!(row.get_cell(1).unwrap().get_content(), "baz");
}
#[test]
fn remove_cell() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
assert_eq!(row.len(), 3);
row.remove_cell(1000);
assert_eq!(row.len(), 3);
row.remove_cell(1);
assert_eq!(row.len(), 2);
assert_eq!(row.get_cell(0).unwrap().get_content(), "foo");
assert_eq!(row.get_cell(1).unwrap().get_content(), "foobar");
}
#[test]
fn extend_row() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
row.extend(vec!["A", "B", "C"]);
assert_eq!(row.len(), 6);
assert_eq!(row.get_cell(3).unwrap().get_content(), "A");
assert_eq!(row.get_cell(4).unwrap().get_content(), "B");
assert_eq!(row.get_cell(5).unwrap().get_content(), "C");
}
}
| {
let mut printed_columns = 0;
for cell in self.iter() {
printed_columns += cell.print_html(out)?;
}
// Pad with empty cells, if target width is not reached
for _ in 0..col_num - printed_columns {
Cell::default().print_html(out)?;
}
Ok(())
} | identifier_body |
row.rs | //! This module contains definition of table rows stuff
use std::io::{Error, Write};
use std::iter::FromIterator;
use std::slice::{Iter, IterMut};
// use std::vec::IntoIter;
use std::ops::{Index, IndexMut};
use super::Terminal;
use super::format::{ColumnPosition, TableFormat};
use super::utils::NEWLINE;
use super::Cell;
/// Represent a table row made of cells
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct Row {
cells: Vec<Cell>,
}
impl Row {
/// Create a new `Row` backed with `cells` vector
pub fn new(cells: Vec<Cell>) -> Row {
Row { cells }
}
/// Create an row of length `size`, with empty strings stored
pub fn empty() -> Row {
Self::new(vec![Cell::default(); 0])
}
/// Count the number of column required in the table grid.
/// It takes into account horizontal spanning of cells. For
/// example, a cell with an hspan of 3 will add 3 column to the grid
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn column_count(&self) -> usize {
self.cells.iter().map(|c| c.get_hspan()).sum()
}
/// Get the number of cells in this row
pub fn len(&self) -> usize {
self.cells.len()
// self.cells.iter().map(|c| c.get_hspan()).sum()
}
/// Check if the row is empty (has no cell)
pub fn is_empty(&self) -> bool {
self.cells.is_empty()
}
/// Get the height of this row
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
fn get_height(&self) -> usize {
let mut height = 1; // Minimum height must be 1 to print empty rows
for cell in &self.cells {
let h = cell.get_height();
if h > height {
height = h;
}
}
height
}
/// Get the minimum width required by the cell in the column `column`.
/// Return 0 if the cell does not exist in this row
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn get_column_width(&self, column: usize, format: &TableFormat) -> usize {
let mut i = 0;
for c in &self.cells {
if i + c.get_hspan() > column {
if c.get_hspan() == 1 {
return c.get_width();
}
let (lp, rp) = format.get_padding();
let sep = format
.get_column_separator(ColumnPosition::Intern)
.map(|_| 1)
.unwrap_or_default();
let rem = lp + rp + sep;
let mut w = c.get_width();
if w > rem {
w -= rem;
} else {
w = 0;
}
return (w as f64 / c.get_hspan() as f64).ceil() as usize;
}
i += c.get_hspan();
}
0
}
/// Get the cell at index `idx`
pub fn get_cell(&self, idx: usize) -> Option<&Cell> {
self.cells.get(idx)
}
/// Get the mutable cell at index `idx`
pub fn get_mut_cell(&mut self, idx: usize) -> Option<&mut Cell> {
self.cells.get_mut(idx)
}
/// Set the `cell` in the row at the given `idx` index
pub fn set_cell(&mut self, cell: Cell, idx: usize) -> Result<(), &str> {
if idx >= self.len() {
return Err("Cannot find cell");
}
self.cells[idx] = cell;
Ok(())
}
/// Append a `cell` at the end of the row
pub fn add_cell(&mut self, cell: Cell) {
self.cells.push(cell);
}
/// Insert `cell` at position `index`. If `index` is higher than the row length,
/// the cell will be appended at the end
pub fn insert_cell(&mut self, index: usize, cell: Cell) {
if index < self.cells.len() {
self.cells.insert(index, cell);
} else {
self.add_cell(cell);
}
}
/// Remove the cell at position `index`. Silently skip if this cell does not exist
pub fn remove_cell(&mut self, index: usize) {
if index < self.cells.len() {
self.cells.remove(index);
}
}
/// Returns an immutable iterator over cells
pub fn | (&self) -> Iter<Cell> {
self.cells.iter()
}
/// Returns an mutable iterator over cells
pub fn iter_mut(&mut self) -> IterMut<Cell> {
self.cells.iter_mut()
}
/// Internal only
fn __print<T: Write + ?Sized, F>(
&self,
out: &mut T,
format: &TableFormat,
col_width: &[usize],
f: F,
) -> Result<usize, Error>
where
F: Fn(&Cell, &mut T, usize, usize, bool) -> Result<(), Error>,
{
let height = self.get_height();
for i in 0..height {
//TODO: Wrap this into dedicated function one day
out.write_all(&vec![b' '; format.get_indent()])?;
format.print_column_separator(out, ColumnPosition::Left)?;
let (lp, rp) = format.get_padding();
let mut j = 0;
let mut hspan = 0; // The additional offset caused by cell's horizontal spanning
while j + hspan < col_width.len() {
out.write_all(&vec![b' '; lp])?; // Left padding
// skip_r_fill skip filling the end of the last cell if there's no character
// delimiting the end of the table
let skip_r_fill = (j == col_width.len() - 1)
&& format.get_column_separator(ColumnPosition::Right).is_none();
match self.get_cell(j) {
Some(c) => {
// In case of horizontal spanning, width is the sum of all spanned columns' width
let mut w = col_width[j + hspan..j + hspan + c.get_hspan()].iter().sum();
let real_span = c.get_hspan() - 1;
w += real_span * (lp + rp)
+ real_span
* format
.get_column_separator(ColumnPosition::Intern)
.map(|_| 1)
.unwrap_or_default();
// Print cell content
f(c, out, i, w, skip_r_fill)?;
hspan += real_span; // Add span to offset
}
None => f(&Cell::default(), out, i, col_width[j + hspan], skip_r_fill)?,
};
out.write_all(&vec![b' '; rp])?; // Right padding
if j + hspan < col_width.len() - 1 {
format.print_column_separator(out, ColumnPosition::Intern)?;
}
j += 1;
}
format.print_column_separator(out, ColumnPosition::Right)?;
out.write_all(NEWLINE)?;
}
Ok(height)
}
/// Print the row to `out`, with `separator` as column separator, and `col_width`
/// specifying the width of each columns. Returns the number of printed lines
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn print<T: Write + ?Sized>(
&self,
out: &mut T,
format: &TableFormat,
col_width: &[usize],
) -> Result<usize, Error> {
self.__print(out, format, col_width, Cell::print)
}
/// Print the row to terminal `out`, with `separator` as column separator, and `col_width`
/// specifying the width of each columns. Apply style when needed. returns the number of printed lines
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn print_term<T: Terminal + ?Sized>(
&self,
out: &mut T,
format: &TableFormat,
col_width: &[usize],
) -> Result<usize, Error> {
self.__print(out, format, col_width, Cell::print_term)
}
/// Print the row in HTML format to `out`.
///
/// If the row is has fewer columns than `col_num`, the row is padded with empty cells.
pub fn print_html<T: Write + ?Sized>(&self, out: &mut T, col_num: usize) -> Result<(), Error> {
let mut printed_columns = 0;
for cell in self.iter() {
printed_columns += cell.print_html(out)?;
}
// Pad with empty cells, if target width is not reached
for _ in 0..col_num - printed_columns {
Cell::default().print_html(out)?;
}
Ok(())
}
}
impl Default for Row {
fn default() -> Row {
Row::empty()
}
}
impl Index<usize> for Row {
type Output = Cell;
fn index(&self, idx: usize) -> &Self::Output {
&self.cells[idx]
}
}
impl IndexMut<usize> for Row {
fn index_mut(&mut self, idx: usize) -> &mut Self::Output {
&mut self.cells[idx]
}
}
impl<A: ToString> FromIterator<A> for Row {
fn from_iter<T>(iterator: T) -> Row
where
T: IntoIterator<Item = A>,
{
Self::new(iterator.into_iter().map(|ref e| Cell::from(e)).collect())
}
}
impl<T, A> From<T> for Row
where
A: ToString,
T: IntoIterator<Item = A>,
{
fn from(it: T) -> Row {
Self::from_iter(it)
}
}
impl<'a> IntoIterator for &'a Row {
type Item = &'a Cell;
type IntoIter = Iter<'a, Cell>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
// impl IntoIterator for Row {
// type Item = Cell;
// type IntoIter = IntoIter<Cell>;
// fn into_iter(self) -> Self::IntoIter {
// self.cells.into_iter()
// }
// }
impl<'a> IntoIterator for &'a mut Row {
type Item = &'a mut Cell;
type IntoIter = IterMut<'a, Cell>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
impl<S: ToString> Extend<S> for Row {
fn extend<T: IntoIterator<Item = S>>(&mut self, iter: T) {
self.cells
.extend(iter.into_iter().map(|s| Cell::new(&s.to_string())));
}
}
// impl <S: Into<Cell>> Extend<S> for Row {
// fn extend<T: IntoIterator<Item=S>>(&mut self, iter: T) {
// self.cells.extend(iter.into_iter().map(|s| s.into()));
// }
// }
/// This macro simplifies `Row` creation
///
/// The syntax support style spec
/// # Example
/// ```
/// # #[macro_use] extern crate prettytable;
/// # fn main() {
/// // Create a normal row
/// let row1 = row!["Element 1", "Element 2", "Element 3"];
/// // Create a row with all cells formatted with red foreground color, yellow background color
/// // bold, italic, align in the center of the cell
/// let row2 = row![FrBybic => "Element 1", "Element 2", "Element 3"];
/// // Create a row with first cell in blue, second one in red, and last one with default style
/// let row3 = row![Fb->"blue", Fr->"red", "normal"];
/// // Do something with rows
/// # drop(row1);
/// # drop(row2);
/// # drop(row3);
/// # }
/// ```
///
/// For details about style specifier syntax, check doc for [`Cell::style_spec`](cell/struct.Cell.html#method.style_spec) method
#[macro_export]
macro_rules! row {
(($($out:tt)*);) => (vec![$($out)*]);
(($($out:tt)*); $value:expr) => (vec![$($out)* $crate::cell!($value)]);
(($($out:tt)*); $value:expr, $($n:tt)*) => ($crate::row!(($($out)* $crate::cell!($value),); $($n)*));
(($($out:tt)*); $style:ident -> $value:expr) => (vec![$($out)* $crate::cell!($style -> $value)]);
(($($out:tt)*); $style:ident -> $value:expr, $($n: tt)*) => ($crate::row!(($($out)* $crate::cell!($style -> $value),); $($n)*));
($($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($content)), *])); // This line may not be needed starting from Rust 1.20
($style:ident => $($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *]));
($style:ident => $($content:expr,) *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *]));
($($content:tt)*) => ($crate::Row::new($crate::row!((); $($content)*)));
}
#[cfg(test)]
mod tests {
use super::*;
use Cell;
#[test]
fn row_default_empty() {
let row1 = Row::default();
assert_eq!(row1.len(), 0);
assert!(row1.is_empty());
}
#[test]
fn get_add_set_cell() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
assert_eq!(row.len(), 3);
assert!(row.get_mut_cell(12).is_none());
let c1 = row.get_mut_cell(0).unwrap().clone();
assert_eq!(c1.get_content(), "foo");
let c1 = Cell::from(&"baz");
assert!(row.set_cell(c1.clone(), 1000).is_err());
assert!(row.set_cell(c1.clone(), 0).is_ok());
assert_eq!(row.get_cell(0).unwrap().get_content(), "baz");
row.add_cell(c1.clone());
assert_eq!(row.len(), 4);
assert_eq!(row.get_cell(3).unwrap().get_content(), "baz");
}
#[test]
fn insert_cell() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
assert_eq!(row.len(), 3);
let cell = Cell::new("baz");
row.insert_cell(1000, cell.clone());
assert_eq!(row.len(), 4);
assert_eq!(row.get_cell(3).unwrap().get_content(), "baz");
row.insert_cell(1, cell.clone());
assert_eq!(row.len(), 5);
assert_eq!(row.get_cell(1).unwrap().get_content(), "baz");
}
#[test]
fn remove_cell() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
assert_eq!(row.len(), 3);
row.remove_cell(1000);
assert_eq!(row.len(), 3);
row.remove_cell(1);
assert_eq!(row.len(), 2);
assert_eq!(row.get_cell(0).unwrap().get_content(), "foo");
assert_eq!(row.get_cell(1).unwrap().get_content(), "foobar");
}
#[test]
fn extend_row() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
row.extend(vec!["A", "B", "C"]);
assert_eq!(row.len(), 6);
assert_eq!(row.get_cell(3).unwrap().get_content(), "A");
assert_eq!(row.get_cell(4).unwrap().get_content(), "B");
assert_eq!(row.get_cell(5).unwrap().get_content(), "C");
}
}
| iter | identifier_name |
row.rs | //! This module contains definition of table rows stuff
use std::io::{Error, Write};
use std::iter::FromIterator;
use std::slice::{Iter, IterMut};
// use std::vec::IntoIter;
use std::ops::{Index, IndexMut};
use super::Terminal;
use super::format::{ColumnPosition, TableFormat};
use super::utils::NEWLINE;
use super::Cell;
/// Represent a table row made of cells
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub struct Row {
cells: Vec<Cell>,
}
impl Row {
/// Create a new `Row` backed with `cells` vector
pub fn new(cells: Vec<Cell>) -> Row {
Row { cells }
}
/// Create an row of length `size`, with empty strings stored
pub fn empty() -> Row {
Self::new(vec![Cell::default(); 0])
}
/// Count the number of column required in the table grid.
/// It takes into account horizontal spanning of cells. For
/// example, a cell with an hspan of 3 will add 3 column to the grid
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn column_count(&self) -> usize {
self.cells.iter().map(|c| c.get_hspan()).sum()
}
/// Get the number of cells in this row
pub fn len(&self) -> usize {
self.cells.len()
// self.cells.iter().map(|c| c.get_hspan()).sum()
}
/// Check if the row is empty (has no cell)
pub fn is_empty(&self) -> bool {
self.cells.is_empty()
}
/// Get the height of this row
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
fn get_height(&self) -> usize {
let mut height = 1; // Minimum height must be 1 to print empty rows
for cell in &self.cells {
let h = cell.get_height();
if h > height {
height = h;
}
}
height
}
| pub(crate) fn get_column_width(&self, column: usize, format: &TableFormat) -> usize {
let mut i = 0;
for c in &self.cells {
if i + c.get_hspan() > column {
if c.get_hspan() == 1 {
return c.get_width();
}
let (lp, rp) = format.get_padding();
let sep = format
.get_column_separator(ColumnPosition::Intern)
.map(|_| 1)
.unwrap_or_default();
let rem = lp + rp + sep;
let mut w = c.get_width();
if w > rem {
w -= rem;
} else {
w = 0;
}
return (w as f64 / c.get_hspan() as f64).ceil() as usize;
}
i += c.get_hspan();
}
0
}
/// Get the cell at index `idx`
pub fn get_cell(&self, idx: usize) -> Option<&Cell> {
self.cells.get(idx)
}
/// Get the mutable cell at index `idx`
pub fn get_mut_cell(&mut self, idx: usize) -> Option<&mut Cell> {
self.cells.get_mut(idx)
}
/// Set the `cell` in the row at the given `idx` index
pub fn set_cell(&mut self, cell: Cell, idx: usize) -> Result<(), &str> {
if idx >= self.len() {
return Err("Cannot find cell");
}
self.cells[idx] = cell;
Ok(())
}
/// Append a `cell` at the end of the row
pub fn add_cell(&mut self, cell: Cell) {
self.cells.push(cell);
}
/// Insert `cell` at position `index`. If `index` is higher than the row length,
/// the cell will be appended at the end
pub fn insert_cell(&mut self, index: usize, cell: Cell) {
if index < self.cells.len() {
self.cells.insert(index, cell);
} else {
self.add_cell(cell);
}
}
/// Remove the cell at position `index`. Silently skip if this cell does not exist
pub fn remove_cell(&mut self, index: usize) {
if index < self.cells.len() {
self.cells.remove(index);
}
}
/// Returns an immutable iterator over cells
pub fn iter(&self) -> Iter<Cell> {
self.cells.iter()
}
/// Returns an mutable iterator over cells
pub fn iter_mut(&mut self) -> IterMut<Cell> {
self.cells.iter_mut()
}
/// Internal only
fn __print<T: Write + ?Sized, F>(
&self,
out: &mut T,
format: &TableFormat,
col_width: &[usize],
f: F,
) -> Result<usize, Error>
where
F: Fn(&Cell, &mut T, usize, usize, bool) -> Result<(), Error>,
{
let height = self.get_height();
for i in 0..height {
//TODO: Wrap this into dedicated function one day
out.write_all(&vec![b' '; format.get_indent()])?;
format.print_column_separator(out, ColumnPosition::Left)?;
let (lp, rp) = format.get_padding();
let mut j = 0;
let mut hspan = 0; // The additional offset caused by cell's horizontal spanning
while j + hspan < col_width.len() {
out.write_all(&vec![b' '; lp])?; // Left padding
// skip_r_fill skip filling the end of the last cell if there's no character
// delimiting the end of the table
let skip_r_fill = (j == col_width.len() - 1)
&& format.get_column_separator(ColumnPosition::Right).is_none();
match self.get_cell(j) {
Some(c) => {
// In case of horizontal spanning, width is the sum of all spanned columns' width
let mut w = col_width[j + hspan..j + hspan + c.get_hspan()].iter().sum();
let real_span = c.get_hspan() - 1;
w += real_span * (lp + rp)
+ real_span
* format
.get_column_separator(ColumnPosition::Intern)
.map(|_| 1)
.unwrap_or_default();
// Print cell content
f(c, out, i, w, skip_r_fill)?;
hspan += real_span; // Add span to offset
}
None => f(&Cell::default(), out, i, col_width[j + hspan], skip_r_fill)?,
};
out.write_all(&vec![b' '; rp])?; // Right padding
if j + hspan < col_width.len() - 1 {
format.print_column_separator(out, ColumnPosition::Intern)?;
}
j += 1;
}
format.print_column_separator(out, ColumnPosition::Right)?;
out.write_all(NEWLINE)?;
}
Ok(height)
}
/// Print the row to `out`, with `separator` as column separator, and `col_width`
/// specifying the width of each columns. Returns the number of printed lines
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn print<T: Write + ?Sized>(
&self,
out: &mut T,
format: &TableFormat,
col_width: &[usize],
) -> Result<usize, Error> {
self.__print(out, format, col_width, Cell::print)
}
/// Print the row to terminal `out`, with `separator` as column separator, and `col_width`
/// specifying the width of each columns. Apply style when needed. returns the number of printed lines
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
pub(crate) fn print_term<T: Terminal + ?Sized>(
&self,
out: &mut T,
format: &TableFormat,
col_width: &[usize],
) -> Result<usize, Error> {
self.__print(out, format, col_width, Cell::print_term)
}
/// Print the row in HTML format to `out`.
///
/// If the row is has fewer columns than `col_num`, the row is padded with empty cells.
pub fn print_html<T: Write + ?Sized>(&self, out: &mut T, col_num: usize) -> Result<(), Error> {
let mut printed_columns = 0;
for cell in self.iter() {
printed_columns += cell.print_html(out)?;
}
// Pad with empty cells, if target width is not reached
for _ in 0..col_num - printed_columns {
Cell::default().print_html(out)?;
}
Ok(())
}
}
impl Default for Row {
fn default() -> Row {
Row::empty()
}
}
impl Index<usize> for Row {
type Output = Cell;
fn index(&self, idx: usize) -> &Self::Output {
&self.cells[idx]
}
}
impl IndexMut<usize> for Row {
fn index_mut(&mut self, idx: usize) -> &mut Self::Output {
&mut self.cells[idx]
}
}
impl<A: ToString> FromIterator<A> for Row {
fn from_iter<T>(iterator: T) -> Row
where
T: IntoIterator<Item = A>,
{
Self::new(iterator.into_iter().map(|ref e| Cell::from(e)).collect())
}
}
impl<T, A> From<T> for Row
where
A: ToString,
T: IntoIterator<Item = A>,
{
fn from(it: T) -> Row {
Self::from_iter(it)
}
}
impl<'a> IntoIterator for &'a Row {
type Item = &'a Cell;
type IntoIter = Iter<'a, Cell>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
// impl IntoIterator for Row {
// type Item = Cell;
// type IntoIter = IntoIter<Cell>;
// fn into_iter(self) -> Self::IntoIter {
// self.cells.into_iter()
// }
// }
impl<'a> IntoIterator for &'a mut Row {
type Item = &'a mut Cell;
type IntoIter = IterMut<'a, Cell>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
impl<S: ToString> Extend<S> for Row {
fn extend<T: IntoIterator<Item = S>>(&mut self, iter: T) {
self.cells
.extend(iter.into_iter().map(|s| Cell::new(&s.to_string())));
}
}
// impl <S: Into<Cell>> Extend<S> for Row {
// fn extend<T: IntoIterator<Item=S>>(&mut self, iter: T) {
// self.cells.extend(iter.into_iter().map(|s| s.into()));
// }
// }
/// This macro simplifies `Row` creation
///
/// The syntax support style spec
/// # Example
/// ```
/// # #[macro_use] extern crate prettytable;
/// # fn main() {
/// // Create a normal row
/// let row1 = row!["Element 1", "Element 2", "Element 3"];
/// // Create a row with all cells formatted with red foreground color, yellow background color
/// // bold, italic, align in the center of the cell
/// let row2 = row![FrBybic => "Element 1", "Element 2", "Element 3"];
/// // Create a row with first cell in blue, second one in red, and last one with default style
/// let row3 = row![Fb->"blue", Fr->"red", "normal"];
/// // Do something with rows
/// # drop(row1);
/// # drop(row2);
/// # drop(row3);
/// # }
/// ```
///
/// For details about style specifier syntax, check doc for [`Cell::style_spec`](cell/struct.Cell.html#method.style_spec) method
#[macro_export]
macro_rules! row {
(($($out:tt)*);) => (vec![$($out)*]);
(($($out:tt)*); $value:expr) => (vec![$($out)* $crate::cell!($value)]);
(($($out:tt)*); $value:expr, $($n:tt)*) => ($crate::row!(($($out)* $crate::cell!($value),); $($n)*));
(($($out:tt)*); $style:ident -> $value:expr) => (vec![$($out)* $crate::cell!($style -> $value)]);
(($($out:tt)*); $style:ident -> $value:expr, $($n: tt)*) => ($crate::row!(($($out)* $crate::cell!($style -> $value),); $($n)*));
($($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($content)), *])); // This line may not be needed starting from Rust 1.20
($style:ident => $($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *]));
($style:ident => $($content:expr,) *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *]));
($($content:tt)*) => ($crate::Row::new($crate::row!((); $($content)*)));
}
#[cfg(test)]
mod tests {
use super::*;
use Cell;
#[test]
fn row_default_empty() {
let row1 = Row::default();
assert_eq!(row1.len(), 0);
assert!(row1.is_empty());
}
#[test]
fn get_add_set_cell() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
assert_eq!(row.len(), 3);
assert!(row.get_mut_cell(12).is_none());
let c1 = row.get_mut_cell(0).unwrap().clone();
assert_eq!(c1.get_content(), "foo");
let c1 = Cell::from(&"baz");
assert!(row.set_cell(c1.clone(), 1000).is_err());
assert!(row.set_cell(c1.clone(), 0).is_ok());
assert_eq!(row.get_cell(0).unwrap().get_content(), "baz");
row.add_cell(c1.clone());
assert_eq!(row.len(), 4);
assert_eq!(row.get_cell(3).unwrap().get_content(), "baz");
}
#[test]
fn insert_cell() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
assert_eq!(row.len(), 3);
let cell = Cell::new("baz");
row.insert_cell(1000, cell.clone());
assert_eq!(row.len(), 4);
assert_eq!(row.get_cell(3).unwrap().get_content(), "baz");
row.insert_cell(1, cell.clone());
assert_eq!(row.len(), 5);
assert_eq!(row.get_cell(1).unwrap().get_content(), "baz");
}
#[test]
fn remove_cell() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
assert_eq!(row.len(), 3);
row.remove_cell(1000);
assert_eq!(row.len(), 3);
row.remove_cell(1);
assert_eq!(row.len(), 2);
assert_eq!(row.get_cell(0).unwrap().get_content(), "foo");
assert_eq!(row.get_cell(1).unwrap().get_content(), "foobar");
}
#[test]
fn extend_row() {
let mut row = Row::from(vec!["foo", "bar", "foobar"]);
row.extend(vec!["A", "B", "C"]);
assert_eq!(row.len(), 6);
assert_eq!(row.get_cell(3).unwrap().get_content(), "A");
assert_eq!(row.get_cell(4).unwrap().get_content(), "B");
assert_eq!(row.get_cell(5).unwrap().get_content(), "C");
}
} | /// Get the minimum width required by the cell in the column `column`.
/// Return 0 if the cell does not exist in this row
// #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] | random_line_split |
leader_score.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import logging
import sys
import time
from datetime import datetime
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.transforms import trigger
def timestamp2str(t, fmt='%Y-%m-%d %H:%M:%S.000'):
"""Converts a unix timestamp into a formatted string."""
return datetime.fromtimestamp(t).strftime(fmt)
class ParseGameEventFn(beam.DoFn):
"""Parses the raw game event info into a Python dictionary.
Each event line has the following format:
username,teamname,score,timestamp_in_ms,readable_time
e.g.:
user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
The human-readable time string is not used here.
"""
def __init__(self):
super(ParseGameEventFn, self).__init__()
self.num_parse_errors = Metrics.counter(self.__class__, 'num_parse_errors')
def process(self, elem):
try:
row = list(csv.reader([elem]))[0]
yield {
'user': row[0],
'team': row[1],
'score': int(row[2]),
'timestamp': int(row[3]) / 1000.0,
}
except: # pylint: disable=bare-except
# Log and count parse errors
self.num_parse_errors.inc()
logging.error('Parse error on "%s"', elem)
class ExtractAndSumScore(beam.PTransform):
"""A transform to extract key/score information and sum the scores.
The constructor argument `field` determines whether 'team' or 'user' info is
extracted.
""" | def __init__(self, field):
super(ExtractAndSumScore, self).__init__()
self.field = field
def expand(self, pcoll):
return (pcoll
| beam.Map(lambda elem: (elem[self.field], elem['score']))
| beam.CombinePerKey(sum))
class TeamScoresDict(beam.DoFn):
"""Formats the data into a dictionary of BigQuery columns with their values
Receives a (team, score) pair, extracts the window start timestamp, and
formats everything together into a dictionary. The dictionary is in the format
{'bigquery_column': value}
"""
def process(self, team_score, window=beam.DoFn.WindowParam):
team, score = team_score
start = timestamp2str(int(window.start))
yield {
'team': team,
'total_score': score,
'window_start': start,
'processing_time': timestamp2str(int(time.time()))
}
class WriteToBigQuery(beam.PTransform):
"""Generate, format, and write BigQuery table row information."""
def __init__(self, table_name, dataset, schema, project):
"""Initializes the transform.
Args:
table_name: Name of the BigQuery table to use.
dataset: Name of the dataset to use.
schema: Dictionary in the format {'column_name': 'bigquery_type'}
project: Name of the Cloud project containing BigQuery table.
"""
super(WriteToBigQuery, self).__init__()
self.table_name = table_name
self.dataset = dataset
self.schema = schema
self.project = project
def get_schema(self):
"""Build the output table schema."""
return ', '.join(
'%s:%s' % (col, self.schema[col]) for col in self.schema)
def expand(self, pcoll):
return (
pcoll
| 'ConvertToRow' >> beam.Map(
lambda elem: {col: elem[col] for col in self.schema})
| beam.io.WriteToBigQuery(
self.table_name, self.dataset, self.project, self.get_schema()))
# [START window_and_trigger]
class CalculateTeamScores(beam.PTransform):
"""Calculates scores for each team within the configured window duration.
Extract team/score pairs from the event stream, using hour-long windows by
default.
"""
def __init__(self, team_window_duration, allowed_lateness):
super(CalculateTeamScores, self).__init__()
self.team_window_duration = team_window_duration * 60
self.allowed_lateness_seconds = allowed_lateness * 60
def expand(self, pcoll):
# NOTE: the behavior does not exactly match the Java example
# TODO: allowed_lateness not implemented yet in FixedWindows
# TODO: AfterProcessingTime not implemented yet, replace AfterCount
return (
pcoll
# We will get early (speculative) results as well as cumulative
# processing of late data.
| 'LeaderboardTeamFixedWindows' >> beam.WindowInto(
beam.window.FixedWindows(self.team_window_duration),
trigger=trigger.AfterWatermark(trigger.AfterCount(10),
trigger.AfterCount(20)),
accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
# Extract and sum teamname/score pairs from the event data.
| 'ExtractAndSumScore' >> ExtractAndSumScore('team'))
# [END window_and_trigger]
# [START processing_time_trigger]
class CalculateUserScores(beam.PTransform):
"""Extract user/score pairs from the event stream using processing time, via
global windowing. Get periodic updates on all users' running scores.
"""
def __init__(self, allowed_lateness):
super(CalculateUserScores, self).__init__()
self.allowed_lateness_seconds = allowed_lateness * 60
def expand(self, pcoll):
# NOTE: the behavior does not exactly match the Java example
# TODO: allowed_lateness not implemented yet in FixedWindows
# TODO: AfterProcessingTime not implemented yet, replace AfterCount
return (
pcoll
# Get periodic results every ten events.
| 'LeaderboardUserGlobalWindows' >> beam.WindowInto(
beam.window.GlobalWindows(),
trigger=trigger.Repeatedly(trigger.AfterCount(10)),
accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
# Extract and sum username/score pairs from the event data.
| 'ExtractAndSumScore' >> ExtractAndSumScore('user'))
# [END processing_time_trigger]
def run(argv=None):
"""Main entry point; defines and runs the hourly_team_score pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--topic',
type=str,
help='Pub/Sub topic to read from')
parser.add_argument('--subscription',
type=str,
help='Pub/Sub subscription to read from')
parser.add_argument('--dataset',
type=str,
required=True,
help='BigQuery Dataset to write tables to. '
'Must already exist.')
parser.add_argument('--table_name',
default='leader_board',
help='The BigQuery table name. Should not already exist.')
parser.add_argument('--team_window_duration',
type=int,
default=60,
help='Numeric value of fixed window duration for team '
'analysis, in minutes')
parser.add_argument('--allowed_lateness',
type=int,
default=120,
help='Numeric value of allowed data lateness, in minutes')
args, pipeline_args = parser.parse_known_args(argv)
if args.topic is None and args.subscription is None:
parser.print_usage()
print(sys.argv[0] + ': error: one of --topic or --subscription is required')
sys.exit(1)
options = PipelineOptions(pipeline_args)
# We also require the --project option to access --dataset
if options.view_as(GoogleCloudOptions).project is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --project is required')
sys.exit(1)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
options.view_as(SetupOptions).save_main_session = True
# Enforce that this pipeline is always run in streaming mode
options.view_as(StandardOptions).streaming = True
with beam.Pipeline(options=options) as p:
# Read game events from Pub/Sub using custom timestamps, which are extracted
# from the pubsub data elements, and parse the data.
# Read from PubSub into a PCollection.
if args.subscription:
scores = p | 'ReadPubSub' >> beam.io.ReadFromPubSub(
subscription=args.subscription)
else:
scores = p | 'ReadPubSub' >> beam.io.ReadFromPubSub(
topic=args.topic)
events = (
scores
| 'ParseGameEventFn' >> beam.ParDo(ParseGameEventFn())
| 'AddEventTimestamps' >> beam.Map(
lambda elem: beam.window.TimestampedValue(elem, elem['timestamp'])))
# Get team scores and write the results to BigQuery
(events # pylint: disable=expression-not-assigned
| 'CalculateTeamScores' >> CalculateTeamScores(
args.team_window_duration, args.allowed_lateness)
| 'TeamScoresDict' >> beam.ParDo(TeamScoresDict())
| 'WriteTeamScoreSums' >> WriteToBigQuery(
args.table_name + '_teams', args.dataset, {
'team': 'STRING',
'total_score': 'INTEGER',
'window_start': 'STRING',
'processing_time': 'STRING',
}, options.view_as(GoogleCloudOptions).project))
def format_user_score_sums(user_score):
(user, score) = user_score
return {'user': user, 'total_score': score}
# Get user scores and write the results to BigQuery
(events # pylint: disable=expression-not-assigned
| 'CalculateUserScores' >> CalculateUserScores(args.allowed_lateness)
| 'FormatUserScoreSums' >> beam.Map(format_user_score_sums)
| 'WriteUserScoreSums' >> WriteToBigQuery(
args.table_name + '_users', args.dataset, {
'user': 'STRING',
'total_score': 'INTEGER',
}, options.view_as(GoogleCloudOptions).project))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run() | random_line_split | |
leader_score.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import logging
import sys
import time
from datetime import datetime
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.transforms import trigger
def timestamp2str(t, fmt='%Y-%m-%d %H:%M:%S.000'):
"""Converts a unix timestamp into a formatted string."""
return datetime.fromtimestamp(t).strftime(fmt)
class ParseGameEventFn(beam.DoFn):
"""Parses the raw game event info into a Python dictionary.
Each event line has the following format:
username,teamname,score,timestamp_in_ms,readable_time
e.g.:
user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
The human-readable time string is not used here.
"""
def __init__(self):
super(ParseGameEventFn, self).__init__()
self.num_parse_errors = Metrics.counter(self.__class__, 'num_parse_errors')
def process(self, elem):
try:
row = list(csv.reader([elem]))[0]
yield {
'user': row[0],
'team': row[1],
'score': int(row[2]),
'timestamp': int(row[3]) / 1000.0,
}
except: # pylint: disable=bare-except
# Log and count parse errors
self.num_parse_errors.inc()
logging.error('Parse error on "%s"', elem)
class ExtractAndSumScore(beam.PTransform):
"""A transform to extract key/score information and sum the scores.
The constructor argument `field` determines whether 'team' or 'user' info is
extracted.
"""
def __init__(self, field):
super(ExtractAndSumScore, self).__init__()
self.field = field
def expand(self, pcoll):
return (pcoll
| beam.Map(lambda elem: (elem[self.field], elem['score']))
| beam.CombinePerKey(sum))
class TeamScoresDict(beam.DoFn):
"""Formats the data into a dictionary of BigQuery columns with their values
Receives a (team, score) pair, extracts the window start timestamp, and
formats everything together into a dictionary. The dictionary is in the format
{'bigquery_column': value}
"""
def process(self, team_score, window=beam.DoFn.WindowParam):
team, score = team_score
start = timestamp2str(int(window.start))
yield {
'team': team,
'total_score': score,
'window_start': start,
'processing_time': timestamp2str(int(time.time()))
}
class WriteToBigQuery(beam.PTransform):
"""Generate, format, and write BigQuery table row information."""
def __init__(self, table_name, dataset, schema, project):
"""Initializes the transform.
Args:
table_name: Name of the BigQuery table to use.
dataset: Name of the dataset to use.
schema: Dictionary in the format {'column_name': 'bigquery_type'}
project: Name of the Cloud project containing BigQuery table.
"""
super(WriteToBigQuery, self).__init__()
self.table_name = table_name
self.dataset = dataset
self.schema = schema
self.project = project
def get_schema(self):
"""Build the output table schema."""
return ', '.join(
'%s:%s' % (col, self.schema[col]) for col in self.schema)
def expand(self, pcoll):
return (
pcoll
| 'ConvertToRow' >> beam.Map(
lambda elem: {col: elem[col] for col in self.schema})
| beam.io.WriteToBigQuery(
self.table_name, self.dataset, self.project, self.get_schema()))
# [START window_and_trigger]
class CalculateTeamScores(beam.PTransform):
"""Calculates scores for each team within the configured window duration.
Extract team/score pairs from the event stream, using hour-long windows by
default.
"""
def __init__(self, team_window_duration, allowed_lateness):
super(CalculateTeamScores, self).__init__()
self.team_window_duration = team_window_duration * 60
self.allowed_lateness_seconds = allowed_lateness * 60
def expand(self, pcoll):
# NOTE: the behavior does not exactly match the Java example
# TODO: allowed_lateness not implemented yet in FixedWindows
# TODO: AfterProcessingTime not implemented yet, replace AfterCount
return (
pcoll
# We will get early (speculative) results as well as cumulative
# processing of late data.
| 'LeaderboardTeamFixedWindows' >> beam.WindowInto(
beam.window.FixedWindows(self.team_window_duration),
trigger=trigger.AfterWatermark(trigger.AfterCount(10),
trigger.AfterCount(20)),
accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
# Extract and sum teamname/score pairs from the event data.
| 'ExtractAndSumScore' >> ExtractAndSumScore('team'))
# [END window_and_trigger]
# [START processing_time_trigger]
class CalculateUserScores(beam.PTransform):
"""Extract user/score pairs from the event stream using processing time, via
global windowing. Get periodic updates on all users' running scores.
"""
def __init__(self, allowed_lateness):
super(CalculateUserScores, self).__init__()
self.allowed_lateness_seconds = allowed_lateness * 60
def expand(self, pcoll):
# NOTE: the behavior does not exactly match the Java example
# TODO: allowed_lateness not implemented yet in FixedWindows
# TODO: AfterProcessingTime not implemented yet, replace AfterCount
return (
pcoll
# Get periodic results every ten events.
| 'LeaderboardUserGlobalWindows' >> beam.WindowInto(
beam.window.GlobalWindows(),
trigger=trigger.Repeatedly(trigger.AfterCount(10)),
accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
# Extract and sum username/score pairs from the event data.
| 'ExtractAndSumScore' >> ExtractAndSumScore('user'))
# [END processing_time_trigger]
def run(argv=None):
"""Main entry point; defines and runs the hourly_team_score pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--topic',
type=str,
help='Pub/Sub topic to read from')
parser.add_argument('--subscription',
type=str,
help='Pub/Sub subscription to read from')
parser.add_argument('--dataset',
type=str,
required=True,
help='BigQuery Dataset to write tables to. '
'Must already exist.')
parser.add_argument('--table_name',
default='leader_board',
help='The BigQuery table name. Should not already exist.')
parser.add_argument('--team_window_duration',
type=int,
default=60,
help='Numeric value of fixed window duration for team '
'analysis, in minutes')
parser.add_argument('--allowed_lateness',
type=int,
default=120,
help='Numeric value of allowed data lateness, in minutes')
args, pipeline_args = parser.parse_known_args(argv)
if args.topic is None and args.subscription is None:
parser.print_usage()
print(sys.argv[0] + ': error: one of --topic or --subscription is required')
sys.exit(1)
options = PipelineOptions(pipeline_args)
# We also require the --project option to access --dataset
if options.view_as(GoogleCloudOptions).project is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --project is required')
sys.exit(1)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
options.view_as(SetupOptions).save_main_session = True
# Enforce that this pipeline is always run in streaming mode
options.view_as(StandardOptions).streaming = True
with beam.Pipeline(options=options) as p:
# Read game events from Pub/Sub using custom timestamps, which are extracted
# from the pubsub data elements, and parse the data.
# Read from PubSub into a PCollection.
if args.subscription:
|
else:
scores = p | 'ReadPubSub' >> beam.io.ReadFromPubSub(
topic=args.topic)
events = (
scores
| 'ParseGameEventFn' >> beam.ParDo(ParseGameEventFn())
| 'AddEventTimestamps' >> beam.Map(
lambda elem: beam.window.TimestampedValue(elem, elem['timestamp'])))
# Get team scores and write the results to BigQuery
(events # pylint: disable=expression-not-assigned
| 'CalculateTeamScores' >> CalculateTeamScores(
args.team_window_duration, args.allowed_lateness)
| 'TeamScoresDict' >> beam.ParDo(TeamScoresDict())
| 'WriteTeamScoreSums' >> WriteToBigQuery(
args.table_name + '_teams', args.dataset, {
'team': 'STRING',
'total_score': 'INTEGER',
'window_start': 'STRING',
'processing_time': 'STRING',
}, options.view_as(GoogleCloudOptions).project))
def format_user_score_sums(user_score):
(user, score) = user_score
return {'user': user, 'total_score': score}
# Get user scores and write the results to BigQuery
(events # pylint: disable=expression-not-assigned
| 'CalculateUserScores' >> CalculateUserScores(args.allowed_lateness)
| 'FormatUserScoreSums' >> beam.Map(format_user_score_sums)
| 'WriteUserScoreSums' >> WriteToBigQuery(
args.table_name + '_users', args.dataset, {
'user': 'STRING',
'total_score': 'INTEGER',
}, options.view_as(GoogleCloudOptions).project))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run() | scores = p | 'ReadPubSub' >> beam.io.ReadFromPubSub(
subscription=args.subscription) | conditional_block |
leader_score.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import logging
import sys
import time
from datetime import datetime
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.transforms import trigger
def timestamp2str(t, fmt='%Y-%m-%d %H:%M:%S.000'):
"""Converts a unix timestamp into a formatted string."""
return datetime.fromtimestamp(t).strftime(fmt)
class ParseGameEventFn(beam.DoFn):
"""Parses the raw game event info into a Python dictionary.
Each event line has the following format:
username,teamname,score,timestamp_in_ms,readable_time
e.g.:
user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
The human-readable time string is not used here.
"""
def __init__(self):
super(ParseGameEventFn, self).__init__()
self.num_parse_errors = Metrics.counter(self.__class__, 'num_parse_errors')
def process(self, elem):
try:
row = list(csv.reader([elem]))[0]
yield {
'user': row[0],
'team': row[1],
'score': int(row[2]),
'timestamp': int(row[3]) / 1000.0,
}
except: # pylint: disable=bare-except
# Log and count parse errors
self.num_parse_errors.inc()
logging.error('Parse error on "%s"', elem)
class ExtractAndSumScore(beam.PTransform):
"""A transform to extract key/score information and sum the scores.
The constructor argument `field` determines whether 'team' or 'user' info is
extracted.
"""
def __init__(self, field):
super(ExtractAndSumScore, self).__init__()
self.field = field
def expand(self, pcoll):
return (pcoll
| beam.Map(lambda elem: (elem[self.field], elem['score']))
| beam.CombinePerKey(sum))
class TeamScoresDict(beam.DoFn):
|
class WriteToBigQuery(beam.PTransform):
"""Generate, format, and write BigQuery table row information."""
def __init__(self, table_name, dataset, schema, project):
"""Initializes the transform.
Args:
table_name: Name of the BigQuery table to use.
dataset: Name of the dataset to use.
schema: Dictionary in the format {'column_name': 'bigquery_type'}
project: Name of the Cloud project containing BigQuery table.
"""
super(WriteToBigQuery, self).__init__()
self.table_name = table_name
self.dataset = dataset
self.schema = schema
self.project = project
def get_schema(self):
"""Build the output table schema."""
return ', '.join(
'%s:%s' % (col, self.schema[col]) for col in self.schema)
def expand(self, pcoll):
return (
pcoll
| 'ConvertToRow' >> beam.Map(
lambda elem: {col: elem[col] for col in self.schema})
| beam.io.WriteToBigQuery(
self.table_name, self.dataset, self.project, self.get_schema()))
# [START window_and_trigger]
class CalculateTeamScores(beam.PTransform):
"""Calculates scores for each team within the configured window duration.
Extract team/score pairs from the event stream, using hour-long windows by
default.
"""
def __init__(self, team_window_duration, allowed_lateness):
super(CalculateTeamScores, self).__init__()
self.team_window_duration = team_window_duration * 60
self.allowed_lateness_seconds = allowed_lateness * 60
def expand(self, pcoll):
# NOTE: the behavior does not exactly match the Java example
# TODO: allowed_lateness not implemented yet in FixedWindows
# TODO: AfterProcessingTime not implemented yet, replace AfterCount
return (
pcoll
# We will get early (speculative) results as well as cumulative
# processing of late data.
| 'LeaderboardTeamFixedWindows' >> beam.WindowInto(
beam.window.FixedWindows(self.team_window_duration),
trigger=trigger.AfterWatermark(trigger.AfterCount(10),
trigger.AfterCount(20)),
accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
# Extract and sum teamname/score pairs from the event data.
| 'ExtractAndSumScore' >> ExtractAndSumScore('team'))
# [END window_and_trigger]
# [START processing_time_trigger]
class CalculateUserScores(beam.PTransform):
"""Extract user/score pairs from the event stream using processing time, via
global windowing. Get periodic updates on all users' running scores.
"""
def __init__(self, allowed_lateness):
super(CalculateUserScores, self).__init__()
self.allowed_lateness_seconds = allowed_lateness * 60
def expand(self, pcoll):
# NOTE: the behavior does not exactly match the Java example
# TODO: allowed_lateness not implemented yet in FixedWindows
# TODO: AfterProcessingTime not implemented yet, replace AfterCount
return (
pcoll
# Get periodic results every ten events.
| 'LeaderboardUserGlobalWindows' >> beam.WindowInto(
beam.window.GlobalWindows(),
trigger=trigger.Repeatedly(trigger.AfterCount(10)),
accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
# Extract and sum username/score pairs from the event data.
| 'ExtractAndSumScore' >> ExtractAndSumScore('user'))
# [END processing_time_trigger]
def run(argv=None):
"""Main entry point; defines and runs the hourly_team_score pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--topic',
type=str,
help='Pub/Sub topic to read from')
parser.add_argument('--subscription',
type=str,
help='Pub/Sub subscription to read from')
parser.add_argument('--dataset',
type=str,
required=True,
help='BigQuery Dataset to write tables to. '
'Must already exist.')
parser.add_argument('--table_name',
default='leader_board',
help='The BigQuery table name. Should not already exist.')
parser.add_argument('--team_window_duration',
type=int,
default=60,
help='Numeric value of fixed window duration for team '
'analysis, in minutes')
parser.add_argument('--allowed_lateness',
type=int,
default=120,
help='Numeric value of allowed data lateness, in minutes')
args, pipeline_args = parser.parse_known_args(argv)
if args.topic is None and args.subscription is None:
parser.print_usage()
print(sys.argv[0] + ': error: one of --topic or --subscription is required')
sys.exit(1)
options = PipelineOptions(pipeline_args)
# We also require the --project option to access --dataset
if options.view_as(GoogleCloudOptions).project is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --project is required')
sys.exit(1)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
options.view_as(SetupOptions).save_main_session = True
# Enforce that this pipeline is always run in streaming mode
options.view_as(StandardOptions).streaming = True
with beam.Pipeline(options=options) as p:
# Read game events from Pub/Sub using custom timestamps, which are extracted
# from the pubsub data elements, and parse the data.
# Read from PubSub into a PCollection.
if args.subscription:
scores = p | 'ReadPubSub' >> beam.io.ReadFromPubSub(
subscription=args.subscription)
else:
scores = p | 'ReadPubSub' >> beam.io.ReadFromPubSub(
topic=args.topic)
events = (
scores
| 'ParseGameEventFn' >> beam.ParDo(ParseGameEventFn())
| 'AddEventTimestamps' >> beam.Map(
lambda elem: beam.window.TimestampedValue(elem, elem['timestamp'])))
# Get team scores and write the results to BigQuery
(events # pylint: disable=expression-not-assigned
| 'CalculateTeamScores' >> CalculateTeamScores(
args.team_window_duration, args.allowed_lateness)
| 'TeamScoresDict' >> beam.ParDo(TeamScoresDict())
| 'WriteTeamScoreSums' >> WriteToBigQuery(
args.table_name + '_teams', args.dataset, {
'team': 'STRING',
'total_score': 'INTEGER',
'window_start': 'STRING',
'processing_time': 'STRING',
}, options.view_as(GoogleCloudOptions).project))
def format_user_score_sums(user_score):
(user, score) = user_score
return {'user': user, 'total_score': score}
# Get user scores and write the results to BigQuery
(events # pylint: disable=expression-not-assigned
| 'CalculateUserScores' >> CalculateUserScores(args.allowed_lateness)
| 'FormatUserScoreSums' >> beam.Map(format_user_score_sums)
| 'WriteUserScoreSums' >> WriteToBigQuery(
args.table_name + '_users', args.dataset, {
'user': 'STRING',
'total_score': 'INTEGER',
}, options.view_as(GoogleCloudOptions).project))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run() | """Formats the data into a dictionary of BigQuery columns with their values
Receives a (team, score) pair, extracts the window start timestamp, and
formats everything together into a dictionary. The dictionary is in the format
{'bigquery_column': value}
"""
def process(self, team_score, window=beam.DoFn.WindowParam):
team, score = team_score
start = timestamp2str(int(window.start))
yield {
'team': team,
'total_score': score,
'window_start': start,
'processing_time': timestamp2str(int(time.time()))
} | identifier_body |
leader_score.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import logging
import sys
import time
from datetime import datetime
import apache_beam as beam
from apache_beam.metrics.metric import Metrics
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.transforms import trigger
def timestamp2str(t, fmt='%Y-%m-%d %H:%M:%S.000'):
"""Converts a unix timestamp into a formatted string."""
return datetime.fromtimestamp(t).strftime(fmt)
class ParseGameEventFn(beam.DoFn):
"""Parses the raw game event info into a Python dictionary.
Each event line has the following format:
username,teamname,score,timestamp_in_ms,readable_time
e.g.:
user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
The human-readable time string is not used here.
"""
def | (self):
super(ParseGameEventFn, self).__init__()
self.num_parse_errors = Metrics.counter(self.__class__, 'num_parse_errors')
def process(self, elem):
try:
row = list(csv.reader([elem]))[0]
yield {
'user': row[0],
'team': row[1],
'score': int(row[2]),
'timestamp': int(row[3]) / 1000.0,
}
except: # pylint: disable=bare-except
# Log and count parse errors
self.num_parse_errors.inc()
logging.error('Parse error on "%s"', elem)
class ExtractAndSumScore(beam.PTransform):
"""A transform to extract key/score information and sum the scores.
The constructor argument `field` determines whether 'team' or 'user' info is
extracted.
"""
def __init__(self, field):
super(ExtractAndSumScore, self).__init__()
self.field = field
def expand(self, pcoll):
return (pcoll
| beam.Map(lambda elem: (elem[self.field], elem['score']))
| beam.CombinePerKey(sum))
class TeamScoresDict(beam.DoFn):
"""Formats the data into a dictionary of BigQuery columns with their values
Receives a (team, score) pair, extracts the window start timestamp, and
formats everything together into a dictionary. The dictionary is in the format
{'bigquery_column': value}
"""
def process(self, team_score, window=beam.DoFn.WindowParam):
team, score = team_score
start = timestamp2str(int(window.start))
yield {
'team': team,
'total_score': score,
'window_start': start,
'processing_time': timestamp2str(int(time.time()))
}
class WriteToBigQuery(beam.PTransform):
"""Generate, format, and write BigQuery table row information."""
def __init__(self, table_name, dataset, schema, project):
"""Initializes the transform.
Args:
table_name: Name of the BigQuery table to use.
dataset: Name of the dataset to use.
schema: Dictionary in the format {'column_name': 'bigquery_type'}
project: Name of the Cloud project containing BigQuery table.
"""
super(WriteToBigQuery, self).__init__()
self.table_name = table_name
self.dataset = dataset
self.schema = schema
self.project = project
def get_schema(self):
"""Build the output table schema."""
return ', '.join(
'%s:%s' % (col, self.schema[col]) for col in self.schema)
def expand(self, pcoll):
return (
pcoll
| 'ConvertToRow' >> beam.Map(
lambda elem: {col: elem[col] for col in self.schema})
| beam.io.WriteToBigQuery(
self.table_name, self.dataset, self.project, self.get_schema()))
# [START window_and_trigger]
class CalculateTeamScores(beam.PTransform):
"""Calculates scores for each team within the configured window duration.
Extract team/score pairs from the event stream, using hour-long windows by
default.
"""
def __init__(self, team_window_duration, allowed_lateness):
super(CalculateTeamScores, self).__init__()
self.team_window_duration = team_window_duration * 60
self.allowed_lateness_seconds = allowed_lateness * 60
def expand(self, pcoll):
# NOTE: the behavior does not exactly match the Java example
# TODO: allowed_lateness not implemented yet in FixedWindows
# TODO: AfterProcessingTime not implemented yet, replace AfterCount
return (
pcoll
# We will get early (speculative) results as well as cumulative
# processing of late data.
| 'LeaderboardTeamFixedWindows' >> beam.WindowInto(
beam.window.FixedWindows(self.team_window_duration),
trigger=trigger.AfterWatermark(trigger.AfterCount(10),
trigger.AfterCount(20)),
accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
# Extract and sum teamname/score pairs from the event data.
| 'ExtractAndSumScore' >> ExtractAndSumScore('team'))
# [END window_and_trigger]
# [START processing_time_trigger]
class CalculateUserScores(beam.PTransform):
"""Extract user/score pairs from the event stream using processing time, via
global windowing. Get periodic updates on all users' running scores.
"""
def __init__(self, allowed_lateness):
super(CalculateUserScores, self).__init__()
self.allowed_lateness_seconds = allowed_lateness * 60
def expand(self, pcoll):
# NOTE: the behavior does not exactly match the Java example
# TODO: allowed_lateness not implemented yet in FixedWindows
# TODO: AfterProcessingTime not implemented yet, replace AfterCount
return (
pcoll
# Get periodic results every ten events.
| 'LeaderboardUserGlobalWindows' >> beam.WindowInto(
beam.window.GlobalWindows(),
trigger=trigger.Repeatedly(trigger.AfterCount(10)),
accumulation_mode=trigger.AccumulationMode.ACCUMULATING)
# Extract and sum username/score pairs from the event data.
| 'ExtractAndSumScore' >> ExtractAndSumScore('user'))
# [END processing_time_trigger]
def run(argv=None):
"""Main entry point; defines and runs the hourly_team_score pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--topic',
type=str,
help='Pub/Sub topic to read from')
parser.add_argument('--subscription',
type=str,
help='Pub/Sub subscription to read from')
parser.add_argument('--dataset',
type=str,
required=True,
help='BigQuery Dataset to write tables to. '
'Must already exist.')
parser.add_argument('--table_name',
default='leader_board',
help='The BigQuery table name. Should not already exist.')
parser.add_argument('--team_window_duration',
type=int,
default=60,
help='Numeric value of fixed window duration for team '
'analysis, in minutes')
parser.add_argument('--allowed_lateness',
type=int,
default=120,
help='Numeric value of allowed data lateness, in minutes')
args, pipeline_args = parser.parse_known_args(argv)
if args.topic is None and args.subscription is None:
parser.print_usage()
print(sys.argv[0] + ': error: one of --topic or --subscription is required')
sys.exit(1)
options = PipelineOptions(pipeline_args)
# We also require the --project option to access --dataset
if options.view_as(GoogleCloudOptions).project is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --project is required')
sys.exit(1)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
options.view_as(SetupOptions).save_main_session = True
# Enforce that this pipeline is always run in streaming mode
options.view_as(StandardOptions).streaming = True
with beam.Pipeline(options=options) as p:
# Read game events from Pub/Sub using custom timestamps, which are extracted
# from the pubsub data elements, and parse the data.
# Read from PubSub into a PCollection.
if args.subscription:
scores = p | 'ReadPubSub' >> beam.io.ReadFromPubSub(
subscription=args.subscription)
else:
scores = p | 'ReadPubSub' >> beam.io.ReadFromPubSub(
topic=args.topic)
events = (
scores
| 'ParseGameEventFn' >> beam.ParDo(ParseGameEventFn())
| 'AddEventTimestamps' >> beam.Map(
lambda elem: beam.window.TimestampedValue(elem, elem['timestamp'])))
# Get team scores and write the results to BigQuery
(events # pylint: disable=expression-not-assigned
| 'CalculateTeamScores' >> CalculateTeamScores(
args.team_window_duration, args.allowed_lateness)
| 'TeamScoresDict' >> beam.ParDo(TeamScoresDict())
| 'WriteTeamScoreSums' >> WriteToBigQuery(
args.table_name + '_teams', args.dataset, {
'team': 'STRING',
'total_score': 'INTEGER',
'window_start': 'STRING',
'processing_time': 'STRING',
}, options.view_as(GoogleCloudOptions).project))
def format_user_score_sums(user_score):
(user, score) = user_score
return {'user': user, 'total_score': score}
# Get user scores and write the results to BigQuery
(events # pylint: disable=expression-not-assigned
| 'CalculateUserScores' >> CalculateUserScores(args.allowed_lateness)
| 'FormatUserScoreSums' >> beam.Map(format_user_score_sums)
| 'WriteUserScoreSums' >> WriteToBigQuery(
args.table_name + '_users', args.dataset, {
'user': 'STRING',
'total_score': 'INTEGER',
}, options.view_as(GoogleCloudOptions).project))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run() | __init__ | identifier_name |
main.go | package main
import (
"VAST-WATERS-21789/models"
"database/sql"
"embed"
"fmt"
"log"
"net/http"
"strconv"
"text/template"
"time"
"github.com/google/uuid"
"golang.org/x/crypto/bcrypt"
_ "github.com/go-sql-driver/mysql"
"bytes"
"strings"
"github.com/vcraescu/go-paginator/v2"
"github.com/vcraescu/go-paginator/v2/adapter"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
// CustomError: error type struct
type CustomError struct {
Code string
Message string
}
/*****************************************************************************뭔지 잘 모르는 것들*************************************************/
//필요한가 잘 모르겠음
func (e *CustomError) Error() string {
return e.Code + ", " + e.Message
}
//필요한가 잘 모르겟음
func (e *CustomError) StatusCode() int {
result, _ := strconv.Atoi(e.Code)
return result
}
// Delete delete data from db //! user 전용 11.08
// func Delete(db *sql.DB) {
// // Delete
// stmt, err := db.Prepare("delete from user where `id`=?")
// checkError(err)
// res, err := stmt.Exec(5)
// checkError(err)
// a, err := res.RowsAffected()
// checkError(err)
// fmt.Println(a, "rows in set")
// }
// Update change data from db인데 뭔지 잘 모르겠음
func Update(db *sql.DB) {
// Update
stmt, err := db.Prepare("update topic set profile=? where profile=?")
checkError(err)
res, err := stmt.Exec("developer", "dev")
checkError(err)
a, err := res.RowsAffected()
checkError(err)
fmt.Println(a, "rows in set")
}
//페이지 리스트인데 뭔지 잘 모르겠음
func getPageList(p string, limit int) []string {
page, _ := strconv.Atoi(p)
var result []string
for i := page - 2; i <= page+2; i++ {
if i > 0 && i <= limit {
result = append(result, strconv.Itoa(i))
}
}
return result
}
/**********************************************************조회*************************************************************************/
// db에서 모든 데이터를 조회
func ReadUser(db *sql.DB, req *http.Request) (models.User, *CustomError) {
// Read
id, pw := req.PostFormValue("id"), req.PostFormValue("password")
rows, err := db.Query("select * from users where id = ?", id)
checkError(err)
defer rows.Close()
var user = models.User{}
if !rows.Next() {
return user, &CustomError{Code: "401", Message: "ID doesn't exist."}
} else {
_ = rows.Scan(&user.Id, &user.Password, &user.Name, &user.Created, &user.Day, &user.Totaltime, &user.Trytime, &user.Recoverytime, &user.Frontcount, &user.Backcount, &user.AvgRPM, &user.AvgSpeed, &user.Distance, &user.Musclenum, &user.Kcalorynum, &user.Gender, &user.Area, &user.Birth, &user.Bike_info, &user.Career, &user.Club, &user.Email)
}
err = bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(pw))
if err != nil {
return user, &CustomError{Code: "401", Message: "uncorrect password."}
}
return user, nil
}
//유저를 Id로 조회
func ReadUserById(db *sql.DB, userId string) (models.User, error) {
fmt.Println("ReadUserById()")
row, err := db.Query("select * from users where id = ?", userId)
//row, err := db.Query("select * from user")
checkError(err)
defer row.Close()
var user = models.User{} //! 배열로 받아서 모든 테이블 정보 가져오기 해야함
for row.Next() {
err := row.Scan(&user.Id, &user.Password, &user.Name, &user.Created, &user.Day, &user.Totaltime, &user.Trytime, &user.Recoverytime, &user.Frontcount, &user.Backcount, &user.AvgRPM, &user.AvgSpeed, &user.Distance, &user.Musclenum, &user.Kcalorynum, &user.Gender, &user.Area, &user.Birth, &user.Bike_info, &user.Career, &user.Club, &user.Email)
if err != nil {
log.Fatal(err) //! 2021/11/4 이유
}
}
return user, nil
}
/*******************************************************잡동 사니*****************************************************************/
var (
gormDB *gorm.DB
//go:embed web
staticContent embed.FS
)
const (
MaxPerPage = 20
)
func checkError(err error) {
if err != nil {
panic(err.Error())
}
}
const (
//추가
user = "root"
password = "1234"
//port = "3307"
database = "tech"
host = "127.0.0.1"
)
// const ( //! 헤로쿠 작업할때 필요하다
// //추가/
// user = "bfbae725adafff"
// password = "ef851b9b"
// //port = "3307"
// database = "heroku_3e81fa660b7be57"
// host = "us-cdbr-east-04.cleardb.com"
// )
var (
db *sql.DB
tpl *template.Template
dbSessionCleaned time.Time
)
var content embed.FS
//템플릿 지정
func init() {
tpl = template.Must(template.ParseGlob("web/templates/*"))
dbSessionCleaned = time.Now()
}
/*******************************************************************회원가입******************************************************************/
// 유저생성
func CreateUser(db *sql.DB, req *http.Request) *CustomError { //! 이거는 어디껀가
// req.ParseForm()
id := req.PostFormValue("id")
password := req.PostFormValue("password")
name := req.PostFormValue("name")
//update := time.Now().Format("2006-01-02 15:04:05")
created := time.Now().Format("2006-01-02 15:04:05")
//day := req.PostFormValue("day")
//totaltime := req.PostFormValue("totaltime")
//trytime := req.PostFormValue("trytime")
//recoverytime := req.PostFormValue("recoverytime")
//frontcount := req.PostFormValue("frontcount")
//backcount := req.PostFormValue("backcount")
//avgrpm := req.PostFormValue("avgrpm")
//avgspeed := req.PostFormValue("avgspeed")
//distance := req.PostFormValue("distance")
//musclenum := req.PostFormValue("musclenum")
//Kcalorynum := req.PostFormValue("Kcalorynum")
gender := req.PostFormValue("gender")
area := req.PostFormValue("area")
birth := req.PostFormValue("birth")
bike_info := req.PostFormValue("bike_info")
career := req.PostFormValue("career")
//club := req.PostFormValue("club")
email := req.PostFormValue("email")
// Create 2
stmt, err := db.Prepare("insert into users (id, password, name, created, gender, area, birth, bike_info, career, email) values (?,?, ?, ?, ?,?,?,?,?,?)")
// stmt, err := db.Prepare("insert into user (id, password, name,created) values (?,?, ?,?)")
checkError(err)
defer stmt.Close()
bs, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.MinCost)
_, err = stmt.Exec(id, bs, name, created, gender, area, birth, bike_info, career, email)
if err != nil {
fmt.Println("error:", err)
return &CustomError{Code: "1062", Message: "already exists id."}
}
stm, err := db.Prepare("insert into boards (id, name, created_at, gender, area, birth, bike_info, career, email) values (?, ?, ?, ?, ?, ?, ?, ?, ?)")
// stmt, err := db.Prepare("insert into user (id, password, name,created) values (?,?, ?,?)")
checkError(err)
defer stmt.Close()
_, err = stm.Exec(id, name, created, gender, area, birth, bike_info, career, email)
if err != nil {
fmt.Println("error:", err)
return &CustomError{Code: "1062", Message: "already exists id."}
}
return nil
}
//회원가입
func signUp(w http.ResponseWriter, req *http.Request) {
if alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/index", http.StatusSeeOther)
return
}
if req.Method == http.MethodGet {
tpl.ExecuteTemplate(w, "signup.gohtml", nil)
}
if req.Method == http.MethodPost {
err := CreateUser(db, req)
if err != nil {
errMsg := map[string]interface{}{"error": err}
tpl.ExecuteTemplate(w, "signup.gohtml", errMsg)
} else {
http.Redirect(w, req, "/", http.StatusSeeOther)
}
return
}
}
/*******************************************관리자 페이지*******************************************************/
//관리자 페이지
func board(w http.ResponseWriter, r *http.Request) {
var b []models.Board
if !alreadyLoggedIn(w, r) {
http.Redirect(w, r, "/", http.StatusSeeOther) //! possible to connect to /board/ for a while after logging out 11.07
return
}
// result.RowsAffected // returns found records count, equals `len(users)`
// result.Error // returns error
page := r.FormValue("page")
if page == "" {
page = "1"
}
pageInt, _ := strconv.Atoi(page)
if keyword := r.FormValue("v"); keyword != "" {
target := r.FormValue("target")
switch target {
case "email":
q := gormDB.Where("email LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
return
case "id":
q := gormDB.Where("id LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
return
}
}
q := gormDB.Order("backcount desc").Find(&b) //! ordered by author 11.08 /04:56
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
}
//이거는 뭔지 아직 모르겠음
func write(w http.ResponseWriter, r *http.Request) { //! board 데이터 수정
if r.Method == http.MethodPost {
email := r.PostFormValue("email")
area := r.PostFormValue("area")
bike_info := r.PostFormValue("bike_info")
newPost := models.Board{Email: email, Area: area, Bike_info: bike_info}
gormDB.Create(&newPost)
http.Redirect(w, r, "/", http.StatusSeeOther)
return
}
tpl.ExecuteTemplate(w, "write.gohtml", nil)
}
//관리자페이지에 삭제
func delete(w http.ResponseWriter, r *http.Request) { //! board 삭제
id := strings.TrimPrefix(r.URL.Path, "/delete/")
gormDB.Delete(&models.Board{}, id)
http.Redirect(w, r, "/board", http.StatusSeeOther)
}
//관리자 페이지 수정 (아직 안됨)
func edit(w http.ResponseWriter, r *http.Request) {
id := strings.TrimPrefix(r.URL.Path, "/edit/")
var b models.Board
gormDB.First(&b, id)
if r.Method == http.MethodPost {
gormDB.Model(&b).Updates(models.Board{Email: r.PostFormValue("email"), Area: r.PostFormValue("area"), Bike_info: r.PostFormValue("bike_info")})
// gormDB.Model(&b).Updates(Board{Name: r.PostFormValue("name"), Totaltime: r.PostFormValue("totaltime")})
var byteBuf bytes.Buffer
byteBuf.WriteString("/post/")
byteBuf.WriteString(id)
http.Redirect(w, r, byteBuf.String(), http.StatusSeeOther)
}
tpl.ExecuteTemplate(w, "write.gohtml", b)
}
//관리자 페이지 수정 하기전 조회
func post(w http.ResponseWriter, r *http.Request) {
// id := r.FormValue("id")
id := strings.TrimPrefix(r.URL.Path, "/post/")
var b models.Board
gormDB.First(&b, id)
tpl.ExecuteTemplate(w, "post.gohtml", b)
}
/***************************************주요 메뉴들*********************************************************/
// index 페이지(dashboard.html -> mydata.html)
func mydata(w http.ResponseWriter, req *http.Request) {
// var b []Board
if !alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/", http.StatusSeeOther)
return
}
u := getUser(w, req)
tpl.ExecuteTemplate(w, "mydata.html", u)
}
// mypage (index2.html -> mypage.html)
func mypage(w http.ResponseWriter, req *http.Request) {
// var b []Board
// if !alreadyLoggedIn(w, req) {
// http.Redirect(w, req, "/", http.StatusSeeOther)
// return
// }
if !alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/", http.StatusSeeOther)
return
}
u := getUser(w, req)
tpl.ExecuteTemplate(w, "mypage.gohtml", u) //! html로 바꾸는법~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
}
//랭킹 (board2.html -> ranking.html)
func ranking(w http.ResponseWriter, r *http.Request) {
var b []models.Board
if !alreadyLoggedIn(w, r) {
http.Redirect(w, r, "/", http.StatusSeeOther) //! possible to connect to /board/ for a while after logging out 11.07
return
}
// result.RowsAffected // returns found records count, equals `len(users)`
// result.Error // returns error
page := r.FormValue("page")
if page == "" {
page = "1"
}
pageInt, _ := strconv.Atoi(page)
if keyword := r.FormValue("v"); keyword != "" {
target := r.FormValue("target")
switch target {
case "email":
q := gormDB.Where("email LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
return
case "area":
q := gormDB.Where("area LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
return
}
}
q := gormDB.Order("backcount desc").Find(&b) //! ordered by author 11.08 /04:56
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
}
/***************************************세션 관련************************************************************/
//세션 유지 길이 //21-11-10
const sessionLength int = 600
//세션 생성
func CreateSession(db *sql.DB, sessionId string, userId string) {
stmt, err := db.Prepare("insert into sessions values (?, ?, ?)")
checkError(err)
defer stmt.Close()
_, err = stmt.Exec(sessionId, userId, time.Now().Format("2006-01-02 15:04:05"))
checkError(err)
}
//세션을 통해 유저 정보 가져오기
func getUser(w http.ResponseWriter, req *http.Request) models.User {
fmt.Println("getUser()")
// get cookie
c, err := req.Cookie("sessions")
if err != nil {
sID := uuid.New()
c = &http.Cookie{
Name: "sessions",
Value: sID.String(),
}
}
c.MaxAge = sessionLength
http.SetCookie(w, c)
// if the user exists already, get user
var u models.User
un, err := ReadSession(db, c.Value)
if err != nil {
log.Fatal(err)
}
UpdateCurrentTime(db, un)
u, _ = ReadUserById(db, un)
return u
}
//이미 로그인이 되어있는지 세션을 통해 확인
func alreadyLoggedIn(w http.ResponseWriter, req *http.Request) bool {
fmt.Println("alreadyLoggedIn()")
c, err := req.Cookie("sessions")
if err != nil {
return false
}
un, err := ReadSession(db, c.Value)
if err != nil {
return false
}
UpdateCurrentTime(db, un)
_, err = ReadUserById(db, un)
if err != nil {
return false
}
c.MaxAge = sessionLength
http.SetCookie(w, c)
return true
}
//세션 로그인에 시간 표시
func UpdateCurrentTime(db *sql.DB, sessionID string) {
stmt, err := db.Prepare("UPDATE sessions SET | ? WHERE `user_id`=?")
checkError(err)
defer stmt.Close()
_, err = stmt.Exec(time.Now().Format("2006-01-02 15:04:05"), sessionID)
checkError(err)
}
//세션 초기화
func CleanSessions(db *sql.DB) {
var sessionID string
var currentTime string
rows, err := db.Query("select session_id, current_time from sessions")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
err := rows.Scan(&sessionID, ¤tTime)
if err != nil {
log.Fatal(err)
}
t, _ := time.Parse("2006-01-02 15:04:05", currentTime)
if time.Now().Sub(t) > (time.Second * 10) {
DeleteSession(db, sessionID)
}
}
dbSessionCleaned = time.Now()
} //12
//세션 삭제
func DeleteSession(db *sql.DB, sessionID string) {
stmt, err := db.Prepare("delete from sessions where `session_id`=?")
checkError(err)
_, err = stmt.Exec(sessionID)
checkError(err)
}
//생성된 세션 읽기
func ReadSession(db *sql.DB, sessionId string) (string, error) {
fmt.Println("ReadSession()")
row, err := db.Query("select user_id from sessions where session_id = ?", sessionId)
checkError(err)
defer row.Close()
var userId string
for row.Next() {
err = row.Scan(&userId)
if err != nil {
log.Fatal(err)
}
}
return userId, nil
}
/****************************************로그인 관련*******************************************************/
//로그인
func login(w http.ResponseWriter, req *http.Request) { //! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~``
if alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/mydata", http.StatusSeeOther)
return
}
if req.Method == http.MethodPost {
user, err := ReadUser(db, req)
if err != nil {
errMsg := map[string]interface{}{"error": err}
tpl.ExecuteTemplate(w, "login3.html", errMsg)
return
}
sID := uuid.New()
c := &http.Cookie{
Name: "sessions",
Value: sID.String(),
}
http.SetCookie(w, c)
CreateSession(db, c.Value, user.Id)
http.Redirect(w, req, "/mydata", http.StatusSeeOther)
return
}
tpl.ExecuteTemplate(w, "login3.html", nil)
}
//로그아웃
func logout(w http.ResponseWriter, req *http.Request) {
if !alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/", http.StatusSeeOther)
return
}
c, _ := req.Cookie("sessions")
// delete session
DeleteSession(db, c.Value)
//
c = &http.Cookie{
Name: "sessions",
Value: "",
MaxAge: -1,
}
http.SetCookie(w, c)
if time.Now().Sub(dbSessionCleaned) > (time.Second * 30) {
go CleanSessions(db)
}
http.Redirect(w, req, "/", http.StatusSeeOther)
}
/********************************************************메인함수************************************************************/
// main wires up the MySQL connection (a database/sql handle plus a GORM
// handle sharing the same connection), auto-migrates the schema, registers
// all HTTP handlers, and serves on :8080.
func main() {
	fmt.Printf("Starting server at port 8080\n")
	var connectionString = fmt.Sprintf("%s:%s@tcp(%s:3306)/%s?charset=utf8mb4&parseTime=True", user, password, host, database)
	var err error
	db, err = sql.Open("mysql", connectionString)
	checkError(err)
	defer db.Close()
	// sql.Open does not touch the network; Ping verifies the DSN/server.
	err = db.Ping()
	checkError(err)
	gormDB, err = gorm.Open(mysql.New(mysql.Config{
		Conn: db,
	}), &gorm.Config{})
	if err != nil {
		panic("failed to connect database")
	}
	// BUG FIX: the AutoMigrate error was silently discarded.
	if err := gormDB.AutoMigrate(&models.Board{}, &models.User{}, &models.Session{}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Successfully Connected to DB")
	http.HandleFunc("/", login)
	http.HandleFunc("/delete/", delete)
	http.HandleFunc("/write/", write)
	http.HandleFunc("/board/", board)
	http.HandleFunc("/ranking/", ranking)
	http.HandleFunc("/post/", post)
	http.HandleFunc("/edit/", edit)
	http.HandleFunc("/mypage", mypage)
	http.HandleFunc("/signup", signUp)
	http.HandleFunc("/mydata", mydata)
	http.HandleFunc("/logout", logout)
	http.Handle("/web/", http.FileServer(http.FS(staticContent)))
	fmt.Println("Listening...ss")
	// BUG FIX: ListenAndServe's error (e.g. port already in use) was
	// discarded, so the process could exit with no diagnostic.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// before modification (수정전)
| `current_time`= | identifier_name |
main.go | package main
import (
"VAST-WATERS-21789/models"
"database/sql"
"embed"
"fmt"
"log"
"net/http"
"strconv"
"text/template"
"time"
"github.com/google/uuid"
"golang.org/x/crypto/bcrypt"
_ "github.com/go-sql-driver/mysql"
"bytes"
"strings"
"github.com/vcraescu/go-paginator/v2"
"github.com/vcraescu/go-paginator/v2/adapter"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
// CustomError: error type struct
// CustomError is the application's error value: a string status/DB error
// code (e.g. "401", "1062") plus a human-readable message rendered into
// templates on failure.
type CustomError struct {
	Code string
	Message string
}
/*****************************************************************************뭔지 잘 모르는 것들*************************************************/
//필요한가 잘 모르겠음
// Error implements the error interface as "<code>, <message>".
func (e *CustomError) Error() string {
	return fmt.Sprintf("%s, %s", e.Code, e.Message)
}
//필요한가 잘 모르겟음
// StatusCode converts Code to an int; malformed codes yield 0 (Atoi's
// zero value, error deliberately ignored as in the original).
func (e *CustomError) StatusCode() int {
	code, _ := strconv.Atoi(e.Code)
	return code
}
// Delete delete data from db //! user 전용 11.08
// func Delete(db *sql.DB) {
// // Delete
// stmt, err := db.Prepare("delete from user where `id`=?")
// checkError(err)
// res, err := stmt.Exec(5)
// checkError(err)
// a, err := res.RowsAffected()
// checkError(err)
// fmt.Println(a, "rows in set")
// }
// Update change data from db인데 뭔지 잘 모르겠음
// Update rewrites topic.profile from "dev" to "developer" and logs the
// number of affected rows. Looks like a one-off maintenance helper; it is
// not registered as an HTTP handler in main.
func Update(db *sql.DB) {
	stmt, err := db.Prepare("update topic set profile=? where profile=?")
	checkError(err)
	// BUG FIX: the prepared statement was never closed (handle leak).
	defer stmt.Close()
	res, err := stmt.Exec("developer", "dev")
	checkError(err)
	a, err := res.RowsAffected()
	checkError(err)
	fmt.Println(a, "rows in set")
}
//페이지 리스트인데 뭔지 잘 모르겠음
// getPageList returns the page numbers to show in the pagination bar: up
// to two pages either side of p (inclusive), clamped to [1, limit], as
// strings. A non-numeric p parses as 0 (Atoi error ignored, as before).
func getPageList(p string, limit int) []string {
	current, _ := strconv.Atoi(p)
	var pages []string
	for candidate := current - 2; candidate <= current+2; candidate++ {
		if candidate >= 1 && candidate <= limit {
			pages = append(pages, strconv.Itoa(candidate))
		}
	}
	return pages
}
/**********************************************************조회*************************************************************************/
// db에서 모든 데이터를 조회
// ReadUser authenticates the credentials posted in the request ("id",
// "password") against the users table. Returns a 401 CustomError when the
// id is unknown or the password does not match the stored bcrypt hash.
func ReadUser(db *sql.DB, req *http.Request) (models.User, *CustomError) {
	id, pw := req.PostFormValue("id"), req.PostFormValue("password")
	rows, err := db.Query("select * from users where id = ?", id)
	checkError(err)
	defer rows.Close()
	var user = models.User{}
	if !rows.Next() {
		return user, &CustomError{Code: "401", Message: "ID doesn't exist."}
	}
	// BUG FIX: the Scan error was silently discarded (`_ = rows.Scan`); a
	// column mismatch would leave user zero-valued and fail auth confusingly.
	if err := rows.Scan(&user.Id, &user.Password, &user.Name, &user.Created, &user.Day, &user.Totaltime, &user.Trytime, &user.Recoverytime, &user.Frontcount, &user.Backcount, &user.AvgRPM, &user.AvgSpeed, &user.Distance, &user.Musclenum, &user.Kcalorynum, &user.Gender, &user.Area, &user.Birth, &user.Bike_info, &user.Career, &user.Club, &user.Email); err != nil {
		return user, &CustomError{Code: "500", Message: "failed to read user record."}
	}
	// BUG FIX: typo "uncorrect" -> "incorrect" in the user-facing message.
	if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(pw)); err != nil {
		return user, &CustomError{Code: "401", Message: "incorrect password."}
	}
	return user, nil
}
//유저를 Id로 조회
// ReadUserById loads the full users row for userId.
//
// BUG FIX: scan failures previously called log.Fatal (killing the whole
// server from a request path), and a missing user was returned with a nil
// error, so callers such as alreadyLoggedIn could never detect a deleted
// user. Errors are now returned; the signature is unchanged.
func ReadUserById(db *sql.DB, userId string) (models.User, error) {
	fmt.Println("ReadUserById()")
	rows, err := db.Query("select * from users where id = ?", userId)
	if err != nil {
		return models.User{}, err
	}
	defer rows.Close()
	var user = models.User{}
	found := false
	for rows.Next() {
		if err := rows.Scan(&user.Id, &user.Password, &user.Name, &user.Created, &user.Day, &user.Totaltime, &user.Trytime, &user.Recoverytime, &user.Frontcount, &user.Backcount, &user.AvgRPM, &user.AvgSpeed, &user.Distance, &user.Musclenum, &user.Kcalorynum, &user.Gender, &user.Area, &user.Birth, &user.Bike_info, &user.Career, &user.Club, &user.Email); err != nil {
			return models.User{}, err
		}
		found = true
	}
	if err := rows.Err(); err != nil {
		return models.User{}, err
	}
	if !found {
		return user, fmt.Errorf("user %q not found", userId)
	}
	return user, nil
}
/*******************************************************잡동 사니*****************************************************************/
var (
	gormDB *gorm.DB // GORM handle sharing main's *sql.DB connection
	// Files under web/ compiled into the binary; served at /web/ in main.
	//go:embed web
	staticContent embed.FS
)
// MaxPerPage is the paginator page size for the board/ranking listings.
const (
	MaxPerPage = 20
)
// checkError panics with the error text when err is non-nil; otherwise it
// is a no-op. Used throughout as a crash-fast error check.
func checkError(err error) {
	if err == nil {
		return
	}
	panic(err.Error())
}
// Local MySQL connection settings.
// NOTE(review): credentials are hardcoded in source — move them to
// environment variables or a config file before sharing/deploying.
const (
	user = "root"
	password = "1234"
	//port = "3307"
	database = "tech"
	host = "127.0.0.1"
)
// const ( //! 헤로쿠 작업할때 필요하다
// //추가/
// user = "bfbae725adafff"
// password = "ef851b9b"
// //port = "3307"
// database = "heroku_3e81fa660b7be57"
// host = "us-cdbr-east-04.cleardb.com"
// )
var (
	db *sql.DB // shared database handle, opened in main
	tpl *template.Template // all parsed templates, loaded in init
	dbSessionCleaned time.Time // last CleanSessions run (30s throttle in logout)
)
// NOTE(review): 'content' appears unused here — 'staticContent' is the
// embedded FS actually served; confirm before removing.
var content embed.FS
//템플릿 지정
// init parses every template under web/templates once at startup
// (template.Must panics on a parse failure) and seeds the
// session-cleanup timestamp used to throttle CleanSessions.
func init() {
	tpl = template.Must(template.ParseGlob("web/templates/*"))
	dbSessionCleaned = time.Now()
}
/*******************************************************************회원가입******************************************************************/
// 유저생성
// CreateUser inserts a new user (and a matching boards row) from the
// signup form. The password is stored as a bcrypt hash. Returns a
// CustomError when hashing or either insert fails (most likely a
// duplicate primary key, MySQL error 1062).
func CreateUser(db *sql.DB, req *http.Request) *CustomError {
	id := req.PostFormValue("id")
	password := req.PostFormValue("password")
	name := req.PostFormValue("name")
	created := time.Now().Format("2006-01-02 15:04:05")
	gender := req.PostFormValue("gender")
	area := req.PostFormValue("area")
	birth := req.PostFormValue("birth")
	bike_info := req.PostFormValue("bike_info")
	career := req.PostFormValue("career")
	email := req.PostFormValue("email")
	stmt, err := db.Prepare("insert into users (id, password, name, created, gender, area, birth, bike_info, career, email) values (?,?, ?, ?, ?,?,?,?,?,?)")
	checkError(err)
	defer stmt.Close()
	// BUG FIX: the bcrypt error was silently overwritten by the next
	// assignment. Also upgraded MinCost (cost 4, trivially brute-forceable)
	// to DefaultCost; previously stored hashes still verify.
	bs, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		fmt.Println("error:", err)
		return &CustomError{Code: "500", Message: "failed to hash password."}
	}
	if _, err = stmt.Exec(id, bs, name, created, gender, area, birth, bike_info, career, email); err != nil {
		fmt.Println("error:", err)
		return &CustomError{Code: "1062", Message: "already exists id."}
	}
	stm, err := db.Prepare("insert into boards (id, name, created_at, gender, area, birth, bike_info, career, email) values (?, ?, ?, ?, ?, ?, ?, ?, ?)")
	checkError(err)
	// BUG FIX: was `defer stmt.Close()` a second time; stm was never closed.
	defer stm.Close()
	if _, err = stm.Exec(id, name, created, gender, area, birth, bike_info, career, email); err != nil {
		fmt.Println("error:", err)
		return &CustomError{Code: "1062", Message: "already exists id."}
	}
	return nil
}
//회원가입
// signUp serves the signup form (GET) and creates the account (POST).
// Logged-in visitors are redirected to /index; a successful signup
// redirects to the login page.
func signUp(w http.ResponseWriter, req *http.Request) {
	if alreadyLoggedIn(w, req) {
		http.Redirect(w, req, "/index", http.StatusSeeOther)
		return
	}
	switch req.Method {
	case http.MethodGet:
		tpl.ExecuteTemplate(w, "signup.gohtml", nil)
	case http.MethodPost:
		if err := CreateUser(db, req); err != nil {
			// Re-render the form with the creation error.
			tpl.ExecuteTemplate(w, "signup.gohtml", map[string]interface{}{"error": err})
		} else {
			http.Redirect(w, req, "/", http.StatusSeeOther)
		}
	}
}
/*******************************************관리자 페이지*******************************************************/
//관리자 페이지
func board(w http.ResponseWriter, r *http.Request) {
var b []models.Board
if !alreadyLoggedIn(w, r) {
http.Redirect(w, r, "/", http.StatusSeeOther) //! possible to connect to /board/ for a while after logging out 11.07
return
}
// result.RowsAffected // returns found records count, equals `len(users)`
// result.Error // returns error
page := r.FormValue("page")
if page == "" {
page = "1"
}
pageInt, _ := strconv.Atoi(page)
if keyword := r.FormValue("v"); keyword != "" {
target := r.FormValue("target")
switch target {
case "email":
q := gormDB.Where("email LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
return
case "id":
q := gormDB.Where("id LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
return
}
}
q := gormDB.Order("backcount desc").Find(&b) //! ordered by author 11.08 /04:56
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
}
//이거는 뭔지 아직 모르겠음
func write(w http.ResponseWriter, r *http.Request) { //! board 데이터 수정
if r.Method == http.MethodPost {
email := r.PostFormValue("email")
area := r.PostFormValue("area")
bike_info := r.PostFormValue("bike_info")
newPost := models.Board{Email: email, Area: area, Bike_info: bike_info}
gormDB.Create(&newPost)
http.Redirect(w, r, "/", http.StatusSeeOther)
return
}
tpl.ExecuteTemplate(w, "write.gohtml", nil)
}
//관리자페이지에 삭제
// delete removes the Board row whose id follows /delete/ in the URL path,
// then redirects to the listing.
// NOTE(review): the name shadows Go's builtin delete at package scope;
// renaming would require touching the route table in main.
// NOTE(review): unlike board/ranking, there is no alreadyLoggedIn check
// here — any visitor can delete rows. Verify whether that is intended.
func delete(w http.ResponseWriter, r *http.Request) { //! delete a board row
	id := strings.TrimPrefix(r.URL.Path, "/delete/")
	gormDB.Delete(&models.Board{}, id)
	http.Redirect(w, r, "/board", http.StatusSeeOther)
}
//관리자 페이지 수정 (아직 안됨)
// edit shows the edit form for a Board row addressed as /edit/<id> (GET)
// and applies the posted changes (POST), redirecting to /post/<id>.
func edit(w http.ResponseWriter, r *http.Request) {
	id := strings.TrimPrefix(r.URL.Path, "/edit/")
	var b models.Board
	gormDB.First(&b, id)
	if r.Method == http.MethodPost {
		gormDB.Model(&b).Updates(models.Board{Email: r.PostFormValue("email"), Area: r.PostFormValue("area"), Bike_info: r.PostFormValue("bike_info")})
		var byteBuf bytes.Buffer
		byteBuf.WriteString("/post/")
		byteBuf.WriteString(id)
		http.Redirect(w, r, byteBuf.String(), http.StatusSeeOther)
		// BUG FIX: without this return the handler fell through and also
		// executed the template after the redirect had been written.
		return
	}
	tpl.ExecuteTemplate(w, "write.gohtml", b)
}
//관리자 페이지 수정 하기전 조회
// post shows the detail view of a single Board row addressed as /post/<id>.
func post(w http.ResponseWriter, r *http.Request) {
	rowID := strings.TrimPrefix(r.URL.Path, "/post/")
	var record models.Board
	gormDB.First(&record, rowID)
	tpl.ExecuteTemplate(w, "post.gohtml", record)
}
/***************************************주요 메뉴들*********************************************************/
// index 페이지(dashboard.html -> mydata.html)
// mydata renders the dashboard (mydata.html) for the logged-in user;
// unauthenticated visitors are sent back to the login page.
func mydata(w http.ResponseWriter, req *http.Request) {
	if !alreadyLoggedIn(w, req) {
		http.Redirect(w, req, "/", http.StatusSeeOther)
		return
	}
	currentUser := getUser(w, req)
	tpl.ExecuteTemplate(w, "mydata.html", currentUser)
}
// mypage (index2.html -> mypage.html)
// mypage renders the user's profile page (mypage.gohtml);
// unauthenticated visitors are sent back to the login page.
func mypage(w http.ResponseWriter, req *http.Request) {
	if !alreadyLoggedIn(w, req) {
		http.Redirect(w, req, "/", http.StatusSeeOther)
		return
	}
	currentUser := getUser(w, req)
	tpl.ExecuteTemplate(w, "mypage.gohtml", currentUser)
}
//랭킹 (board2.html -> ranking.html)
func ranking(w http.ResponseWriter, r *http.Request) {
var b []models.Board
if !alreadyLoggedIn(w, r) {
http.Redirect(w, r, "/", http.StatusSeeOther) //! possible t | rn
}
// result.RowsAffected // returns found records count, equals `len(users)`
// result.Error // returns error
page := r.FormValue("page")
if page == "" {
page = "1"
}
pageInt, _ := strconv.Atoi(page)
if keyword := r.FormValue("v"); keyword != "" {
target := r.FormValue("target")
switch target {
case "email":
q := gormDB.Where("email LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
return
case "area":
q := gormDB.Where("area LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
return
}
}
q := gormDB.Order("backcount desc").Find(&b) //! ordered by author 11.08 /04:56
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
}
/***************************************세션 관련************************************************************/
//세션 유지 길이 //21-11-10
const sessionLength int = 600
//세션 생성
// CreateSession inserts a (session_id, user_id, current_time) row for a
// freshly issued cookie value.
// NOTE(review): the positional "insert into sessions values (?, ?, ?)"
// assumes exactly these three columns in this order — confirm against the
// Session model used by AutoMigrate.
func CreateSession(db *sql.DB, sessionId string, userId string) {
	stmt, err := db.Prepare("insert into sessions values (?, ?, ?)")
	checkError(err)
	defer stmt.Close()
	// Third column is the last-seen stamp consumed by CleanSessions.
	_, err = stmt.Exec(sessionId, userId, time.Now().Format("2006-01-02 15:04:05"))
	checkError(err)
}
//세션을 통해 유저 정보 가져오기
// getUser resolves the current user from the "sessions" cookie: refreshes
// the cookie's TTL, stamps the session's last-seen time, and loads the
// user row. Only called after alreadyLoggedIn has succeeded.
func getUser(w http.ResponseWriter, req *http.Request) models.User {
	fmt.Println("getUser()")
	// Issue a fresh cookie when none exists; that session has no DB row
	// yet, so the lookups below will come back empty.
	c, err := req.Cookie("sessions")
	if err != nil {
		sID := uuid.New()
		c = &http.Cookie{
			Name:  "sessions",
			Value: sID.String(),
		}
	}
	// Sliding expiration: sessionLength seconds from this request.
	c.MaxAge = sessionLength
	http.SetCookie(w, c)
	var u models.User
	un, err := ReadSession(db, c.Value)
	if err != nil {
		// NOTE(review): log.Fatal terminates the whole server from a
		// request path — consider surfacing the error instead.
		log.Fatal(err)
	}
	UpdateCurrentTime(db, un)
	// Lookup error ignored: a zero-valued User is rendered in that case.
	u, _ = ReadUserById(db, un)
	return u
}
//이미 로그인이 되어있는지 세션을 통해 확인
// alreadyLoggedIn reports whether the request carries a "sessions" cookie
// backed by a session row and user row; on success it refreshes the
// cookie TTL and stamps the session's last-seen time.
// NOTE(review): as written in this file, ReadSession and ReadUserById
// always return a nil error, so the two error checks below can never
// fire — a cookie whose session row is gone may still be treated as
// logged in. Verify against the intended session contract.
func alreadyLoggedIn(w http.ResponseWriter, req *http.Request) bool {
	fmt.Println("alreadyLoggedIn()")
	c, err := req.Cookie("sessions")
	if err != nil {
		return false
	}
	un, err := ReadSession(db, c.Value)
	if err != nil {
		return false
	}
	UpdateCurrentTime(db, un)
	_, err = ReadUserById(db, un)
	if err != nil {
		return false
	}
	// Sliding expiration: renew the cookie on every authenticated request.
	c.MaxAge = sessionLength
	http.SetCookie(w, c)
	return true
}
//세션 로그인에 시간 표시
// UpdateCurrentTime stamps "now" into sessions.current_time for the given
// user, marking the session as recently active for CleanSessions.
// NOTE(review): the parameter is named sessionID but is matched against
// the user_id column — callers actually pass a user id.
func UpdateCurrentTime(db *sql.DB, sessionID string) {
	stmt, err := db.Prepare("UPDATE sessions SET `current_time`=? WHERE `user_id`=?")
	checkError(err)
	defer stmt.Close()
	_, err = stmt.Exec(time.Now().Format("2006-01-02 15:04:05"), sessionID)
	checkError(err)
}
//세션 초기화
func CleanSessions(db *sql.DB) {
var sessionID string
var currentTime string
rows, err := db.Query("select session_id, current_time from sessions")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
err := rows.Scan(&sessionID, ¤tTime)
if err != nil {
log.Fatal(err)
}
t, _ := time.Parse("2006-01-02 15:04:05", currentTime)
if time.Now().Sub(t) > (time.Second * 10) {
DeleteSession(db, sessionID)
}
}
dbSessionCleaned = time.Now()
} //12
//세션 삭제
// DeleteSession removes the session row identified by sessionID.
func DeleteSession(db *sql.DB, sessionID string) {
	stmt, err := db.Prepare("delete from sessions where `session_id`=?")
	checkError(err)
	// BUG FIX: the prepared statement was never closed (handle leak).
	defer stmt.Close()
	_, err = stmt.Exec(sessionID)
	checkError(err)
}
//생성된 세션 읽기
// ReadSession looks up the user_id bound to sessionId in the sessions table.
//
// BUG FIX: previously a missing session returned ("", nil), so callers
// (alreadyLoggedIn, getUser) could not distinguish a stale cookie from a
// valid session. Scan/query errors also went through checkError/log.Fatal.
// Errors are now returned; the signature is unchanged and all callers
// already handle a non-nil error.
func ReadSession(db *sql.DB, sessionId string) (string, error) {
	fmt.Println("ReadSession()")
	rows, err := db.Query("select user_id from sessions where session_id = ?", sessionId)
	if err != nil {
		return "", err
	}
	defer rows.Close()
	var userId string
	found := false
	for rows.Next() {
		if err := rows.Scan(&userId); err != nil {
			return "", err
		}
		found = true
	}
	if err := rows.Err(); err != nil {
		return "", err
	}
	if !found {
		return "", fmt.Errorf("session %q not found", sessionId)
	}
	return userId, nil
}
/****************************************로그인 관련*******************************************************/
//로그인
// login renders the login form; on POST it authenticates via ReadUser,
// creates a session row keyed by a fresh UUID cookie, and redirects to
// /mydata. Already-authenticated visitors are redirected immediately.
func login(w http.ResponseWriter, req *http.Request) {
	if alreadyLoggedIn(w, req) {
		http.Redirect(w, req, "/mydata", http.StatusSeeOther)
		return
	}
	if req.Method == http.MethodPost {
		user, err := ReadUser(db, req)
		if err != nil {
			// Re-render the form with the authentication error.
			errMsg := map[string]interface{}{"error": err}
			tpl.ExecuteTemplate(w, "login3.html", errMsg)
			return
		}
		// Fresh random session id stored client-side (cookie) and in DB.
		sID := uuid.New()
		c := &http.Cookie{
			Name:  "sessions",
			Value: sID.String(),
		}
		http.SetCookie(w, c)
		CreateSession(db, c.Value, user.Id)
		http.Redirect(w, req, "/mydata", http.StatusSeeOther)
		return
	}
	tpl.ExecuteTemplate(w, "login3.html", nil)
}
//로그아웃
// logout deletes the caller's DB session, expires the "sessions" cookie,
// and (at most every 30 seconds) launches a background sweep of stale
// sessions before redirecting home.
func logout(w http.ResponseWriter, req *http.Request) {
	if !alreadyLoggedIn(w, req) {
		http.Redirect(w, req, "/", http.StatusSeeOther)
		return
	}
	c, _ := req.Cookie("sessions")
	// delete session
	DeleteSession(db, c.Value)
	// MaxAge < 0 tells the browser to drop the cookie immediately.
	c = &http.Cookie{
		Name:   "sessions",
		Value:  "",
		MaxAge: -1,
	}
	http.SetCookie(w, c)
	// Throttled cleanup; runs concurrently so the redirect isn't delayed.
	if time.Now().Sub(dbSessionCleaned) > (time.Second * 30) {
		go CleanSessions(db)
	}
	http.Redirect(w, req, "/", http.StatusSeeOther)
}
/********************************************************메인함수************************************************************/
// main wires up the MySQL connection (a database/sql handle plus a GORM
// handle sharing the same connection), auto-migrates the schema, registers
// all HTTP handlers, and serves on :8080.
func main() {
	fmt.Printf("Starting server at port 8080\n")
	var connectionString = fmt.Sprintf("%s:%s@tcp(%s:3306)/%s?charset=utf8mb4&parseTime=True", user, password, host, database)
	var err error
	db, err = sql.Open("mysql", connectionString)
	checkError(err)
	defer db.Close()
	// sql.Open does not touch the network; Ping verifies the DSN/server.
	err = db.Ping()
	checkError(err)
	gormDB, err = gorm.Open(mysql.New(mysql.Config{
		Conn: db,
	}), &gorm.Config{})
	if err != nil {
		panic("failed to connect database")
	}
	// BUG FIX: the AutoMigrate error was silently discarded.
	if err := gormDB.AutoMigrate(&models.Board{}, &models.User{}, &models.Session{}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Successfully Connected to DB")
	http.HandleFunc("/", login)
	http.HandleFunc("/delete/", delete)
	http.HandleFunc("/write/", write)
	http.HandleFunc("/board/", board)
	http.HandleFunc("/ranking/", ranking)
	http.HandleFunc("/post/", post)
	http.HandleFunc("/edit/", edit)
	http.HandleFunc("/mypage", mypage)
	http.HandleFunc("/signup", signUp)
	http.HandleFunc("/mydata", mydata)
	http.HandleFunc("/logout", logout)
	http.Handle("/web/", http.FileServer(http.FS(staticContent)))
	fmt.Println("Listening...ss")
	// BUG FIX: ListenAndServe's error (e.g. port already in use) was
	// discarded, so the process could exit with no diagnostic.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// before modification (수정전)
| o connect to /board/ for a while after logging out 11.07
retu | conditional_block |
main.go | package main
import (
"VAST-WATERS-21789/models"
"database/sql"
"embed"
"fmt"
"log"
"net/http"
"strconv"
"text/template"
"time"
"github.com/google/uuid"
"golang.org/x/crypto/bcrypt"
_ "github.com/go-sql-driver/mysql"
"bytes"
"strings"
"github.com/vcraescu/go-paginator/v2"
"github.com/vcraescu/go-paginator/v2/adapter"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
// CustomError: error type struct
// CustomError is the application's error value: a string status/DB error
// code (e.g. "401", "1062") plus a human-readable message rendered into
// templates on failure.
type CustomError struct {
	Code string
	Message string
}
/*****************************************************************************뭔지 잘 모르는 것들*************************************************/
//필요한가 잘 모르겠음
// Error implements the error interface as "<code>, <message>".
func (e *CustomError) Error() string {
	return fmt.Sprintf("%s, %s", e.Code, e.Message)
}
//필요한가 잘 모르겟음
// StatusCode converts Code to an int; malformed codes yield 0 (Atoi's
// zero value, error deliberately ignored as in the original).
func (e *CustomError) StatusCode() int {
	code, _ := strconv.Atoi(e.Code)
	return code
}
// Delete delete data from db //! user 전용 11.08
// func Delete(db *sql.DB) {
// // Delete
// stmt, err := db.Prepare("delete from user where `id`=?")
// checkError(err)
// res, err := stmt.Exec(5)
// checkError(err)
// a, err := res.RowsAffected()
// checkError(err)
// fmt.Println(a, "rows in set")
// }
// Update change data from db인데 뭔지 잘 모르겠음
// Update rewrites topic.profile from "dev" to "developer" and logs the
// number of affected rows. Looks like a one-off maintenance helper; it is
// not registered as an HTTP handler in main.
func Update(db *sql.DB) {
	stmt, err := db.Prepare("update topic set profile=? where profile=?")
	checkError(err)
	// BUG FIX: the prepared statement was never closed (handle leak).
	defer stmt.Close()
	res, err := stmt.Exec("developer", "dev")
	checkError(err)
	a, err := res.RowsAffected()
	checkError(err)
	fmt.Println(a, "rows in set")
}
//페이지 리스트인데 뭔지 잘 모르겠음
// getPageList returns the page numbers to show in the pagination bar: up
// to two pages either side of p (inclusive), clamped to [1, limit], as
// strings. A non-numeric p parses as 0 (Atoi error ignored, as before).
func getPageList(p string, limit int) []string {
	current, _ := strconv.Atoi(p)
	var pages []string
	for candidate := current - 2; candidate <= current+2; candidate++ {
		if candidate >= 1 && candidate <= limit {
			pages = append(pages, strconv.Itoa(candidate))
		}
	}
	return pages
}
/**********************************************************조회*************************************************************************/
// db에서 모든 데이터를 조회
// ReadUser authenticates the credentials posted in the request ("id",
// "password") against the users table. Returns a 401 CustomError when the
// id is unknown or the password does not match the stored bcrypt hash.
func ReadUser(db *sql.DB, req *http.Request) (models.User, *CustomError) {
	id, pw := req.PostFormValue("id"), req.PostFormValue("password")
	rows, err := db.Query("select * from users where id = ?", id)
	checkError(err)
	defer rows.Close()
	var user = models.User{}
	if !rows.Next() {
		return user, &CustomError{Code: "401", Message: "ID doesn't exist."}
	}
	// BUG FIX: the Scan error was silently discarded (`_ = rows.Scan`); a
	// column mismatch would leave user zero-valued and fail auth confusingly.
	if err := rows.Scan(&user.Id, &user.Password, &user.Name, &user.Created, &user.Day, &user.Totaltime, &user.Trytime, &user.Recoverytime, &user.Frontcount, &user.Backcount, &user.AvgRPM, &user.AvgSpeed, &user.Distance, &user.Musclenum, &user.Kcalorynum, &user.Gender, &user.Area, &user.Birth, &user.Bike_info, &user.Career, &user.Club, &user.Email); err != nil {
		return user, &CustomError{Code: "500", Message: "failed to read user record."}
	}
	// BUG FIX: typo "uncorrect" -> "incorrect" in the user-facing message.
	if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(pw)); err != nil {
		return user, &CustomError{Code: "401", Message: "incorrect password."}
	}
	return user, nil
}
//유저를 Id로 조회
// ReadUserById loads the full users row for userId.
//
// BUG FIX: scan failures previously called log.Fatal (killing the whole
// server from a request path), and a missing user was returned with a nil
// error, so callers such as alreadyLoggedIn could never detect a deleted
// user. Errors are now returned; the signature is unchanged.
func ReadUserById(db *sql.DB, userId string) (models.User, error) {
	fmt.Println("ReadUserById()")
	rows, err := db.Query("select * from users where id = ?", userId)
	if err != nil {
		return models.User{}, err
	}
	defer rows.Close()
	var user = models.User{}
	found := false
	for rows.Next() {
		if err := rows.Scan(&user.Id, &user.Password, &user.Name, &user.Created, &user.Day, &user.Totaltime, &user.Trytime, &user.Recoverytime, &user.Frontcount, &user.Backcount, &user.AvgRPM, &user.AvgSpeed, &user.Distance, &user.Musclenum, &user.Kcalorynum, &user.Gender, &user.Area, &user.Birth, &user.Bike_info, &user.Career, &user.Club, &user.Email); err != nil {
			return models.User{}, err
		}
		found = true
	}
	if err := rows.Err(); err != nil {
		return models.User{}, err
	}
	if !found {
		return user, fmt.Errorf("user %q not found", userId)
	}
	return user, nil
}
/*******************************************************잡동 사니*****************************************************************/
var (
	gormDB *gorm.DB // GORM handle sharing main's *sql.DB connection
	// Files under web/ compiled into the binary; served at /web/ in main.
	//go:embed web
	staticContent embed.FS
)
// MaxPerPage is the paginator page size for the board/ranking listings.
const (
	MaxPerPage = 20
)
// checkError panics with the error text when err is non-nil; otherwise it
// is a no-op. Used throughout as a crash-fast error check.
func checkError(err error) {
	if err == nil {
		return
	}
	panic(err.Error())
}
// Local MySQL connection settings.
// NOTE(review): credentials are hardcoded in source — move them to
// environment variables or a config file before sharing/deploying.
const (
	user = "root"
	password = "1234"
	//port = "3307"
	database = "tech"
	host = "127.0.0.1"
)
// const ( //! 헤로쿠 작업할때 필요하다
// //추가/
// user = "bfbae725adafff"
// password = "ef851b9b"
// //port = "3307"
// database = "heroku_3e81fa660b7be57"
// host = "us-cdbr-east-04.cleardb.com"
// )
var (
	db *sql.DB // shared database handle, opened in main
	tpl *template.Template // all parsed templates, loaded in init
	dbSessionCleaned time.Time // last CleanSessions run (30s throttle in logout)
)
// NOTE(review): 'content' appears unused here — 'staticContent' is the
// embedded FS actually served; confirm before removing.
var content embed.FS
//템플릿 지정
// init parses every template under web/templates once at startup
// (template.Must panics on a parse failure) and seeds the
// session-cleanup timestamp used to throttle CleanSessions.
func init() {
	tpl = template.Must(template.ParseGlob("web/templates/*"))
	dbSessionCleaned = time.Now()
}
/*******************************************************************회원가입******************************************************************/
// 유저생성
// CreateUser inserts a new user (and a matching boards row) from the
// signup form. The password is stored as a bcrypt hash. Returns a
// CustomError when hashing or either insert fails (most likely a
// duplicate primary key, MySQL error 1062).
func CreateUser(db *sql.DB, req *http.Request) *CustomError {
	id := req.PostFormValue("id")
	password := req.PostFormValue("password")
	name := req.PostFormValue("name")
	created := time.Now().Format("2006-01-02 15:04:05")
	gender := req.PostFormValue("gender")
	area := req.PostFormValue("area")
	birth := req.PostFormValue("birth")
	bike_info := req.PostFormValue("bike_info")
	career := req.PostFormValue("career")
	email := req.PostFormValue("email")
	stmt, err := db.Prepare("insert into users (id, password, name, created, gender, area, birth, bike_info, career, email) values (?,?, ?, ?, ?,?,?,?,?,?)")
	checkError(err)
	defer stmt.Close()
	// BUG FIX: the bcrypt error was silently overwritten by the next
	// assignment. Also upgraded MinCost (cost 4, trivially brute-forceable)
	// to DefaultCost; previously stored hashes still verify.
	bs, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		fmt.Println("error:", err)
		return &CustomError{Code: "500", Message: "failed to hash password."}
	}
	if _, err = stmt.Exec(id, bs, name, created, gender, area, birth, bike_info, career, email); err != nil {
		fmt.Println("error:", err)
		return &CustomError{Code: "1062", Message: "already exists id."}
	}
	stm, err := db.Prepare("insert into boards (id, name, created_at, gender, area, birth, bike_info, career, email) values (?, ?, ?, ?, ?, ?, ?, ?, ?)")
	checkError(err)
	// BUG FIX: was `defer stmt.Close()` a second time; stm was never closed.
	defer stm.Close()
	if _, err = stm.Exec(id, name, created, gender, area, birth, bike_info, career, email); err != nil {
		fmt.Println("error:", err)
		return &CustomError{Code: "1062", Message: "already exists id."}
	}
	return nil
}
//회원가입
// signUp serves the signup form (GET) and creates the account (POST).
// Logged-in visitors are redirected to /index; a successful signup
// redirects to the login page.
func signUp(w http.ResponseWriter, req *http.Request) {
	if alreadyLoggedIn(w, req) {
		http.Redirect(w, req, "/index", http.StatusSeeOther)
		return
	}
	switch req.Method {
	case http.MethodGet:
		tpl.ExecuteTemplate(w, "signup.gohtml", nil)
	case http.MethodPost:
		if err := CreateUser(db, req); err != nil {
			// Re-render the form with the creation error.
			tpl.ExecuteTemplate(w, "signup.gohtml", map[string]interface{}{"error": err})
		} else {
			http.Redirect(w, req, "/", http.StatusSeeOther)
		}
	}
}
/*******************************************관리자 페이지*******************************************************/
//관리자 페이지
func board(w http.ResponseWriter, r *http.Request) {
var b []models.Board
if !alreadyLoggedIn(w, r) {
http.Redirect(w, r, "/", http.StatusSeeOther) //! possible to connect to /board/ for a while after logging out 11.07
return
}
// result.RowsAffected // returns found records count, equals `len(users)`
// result.Error // returns error
page := r.FormValue("page")
if page == "" {
page = "1"
}
pageInt, _ := strconv.Atoi(page)
if keyword := r.FormValue("v"); keyword != "" {
target := r.FormValue("target")
switch target {
case "email":
q := gormDB.Where("email LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
return
case "id":
q := gormDB.Where("id LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
return
}
}
q := gormDB.Order("backcount desc").Find(&b) //! ordered by author 11.08 /04:56
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
}
//이거는 뭔지 아직 모르겠음
func write(w http.ResponseWriter, r *http.Request) { //! board 데이터 수정
if r.Method == http.MethodPost {
email := r.PostFormValue("email")
area := r.PostFormValue("area")
bike_info := r.PostFormValue("bike_info")
newPost := models.Board{Email: email, Area: area, Bike_info: bike_info}
gormDB.Create(&newPost)
http.Redirect(w, r, "/", http.StatusSeeOther)
return
}
tpl.ExecuteTemplate(w, "write.gohtml", nil)
}
//관리자페이지에 삭제
// delete removes the Board row whose id follows /delete/ in the URL path,
// then redirects to the listing.
// NOTE(review): the name shadows Go's builtin delete at package scope;
// renaming would require touching the route table in main.
// NOTE(review): unlike board/ranking, there is no alreadyLoggedIn check
// here — any visitor can delete rows. Verify whether that is intended.
func delete(w http.ResponseWriter, r *http.Request) { //! delete a board row
	id := strings.TrimPrefix(r.URL.Path, "/delete/")
	gormDB.Delete(&models.Board{}, id)
	http.Redirect(w, r, "/board", http.StatusSeeOther)
}
//관리자 페이지 수정 (아직 안됨)
// edit shows the edit form for a Board row addressed as /edit/<id> (GET)
// and applies the posted changes (POST), redirecting to /post/<id>.
func edit(w http.ResponseWriter, r *http.Request) {
	id := strings.TrimPrefix(r.URL.Path, "/edit/")
	var b models.Board
	gormDB.First(&b, id)
	if r.Method == http.MethodPost {
		gormDB.Model(&b).Updates(models.Board{Email: r.PostFormValue("email"), Area: r.PostFormValue("area"), Bike_info: r.PostFormValue("bike_info")})
		var byteBuf bytes.Buffer
		byteBuf.WriteString("/post/")
		byteBuf.WriteString(id)
		http.Redirect(w, r, byteBuf.String(), http.StatusSeeOther)
		// BUG FIX: without this return the handler fell through and also
		// executed the template after the redirect had been written.
		return
	}
	tpl.ExecuteTemplate(w, "write.gohtml", b)
}
//관리자 페이지 수정 하기전 조회
// post shows the detail view of a single Board row addressed as /post/<id>.
func post(w http.ResponseWriter, r *http.Request) {
	rowID := strings.TrimPrefix(r.URL.Path, "/post/")
	var record models.Board
	gormDB.First(&record, rowID)
	tpl.ExecuteTemplate(w, "post.gohtml", record)
}
/***************************************주요 메뉴들*********************************************************/
// index 페이지(dashboard.html -> mydata.html)
// mydata renders the dashboard (mydata.html) for the logged-in user;
// unauthenticated visitors are sent back to the login page.
func mydata(w http.ResponseWriter, req *http.Request) {
	if !alreadyLoggedIn(w, req) {
		http.Redirect(w, req, "/", http.StatusSeeOther)
		return
	}
	currentUser := getUser(w, req)
	tpl.ExecuteTemplate(w, "mydata.html", currentUser)
}
// mypage (index2.html -> mypage.html)
// mypage renders the user's profile page (mypage.gohtml);
// unauthenticated visitors are sent back to the login page.
func mypage(w http.ResponseWriter, req *http.Request) {
	if !alreadyLoggedIn(w, req) {
		http.Redirect(w, req, "/", http.StatusSeeOther)
		return
	}
	currentUser := getUser(w, req)
	tpl.ExecuteTemplate(w, "mypage.gohtml", currentUser)
}
//랭킹 (board2.html -> ranking.html)
func ranking( | if page == "" {
page = "1"
}
pageInt, _ := strconv.Atoi(page)
if keyword := r.FormValue("v"); keyword != "" {
target := r.FormValue("target")
switch target {
case "email":
q := gormDB.Where("email LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
return
case "area":
q := gormDB.Where("area LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
return
}
}
q := gormDB.Order("backcount desc").Find(&b) //! ordered by author 11.08 /04:56
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
}
/***************************************세션 관련************************************************************/
//세션 유지 길이 //21-11-10
const sessionLength int = 600
//세션 생성
func CreateSession(db *sql.DB, sessionId string, userId string) {
stmt, err := db.Prepare("insert into sessions values (?, ?, ?)")
checkError(err)
defer stmt.Close()
_, err = stmt.Exec(sessionId, userId, time.Now().Format("2006-01-02 15:04:05"))
checkError(err)
}
//세션을 통해 유저 정보 가져오기
func getUser(w http.ResponseWriter, req *http.Request) models.User {
fmt.Println("getUser()")
// get cookie
c, err := req.Cookie("sessions")
if err != nil {
sID := uuid.New()
c = &http.Cookie{
Name: "sessions",
Value: sID.String(),
}
}
c.MaxAge = sessionLength
http.SetCookie(w, c)
// if the user exists already, get user
var u models.User
un, err := ReadSession(db, c.Value)
if err != nil {
log.Fatal(err)
}
UpdateCurrentTime(db, un)
u, _ = ReadUserById(db, un)
return u
}
//이미 로그인이 되어있는지 세션을 통해 확인
func alreadyLoggedIn(w http.ResponseWriter, req *http.Request) bool {
fmt.Println("alreadyLoggedIn()")
c, err := req.Cookie("sessions")
if err != nil {
return false
}
un, err := ReadSession(db, c.Value)
if err != nil {
return false
}
UpdateCurrentTime(db, un)
_, err = ReadUserById(db, un)
if err != nil {
return false
}
c.MaxAge = sessionLength
http.SetCookie(w, c)
return true
}
//세션 로그인에 시간 표시
func UpdateCurrentTime(db *sql.DB, sessionID string) {
stmt, err := db.Prepare("UPDATE sessions SET `current_time`=? WHERE `user_id`=?")
checkError(err)
defer stmt.Close()
_, err = stmt.Exec(time.Now().Format("2006-01-02 15:04:05"), sessionID)
checkError(err)
}
//세션 초기화
func CleanSessions(db *sql.DB) {
var sessionID string
var currentTime string
rows, err := db.Query("select session_id, current_time from sessions")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
err := rows.Scan(&sessionID, ¤tTime)
if err != nil {
log.Fatal(err)
}
t, _ := time.Parse("2006-01-02 15:04:05", currentTime)
if time.Now().Sub(t) > (time.Second * 10) {
DeleteSession(db, sessionID)
}
}
dbSessionCleaned = time.Now()
} //12
//세션 삭제
func DeleteSession(db *sql.DB, sessionID string) {
stmt, err := db.Prepare("delete from sessions where `session_id`=?")
checkError(err)
_, err = stmt.Exec(sessionID)
checkError(err)
}
//생성된 세션 읽기
func ReadSession(db *sql.DB, sessionId string) (string, error) {
fmt.Println("ReadSession()")
row, err := db.Query("select user_id from sessions where session_id = ?", sessionId)
checkError(err)
defer row.Close()
var userId string
for row.Next() {
err = row.Scan(&userId)
if err != nil {
log.Fatal(err)
}
}
return userId, nil
}
/****************************************로그인 관련*******************************************************/
//로그인
func login(w http.ResponseWriter, req *http.Request) { //! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~``
if alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/mydata", http.StatusSeeOther)
return
}
if req.Method == http.MethodPost {
user, err := ReadUser(db, req)
if err != nil {
errMsg := map[string]interface{}{"error": err}
tpl.ExecuteTemplate(w, "login3.html", errMsg)
return
}
sID := uuid.New()
c := &http.Cookie{
Name: "sessions",
Value: sID.String(),
}
http.SetCookie(w, c)
CreateSession(db, c.Value, user.Id)
http.Redirect(w, req, "/mydata", http.StatusSeeOther)
return
}
tpl.ExecuteTemplate(w, "login3.html", nil)
}
//로그아웃
func logout(w http.ResponseWriter, req *http.Request) {
if !alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/", http.StatusSeeOther)
return
}
c, _ := req.Cookie("sessions")
// delete session
DeleteSession(db, c.Value)
//
c = &http.Cookie{
Name: "sessions",
Value: "",
MaxAge: -1,
}
http.SetCookie(w, c)
if time.Now().Sub(dbSessionCleaned) > (time.Second * 30) {
go CleanSessions(db)
}
http.Redirect(w, req, "/", http.StatusSeeOther)
}
/********************************************************메인함수************************************************************/
func main() {
// port := os.Getenv("PORT") //! 헤로쿠 작업할때 필요 하다 11.07
// if port == "" {
// port = "8080" // Default port if not specified
// }
fmt.Printf("Starting server at port 8080\n")
fmt.Println("Head")
var connectionString = fmt.Sprintf("%s:%s@tcp(%s:3306)/%s?charset=utf8mb4&parseTime=True", user, password, host, database)
var err error
fmt.Println("connection check..")
// Connect to mysql server
db, err = sql.Open("mysql", connectionString)
fmt.Println("Connecting to DB..")
checkError(err)
defer db.Close()
//바꾼코드
err = db.Ping()
checkError(err)
gormDB, err = gorm.Open(mysql.New(mysql.Config{
Conn: db,
}), &gorm.Config{})
if err != nil {
panic("failed to connect database")
}
gormDB.AutoMigrate(&models.Board{}, &models.User{}, &models.Session{}) //! 자동으로 author, content 심어준다
fmt.Println("Successfully Connected to DB")
http.HandleFunc("/", login)
http.HandleFunc("/delete/", delete)
http.HandleFunc("/write/", write)
http.HandleFunc("/board/", board)
http.HandleFunc("/ranking/", ranking) //1108 임 이름 변경(tables -> ranking)
http.HandleFunc("/post/", post)
http.HandleFunc("/edit/", edit)
http.HandleFunc("/mypage", mypage) //! 뭐여
http.HandleFunc("/signup", signUp)
http.HandleFunc("/mydata", mydata)
http.HandleFunc("/logout", logout)
http.Handle("/web/", http.FileServer(http.FS(staticContent)))
fmt.Println("Listening...ss")
// http.ListenAndServe(":"+port, nil) //! 헤로쿠 작업할때 필요 하다 11.07
http.ListenAndServe(":8080", nil)
}
//수정전
| w http.ResponseWriter, r *http.Request) {
var b []models.Board
if !alreadyLoggedIn(w, r) {
http.Redirect(w, r, "/", http.StatusSeeOther) //! possible to connect to /board/ for a while after logging out 11.07
return
}
// result.RowsAffected // returns found records count, equals `len(users)`
// result.Error // returns error
page := r.FormValue("page")
| identifier_body |
main.go | package main
import (
"VAST-WATERS-21789/models"
"database/sql"
"embed"
"fmt"
"log"
"net/http"
"strconv"
"text/template"
"time"
"github.com/google/uuid"
"golang.org/x/crypto/bcrypt"
_ "github.com/go-sql-driver/mysql"
"bytes"
"strings"
"github.com/vcraescu/go-paginator/v2"
"github.com/vcraescu/go-paginator/v2/adapter"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
// CustomError: error type struct
type CustomError struct {
Code string
Message string
}
/*****************************************************************************뭔지 잘 모르는 것들*************************************************/
//필요한가 잘 모르겠음
func (e *CustomError) Error() string {
return e.Code + ", " + e.Message
}
//필요한가 잘 모르겟음
func (e *CustomError) StatusCode() int {
result, _ := strconv.Atoi(e.Code)
return result
}
// Delete delete data from db //! user 전용 11.08
// func Delete(db *sql.DB) {
// // Delete
// stmt, err := db.Prepare("delete from user where `id`=?")
// checkError(err)
// res, err := stmt.Exec(5)
// checkError(err)
// a, err := res.RowsAffected()
// checkError(err)
// fmt.Println(a, "rows in set")
// }
// Update change data from db인데 뭔지 잘 모르겠음
func Update(db *sql.DB) {
// Update
stmt, err := db.Prepare("update topic set profile=? where profile=?")
checkError(err)
res, err := stmt.Exec("developer", "dev")
checkError(err)
a, err := res.RowsAffected()
checkError(err)
fmt.Println(a, "rows in set")
}
//페이지 리스트인데 뭔지 잘 모르겠음
func getPageList(p string, limit int) []string {
page, _ := strconv.Atoi(p)
var result []string
for i := page - 2; i <= page+2; i++ {
if i > 0 && i <= limit {
result = append(result, strconv.Itoa(i))
}
}
return result
}
/**********************************************************조회*************************************************************************/
// db에서 모든 데이터를 조회
func ReadUser(db *sql.DB, req *http.Request) (models.User, *CustomError) {
// Read
id, pw := req.PostFormValue("id"), req.PostFormValue("password")
rows, err := db.Query("select * from users where id = ?", id)
checkError(err)
defer rows.Close()
var user = models.User{}
if !rows.Next() {
return user, &CustomError{Code: "401", Message: "ID doesn't exist."}
} else {
_ = rows.Scan(&user.Id, &user.Password, &user.Name, &user.Created, &user.Day, &user.Totaltime, &user.Trytime, &user.Recoverytime, &user.Frontcount, &user.Backcount, &user.AvgRPM, &user.AvgSpeed, &user.Distance, &user.Musclenum, &user.Kcalorynum, &user.Gender, &user.Area, &user.Birth, &user.Bike_info, &user.Career, &user.Club, &user.Email)
}
err = bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(pw))
if err != nil {
return user, &CustomError{Code: "401", Message: "uncorrect password."}
}
return user, nil
}
//유저를 Id로 조회
func ReadUserById(db *sql.DB, userId string) (models.User, error) {
fmt.Println("ReadUserById()")
row, err := db.Query("select * from users where id = ?", userId)
//row, err := db.Query("select * from user")
checkError(err)
defer row.Close()
var user = models.User{} //! 배열로 받아서 모든 테이블 정보 가져오기 해야함
for row.Next() {
err := row.Scan(&user.Id, &user.Password, &user.Name, &user.Created, &user.Day, &user.Totaltime, &user.Trytime, &user.Recoverytime, &user.Frontcount, &user.Backcount, &user.AvgRPM, &user.AvgSpeed, &user.Distance, &user.Musclenum, &user.Kcalorynum, &user.Gender, &user.Area, &user.Birth, &user.Bike_info, &user.Career, &user.Club, &user.Email)
if err != nil {
log.Fatal(err) //! 2021/11/4 이유
}
}
return user, nil
}
/*******************************************************잡동 사니*****************************************************************/
var (
gormDB *gorm.DB
//go:embed web
staticContent embed.FS
)
const (
MaxPerPage = 20
)
func checkError(err error) {
if err != nil {
panic(err.Error())
}
}
const (
//추가
user = "root"
password = "1234"
//port = "3307"
database = "tech"
host = "127.0.0.1"
)
// const ( //! 헤로쿠 작업할때 필요하다
// //추가/
// user = "bfbae725adafff"
// password = "ef851b9b"
// //port = "3307"
// database = "heroku_3e81fa660b7be57"
// host = "us-cdbr-east-04.cleardb.com"
// )
var (
db *sql.DB
tpl *template.Template
dbSessionCleaned time.Time
)
var content embed.FS
//템플릿 지정
func init() {
tpl = template.Must(template.ParseGlob("web/templates/*"))
dbSessionCleaned = time.Now()
}
/*******************************************************************회원가입******************************************************************/
// 유저생성
func CreateUser(db *sql.DB, req *http.Request) *CustomError { //! 이거는 어디껀가
// req.ParseForm()
id := req.PostFormValue("id")
password := req.PostFormValue("password")
name := req.PostFormValue("name")
//update := time.Now().Format("2006-01-02 15:04:05")
created := time.Now().Format("2006-01-02 15:04:05")
//day := req.PostFormValue("day")
//totaltime := req.PostFormValue("totaltime")
//trytime := req.PostFormValue("trytime")
//recoverytime := req.PostFormValue("recoverytime")
//frontcount := req.PostFormValue("frontcount")
//backcount := req.PostFormValue("backcount")
//avgrpm := req.PostFormValue("avgrpm")
//avgspeed := req.PostFormValue("avgspeed")
//distance := req.PostFormValue("distance")
//musclenum := req.PostFormValue("musclenum")
//Kcalorynum := req.PostFormValue("Kcalorynum")
gender := req.PostFormValue("gender")
area := req.PostFormValue("area")
birth := req.PostFormValue("birth")
bike_info := req.PostFormValue("bike_info")
career := req.PostFormValue("career")
//club := req.PostFormValue("club")
email := req.PostFormValue("email")
// Create 2
stmt, err := db.Prepare("insert into users (id, password, name, created, gender, area, birth, bike_info, career, email) values (?,?, ?, ?, ?,?,?,?,?,?)")
// stmt, err := db.Prepare("insert into user (id, password, name,created) values (?,?, ?,?)")
checkError(err)
defer stmt.Close()
bs, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.MinCost)
_, err = stmt.Exec(id, bs, name, created, gender, area, birth, bike_info, career, email)
if err != nil {
fmt.Println("error:", err)
return &CustomError{Code: "1062", Message: "already exists id."}
}
stm, err := db.Prepare("insert into boards (id, name, created_at, gender, area, birth, bike_info, career, email) values (?, ?, ?, ?, ?, ?, ?, ?, ?)")
// stmt, err := db.Prepare("insert into user (id, password, name,created) values (?,?, ?,?)")
checkError(err)
defer stmt.Close()
_, err = stm.Exec(id, name, created, gender, area, birth, bike_info, career, email)
if err != nil {
fmt.Println("error:", err)
return &CustomError{Code: "1062", Message: "already exists id."}
}
return nil
}
//회원가입
func signUp(w http.ResponseWriter, req *http.Request) {
if alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/index", http.StatusSeeOther)
return
}
if req.Method == http.MethodGet {
tpl.ExecuteTemplate(w, "signup.gohtml", nil)
}
if req.Method == http.MethodPost {
err := CreateUser(db, req)
if err != nil {
errMsg := map[string]interface{}{"error": err}
tpl.ExecuteTemplate(w, "signup.gohtml", errMsg)
} else {
http.Redirect(w, req, "/", http.StatusSeeOther)
}
return
}
}
/*******************************************관리자 페이지*******************************************************/
//관리자 페이지
func board(w http.ResponseWriter, r *http.Request) {
var b []models.Board
if !alreadyLoggedIn(w, r) {
http.Redirect(w, r, "/", http.StatusSeeOther) //! possible to connect to /board/ for a while after logging out 11.07
return
}
// result.RowsAffected // returns found records count, equals `len(users)`
// result.Error // returns error
page := r.FormValue("page")
if page == "" {
page = "1"
}
pageInt, _ := strconv.Atoi(page)
if keyword := r.FormValue("v"); keyword != "" {
target := r.FormValue("target")
switch target {
case "email":
q := gormDB.Where("email LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
return
case "id":
q := gormDB.Where("id LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
return
}
}
q := gormDB.Order("backcount desc").Find(&b) //! ordered by author 11.08 /04:56
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "board.gohtml", temp)
}
//이거는 뭔지 아직 모르겠음
func write(w http.ResponseWriter, r *http.Request) { //! board 데이터 수정
if r.Method == http.MethodPost {
email := r.PostFormValue("email")
area := r.PostFormValue("area")
bike_info := r.PostFormValue("bike_info")
newPost := models.Board{Email: email, Area: area, Bike_info: bike_info}
gormDB.Create(&newPost)
http.Redirect(w, r, "/", http.StatusSeeOther)
return
}
tpl.ExecuteTemplate(w, "write.gohtml", nil)
}
//관리자페이지에 삭제
func delete(w http.ResponseWriter, r *http.Request) { //! board 삭제
id := strings.TrimPrefix(r.URL.Path, "/delete/")
gormDB.Delete(&models.Board{}, id)
http.Redirect(w, r, "/board", http.StatusSeeOther)
}
//관리자 페이지 수정 (아직 안됨)
func edit(w http.ResponseWriter, r *http.Request) {
id := strings.TrimPrefix(r.URL.Path, "/edit/")
var b models.Board
gormDB.First(&b, id)
if r.Method == http.MethodPost {
gormDB.Model(&b).Updates(models.Board{Email: r.PostFormValue("email"), Area: r.PostFormValue("area"), Bike_info: r.PostFormValue("bike_info")})
// gormDB.Model(&b).Updates(Board{Name: r.PostFormValue("name"), Totaltime: r.PostFormValue("totaltime")})
var byteBuf bytes.Buffer
byteBuf.WriteString("/post/")
byteBuf.WriteString(id)
http.Redirect(w, r, byteBuf.String(), http.StatusSeeOther)
}
tpl.ExecuteTemplate(w, "write.gohtml", b)
}
//관리자 페이지 수정 하기전 조회
func post(w http.ResponseWriter, r *http.Request) {
// id := r.FormValue("id")
id := strings.TrimPrefix(r.URL.Path, "/post/")
var b models.Board
gormDB.First(&b, id)
tpl.ExecuteTemplate(w, "post.gohtml", b)
}
/***************************************주요 메뉴들*********************************************************/
// index 페이지(dashboard.html -> mydata.html)
func mydata(w http.ResponseWriter, req *http.Request) {
// var b []Board
if !alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/", http.StatusSeeOther)
return
}
u := getUser(w, req)
tpl.ExecuteTemplate(w, "mydata.html", u)
}
// mypage (index2.html -> mypage.html)
func mypage(w http.ResponseWriter, req *http.Request) {
// var b []Board
// if !alreadyLoggedIn(w, req) {
// http.Redirect(w, req, "/", http.StatusSeeOther)
// return
// }
if !alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/", http.StatusSeeOther)
return
}
u := getUser(w, req)
tpl.ExecuteTemplate(w, "mypage.gohtml", u) //! html로 바꾸는법~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
}
//랭킹 (board2.html -> ranking.html)
func ranking(w http.ResponseWriter, r *http.Request) {
var b []models.Board
if !alreadyLoggedIn(w, r) {
http.Redirect(w, r, "/", http.StatusSeeOther) //! possible to connect to /board/ for a while after logging out 11.07
return
}
// result.RowsAffected // returns found records count, equals `len(users)`
// result.Error // returns error
page := r.FormValue("page")
if page == "" {
page = "1"
}
pageInt, _ := strconv.Atoi(page)
if keyword := r.FormValue("v"); keyword != "" {
target := r.FormValue("target")
switch target {
case "email":
q := gormDB.Where("email LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
return
case "area":
q := gormDB.Where("area LIKE ?", fmt.Sprintf("%%%s%%", keyword)).Find(&b)
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
Target: target,
Value: keyword,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
return
}
}
q := gormDB.Order("backcount desc").Find(&b) //! ordered by author 11.08 /04:56
pg := paginator.New(adapter.NewGORMAdapter(q), MaxPerPage)
pg.SetPage(pageInt)
if err := pg.Results(&b); err != nil {
panic(err)
}
pgNums, _ := pg.PageNums()
pageSlice := getPageList(page, pgNums)
temp := models.PassedData{
PostData: b,
PageList: pageSlice,
Page: page,
}
tpl.ExecuteTemplate(w, "ranking.gohtml", temp)
}
/***************************************세션 관련************************************************************/
//세션 유지 길이 //21-11-10
const sessionLength int = 600
//세션 생성
func CreateSession(db *sql.DB, sessionId string, userId string) {
stmt, err := db.Prepare("insert into sessions values (?, ?, ?)")
checkError(err)
defer stmt.Close()
_, err = stmt.Exec(sessionId, userId, time.Now().Format("2006-01-02 15:04:05"))
checkError(err)
}
//세션을 통해 유저 정보 가져오기
func getUser(w http.ResponseWriter, req *http.Request) models.User {
fmt.Println("getUser()")
// get cookie
c, err := req.Cookie("sessions")
if err != nil {
sID := uuid.New()
c = &http.Cookie{
Name: "sessions",
Value: sID.String(),
}
}
c.MaxAge = sessionLength
http.SetCookie(w, c)
// if the user exists already, get user
var u models.User
un, err := ReadSession(db, c.Value)
if err != nil {
log.Fatal(err)
}
UpdateCurrentTime(db, un)
u, _ = ReadUserById(db, un)
return u
}
//이미 로그인이 되어있는지 세션을 통해 확인
func alreadyLoggedIn(w http.ResponseWriter, req *http.Request) bool {
fmt.Println("alreadyLoggedIn()")
c, err := req.Cookie("sessions")
if err != nil {
return false
}
un, err := ReadSession(db, c.Value)
if err != nil {
return false
}
UpdateCurrentTime(db, un)
_, err = ReadUserById(db, un)
if err != nil {
return false
} |
c.MaxAge = sessionLength
http.SetCookie(w, c)
return true
}
//세션 로그인에 시간 표시
func UpdateCurrentTime(db *sql.DB, sessionID string) {
stmt, err := db.Prepare("UPDATE sessions SET `current_time`=? WHERE `user_id`=?")
checkError(err)
defer stmt.Close()
_, err = stmt.Exec(time.Now().Format("2006-01-02 15:04:05"), sessionID)
checkError(err)
}
//세션 초기화
func CleanSessions(db *sql.DB) {
var sessionID string
var currentTime string
rows, err := db.Query("select session_id, current_time from sessions")
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
err := rows.Scan(&sessionID, ¤tTime)
if err != nil {
log.Fatal(err)
}
t, _ := time.Parse("2006-01-02 15:04:05", currentTime)
if time.Now().Sub(t) > (time.Second * 10) {
DeleteSession(db, sessionID)
}
}
dbSessionCleaned = time.Now()
} //12
//세션 삭제
func DeleteSession(db *sql.DB, sessionID string) {
stmt, err := db.Prepare("delete from sessions where `session_id`=?")
checkError(err)
_, err = stmt.Exec(sessionID)
checkError(err)
}
//생성된 세션 읽기
func ReadSession(db *sql.DB, sessionId string) (string, error) {
fmt.Println("ReadSession()")
row, err := db.Query("select user_id from sessions where session_id = ?", sessionId)
checkError(err)
defer row.Close()
var userId string
for row.Next() {
err = row.Scan(&userId)
if err != nil {
log.Fatal(err)
}
}
return userId, nil
}
/****************************************로그인 관련*******************************************************/
//로그인
func login(w http.ResponseWriter, req *http.Request) { //! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~``
if alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/mydata", http.StatusSeeOther)
return
}
if req.Method == http.MethodPost {
user, err := ReadUser(db, req)
if err != nil {
errMsg := map[string]interface{}{"error": err}
tpl.ExecuteTemplate(w, "login3.html", errMsg)
return
}
sID := uuid.New()
c := &http.Cookie{
Name: "sessions",
Value: sID.String(),
}
http.SetCookie(w, c)
CreateSession(db, c.Value, user.Id)
http.Redirect(w, req, "/mydata", http.StatusSeeOther)
return
}
tpl.ExecuteTemplate(w, "login3.html", nil)
}
//로그아웃
func logout(w http.ResponseWriter, req *http.Request) {
if !alreadyLoggedIn(w, req) {
http.Redirect(w, req, "/", http.StatusSeeOther)
return
}
c, _ := req.Cookie("sessions")
// delete session
DeleteSession(db, c.Value)
//
c = &http.Cookie{
Name: "sessions",
Value: "",
MaxAge: -1,
}
http.SetCookie(w, c)
if time.Now().Sub(dbSessionCleaned) > (time.Second * 30) {
go CleanSessions(db)
}
http.Redirect(w, req, "/", http.StatusSeeOther)
}
/********************************************************메인함수************************************************************/
func main() {
// port := os.Getenv("PORT") //! 헤로쿠 작업할때 필요 하다 11.07
// if port == "" {
// port = "8080" // Default port if not specified
// }
fmt.Printf("Starting server at port 8080\n")
fmt.Println("Head")
var connectionString = fmt.Sprintf("%s:%s@tcp(%s:3306)/%s?charset=utf8mb4&parseTime=True", user, password, host, database)
var err error
fmt.Println("connection check..")
// Connect to mysql server
db, err = sql.Open("mysql", connectionString)
fmt.Println("Connecting to DB..")
checkError(err)
defer db.Close()
//바꾼코드
err = db.Ping()
checkError(err)
gormDB, err = gorm.Open(mysql.New(mysql.Config{
Conn: db,
}), &gorm.Config{})
if err != nil {
panic("failed to connect database")
}
gormDB.AutoMigrate(&models.Board{}, &models.User{}, &models.Session{}) //! 자동으로 author, content 심어준다
fmt.Println("Successfully Connected to DB")
http.HandleFunc("/", login)
http.HandleFunc("/delete/", delete)
http.HandleFunc("/write/", write)
http.HandleFunc("/board/", board)
http.HandleFunc("/ranking/", ranking) //1108 임 이름 변경(tables -> ranking)
http.HandleFunc("/post/", post)
http.HandleFunc("/edit/", edit)
http.HandleFunc("/mypage", mypage) //! 뭐여
http.HandleFunc("/signup", signUp)
http.HandleFunc("/mydata", mydata)
http.HandleFunc("/logout", logout)
http.Handle("/web/", http.FileServer(http.FS(staticContent)))
fmt.Println("Listening...ss")
// http.ListenAndServe(":"+port, nil) //! 헤로쿠 작업할때 필요 하다 11.07
http.ListenAndServe(":8080", nil)
}
//수정전 | random_line_split | |
elements.ts | import * as React from "react";
import { chainSingleArgFuncs, isSubset, notNil, omit, pick } from "../common";
import {
createElementWithChildren,
ensureNotArray,
isReactNode,
mergeProps,
mergePropVals,
NONE,
} from "../react-utils";
import { Stack } from "./Stack";
interface Variants {
[vg: string]: any;
}
export type MultiChoiceArg<M extends string> = M | M[] | { [v in M]?: boolean };
export type SingleChoiceArg<M extends string> = M;
export type SingleBooleanChoiceArg<M extends string> = M | boolean;
interface OverrideTwiddle {
wrapChildren?: (children: React.ReactNode) => React.ReactNode;
wrap?: (node: React.ReactNode) => React.ReactNode;
}
export type DefaultOverride<C extends React.ElementType> = {
type: "default";
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type AsOverride<C extends React.ElementType> = {
type: "as";
as: C;
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type RenderOverride<C extends React.ElementType> = {
type: "render";
render: (props: React.ComponentProps<C>, Comp: C) => React.ReactNode;
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type Override<DefaultElementType extends React.ElementType> =
| DefaultOverride<DefaultElementType>
| AsOverride<any>
| RenderOverride<DefaultElementType>;
export type Overrides = Record<string, Flex<any>>;
export type Args = Record<string, any>;
// Flex provides a more "flexible" way to specify bindings.
export type Flex<DefaultElementType extends React.ElementType> =
// Fully-specified bindings
| (Omit<DefaultOverride<DefaultElementType>, "type"> & {
as?: never;
render?: never;
})
| Omit<AsOverride<any>, "type">
| (Omit<RenderOverride<DefaultElementType>, "type"> & {
as?: never;
})
// Valid ReactNode, used as children.
// Note: We use React.ReactChild instead of React.ReactNode because we don't want to include
// React.ReactFragment, which includes {}, which would allow any object to be passed in,
// defeating any attempt to type-check!
| React.ReactChild
// Ignored
| null
| undefined
// dict of props for the DefaultElementType
| (Partial<React.ComponentProps<DefaultElementType>> & {
wrap?: never;
wrapChildren?: never;
props?: never;
as?: never;
render?: never;
})
// render function taking in dict of props for the DefaultElementType
| ((props: React.ComponentProps<DefaultElementType>) => React.ReactNode);
export function hasVariant<V extends Variants>(
variants: V | undefined,
groupName: keyof V,
variant: string
) {
if (variants == null) {
return false;
}
const groupVariants = variants[groupName];
if (groupVariants == null) {
return false;
} else if (groupVariants === true) {
return variant === groupName;
} else if (groupVariants === false) {
return false;
} else if (Array.isArray(groupVariants)) {
return groupVariants.includes(variant);
} else if (typeof groupVariants === "string") {
return groupVariants === variant;
} else {
return (
groupVariants[variant] !== undefined && groupVariants[variant] !== false
);
}
}
export function wrapFlexContainerChildren(
children: React.ReactNode,
hasGap: boolean
) {
// We need to always wrap the children, even if there are no gaps, because
// otherwise if we toggle between with and without gap, React reconciliation
// will blow away the children tree and all state if we switch from having
// a wrapper and not.
const className = hasGap ? "__wab_flex-container" : "__wab_passthrough";
if (!children) {
return null;
} else if (Array.isArray(children)) {
return React.createElement("div", { className }, ...children);
} else {
return React.createElement("div", { className }, children);
}
}
function createPlasmicElement<DefaultElementType extends React.ElementType>(
override: Flex<DefaultElementType>,
defaultRoot: DefaultElementType,
defaultProps: Partial<React.ComponentProps<DefaultElementType>>,
wrapChildrenInFlex?: boolean
): React.ReactNode | null {
if (!override || Object.keys(override).length === 0) {
return createElementWithChildren(defaultRoot, defaultProps, defaultProps.children)
}
const override2 = deriveOverride(override);
const props = mergeOverrideProps(defaultProps, override2.props);
if (override2.type === "render") {
return override2.render(
props as React.ComponentProps<DefaultElementType>,
defaultRoot
);
}
let root = defaultRoot;
if (override2.type === "as" && override2.as) {
if (defaultRoot === (Stack as React.ElementType)) {
// If there was an "as" override specified, but the default type is
// a Stack, then we don't want to switch to using "as" as the root,
// because then we'd lose the flex wrapper that Stack provides.
// Instead, we specify the "as" as the "as" prop to Stack.
props.as = override2.as;
} else {
root = override2.as;
}
}
let children = props.children;
if (override2.wrapChildren) {
children = override2.wrapChildren(ensureNotArray(children));
}
if (wrapChildrenInFlex) {
// For legacy, we still support data-plasmic-wrap-flex-children
children = wrapFlexContainerChildren(children, true);
}
let result = createElementWithChildren(root, props, children);
if (override2.wrap) {
result = override2.wrap(result) as React.ReactElement;
}
return result;
}
// We use data-plasmic-XXX attributes for custom properties since Typescript doesn't
// support type check on jsx pragma. See https://github.com/microsoft/TypeScript/issues/21699
// for more info.
const seenElements = new Map<string, React.ReactNode>();
export function createPlasmicElementProxy<
DefaultElementType extends React.ElementType
>(
defaultElement: DefaultElementType,
props: Partial<React.ComponentProps<DefaultElementType>>,
...children: React.ReactNode[]
) {
// We use seenElements to keep track of elements that has been rendered by
// createPlasmicElementProxy(). When a JSX tree is evaluated, the JSX factory
// is invoked from the leaf to the root as the last call. So we can store
// all the elements we've created until we encounter the leaf, at which point
// we will clear this map. We are guaranteed that this map will only contain
// elements from one Plasmic* component at a time, because we're just creating
// elements and not "rendering" at this point; even if this JSX tree references
// other Plasmic* elements, we'll just create an element referencing that component,
// rather than following into the content of that component.
//
// TODO: is this ConcurrentMode friendly?
if (props == null) {
props = {};
}
const name = props["data-plasmic-name"];
const isRoot = props["data-plasmic-root"];
const forNodeName = props["data-plasmic-for-node"];
delete props["data-plasmic-name"];
delete props["data-plasmic-root"];
delete props["data-plasmic-for-node"];
const element = createPlasmicElementFromJsx(
defaultElement,
props,
...children
);
if (name) {
seenElements.set(name, element);
}
if (isRoot) {
// If this is the root, and we requested a specific node by specifying data-plasmic-for-node,
// then return that node instead
const forNode = forNodeName
? seenElements.get(forNodeName) ?? null
: element;
// Clear out the seenElements map, as we're done rendering this Plasmic* component.
seenElements.clear();
return forNode;
}
return element;
}
function createPlasmicElementFromJsx<
DefaultElementType extends React.ElementType
>(
defaultElement: DefaultElementType,
props: Partial<React.ComponentProps<DefaultElementType>>,
...children: React.ReactNode[]
) {
const override = props["data-plasmic-override"];
const wrapFlexChild = props["data-plasmic-wrap-flex-child"];
const triggerProps = (props["data-plasmic-trigger-props"] ??
[]) as React.HTMLAttributes<HTMLElement>[];
delete props["data-plasmic-override"];
delete props["data-plasmic-wrap-flex-child"];
delete props["data-plasmic-trigger-props"];
return createPlasmicElement(
override,
defaultElement,
mergeProps(
props,
children.length === 0 ? {} : { children: children.length === 1 ? children[0] : children },
...triggerProps
) as any,
wrapFlexChild
);
}
export function | (...children: React.ReactNode[]) {
return React.createElement(React.Fragment, {}, ...children);
}
export const UNSET = Symbol("UNSET");
function mergeOverrideProps(
defaults: Record<string, any>,
overrides?: Record<string, any>
): Record<string, any> {
if (!overrides) {
return defaults;
}
const result = { ...defaults };
for (const key of Object.keys(overrides)) {
const defaultVal = defaults[key];
let overrideVal = overrides[key];
if (overrideVal === UNSET) {
delete result[key];
} else {
// We use the NONE sentinel if the overrideVal is nil, and is not one of the
// props that we merge by default -- which are className, style, and
// event handlers. This means for all other "normal" props -- like children,
// title, etc -- a nil value will unset the default.
if (
overrideVal == null &&
key !== "className" &&
key !== "style" &&
!(key.startsWith("on") && typeof defaultVal === "function")
) {
overrideVal = NONE;
}
result[key] = mergePropVals(key, defaultVal, overrideVal);
}
}
return result;
}
export function wrapWithClassName(element: React.ReactNode, className: string) {
const key = React.isValidElement(element)
? element.key || undefined
: undefined;
return React.createElement(
"div",
{
key,
className,
style: {
display: "grid",
},
},
element
);
}
function deriveOverride<C extends React.ElementType>(x: Flex<C>): Override<C> {
if (!x) {
// undefined Binding is an empty Binding
return {
type: "default",
props: {} as any,
};
} else if (isReactNode(x)) {
// If ReactNode, then assume this is the children
return {
type: "default",
props: {
children: x,
} as any,
};
} else if (typeof x === "object") {
// If any of the overrideKeys is a key of this object, then assume
// this is a full Override
if ("as" in x) {
return {
...x,
props: x.props || {},
type: "as",
} as any;
} else if ("render" in x) {
return {
...x,
type: "render",
} as any;
} else if ("props" in x) {
return {
...x,
props: x.props || {},
type: "default",
};
} else if (isSubset(Object.keys(x), ["wrap", "wrapChildren"])) {
// Only twiddling functions present, so assume no props overrides
// (otherwise we'd assume these were props).
return {
...x,
props: {},
type: "default",
};
}
// Else, assume this is just a props object.
return {
type: "default",
props: x as any,
};
} else if (typeof x === "function") {
return {
type: "render",
render: x,
};
}
throw new Error(`Unexpected override: ${x}`);
}
function mergeVariants<V extends Variants>(
v1: Partial<V> | undefined,
v2: Partial<V> | undefined
): Partial<V> {
if (!v1 || !v2) {
return v1 || v2 || {};
}
return { ...v1, ...v2 };
}
function mergeArgs<A extends Args>(a1: Partial<A> | undefined, a2: Partial<A> | undefined): Partial<A> {
if (!a1 || !a2) {
return a1 || a2 || {};
}
return { ...a1, ...a2 };
}
function mergeFlexOverrides<O extends Overrides>(
o1: Partial<O>,
o2: Partial<O> | undefined
): Partial<O> {
if (!o2) {
return o1;
}
const keys = Array.from(new Set([...Object.keys(o1), ...Object.keys(o2)]));
const merged: Record<string, any> = {};
for (const key of keys) {
merged[key] = mergeFlexOverride(o1[key], o2[key]);
}
return merged as Partial<O>;
}
function mergeFlexOverride<C extends React.ElementType<any>>(
fo1: Flex<C> | undefined,
fo2: Flex<C> | undefined
): Flex<C> | undefined {
if (!fo1) {
return fo2;
}
if (!fo2) {
return fo1;
}
const o1 = deriveOverride(fo1);
const o2 = deriveOverride(fo2);
const wrap = chainSingleArgFuncs(...[o1.wrap, o2.wrap].filter(notNil));
const wrapChildren = chainSingleArgFuncs(
...[o1.wrapChildren, o2.wrapChildren].filter(notNil)
);
// "render" type always takes precedence, but we still merge the props
const props = mergeOverrideProps(o1.props ?? {}, o2.props) as Partial<
React.ComponentProps<C>
>;
if (o2.type === "render") {
return {
render: o2.render,
props,
wrap,
wrapChildren,
};
}
if (o1.type === "render") {
return {
render: o1.render,
props,
wrap,
wrapChildren,
};
}
// "as" will take precedence
const as =
(o2.type === "as" ? o2.as : undefined) ??
(o1.type === "as" ? o1.as : undefined);
return {
props,
wrap,
wrapChildren,
...(as ? { as } : {}),
};
}
export function deriveRenderOpts(
props: Record<string, any>,
config: {
name: string;
descendantNames: string[];
internalVariantPropNames: string[];
internalArgPropNames: string[];
}
) {
const {
name,
descendantNames,
internalVariantPropNames,
internalArgPropNames,
} = config;
const reservedPropNames = ["variants", "args", "overrides"];
const variants = mergeVariants(
omit(pick(props, ...internalVariantPropNames), ...reservedPropNames),
props.variants
);
const args = mergeArgs(
omit(pick(props, ...internalArgPropNames), ...reservedPropNames),
props.args
);
let overrides = mergeFlexOverrides(
omit(
pick(props, ...descendantNames),
...internalArgPropNames,
...internalVariantPropNames,
...reservedPropNames
),
props.overrides
);
const leftoverProps = omit(
props,
"variants",
"args",
"overrides",
...descendantNames,
...internalVariantPropNames,
...internalArgPropNames
) as Partial<React.ComponentProps<"button">>;
if (Object.keys(leftoverProps).length > 0) {
overrides = mergeFlexOverrides(overrides, {
[name]: {
props: leftoverProps,
},
});
}
return { variants, args, overrides };
}
| makeFragment | identifier_name |
elements.ts | import * as React from "react";
import { chainSingleArgFuncs, isSubset, notNil, omit, pick } from "../common";
import {
createElementWithChildren,
ensureNotArray,
isReactNode,
mergeProps,
mergePropVals,
NONE,
} from "../react-utils";
import { Stack } from "./Stack";
interface Variants {
[vg: string]: any;
}
export type MultiChoiceArg<M extends string> = M | M[] | { [v in M]?: boolean };
export type SingleChoiceArg<M extends string> = M;
export type SingleBooleanChoiceArg<M extends string> = M | boolean;
interface OverrideTwiddle {
wrapChildren?: (children: React.ReactNode) => React.ReactNode;
wrap?: (node: React.ReactNode) => React.ReactNode;
}
export type DefaultOverride<C extends React.ElementType> = {
type: "default";
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type AsOverride<C extends React.ElementType> = {
type: "as";
as: C;
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type RenderOverride<C extends React.ElementType> = {
type: "render";
render: (props: React.ComponentProps<C>, Comp: C) => React.ReactNode;
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type Override<DefaultElementType extends React.ElementType> =
| DefaultOverride<DefaultElementType>
| AsOverride<any>
| RenderOverride<DefaultElementType>;
export type Overrides = Record<string, Flex<any>>;
export type Args = Record<string, any>;
// Flex provides a more "flexible" way to specify bindings.
export type Flex<DefaultElementType extends React.ElementType> =
// Fully-specified bindings
| (Omit<DefaultOverride<DefaultElementType>, "type"> & {
as?: never;
render?: never;
})
| Omit<AsOverride<any>, "type">
| (Omit<RenderOverride<DefaultElementType>, "type"> & {
as?: never;
})
// Valid ReactNode, used as children.
// Note: We use React.ReactChild instead of React.ReactNode because we don't want to include
// React.ReactFragment, which includes {}, which would allow any object to be passed in,
// defeating any attempt to type-check!
| React.ReactChild
// Ignored
| null
| undefined
// dict of props for the DefaultElementType
| (Partial<React.ComponentProps<DefaultElementType>> & {
wrap?: never;
wrapChildren?: never;
props?: never;
as?: never;
render?: never;
})
// render function taking in dict of props for the DefaultElementType
| ((props: React.ComponentProps<DefaultElementType>) => React.ReactNode);
export function hasVariant<V extends Variants>(
variants: V | undefined,
groupName: keyof V,
variant: string
) {
if (variants == null) {
return false;
}
const groupVariants = variants[groupName];
if (groupVariants == null) {
return false;
} else if (groupVariants === true) {
return variant === groupName;
} else if (groupVariants === false) {
return false;
} else if (Array.isArray(groupVariants)) {
return groupVariants.includes(variant);
} else if (typeof groupVariants === "string") {
return groupVariants === variant;
} else {
return (
groupVariants[variant] !== undefined && groupVariants[variant] !== false
);
}
}
export function wrapFlexContainerChildren(
children: React.ReactNode,
hasGap: boolean
) {
// We need to always wrap the children, even if there are no gaps, because
// otherwise if we toggle between with and without gap, React reconciliation
// will blow away the children tree and all state if we switch from having
// a wrapper and not.
const className = hasGap ? "__wab_flex-container" : "__wab_passthrough";
if (!children) {
return null;
} else if (Array.isArray(children)) {
return React.createElement("div", { className }, ...children);
} else {
return React.createElement("div", { className }, children);
}
}
function createPlasmicElement<DefaultElementType extends React.ElementType>(
override: Flex<DefaultElementType>,
defaultRoot: DefaultElementType,
defaultProps: Partial<React.ComponentProps<DefaultElementType>>,
wrapChildrenInFlex?: boolean
): React.ReactNode | null {
if (!override || Object.keys(override).length === 0) {
return createElementWithChildren(defaultRoot, defaultProps, defaultProps.children)
}
const override2 = deriveOverride(override);
const props = mergeOverrideProps(defaultProps, override2.props);
if (override2.type === "render") {
return override2.render(
props as React.ComponentProps<DefaultElementType>,
defaultRoot
);
}
let root = defaultRoot;
if (override2.type === "as" && override2.as) {
if (defaultRoot === (Stack as React.ElementType)) {
// If there was an "as" override specified, but the default type is
// a Stack, then we don't want to switch to using "as" as the root,
// because then we'd lose the flex wrapper that Stack provides.
// Instead, we specify the "as" as the "as" prop to Stack.
props.as = override2.as;
} else {
root = override2.as;
}
}
let children = props.children;
if (override2.wrapChildren) {
children = override2.wrapChildren(ensureNotArray(children));
}
if (wrapChildrenInFlex) {
// For legacy, we still support data-plasmic-wrap-flex-children
children = wrapFlexContainerChildren(children, true);
}
let result = createElementWithChildren(root, props, children);
if (override2.wrap) {
result = override2.wrap(result) as React.ReactElement;
}
return result;
}
// We use data-plasmic-XXX attributes for custom properties since Typescript doesn't
// support type check on jsx pragma. See https://github.com/microsoft/TypeScript/issues/21699
// for more info.
const seenElements = new Map<string, React.ReactNode>();
export function createPlasmicElementProxy<
DefaultElementType extends React.ElementType
>(
defaultElement: DefaultElementType,
props: Partial<React.ComponentProps<DefaultElementType>>,
...children: React.ReactNode[]
) {
// We use seenElements to keep track of elements that has been rendered by
// createPlasmicElementProxy(). When a JSX tree is evaluated, the JSX factory
// is invoked from the leaf to the root as the last call. So we can store
// all the elements we've created until we encounter the leaf, at which point
// we will clear this map. We are guaranteed that this map will only contain
// elements from one Plasmic* component at a time, because we're just creating
// elements and not "rendering" at this point; even if this JSX tree references
// other Plasmic* elements, we'll just create an element referencing that component,
// rather than following into the content of that component.
//
// TODO: is this ConcurrentMode friendly?
if (props == null) {
props = {};
}
const name = props["data-plasmic-name"];
const isRoot = props["data-plasmic-root"];
const forNodeName = props["data-plasmic-for-node"];
delete props["data-plasmic-name"];
delete props["data-plasmic-root"];
delete props["data-plasmic-for-node"];
const element = createPlasmicElementFromJsx(
defaultElement,
props,
...children
);
if (name) {
seenElements.set(name, element);
}
if (isRoot) {
// If this is the root, and we requested a specific node by specifying data-plasmic-for-node,
// then return that node instead
const forNode = forNodeName
? seenElements.get(forNodeName) ?? null
: element;
// Clear out the seenElements map, as we're done rendering this Plasmic* component.
seenElements.clear();
return forNode;
}
return element;
}
function createPlasmicElementFromJsx<
DefaultElementType extends React.ElementType
>(
defaultElement: DefaultElementType,
props: Partial<React.ComponentProps<DefaultElementType>>,
...children: React.ReactNode[]
) {
const override = props["data-plasmic-override"];
const wrapFlexChild = props["data-plasmic-wrap-flex-child"];
const triggerProps = (props["data-plasmic-trigger-props"] ??
[]) as React.HTMLAttributes<HTMLElement>[];
delete props["data-plasmic-override"];
delete props["data-plasmic-wrap-flex-child"];
delete props["data-plasmic-trigger-props"];
return createPlasmicElement(
override,
defaultElement,
mergeProps(
props,
children.length === 0 ? {} : { children: children.length === 1 ? children[0] : children },
...triggerProps
) as any,
wrapFlexChild
);
}
export function makeFragment(...children: React.ReactNode[]) {
return React.createElement(React.Fragment, {}, ...children);
}
export const UNSET = Symbol("UNSET");
function mergeOverrideProps(
defaults: Record<string, any>,
overrides?: Record<string, any>
): Record<string, any> {
if (!overrides) {
return defaults;
}
const result = { ...defaults };
for (const key of Object.keys(overrides)) {
const defaultVal = defaults[key];
let overrideVal = overrides[key];
if (overrideVal === UNSET) | else {
// We use the NONE sentinel if the overrideVal is nil, and is not one of the
// props that we merge by default -- which are className, style, and
// event handlers. This means for all other "normal" props -- like children,
// title, etc -- a nil value will unset the default.
if (
overrideVal == null &&
key !== "className" &&
key !== "style" &&
!(key.startsWith("on") && typeof defaultVal === "function")
) {
overrideVal = NONE;
}
result[key] = mergePropVals(key, defaultVal, overrideVal);
}
}
return result;
}
export function wrapWithClassName(element: React.ReactNode, className: string) {
const key = React.isValidElement(element)
? element.key || undefined
: undefined;
return React.createElement(
"div",
{
key,
className,
style: {
display: "grid",
},
},
element
);
}
function deriveOverride<C extends React.ElementType>(x: Flex<C>): Override<C> {
if (!x) {
// undefined Binding is an empty Binding
return {
type: "default",
props: {} as any,
};
} else if (isReactNode(x)) {
// If ReactNode, then assume this is the children
return {
type: "default",
props: {
children: x,
} as any,
};
} else if (typeof x === "object") {
// If any of the overrideKeys is a key of this object, then assume
// this is a full Override
if ("as" in x) {
return {
...x,
props: x.props || {},
type: "as",
} as any;
} else if ("render" in x) {
return {
...x,
type: "render",
} as any;
} else if ("props" in x) {
return {
...x,
props: x.props || {},
type: "default",
};
} else if (isSubset(Object.keys(x), ["wrap", "wrapChildren"])) {
// Only twiddling functions present, so assume no props overrides
// (otherwise we'd assume these were props).
return {
...x,
props: {},
type: "default",
};
}
// Else, assume this is just a props object.
return {
type: "default",
props: x as any,
};
} else if (typeof x === "function") {
return {
type: "render",
render: x,
};
}
throw new Error(`Unexpected override: ${x}`);
}
function mergeVariants<V extends Variants>(
v1: Partial<V> | undefined,
v2: Partial<V> | undefined
): Partial<V> {
if (!v1 || !v2) {
return v1 || v2 || {};
}
return { ...v1, ...v2 };
}
function mergeArgs<A extends Args>(a1: Partial<A> | undefined, a2: Partial<A> | undefined): Partial<A> {
if (!a1 || !a2) {
return a1 || a2 || {};
}
return { ...a1, ...a2 };
}
function mergeFlexOverrides<O extends Overrides>(
o1: Partial<O>,
o2: Partial<O> | undefined
): Partial<O> {
if (!o2) {
return o1;
}
const keys = Array.from(new Set([...Object.keys(o1), ...Object.keys(o2)]));
const merged: Record<string, any> = {};
for (const key of keys) {
merged[key] = mergeFlexOverride(o1[key], o2[key]);
}
return merged as Partial<O>;
}
function mergeFlexOverride<C extends React.ElementType<any>>(
fo1: Flex<C> | undefined,
fo2: Flex<C> | undefined
): Flex<C> | undefined {
if (!fo1) {
return fo2;
}
if (!fo2) {
return fo1;
}
const o1 = deriveOverride(fo1);
const o2 = deriveOverride(fo2);
const wrap = chainSingleArgFuncs(...[o1.wrap, o2.wrap].filter(notNil));
const wrapChildren = chainSingleArgFuncs(
...[o1.wrapChildren, o2.wrapChildren].filter(notNil)
);
// "render" type always takes precedence, but we still merge the props
const props = mergeOverrideProps(o1.props ?? {}, o2.props) as Partial<
React.ComponentProps<C>
>;
if (o2.type === "render") {
return {
render: o2.render,
props,
wrap,
wrapChildren,
};
}
if (o1.type === "render") {
return {
render: o1.render,
props,
wrap,
wrapChildren,
};
}
// "as" will take precedence
const as =
(o2.type === "as" ? o2.as : undefined) ??
(o1.type === "as" ? o1.as : undefined);
return {
props,
wrap,
wrapChildren,
...(as ? { as } : {}),
};
}
export function deriveRenderOpts(
props: Record<string, any>,
config: {
name: string;
descendantNames: string[];
internalVariantPropNames: string[];
internalArgPropNames: string[];
}
) {
const {
name,
descendantNames,
internalVariantPropNames,
internalArgPropNames,
} = config;
const reservedPropNames = ["variants", "args", "overrides"];
const variants = mergeVariants(
omit(pick(props, ...internalVariantPropNames), ...reservedPropNames),
props.variants
);
const args = mergeArgs(
omit(pick(props, ...internalArgPropNames), ...reservedPropNames),
props.args
);
let overrides = mergeFlexOverrides(
omit(
pick(props, ...descendantNames),
...internalArgPropNames,
...internalVariantPropNames,
...reservedPropNames
),
props.overrides
);
const leftoverProps = omit(
props,
"variants",
"args",
"overrides",
...descendantNames,
...internalVariantPropNames,
...internalArgPropNames
) as Partial<React.ComponentProps<"button">>;
if (Object.keys(leftoverProps).length > 0) {
overrides = mergeFlexOverrides(overrides, {
[name]: {
props: leftoverProps,
},
});
}
return { variants, args, overrides };
}
| {
delete result[key];
} | conditional_block |
elements.ts | import * as React from "react";
import { chainSingleArgFuncs, isSubset, notNil, omit, pick } from "../common";
import {
createElementWithChildren,
ensureNotArray,
isReactNode,
mergeProps,
mergePropVals,
NONE,
} from "../react-utils";
import { Stack } from "./Stack";
interface Variants {
[vg: string]: any;
}
export type MultiChoiceArg<M extends string> = M | M[] | { [v in M]?: boolean };
export type SingleChoiceArg<M extends string> = M;
export type SingleBooleanChoiceArg<M extends string> = M | boolean;
interface OverrideTwiddle {
wrapChildren?: (children: React.ReactNode) => React.ReactNode;
wrap?: (node: React.ReactNode) => React.ReactNode;
}
export type DefaultOverride<C extends React.ElementType> = {
type: "default";
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type AsOverride<C extends React.ElementType> = {
type: "as";
as: C;
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type RenderOverride<C extends React.ElementType> = {
type: "render";
render: (props: React.ComponentProps<C>, Comp: C) => React.ReactNode;
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type Override<DefaultElementType extends React.ElementType> =
| DefaultOverride<DefaultElementType>
| AsOverride<any>
| RenderOverride<DefaultElementType>;
export type Overrides = Record<string, Flex<any>>;
export type Args = Record<string, any>;
// Flex provides a more "flexible" way to specify bindings.
export type Flex<DefaultElementType extends React.ElementType> =
// Fully-specified bindings
| (Omit<DefaultOverride<DefaultElementType>, "type"> & {
as?: never;
render?: never;
})
| Omit<AsOverride<any>, "type">
| (Omit<RenderOverride<DefaultElementType>, "type"> & {
as?: never;
})
// Valid ReactNode, used as children.
// Note: We use React.ReactChild instead of React.ReactNode because we don't want to include
// React.ReactFragment, which includes {}, which would allow any object to be passed in,
// defeating any attempt to type-check!
| React.ReactChild
// Ignored
| null
| undefined
// dict of props for the DefaultElementType
| (Partial<React.ComponentProps<DefaultElementType>> & {
wrap?: never;
wrapChildren?: never;
props?: never;
as?: never;
render?: never;
})
// render function taking in dict of props for the DefaultElementType
| ((props: React.ComponentProps<DefaultElementType>) => React.ReactNode);
export function hasVariant<V extends Variants>(
variants: V | undefined,
groupName: keyof V,
variant: string
) {
if (variants == null) {
return false;
}
const groupVariants = variants[groupName];
if (groupVariants == null) {
return false;
} else if (groupVariants === true) {
return variant === groupName;
} else if (groupVariants === false) {
return false;
} else if (Array.isArray(groupVariants)) {
return groupVariants.includes(variant);
} else if (typeof groupVariants === "string") {
return groupVariants === variant;
} else {
return (
groupVariants[variant] !== undefined && groupVariants[variant] !== false
);
}
}
export function wrapFlexContainerChildren(
children: React.ReactNode,
hasGap: boolean
) {
// We need to always wrap the children, even if there are no gaps, because
// otherwise if we toggle between with and without gap, React reconciliation
// will blow away the children tree and all state if we switch from having
// a wrapper and not.
const className = hasGap ? "__wab_flex-container" : "__wab_passthrough";
if (!children) {
return null;
} else if (Array.isArray(children)) {
return React.createElement("div", { className }, ...children);
} else {
return React.createElement("div", { className }, children);
}
}
function createPlasmicElement<DefaultElementType extends React.ElementType>(
override: Flex<DefaultElementType>,
defaultRoot: DefaultElementType,
defaultProps: Partial<React.ComponentProps<DefaultElementType>>,
wrapChildrenInFlex?: boolean
): React.ReactNode | null {
if (!override || Object.keys(override).length === 0) {
return createElementWithChildren(defaultRoot, defaultProps, defaultProps.children)
}
const override2 = deriveOverride(override);
const props = mergeOverrideProps(defaultProps, override2.props);
if (override2.type === "render") {
return override2.render(
props as React.ComponentProps<DefaultElementType>,
defaultRoot
);
}
let root = defaultRoot;
if (override2.type === "as" && override2.as) {
if (defaultRoot === (Stack as React.ElementType)) {
// If there was an "as" override specified, but the default type is
// a Stack, then we don't want to switch to using "as" as the root,
// because then we'd lose the flex wrapper that Stack provides.
// Instead, we specify the "as" as the "as" prop to Stack.
props.as = override2.as;
} else {
root = override2.as;
}
}
let children = props.children;
if (override2.wrapChildren) {
children = override2.wrapChildren(ensureNotArray(children));
}
if (wrapChildrenInFlex) {
// For legacy, we still support data-plasmic-wrap-flex-children
children = wrapFlexContainerChildren(children, true);
}
let result = createElementWithChildren(root, props, children);
if (override2.wrap) {
result = override2.wrap(result) as React.ReactElement;
}
return result;
}
// We use data-plasmic-XXX attributes for custom properties since Typescript doesn't
// support type check on jsx pragma. See https://github.com/microsoft/TypeScript/issues/21699
// for more info.
const seenElements = new Map<string, React.ReactNode>();
export function createPlasmicElementProxy<
DefaultElementType extends React.ElementType
>(
defaultElement: DefaultElementType,
props: Partial<React.ComponentProps<DefaultElementType>>,
...children: React.ReactNode[]
) {
// We use seenElements to keep track of elements that has been rendered by
// createPlasmicElementProxy(). When a JSX tree is evaluated, the JSX factory
// is invoked from the leaf to the root as the last call. So we can store
// all the elements we've created until we encounter the leaf, at which point
// we will clear this map. We are guaranteed that this map will only contain
// elements from one Plasmic* component at a time, because we're just creating
// elements and not "rendering" at this point; even if this JSX tree references
// other Plasmic* elements, we'll just create an element referencing that component,
// rather than following into the content of that component.
//
// TODO: is this ConcurrentMode friendly?
if (props == null) {
props = {};
}
const name = props["data-plasmic-name"];
const isRoot = props["data-plasmic-root"];
const forNodeName = props["data-plasmic-for-node"];
delete props["data-plasmic-name"];
delete props["data-plasmic-root"];
delete props["data-plasmic-for-node"];
const element = createPlasmicElementFromJsx(
defaultElement,
props,
...children
);
if (name) {
seenElements.set(name, element);
}
if (isRoot) {
// If this is the root, and we requested a specific node by specifying data-plasmic-for-node,
// then return that node instead
const forNode = forNodeName
? seenElements.get(forNodeName) ?? null
: element;
// Clear out the seenElements map, as we're done rendering this Plasmic* component.
seenElements.clear();
return forNode;
}
return element;
}
function createPlasmicElementFromJsx<
DefaultElementType extends React.ElementType
>(
defaultElement: DefaultElementType,
props: Partial<React.ComponentProps<DefaultElementType>>,
...children: React.ReactNode[]
) {
const override = props["data-plasmic-override"];
const wrapFlexChild = props["data-plasmic-wrap-flex-child"];
const triggerProps = (props["data-plasmic-trigger-props"] ??
[]) as React.HTMLAttributes<HTMLElement>[];
delete props["data-plasmic-override"];
delete props["data-plasmic-wrap-flex-child"];
delete props["data-plasmic-trigger-props"];
return createPlasmicElement(
override,
defaultElement,
mergeProps(
props,
children.length === 0 ? {} : { children: children.length === 1 ? children[0] : children },
...triggerProps
) as any,
wrapFlexChild
);
}
export function makeFragment(...children: React.ReactNode[]) {
return React.createElement(React.Fragment, {}, ...children);
}
export const UNSET = Symbol("UNSET");
function mergeOverrideProps(
defaults: Record<string, any>,
overrides?: Record<string, any>
): Record<string, any> {
if (!overrides) {
return defaults;
}
const result = { ...defaults };
for (const key of Object.keys(overrides)) {
const defaultVal = defaults[key];
let overrideVal = overrides[key];
if (overrideVal === UNSET) {
delete result[key];
} else {
// We use the NONE sentinel if the overrideVal is nil, and is not one of the
// props that we merge by default -- which are className, style, and
// event handlers. This means for all other "normal" props -- like children,
// title, etc -- a nil value will unset the default.
if (
overrideVal == null &&
key !== "className" &&
key !== "style" &&
!(key.startsWith("on") && typeof defaultVal === "function")
) {
overrideVal = NONE;
}
result[key] = mergePropVals(key, defaultVal, overrideVal);
}
}
return result;
}
export function wrapWithClassName(element: React.ReactNode, className: string) {
const key = React.isValidElement(element)
? element.key || undefined
: undefined;
return React.createElement(
"div",
{
key,
className,
style: {
display: "grid",
},
},
element
);
}
function deriveOverride<C extends React.ElementType>(x: Flex<C>): Override<C> {
if (!x) {
// undefined Binding is an empty Binding
return {
type: "default",
props: {} as any,
};
} else if (isReactNode(x)) {
// If ReactNode, then assume this is the children
return {
type: "default",
props: {
children: x,
} as any,
};
} else if (typeof x === "object") {
// If any of the overrideKeys is a key of this object, then assume
// this is a full Override
if ("as" in x) {
return {
...x,
props: x.props || {},
type: "as",
} as any;
} else if ("render" in x) {
return {
...x,
type: "render",
} as any;
} else if ("props" in x) {
return {
...x,
props: x.props || {},
type: "default",
};
} else if (isSubset(Object.keys(x), ["wrap", "wrapChildren"])) {
// Only twiddling functions present, so assume no props overrides
// (otherwise we'd assume these were props).
return {
...x,
props: {},
type: "default",
};
}
// Else, assume this is just a props object.
return {
type: "default",
props: x as any,
};
} else if (typeof x === "function") {
return {
type: "render",
render: x,
};
}
throw new Error(`Unexpected override: ${x}`);
}
function mergeVariants<V extends Variants>(
v1: Partial<V> | undefined,
v2: Partial<V> | undefined
): Partial<V> {
if (!v1 || !v2) {
return v1 || v2 || {};
}
return { ...v1, ...v2 };
}
function mergeArgs<A extends Args>(a1: Partial<A> | undefined, a2: Partial<A> | undefined): Partial<A> {
if (!a1 || !a2) {
return a1 || a2 || {};
}
return { ...a1, ...a2 };
}
function mergeFlexOverrides<O extends Overrides>(
o1: Partial<O>,
o2: Partial<O> | undefined
): Partial<O> {
if (!o2) {
return o1;
}
const keys = Array.from(new Set([...Object.keys(o1), ...Object.keys(o2)]));
const merged: Record<string, any> = {};
for (const key of keys) {
merged[key] = mergeFlexOverride(o1[key], o2[key]);
}
return merged as Partial<O>;
}
function mergeFlexOverride<C extends React.ElementType<any>>(
fo1: Flex<C> | undefined,
fo2: Flex<C> | undefined
): Flex<C> | undefined {
if (!fo1) {
return fo2;
}
if (!fo2) { | const o2 = deriveOverride(fo2);
const wrap = chainSingleArgFuncs(...[o1.wrap, o2.wrap].filter(notNil));
const wrapChildren = chainSingleArgFuncs(
...[o1.wrapChildren, o2.wrapChildren].filter(notNil)
);
// "render" type always takes precedence, but we still merge the props
const props = mergeOverrideProps(o1.props ?? {}, o2.props) as Partial<
React.ComponentProps<C>
>;
if (o2.type === "render") {
return {
render: o2.render,
props,
wrap,
wrapChildren,
};
}
if (o1.type === "render") {
return {
render: o1.render,
props,
wrap,
wrapChildren,
};
}
// "as" will take precedence
const as =
(o2.type === "as" ? o2.as : undefined) ??
(o1.type === "as" ? o1.as : undefined);
return {
props,
wrap,
wrapChildren,
...(as ? { as } : {}),
};
}
export function deriveRenderOpts(
props: Record<string, any>,
config: {
name: string;
descendantNames: string[];
internalVariantPropNames: string[];
internalArgPropNames: string[];
}
) {
const {
name,
descendantNames,
internalVariantPropNames,
internalArgPropNames,
} = config;
const reservedPropNames = ["variants", "args", "overrides"];
const variants = mergeVariants(
omit(pick(props, ...internalVariantPropNames), ...reservedPropNames),
props.variants
);
const args = mergeArgs(
omit(pick(props, ...internalArgPropNames), ...reservedPropNames),
props.args
);
let overrides = mergeFlexOverrides(
omit(
pick(props, ...descendantNames),
...internalArgPropNames,
...internalVariantPropNames,
...reservedPropNames
),
props.overrides
);
const leftoverProps = omit(
props,
"variants",
"args",
"overrides",
...descendantNames,
...internalVariantPropNames,
...internalArgPropNames
) as Partial<React.ComponentProps<"button">>;
if (Object.keys(leftoverProps).length > 0) {
overrides = mergeFlexOverrides(overrides, {
[name]: {
props: leftoverProps,
},
});
}
return { variants, args, overrides };
} | return fo1;
}
const o1 = deriveOverride(fo1); | random_line_split |
elements.ts | import * as React from "react";
import { chainSingleArgFuncs, isSubset, notNil, omit, pick } from "../common";
import {
createElementWithChildren,
ensureNotArray,
isReactNode,
mergeProps,
mergePropVals,
NONE,
} from "../react-utils";
import { Stack } from "./Stack";
interface Variants {
[vg: string]: any;
}
export type MultiChoiceArg<M extends string> = M | M[] | { [v in M]?: boolean };
export type SingleChoiceArg<M extends string> = M;
export type SingleBooleanChoiceArg<M extends string> = M | boolean;
interface OverrideTwiddle {
wrapChildren?: (children: React.ReactNode) => React.ReactNode;
wrap?: (node: React.ReactNode) => React.ReactNode;
}
export type DefaultOverride<C extends React.ElementType> = {
type: "default";
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type AsOverride<C extends React.ElementType> = {
type: "as";
as: C;
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type RenderOverride<C extends React.ElementType> = {
type: "render";
render: (props: React.ComponentProps<C>, Comp: C) => React.ReactNode;
props?: Partial<React.ComponentProps<C>>;
} & OverrideTwiddle;
export type Override<DefaultElementType extends React.ElementType> =
| DefaultOverride<DefaultElementType>
| AsOverride<any>
| RenderOverride<DefaultElementType>;
export type Overrides = Record<string, Flex<any>>;
export type Args = Record<string, any>;
// Flex provides a more "flexible" way to specify bindings.
export type Flex<DefaultElementType extends React.ElementType> =
// Fully-specified bindings
| (Omit<DefaultOverride<DefaultElementType>, "type"> & {
as?: never;
render?: never;
})
| Omit<AsOverride<any>, "type">
| (Omit<RenderOverride<DefaultElementType>, "type"> & {
as?: never;
})
// Valid ReactNode, used as children.
// Note: We use React.ReactChild instead of React.ReactNode because we don't want to include
// React.ReactFragment, which includes {}, which would allow any object to be passed in,
// defeating any attempt to type-check!
| React.ReactChild
// Ignored
| null
| undefined
// dict of props for the DefaultElementType
| (Partial<React.ComponentProps<DefaultElementType>> & {
wrap?: never;
wrapChildren?: never;
props?: never;
as?: never;
render?: never;
})
// render function taking in dict of props for the DefaultElementType
| ((props: React.ComponentProps<DefaultElementType>) => React.ReactNode);
export function hasVariant<V extends Variants>(
variants: V | undefined,
groupName: keyof V,
variant: string
) {
if (variants == null) {
return false;
}
const groupVariants = variants[groupName];
if (groupVariants == null) {
return false;
} else if (groupVariants === true) {
return variant === groupName;
} else if (groupVariants === false) {
return false;
} else if (Array.isArray(groupVariants)) {
return groupVariants.includes(variant);
} else if (typeof groupVariants === "string") {
return groupVariants === variant;
} else {
return (
groupVariants[variant] !== undefined && groupVariants[variant] !== false
);
}
}
export function wrapFlexContainerChildren(
children: React.ReactNode,
hasGap: boolean
) {
// We need to always wrap the children, even if there are no gaps, because
// otherwise if we toggle between with and without gap, React reconciliation
// will blow away the children tree and all state if we switch from having
// a wrapper and not.
const className = hasGap ? "__wab_flex-container" : "__wab_passthrough";
if (!children) {
return null;
} else if (Array.isArray(children)) {
return React.createElement("div", { className }, ...children);
} else {
return React.createElement("div", { className }, children);
}
}
function createPlasmicElement<DefaultElementType extends React.ElementType>(
override: Flex<DefaultElementType>,
defaultRoot: DefaultElementType,
defaultProps: Partial<React.ComponentProps<DefaultElementType>>,
wrapChildrenInFlex?: boolean
): React.ReactNode | null |
// We use data-plasmic-XXX attributes for custom properties since Typescript doesn't
// support type check on jsx pragma. See https://github.com/microsoft/TypeScript/issues/21699
// for more info.
const seenElements = new Map<string, React.ReactNode>();
export function createPlasmicElementProxy<
DefaultElementType extends React.ElementType
>(
defaultElement: DefaultElementType,
props: Partial<React.ComponentProps<DefaultElementType>>,
...children: React.ReactNode[]
) {
// We use seenElements to keep track of elements that has been rendered by
// createPlasmicElementProxy(). When a JSX tree is evaluated, the JSX factory
// is invoked from the leaf to the root as the last call. So we can store
// all the elements we've created until we encounter the leaf, at which point
// we will clear this map. We are guaranteed that this map will only contain
// elements from one Plasmic* component at a time, because we're just creating
// elements and not "rendering" at this point; even if this JSX tree references
// other Plasmic* elements, we'll just create an element referencing that component,
// rather than following into the content of that component.
//
// TODO: is this ConcurrentMode friendly?
if (props == null) {
props = {};
}
const name = props["data-plasmic-name"];
const isRoot = props["data-plasmic-root"];
const forNodeName = props["data-plasmic-for-node"];
delete props["data-plasmic-name"];
delete props["data-plasmic-root"];
delete props["data-plasmic-for-node"];
const element = createPlasmicElementFromJsx(
defaultElement,
props,
...children
);
if (name) {
seenElements.set(name, element);
}
if (isRoot) {
// If this is the root, and we requested a specific node by specifying data-plasmic-for-node,
// then return that node instead
const forNode = forNodeName
? seenElements.get(forNodeName) ?? null
: element;
// Clear out the seenElements map, as we're done rendering this Plasmic* component.
seenElements.clear();
return forNode;
}
return element;
}
function createPlasmicElementFromJsx<
DefaultElementType extends React.ElementType
>(
defaultElement: DefaultElementType,
props: Partial<React.ComponentProps<DefaultElementType>>,
...children: React.ReactNode[]
) {
const override = props["data-plasmic-override"];
const wrapFlexChild = props["data-plasmic-wrap-flex-child"];
const triggerProps = (props["data-plasmic-trigger-props"] ??
[]) as React.HTMLAttributes<HTMLElement>[];
delete props["data-plasmic-override"];
delete props["data-plasmic-wrap-flex-child"];
delete props["data-plasmic-trigger-props"];
return createPlasmicElement(
override,
defaultElement,
mergeProps(
props,
children.length === 0 ? {} : { children: children.length === 1 ? children[0] : children },
...triggerProps
) as any,
wrapFlexChild
);
}
export function makeFragment(...children: React.ReactNode[]) {
return React.createElement(React.Fragment, {}, ...children);
}
export const UNSET = Symbol("UNSET");
function mergeOverrideProps(
defaults: Record<string, any>,
overrides?: Record<string, any>
): Record<string, any> {
if (!overrides) {
return defaults;
}
const result = { ...defaults };
for (const key of Object.keys(overrides)) {
const defaultVal = defaults[key];
let overrideVal = overrides[key];
if (overrideVal === UNSET) {
delete result[key];
} else {
// We use the NONE sentinel if the overrideVal is nil, and is not one of the
// props that we merge by default -- which are className, style, and
// event handlers. This means for all other "normal" props -- like children,
// title, etc -- a nil value will unset the default.
if (
overrideVal == null &&
key !== "className" &&
key !== "style" &&
!(key.startsWith("on") && typeof defaultVal === "function")
) {
overrideVal = NONE;
}
result[key] = mergePropVals(key, defaultVal, overrideVal);
}
}
return result;
}
export function wrapWithClassName(element: React.ReactNode, className: string) {
const key = React.isValidElement(element)
? element.key || undefined
: undefined;
return React.createElement(
"div",
{
key,
className,
style: {
display: "grid",
},
},
element
);
}
function deriveOverride<C extends React.ElementType>(x: Flex<C>): Override<C> {
if (!x) {
// undefined Binding is an empty Binding
return {
type: "default",
props: {} as any,
};
} else if (isReactNode(x)) {
// If ReactNode, then assume this is the children
return {
type: "default",
props: {
children: x,
} as any,
};
} else if (typeof x === "object") {
// If any of the overrideKeys is a key of this object, then assume
// this is a full Override
if ("as" in x) {
return {
...x,
props: x.props || {},
type: "as",
} as any;
} else if ("render" in x) {
return {
...x,
type: "render",
} as any;
} else if ("props" in x) {
return {
...x,
props: x.props || {},
type: "default",
};
} else if (isSubset(Object.keys(x), ["wrap", "wrapChildren"])) {
// Only twiddling functions present, so assume no props overrides
// (otherwise we'd assume these were props).
return {
...x,
props: {},
type: "default",
};
}
// Else, assume this is just a props object.
return {
type: "default",
props: x as any,
};
} else if (typeof x === "function") {
return {
type: "render",
render: x,
};
}
throw new Error(`Unexpected override: ${x}`);
}
function mergeVariants<V extends Variants>(
v1: Partial<V> | undefined,
v2: Partial<V> | undefined
): Partial<V> {
if (!v1 || !v2) {
return v1 || v2 || {};
}
return { ...v1, ...v2 };
}
function mergeArgs<A extends Args>(a1: Partial<A> | undefined, a2: Partial<A> | undefined): Partial<A> {
if (!a1 || !a2) {
return a1 || a2 || {};
}
return { ...a1, ...a2 };
}
function mergeFlexOverrides<O extends Overrides>(
o1: Partial<O>,
o2: Partial<O> | undefined
): Partial<O> {
if (!o2) {
return o1;
}
const keys = Array.from(new Set([...Object.keys(o1), ...Object.keys(o2)]));
const merged: Record<string, any> = {};
for (const key of keys) {
merged[key] = mergeFlexOverride(o1[key], o2[key]);
}
return merged as Partial<O>;
}
function mergeFlexOverride<C extends React.ElementType<any>>(
fo1: Flex<C> | undefined,
fo2: Flex<C> | undefined
): Flex<C> | undefined {
if (!fo1) {
return fo2;
}
if (!fo2) {
return fo1;
}
const o1 = deriveOverride(fo1);
const o2 = deriveOverride(fo2);
const wrap = chainSingleArgFuncs(...[o1.wrap, o2.wrap].filter(notNil));
const wrapChildren = chainSingleArgFuncs(
...[o1.wrapChildren, o2.wrapChildren].filter(notNil)
);
// "render" type always takes precedence, but we still merge the props
const props = mergeOverrideProps(o1.props ?? {}, o2.props) as Partial<
React.ComponentProps<C>
>;
if (o2.type === "render") {
return {
render: o2.render,
props,
wrap,
wrapChildren,
};
}
if (o1.type === "render") {
return {
render: o1.render,
props,
wrap,
wrapChildren,
};
}
// "as" will take precedence
const as =
(o2.type === "as" ? o2.as : undefined) ??
(o1.type === "as" ? o1.as : undefined);
return {
props,
wrap,
wrapChildren,
...(as ? { as } : {}),
};
}
export function deriveRenderOpts(
props: Record<string, any>,
config: {
name: string;
descendantNames: string[];
internalVariantPropNames: string[];
internalArgPropNames: string[];
}
) {
const {
name,
descendantNames,
internalVariantPropNames,
internalArgPropNames,
} = config;
const reservedPropNames = ["variants", "args", "overrides"];
const variants = mergeVariants(
omit(pick(props, ...internalVariantPropNames), ...reservedPropNames),
props.variants
);
const args = mergeArgs(
omit(pick(props, ...internalArgPropNames), ...reservedPropNames),
props.args
);
let overrides = mergeFlexOverrides(
omit(
pick(props, ...descendantNames),
...internalArgPropNames,
...internalVariantPropNames,
...reservedPropNames
),
props.overrides
);
const leftoverProps = omit(
props,
"variants",
"args",
"overrides",
...descendantNames,
...internalVariantPropNames,
...internalArgPropNames
) as Partial<React.ComponentProps<"button">>;
if (Object.keys(leftoverProps).length > 0) {
overrides = mergeFlexOverrides(overrides, {
[name]: {
props: leftoverProps,
},
});
}
return { variants, args, overrides };
}
| {
if (!override || Object.keys(override).length === 0) {
return createElementWithChildren(defaultRoot, defaultProps, defaultProps.children)
}
const override2 = deriveOverride(override);
const props = mergeOverrideProps(defaultProps, override2.props);
if (override2.type === "render") {
return override2.render(
props as React.ComponentProps<DefaultElementType>,
defaultRoot
);
}
let root = defaultRoot;
if (override2.type === "as" && override2.as) {
if (defaultRoot === (Stack as React.ElementType)) {
// If there was an "as" override specified, but the default type is
// a Stack, then we don't want to switch to using "as" as the root,
// because then we'd lose the flex wrapper that Stack provides.
// Instead, we specify the "as" as the "as" prop to Stack.
props.as = override2.as;
} else {
root = override2.as;
}
}
let children = props.children;
if (override2.wrapChildren) {
children = override2.wrapChildren(ensureNotArray(children));
}
if (wrapChildrenInFlex) {
// For legacy, we still support data-plasmic-wrap-flex-children
children = wrapFlexContainerChildren(children, true);
}
let result = createElementWithChildren(root, props, children);
if (override2.wrap) {
result = override2.wrap(result) as React.ReactElement;
}
return result;
} | identifier_body |
codegen.rs | use grammar::{Grammar, NontermName, Rule, Sym, TermName};
pub fn codegen<B:BackendText>(back: &mut B) -> String where
// IMO these should not be necessary, see Rust issue #29143
B::Block: RenderIndent
{
let mut s = String::new();
s = s + &back.prefix();
let indent = back.rule_indent_preference();
let mut cg = Codegen::new(back);
for rule in &cg.grammar().rules {
// FIXME: make `fn on_rule` take a `&Rule` instead of cloning.
let (c, blocks) = cg.on_rule(rule.clone());
let l_a = cg.backend.nonterm_label(rule.left);
let b = cg.backend.block(l_a, c);
s = s + &b.render_indent(indent);
let blocks: String = blocks.iter()
.map(|b|b.render_indent(indent))
.collect();
s = s + &blocks;
}
s = s + &cg.backend.suffix();
return s;
}
pub trait RenderIndent {
fn render_indent(&self, usize) -> String;
fn render(&self) -> String { self.render_indent(0) }
}
pub trait BackendText: Backend where Self::Block: RenderIndent {
fn prefix(&self) -> String;
fn suffix(&self) -> String;
fn rule_indent_preference(&self) -> usize;
}
pub trait Backend {
type Command;
type Expr;
type Label: Clone;
type Block;
fn grammar(&self) -> &Grammar<usize>;
// (The label generators are all non `&mut self` because in
// principle we should generate the labels ahead of time
// for any given grammar.)
/// L_0 is the central loop of the parser.
fn label_0(&self) -> Self::Label;
/// R_A_k labels function call return to nonterm N from the
/// call associated with A_k. (A_k is unique in the grammar
/// and thus we can derive `N` from it in the formalism, but
/// it seems simpler to just pass it along in this API here.)
fn return_label(&self, n: NontermName, a_k: (NontermName, usize)) -> Self::Label;
/// L_A labels parse function for A.
fn nonterm_label(&self, a: NontermName) -> Self::Label;
/// L_A_i labels function for parsing ith alternate α_i of A.
fn alternate_label(&self,
a_i: (NontermName, usize)) -> Self::Label;
/// `L: C`
/// (note that `C` must have control flow ending in goto...)
fn block(&self, l: Self::Label, c: Self::Command) -> Self::Block;
/// Execute this command to report the parse attempt failed.
fn report_parse_failure(&self, &str) -> Self::Command;
/// Execute this command if something unexpected happened
/// in the generated code.
fn panic_fail(&self, &str) -> Self::Command;
/// the no-op command makes some constructions easier.
fn no_op(&self) -> Self::Command;
/// `cmd1, cmd2`
fn seq(&self,
cmd1: Self::Command,
cmd2: Self::Command) -> Self::Command;
/// `if test { then }
fn if_(&self,
test: Self::Expr,
then: Self::Command) -> Self::Command;
/// `if test { then } else { else_ }`
fn if_else(&self,
test: Self::Expr,
then: Self::Command,
else_: Self::Command) -> Self::Command;
/// `j := j + 1`
fn increment_curr(&self) -> Self::Command;
/// let L = label;
/// `goto L`
fn goto(&self, label: Self::Label) -> Self::Command;
/// this comes up a lot.
fn goto_l0(&self) -> Self::Command {
let l0 = self.label_0();
self.goto(l0)
}
/// `I[j] == a`
fn curr_matches_term(&self, a: TermName) -> Self::Expr;
/// let x = I[j]; let N = n;
/// `x in FIRST(N$)`
///
/// The leading optional component in alpha is meant to be
/// the first element of alpha, if it is present at all.
fn test_end<E:Copy>(&self, n: NontermName) -> Self::Expr;
/// let x = I[j]; let α = alpha;
/// `x in FIRST(α) or empty in FIRST(α) and x in FOLLOW(A)`
///
/// The leading optional component in alpha is meant to be
/// the first element of alpha, if it is present at all.
fn test<E:Copy>(&self,
a: NontermName,
alpha: (Option<NontermName>, &[Sym<E>])) -> Self::Expr;
/// `c_u := create(l, c_u, j)`
fn create(&self,
l: Self::Label) -> Self::Command;
/// `add(l, c_u, j)
fn add(&self, l: Self::Label) -> Self::Command;
/// `pop(c_u, j)`
fn pop(&self) -> Self::Command;
}
pub struct Codegen<'a, B:Backend+'a> {
pub backend: &'a mut B,
}
impl<'a, C:Backend> Codegen<'a, C> {
pub fn new(back: &'a mut C) -> Self {
Codegen { backend: back }
}
pub fn grammar(&self) -> &Grammar<usize> { self.backend.grammar() }
/// code(aα, j, X) = if I[j] = a {j := j+1} else {goto L_0}
pub fn on_term(&self, a: TermName) -> C::Command {
let b = &self.backend;
let matches = b.curr_matches_term(a);
let next_j = b.increment_curr();
let goto_l0 = b.goto_l0();
b.if_else(matches, next_j, goto_l0)
}
/// code(A_kα, j, X) =
/// if test(I[j], X, A_k α) {
/// c_u := create(R_A_k, c_u, j), goto L_A
/// } else {
/// goto L_0
/// }
/// R_A_k:
pub fn on_nonterm_instance<E:Copy>(&self,
(a, k): (NontermName, usize),
alpha: &[Sym<E>],
x: NontermName) -> (C::Command, C::Label) {
let b = &self.backend;
let matches = b.test(x, (Some(a), alpha));
let r_a_k = b.return_label(x, (a, k));
let create = b.create(r_a_k);
let l_a = b.nonterm_label(a);
let goto_la = b.goto(l_a);
let create_then_goto_la = b.seq(create, goto_la);
let goto_l0 = b.goto_l0();
let c = b.if_else(matches, create_then_goto_la, goto_l0);
let l = b.return_label(x, (a, k));
(c, l)
}
/// code(α, j, X) = ...
///
/// (driver for calling either of on_term/on_nonterm_instance)
pub fn on_symbols(&self,
alpha: &[Sym<usize>],
x: NontermName) -> (C::Command, Option<C::Label>) {
| / Given alpha = x1 x2 .. x_f, shorthand for
///
/// code(x1 .. x_f, j, A)
/// code( x2 .. x_f, j, A)
/// ...
/// code( x_f, j, A)
///
/// Each `code` maps to a command and (potentially) a trailing label;
/// therefore concatenating the codes results in a leading command
/// and a sequence of blocks.
/// The above maps to a command and a sequence of bl
pub fn on_symbols_in_prod(&self,
alpha: &[Sym<usize>],
a: NontermName,
end_with: C::Command)
-> (C::Command, Vec<C::Block>) {
let mut c = self.backend.no_op();
enum BuildState<C:Backend> {
FirstCommand,
MakeEndBlock {
first: C::Command,
then: Vec<C::Block>,
end: C::Label
}
}
let mut bs: BuildState<C> = BuildState::FirstCommand;
for i in 0..alpha.len() {
let (c2, opt_label) = self.on_symbols(&alpha[i..], a);
c = self.backend.seq(c, c2);
if let Some(l) = opt_label {
bs = match bs {
BuildState::FirstCommand =>
BuildState::MakeEndBlock {
first: c,
then: Vec::new(),
end: l
},
BuildState::MakeEndBlock {first,mut then,end} => {
let b = self.backend.block(end, c);
then.push(b);
BuildState::MakeEndBlock {
first: first,
then: then,
end: l
}
}
};
c = self.backend.no_op();
}
}
match bs {
BuildState::FirstCommand => {
c = self.backend.seq(c, end_with);
return (c, Vec::new());
}
BuildState::MakeEndBlock { first, mut then, end } => {
c = self.backend.seq(c, end_with);
let b = self.backend.block(end, c);
then.push(b);
return (first, then);
}
}
}
/// code(A ::= empty, j) = pop(c_u, j); goto L_0
///
/// code(A ::= <term> x_2 .. x_f , j) =
/// j := j + 1
/// code(x2 .. x_f, j, A)
/// code( x3 .. x_f, j, A)
/// ...
/// code( x_f, j, A)
/// pop(c_u, j),
/// goto L_0
///
/// code(A ::= X_l x_2 .. x_f, j) =
/// c_u := create(R_X_l, c_u, j);
/// goto L_X;
/// R_X_l: code(x_2 .. x_f, j, A)
/// code( x_3 .. x_f, j, A)
/// ...
/// code( x_f, j, A)
/// pop(c_u, j)
/// goto L_0
pub fn on_production(&self,
a: NontermName,
alpha: &[Sym<usize>]) -> (C::Command,
Vec<C::Block>) {
let end_with = {
let b = &self.backend;
let pop = b.pop();
let goto_l0 = b.goto_l0();
b.seq(pop, goto_l0)
};
if alpha.len() == 0 {
return (end_with, Vec::new());
}
match alpha[0] {
Sym::T(_) => {
// The code produced here is only meant to be run if
// we've already matched the first terminal of a
// non-empty α. It probably would be a good idea to
// actually assert such a match, but whatever.
let next_j = self.backend.increment_curr();
let (c, blocks) =
self.on_symbols_in_prod(&alpha[1..], a, end_with);
(self.backend.seq(next_j, c), blocks)
}
Sym::N { name: X, x: l } => {
let r_X_l = self.backend.return_label(a, (X, l));
let c1 = {
let b = &self.backend;
let l_X = b.nonterm_label(X);
let create = b.create(r_X_l.clone());
let goto_lX = b.goto(l_X);
b.seq(create, goto_lX)
};
let (c2, more_blocks) =
self.on_symbols_in_prod(&alpha[1..], a, end_with);
let block = self.backend.block(r_X_l, c2);
let mut blocks = Vec::with_capacity(1 + more_blocks.len());
blocks.push(block);
for b in more_blocks { blocks.push(b); }
(c1, blocks)
}
}
}
/// let the rule for A be `A ::= α_1 | ... | α_t`
///
/// code(A, j) if A is LL(1) nonterm =
/// if test(I[j], A, α_1) { goto L_A_1 }
/// ...
/// else if test(I[j], A, α_t) { goto L_A_t }
/// // (assert unreachable here?)
/// L_A_1: code(A ::= α_1, j)
/// ...
/// L_A_t: code(A ::= α_t, j)
///
/// code(A, j) if A is not LL(1) nonterm =
/// if test(I[j], A, α_1) { add(L_A_1, c_u, j) }
/// ...
/// if test(I[j], A, α_1) { add(L_A_t, c_u, j) }
/// goto L_0
/// L_A_1: code(A ::= α_1, j)
/// ...
/// L_A_t: code(A ::= α_t, j)
///
pub fn on_rule(&self,
r: Rule<usize>) -> (C::Command,
Vec<C::Block>) {
let Rule { left: a, right_hands: ref alphas } = r;
let c = if self.grammar().ll1s.contains(&a) {
let b = &self.backend;
let mut c = b.no_op();
for (i, alpha) in alphas.iter().enumerate() {
let test = b.test(a, (None, alpha));
let l_a_i = b.alternate_label((a, i));
let goto_l_a_i = b.goto(l_a_i);
let c2 = b.if_(test, goto_l_a_i);
c = b.seq(c, c2);
}
let u = b.panic_fail(&format!("unreachable for {}", a));
c = b.seq(c, u);
c
} else {
let b = &self.backend;
let mut c = b.no_op();
for (i, alpha) in alphas.iter().enumerate() {
let test = b.test(a, (None, alpha));
let l_a_i = b.alternate_label((a, i));
let add_l_a_i = b.add(l_a_i);
let c2 = b.if_(test, add_l_a_i);
c = b.seq(c, c2);
}
let goto_l0 = b.goto_l0();
c = b.seq(c, goto_l0);
c
};
// each call to `on_production` gives back a command and
// a potential block; we turn each command into its
// own block, so the total blocks is 2 * |alphas|.
let mut blocks = Vec::with_capacity(2*alphas.len());
for (i, alpha) in alphas.iter().enumerate() {
let (c, more_blocks) = self.on_production(a, alpha);
let b = &self.backend;
let l_a_i = b.alternate_label((a, i));
let block = b.block(l_a_i, c);
blocks.push(block);
for b in more_blocks { blocks.push(b); }
}
(c, blocks)
}
}
| // FIXME: the infrastructure should be revised to allow me to
// inline a sequence of terminals (since they do not need to
// be encoded into separate labelled blocks).
assert!(alpha.len() > 0);
let (s_0, alpha) = alpha.split_at(1);
match s_0[0] {
Sym::T(t) =>
(self.on_term(t), None),
Sym::N { name: a, x: x_ } => {
let (c, l) = self.on_nonterm_instance((a, x_), alpha, x);
(c, Some(l))
}
}
}
// | identifier_body |
codegen.rs | use grammar::{Grammar, NontermName, Rule, Sym, TermName};
pub fn codegen<B:BackendText>(back: &mut B) -> String where
// IMO these should not be necessary, see Rust issue #29143
B::Block: RenderIndent
{
let mut s = String::new();
s = s + &back.prefix();
let indent = back.rule_indent_preference();
let mut cg = Codegen::new(back);
for rule in &cg.grammar().rules {
// FIXME: make `fn on_rule` take a `&Rule` instead of cloning.
let (c, blocks) = cg.on_rule(rule.clone());
let l_a = cg.backend.nonterm_label(rule.left);
let b = cg.backend.block(l_a, c);
s = s + &b.render_indent(indent);
let blocks: String = blocks.iter()
.map(|b|b.render_indent(indent))
.collect();
s = s + &blocks;
}
s = s + &cg.backend.suffix();
return s;
}
pub trait RenderIndent {
fn render_indent(&self, usize) -> String;
fn render(&self) -> String { self.render_indent(0) }
}
pub trait BackendText: Backend where Self::Block: RenderIndent {
fn prefix(&self) -> String;
fn suffix(&self) -> String;
fn rule_indent_preference(&self) -> usize;
}
pub trait Backend {
type Command;
type Expr;
type Label: Clone;
type Block;
fn grammar(&self) -> &Grammar<usize>;
// (The label generators are all non `&mut self` because in
// principle we should generate the labels ahead of time
// for any given grammar.)
/// L_0 is the central loop of the parser.
fn label_0(&self) -> Self::Label;
/// R_A_k labels function call return to nonterm N from the
/// call associated with A_k. (A_k is unique in the grammar
/// and thus we can derive `N` from it in the formalism, but
/// it seems simpler to just pass it along in this API here.)
fn return_label(&self, n: NontermName, a_k: (NontermName, usize)) -> Self::Label;
/// L_A labels parse function for A.
fn nonterm_label(&self, a: NontermName) -> Self::Label;
/// L_A_i labels function for parsing ith alternate α_i of A.
fn alternate_label(&self,
a_i: (NontermName, usize)) -> Self::Label;
/// `L: C`
/// (note that `C` must have control flow ending in goto...)
fn block(&self, l: Self::Label, c: Self::Command) -> Self::Block;
/// Execute this command to report the parse attempt failed.
fn report_parse_failure(&self, &str) -> Self::Command;
/// Execute this command if something unexpected happened
/// in the generated code.
fn panic_fail(&self, &str) -> Self::Command;
/// the no-op command makes some constructions easier.
fn no_op(&self) -> Self::Command;
/// `cmd1, cmd2`
fn seq(&self,
cmd1: Self::Command,
cmd2: Self::Command) -> Self::Command;
/// `if test { then }
fn if_(&self,
test: Self::Expr,
then: Self::Command) -> Self::Command;
/// `if test { then } else { else_ }`
fn if_else(&self,
test: Self::Expr,
then: Self::Command,
else_: Self::Command) -> Self::Command;
/// `j := j + 1`
fn increment_curr(&self) -> Self::Command;
/// let L = label;
/// `goto L`
fn goto(&self, label: Self::Label) -> Self::Command;
/// this comes up a lot.
fn goto_l0(&self) -> Self::Command {
let l0 = self.label_0();
self.goto(l0)
}
/// `I[j] == a`
fn curr_matches_term(&self, a: TermName) -> Self::Expr;
/// let x = I[j]; let N = n;
/// `x in FIRST(N$)`
///
/// The leading optional component in alpha is meant to be
/// the first element of alpha, if it is present at all.
fn test_end<E:Copy>(&self, n: NontermName) -> Self::Expr;
/// let x = I[j]; let α = alpha;
/// `x in FIRST(α) or empty in FIRST(α) and x in FOLLOW(A)`
///
/// The leading optional component in alpha is meant to be
/// the first element of alpha, if it is present at all.
fn test<E:Copy>(&self,
a: NontermName,
alpha: (Option<NontermName>, &[Sym<E>])) -> Self::Expr;
/// `c_u := create(l, c_u, j)`
fn create(&self,
l: Self::Label) -> Self::Command;
/// `add(l, c_u, j)
fn add(&self, l: Self::Label) -> Self::Command;
/// `pop(c_u, j)`
fn pop(&self) -> Self::Command;
}
pub struct Codegen<'a, B:Backend+'a> {
pub backend: &'a mut B,
}
impl<'a, C:Backend> Codegen<'a, C> {
pub fn new(back: &'a mut C) -> Self {
Codegen { backend: back }
}
pub fn grammar(&self) -> &Grammar<usize> { self.backend.grammar() }
/// code(aα, j, X) = if I[j] = a {j := j+1} else {goto L_0}
pub fn on_te | f, a: TermName) -> C::Command {
let b = &self.backend;
let matches = b.curr_matches_term(a);
let next_j = b.increment_curr();
let goto_l0 = b.goto_l0();
b.if_else(matches, next_j, goto_l0)
}
/// code(A_kα, j, X) =
/// if test(I[j], X, A_k α) {
/// c_u := create(R_A_k, c_u, j), goto L_A
/// } else {
/// goto L_0
/// }
/// R_A_k:
pub fn on_nonterm_instance<E:Copy>(&self,
(a, k): (NontermName, usize),
alpha: &[Sym<E>],
x: NontermName) -> (C::Command, C::Label) {
let b = &self.backend;
let matches = b.test(x, (Some(a), alpha));
let r_a_k = b.return_label(x, (a, k));
let create = b.create(r_a_k);
let l_a = b.nonterm_label(a);
let goto_la = b.goto(l_a);
let create_then_goto_la = b.seq(create, goto_la);
let goto_l0 = b.goto_l0();
let c = b.if_else(matches, create_then_goto_la, goto_l0);
let l = b.return_label(x, (a, k));
(c, l)
}
/// code(α, j, X) = ...
///
/// (driver for calling either of on_term/on_nonterm_instance)
pub fn on_symbols(&self,
alpha: &[Sym<usize>],
x: NontermName) -> (C::Command, Option<C::Label>) {
// FIXME: the infrastructure should be revised to allow me to
// inline a sequence of terminals (since they do not need to
// be encoded into separate labelled blocks).
assert!(alpha.len() > 0);
let (s_0, alpha) = alpha.split_at(1);
match s_0[0] {
Sym::T(t) =>
(self.on_term(t), None),
Sym::N { name: a, x: x_ } => {
let (c, l) = self.on_nonterm_instance((a, x_), alpha, x);
(c, Some(l))
}
}
}
/// Given alpha = x1 x2 .. x_f, shorthand for
///
/// code(x1 .. x_f, j, A)
/// code( x2 .. x_f, j, A)
/// ...
/// code( x_f, j, A)
///
/// Each `code` maps to a command and (potentially) a trailing label;
/// therefore concatenating the codes results in a leading command
/// and a sequence of blocks.
/// The above maps to a command and a sequence of bl
pub fn on_symbols_in_prod(&self,
alpha: &[Sym<usize>],
a: NontermName,
end_with: C::Command)
-> (C::Command, Vec<C::Block>) {
let mut c = self.backend.no_op();
enum BuildState<C:Backend> {
FirstCommand,
MakeEndBlock {
first: C::Command,
then: Vec<C::Block>,
end: C::Label
}
}
let mut bs: BuildState<C> = BuildState::FirstCommand;
for i in 0..alpha.len() {
let (c2, opt_label) = self.on_symbols(&alpha[i..], a);
c = self.backend.seq(c, c2);
if let Some(l) = opt_label {
bs = match bs {
BuildState::FirstCommand =>
BuildState::MakeEndBlock {
first: c,
then: Vec::new(),
end: l
},
BuildState::MakeEndBlock {first,mut then,end} => {
let b = self.backend.block(end, c);
then.push(b);
BuildState::MakeEndBlock {
first: first,
then: then,
end: l
}
}
};
c = self.backend.no_op();
}
}
match bs {
BuildState::FirstCommand => {
c = self.backend.seq(c, end_with);
return (c, Vec::new());
}
BuildState::MakeEndBlock { first, mut then, end } => {
c = self.backend.seq(c, end_with);
let b = self.backend.block(end, c);
then.push(b);
return (first, then);
}
}
}
/// code(A ::= empty, j) = pop(c_u, j); goto L_0
///
/// code(A ::= <term> x_2 .. x_f , j) =
/// j := j + 1
/// code(x2 .. x_f, j, A)
/// code( x3 .. x_f, j, A)
/// ...
/// code( x_f, j, A)
/// pop(c_u, j),
/// goto L_0
///
/// code(A ::= X_l x_2 .. x_f, j) =
/// c_u := create(R_X_l, c_u, j);
/// goto L_X;
/// R_X_l: code(x_2 .. x_f, j, A)
/// code( x_3 .. x_f, j, A)
/// ...
/// code( x_f, j, A)
/// pop(c_u, j)
/// goto L_0
pub fn on_production(&self,
a: NontermName,
alpha: &[Sym<usize>]) -> (C::Command,
Vec<C::Block>) {
let end_with = {
let b = &self.backend;
let pop = b.pop();
let goto_l0 = b.goto_l0();
b.seq(pop, goto_l0)
};
if alpha.len() == 0 {
return (end_with, Vec::new());
}
match alpha[0] {
Sym::T(_) => {
// The code produced here is only meant to be run if
// we've already matched the first terminal of a
// non-empty α. It probably would be a good idea to
// actually assert such a match, but whatever.
let next_j = self.backend.increment_curr();
let (c, blocks) =
self.on_symbols_in_prod(&alpha[1..], a, end_with);
(self.backend.seq(next_j, c), blocks)
}
Sym::N { name: X, x: l } => {
let r_X_l = self.backend.return_label(a, (X, l));
let c1 = {
let b = &self.backend;
let l_X = b.nonterm_label(X);
let create = b.create(r_X_l.clone());
let goto_lX = b.goto(l_X);
b.seq(create, goto_lX)
};
let (c2, more_blocks) =
self.on_symbols_in_prod(&alpha[1..], a, end_with);
let block = self.backend.block(r_X_l, c2);
let mut blocks = Vec::with_capacity(1 + more_blocks.len());
blocks.push(block);
for b in more_blocks { blocks.push(b); }
(c1, blocks)
}
}
}
/// let the rule for A be `A ::= α_1 | ... | α_t`
///
/// code(A, j) if A is LL(1) nonterm =
/// if test(I[j], A, α_1) { goto L_A_1 }
/// ...
/// else if test(I[j], A, α_t) { goto L_A_t }
/// // (assert unreachable here?)
/// L_A_1: code(A ::= α_1, j)
/// ...
/// L_A_t: code(A ::= α_t, j)
///
/// code(A, j) if A is not LL(1) nonterm =
/// if test(I[j], A, α_1) { add(L_A_1, c_u, j) }
/// ...
/// if test(I[j], A, α_1) { add(L_A_t, c_u, j) }
/// goto L_0
/// L_A_1: code(A ::= α_1, j)
/// ...
/// L_A_t: code(A ::= α_t, j)
///
pub fn on_rule(&self,
r: Rule<usize>) -> (C::Command,
Vec<C::Block>) {
let Rule { left: a, right_hands: ref alphas } = r;
let c = if self.grammar().ll1s.contains(&a) {
let b = &self.backend;
let mut c = b.no_op();
for (i, alpha) in alphas.iter().enumerate() {
let test = b.test(a, (None, alpha));
let l_a_i = b.alternate_label((a, i));
let goto_l_a_i = b.goto(l_a_i);
let c2 = b.if_(test, goto_l_a_i);
c = b.seq(c, c2);
}
let u = b.panic_fail(&format!("unreachable for {}", a));
c = b.seq(c, u);
c
} else {
let b = &self.backend;
let mut c = b.no_op();
for (i, alpha) in alphas.iter().enumerate() {
let test = b.test(a, (None, alpha));
let l_a_i = b.alternate_label((a, i));
let add_l_a_i = b.add(l_a_i);
let c2 = b.if_(test, add_l_a_i);
c = b.seq(c, c2);
}
let goto_l0 = b.goto_l0();
c = b.seq(c, goto_l0);
c
};
// each call to `on_production` gives back a command and
// a potential block; we turn each command into its
// own block, so the total blocks is 2 * |alphas|.
let mut blocks = Vec::with_capacity(2*alphas.len());
for (i, alpha) in alphas.iter().enumerate() {
let (c, more_blocks) = self.on_production(a, alpha);
let b = &self.backend;
let l_a_i = b.alternate_label((a, i));
let block = b.block(l_a_i, c);
blocks.push(block);
for b in more_blocks { blocks.push(b); }
}
(c, blocks)
}
}
| rm(&sel | identifier_name |
codegen.rs | use grammar::{Grammar, NontermName, Rule, Sym, TermName};
pub fn codegen<B:BackendText>(back: &mut B) -> String where
// IMO these should not be necessary, see Rust issue #29143
B::Block: RenderIndent
{
let mut s = String::new();
s = s + &back.prefix();
let indent = back.rule_indent_preference();
let mut cg = Codegen::new(back);
for rule in &cg.grammar().rules {
// FIXME: make `fn on_rule` take a `&Rule` instead of cloning.
let (c, blocks) = cg.on_rule(rule.clone());
let l_a = cg.backend.nonterm_label(rule.left);
let b = cg.backend.block(l_a, c);
s = s + &b.render_indent(indent);
let blocks: String = blocks.iter()
.map(|b|b.render_indent(indent))
.collect();
s = s + &blocks;
}
s = s + &cg.backend.suffix();
return s;
}
pub trait RenderIndent {
fn render_indent(&self, usize) -> String;
fn render(&self) -> String { self.render_indent(0) }
}
pub trait BackendText: Backend where Self::Block: RenderIndent {
fn prefix(&self) -> String;
fn suffix(&self) -> String;
fn rule_indent_preference(&self) -> usize;
}
pub trait Backend {
type Command;
type Expr;
type Label: Clone;
type Block;
fn grammar(&self) -> &Grammar<usize>;
// (The label generators are all non `&mut self` because in
// principle we should generate the labels ahead of time
// for any given grammar.)
/// L_0 is the central loop of the parser.
fn label_0(&self) -> Self::Label;
/// R_A_k labels function call return to nonterm N from the
/// call associated with A_k. (A_k is unique in the grammar
/// and thus we can derive `N` from it in the formalism, but
/// it seems simpler to just pass it along in this API here.)
fn return_label(&self, n: NontermName, a_k: (NontermName, usize)) -> Self::Label;
/// L_A labels parse function for A.
fn nonterm_label(&self, a: NontermName) -> Self::Label;
/// L_A_i labels function for parsing ith alternate α_i of A.
fn alternate_label(&self,
a_i: (NontermName, usize)) -> Self::Label;
/// `L: C`
/// (note that `C` must have control flow ending in goto...)
fn block(&self, l: Self::Label, c: Self::Command) -> Self::Block;
/// Execute this command to report the parse attempt failed.
fn report_parse_failure(&self, &str) -> Self::Command;
/// Execute this command if something unexpected happened
/// in the generated code.
fn panic_fail(&self, &str) -> Self::Command;
/// the no-op command makes some constructions easier.
fn no_op(&self) -> Self::Command;
/// `cmd1, cmd2`
fn seq(&self,
cmd1: Self::Command,
cmd2: Self::Command) -> Self::Command;
/// `if test { then }
fn if_(&self,
test: Self::Expr,
then: Self::Command) -> Self::Command;
/// `if test { then } else { else_ }`
fn if_else(&self,
test: Self::Expr,
then: Self::Command,
else_: Self::Command) -> Self::Command;
/// `j := j + 1`
fn increment_curr(&self) -> Self::Command;
/// let L = label;
/// `goto L`
fn goto(&self, label: Self::Label) -> Self::Command;
/// this comes up a lot.
fn goto_l0(&self) -> Self::Command {
let l0 = self.label_0();
self.goto(l0)
}
/// `I[j] == a`
fn curr_matches_term(&self, a: TermName) -> Self::Expr;
/// let x = I[j]; let N = n;
/// `x in FIRST(N$)`
///
/// The leading optional component in alpha is meant to be
/// the first element of alpha, if it is present at all.
fn test_end<E:Copy>(&self, n: NontermName) -> Self::Expr;
/// let x = I[j]; let α = alpha;
/// `x in FIRST(α) or empty in FIRST(α) and x in FOLLOW(A)`
///
/// The leading optional component in alpha is meant to be
/// the first element of alpha, if it is present at all.
fn test<E:Copy>(&self,
a: NontermName,
alpha: (Option<NontermName>, &[Sym<E>])) -> Self::Expr;
/// `c_u := create(l, c_u, j)`
fn create(&self,
l: Self::Label) -> Self::Command;
/// `add(l, c_u, j)
fn add(&self, l: Self::Label) -> Self::Command;
/// `pop(c_u, j)`
fn pop(&self) -> Self::Command;
}
pub struct Codegen<'a, B:Backend+'a> {
pub backend: &'a mut B,
}
impl<'a, C:Backend> Codegen<'a, C> {
pub fn new(back: &'a mut C) -> Self {
Codegen { backend: back }
}
pub fn grammar(&self) -> &Grammar<usize> { self.backend.grammar() }
/// code(aα, j, X) = if I[j] = a {j := j+1} else {goto L_0}
pub fn on_term(&self, a: TermName) -> C::Command {
let b = &self.backend;
let matches = b.curr_matches_term(a);
let next_j = b.increment_curr();
let goto_l0 = b.goto_l0();
b.if_else(matches, next_j, goto_l0)
}
/// code(A_kα, j, X) =
/// if test(I[j], X, A_k α) {
/// c_u := create(R_A_k, c_u, j), goto L_A
/// } else {
/// goto L_0
/// }
/// R_A_k:
pub fn on_nonterm_instance<E:Copy>(&self,
(a, k): (NontermName, usize),
alpha: &[Sym<E>],
x: NontermName) -> (C::Command, C::Label) {
let b = &self.backend;
let matches = b.test(x, (Some(a), alpha));
let r_a_k = b.return_label(x, (a, k));
let create = b.create(r_a_k);
let l_a = b.nonterm_label(a);
let goto_la = b.goto(l_a);
let create_then_goto_la = b.seq(create, goto_la);
let goto_l0 = b.goto_l0();
let c = b.if_else(matches, create_then_goto_la, goto_l0);
let l = b.return_label(x, (a, k));
(c, l)
}
/// code(α, j, X) = ...
///
/// (driver for calling either of on_term/on_nonterm_instance)
pub fn on_symbols(&self,
alpha: &[Sym<usize>],
x: NontermName) -> (C::Command, Option<C::Label>) {
// FIXME: the infrastructure should be revised to allow me to
// inline a sequence of terminals (since they do not need to
// be encoded into separate labelled blocks).
assert!(alpha.len() > 0);
let (s_0, alpha) = alpha.split_at(1);
match s_0[0] {
Sym::T(t) =>
(self.on_term(t), None),
Sym::N { name: a, x: x_ } => {
let (c, l) = self.on_nonterm_instance((a, x_), alpha, x);
(c, Some(l))
}
}
}
/// Given alpha = x1 x2 .. x_f, shorthand for
///
/// code(x1 .. x_f, j, A)
/// code( x2 .. x_f, j, A)
/// ...
/// code( x_f, j, A)
///
/// Each `code` maps to a command and (potentially) a trailing label;
/// therefore concatenating the codes results in a leading command
/// and a sequence of blocks.
/// The above maps to a command and a sequence of bl
pub fn on_symbols_in_prod(&self,
alpha: &[Sym<usize>],
a: NontermName,
end_with: C::Command)
-> (C::Command, Vec<C::Block>) {
let mut c = self.backend.no_op();
enum BuildState<C:Backend> {
FirstCommand,
MakeEndBlock {
first: C::Command,
then: Vec<C::Block>,
end: C::Label
}
}
let mut bs: BuildState<C> = BuildState::FirstCommand;
for i in 0..alpha.len() {
let (c2, opt_label) = self.on_symbols(&alpha[i..], a);
c = self.backend.seq(c, c2);
if let Some(l) = opt_label {
bs = match bs {
BuildState::FirstCommand =>
BuildState::MakeEndBlock {
first: c,
then: Vec::new(),
end: l
},
BuildState::MakeEndBlock {first,mut then,end} => {
let b = self.backend.block(end, c);
then.push(b);
BuildState::MakeEndBlock {
first: first,
then: then,
end: l
}
}
};
c = self.backend.no_op();
}
}
match bs {
BuildState::FirstCommand => {
c = self.backend.seq(c, end_with);
return (c, Vec::new());
}
BuildState::MakeEndBlock { first, mut then, end } => {
c = self.backend.seq(c, end_with);
let b = self.backend.block(end, c);
then.push(b);
return (first, then);
}
}
}
/// code(A ::= empty, j) = pop(c_u, j); goto L_0
///
/// code(A ::= <term> x_2 .. x_f , j) =
/// j := j + 1
/// code(x2 .. x_f, j, A)
/// code( x3 .. x_f, j, A)
/// ...
/// code( x_f, j, A)
/// pop(c_u, j),
/// goto L_0
///
/// code(A ::= X_l x_2 .. x_f, j) =
/// c_u := create(R_X_l, c_u, j);
/// goto L_X;
/// R_X_l: code(x_2 .. x_f, j, A)
/// code( x_3 .. x_f, j, A)
/// ...
/// code( x_f, j, A)
/// pop(c_u, j)
/// goto L_0
pub fn on_production(&self,
a: NontermName,
alpha: &[Sym<usize>]) -> (C::Command,
Vec<C::Block>) {
let end_with = {
let b = &self.backend;
let pop = b.pop();
let goto_l0 = b.goto_l0();
b.seq(pop, goto_l0)
};
if alpha.len() == 0 {
return (end_with, Vec::new());
}
match alpha[0] {
Sym::T(_) => {
// The code produced here is only meant to be run if
// we've already matched the first terminal of a
// non-empty α. It probably would be a good idea to
// actually assert such a match, but whatever.
let next_j = self.backend.increment_curr();
let (c, blocks) =
self.on_symbols_in_prod(&alpha[1..], a, end_with);
(self.backend.seq(next_j, c), blocks)
}
Sym::N { name: X, x: l } => {
let r_X_l = self.backend.return_label(a, (X, l));
let c1 = {
let b = &self.backend;
let l_X = b.nonterm_label(X);
let create = b.create(r_X_l.clone());
let goto_lX = b.goto(l_X);
b.seq(create, goto_lX)
};
let (c2, more_blocks) =
self.on_symbols_in_prod(&alpha[1..], a, end_with);
let block = self.backend.block(r_X_l, c2);
let mut blocks = Vec::with_capacity(1 + more_blocks.len());
blocks.push(block);
for b in more_blocks { blocks.push(b); }
(c1, blocks)
}
}
}
/// let the rule for A be `A ::= α_1 | ... | α_t`
///
/// code(A, j) if A is LL(1) nonterm =
/// if test(I[j], A, α_1) { goto L_A_1 }
/// ...
/// else if test(I[j], A, α_t) { goto L_A_t }
/// // (assert unreachable here?)
/// L_A_1: code(A ::= α_1, j)
/// ...
/// L_A_t: code(A ::= α_t, j)
///
/// code(A, j) if A is not LL(1) nonterm =
/// if test(I[j], A, α_1) { add(L_A_1, c_u, j) }
/// ...
/// if test(I[j], A, α_1) { add(L_A_t, c_u, j) }
/// goto L_0
/// L_A_1: code(A ::= α_1, j)
/// ...
/// L_A_t: code(A ::= α_t, j)
///
pub fn on_rule(&self,
r: Rule<usize>) -> (C::Command,
Vec<C::Block>) {
let Rule { left: a, right_hands: ref alphas } = r;
let c = if self.grammar().ll1s.contains(&a) {
let b = &self.backend;
let mut c = b.no_op();
for (i, alpha) in alphas.iter().enumerate() {
let test = b.test(a, (None, alpha));
let l_a_i = b.alternate_label((a, i));
let goto_l_a_i = b.goto(l_a_i);
let c2 = b.if_(test, goto_l_a_i);
c = b.seq(c, c2);
}
let u = b.panic_fail(&format!("unreachable for {}", a));
c = b.seq(c, u);
c
} else {
let b = &self.backend; | let l_a_i = b.alternate_label((a, i));
let add_l_a_i = b.add(l_a_i);
let c2 = b.if_(test, add_l_a_i);
c = b.seq(c, c2);
}
let goto_l0 = b.goto_l0();
c = b.seq(c, goto_l0);
c
};
// each call to `on_production` gives back a command and
// a potential block; we turn each command into its
// own block, so the total blocks is 2 * |alphas|.
let mut blocks = Vec::with_capacity(2*alphas.len());
for (i, alpha) in alphas.iter().enumerate() {
let (c, more_blocks) = self.on_production(a, alpha);
let b = &self.backend;
let l_a_i = b.alternate_label((a, i));
let block = b.block(l_a_i, c);
blocks.push(block);
for b in more_blocks { blocks.push(b); }
}
(c, blocks)
}
} | let mut c = b.no_op();
for (i, alpha) in alphas.iter().enumerate() {
let test = b.test(a, (None, alpha)); | random_line_split |
mod.rs | use std::{
convert::TryInto,
io::SeekFrom,
mem::size_of,
path::{Path, PathBuf},
};
use bincode::{deserialize, serialize_into, serialized_size};
use cfg_if::cfg_if;
use once_cell::sync::OnceCell;
use serde::{de::DeserializeOwned, Serialize};
use tokio::{
fs::{create_dir, remove_file, OpenOptions},
io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWriteExt},
};
#[cfg(feature = "replication")]
use crate::node::persistence::snapshot::REPLICATION_FILE as SNAPSHOT_REPLICATION_FILE;
use crate::{
config::persistence::PersistenceConfig,
node::{
event::{Event, EventLog},
persistence::{snapshot::Snapshot, PersistenceError},
Queue,
},
};
/// Queue log file name
const QUEUE_FILE: &str = "queue_log";
/// Queue compacted log file name
const QUEUE_COMPACTION_FILE: &str = "queue_compacted_log";
pub struct Log<'c> {
/// Persistence config
config: &'c PersistenceConfig<'c>,
/// Internal instance of [`Snapshot`] driver
///
/// Due to limitations of current replication storage implementation
/// it is impossible to rely only on [`Log`] driver to save event log,
/// so [`Snapshot`] driver is being used to fill the gap.
snapshot: OnceCell<Snapshot<'c>>,
}
impl<'c> Log<'c> {
pub fn new(config: &'c PersistenceConfig) -> Self {
Log {
config,
snapshot: OnceCell::new(),
}
}
/// Make log entry from serializable source
///
/// Returns bytes buffer, filled with header (currently only with entry size) and serialized entry, without any offset between each other.
/// ```
/// +---------+
/// |Entry len|
/// +---------+
/// | |
/// | Entry |
/// | |
/// +---------+
/// ```
fn make_log_entry<S>(source: &S) -> Result<Vec<u8>, PersistenceError>
where
S: Serialize,
{
let size = serialized_size(source).map_err(PersistenceError::SerializationError)?;
debug!("Log entry size: {}", size);
let capacity = size_of::<u64>()
+ TryInto::<usize>::try_into(size).map_err(PersistenceError::LogEntryTooBig)?;
let mut buf = Vec::with_capacity(capacity);
buf.extend(&size.to_le_bytes());
buf.resize(capacity, 0);
serialize_into(&mut buf[size_of::<u64>()..], source)
.map_err(PersistenceError::SerializationError)?;
Ok(buf)
}
/// Get buffer of log entries from byte source
async fn parse_log<T, S>(source: &mut S) -> Result<Vec<T>, PersistenceError>
where
T: DeserializeOwned,
S: AsyncSeek + AsyncRead + Unpin,
{
let mut entries = Vec::new();
let source_size = source
.seek(SeekFrom::End(0))
.await
.map_err(PersistenceError::from)?;
debug!("Log source size: {}", source_size);
source
.seek(SeekFrom::Start(0))
.await
.map_err(PersistenceError::from)?;
let mut buf = Vec::new();
while source
.seek(SeekFrom::Current(0))
.await
.map_err(PersistenceError::from)?
< source_size
{
let size = source.read_u64_le().await.map_err(PersistenceError::from)?;
debug!("Log entry size: {}", size);
buf.reserve(size.try_into().map_err(PersistenceError::LogEntryTooBig)?);
source
.take(size)
.read_buf(&mut buf)
.await
.map_err(PersistenceError::from)?;
entries.push(deserialize(&buf).map_err(PersistenceError::SerializationError)?);
buf.clear();
}
Ok(entries)
}
/// Appends [make_log_entry] result of `source` to `destination`
///
/// [make_log_entry]: Log::make_log_entry
async fn append<P, S>(&self, source: &S, destination: P) -> Result<(), PersistenceError>
where
P: AsRef<Path>,
S: Serialize,
{
let path = self.config.path.join(destination);
debug!("Appending to {}", path.display());
if let Some(parent) = path.parent() {
if !parent.is_dir() {
create_dir(&parent).await.map_err(PersistenceError::from)?;
}
}
OpenOptions::new()
.create(true)
.append(true)
.open(path)
.await
.map_err(PersistenceError::from)?
.write_all(&Self::make_log_entry(source)?)
.await
.map_err(PersistenceError::from)
}
/// Get log entries from `source` log file using [parse_log]
///
/// [parse_log]: Log::parse_log
async fn load<S, P>(&self, source: P) -> Result<Vec<S>, PersistenceError>
where
S: DeserializeOwned,
P: AsRef<Path>,
|
/// Append single event to `source` log file (usually queue name)
pub async fn persist_event<P>(
&self,
event: &Event<'_>,
source: P,
) -> Result<(), PersistenceError>
where
P: AsRef<Path>,
{
self.append(event, source.as_ref().join(QUEUE_FILE)).await
}
/// Restore database events from `source` log file (usually queue name)
///
/// If specified in [`PersistenceConfig`], compaction will be executed after successful loading.
pub async fn load_queue<P, DB>(&self, source: P) -> Result<Queue<DB>, PersistenceError>
where
P: AsRef<Path>,
DB: EventLog<Vec<Event<'static>>> + Serialize + DeserializeOwned,
{
let events = match self
.load::<Event, _>(source.as_ref().join(QUEUE_FILE))
.await
{
Ok(events) => events,
Err(PersistenceError::FileOpenError(e)) => {
error!("Log file not found: {}", e);
Vec::new()
}
Err(e) => return Err(e),
};
let database = if self.config.compaction {
let compaction_path = source.as_ref().join(QUEUE_COMPACTION_FILE);
let inner_db = match self.get_snapshot().load::<DB, _>(&compaction_path).await {
Ok(mut database) => {
database.apply_log(events);
database
}
Err(PersistenceError::FileOpenError(e)) => {
error!("Compaction file not found: {}", e);
DB::from_log(events)
}
Err(e) => return Err(e),
};
self.get_snapshot()
.persist(&inner_db, &compaction_path)
.await?;
match self.prune(&source).await {
Err(PersistenceError::FileOpenError(_)) | Ok(_) => (),
Err(e) => return Err(e),
};
inner_db
} else {
DB::from_log(events)
};
cfg_if! {
if #[cfg(feature = "replication")] {
// Thanks to GC threshold, it's currently impossible to use log driver
let replication_storage = match self.get_snapshot().load(source.as_ref().join(SNAPSHOT_REPLICATION_FILE)).await {
Ok(storage) => storage,
Err(PersistenceError::FileOpenError(e)) => {
error!("{}", e);
None
},
Err(e) => return Err(e)
};
let queue = Queue::new(database, replication_storage);
} else {
let queue = Queue::new(database);
}
}
Ok(queue)
}
/// Prune `queue` log file
async fn prune<P>(&self, queue: P) -> Result<(), PersistenceError>
where
P: AsRef<Path>,
{
let path = [&self.config.path, queue.as_ref(), QUEUE_FILE.as_ref()]
.iter()
.collect::<PathBuf>();
debug!("Pruning {}", path.display());
remove_file(path).await.map_err(PersistenceError::from)
}
/// Get shared [`Snapshot`] instance
fn get_snapshot(&self) -> &Snapshot<'_> {
self.snapshot.get_or_init(|| Snapshot::new(self.config))
}
}
#[cfg(test)]
mod tests {
use std::{borrow::Cow, io::Cursor};
use maybe_owned::MaybeOwned;
use spartan_lib::core::{
db::TreeDatabase,
dispatcher::StatusAwareDispatcher,
message::{builder::MessageBuilder, Message},
payload::Dispatchable,
};
use tempfile::{NamedTempFile, TempDir};
use super::*;
use crate::{config::persistence::Persistence, node::DB};
#[tokio::test]
async fn test_append_read() {
let file = NamedTempFile::new().unwrap();
let config = PersistenceConfig {
path: Cow::Borrowed(file.path().parent().unwrap()),
..Default::default()
};
let log = Log::new(&config);
log.append(&String::from("Hello, world"), file.path())
.await
.unwrap();
let entries = log.load::<String, _>(file.path()).await.unwrap();
assert_eq!(entries.len(), 1);
assert_eq!(entries.first().unwrap(), &String::from("Hello, world"));
}
#[tokio::test]
async fn test_empty_file_load() {
let file = NamedTempFile::new().unwrap();
let config = PersistenceConfig {
path: Cow::Borrowed(file.path().parent().unwrap()),
..Default::default()
};
let log = Log::new(&config);
let entries = log.load::<String, _>(file.path()).await.unwrap();
assert!(entries.is_empty());
}
#[tokio::test]
async fn test_serialize_log_entry() {
let entry = Log::make_log_entry(&vec![1u32, 2, 3]).unwrap();
let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entry))
.await
.unwrap();
assert_eq!(parsed.len(), 1);
assert_eq!(&*parsed.first().unwrap(), &[1, 2, 3]);
}
#[tokio::test]
async fn test_multiple_log_entries() {
let mut entries = Vec::new();
entries.append(&mut Log::make_log_entry(&vec![1u32, 2, 3]).unwrap());
entries.append(&mut Log::make_log_entry(&vec![4, 5, 6]).unwrap());
entries.append(&mut Log::make_log_entry(&vec![7, 8, 9]).unwrap());
let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entries))
.await
.unwrap();
assert_eq!(parsed.len(), 3);
assert_eq!(parsed, vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]);
}
#[tokio::test]
async fn test_persist_and_restore_from_events() {
let tempdir = TempDir::new().expect("Unable to create temporary test directory");
let event = Event::Push(MaybeOwned::Owned(
MessageBuilder::default().body("Hello").compose().unwrap(),
));
let config = PersistenceConfig {
mode: Persistence::Log,
path: Cow::Borrowed(tempdir.path()),
timer: 0,
compaction: false,
};
let log = Log::new(&config);
log.persist_event(&event, "test").await.unwrap();
let queue: DB = log.load_queue("test").await.unwrap();
assert_eq!(queue.database().await.pop().unwrap().body(), "Hello");
}
#[tokio::test]
async fn test_compaction() {
let tempdir = TempDir::new().expect("Unable to create temporary test directory");
let event = Event::Push(MaybeOwned::Owned(
MessageBuilder::default().body("Hello").compose().unwrap(),
));
let config = PersistenceConfig {
mode: Persistence::Log,
path: Cow::Borrowed(tempdir.path()),
timer: 0,
compaction: true,
};
let log = Log::new(&config);
log.persist_event(&event, "test").await.unwrap();
let queue: DB = log.load_queue("test").await.unwrap();
assert_eq!(queue.database().await.pop().unwrap().body(), "Hello");
assert!(matches!(
log.load::<Event, _>(Path::new("test").join(QUEUE_FILE))
.await
.unwrap_err(),
PersistenceError::FileOpenError(_)
));
let snapshot = Snapshot::new(&config);
let mut database: TreeDatabase<Message> = snapshot
.load(Path::new("test").join(QUEUE_COMPACTION_FILE))
.await
.unwrap();
assert_eq!(database.pop().unwrap().body(), "Hello");
}
}
| {
let path = self.config.path.join(source);
debug!("Loading from {}", path.display());
let mut file = OpenOptions::new()
.read(true)
.open(path)
.await
.map_err(PersistenceError::from)?;
Self::parse_log(&mut file).await
} | identifier_body |
mod.rs | use std::{
convert::TryInto,
io::SeekFrom,
mem::size_of,
path::{Path, PathBuf},
};
use bincode::{deserialize, serialize_into, serialized_size};
use cfg_if::cfg_if;
use once_cell::sync::OnceCell;
use serde::{de::DeserializeOwned, Serialize};
use tokio::{
fs::{create_dir, remove_file, OpenOptions},
io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWriteExt},
};
#[cfg(feature = "replication")]
use crate::node::persistence::snapshot::REPLICATION_FILE as SNAPSHOT_REPLICATION_FILE;
use crate::{
config::persistence::PersistenceConfig,
node::{
event::{Event, EventLog},
persistence::{snapshot::Snapshot, PersistenceError},
Queue,
},
};
/// Queue log file name
const QUEUE_FILE: &str = "queue_log";
/// Queue compacted log file name
const QUEUE_COMPACTION_FILE: &str = "queue_compacted_log";
pub struct Log<'c> {
/// Persistence config
config: &'c PersistenceConfig<'c>,
/// Internal instance of [`Snapshot`] driver
///
/// Due to limitations of current replication storage implementation
/// it is impossible to rely only on [`Log`] driver to save event log,
/// so [`Snapshot`] driver is being used to fill the gap.
snapshot: OnceCell<Snapshot<'c>>,
}
impl<'c> Log<'c> {
pub fn new(config: &'c PersistenceConfig) -> Self {
Log {
config,
snapshot: OnceCell::new(),
}
}
/// Make log entry from serializable source
///
/// Returns bytes buffer, filled with header (currently only with entry size) and serialized entry, without any offset between each other.
/// ```
/// +---------+
/// |Entry len|
/// +---------+
/// | |
/// | Entry |
/// | |
/// +---------+
/// ```
fn make_log_entry<S>(source: &S) -> Result<Vec<u8>, PersistenceError>
where
S: Serialize,
{
let size = serialized_size(source).map_err(PersistenceError::SerializationError)?;
debug!("Log entry size: {}", size);
let capacity = size_of::<u64>()
+ TryInto::<usize>::try_into(size).map_err(PersistenceError::LogEntryTooBig)?;
let mut buf = Vec::with_capacity(capacity);
buf.extend(&size.to_le_bytes());
buf.resize(capacity, 0);
serialize_into(&mut buf[size_of::<u64>()..], source)
.map_err(PersistenceError::SerializationError)?;
Ok(buf)
}
/// Get buffer of log entries from byte source
async fn parse_log<T, S>(source: &mut S) -> Result<Vec<T>, PersistenceError>
where
T: DeserializeOwned,
S: AsyncSeek + AsyncRead + Unpin,
{
let mut entries = Vec::new();
let source_size = source
.seek(SeekFrom::End(0))
.await
.map_err(PersistenceError::from)?;
debug!("Log source size: {}", source_size);
source
.seek(SeekFrom::Start(0))
.await
.map_err(PersistenceError::from)?;
let mut buf = Vec::new();
while source
.seek(SeekFrom::Current(0))
.await
.map_err(PersistenceError::from)?
< source_size
{
let size = source.read_u64_le().await.map_err(PersistenceError::from)?;
debug!("Log entry size: {}", size);
buf.reserve(size.try_into().map_err(PersistenceError::LogEntryTooBig)?);
source
.take(size)
.read_buf(&mut buf)
.await
.map_err(PersistenceError::from)?;
entries.push(deserialize(&buf).map_err(PersistenceError::SerializationError)?);
buf.clear();
}
Ok(entries)
}
/// Appends [make_log_entry] result of `source` to `destination`
///
/// [make_log_entry]: Log::make_log_entry
async fn append<P, S>(&self, source: &S, destination: P) -> Result<(), PersistenceError>
where
P: AsRef<Path>,
S: Serialize,
{
let path = self.config.path.join(destination);
debug!("Appending to {}", path.display());
if let Some(parent) = path.parent() {
if !parent.is_dir() {
create_dir(&parent).await.map_err(PersistenceError::from)?;
}
}
OpenOptions::new()
.create(true)
.append(true)
.open(path)
.await
.map_err(PersistenceError::from)?
.write_all(&Self::make_log_entry(source)?)
.await
.map_err(PersistenceError::from)
}
/// Get log entries from `source` log file using [parse_log]
///
/// [parse_log]: Log::parse_log
async fn load<S, P>(&self, source: P) -> Result<Vec<S>, PersistenceError>
where
S: DeserializeOwned,
P: AsRef<Path>,
{
let path = self.config.path.join(source);
debug!("Loading from {}", path.display());
let mut file = OpenOptions::new()
.read(true)
.open(path)
.await
.map_err(PersistenceError::from)?;
Self::parse_log(&mut file).await
}
/// Append single event to `source` log file (usually queue name)
pub async fn persist_event<P>(
&self,
event: &Event<'_>,
source: P,
) -> Result<(), PersistenceError>
where
P: AsRef<Path>,
{
self.append(event, source.as_ref().join(QUEUE_FILE)).await
}
/// Restore database events from `source` log file (usually queue name)
///
/// If specified in [`PersistenceConfig`], compaction will be executed after successful loading.
pub async fn load_queue<P, DB>(&self, source: P) -> Result<Queue<DB>, PersistenceError>
where
P: AsRef<Path>,
DB: EventLog<Vec<Event<'static>>> + Serialize + DeserializeOwned,
{
let events = match self
.load::<Event, _>(source.as_ref().join(QUEUE_FILE))
.await
{
Ok(events) => events,
Err(PersistenceError::FileOpenError(e)) => {
error!("Log file not found: {}", e);
Vec::new()
}
Err(e) => return Err(e),
};
let database = if self.config.compaction {
let compaction_path = source.as_ref().join(QUEUE_COMPACTION_FILE);
let inner_db = match self.get_snapshot().load::<DB, _>(&compaction_path).await {
Ok(mut database) => {
database.apply_log(events);
database
}
Err(PersistenceError::FileOpenError(e)) => {
error!("Compaction file not found: {}", e);
DB::from_log(events)
}
Err(e) => return Err(e),
};
self.get_snapshot()
.persist(&inner_db, &compaction_path)
.await?;
match self.prune(&source).await {
Err(PersistenceError::FileOpenError(_)) | Ok(_) => (),
Err(e) => return Err(e),
};
inner_db
} else {
DB::from_log(events)
};
cfg_if! {
if #[cfg(feature = "replication")] {
// Thanks to GC threshold, it's currently impossible to use log driver
let replication_storage = match self.get_snapshot().load(source.as_ref().join(SNAPSHOT_REPLICATION_FILE)).await {
Ok(storage) => storage,
Err(PersistenceError::FileOpenError(e)) => {
error!("{}", e);
None
},
Err(e) => return Err(e)
};
let queue = Queue::new(database, replication_storage);
} else {
let queue = Queue::new(database);
}
}
Ok(queue)
}
/// Prune `queue` log file
async fn prune<P>(&self, queue: P) -> Result<(), PersistenceError>
where
P: AsRef<Path>,
{
let path = [&self.config.path, queue.as_ref(), QUEUE_FILE.as_ref()]
.iter()
.collect::<PathBuf>();
debug!("Pruning {}", path.display());
remove_file(path).await.map_err(PersistenceError::from)
}
/// Get shared [`Snapshot`] instance
fn get_snapshot(&self) -> &Snapshot<'_> {
self.snapshot.get_or_init(|| Snapshot::new(self.config))
}
}
#[cfg(test)]
mod tests {
use std::{borrow::Cow, io::Cursor};
use maybe_owned::MaybeOwned;
use spartan_lib::core::{
db::TreeDatabase,
dispatcher::StatusAwareDispatcher,
message::{builder::MessageBuilder, Message},
payload::Dispatchable,
};
use tempfile::{NamedTempFile, TempDir};
use super::*;
use crate::{config::persistence::Persistence, node::DB};
#[tokio::test]
async fn test_append_read() {
let file = NamedTempFile::new().unwrap();
let config = PersistenceConfig {
path: Cow::Borrowed(file.path().parent().unwrap()),
..Default::default()
};
let log = Log::new(&config);
log.append(&String::from("Hello, world"), file.path())
.await
.unwrap();
let entries = log.load::<String, _>(file.path()).await.unwrap();
assert_eq!(entries.len(), 1);
assert_eq!(entries.first().unwrap(), &String::from("Hello, world"));
}
#[tokio::test]
async fn test_empty_file_load() {
let file = NamedTempFile::new().unwrap();
let config = PersistenceConfig {
path: Cow::Borrowed(file.path().parent().unwrap()),
..Default::default()
};
let log = Log::new(&config);
let entries = log.load::<String, _>(file.path()).await.unwrap();
assert!(entries.is_empty());
}
#[tokio::test]
async fn test_serialize_log_entry() {
let entry = Log::make_log_entry(&vec![1u32, 2, 3]).unwrap();
let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entry))
.await
.unwrap();
assert_eq!(parsed.len(), 1);
assert_eq!(&*parsed.first().unwrap(), &[1, 2, 3]);
}
#[tokio::test]
async fn | () {
let mut entries = Vec::new();
entries.append(&mut Log::make_log_entry(&vec![1u32, 2, 3]).unwrap());
entries.append(&mut Log::make_log_entry(&vec![4, 5, 6]).unwrap());
entries.append(&mut Log::make_log_entry(&vec![7, 8, 9]).unwrap());
let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entries))
.await
.unwrap();
assert_eq!(parsed.len(), 3);
assert_eq!(parsed, vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]);
}
#[tokio::test]
async fn test_persist_and_restore_from_events() {
let tempdir = TempDir::new().expect("Unable to create temporary test directory");
let event = Event::Push(MaybeOwned::Owned(
MessageBuilder::default().body("Hello").compose().unwrap(),
));
let config = PersistenceConfig {
mode: Persistence::Log,
path: Cow::Borrowed(tempdir.path()),
timer: 0,
compaction: false,
};
let log = Log::new(&config);
log.persist_event(&event, "test").await.unwrap();
let queue: DB = log.load_queue("test").await.unwrap();
assert_eq!(queue.database().await.pop().unwrap().body(), "Hello");
}
#[tokio::test]
async fn test_compaction() {
let tempdir = TempDir::new().expect("Unable to create temporary test directory");
let event = Event::Push(MaybeOwned::Owned(
MessageBuilder::default().body("Hello").compose().unwrap(),
));
let config = PersistenceConfig {
mode: Persistence::Log,
path: Cow::Borrowed(tempdir.path()),
timer: 0,
compaction: true,
};
let log = Log::new(&config);
log.persist_event(&event, "test").await.unwrap();
let queue: DB = log.load_queue("test").await.unwrap();
assert_eq!(queue.database().await.pop().unwrap().body(), "Hello");
assert!(matches!(
log.load::<Event, _>(Path::new("test").join(QUEUE_FILE))
.await
.unwrap_err(),
PersistenceError::FileOpenError(_)
));
let snapshot = Snapshot::new(&config);
let mut database: TreeDatabase<Message> = snapshot
.load(Path::new("test").join(QUEUE_COMPACTION_FILE))
.await
.unwrap();
assert_eq!(database.pop().unwrap().body(), "Hello");
}
}
| test_multiple_log_entries | identifier_name |
mod.rs | use std::{
convert::TryInto,
io::SeekFrom,
mem::size_of,
path::{Path, PathBuf},
};
use bincode::{deserialize, serialize_into, serialized_size};
use cfg_if::cfg_if;
use once_cell::sync::OnceCell;
use serde::{de::DeserializeOwned, Serialize};
use tokio::{
fs::{create_dir, remove_file, OpenOptions},
io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWriteExt},
};
#[cfg(feature = "replication")]
use crate::node::persistence::snapshot::REPLICATION_FILE as SNAPSHOT_REPLICATION_FILE;
use crate::{
config::persistence::PersistenceConfig,
node::{
event::{Event, EventLog},
persistence::{snapshot::Snapshot, PersistenceError},
Queue,
},
};
/// Queue log file name
const QUEUE_FILE: &str = "queue_log";
/// Queue compacted log file name
const QUEUE_COMPACTION_FILE: &str = "queue_compacted_log";
pub struct Log<'c> {
/// Persistence config
config: &'c PersistenceConfig<'c>,
/// Internal instance of [`Snapshot`] driver
///
/// Due to limitations of current replication storage implementation
/// it is impossible to rely only on [`Log`] driver to save event log,
/// so [`Snapshot`] driver is being used to fill the gap.
snapshot: OnceCell<Snapshot<'c>>,
}
impl<'c> Log<'c> {
pub fn new(config: &'c PersistenceConfig) -> Self {
Log {
config,
snapshot: OnceCell::new(),
}
}
/// Make log entry from serializable source
///
/// Returns bytes buffer, filled with header (currently only with entry size) and serialized entry, without any offset between each other.
/// ```
/// +---------+
/// |Entry len|
/// +---------+
/// | |
/// | Entry |
/// | |
/// +---------+
/// ```
fn make_log_entry<S>(source: &S) -> Result<Vec<u8>, PersistenceError>
where
S: Serialize,
{
let size = serialized_size(source).map_err(PersistenceError::SerializationError)?;
debug!("Log entry size: {}", size);
let capacity = size_of::<u64>()
+ TryInto::<usize>::try_into(size).map_err(PersistenceError::LogEntryTooBig)?;
let mut buf = Vec::with_capacity(capacity);
buf.extend(&size.to_le_bytes());
buf.resize(capacity, 0);
serialize_into(&mut buf[size_of::<u64>()..], source)
.map_err(PersistenceError::SerializationError)?;
Ok(buf)
}
/// Get buffer of log entries from byte source
async fn parse_log<T, S>(source: &mut S) -> Result<Vec<T>, PersistenceError>
where
T: DeserializeOwned,
S: AsyncSeek + AsyncRead + Unpin,
{
let mut entries = Vec::new();
let source_size = source
.seek(SeekFrom::End(0))
.await
.map_err(PersistenceError::from)?;
debug!("Log source size: {}", source_size);
source
.seek(SeekFrom::Start(0))
.await
.map_err(PersistenceError::from)?;
let mut buf = Vec::new();
while source
.seek(SeekFrom::Current(0))
.await
.map_err(PersistenceError::from)?
< source_size | debug!("Log entry size: {}", size);
buf.reserve(size.try_into().map_err(PersistenceError::LogEntryTooBig)?);
source
.take(size)
.read_buf(&mut buf)
.await
.map_err(PersistenceError::from)?;
entries.push(deserialize(&buf).map_err(PersistenceError::SerializationError)?);
buf.clear();
}
Ok(entries)
}
/// Appends [make_log_entry] result of `source` to `destination`
///
/// [make_log_entry]: Log::make_log_entry
async fn append<P, S>(&self, source: &S, destination: P) -> Result<(), PersistenceError>
where
P: AsRef<Path>,
S: Serialize,
{
let path = self.config.path.join(destination);
debug!("Appending to {}", path.display());
if let Some(parent) = path.parent() {
if !parent.is_dir() {
create_dir(&parent).await.map_err(PersistenceError::from)?;
}
}
OpenOptions::new()
.create(true)
.append(true)
.open(path)
.await
.map_err(PersistenceError::from)?
.write_all(&Self::make_log_entry(source)?)
.await
.map_err(PersistenceError::from)
}
/// Get log entries from `source` log file using [parse_log]
///
/// [parse_log]: Log::parse_log
async fn load<S, P>(&self, source: P) -> Result<Vec<S>, PersistenceError>
where
S: DeserializeOwned,
P: AsRef<Path>,
{
let path = self.config.path.join(source);
debug!("Loading from {}", path.display());
let mut file = OpenOptions::new()
.read(true)
.open(path)
.await
.map_err(PersistenceError::from)?;
Self::parse_log(&mut file).await
}
/// Append single event to `source` log file (usually queue name)
pub async fn persist_event<P>(
&self,
event: &Event<'_>,
source: P,
) -> Result<(), PersistenceError>
where
P: AsRef<Path>,
{
self.append(event, source.as_ref().join(QUEUE_FILE)).await
}
/// Restore database events from `source` log file (usually queue name)
///
/// If specified in [`PersistenceConfig`], compaction will be executed after successful loading.
pub async fn load_queue<P, DB>(&self, source: P) -> Result<Queue<DB>, PersistenceError>
where
P: AsRef<Path>,
DB: EventLog<Vec<Event<'static>>> + Serialize + DeserializeOwned,
{
let events = match self
.load::<Event, _>(source.as_ref().join(QUEUE_FILE))
.await
{
Ok(events) => events,
Err(PersistenceError::FileOpenError(e)) => {
error!("Log file not found: {}", e);
Vec::new()
}
Err(e) => return Err(e),
};
let database = if self.config.compaction {
let compaction_path = source.as_ref().join(QUEUE_COMPACTION_FILE);
let inner_db = match self.get_snapshot().load::<DB, _>(&compaction_path).await {
Ok(mut database) => {
database.apply_log(events);
database
}
Err(PersistenceError::FileOpenError(e)) => {
error!("Compaction file not found: {}", e);
DB::from_log(events)
}
Err(e) => return Err(e),
};
self.get_snapshot()
.persist(&inner_db, &compaction_path)
.await?;
match self.prune(&source).await {
Err(PersistenceError::FileOpenError(_)) | Ok(_) => (),
Err(e) => return Err(e),
};
inner_db
} else {
DB::from_log(events)
};
cfg_if! {
if #[cfg(feature = "replication")] {
// Thanks to GC threshold, it's currently impossible to use log driver
let replication_storage = match self.get_snapshot().load(source.as_ref().join(SNAPSHOT_REPLICATION_FILE)).await {
Ok(storage) => storage,
Err(PersistenceError::FileOpenError(e)) => {
error!("{}", e);
None
},
Err(e) => return Err(e)
};
let queue = Queue::new(database, replication_storage);
} else {
let queue = Queue::new(database);
}
}
Ok(queue)
}
/// Prune `queue` log file
async fn prune<P>(&self, queue: P) -> Result<(), PersistenceError>
where
P: AsRef<Path>,
{
let path = [&self.config.path, queue.as_ref(), QUEUE_FILE.as_ref()]
.iter()
.collect::<PathBuf>();
debug!("Pruning {}", path.display());
remove_file(path).await.map_err(PersistenceError::from)
}
/// Get shared [`Snapshot`] instance
fn get_snapshot(&self) -> &Snapshot<'_> {
self.snapshot.get_or_init(|| Snapshot::new(self.config))
}
}
#[cfg(test)]
mod tests {
use std::{borrow::Cow, io::Cursor};
use maybe_owned::MaybeOwned;
use spartan_lib::core::{
db::TreeDatabase,
dispatcher::StatusAwareDispatcher,
message::{builder::MessageBuilder, Message},
payload::Dispatchable,
};
use tempfile::{NamedTempFile, TempDir};
use super::*;
use crate::{config::persistence::Persistence, node::DB};
#[tokio::test]
async fn test_append_read() {
let file = NamedTempFile::new().unwrap();
let config = PersistenceConfig {
path: Cow::Borrowed(file.path().parent().unwrap()),
..Default::default()
};
let log = Log::new(&config);
log.append(&String::from("Hello, world"), file.path())
.await
.unwrap();
let entries = log.load::<String, _>(file.path()).await.unwrap();
assert_eq!(entries.len(), 1);
assert_eq!(entries.first().unwrap(), &String::from("Hello, world"));
}
#[tokio::test]
async fn test_empty_file_load() {
let file = NamedTempFile::new().unwrap();
let config = PersistenceConfig {
path: Cow::Borrowed(file.path().parent().unwrap()),
..Default::default()
};
let log = Log::new(&config);
let entries = log.load::<String, _>(file.path()).await.unwrap();
assert!(entries.is_empty());
}
#[tokio::test]
async fn test_serialize_log_entry() {
let entry = Log::make_log_entry(&vec![1u32, 2, 3]).unwrap();
let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entry))
.await
.unwrap();
assert_eq!(parsed.len(), 1);
assert_eq!(&*parsed.first().unwrap(), &[1, 2, 3]);
}
#[tokio::test]
async fn test_multiple_log_entries() {
let mut entries = Vec::new();
entries.append(&mut Log::make_log_entry(&vec![1u32, 2, 3]).unwrap());
entries.append(&mut Log::make_log_entry(&vec![4, 5, 6]).unwrap());
entries.append(&mut Log::make_log_entry(&vec![7, 8, 9]).unwrap());
let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entries))
.await
.unwrap();
assert_eq!(parsed.len(), 3);
assert_eq!(parsed, vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]);
}
#[tokio::test]
async fn test_persist_and_restore_from_events() {
let tempdir = TempDir::new().expect("Unable to create temporary test directory");
let event = Event::Push(MaybeOwned::Owned(
MessageBuilder::default().body("Hello").compose().unwrap(),
));
let config = PersistenceConfig {
mode: Persistence::Log,
path: Cow::Borrowed(tempdir.path()),
timer: 0,
compaction: false,
};
let log = Log::new(&config);
log.persist_event(&event, "test").await.unwrap();
let queue: DB = log.load_queue("test").await.unwrap();
assert_eq!(queue.database().await.pop().unwrap().body(), "Hello");
}
#[tokio::test]
async fn test_compaction() {
let tempdir = TempDir::new().expect("Unable to create temporary test directory");
let event = Event::Push(MaybeOwned::Owned(
MessageBuilder::default().body("Hello").compose().unwrap(),
));
let config = PersistenceConfig {
mode: Persistence::Log,
path: Cow::Borrowed(tempdir.path()),
timer: 0,
compaction: true,
};
let log = Log::new(&config);
log.persist_event(&event, "test").await.unwrap();
let queue: DB = log.load_queue("test").await.unwrap();
assert_eq!(queue.database().await.pop().unwrap().body(), "Hello");
assert!(matches!(
log.load::<Event, _>(Path::new("test").join(QUEUE_FILE))
.await
.unwrap_err(),
PersistenceError::FileOpenError(_)
));
let snapshot = Snapshot::new(&config);
let mut database: TreeDatabase<Message> = snapshot
.load(Path::new("test").join(QUEUE_COMPACTION_FILE))
.await
.unwrap();
assert_eq!(database.pop().unwrap().body(), "Hello");
}
} | {
let size = source.read_u64_le().await.map_err(PersistenceError::from)?;
| random_line_split |
critical_cliques.rs | use crate::{
graph::{GraphWeight, IndexMap},
Graph, Weight,
};
#[derive(Debug, Clone, Default)]
pub struct CritClique {
pub vertices: Vec<usize>,
}
pub struct CritCliqueGraph {
pub cliques: Vec<CritClique>,
pub graph: Graph<Weight>,
}
impl CritCliqueGraph {
pub fn to_petgraph(&self) -> petgraph::Graph<String, u8, petgraph::Undirected, u32> {
use petgraph::prelude::NodeIndex;
let mut pg = petgraph::Graph::with_capacity(self.graph.size(), 0);
for u in 0..self.graph.size() {
pg.add_node(
self.cliques[u]
.vertices
.iter()
.map(|i| i.to_string())
.collect::<Vec<_>>()
.join(", "),
);
}
for u in 0..self.graph.size() {
for v in (u + 1)..self.graph.size() {
if self.graph.get(u, v) > Weight::ZERO {
pg.add_edge(NodeIndex::new(u), NodeIndex::new(v), 0);
}
}
}
pg
}
}
pub fn build_crit_clique_graph(g: &Graph<Weight>) -> CritCliqueGraph {
let mut cliques = Vec::new();
// TODO: This looks at least O(n^2) but should apparently be do-able in O(n + m), so have
// another look at making this more efficient.
let mut visited = vec![false; g.size()];
for u in g.nodes() {
if visited[u] {
continue;
}
visited[u] = true;
let mut clique = CritClique::default();
clique.vertices.push(u);
for v in g.nodes() {
if visited[v] {
continue;
}
// TODO: Is it maybe worth storing neighbor sets instead of recomputing them?
if g.closed_neighbors(u).eq(g.closed_neighbors(v)) {
clique.vertices.push(v);
visited[v] = true;
}
}
cliques.push(clique);
}
let mut crit_graph = Graph::new(cliques.len());
for c1 in 0..cliques.len() {
for c2 in 0..cliques.len() {
if c1 == c2 {
continue;
}
if should_be_neighbors(g, &cliques[c1], &cliques[c2]) {
crit_graph.set(c1, c2, Weight::ONE);
}
}
}
CritCliqueGraph {
cliques,
graph: crit_graph,
}
}
fn should_be_neighbors(g: &Graph<Weight>, c1: &CritClique, c2: &CritClique) -> bool {
for &u in &c1.vertices {
for &v in &c2.vertices {
if !g.has_edge(u, v) {
return false;
}
}
}
true
}
/// Performs a parameter-independent reduction on the graph `g` by constructing the critical clique
/// graph and merging all critical cliques into a single vertex.
/// This assumes that the input graph is unweighted (i.e. all weights are +1 or -1 exactly). The
/// reduced graph will be weighted however.
pub fn merge_cliques(
g: &Graph<Weight>,
imap: &IndexMap,
_path_log: &mut String,
) -> (Graph<Weight>, IndexMap) {
let mut crit = build_crit_clique_graph(g);
let mut crit_imap = IndexMap::empty(crit.graph.size());
for u in 0..crit.graph.size() {
for v in (u + 1)..crit.graph.size() {
//let uv = crit.graph.get_mut_direct(u, v);
let uv = crit.graph.get(u, v);
let sign = uv.signum();
let weight = crit.cliques[u].vertices.len() * crit.cliques[v].vertices.len();
crit.graph.set(u, v, (weight as Weight) * sign);
}
crit_imap.set(
u,
crit.cliques[u]
.vertices
.iter()
.flat_map(|v| imap[*v].iter().copied())
.collect(),
);
if crit_imap[u].len() > 1 {
append_path_log_dir!(_path_log, "critcliques, merged {:?}\n", crit_imap[u]);
}
}
(crit.graph, crit_imap)
}
// This kernel can only straightforwardly be applied to unweighted instances.
// However, before even starting the parameter search, we reduce the unweighted graph by converting
// it into a weighted one. Thus we cannot use this kernel at the moment.
/*
// Chen and Meng: A 2k Kernel for the Cluster Editing Problem, 2010
pub fn apply_reductions(
g: &mut Graph,
imap: &mut IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
) -> bool {
let mut any_rules_applied = true;
while any_rules_applied {
any_rules_applied = false;
let mut rule5_state = None;
let crit = build_crit_clique_graph(g);
for (clique_idx, clique) in crit.cliques.iter().enumerate() {
let (clique_neighbors, clique_crit_neighbor_count) =
get_clique_neighbors(g, clique_idx, &crit);
let edit_set =
calculate_edits_to_remove_clique_and_neighborhood(g, clique, &clique_neighbors);
let clique_len = clique.vertices.len();
let neighbors_len = clique_neighbors.len();
let total_edit_degree = edit_set.total_edit_degree;
let rule1_applicable = clique_len as f32 > *k;
let rule2_applicable =
clique_len >= neighbors_len && clique_len + neighbors_len > total_edit_degree;
let mut rule3_applicable = false;
let mut rule4_applicable = false;
let mut rule4_vertex = None;
let mut clique_neighbors2 = None;
if !rule1_applicable && !rule2_applicable {
// Only calculate this if the other two aren't already true since it's a bit more work
if clique_len < neighbors_len && clique_len + neighbors_len > total_edit_degree {
let neighbors2 = get_clique_neighbors2(g, clique_idx, &crit);
let threshold = (clique_len + neighbors_len) / 2;
for &u in &neighbors2 {
let count = count_intersection(g.neighbors(u), &clique_neighbors);
if count > threshold {
rule4_vertex = Some(u);
break;
}
}
if rule5_state.is_none() {
rule5_state = Some((
clique.clone(),
clique_neighbors.clone(),
clique_crit_neighbor_count,
neighbors2.clone(),
));
}
rule3_applicable = rule4_vertex.is_none();
rule4_applicable = rule4_vertex.is_some();
clique_neighbors2 = Some(neighbors2);
}
}
if rule1_applicable || rule2_applicable || rule3_applicable {
let has_reduced = make_clique_and_neighborhood_disjoint_and_remove(
g,
imap,
k,
edits,
edit_set,
&clique,
&clique_neighbors,
);
if *k < 0.0 {
return false;
}
if has_reduced {
any_rules_applied = true;
break;
}
}
if rule4_applicable {
let has_reduced = apply_rule4(
g,
imap,
k,
edits,
&clique_neighbors,
&clique_neighbors2.unwrap(),
rule4_vertex.unwrap(),
);
if *k < 0.0 {
return false;
}
if has_reduced {
any_rules_applied = true;
break;
}
}
}
if !any_rules_applied && rule5_state.is_some() {
// If we got here, either no rule was applicable or they did not result in any further
// reduction, but we found a case where rule 5 should now be applicable.
// The paper claims that the above condition and the fact that the other rules
// don#t reduce it further is sufficient to imply this condition. Let's check to be
// safe for now :)
// TODO: Might remove this check if I'm convinced it's safe.
let (clique, clique_neighbors, clique_crit_neighbor_count, clique_neighbors2) =
rule5_state.unwrap();
assert!(clique_crit_neighbor_count == 1 && clique_neighbors2.len() == 1);
let has_reduced = apply_rule5(g, imap, k, edits, &clique, &clique_neighbors);
if !has_reduced {
// All the other rules didn't apply, so we got here, and now 5 didn't do anything
// either. We're done now.
break;
}
any_rules_applied = true;
}
let new_count = g.present_node_count();
if new_count == g.size() {
continue;
}
// Construct a new graph and imap with the vertices we marked for removal actually removed. The
// new imap still maps from indices into that new graph to the vertices of the original graph
// the algorithm got as input.
// TODO: Figure out if it's necessary to do this every `while` iteration or if the
// reductions are all still valid without it; would also be nice to avoid recomputing the
// crit clique graph when it's not necessary.
// TODO: Possibly test whether it's faster to just keep the removed_g map around in a larger
// scope rather than creating the graph here.
if new_count == 0 {
return true;
}
let mut new_g = Graph::new(new_count);
let mut new_imap = IndexMap::new(new_count);
let mut new_vertex = 0;
let mut reverse_imap = vec![0; g.size()];
for u in 0..g.size() {
if !g.is_present(u) {
continue;
}
for v in g.neighbors(u) {
if v > u {
continue;
}
new_g.set_direct(reverse_imap[v], new_vertex, g.get_direct(v, u));
}
reverse_imap[u] = new_vertex;
new_imap[new_vertex] = imap.take(u);
new_vertex += 1;
}
*g = new_g;
*imap = new_imap;
}
true
}
// TODO: COOOOMMMEEEENNNNTTTTSSSS!!!!
/// Gets all the vertices that are neighbors of the critical clique, but not in the clique
/// themselves. No specific order is guaranteed.
fn get_clique_neighbors(
g: &Graph,
clique_idx: usize,
crit_graph: &CritCliqueGraph,
) -> (Vec<usize>, usize) {
let crit_neighbors = crit_graph.graph.neighbors(clique_idx);
let mut count = 0;
let neighborhood = crit_neighbors
.flat_map(|n| {
count += 1;
&crit_graph.cliques[n].vertices
})
.copied()
.filter(|&u| g.is_present(u))
.collect();
(neighborhood, count)
}
fn get_clique_neighbors2(g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph) -> Vec<usize> {
let crit_neighbors = crit_graph.graph.neighbors(clique_idx).collect::<Vec<_>>();
crit_neighbors
.iter()
.flat_map(|&n| {
crit_graph
.graph
.neighbors(n)
.filter(|n2| !crit_neighbors.contains(n2))
.flat_map(|n2| &crit_graph.cliques[n2].vertices)
})
.copied()
.filter(|&u| g.is_present(u))
.collect()
}
fn count_intersection(n1: impl Iterator<Item = usize>, n2: &[usize]) -> usize {
let mut count = 0;
for u in n1 {
if n2.contains(&u) {
count += 1;
}
}
count
}
struct EditSet {
inserts: Vec<(usize, usize)>,
deletions: Vec<(usize, usize)>,
total_edit_degree: usize,
}
fn calculate_edits_to_remove_clique_and_neighborhood(
g: &Graph,
clique: &CritClique,
clique_neighbors: &[usize],
) -> EditSet {
// Everything in the clique is already connected with the rest of the clique (it's a clique!).
// All the neighbors are also connected to all the vertices in the clique, because all the
// clique vertices have the *same set* of neighbors outside the clique (it's a *critical*
// clique!).
// So we only need to add edges between the different groups of neighbors.
//
// The only edges that we need to remove are between the neighbors of the clique to any nodes
// that are neither in the neighbors nor the clique itself. (The vertices in the clique
// obviously don't have any such neighbors, so there's nothing to remove.)
let mut edits = EditSet {
inserts: Vec::new(),
deletions: Vec::new(),
total_edit_degree: 0,
};
for i in 0..clique_neighbors.len() {
let u = clique_neighbors[i];
if !g.is_present(u) {
continue;
}
// Add edges to other clique neighbors.
for j in (i + 1)..clique_neighbors.len() {
let v = clique_neighbors[j];
if !g.is_present(v) {
continue;
}
if g.get(u, v) < 0.0 {
edits.inserts.push((u, v));
// Increase total degree twice: we only add the (u, v) edge once but it would be
// counted in the edit degree for both u and v
edits.total_edit_degree += 2;
}
}
// Remove edges to unrelated vertices.
// TODO: Try using a BTreeSet for neighbors and vertices, or using some kind of other iteration
// strategy to avoid the linear search here.
for v in 0..g.size() {
if u == v || !g.is_present(v) {
continue;
}
if clique_neighbors.contains(&v) || clique.vertices.contains(&v) {
continue;
}
if g.get(u, v) > 0.0 {
edits.deletions.push((u, v));
// Here the degree is only increased once: it would only count for u, since v isn't
// even in the neighborhood and thus not considered.
edits.total_edit_degree += 1;
}
}
}
edits
}
fn make_clique_and_neighborhood_disjoint_and_remove(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
edits_to_perform: EditSet,
clique: &CritClique,
clique_neighbors: &[usize],
) -> bool {
for (u, v) in edits_to_perform.inserts {
let uv = g.get_mut(u, v);
*k += *uv;
Edit::insert(edits, &imap, u, v);
*uv = f32::INFINITY;
}
for (u, v) in edits_to_perform.deletions {
let uv = g.get_mut(u, v);
*k -= *uv;
Edit::delete(edits, &imap, u, v);
*uv = f32::NEG_INFINITY;
}
// Now mark the clique and its neighbors as "removed" from the graph, so future reduction and
// algorithm steps ignore it. (It is now a disjoint clique, i.e. already done.)
for &u in clique_neighbors {
g.set_present(u, false);
}
for &u in &clique.vertices {
g.set_present(u, false);
}
clique_neighbors.len() > 0 || clique.vertices.len() > 0
}
fn apply_rule4(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
clique_neighbors: &[usize],
clique_neighbors2: &[usize],
u: usize,
) -> bool {
// Insert edges in neighborhood to make clique+neighborhood a clique.
let mut has_done_edit = false;
for i in 0..clique_neighbors.len() {
let v = clique_neighbors[i];
// Add edges to other clique neighbors.
for j in (i + 1)..clique_neighbors.len() {
let w = clique_neighbors[j];
let vw = g.get_mut(v, w);
if *vw < 0.0 {
*k += *vw;
Edit::insert(edits, &imap, v, w);
*vw = f32::INFINITY;
has_done_edit = true;
}
}
}
// Remove edges between clique_neighbors and clique_neighbors2-u
for &v in clique_neighbors {
for &w in clique_neighbors2 {
if w == u {
continue;
}
let vw = g.get_mut(v, w);
if *vw > 0.0 {
*k -= *vw;
Edit::delete(edits, &imap, v, w);
*vw = f32::NEG_INFINITY;
has_done_edit = true;
}
}
}
has_done_edit
}
fn apply_rule5(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
clique: &CritClique,
clique_neighbors: &[usize],
) -> bool {
// Can pick any set of |clique| vertices in clique_neighbors, we'll just use the first |clique|
// verts.
// Then, remove (clique + that set) from G, and set k = k - |clique|.
// Note that the modification to k does not actually correspond directly to the edge edits we
// do, but this is what the paper has proven to be correct *shrug*.
let clique_size = clique.vertices.len();
let to_remove = clique
.vertices
.iter()
.chain(clique_neighbors[..clique_size].iter())
.copied()
.collect::<Vec<_>>();
for &u in &to_remove {
g.set_present(u, false);
for v in 0..g.size() {
if !g.is_present(v) {
continue;
}
let uv = g.get_mut(u, v);
if *uv > 0.0 {
Edit::delete(edits, imap, u, v);
*uv = f32::NEG_INFINITY;
}
}
}
*k = *k - clique_size as f32;
to_remove.len() > 0
}
*/
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn crit_graph() |
}
| {
// This is the example from "Guo: A more effective linear kernelization for cluster
// editing, 2009", Fig. 1
let mut graph = Graph::new(9);
graph.set(0, 1, Weight::ONE);
graph.set(0, 2, Weight::ONE);
graph.set(1, 2, Weight::ONE);
graph.set(2, 3, Weight::ONE);
graph.set(2, 4, Weight::ONE);
graph.set(3, 4, Weight::ONE);
graph.set(3, 5, Weight::ONE);
graph.set(3, 6, Weight::ONE);
graph.set(4, 5, Weight::ONE);
graph.set(4, 6, Weight::ONE);
graph.set(5, 6, Weight::ONE);
graph.set(5, 7, Weight::ONE);
graph.set(5, 8, Weight::ONE);
let crit = build_crit_clique_graph(&graph);
assert_eq!(crit.cliques[0].vertices, vec![0, 1]);
assert_eq!(crit.cliques[1].vertices, vec![2]);
assert_eq!(crit.cliques[2].vertices, vec![3, 4]);
assert_eq!(crit.cliques[3].vertices, vec![5]);
assert_eq!(crit.cliques[4].vertices, vec![6]);
assert_eq!(crit.cliques[5].vertices, vec![7]);
assert_eq!(crit.cliques[6].vertices, vec![8]);
assert_eq!(crit.graph.neighbors(0).collect::<Vec<_>>(), vec![1]);
assert_eq!(crit.graph.neighbors(1).collect::<Vec<_>>(), vec![0, 2]);
assert_eq!(crit.graph.neighbors(2).collect::<Vec<_>>(), vec![1, 3, 4]);
assert_eq!(
crit.graph.neighbors(3).collect::<Vec<_>>(),
vec![2, 4, 5, 6]
);
assert_eq!(crit.graph.neighbors(4).collect::<Vec<_>>(), vec![2, 3]);
assert_eq!(crit.graph.neighbors(5).collect::<Vec<_>>(), vec![3]);
assert_eq!(crit.graph.neighbors(6).collect::<Vec<_>>(), vec![3]);
} | identifier_body |
critical_cliques.rs | use crate::{
graph::{GraphWeight, IndexMap},
Graph, Weight,
};
#[derive(Debug, Clone, Default)]
pub struct CritClique {
pub vertices: Vec<usize>,
}
pub struct CritCliqueGraph {
pub cliques: Vec<CritClique>,
pub graph: Graph<Weight>,
}
impl CritCliqueGraph {
pub fn to_petgraph(&self) -> petgraph::Graph<String, u8, petgraph::Undirected, u32> {
use petgraph::prelude::NodeIndex;
let mut pg = petgraph::Graph::with_capacity(self.graph.size(), 0);
for u in 0..self.graph.size() {
pg.add_node(
self.cliques[u]
.vertices
.iter()
.map(|i| i.to_string())
.collect::<Vec<_>>()
.join(", "),
);
}
for u in 0..self.graph.size() {
for v in (u + 1)..self.graph.size() {
if self.graph.get(u, v) > Weight::ZERO {
pg.add_edge(NodeIndex::new(u), NodeIndex::new(v), 0);
}
}
}
pg
}
}
pub fn build_crit_clique_graph(g: &Graph<Weight>) -> CritCliqueGraph {
let mut cliques = Vec::new();
// TODO: This looks at least O(n^2) but should apparently be do-able in O(n + m), so have
// another look at making this more efficient.
let mut visited = vec![false; g.size()];
for u in g.nodes() {
if visited[u] |
visited[u] = true;
let mut clique = CritClique::default();
clique.vertices.push(u);
for v in g.nodes() {
if visited[v] {
continue;
}
// TODO: Is it maybe worth storing neighbor sets instead of recomputing them?
if g.closed_neighbors(u).eq(g.closed_neighbors(v)) {
clique.vertices.push(v);
visited[v] = true;
}
}
cliques.push(clique);
}
let mut crit_graph = Graph::new(cliques.len());
for c1 in 0..cliques.len() {
for c2 in 0..cliques.len() {
if c1 == c2 {
continue;
}
if should_be_neighbors(g, &cliques[c1], &cliques[c2]) {
crit_graph.set(c1, c2, Weight::ONE);
}
}
}
CritCliqueGraph {
cliques,
graph: crit_graph,
}
}
fn should_be_neighbors(g: &Graph<Weight>, c1: &CritClique, c2: &CritClique) -> bool {
for &u in &c1.vertices {
for &v in &c2.vertices {
if !g.has_edge(u, v) {
return false;
}
}
}
true
}
/// Performs a parameter-independent reduction on the graph `g` by constructing the critical clique
/// graph and merging all critical cliques into a single vertex.
/// This assumes that the input graph is unweighted (i.e. all weights are +1 or -1 exactly). The
/// reduced graph will be weighted however.
pub fn merge_cliques(
g: &Graph<Weight>,
imap: &IndexMap,
_path_log: &mut String,
) -> (Graph<Weight>, IndexMap) {
let mut crit = build_crit_clique_graph(g);
let mut crit_imap = IndexMap::empty(crit.graph.size());
for u in 0..crit.graph.size() {
for v in (u + 1)..crit.graph.size() {
//let uv = crit.graph.get_mut_direct(u, v);
let uv = crit.graph.get(u, v);
let sign = uv.signum();
let weight = crit.cliques[u].vertices.len() * crit.cliques[v].vertices.len();
crit.graph.set(u, v, (weight as Weight) * sign);
}
crit_imap.set(
u,
crit.cliques[u]
.vertices
.iter()
.flat_map(|v| imap[*v].iter().copied())
.collect(),
);
if crit_imap[u].len() > 1 {
append_path_log_dir!(_path_log, "critcliques, merged {:?}\n", crit_imap[u]);
}
}
(crit.graph, crit_imap)
}
// This kernel can only straightforwardly be applied to unweighted instances.
// However, before even starting the parameter search, we reduce the unweighted graph by converting
// it into a weighted one. Thus we cannot use this kernel at the moment.
/*
// Chen and Meng: A 2k Kernel for the Cluster Editing Problem, 2010
pub fn apply_reductions(
g: &mut Graph,
imap: &mut IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
) -> bool {
let mut any_rules_applied = true;
while any_rules_applied {
any_rules_applied = false;
let mut rule5_state = None;
let crit = build_crit_clique_graph(g);
for (clique_idx, clique) in crit.cliques.iter().enumerate() {
let (clique_neighbors, clique_crit_neighbor_count) =
get_clique_neighbors(g, clique_idx, &crit);
let edit_set =
calculate_edits_to_remove_clique_and_neighborhood(g, clique, &clique_neighbors);
let clique_len = clique.vertices.len();
let neighbors_len = clique_neighbors.len();
let total_edit_degree = edit_set.total_edit_degree;
let rule1_applicable = clique_len as f32 > *k;
let rule2_applicable =
clique_len >= neighbors_len && clique_len + neighbors_len > total_edit_degree;
let mut rule3_applicable = false;
let mut rule4_applicable = false;
let mut rule4_vertex = None;
let mut clique_neighbors2 = None;
if !rule1_applicable && !rule2_applicable {
// Only calculate this if the other two aren't already true since it's a bit more work
if clique_len < neighbors_len && clique_len + neighbors_len > total_edit_degree {
let neighbors2 = get_clique_neighbors2(g, clique_idx, &crit);
let threshold = (clique_len + neighbors_len) / 2;
for &u in &neighbors2 {
let count = count_intersection(g.neighbors(u), &clique_neighbors);
if count > threshold {
rule4_vertex = Some(u);
break;
}
}
if rule5_state.is_none() {
rule5_state = Some((
clique.clone(),
clique_neighbors.clone(),
clique_crit_neighbor_count,
neighbors2.clone(),
));
}
rule3_applicable = rule4_vertex.is_none();
rule4_applicable = rule4_vertex.is_some();
clique_neighbors2 = Some(neighbors2);
}
}
if rule1_applicable || rule2_applicable || rule3_applicable {
let has_reduced = make_clique_and_neighborhood_disjoint_and_remove(
g,
imap,
k,
edits,
edit_set,
&clique,
&clique_neighbors,
);
if *k < 0.0 {
return false;
}
if has_reduced {
any_rules_applied = true;
break;
}
}
if rule4_applicable {
let has_reduced = apply_rule4(
g,
imap,
k,
edits,
&clique_neighbors,
&clique_neighbors2.unwrap(),
rule4_vertex.unwrap(),
);
if *k < 0.0 {
return false;
}
if has_reduced {
any_rules_applied = true;
break;
}
}
}
if !any_rules_applied && rule5_state.is_some() {
// If we got here, either no rule was applicable or they did not result in any further
// reduction, but we found a case where rule 5 should now be applicable.
// The paper claims that the above condition and the fact that the other rules
// don#t reduce it further is sufficient to imply this condition. Let's check to be
// safe for now :)
// TODO: Might remove this check if I'm convinced it's safe.
let (clique, clique_neighbors, clique_crit_neighbor_count, clique_neighbors2) =
rule5_state.unwrap();
assert!(clique_crit_neighbor_count == 1 && clique_neighbors2.len() == 1);
let has_reduced = apply_rule5(g, imap, k, edits, &clique, &clique_neighbors);
if !has_reduced {
// All the other rules didn't apply, so we got here, and now 5 didn't do anything
// either. We're done now.
break;
}
any_rules_applied = true;
}
let new_count = g.present_node_count();
if new_count == g.size() {
continue;
}
// Construct a new graph and imap with the vertices we marked for removal actually removed. The
// new imap still maps from indices into that new graph to the vertices of the original graph
// the algorithm got as input.
// TODO: Figure out if it's necessary to do this every `while` iteration or if the
// reductions are all still valid without it; would also be nice to avoid recomputing the
// crit clique graph when it's not necessary.
// TODO: Possibly test whether it's faster to just keep the removed_g map around in a larger
// scope rather than creating the graph here.
if new_count == 0 {
return true;
}
let mut new_g = Graph::new(new_count);
let mut new_imap = IndexMap::new(new_count);
let mut new_vertex = 0;
let mut reverse_imap = vec![0; g.size()];
for u in 0..g.size() {
if !g.is_present(u) {
continue;
}
for v in g.neighbors(u) {
if v > u {
continue;
}
new_g.set_direct(reverse_imap[v], new_vertex, g.get_direct(v, u));
}
reverse_imap[u] = new_vertex;
new_imap[new_vertex] = imap.take(u);
new_vertex += 1;
}
*g = new_g;
*imap = new_imap;
}
true
}
// TODO: COOOOMMMEEEENNNNTTTTSSSS!!!!
/// Gets all the vertices that are neighbors of the critical clique, but not in the clique
/// themselves. No specific order is guaranteed.
fn get_clique_neighbors(
g: &Graph,
clique_idx: usize,
crit_graph: &CritCliqueGraph,
) -> (Vec<usize>, usize) {
let crit_neighbors = crit_graph.graph.neighbors(clique_idx);
let mut count = 0;
let neighborhood = crit_neighbors
.flat_map(|n| {
count += 1;
&crit_graph.cliques[n].vertices
})
.copied()
.filter(|&u| g.is_present(u))
.collect();
(neighborhood, count)
}
fn get_clique_neighbors2(g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph) -> Vec<usize> {
let crit_neighbors = crit_graph.graph.neighbors(clique_idx).collect::<Vec<_>>();
crit_neighbors
.iter()
.flat_map(|&n| {
crit_graph
.graph
.neighbors(n)
.filter(|n2| !crit_neighbors.contains(n2))
.flat_map(|n2| &crit_graph.cliques[n2].vertices)
})
.copied()
.filter(|&u| g.is_present(u))
.collect()
}
fn count_intersection(n1: impl Iterator<Item = usize>, n2: &[usize]) -> usize {
let mut count = 0;
for u in n1 {
if n2.contains(&u) {
count += 1;
}
}
count
}
struct EditSet {
inserts: Vec<(usize, usize)>,
deletions: Vec<(usize, usize)>,
total_edit_degree: usize,
}
fn calculate_edits_to_remove_clique_and_neighborhood(
g: &Graph,
clique: &CritClique,
clique_neighbors: &[usize],
) -> EditSet {
// Everything in the clique is already connected with the rest of the clique (it's a clique!).
// All the neighbors are also connected to all the vertices in the clique, because all the
// clique vertices have the *same set* of neighbors outside the clique (it's a *critical*
// clique!).
// So we only need to add edges between the different groups of neighbors.
//
// The only edges that we need to remove are between the neighbors of the clique to any nodes
// that are neither in the neighbors nor the clique itself. (The vertices in the clique
// obviously don't have any such neighbors, so there's nothing to remove.)
let mut edits = EditSet {
inserts: Vec::new(),
deletions: Vec::new(),
total_edit_degree: 0,
};
for i in 0..clique_neighbors.len() {
let u = clique_neighbors[i];
if !g.is_present(u) {
continue;
}
// Add edges to other clique neighbors.
for j in (i + 1)..clique_neighbors.len() {
let v = clique_neighbors[j];
if !g.is_present(v) {
continue;
}
if g.get(u, v) < 0.0 {
edits.inserts.push((u, v));
// Increase total degree twice: we only add the (u, v) edge once but it would be
// counted in the edit degree for both u and v
edits.total_edit_degree += 2;
}
}
// Remove edges to unrelated vertices.
// TODO: Try using a BTreeSet for neighbors and vertices, or using some kind of other iteration
// strategy to avoid the linear search here.
for v in 0..g.size() {
if u == v || !g.is_present(v) {
continue;
}
if clique_neighbors.contains(&v) || clique.vertices.contains(&v) {
continue;
}
if g.get(u, v) > 0.0 {
edits.deletions.push((u, v));
// Here the degree is only increased once: it would only count for u, since v isn't
// even in the neighborhood and thus not considered.
edits.total_edit_degree += 1;
}
}
}
edits
}
fn make_clique_and_neighborhood_disjoint_and_remove(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
edits_to_perform: EditSet,
clique: &CritClique,
clique_neighbors: &[usize],
) -> bool {
for (u, v) in edits_to_perform.inserts {
let uv = g.get_mut(u, v);
*k += *uv;
Edit::insert(edits, &imap, u, v);
*uv = f32::INFINITY;
}
for (u, v) in edits_to_perform.deletions {
let uv = g.get_mut(u, v);
*k -= *uv;
Edit::delete(edits, &imap, u, v);
*uv = f32::NEG_INFINITY;
}
// Now mark the clique and its neighbors as "removed" from the graph, so future reduction and
// algorithm steps ignore it. (It is now a disjoint clique, i.e. already done.)
for &u in clique_neighbors {
g.set_present(u, false);
}
for &u in &clique.vertices {
g.set_present(u, false);
}
clique_neighbors.len() > 0 || clique.vertices.len() > 0
}
fn apply_rule4(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
clique_neighbors: &[usize],
clique_neighbors2: &[usize],
u: usize,
) -> bool {
// Insert edges in neighborhood to make clique+neighborhood a clique.
let mut has_done_edit = false;
for i in 0..clique_neighbors.len() {
let v = clique_neighbors[i];
// Add edges to other clique neighbors.
for j in (i + 1)..clique_neighbors.len() {
let w = clique_neighbors[j];
let vw = g.get_mut(v, w);
if *vw < 0.0 {
*k += *vw;
Edit::insert(edits, &imap, v, w);
*vw = f32::INFINITY;
has_done_edit = true;
}
}
}
// Remove edges between clique_neighbors and clique_neighbors2-u
for &v in clique_neighbors {
for &w in clique_neighbors2 {
if w == u {
continue;
}
let vw = g.get_mut(v, w);
if *vw > 0.0 {
*k -= *vw;
Edit::delete(edits, &imap, v, w);
*vw = f32::NEG_INFINITY;
has_done_edit = true;
}
}
}
has_done_edit
}
fn apply_rule5(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
clique: &CritClique,
clique_neighbors: &[usize],
) -> bool {
// Can pick any set of |clique| vertices in clique_neighbors, we'll just use the first |clique|
// verts.
// Then, remove (clique + that set) from G, and set k = k - |clique|.
// Note that the modification to k does not actually correspond directly to the edge edits we
// do, but this is what the paper has proven to be correct *shrug*.
let clique_size = clique.vertices.len();
let to_remove = clique
.vertices
.iter()
.chain(clique_neighbors[..clique_size].iter())
.copied()
.collect::<Vec<_>>();
for &u in &to_remove {
g.set_present(u, false);
for v in 0..g.size() {
if !g.is_present(v) {
continue;
}
let uv = g.get_mut(u, v);
if *uv > 0.0 {
Edit::delete(edits, imap, u, v);
*uv = f32::NEG_INFINITY;
}
}
}
*k = *k - clique_size as f32;
to_remove.len() > 0
}
*/
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn crit_graph() {
// This is the example from "Guo: A more effective linear kernelization for cluster
// editing, 2009", Fig. 1
let mut graph = Graph::new(9);
graph.set(0, 1, Weight::ONE);
graph.set(0, 2, Weight::ONE);
graph.set(1, 2, Weight::ONE);
graph.set(2, 3, Weight::ONE);
graph.set(2, 4, Weight::ONE);
graph.set(3, 4, Weight::ONE);
graph.set(3, 5, Weight::ONE);
graph.set(3, 6, Weight::ONE);
graph.set(4, 5, Weight::ONE);
graph.set(4, 6, Weight::ONE);
graph.set(5, 6, Weight::ONE);
graph.set(5, 7, Weight::ONE);
graph.set(5, 8, Weight::ONE);
let crit = build_crit_clique_graph(&graph);
assert_eq!(crit.cliques[0].vertices, vec![0, 1]);
assert_eq!(crit.cliques[1].vertices, vec![2]);
assert_eq!(crit.cliques[2].vertices, vec![3, 4]);
assert_eq!(crit.cliques[3].vertices, vec![5]);
assert_eq!(crit.cliques[4].vertices, vec![6]);
assert_eq!(crit.cliques[5].vertices, vec![7]);
assert_eq!(crit.cliques[6].vertices, vec![8]);
assert_eq!(crit.graph.neighbors(0).collect::<Vec<_>>(), vec![1]);
assert_eq!(crit.graph.neighbors(1).collect::<Vec<_>>(), vec![0, 2]);
assert_eq!(crit.graph.neighbors(2).collect::<Vec<_>>(), vec![1, 3, 4]);
assert_eq!(
crit.graph.neighbors(3).collect::<Vec<_>>(),
vec![2, 4, 5, 6]
);
assert_eq!(crit.graph.neighbors(4).collect::<Vec<_>>(), vec![2, 3]);
assert_eq!(crit.graph.neighbors(5).collect::<Vec<_>>(), vec![3]);
assert_eq!(crit.graph.neighbors(6).collect::<Vec<_>>(), vec![3]);
}
}
| {
continue;
} | conditional_block |
critical_cliques.rs | use crate::{
graph::{GraphWeight, IndexMap},
Graph, Weight,
};
#[derive(Debug, Clone, Default)]
pub struct CritClique {
pub vertices: Vec<usize>,
}
pub struct CritCliqueGraph {
pub cliques: Vec<CritClique>,
pub graph: Graph<Weight>,
}
impl CritCliqueGraph {
pub fn | (&self) -> petgraph::Graph<String, u8, petgraph::Undirected, u32> {
use petgraph::prelude::NodeIndex;
let mut pg = petgraph::Graph::with_capacity(self.graph.size(), 0);
for u in 0..self.graph.size() {
pg.add_node(
self.cliques[u]
.vertices
.iter()
.map(|i| i.to_string())
.collect::<Vec<_>>()
.join(", "),
);
}
for u in 0..self.graph.size() {
for v in (u + 1)..self.graph.size() {
if self.graph.get(u, v) > Weight::ZERO {
pg.add_edge(NodeIndex::new(u), NodeIndex::new(v), 0);
}
}
}
pg
}
}
pub fn build_crit_clique_graph(g: &Graph<Weight>) -> CritCliqueGraph {
let mut cliques = Vec::new();
// TODO: This looks at least O(n^2) but should apparently be do-able in O(n + m), so have
// another look at making this more efficient.
let mut visited = vec![false; g.size()];
for u in g.nodes() {
if visited[u] {
continue;
}
visited[u] = true;
let mut clique = CritClique::default();
clique.vertices.push(u);
for v in g.nodes() {
if visited[v] {
continue;
}
// TODO: Is it maybe worth storing neighbor sets instead of recomputing them?
if g.closed_neighbors(u).eq(g.closed_neighbors(v)) {
clique.vertices.push(v);
visited[v] = true;
}
}
cliques.push(clique);
}
let mut crit_graph = Graph::new(cliques.len());
for c1 in 0..cliques.len() {
for c2 in 0..cliques.len() {
if c1 == c2 {
continue;
}
if should_be_neighbors(g, &cliques[c1], &cliques[c2]) {
crit_graph.set(c1, c2, Weight::ONE);
}
}
}
CritCliqueGraph {
cliques,
graph: crit_graph,
}
}
fn should_be_neighbors(g: &Graph<Weight>, c1: &CritClique, c2: &CritClique) -> bool {
for &u in &c1.vertices {
for &v in &c2.vertices {
if !g.has_edge(u, v) {
return false;
}
}
}
true
}
/// Performs a parameter-independent reduction on the graph `g` by constructing the critical clique
/// graph and merging all critical cliques into a single vertex.
/// This assumes that the input graph is unweighted (i.e. all weights are +1 or -1 exactly). The
/// reduced graph will be weighted however.
pub fn merge_cliques(
g: &Graph<Weight>,
imap: &IndexMap,
_path_log: &mut String,
) -> (Graph<Weight>, IndexMap) {
let mut crit = build_crit_clique_graph(g);
let mut crit_imap = IndexMap::empty(crit.graph.size());
for u in 0..crit.graph.size() {
for v in (u + 1)..crit.graph.size() {
//let uv = crit.graph.get_mut_direct(u, v);
let uv = crit.graph.get(u, v);
let sign = uv.signum();
let weight = crit.cliques[u].vertices.len() * crit.cliques[v].vertices.len();
crit.graph.set(u, v, (weight as Weight) * sign);
}
crit_imap.set(
u,
crit.cliques[u]
.vertices
.iter()
.flat_map(|v| imap[*v].iter().copied())
.collect(),
);
if crit_imap[u].len() > 1 {
append_path_log_dir!(_path_log, "critcliques, merged {:?}\n", crit_imap[u]);
}
}
(crit.graph, crit_imap)
}
// This kernel can only straightforwardly be applied to unweighted instances.
// However, before even starting the parameter search, we reduce the unweighted graph by converting
// it into a weighted one. Thus we cannot use this kernel at the moment.
/*
// Chen and Meng: A 2k Kernel for the Cluster Editing Problem, 2010
pub fn apply_reductions(
g: &mut Graph,
imap: &mut IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
) -> bool {
let mut any_rules_applied = true;
while any_rules_applied {
any_rules_applied = false;
let mut rule5_state = None;
let crit = build_crit_clique_graph(g);
for (clique_idx, clique) in crit.cliques.iter().enumerate() {
let (clique_neighbors, clique_crit_neighbor_count) =
get_clique_neighbors(g, clique_idx, &crit);
let edit_set =
calculate_edits_to_remove_clique_and_neighborhood(g, clique, &clique_neighbors);
let clique_len = clique.vertices.len();
let neighbors_len = clique_neighbors.len();
let total_edit_degree = edit_set.total_edit_degree;
let rule1_applicable = clique_len as f32 > *k;
let rule2_applicable =
clique_len >= neighbors_len && clique_len + neighbors_len > total_edit_degree;
let mut rule3_applicable = false;
let mut rule4_applicable = false;
let mut rule4_vertex = None;
let mut clique_neighbors2 = None;
if !rule1_applicable && !rule2_applicable {
// Only calculate this if the other two aren't already true since it's a bit more work
if clique_len < neighbors_len && clique_len + neighbors_len > total_edit_degree {
let neighbors2 = get_clique_neighbors2(g, clique_idx, &crit);
let threshold = (clique_len + neighbors_len) / 2;
for &u in &neighbors2 {
let count = count_intersection(g.neighbors(u), &clique_neighbors);
if count > threshold {
rule4_vertex = Some(u);
break;
}
}
if rule5_state.is_none() {
rule5_state = Some((
clique.clone(),
clique_neighbors.clone(),
clique_crit_neighbor_count,
neighbors2.clone(),
));
}
rule3_applicable = rule4_vertex.is_none();
rule4_applicable = rule4_vertex.is_some();
clique_neighbors2 = Some(neighbors2);
}
}
if rule1_applicable || rule2_applicable || rule3_applicable {
let has_reduced = make_clique_and_neighborhood_disjoint_and_remove(
g,
imap,
k,
edits,
edit_set,
&clique,
&clique_neighbors,
);
if *k < 0.0 {
return false;
}
if has_reduced {
any_rules_applied = true;
break;
}
}
if rule4_applicable {
let has_reduced = apply_rule4(
g,
imap,
k,
edits,
&clique_neighbors,
&clique_neighbors2.unwrap(),
rule4_vertex.unwrap(),
);
if *k < 0.0 {
return false;
}
if has_reduced {
any_rules_applied = true;
break;
}
}
}
if !any_rules_applied && rule5_state.is_some() {
// If we got here, either no rule was applicable or they did not result in any further
// reduction, but we found a case where rule 5 should now be applicable.
// The paper claims that the above condition and the fact that the other rules
// don#t reduce it further is sufficient to imply this condition. Let's check to be
// safe for now :)
// TODO: Might remove this check if I'm convinced it's safe.
let (clique, clique_neighbors, clique_crit_neighbor_count, clique_neighbors2) =
rule5_state.unwrap();
assert!(clique_crit_neighbor_count == 1 && clique_neighbors2.len() == 1);
let has_reduced = apply_rule5(g, imap, k, edits, &clique, &clique_neighbors);
if !has_reduced {
// All the other rules didn't apply, so we got here, and now 5 didn't do anything
// either. We're done now.
break;
}
any_rules_applied = true;
}
let new_count = g.present_node_count();
if new_count == g.size() {
continue;
}
// Construct a new graph and imap with the vertices we marked for removal actually removed. The
// new imap still maps from indices into that new graph to the vertices of the original graph
// the algorithm got as input.
// TODO: Figure out if it's necessary to do this every `while` iteration or if the
// reductions are all still valid without it; would also be nice to avoid recomputing the
// crit clique graph when it's not necessary.
// TODO: Possibly test whether it's faster to just keep the removed_g map around in a larger
// scope rather than creating the graph here.
if new_count == 0 {
return true;
}
let mut new_g = Graph::new(new_count);
let mut new_imap = IndexMap::new(new_count);
let mut new_vertex = 0;
let mut reverse_imap = vec![0; g.size()];
for u in 0..g.size() {
if !g.is_present(u) {
continue;
}
for v in g.neighbors(u) {
if v > u {
continue;
}
new_g.set_direct(reverse_imap[v], new_vertex, g.get_direct(v, u));
}
reverse_imap[u] = new_vertex;
new_imap[new_vertex] = imap.take(u);
new_vertex += 1;
}
*g = new_g;
*imap = new_imap;
}
true
}
// TODO: COOOOMMMEEEENNNNTTTTSSSS!!!!
/// Gets all the vertices that are neighbors of the critical clique, but not in the clique
/// themselves. No specific order is guaranteed.
fn get_clique_neighbors(
g: &Graph,
clique_idx: usize,
crit_graph: &CritCliqueGraph,
) -> (Vec<usize>, usize) {
let crit_neighbors = crit_graph.graph.neighbors(clique_idx);
let mut count = 0;
let neighborhood = crit_neighbors
.flat_map(|n| {
count += 1;
&crit_graph.cliques[n].vertices
})
.copied()
.filter(|&u| g.is_present(u))
.collect();
(neighborhood, count)
}
fn get_clique_neighbors2(g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph) -> Vec<usize> {
let crit_neighbors = crit_graph.graph.neighbors(clique_idx).collect::<Vec<_>>();
crit_neighbors
.iter()
.flat_map(|&n| {
crit_graph
.graph
.neighbors(n)
.filter(|n2| !crit_neighbors.contains(n2))
.flat_map(|n2| &crit_graph.cliques[n2].vertices)
})
.copied()
.filter(|&u| g.is_present(u))
.collect()
}
fn count_intersection(n1: impl Iterator<Item = usize>, n2: &[usize]) -> usize {
let mut count = 0;
for u in n1 {
if n2.contains(&u) {
count += 1;
}
}
count
}
struct EditSet {
inserts: Vec<(usize, usize)>,
deletions: Vec<(usize, usize)>,
total_edit_degree: usize,
}
fn calculate_edits_to_remove_clique_and_neighborhood(
g: &Graph,
clique: &CritClique,
clique_neighbors: &[usize],
) -> EditSet {
// Everything in the clique is already connected with the rest of the clique (it's a clique!).
// All the neighbors are also connected to all the vertices in the clique, because all the
// clique vertices have the *same set* of neighbors outside the clique (it's a *critical*
// clique!).
// So we only need to add edges between the different groups of neighbors.
//
// The only edges that we need to remove are between the neighbors of the clique to any nodes
// that are neither in the neighbors nor the clique itself. (The vertices in the clique
// obviously don't have any such neighbors, so there's nothing to remove.)
let mut edits = EditSet {
inserts: Vec::new(),
deletions: Vec::new(),
total_edit_degree: 0,
};
for i in 0..clique_neighbors.len() {
let u = clique_neighbors[i];
if !g.is_present(u) {
continue;
}
// Add edges to other clique neighbors.
for j in (i + 1)..clique_neighbors.len() {
let v = clique_neighbors[j];
if !g.is_present(v) {
continue;
}
if g.get(u, v) < 0.0 {
edits.inserts.push((u, v));
// Increase total degree twice: we only add the (u, v) edge once but it would be
// counted in the edit degree for both u and v
edits.total_edit_degree += 2;
}
}
// Remove edges to unrelated vertices.
// TODO: Try using a BTreeSet for neighbors and vertices, or using some kind of other iteration
// strategy to avoid the linear search here.
for v in 0..g.size() {
if u == v || !g.is_present(v) {
continue;
}
if clique_neighbors.contains(&v) || clique.vertices.contains(&v) {
continue;
}
if g.get(u, v) > 0.0 {
edits.deletions.push((u, v));
// Here the degree is only increased once: it would only count for u, since v isn't
// even in the neighborhood and thus not considered.
edits.total_edit_degree += 1;
}
}
}
edits
}
fn make_clique_and_neighborhood_disjoint_and_remove(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
edits_to_perform: EditSet,
clique: &CritClique,
clique_neighbors: &[usize],
) -> bool {
for (u, v) in edits_to_perform.inserts {
let uv = g.get_mut(u, v);
*k += *uv;
Edit::insert(edits, &imap, u, v);
*uv = f32::INFINITY;
}
for (u, v) in edits_to_perform.deletions {
let uv = g.get_mut(u, v);
*k -= *uv;
Edit::delete(edits, &imap, u, v);
*uv = f32::NEG_INFINITY;
}
// Now mark the clique and its neighbors as "removed" from the graph, so future reduction and
// algorithm steps ignore it. (It is now a disjoint clique, i.e. already done.)
for &u in clique_neighbors {
g.set_present(u, false);
}
for &u in &clique.vertices {
g.set_present(u, false);
}
clique_neighbors.len() > 0 || clique.vertices.len() > 0
}
fn apply_rule4(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
clique_neighbors: &[usize],
clique_neighbors2: &[usize],
u: usize,
) -> bool {
// Insert edges in neighborhood to make clique+neighborhood a clique.
let mut has_done_edit = false;
for i in 0..clique_neighbors.len() {
let v = clique_neighbors[i];
// Add edges to other clique neighbors.
for j in (i + 1)..clique_neighbors.len() {
let w = clique_neighbors[j];
let vw = g.get_mut(v, w);
if *vw < 0.0 {
*k += *vw;
Edit::insert(edits, &imap, v, w);
*vw = f32::INFINITY;
has_done_edit = true;
}
}
}
// Remove edges between clique_neighbors and clique_neighbors2-u
for &v in clique_neighbors {
for &w in clique_neighbors2 {
if w == u {
continue;
}
let vw = g.get_mut(v, w);
if *vw > 0.0 {
*k -= *vw;
Edit::delete(edits, &imap, v, w);
*vw = f32::NEG_INFINITY;
has_done_edit = true;
}
}
}
has_done_edit
}
fn apply_rule5(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
clique: &CritClique,
clique_neighbors: &[usize],
) -> bool {
// Can pick any set of |clique| vertices in clique_neighbors, we'll just use the first |clique|
// verts.
// Then, remove (clique + that set) from G, and set k = k - |clique|.
// Note that the modification to k does not actually correspond directly to the edge edits we
// do, but this is what the paper has proven to be correct *shrug*.
let clique_size = clique.vertices.len();
let to_remove = clique
.vertices
.iter()
.chain(clique_neighbors[..clique_size].iter())
.copied()
.collect::<Vec<_>>();
for &u in &to_remove {
g.set_present(u, false);
for v in 0..g.size() {
if !g.is_present(v) {
continue;
}
let uv = g.get_mut(u, v);
if *uv > 0.0 {
Edit::delete(edits, imap, u, v);
*uv = f32::NEG_INFINITY;
}
}
}
*k = *k - clique_size as f32;
to_remove.len() > 0
}
*/
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn crit_graph() {
// This is the example from "Guo: A more effective linear kernelization for cluster
// editing, 2009", Fig. 1
let mut graph = Graph::new(9);
graph.set(0, 1, Weight::ONE);
graph.set(0, 2, Weight::ONE);
graph.set(1, 2, Weight::ONE);
graph.set(2, 3, Weight::ONE);
graph.set(2, 4, Weight::ONE);
graph.set(3, 4, Weight::ONE);
graph.set(3, 5, Weight::ONE);
graph.set(3, 6, Weight::ONE);
graph.set(4, 5, Weight::ONE);
graph.set(4, 6, Weight::ONE);
graph.set(5, 6, Weight::ONE);
graph.set(5, 7, Weight::ONE);
graph.set(5, 8, Weight::ONE);
let crit = build_crit_clique_graph(&graph);
assert_eq!(crit.cliques[0].vertices, vec![0, 1]);
assert_eq!(crit.cliques[1].vertices, vec![2]);
assert_eq!(crit.cliques[2].vertices, vec![3, 4]);
assert_eq!(crit.cliques[3].vertices, vec![5]);
assert_eq!(crit.cliques[4].vertices, vec![6]);
assert_eq!(crit.cliques[5].vertices, vec![7]);
assert_eq!(crit.cliques[6].vertices, vec![8]);
assert_eq!(crit.graph.neighbors(0).collect::<Vec<_>>(), vec![1]);
assert_eq!(crit.graph.neighbors(1).collect::<Vec<_>>(), vec![0, 2]);
assert_eq!(crit.graph.neighbors(2).collect::<Vec<_>>(), vec![1, 3, 4]);
assert_eq!(
crit.graph.neighbors(3).collect::<Vec<_>>(),
vec![2, 4, 5, 6]
);
assert_eq!(crit.graph.neighbors(4).collect::<Vec<_>>(), vec![2, 3]);
assert_eq!(crit.graph.neighbors(5).collect::<Vec<_>>(), vec![3]);
assert_eq!(crit.graph.neighbors(6).collect::<Vec<_>>(), vec![3]);
}
}
| to_petgraph | identifier_name |
critical_cliques.rs | use crate::{
graph::{GraphWeight, IndexMap},
Graph, Weight,
};
#[derive(Debug, Clone, Default)]
pub struct CritClique {
pub vertices: Vec<usize>,
}
pub struct CritCliqueGraph {
pub cliques: Vec<CritClique>,
pub graph: Graph<Weight>,
}
impl CritCliqueGraph {
pub fn to_petgraph(&self) -> petgraph::Graph<String, u8, petgraph::Undirected, u32> {
use petgraph::prelude::NodeIndex;
let mut pg = petgraph::Graph::with_capacity(self.graph.size(), 0);
for u in 0..self.graph.size() {
pg.add_node(
self.cliques[u]
.vertices
.iter()
.map(|i| i.to_string())
.collect::<Vec<_>>()
.join(", "),
);
}
for u in 0..self.graph.size() {
for v in (u + 1)..self.graph.size() {
if self.graph.get(u, v) > Weight::ZERO {
pg.add_edge(NodeIndex::new(u), NodeIndex::new(v), 0);
}
}
}
pg
}
}
pub fn build_crit_clique_graph(g: &Graph<Weight>) -> CritCliqueGraph {
let mut cliques = Vec::new();
// TODO: This looks at least O(n^2) but should apparently be do-able in O(n + m), so have
// another look at making this more efficient.
let mut visited = vec![false; g.size()];
for u in g.nodes() {
if visited[u] {
continue;
}
visited[u] = true;
let mut clique = CritClique::default();
clique.vertices.push(u);
for v in g.nodes() {
if visited[v] {
continue;
}
// TODO: Is it maybe worth storing neighbor sets instead of recomputing them?
if g.closed_neighbors(u).eq(g.closed_neighbors(v)) {
clique.vertices.push(v);
visited[v] = true;
}
}
cliques.push(clique);
}
let mut crit_graph = Graph::new(cliques.len());
for c1 in 0..cliques.len() {
for c2 in 0..cliques.len() {
if c1 == c2 {
continue;
}
if should_be_neighbors(g, &cliques[c1], &cliques[c2]) {
crit_graph.set(c1, c2, Weight::ONE);
}
}
}
CritCliqueGraph {
cliques,
graph: crit_graph,
}
}
fn should_be_neighbors(g: &Graph<Weight>, c1: &CritClique, c2: &CritClique) -> bool {
for &u in &c1.vertices {
for &v in &c2.vertices {
if !g.has_edge(u, v) {
return false;
}
}
}
true
}
/// Performs a parameter-independent reduction on the graph `g` by constructing the critical clique
/// graph and merging all critical cliques into a single vertex.
/// This assumes that the input graph is unweighted (i.e. all weights are +1 or -1 exactly). The
/// reduced graph will be weighted however.
pub fn merge_cliques(
g: &Graph<Weight>,
imap: &IndexMap,
_path_log: &mut String,
) -> (Graph<Weight>, IndexMap) {
let mut crit = build_crit_clique_graph(g);
let mut crit_imap = IndexMap::empty(crit.graph.size());
for u in 0..crit.graph.size() {
for v in (u + 1)..crit.graph.size() {
//let uv = crit.graph.get_mut_direct(u, v);
let uv = crit.graph.get(u, v);
let sign = uv.signum();
let weight = crit.cliques[u].vertices.len() * crit.cliques[v].vertices.len();
crit.graph.set(u, v, (weight as Weight) * sign);
}
crit_imap.set(
u,
crit.cliques[u]
.vertices
.iter()
.flat_map(|v| imap[*v].iter().copied())
.collect(),
);
if crit_imap[u].len() > 1 {
append_path_log_dir!(_path_log, "critcliques, merged {:?}\n", crit_imap[u]);
}
}
(crit.graph, crit_imap)
}
// This kernel can only straightforwardly be applied to unweighted instances.
// However, before even starting the parameter search, we reduce the unweighted graph by converting
// it into a weighted one. Thus we cannot use this kernel at the moment.
/*
// Chen and Meng: A 2k Kernel for the Cluster Editing Problem, 2010
pub fn apply_reductions(
g: &mut Graph,
imap: &mut IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
) -> bool {
let mut any_rules_applied = true;
while any_rules_applied {
any_rules_applied = false;
let mut rule5_state = None;
let crit = build_crit_clique_graph(g);
for (clique_idx, clique) in crit.cliques.iter().enumerate() {
let (clique_neighbors, clique_crit_neighbor_count) =
get_clique_neighbors(g, clique_idx, &crit);
let edit_set =
calculate_edits_to_remove_clique_and_neighborhood(g, clique, &clique_neighbors);
let clique_len = clique.vertices.len();
let neighbors_len = clique_neighbors.len();
let total_edit_degree = edit_set.total_edit_degree;
let rule1_applicable = clique_len as f32 > *k;
let rule2_applicable =
clique_len >= neighbors_len && clique_len + neighbors_len > total_edit_degree;
let mut rule3_applicable = false;
let mut rule4_applicable = false;
let mut rule4_vertex = None;
let mut clique_neighbors2 = None;
if !rule1_applicable && !rule2_applicable {
// Only calculate this if the other two aren't already true since it's a bit more work
if clique_len < neighbors_len && clique_len + neighbors_len > total_edit_degree {
let neighbors2 = get_clique_neighbors2(g, clique_idx, &crit);
let threshold = (clique_len + neighbors_len) / 2;
for &u in &neighbors2 {
let count = count_intersection(g.neighbors(u), &clique_neighbors);
if count > threshold {
rule4_vertex = Some(u);
break;
}
}
if rule5_state.is_none() {
rule5_state = Some((
clique.clone(),
clique_neighbors.clone(),
clique_crit_neighbor_count,
neighbors2.clone(),
));
}
rule3_applicable = rule4_vertex.is_none();
rule4_applicable = rule4_vertex.is_some();
clique_neighbors2 = Some(neighbors2);
}
}
if rule1_applicable || rule2_applicable || rule3_applicable {
let has_reduced = make_clique_and_neighborhood_disjoint_and_remove(
g,
imap,
k,
edits,
edit_set,
&clique,
&clique_neighbors,
);
if *k < 0.0 {
return false;
}
if has_reduced {
any_rules_applied = true;
break;
}
}
if rule4_applicable {
let has_reduced = apply_rule4(
g,
imap,
k,
edits,
&clique_neighbors,
&clique_neighbors2.unwrap(),
rule4_vertex.unwrap(),
);
if *k < 0.0 {
return false;
}
if has_reduced {
any_rules_applied = true;
break;
}
}
}
if !any_rules_applied && rule5_state.is_some() {
// If we got here, either no rule was applicable or they did not result in any further
// reduction, but we found a case where rule 5 should now be applicable.
// The paper claims that the above condition and the fact that the other rules
// don#t reduce it further is sufficient to imply this condition. Let's check to be
// safe for now :)
// TODO: Might remove this check if I'm convinced it's safe.
let (clique, clique_neighbors, clique_crit_neighbor_count, clique_neighbors2) =
rule5_state.unwrap();
assert!(clique_crit_neighbor_count == 1 && clique_neighbors2.len() == 1);
let has_reduced = apply_rule5(g, imap, k, edits, &clique, &clique_neighbors);
if !has_reduced {
// All the other rules didn't apply, so we got here, and now 5 didn't do anything
// either. We're done now.
break;
}
any_rules_applied = true;
}
let new_count = g.present_node_count();
if new_count == g.size() {
continue;
}
// Construct a new graph and imap with the vertices we marked for removal actually removed. The
// new imap still maps from indices into that new graph to the vertices of the original graph
// the algorithm got as input.
// TODO: Figure out if it's necessary to do this every `while` iteration or if the
// reductions are all still valid without it; would also be nice to avoid recomputing the
// crit clique graph when it's not necessary.
// TODO: Possibly test whether it's faster to just keep the removed_g map around in a larger
// scope rather than creating the graph here.
if new_count == 0 {
return true;
}
let mut new_g = Graph::new(new_count);
let mut new_imap = IndexMap::new(new_count);
let mut new_vertex = 0;
let mut reverse_imap = vec![0; g.size()];
for u in 0..g.size() {
if !g.is_present(u) {
continue;
}
for v in g.neighbors(u) {
if v > u {
continue;
}
new_g.set_direct(reverse_imap[v], new_vertex, g.get_direct(v, u));
}
reverse_imap[u] = new_vertex;
new_imap[new_vertex] = imap.take(u);
new_vertex += 1;
}
*g = new_g;
*imap = new_imap;
}
true
}
// TODO: COOOOMMMEEEENNNNTTTTSSSS!!!!
/// Gets all the vertices that are neighbors of the critical clique, but not in the clique
/// themselves. No specific order is guaranteed.
fn get_clique_neighbors(
g: &Graph,
clique_idx: usize,
crit_graph: &CritCliqueGraph,
) -> (Vec<usize>, usize) {
let crit_neighbors = crit_graph.graph.neighbors(clique_idx);
let mut count = 0;
let neighborhood = crit_neighbors
.flat_map(|n| {
count += 1;
&crit_graph.cliques[n].vertices
})
.copied()
.filter(|&u| g.is_present(u))
.collect();
(neighborhood, count)
}
fn get_clique_neighbors2(g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph) -> Vec<usize> {
let crit_neighbors = crit_graph.graph.neighbors(clique_idx).collect::<Vec<_>>();
crit_neighbors
.iter()
.flat_map(|&n| {
crit_graph
.graph
.neighbors(n)
.filter(|n2| !crit_neighbors.contains(n2))
.flat_map(|n2| &crit_graph.cliques[n2].vertices)
})
.copied()
.filter(|&u| g.is_present(u))
.collect()
}
fn count_intersection(n1: impl Iterator<Item = usize>, n2: &[usize]) -> usize {
let mut count = 0;
for u in n1 {
if n2.contains(&u) {
count += 1;
}
}
count
}
struct EditSet {
inserts: Vec<(usize, usize)>,
deletions: Vec<(usize, usize)>,
total_edit_degree: usize,
}
fn calculate_edits_to_remove_clique_and_neighborhood(
g: &Graph,
clique: &CritClique,
clique_neighbors: &[usize],
) -> EditSet {
// Everything in the clique is already connected with the rest of the clique (it's a clique!).
// All the neighbors are also connected to all the vertices in the clique, because all the
// clique vertices have the *same set* of neighbors outside the clique (it's a *critical*
// clique!).
// So we only need to add edges between the different groups of neighbors.
//
// The only edges that we need to remove are between the neighbors of the clique to any nodes
// that are neither in the neighbors nor the clique itself. (The vertices in the clique
// obviously don't have any such neighbors, so there's nothing to remove.)
let mut edits = EditSet {
inserts: Vec::new(),
deletions: Vec::new(),
total_edit_degree: 0,
};
for i in 0..clique_neighbors.len() {
let u = clique_neighbors[i];
if !g.is_present(u) {
continue;
}
// Add edges to other clique neighbors.
for j in (i + 1)..clique_neighbors.len() {
let v = clique_neighbors[j];
if !g.is_present(v) {
continue;
}
if g.get(u, v) < 0.0 {
edits.inserts.push((u, v));
// Increase total degree twice: we only add the (u, v) edge once but it would be
// counted in the edit degree for both u and v
edits.total_edit_degree += 2;
}
}
// Remove edges to unrelated vertices.
// TODO: Try using a BTreeSet for neighbors and vertices, or using some kind of other iteration
// strategy to avoid the linear search here.
for v in 0..g.size() {
if u == v || !g.is_present(v) {
continue;
}
if clique_neighbors.contains(&v) || clique.vertices.contains(&v) {
continue;
}
if g.get(u, v) > 0.0 {
edits.deletions.push((u, v));
// Here the degree is only increased once: it would only count for u, since v isn't
// even in the neighborhood and thus not considered.
edits.total_edit_degree += 1;
}
}
}
edits
}
fn make_clique_and_neighborhood_disjoint_and_remove(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
edits_to_perform: EditSet,
clique: &CritClique,
clique_neighbors: &[usize],
) -> bool {
for (u, v) in edits_to_perform.inserts {
let uv = g.get_mut(u, v);
*k += *uv;
Edit::insert(edits, &imap, u, v);
*uv = f32::INFINITY;
}
for (u, v) in edits_to_perform.deletions {
let uv = g.get_mut(u, v); | // Now mark the clique and its neighbors as "removed" from the graph, so future reduction and
// algorithm steps ignore it. (It is now a disjoint clique, i.e. already done.)
for &u in clique_neighbors {
g.set_present(u, false);
}
for &u in &clique.vertices {
g.set_present(u, false);
}
clique_neighbors.len() > 0 || clique.vertices.len() > 0
}
fn apply_rule4(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
clique_neighbors: &[usize],
clique_neighbors2: &[usize],
u: usize,
) -> bool {
// Insert edges in neighborhood to make clique+neighborhood a clique.
let mut has_done_edit = false;
for i in 0..clique_neighbors.len() {
let v = clique_neighbors[i];
// Add edges to other clique neighbors.
for j in (i + 1)..clique_neighbors.len() {
let w = clique_neighbors[j];
let vw = g.get_mut(v, w);
if *vw < 0.0 {
*k += *vw;
Edit::insert(edits, &imap, v, w);
*vw = f32::INFINITY;
has_done_edit = true;
}
}
}
// Remove edges between clique_neighbors and clique_neighbors2-u
for &v in clique_neighbors {
for &w in clique_neighbors2 {
if w == u {
continue;
}
let vw = g.get_mut(v, w);
if *vw > 0.0 {
*k -= *vw;
Edit::delete(edits, &imap, v, w);
*vw = f32::NEG_INFINITY;
has_done_edit = true;
}
}
}
has_done_edit
}
fn apply_rule5(
g: &mut Graph,
imap: &IndexMap,
k: &mut f32,
edits: &mut Vec<Edit>,
clique: &CritClique,
clique_neighbors: &[usize],
) -> bool {
// Can pick any set of |clique| vertices in clique_neighbors, we'll just use the first |clique|
// verts.
// Then, remove (clique + that set) from G, and set k = k - |clique|.
// Note that the modification to k does not actually correspond directly to the edge edits we
// do, but this is what the paper has proven to be correct *shrug*.
let clique_size = clique.vertices.len();
let to_remove = clique
.vertices
.iter()
.chain(clique_neighbors[..clique_size].iter())
.copied()
.collect::<Vec<_>>();
for &u in &to_remove {
g.set_present(u, false);
for v in 0..g.size() {
if !g.is_present(v) {
continue;
}
let uv = g.get_mut(u, v);
if *uv > 0.0 {
Edit::delete(edits, imap, u, v);
*uv = f32::NEG_INFINITY;
}
}
}
*k = *k - clique_size as f32;
to_remove.len() > 0
}
*/
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn crit_graph() {
// This is the example from "Guo: A more effective linear kernelization for cluster
// editing, 2009", Fig. 1
let mut graph = Graph::new(9);
graph.set(0, 1, Weight::ONE);
graph.set(0, 2, Weight::ONE);
graph.set(1, 2, Weight::ONE);
graph.set(2, 3, Weight::ONE);
graph.set(2, 4, Weight::ONE);
graph.set(3, 4, Weight::ONE);
graph.set(3, 5, Weight::ONE);
graph.set(3, 6, Weight::ONE);
graph.set(4, 5, Weight::ONE);
graph.set(4, 6, Weight::ONE);
graph.set(5, 6, Weight::ONE);
graph.set(5, 7, Weight::ONE);
graph.set(5, 8, Weight::ONE);
let crit = build_crit_clique_graph(&graph);
assert_eq!(crit.cliques[0].vertices, vec![0, 1]);
assert_eq!(crit.cliques[1].vertices, vec![2]);
assert_eq!(crit.cliques[2].vertices, vec![3, 4]);
assert_eq!(crit.cliques[3].vertices, vec![5]);
assert_eq!(crit.cliques[4].vertices, vec![6]);
assert_eq!(crit.cliques[5].vertices, vec![7]);
assert_eq!(crit.cliques[6].vertices, vec![8]);
assert_eq!(crit.graph.neighbors(0).collect::<Vec<_>>(), vec![1]);
assert_eq!(crit.graph.neighbors(1).collect::<Vec<_>>(), vec![0, 2]);
assert_eq!(crit.graph.neighbors(2).collect::<Vec<_>>(), vec![1, 3, 4]);
assert_eq!(
crit.graph.neighbors(3).collect::<Vec<_>>(),
vec![2, 4, 5, 6]
);
assert_eq!(crit.graph.neighbors(4).collect::<Vec<_>>(), vec![2, 3]);
assert_eq!(crit.graph.neighbors(5).collect::<Vec<_>>(), vec![3]);
assert_eq!(crit.graph.neighbors(6).collect::<Vec<_>>(), vec![3]);
}
} | *k -= *uv;
Edit::delete(edits, &imap, u, v);
*uv = f32::NEG_INFINITY;
}
| random_line_split |
astropy_mm.py | # This program represents a Mueller matrix system for a Dual Channel Polarimeter, which includes:
# a Wollaston Prism, a diattenuating retarder representing the derotator, a rotatable HWP, a diattenuating
# retarder representing the third mirror, and a rotation matrix that compensates for parallactic rotation.
# This uses the pyMuellerMat library to represent the Mueller matrices and the astropy library.
#
# Main functions:
# a) Given some Stokes Parameters, the two beams of the Wollaston Prism are computed.
# b) Given a set of measurements from the two beams of the Wollaston prism and knowledge of the HWP angle and
# parallactic angle, the corresponding on-sky polarization is retrieved.
# c) Given a set of targets, plot the tracks over time and parallactic angle from the Keck telescope.
from pyMuellerMat import common_mms as cmm
from pyMuellerMat import MuellerMat
import math
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from astroplan import Observer, FixedTarget, download_IERS_A
from astropy.time import Time
import datetime
# Initialize the telescope
keck = Observer.at_site("Keck Observatory", timezone="US/Hawaii")
fig, ax = plt.subplots()
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 22
plt.rcParams['figure.figsize'] = (20, 20)
# Function to find the two beams of the Wollaston prism based on the Stokes parameters
def wollaston(stokes):
|
# Function that plots the difference of two beams of a Wollaston prism with a half-wave plate over the half-wave
# plate angle
def plot_wollaston(stokes):
data = np.empty(shape=[0, 2])
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP()])
# Find data points from 0 to 2 * pi
for angle in np.arange(0, 2 * math.pi, 0.001):
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = math.degrees(angle)
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
I1 = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
I2 = sys_mm.evaluate() @ stokes
data = np.append(data, [[math.degrees(angle), (I1[0] - I2[0])]], axis=0)
# Plot the data points
plt.scatter(*data.T, s=1)
plt.title('Difference between Wollaston prism beams over HWP angle')
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('HWP angle (deg)')
ax = plt.gca()
ax.set_xlim(0, 360)
ax.set_xticks([0, 90, 180, 270, 360])
plt.show()
# Function to find the corresponding on-sky polarization based on data of the intensities of the two beams
# of the Wollaston prism, the HWP angle, and the parallactic angle
def on_sky(values):
i = np.empty(shape=[0, 1])
m_system = np.empty(shape=[0, 4])
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP(), cmm.Rotator()])
# Calculate the Mueller matrices
for j in range(len(values)):
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = values[j][2]
sys_mm.master_property_dict['Rotator']['pa'] = values[j][3]
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
row1 = sys_mm.evaluate()
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
row2 = sys_mm.evaluate()
i = np.append(i, [[values[j][0]]], axis=0)
m_system = np.append(m_system, [row1[0]], axis=0)
i = np.append(i, [[values[j][1]]], axis=0)
m_system = np.append(m_system, [row2[0]], axis=0)
# Return a least-squares solution
return inv(np.transpose(m_system) @ m_system) @ np.transpose(m_system) @ i
# Function that plots the difference of two beams of a Wollaston prism with a half-wave plate of fixed targets
# over the parallactic angle and time
def track_plot(targets):
# Initialize the start time, the targets, and the initial stokes vector
time = Time("2015-09-13")
step = np.arange(0, 1, 1 / 86400)
stokes = [[0], [1], [0], [0]]
hwp_angles = [0, 22.5]
derotator = cmm.DiattenuatorRetarder()
m3 = cmm.DiattenuatorRetarder()
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), derotator, cmm.HWP(), m3, cmm.Rotator()])
# Put in M3 - use astropy for altitude - diattenuating rotator - as a perfect mirror with an angle
# "perfect" - no retardance and no diattenuation
# Derotator - diattenuating retarder at a given parallactic angle
# Check diattenuating retarder form with goldstein and witzel
# Can calculate coefficients from material parameters - Fresnel reflection - index of refraction
# Fresnel coefficients - how to get the r values and possibly retardance
# use hour angle and dec to find the parallactic angle
# find the altitude given an hour angle and a target
for hwp in hwp_angles:
angle_plot = []
time_plot = []
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = hwp
for j in range(len(targets)):
wollaston_data = []
target = FixedTarget.from_name(targets[j])
# Calculate the parallactic angles and the altitudes
angles = np.degrees((keck.parallactic_angle(time + step, target)).to_value())
altitudes = (keck.altaz(time + step, target)).alt.to_value()
# Calculate the Wollaston beams and parallactic angle as time passes
for pa, alt in zip(angles, altitudes):
sys_mm.master_property_dict['Rotator']['pa'] = pa
m3.properties['theta'] = alt
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
I1 = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
I2 = sys_mm.evaluate() @ stokes
wollaston_data.append(np.asscalar(I1[0] - I2[0]))
angle_plot.append(np.array([angles, wollaston_data]).T)
time_plot.append(np.array([((time + step).to_datetime()), wollaston_data]).T)
# Plot the angle data points
for k in range(len(targets)):
x, y = angle_plot[k].T
plt.scatter(x, y, s=1, label=targets[k])
plt.title('Difference between Wollaston prism beams over parallactic angle with HWP at %.1f degrees' % hwp)
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('Parallactic angle (deg)')
plt.legend(loc="upper left")
plt.show()
# Plot the time data points
for k in range(len(targets)):
x, y = time_plot[k].T
plt.scatter(x, y, s=1, label=targets[k])
plt.title('Difference between Wollaston prism beams over time with HWP at %.1f degrees' % hwp)
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('Time (hour of day)')
plt.legend(loc="upper left")
ax = plt.gca()
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
ax.set_xlim(datetime.date(2015, 9, 13), datetime.date(2015, 9, 14))
plt.show()
# Main function that prompts user for input
def main():
download_IERS_A()
print("This program represents a Mueller matrix system for a dual channel polarimeter using the pyMuellerMat"
"library.")
find = ""
while find != "d":
# Prompt
find = input("\nWhat would you like to do?\na) compute the two beams of the Wollaston prism from Stokes "
"parameters\nb) find the corresponding on-sky polarization with a set of measurements from "
"the two beams of the Wollaston prism and HWP/parallactic angles data\nc) plot the tracks of "
"a set of targets over time and parallactic angle from the Keck telescope\n"
"d) quit the program\n(a/b/c/d): ").lower()
if find == "a":
stokes = []
# Get the Stokes parameters input and store into a list
while len(stokes) != 4:
stokes = input("\nEnter the Stokes parameters, separated by a space: ")
stokes = stokes.split()
if len(stokes) != 4:
print("Enter all four parameters!")
stokes = np.array([[float(stokes[0])], [float(stokes[1])], [float(stokes[2])], [float(stokes[3])]])
woll = wollaston(stokes)
print("\nThe two beams from the Wollaston prism are ", woll[0][0][0], " and ", woll[1][0][0])
input("\n---------------------------------------------------------------------------------------------")
# Plot the intensities if the user chooses to
plot = input("\nWould you like to see a plot of the intensities? (y/n): ").lower()
if plot == 'y':
plot_wollaston(stokes)
print("\n---------------------------------------------------------------------------------------------")
elif find == "b":
# Get the intensities and angle data
values = []
cont = "y"
while cont == "y":
I_1 = float(input("\nEnter the first I parameter in the pair (positive Wollaston): "))
I_2 = float(input("Enter the second I parameter in the pair (negative Wollaston): "))
hwp = math.radians(float(input("Enter the HWP angle (deg): ")))
sky = math.radians(float(input("Enter the parallactic angle (deg): ")))
values.append([I_1, I_2, hwp, sky])
cont = input("\nDo you have another pair of data to add? (y/n): ").lower()
print("\nThe corresponding on-sky polarization is:\n")
print(on_sky(values))
input("\n---------------------------------------------------------------------------------------------")
elif find == "c":
# Get the targets to track
targets = []
cont = "y"
while cont == "y":
target = input("\nEnter the name of the target to track: ")
targets.append(target)
cont = input("Add another target? (y/n): ").lower()
print("\nTracking", ', '.join(targets), "...\n")
track_plot(targets)
input("\n---------------------------------------------------------------------------------------------")
print("\nProgram ended.\n")
if __name__ == "__main__":
main()
| sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP()])
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
pos = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
neg = sys_mm.evaluate() @ stokes
return [pos, neg] | identifier_body |
astropy_mm.py | # This program represents a Mueller matrix system for a Dual Channel Polarimeter, which includes:
# a Wollaston Prism, a diattenuating retarder representing the derotator, a rotatable HWP, a diattenuating
# retarder representing the third mirror, and a rotation matrix that compensates for parallactic rotation.
# This uses the pyMuellerMat library to represent the Mueller matrices and the astropy library.
#
# Main functions:
# a) Given some Stokes Parameters, the two beams of the Wollaston Prism are computed.
# b) Given a set of measurements from the two beams of the Wollaston prism and knowledge of the HWP angle and
# parallactic angle, the corresponding on-sky polarization is retrieved.
# c) Given a set of targets, plot the tracks over time and parallactic angle from the Keck telescope.
from pyMuellerMat import common_mms as cmm
from pyMuellerMat import MuellerMat
import math
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from astroplan import Observer, FixedTarget, download_IERS_A
from astropy.time import Time
import datetime
# Initialize the telescope
keck = Observer.at_site("Keck Observatory", timezone="US/Hawaii")
fig, ax = plt.subplots()
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 22
plt.rcParams['figure.figsize'] = (20, 20)
# Function to find the two beams of the Wollaston prism based on the Stokes parameters
def wollaston(stokes):
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP()])
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
pos = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
neg = sys_mm.evaluate() @ stokes
return [pos, neg]
# Function that plots the difference of two beams of a Wollaston prism with a half-wave plate over the half-wave
# plate angle
def plot_wollaston(stokes):
data = np.empty(shape=[0, 2])
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP()])
# Find data points from 0 to 2 * pi
for angle in np.arange(0, 2 * math.pi, 0.001):
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = math.degrees(angle)
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
I1 = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
I2 = sys_mm.evaluate() @ stokes
data = np.append(data, [[math.degrees(angle), (I1[0] - I2[0])]], axis=0)
# Plot the data points
plt.scatter(*data.T, s=1)
plt.title('Difference between Wollaston prism beams over HWP angle')
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('HWP angle (deg)')
ax = plt.gca()
ax.set_xlim(0, 360)
ax.set_xticks([0, 90, 180, 270, 360])
plt.show()
# Function to find the corresponding on-sky polarization based on data of the intensities of the two beams
# of the Wollaston prism, the HWP angle, and the parallactic angle
def on_sky(values):
i = np.empty(shape=[0, 1])
m_system = np.empty(shape=[0, 4])
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP(), cmm.Rotator()])
# Calculate the Mueller matrices
for j in range(len(values)):
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = values[j][2]
sys_mm.master_property_dict['Rotator']['pa'] = values[j][3]
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
row1 = sys_mm.evaluate()
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
row2 = sys_mm.evaluate()
i = np.append(i, [[values[j][0]]], axis=0)
m_system = np.append(m_system, [row1[0]], axis=0)
i = np.append(i, [[values[j][1]]], axis=0)
m_system = np.append(m_system, [row2[0]], axis=0)
# Return a least-squares solution
return inv(np.transpose(m_system) @ m_system) @ np.transpose(m_system) @ i
# Function that plots the difference of two beams of a Wollaston prism with a half-wave plate of fixed targets
# over the parallactic angle and time
def track_plot(targets):
# Initialize the start time, the targets, and the initial stokes vector
time = Time("2015-09-13")
step = np.arange(0, 1, 1 / 86400)
stokes = [[0], [1], [0], [0]]
hwp_angles = [0, 22.5]
derotator = cmm.DiattenuatorRetarder()
m3 = cmm.DiattenuatorRetarder()
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), derotator, cmm.HWP(), m3, cmm.Rotator()])
# Put in M3 - use astropy for altitude - diattenuating rotator - as a perfect mirror with an angle
# "perfect" - no retardance and no diattenuation
# Derotator - diattenuating retarder at a given parallactic angle
# Check diattenuating retarder form with goldstein and witzel
# Can calculate coefficients from material parameters - Fresnel reflection - index of refraction
# Fresnel coefficients - how to get the r values and possibly retardance
# use hour angle and dec to find the parallactic angle
# find the altitude given an hour angle and a target
for hwp in hwp_angles:
angle_plot = []
time_plot = []
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = hwp
for j in range(len(targets)):
wollaston_data = []
target = FixedTarget.from_name(targets[j])
# Calculate the parallactic angles and the altitudes
angles = np.degrees((keck.parallactic_angle(time + step, target)).to_value())
altitudes = (keck.altaz(time + step, target)).alt.to_value()
# Calculate the Wollaston beams and parallactic angle as time passes
for pa, alt in zip(angles, altitudes):
sys_mm.master_property_dict['Rotator']['pa'] = pa
m3.properties['theta'] = alt
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
I1 = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
I2 = sys_mm.evaluate() @ stokes
wollaston_data.append(np.asscalar(I1[0] - I2[0]))
angle_plot.append(np.array([angles, wollaston_data]).T)
time_plot.append(np.array([((time + step).to_datetime()), wollaston_data]).T)
# Plot the angle data points
for k in range(len(targets)):
x, y = angle_plot[k].T
plt.scatter(x, y, s=1, label=targets[k])
plt.title('Difference between Wollaston prism beams over parallactic angle with HWP at %.1f degrees' % hwp)
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('Parallactic angle (deg)')
plt.legend(loc="upper left")
plt.show()
# Plot the time data points
for k in range(len(targets)):
x, y = time_plot[k].T
plt.scatter(x, y, s=1, label=targets[k])
plt.title('Difference between Wollaston prism beams over time with HWP at %.1f degrees' % hwp)
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('Time (hour of day)')
plt.legend(loc="upper left")
ax = plt.gca()
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
ax.set_xlim(datetime.date(2015, 9, 13), datetime.date(2015, 9, 14))
plt.show()
# Main function that prompts user for input
def main():
download_IERS_A()
print("This program represents a Mueller matrix system for a dual channel polarimeter using the pyMuellerMat"
"library.")
find = ""
while find != "d":
# Prompt
|
print("\nProgram ended.\n")
if __name__ == "__main__":
main()
| find = input("\nWhat would you like to do?\na) compute the two beams of the Wollaston prism from Stokes "
"parameters\nb) find the corresponding on-sky polarization with a set of measurements from "
"the two beams of the Wollaston prism and HWP/parallactic angles data\nc) plot the tracks of "
"a set of targets over time and parallactic angle from the Keck telescope\n"
"d) quit the program\n(a/b/c/d): ").lower()
if find == "a":
stokes = []
# Get the Stokes parameters input and store into a list
while len(stokes) != 4:
stokes = input("\nEnter the Stokes parameters, separated by a space: ")
stokes = stokes.split()
if len(stokes) != 4:
print("Enter all four parameters!")
stokes = np.array([[float(stokes[0])], [float(stokes[1])], [float(stokes[2])], [float(stokes[3])]])
woll = wollaston(stokes)
print("\nThe two beams from the Wollaston prism are ", woll[0][0][0], " and ", woll[1][0][0])
input("\n---------------------------------------------------------------------------------------------")
# Plot the intensities if the user chooses to
plot = input("\nWould you like to see a plot of the intensities? (y/n): ").lower()
if plot == 'y':
plot_wollaston(stokes)
print("\n---------------------------------------------------------------------------------------------")
elif find == "b":
# Get the intensities and angle data
values = []
cont = "y"
while cont == "y":
I_1 = float(input("\nEnter the first I parameter in the pair (positive Wollaston): "))
I_2 = float(input("Enter the second I parameter in the pair (negative Wollaston): "))
hwp = math.radians(float(input("Enter the HWP angle (deg): ")))
sky = math.radians(float(input("Enter the parallactic angle (deg): ")))
values.append([I_1, I_2, hwp, sky])
cont = input("\nDo you have another pair of data to add? (y/n): ").lower()
print("\nThe corresponding on-sky polarization is:\n")
print(on_sky(values))
input("\n---------------------------------------------------------------------------------------------")
elif find == "c":
# Get the targets to track
targets = []
cont = "y"
while cont == "y":
target = input("\nEnter the name of the target to track: ")
targets.append(target)
cont = input("Add another target? (y/n): ").lower()
print("\nTracking", ', '.join(targets), "...\n")
track_plot(targets)
input("\n---------------------------------------------------------------------------------------------") | conditional_block |
astropy_mm.py | # This program represents a Mueller matrix system for a Dual Channel Polarimeter, which includes:
# a Wollaston Prism, a diattenuating retarder representing the derotator, a rotatable HWP, a diattenuating
# retarder representing the third mirror, and a rotation matrix that compensates for parallactic rotation.
# This uses the pyMuellerMat library to represent the Mueller matrices and the astropy library.
#
# Main functions:
# a) Given some Stokes Parameters, the two beams of the Wollaston Prism are computed.
# b) Given a set of measurements from the two beams of the Wollaston prism and knowledge of the HWP angle and
# parallactic angle, the corresponding on-sky polarization is retrieved.
# c) Given a set of targets, plot the tracks over time and parallactic angle from the Keck telescope.
from pyMuellerMat import common_mms as cmm
from pyMuellerMat import MuellerMat
import math
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from astroplan import Observer, FixedTarget, download_IERS_A
from astropy.time import Time
import datetime
# Initialize the telescope
keck = Observer.at_site("Keck Observatory", timezone="US/Hawaii")
fig, ax = plt.subplots()
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 22
plt.rcParams['figure.figsize'] = (20, 20)
# Function to find the two beams of the Wollaston prism based on the Stokes parameters
def wollaston(stokes):
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP()])
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
pos = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
neg = sys_mm.evaluate() @ stokes
return [pos, neg]
# Function that plots the difference of two beams of a Wollaston prism with a half-wave plate over the half-wave
# plate angle
def plot_wollaston(stokes):
data = np.empty(shape=[0, 2])
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP()])
# Find data points from 0 to 2 * pi
for angle in np.arange(0, 2 * math.pi, 0.001):
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = math.degrees(angle)
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
I1 = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
I2 = sys_mm.evaluate() @ stokes
data = np.append(data, [[math.degrees(angle), (I1[0] - I2[0])]], axis=0)
# Plot the data points
plt.scatter(*data.T, s=1)
plt.title('Difference between Wollaston prism beams over HWP angle')
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('HWP angle (deg)')
ax = plt.gca()
ax.set_xlim(0, 360)
ax.set_xticks([0, 90, 180, 270, 360])
plt.show()
# Function to find the corresponding on-sky polarization based on data of the intensities of the two beams
# of the Wollaston prism, the HWP angle, and the parallactic angle
def on_sky(values):
i = np.empty(shape=[0, 1])
m_system = np.empty(shape=[0, 4])
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP(), cmm.Rotator()])
# Calculate the Mueller matrices
for j in range(len(values)):
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = values[j][2]
sys_mm.master_property_dict['Rotator']['pa'] = values[j][3]
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
row1 = sys_mm.evaluate()
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
row2 = sys_mm.evaluate()
i = np.append(i, [[values[j][0]]], axis=0)
m_system = np.append(m_system, [row1[0]], axis=0)
i = np.append(i, [[values[j][1]]], axis=0) |
# Function that plots the difference of two beams of a Wollaston prism with a half-wave plate of fixed targets
# over the parallactic angle and time
def track_plot(targets):
# Initialize the start time, the targets, and the initial stokes vector
time = Time("2015-09-13")
step = np.arange(0, 1, 1 / 86400)
stokes = [[0], [1], [0], [0]]
hwp_angles = [0, 22.5]
derotator = cmm.DiattenuatorRetarder()
m3 = cmm.DiattenuatorRetarder()
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), derotator, cmm.HWP(), m3, cmm.Rotator()])
# Put in M3 - use astropy for altitude - diattenuating rotator - as a perfect mirror with an angle
# "perfect" - no retardance and no diattenuation
# Derotator - diattenuating retarder at a given parallactic angle
# Check diattenuating retarder form with goldstein and witzel
# Can calculate coefficients from material parameters - Fresnel reflection - index of refraction
# Fresnel coefficients - how to get the r values and possibly retardance
# use hour angle and dec to find the parallactic angle
# find the altitude given an hour angle and a target
for hwp in hwp_angles:
angle_plot = []
time_plot = []
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = hwp
for j in range(len(targets)):
wollaston_data = []
target = FixedTarget.from_name(targets[j])
# Calculate the parallactic angles and the altitudes
angles = np.degrees((keck.parallactic_angle(time + step, target)).to_value())
altitudes = (keck.altaz(time + step, target)).alt.to_value()
# Calculate the Wollaston beams and parallactic angle as time passes
for pa, alt in zip(angles, altitudes):
sys_mm.master_property_dict['Rotator']['pa'] = pa
m3.properties['theta'] = alt
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
I1 = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
I2 = sys_mm.evaluate() @ stokes
wollaston_data.append(np.asscalar(I1[0] - I2[0]))
angle_plot.append(np.array([angles, wollaston_data]).T)
time_plot.append(np.array([((time + step).to_datetime()), wollaston_data]).T)
# Plot the angle data points
for k in range(len(targets)):
x, y = angle_plot[k].T
plt.scatter(x, y, s=1, label=targets[k])
plt.title('Difference between Wollaston prism beams over parallactic angle with HWP at %.1f degrees' % hwp)
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('Parallactic angle (deg)')
plt.legend(loc="upper left")
plt.show()
# Plot the time data points
for k in range(len(targets)):
x, y = time_plot[k].T
plt.scatter(x, y, s=1, label=targets[k])
plt.title('Difference between Wollaston prism beams over time with HWP at %.1f degrees' % hwp)
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('Time (hour of day)')
plt.legend(loc="upper left")
ax = plt.gca()
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
ax.set_xlim(datetime.date(2015, 9, 13), datetime.date(2015, 9, 14))
plt.show()
# Main function that prompts user for input
def main():
download_IERS_A()
print("This program represents a Mueller matrix system for a dual channel polarimeter using the pyMuellerMat"
"library.")
find = ""
while find != "d":
# Prompt
find = input("\nWhat would you like to do?\na) compute the two beams of the Wollaston prism from Stokes "
"parameters\nb) find the corresponding on-sky polarization with a set of measurements from "
"the two beams of the Wollaston prism and HWP/parallactic angles data\nc) plot the tracks of "
"a set of targets over time and parallactic angle from the Keck telescope\n"
"d) quit the program\n(a/b/c/d): ").lower()
if find == "a":
stokes = []
# Get the Stokes parameters input and store into a list
while len(stokes) != 4:
stokes = input("\nEnter the Stokes parameters, separated by a space: ")
stokes = stokes.split()
if len(stokes) != 4:
print("Enter all four parameters!")
stokes = np.array([[float(stokes[0])], [float(stokes[1])], [float(stokes[2])], [float(stokes[3])]])
woll = wollaston(stokes)
print("\nThe two beams from the Wollaston prism are ", woll[0][0][0], " and ", woll[1][0][0])
input("\n---------------------------------------------------------------------------------------------")
# Plot the intensities if the user chooses to
plot = input("\nWould you like to see a plot of the intensities? (y/n): ").lower()
if plot == 'y':
plot_wollaston(stokes)
print("\n---------------------------------------------------------------------------------------------")
elif find == "b":
# Get the intensities and angle data
values = []
cont = "y"
while cont == "y":
I_1 = float(input("\nEnter the first I parameter in the pair (positive Wollaston): "))
I_2 = float(input("Enter the second I parameter in the pair (negative Wollaston): "))
hwp = math.radians(float(input("Enter the HWP angle (deg): ")))
sky = math.radians(float(input("Enter the parallactic angle (deg): ")))
values.append([I_1, I_2, hwp, sky])
cont = input("\nDo you have another pair of data to add? (y/n): ").lower()
print("\nThe corresponding on-sky polarization is:\n")
print(on_sky(values))
input("\n---------------------------------------------------------------------------------------------")
elif find == "c":
# Get the targets to track
targets = []
cont = "y"
while cont == "y":
target = input("\nEnter the name of the target to track: ")
targets.append(target)
cont = input("Add another target? (y/n): ").lower()
print("\nTracking", ', '.join(targets), "...\n")
track_plot(targets)
input("\n---------------------------------------------------------------------------------------------")
print("\nProgram ended.\n")
if __name__ == "__main__":
main() | m_system = np.append(m_system, [row2[0]], axis=0)
# Return a least-squares solution
return inv(np.transpose(m_system) @ m_system) @ np.transpose(m_system) @ i
| random_line_split |
astropy_mm.py | # This program represents a Mueller matrix system for a Dual Channel Polarimeter, which includes:
# a Wollaston Prism, a diattenuating retarder representing the derotator, a rotatable HWP, a diattenuating
# retarder representing the third mirror, and a rotation matrix that compensates for parallactic rotation.
# This uses the pyMuellerMat library to represent the Mueller matrices and the astropy library.
#
# Main functions:
# a) Given some Stokes Parameters, the two beams of the Wollaston Prism are computed.
# b) Given a set of measurements from the two beams of the Wollaston prism and knowledge of the HWP angle and
# parallactic angle, the corresponding on-sky polarization is retrieved.
# c) Given a set of targets, plot the tracks over time and parallactic angle from the Keck telescope.
from pyMuellerMat import common_mms as cmm
from pyMuellerMat import MuellerMat
import math
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from astroplan import Observer, FixedTarget, download_IERS_A
from astropy.time import Time
import datetime
# Initialize the telescope
keck = Observer.at_site("Keck Observatory", timezone="US/Hawaii")
fig, ax = plt.subplots()
plt.rcParams['font.family'] = 'Times New Roman'
plt.rcParams['font.size'] = 22
plt.rcParams['figure.figsize'] = (20, 20)
# Function to find the two beams of the Wollaston prism based on the Stokes parameters
def | (stokes):
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP()])
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
pos = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
neg = sys_mm.evaluate() @ stokes
return [pos, neg]
# Function that plots the difference of two beams of a Wollaston prism with a half-wave plate over the half-wave
# plate angle
def plot_wollaston(stokes):
data = np.empty(shape=[0, 2])
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP()])
# Find data points from 0 to 2 * pi
for angle in np.arange(0, 2 * math.pi, 0.001):
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = math.degrees(angle)
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
I1 = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
I2 = sys_mm.evaluate() @ stokes
data = np.append(data, [[math.degrees(angle), (I1[0] - I2[0])]], axis=0)
# Plot the data points
plt.scatter(*data.T, s=1)
plt.title('Difference between Wollaston prism beams over HWP angle')
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('HWP angle (deg)')
ax = plt.gca()
ax.set_xlim(0, 360)
ax.set_xticks([0, 90, 180, 270, 360])
plt.show()
# Function to find the corresponding on-sky polarization based on data of the intensities of the two beams
# of the Wollaston prism, the HWP angle, and the parallactic angle
def on_sky(values):
i = np.empty(shape=[0, 1])
m_system = np.empty(shape=[0, 4])
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), cmm.HWP(), cmm.Rotator()])
# Calculate the Mueller matrices
for j in range(len(values)):
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = values[j][2]
sys_mm.master_property_dict['Rotator']['pa'] = values[j][3]
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
row1 = sys_mm.evaluate()
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
row2 = sys_mm.evaluate()
i = np.append(i, [[values[j][0]]], axis=0)
m_system = np.append(m_system, [row1[0]], axis=0)
i = np.append(i, [[values[j][1]]], axis=0)
m_system = np.append(m_system, [row2[0]], axis=0)
# Return a least-squares solution
return inv(np.transpose(m_system) @ m_system) @ np.transpose(m_system) @ i
# Function that plots the difference of two beams of a Wollaston prism with a half-wave plate of fixed targets
# over the parallactic angle and time
def track_plot(targets):
# Initialize the start time, the targets, and the initial stokes vector
time = Time("2015-09-13")
step = np.arange(0, 1, 1 / 86400)
stokes = [[0], [1], [0], [0]]
hwp_angles = [0, 22.5]
derotator = cmm.DiattenuatorRetarder()
m3 = cmm.DiattenuatorRetarder()
sys_mm = MuellerMat.SystemMuellerMatrix([cmm.WollastonPrism(), derotator, cmm.HWP(), m3, cmm.Rotator()])
# Put in M3 - use astropy for altitude - diattenuating rotator - as a perfect mirror with an angle
# "perfect" - no retardance and no diattenuation
# Derotator - diattenuating retarder at a given parallactic angle
# Check diattenuating retarder form with goldstein and witzel
# Can calculate coefficients from material parameters - Fresnel reflection - index of refraction
# Fresnel coefficients - how to get the r values and possibly retardance
# use hour angle and dec to find the parallactic angle
# find the altitude given an hour angle and a target
for hwp in hwp_angles:
angle_plot = []
time_plot = []
sys_mm.master_property_dict['HalfwaveRetarder']['theta'] = hwp
for j in range(len(targets)):
wollaston_data = []
target = FixedTarget.from_name(targets[j])
# Calculate the parallactic angles and the altitudes
angles = np.degrees((keck.parallactic_angle(time + step, target)).to_value())
altitudes = (keck.altaz(time + step, target)).alt.to_value()
# Calculate the Wollaston beams and parallactic angle as time passes
for pa, alt in zip(angles, altitudes):
sys_mm.master_property_dict['Rotator']['pa'] = pa
m3.properties['theta'] = alt
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'o'
I1 = sys_mm.evaluate() @ stokes
sys_mm.master_property_dict['WollastonPrism']['beam'] = 'e'
I2 = sys_mm.evaluate() @ stokes
wollaston_data.append(np.asscalar(I1[0] - I2[0]))
angle_plot.append(np.array([angles, wollaston_data]).T)
time_plot.append(np.array([((time + step).to_datetime()), wollaston_data]).T)
# Plot the angle data points
for k in range(len(targets)):
x, y = angle_plot[k].T
plt.scatter(x, y, s=1, label=targets[k])
plt.title('Difference between Wollaston prism beams over parallactic angle with HWP at %.1f degrees' % hwp)
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('Parallactic angle (deg)')
plt.legend(loc="upper left")
plt.show()
# Plot the time data points
for k in range(len(targets)):
x, y = time_plot[k].T
plt.scatter(x, y, s=1, label=targets[k])
plt.title('Difference between Wollaston prism beams over time with HWP at %.1f degrees' % hwp)
plt.ylabel('Difference between $\mathdefault{I^+}$ and $\mathdefault{I^-}$')
plt.xlabel('Time (hour of day)')
plt.legend(loc="upper left")
ax = plt.gca()
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
ax.set_xlim(datetime.date(2015, 9, 13), datetime.date(2015, 9, 14))
plt.show()
# Main function that prompts user for input
def main():
download_IERS_A()
print("This program represents a Mueller matrix system for a dual channel polarimeter using the pyMuellerMat"
"library.")
find = ""
while find != "d":
# Prompt
find = input("\nWhat would you like to do?\na) compute the two beams of the Wollaston prism from Stokes "
"parameters\nb) find the corresponding on-sky polarization with a set of measurements from "
"the two beams of the Wollaston prism and HWP/parallactic angles data\nc) plot the tracks of "
"a set of targets over time and parallactic angle from the Keck telescope\n"
"d) quit the program\n(a/b/c/d): ").lower()
if find == "a":
stokes = []
# Get the Stokes parameters input and store into a list
while len(stokes) != 4:
stokes = input("\nEnter the Stokes parameters, separated by a space: ")
stokes = stokes.split()
if len(stokes) != 4:
print("Enter all four parameters!")
stokes = np.array([[float(stokes[0])], [float(stokes[1])], [float(stokes[2])], [float(stokes[3])]])
woll = wollaston(stokes)
print("\nThe two beams from the Wollaston prism are ", woll[0][0][0], " and ", woll[1][0][0])
input("\n---------------------------------------------------------------------------------------------")
# Plot the intensities if the user chooses to
plot = input("\nWould you like to see a plot of the intensities? (y/n): ").lower()
if plot == 'y':
plot_wollaston(stokes)
print("\n---------------------------------------------------------------------------------------------")
elif find == "b":
# Get the intensities and angle data
values = []
cont = "y"
while cont == "y":
I_1 = float(input("\nEnter the first I parameter in the pair (positive Wollaston): "))
I_2 = float(input("Enter the second I parameter in the pair (negative Wollaston): "))
hwp = math.radians(float(input("Enter the HWP angle (deg): ")))
sky = math.radians(float(input("Enter the parallactic angle (deg): ")))
values.append([I_1, I_2, hwp, sky])
cont = input("\nDo you have another pair of data to add? (y/n): ").lower()
print("\nThe corresponding on-sky polarization is:\n")
print(on_sky(values))
input("\n---------------------------------------------------------------------------------------------")
elif find == "c":
# Get the targets to track
targets = []
cont = "y"
while cont == "y":
target = input("\nEnter the name of the target to track: ")
targets.append(target)
cont = input("Add another target? (y/n): ").lower()
print("\nTracking", ', '.join(targets), "...\n")
track_plot(targets)
input("\n---------------------------------------------------------------------------------------------")
print("\nProgram ended.\n")
if __name__ == "__main__":
main()
| wollaston | identifier_name |
vmrestore-admitter.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
"strings"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/cache"
"kubevirt.io/api/core"
v1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1alpha1"
"kubevirt.io/client-go/kubecli"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
// VMRestoreAdmitter validates VirtualMachineRestores
type VMRestoreAdmitter struct {
Config *virtconfig.ClusterConfig
Client kubecli.KubevirtClient
VMRestoreInformer cache.SharedIndexInformer
}
// NewVMRestoreAdmitter creates a VMRestoreAdmitter
func NewVMRestoreAdmitter(config *virtconfig.ClusterConfig, client kubecli.KubevirtClient, vmRestoreInformer cache.SharedIndexInformer) *VMRestoreAdmitter {
return &VMRestoreAdmitter{
Config: config,
Client: client,
VMRestoreInformer: vmRestoreInformer,
}
}
// Admit validates an AdmissionReview
func (admitter *VMRestoreAdmitter) Admit(ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request.Resource.Group != snapshotv1.SchemeGroupVersion.Group ||
ar.Request.Resource.Resource != "virtualmachinerestores" |
if ar.Request.Operation == admissionv1.Create && !admitter.Config.SnapshotEnabled() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("Snapshot/Restore feature gate not enabled"))
}
vmRestore := &snapshotv1.VirtualMachineRestore{}
// TODO ideally use UniversalDeserializer here
err := json.Unmarshal(ar.Request.Object.Raw, vmRestore)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
var causes []metav1.StatusCause
var targetVMExists bool
switch ar.Request.Operation {
case admissionv1.Create:
var targetUID *types.UID
targetField := k8sfield.NewPath("spec", "target")
if vmRestore.Spec.Target.APIGroup == nil {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotFound,
Message: "missing apiGroup",
Field: targetField.Child("apiGroup").String(),
},
}
} else {
switch *vmRestore.Spec.Target.APIGroup {
case core.GroupName:
switch vmRestore.Spec.Target.Kind {
case "VirtualMachine":
causes, targetUID, targetVMExists, err = admitter.validateCreateVM(k8sfield.NewPath("spec"), vmRestore)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid kind",
Field: targetField.Child("kind").String(),
},
}
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid apiGroup",
Field: targetField.Child("apiGroup").String(),
},
}
}
}
snapshotCauses, err := admitter.validateSnapshot(
k8sfield.NewPath("spec", "virtualMachineSnapshotName"),
ar.Request.Namespace,
vmRestore.Spec.VirtualMachineSnapshotName,
targetUID,
targetVMExists,
)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
objects, err := admitter.VMRestoreInformer.GetIndexer().ByIndex(cache.NamespaceIndex, ar.Request.Namespace)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
for _, obj := range objects {
r := obj.(*snapshotv1.VirtualMachineRestore)
if equality.Semantic.DeepEqual(r.Spec.Target, vmRestore.Spec.Target) &&
(r.Status == nil || r.Status.Complete == nil || !*r.Status.Complete) {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineRestore %q in progress", r.Name),
Field: targetField.String(),
}
causes = append(causes, cause)
}
}
causes = append(causes, snapshotCauses...)
case admissionv1.Update:
prevObj := &snapshotv1.VirtualMachineRestore{}
err = json.Unmarshal(ar.Request.OldObject.Raw, prevObj)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if !equality.Semantic.DeepEqual(prevObj.Spec, vmRestore.Spec) {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "spec in immutable after creation",
Field: k8sfield.NewPath("spec").String(),
},
}
}
default:
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected operation %s", ar.Request.Operation))
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{
Allowed: true,
}
return &reviewResponse
}
func (admitter *VMRestoreAdmitter) validateCreateVM(field *k8sfield.Path, vmRestore *snapshotv1.VirtualMachineRestore) (causes []metav1.StatusCause, uid *types.UID, targetVMExists bool, err error) {
vmName := vmRestore.Spec.Target.Name
namespace := vmRestore.Namespace
causes = admitter.validatePatches(vmRestore.Spec.Patches, field.Child("patches"))
vm, err := admitter.Client.VirtualMachine(namespace).Get(context.Background(), vmName, &metav1.GetOptions{})
if errors.IsNotFound(err) {
// If the target VM does not exist it would be automatically created by the restore controller
return nil, nil, false, nil
}
if err != nil {
return nil, nil, false, err
}
rs, err := vm.RunStrategy()
if err != nil {
return nil, nil, true, err
}
if rs != v1.RunStrategyHalted {
var cause metav1.StatusCause
targetField := field.Child("target")
if vm.Spec.Running != nil && *vm.Spec.Running {
cause = metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachine %q is not stopped", vmName),
Field: targetField.String(),
}
} else {
cause = metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachine %q run strategy has to be %s", vmName, v1.RunStrategyHalted),
Field: targetField.String(),
}
}
causes = append(causes, cause)
}
return causes, &vm.UID, true, nil
}
func (admitter *VMRestoreAdmitter) validatePatches(patches []string, field *k8sfield.Path) (causes []metav1.StatusCause) {
// Validate patches are either on labels/annotations or on elements under "/spec/" path only
for _, patch := range patches {
for _, patchKeyValue := range strings.Split(strings.Trim(patch, "{}"), ",") {
// For example, if the original patch is {"op": "replace", "path": "/metadata/name", "value": "someValue"}
// now we're iterating on [`"op": "replace"`, `"path": "/metadata/name"`, `"value": "someValue"`]
keyValSlice := strings.Split(patchKeyValue, ":")
if len(keyValSlice) != 2 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(`patch format is not valid - one ":" expected in a single key-value json patch: %s`, patchKeyValue),
Field: field.String(),
})
continue
}
key := strings.TrimSpace(keyValSlice[0])
value := strings.TrimSpace(keyValSlice[1])
if key == `"path"` {
if strings.HasPrefix(value, `"/metadata/labels/`) || strings.HasPrefix(value, `"/metadata/annotations/`) {
continue
}
if !strings.HasPrefix(value, `"/spec/`) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("patching is valid only for elements under /spec/ only: %s", patchKeyValue),
Field: field.String(),
})
}
}
}
}
return causes
}
func (admitter *VMRestoreAdmitter) validateSnapshot(field *k8sfield.Path, namespace, name string, targetUID *types.UID, targetVMExists bool) ([]metav1.StatusCause, error) {
snapshot, err := admitter.Client.VirtualMachineSnapshot(namespace).Get(context.Background(), name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q does not exist", name),
Field: field.String(),
},
}, nil
}
if err != nil {
return nil, err
}
var causes []metav1.StatusCause
if snapshot.Status != nil && snapshot.Status.Phase == snapshotv1.Failed {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q has failed and is invalid to use", name),
Field: field.String(),
}
causes = append(causes, cause)
}
if snapshot.Status == nil || snapshot.Status.ReadyToUse == nil || !*snapshot.Status.ReadyToUse {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q is not ready to use", name),
Field: field.String(),
}
causes = append(causes, cause)
}
sourceTargetVmsAreDifferent := targetUID != nil && snapshot.Status != nil && snapshot.Status.SourceUID != nil && *targetUID != *snapshot.Status.SourceUID
if sourceTargetVmsAreDifferent && targetVMExists {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("when shapsnot source and restore target VMs are different - target VM must not exist"),
Field: field.String(),
}
causes = append(causes, cause)
}
return causes, nil
}
| {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource %+v", ar.Request.Resource))
} | conditional_block |
vmrestore-admitter.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
"strings"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/cache"
"kubevirt.io/api/core"
v1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1alpha1"
"kubevirt.io/client-go/kubecli"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
// VMRestoreAdmitter validates VirtualMachineRestores
type VMRestoreAdmitter struct {
Config *virtconfig.ClusterConfig
Client kubecli.KubevirtClient
VMRestoreInformer cache.SharedIndexInformer
}
// NewVMRestoreAdmitter creates a VMRestoreAdmitter
func NewVMRestoreAdmitter(config *virtconfig.ClusterConfig, client kubecli.KubevirtClient, vmRestoreInformer cache.SharedIndexInformer) *VMRestoreAdmitter {
return &VMRestoreAdmitter{
Config: config,
Client: client,
VMRestoreInformer: vmRestoreInformer,
}
}
// Admit validates an AdmissionReview
func (admitter *VMRestoreAdmitter) Admit(ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse |
func (admitter *VMRestoreAdmitter) validateCreateVM(field *k8sfield.Path, vmRestore *snapshotv1.VirtualMachineRestore) (causes []metav1.StatusCause, uid *types.UID, targetVMExists bool, err error) {
vmName := vmRestore.Spec.Target.Name
namespace := vmRestore.Namespace
causes = admitter.validatePatches(vmRestore.Spec.Patches, field.Child("patches"))
vm, err := admitter.Client.VirtualMachine(namespace).Get(context.Background(), vmName, &metav1.GetOptions{})
if errors.IsNotFound(err) {
// If the target VM does not exist it would be automatically created by the restore controller
return nil, nil, false, nil
}
if err != nil {
return nil, nil, false, err
}
rs, err := vm.RunStrategy()
if err != nil {
return nil, nil, true, err
}
if rs != v1.RunStrategyHalted {
var cause metav1.StatusCause
targetField := field.Child("target")
if vm.Spec.Running != nil && *vm.Spec.Running {
cause = metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachine %q is not stopped", vmName),
Field: targetField.String(),
}
} else {
cause = metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachine %q run strategy has to be %s", vmName, v1.RunStrategyHalted),
Field: targetField.String(),
}
}
causes = append(causes, cause)
}
return causes, &vm.UID, true, nil
}
func (admitter *VMRestoreAdmitter) validatePatches(patches []string, field *k8sfield.Path) (causes []metav1.StatusCause) {
// Validate patches are either on labels/annotations or on elements under "/spec/" path only
for _, patch := range patches {
for _, patchKeyValue := range strings.Split(strings.Trim(patch, "{}"), ",") {
// For example, if the original patch is {"op": "replace", "path": "/metadata/name", "value": "someValue"}
// now we're iterating on [`"op": "replace"`, `"path": "/metadata/name"`, `"value": "someValue"`]
keyValSlice := strings.Split(patchKeyValue, ":")
if len(keyValSlice) != 2 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(`patch format is not valid - one ":" expected in a single key-value json patch: %s`, patchKeyValue),
Field: field.String(),
})
continue
}
key := strings.TrimSpace(keyValSlice[0])
value := strings.TrimSpace(keyValSlice[1])
if key == `"path"` {
if strings.HasPrefix(value, `"/metadata/labels/`) || strings.HasPrefix(value, `"/metadata/annotations/`) {
continue
}
if !strings.HasPrefix(value, `"/spec/`) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("patching is valid only for elements under /spec/ only: %s", patchKeyValue),
Field: field.String(),
})
}
}
}
}
return causes
}
func (admitter *VMRestoreAdmitter) validateSnapshot(field *k8sfield.Path, namespace, name string, targetUID *types.UID, targetVMExists bool) ([]metav1.StatusCause, error) {
snapshot, err := admitter.Client.VirtualMachineSnapshot(namespace).Get(context.Background(), name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q does not exist", name),
Field: field.String(),
},
}, nil
}
if err != nil {
return nil, err
}
var causes []metav1.StatusCause
if snapshot.Status != nil && snapshot.Status.Phase == snapshotv1.Failed {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q has failed and is invalid to use", name),
Field: field.String(),
}
causes = append(causes, cause)
}
if snapshot.Status == nil || snapshot.Status.ReadyToUse == nil || !*snapshot.Status.ReadyToUse {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q is not ready to use", name),
Field: field.String(),
}
causes = append(causes, cause)
}
sourceTargetVmsAreDifferent := targetUID != nil && snapshot.Status != nil && snapshot.Status.SourceUID != nil && *targetUID != *snapshot.Status.SourceUID
if sourceTargetVmsAreDifferent && targetVMExists {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("when shapsnot source and restore target VMs are different - target VM must not exist"),
Field: field.String(),
}
causes = append(causes, cause)
}
return causes, nil
}
| {
if ar.Request.Resource.Group != snapshotv1.SchemeGroupVersion.Group ||
ar.Request.Resource.Resource != "virtualmachinerestores" {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource %+v", ar.Request.Resource))
}
if ar.Request.Operation == admissionv1.Create && !admitter.Config.SnapshotEnabled() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("Snapshot/Restore feature gate not enabled"))
}
vmRestore := &snapshotv1.VirtualMachineRestore{}
// TODO ideally use UniversalDeserializer here
err := json.Unmarshal(ar.Request.Object.Raw, vmRestore)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
var causes []metav1.StatusCause
var targetVMExists bool
switch ar.Request.Operation {
case admissionv1.Create:
var targetUID *types.UID
targetField := k8sfield.NewPath("spec", "target")
if vmRestore.Spec.Target.APIGroup == nil {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotFound,
Message: "missing apiGroup",
Field: targetField.Child("apiGroup").String(),
},
}
} else {
switch *vmRestore.Spec.Target.APIGroup {
case core.GroupName:
switch vmRestore.Spec.Target.Kind {
case "VirtualMachine":
causes, targetUID, targetVMExists, err = admitter.validateCreateVM(k8sfield.NewPath("spec"), vmRestore)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid kind",
Field: targetField.Child("kind").String(),
},
}
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid apiGroup",
Field: targetField.Child("apiGroup").String(),
},
}
}
}
snapshotCauses, err := admitter.validateSnapshot(
k8sfield.NewPath("spec", "virtualMachineSnapshotName"),
ar.Request.Namespace,
vmRestore.Spec.VirtualMachineSnapshotName,
targetUID,
targetVMExists,
)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
objects, err := admitter.VMRestoreInformer.GetIndexer().ByIndex(cache.NamespaceIndex, ar.Request.Namespace)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
for _, obj := range objects {
r := obj.(*snapshotv1.VirtualMachineRestore)
if equality.Semantic.DeepEqual(r.Spec.Target, vmRestore.Spec.Target) &&
(r.Status == nil || r.Status.Complete == nil || !*r.Status.Complete) {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineRestore %q in progress", r.Name),
Field: targetField.String(),
}
causes = append(causes, cause)
}
}
causes = append(causes, snapshotCauses...)
case admissionv1.Update:
prevObj := &snapshotv1.VirtualMachineRestore{}
err = json.Unmarshal(ar.Request.OldObject.Raw, prevObj)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if !equality.Semantic.DeepEqual(prevObj.Spec, vmRestore.Spec) {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "spec in immutable after creation",
Field: k8sfield.NewPath("spec").String(),
},
}
}
default:
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected operation %s", ar.Request.Operation))
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{
Allowed: true,
}
return &reviewResponse
} | identifier_body |
vmrestore-admitter.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc. | package admitters
import (
"context"
"encoding/json"
"fmt"
"strings"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/cache"
"kubevirt.io/api/core"
v1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1alpha1"
"kubevirt.io/client-go/kubecli"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
// VMRestoreAdmitter validates VirtualMachineRestores
type VMRestoreAdmitter struct {
Config *virtconfig.ClusterConfig
Client kubecli.KubevirtClient
VMRestoreInformer cache.SharedIndexInformer
}
// NewVMRestoreAdmitter creates a VMRestoreAdmitter
func NewVMRestoreAdmitter(config *virtconfig.ClusterConfig, client kubecli.KubevirtClient, vmRestoreInformer cache.SharedIndexInformer) *VMRestoreAdmitter {
return &VMRestoreAdmitter{
Config: config,
Client: client,
VMRestoreInformer: vmRestoreInformer,
}
}
// Admit validates an AdmissionReview
func (admitter *VMRestoreAdmitter) Admit(ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request.Resource.Group != snapshotv1.SchemeGroupVersion.Group ||
ar.Request.Resource.Resource != "virtualmachinerestores" {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource %+v", ar.Request.Resource))
}
if ar.Request.Operation == admissionv1.Create && !admitter.Config.SnapshotEnabled() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("Snapshot/Restore feature gate not enabled"))
}
vmRestore := &snapshotv1.VirtualMachineRestore{}
// TODO ideally use UniversalDeserializer here
err := json.Unmarshal(ar.Request.Object.Raw, vmRestore)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
var causes []metav1.StatusCause
var targetVMExists bool
switch ar.Request.Operation {
case admissionv1.Create:
var targetUID *types.UID
targetField := k8sfield.NewPath("spec", "target")
if vmRestore.Spec.Target.APIGroup == nil {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotFound,
Message: "missing apiGroup",
Field: targetField.Child("apiGroup").String(),
},
}
} else {
switch *vmRestore.Spec.Target.APIGroup {
case core.GroupName:
switch vmRestore.Spec.Target.Kind {
case "VirtualMachine":
causes, targetUID, targetVMExists, err = admitter.validateCreateVM(k8sfield.NewPath("spec"), vmRestore)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid kind",
Field: targetField.Child("kind").String(),
},
}
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid apiGroup",
Field: targetField.Child("apiGroup").String(),
},
}
}
}
snapshotCauses, err := admitter.validateSnapshot(
k8sfield.NewPath("spec", "virtualMachineSnapshotName"),
ar.Request.Namespace,
vmRestore.Spec.VirtualMachineSnapshotName,
targetUID,
targetVMExists,
)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
objects, err := admitter.VMRestoreInformer.GetIndexer().ByIndex(cache.NamespaceIndex, ar.Request.Namespace)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
for _, obj := range objects {
r := obj.(*snapshotv1.VirtualMachineRestore)
if equality.Semantic.DeepEqual(r.Spec.Target, vmRestore.Spec.Target) &&
(r.Status == nil || r.Status.Complete == nil || !*r.Status.Complete) {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineRestore %q in progress", r.Name),
Field: targetField.String(),
}
causes = append(causes, cause)
}
}
causes = append(causes, snapshotCauses...)
case admissionv1.Update:
prevObj := &snapshotv1.VirtualMachineRestore{}
err = json.Unmarshal(ar.Request.OldObject.Raw, prevObj)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if !equality.Semantic.DeepEqual(prevObj.Spec, vmRestore.Spec) {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "spec in immutable after creation",
Field: k8sfield.NewPath("spec").String(),
},
}
}
default:
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected operation %s", ar.Request.Operation))
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{
Allowed: true,
}
return &reviewResponse
}
func (admitter *VMRestoreAdmitter) validateCreateVM(field *k8sfield.Path, vmRestore *snapshotv1.VirtualMachineRestore) (causes []metav1.StatusCause, uid *types.UID, targetVMExists bool, err error) {
vmName := vmRestore.Spec.Target.Name
namespace := vmRestore.Namespace
causes = admitter.validatePatches(vmRestore.Spec.Patches, field.Child("patches"))
vm, err := admitter.Client.VirtualMachine(namespace).Get(context.Background(), vmName, &metav1.GetOptions{})
if errors.IsNotFound(err) {
// If the target VM does not exist it would be automatically created by the restore controller
return nil, nil, false, nil
}
if err != nil {
return nil, nil, false, err
}
rs, err := vm.RunStrategy()
if err != nil {
return nil, nil, true, err
}
if rs != v1.RunStrategyHalted {
var cause metav1.StatusCause
targetField := field.Child("target")
if vm.Spec.Running != nil && *vm.Spec.Running {
cause = metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachine %q is not stopped", vmName),
Field: targetField.String(),
}
} else {
cause = metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachine %q run strategy has to be %s", vmName, v1.RunStrategyHalted),
Field: targetField.String(),
}
}
causes = append(causes, cause)
}
return causes, &vm.UID, true, nil
}
func (admitter *VMRestoreAdmitter) validatePatches(patches []string, field *k8sfield.Path) (causes []metav1.StatusCause) {
// Validate patches are either on labels/annotations or on elements under "/spec/" path only
for _, patch := range patches {
for _, patchKeyValue := range strings.Split(strings.Trim(patch, "{}"), ",") {
// For example, if the original patch is {"op": "replace", "path": "/metadata/name", "value": "someValue"}
// now we're iterating on [`"op": "replace"`, `"path": "/metadata/name"`, `"value": "someValue"`]
keyValSlice := strings.Split(patchKeyValue, ":")
if len(keyValSlice) != 2 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(`patch format is not valid - one ":" expected in a single key-value json patch: %s`, patchKeyValue),
Field: field.String(),
})
continue
}
key := strings.TrimSpace(keyValSlice[0])
value := strings.TrimSpace(keyValSlice[1])
if key == `"path"` {
if strings.HasPrefix(value, `"/metadata/labels/`) || strings.HasPrefix(value, `"/metadata/annotations/`) {
continue
}
if !strings.HasPrefix(value, `"/spec/`) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("patching is valid only for elements under /spec/ only: %s", patchKeyValue),
Field: field.String(),
})
}
}
}
}
return causes
}
func (admitter *VMRestoreAdmitter) validateSnapshot(field *k8sfield.Path, namespace, name string, targetUID *types.UID, targetVMExists bool) ([]metav1.StatusCause, error) {
snapshot, err := admitter.Client.VirtualMachineSnapshot(namespace).Get(context.Background(), name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q does not exist", name),
Field: field.String(),
},
}, nil
}
if err != nil {
return nil, err
}
var causes []metav1.StatusCause
if snapshot.Status != nil && snapshot.Status.Phase == snapshotv1.Failed {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q has failed and is invalid to use", name),
Field: field.String(),
}
causes = append(causes, cause)
}
if snapshot.Status == nil || snapshot.Status.ReadyToUse == nil || !*snapshot.Status.ReadyToUse {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q is not ready to use", name),
Field: field.String(),
}
causes = append(causes, cause)
}
sourceTargetVmsAreDifferent := targetUID != nil && snapshot.Status != nil && snapshot.Status.SourceUID != nil && *targetUID != *snapshot.Status.SourceUID
if sourceTargetVmsAreDifferent && targetVMExists {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("when shapsnot source and restore target VMs are different - target VM must not exist"),
Field: field.String(),
}
causes = append(causes, cause)
}
return causes, nil
} | *
*/
| random_line_split |
vmrestore-admitter.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package admitters
import (
"context"
"encoding/json"
"fmt"
"strings"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8sfield "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/cache"
"kubevirt.io/api/core"
v1 "kubevirt.io/api/core/v1"
snapshotv1 "kubevirt.io/api/snapshot/v1alpha1"
"kubevirt.io/client-go/kubecli"
webhookutils "kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
)
// VMRestoreAdmitter validates VirtualMachineRestores
type VMRestoreAdmitter struct {
Config *virtconfig.ClusterConfig
Client kubecli.KubevirtClient
VMRestoreInformer cache.SharedIndexInformer
}
// NewVMRestoreAdmitter creates a VMRestoreAdmitter
func NewVMRestoreAdmitter(config *virtconfig.ClusterConfig, client kubecli.KubevirtClient, vmRestoreInformer cache.SharedIndexInformer) *VMRestoreAdmitter {
return &VMRestoreAdmitter{
Config: config,
Client: client,
VMRestoreInformer: vmRestoreInformer,
}
}
// Admit validates an AdmissionReview
func (admitter *VMRestoreAdmitter) | (ar *admissionv1.AdmissionReview) *admissionv1.AdmissionResponse {
if ar.Request.Resource.Group != snapshotv1.SchemeGroupVersion.Group ||
ar.Request.Resource.Resource != "virtualmachinerestores" {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected resource %+v", ar.Request.Resource))
}
if ar.Request.Operation == admissionv1.Create && !admitter.Config.SnapshotEnabled() {
return webhookutils.ToAdmissionResponseError(fmt.Errorf("Snapshot/Restore feature gate not enabled"))
}
vmRestore := &snapshotv1.VirtualMachineRestore{}
// TODO ideally use UniversalDeserializer here
err := json.Unmarshal(ar.Request.Object.Raw, vmRestore)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
var causes []metav1.StatusCause
var targetVMExists bool
switch ar.Request.Operation {
case admissionv1.Create:
var targetUID *types.UID
targetField := k8sfield.NewPath("spec", "target")
if vmRestore.Spec.Target.APIGroup == nil {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueNotFound,
Message: "missing apiGroup",
Field: targetField.Child("apiGroup").String(),
},
}
} else {
switch *vmRestore.Spec.Target.APIGroup {
case core.GroupName:
switch vmRestore.Spec.Target.Kind {
case "VirtualMachine":
causes, targetUID, targetVMExists, err = admitter.validateCreateVM(k8sfield.NewPath("spec"), vmRestore)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid kind",
Field: targetField.Child("kind").String(),
},
}
}
default:
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "invalid apiGroup",
Field: targetField.Child("apiGroup").String(),
},
}
}
}
snapshotCauses, err := admitter.validateSnapshot(
k8sfield.NewPath("spec", "virtualMachineSnapshotName"),
ar.Request.Namespace,
vmRestore.Spec.VirtualMachineSnapshotName,
targetUID,
targetVMExists,
)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
objects, err := admitter.VMRestoreInformer.GetIndexer().ByIndex(cache.NamespaceIndex, ar.Request.Namespace)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
for _, obj := range objects {
r := obj.(*snapshotv1.VirtualMachineRestore)
if equality.Semantic.DeepEqual(r.Spec.Target, vmRestore.Spec.Target) &&
(r.Status == nil || r.Status.Complete == nil || !*r.Status.Complete) {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineRestore %q in progress", r.Name),
Field: targetField.String(),
}
causes = append(causes, cause)
}
}
causes = append(causes, snapshotCauses...)
case admissionv1.Update:
prevObj := &snapshotv1.VirtualMachineRestore{}
err = json.Unmarshal(ar.Request.OldObject.Raw, prevObj)
if err != nil {
return webhookutils.ToAdmissionResponseError(err)
}
if !equality.Semantic.DeepEqual(prevObj.Spec, vmRestore.Spec) {
causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: "spec in immutable after creation",
Field: k8sfield.NewPath("spec").String(),
},
}
}
default:
return webhookutils.ToAdmissionResponseError(fmt.Errorf("unexpected operation %s", ar.Request.Operation))
}
if len(causes) > 0 {
return webhookutils.ToAdmissionResponse(causes)
}
reviewResponse := admissionv1.AdmissionResponse{
Allowed: true,
}
return &reviewResponse
}
func (admitter *VMRestoreAdmitter) validateCreateVM(field *k8sfield.Path, vmRestore *snapshotv1.VirtualMachineRestore) (causes []metav1.StatusCause, uid *types.UID, targetVMExists bool, err error) {
vmName := vmRestore.Spec.Target.Name
namespace := vmRestore.Namespace
causes = admitter.validatePatches(vmRestore.Spec.Patches, field.Child("patches"))
vm, err := admitter.Client.VirtualMachine(namespace).Get(context.Background(), vmName, &metav1.GetOptions{})
if errors.IsNotFound(err) {
// If the target VM does not exist it would be automatically created by the restore controller
return nil, nil, false, nil
}
if err != nil {
return nil, nil, false, err
}
rs, err := vm.RunStrategy()
if err != nil {
return nil, nil, true, err
}
if rs != v1.RunStrategyHalted {
var cause metav1.StatusCause
targetField := field.Child("target")
if vm.Spec.Running != nil && *vm.Spec.Running {
cause = metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachine %q is not stopped", vmName),
Field: targetField.String(),
}
} else {
cause = metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachine %q run strategy has to be %s", vmName, v1.RunStrategyHalted),
Field: targetField.String(),
}
}
causes = append(causes, cause)
}
return causes, &vm.UID, true, nil
}
func (admitter *VMRestoreAdmitter) validatePatches(patches []string, field *k8sfield.Path) (causes []metav1.StatusCause) {
// Validate patches are either on labels/annotations or on elements under "/spec/" path only
for _, patch := range patches {
for _, patchKeyValue := range strings.Split(strings.Trim(patch, "{}"), ",") {
// For example, if the original patch is {"op": "replace", "path": "/metadata/name", "value": "someValue"}
// now we're iterating on [`"op": "replace"`, `"path": "/metadata/name"`, `"value": "someValue"`]
keyValSlice := strings.Split(patchKeyValue, ":")
if len(keyValSlice) != 2 {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf(`patch format is not valid - one ":" expected in a single key-value json patch: %s`, patchKeyValue),
Field: field.String(),
})
continue
}
key := strings.TrimSpace(keyValSlice[0])
value := strings.TrimSpace(keyValSlice[1])
if key == `"path"` {
if strings.HasPrefix(value, `"/metadata/labels/`) || strings.HasPrefix(value, `"/metadata/annotations/`) {
continue
}
if !strings.HasPrefix(value, `"/spec/`) {
causes = append(causes, metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("patching is valid only for elements under /spec/ only: %s", patchKeyValue),
Field: field.String(),
})
}
}
}
}
return causes
}
func (admitter *VMRestoreAdmitter) validateSnapshot(field *k8sfield.Path, namespace, name string, targetUID *types.UID, targetVMExists bool) ([]metav1.StatusCause, error) {
snapshot, err := admitter.Client.VirtualMachineSnapshot(namespace).Get(context.Background(), name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return []metav1.StatusCause{
{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q does not exist", name),
Field: field.String(),
},
}, nil
}
if err != nil {
return nil, err
}
var causes []metav1.StatusCause
if snapshot.Status != nil && snapshot.Status.Phase == snapshotv1.Failed {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q has failed and is invalid to use", name),
Field: field.String(),
}
causes = append(causes, cause)
}
if snapshot.Status == nil || snapshot.Status.ReadyToUse == nil || !*snapshot.Status.ReadyToUse {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("VirtualMachineSnapshot %q is not ready to use", name),
Field: field.String(),
}
causes = append(causes, cause)
}
sourceTargetVmsAreDifferent := targetUID != nil && snapshot.Status != nil && snapshot.Status.SourceUID != nil && *targetUID != *snapshot.Status.SourceUID
if sourceTargetVmsAreDifferent && targetVMExists {
cause := metav1.StatusCause{
Type: metav1.CauseTypeFieldValueInvalid,
Message: fmt.Sprintf("when shapsnot source and restore target VMs are different - target VM must not exist"),
Field: field.String(),
}
causes = append(causes, cause)
}
return causes, nil
}
| Admit | identifier_name |
config_diff.rs | use std::num::NonZeroU32;
use merge::Merge;
use schemars::JsonSchema;
use segment::types::{HnswConfig, ProductQuantization, ScalarQuantization};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use validator::{Validate, ValidationErrors};
use crate::config::{CollectionParams, WalConfig};
use crate::operations::types::CollectionResult;
use crate::optimizers_builder::OptimizersConfig;
// Structures for partial update of collection params
// TODO: make auto-generated somehow...
pub trait DiffConfig<T: DeserializeOwned + Serialize> {
/// Update the given `config` with fields in this diff
///
/// This clones, modifies and returns `config`.
///
/// This diff has higher priority, meaning that fields specified in this diff will always be in
/// the returned object.
fn update(self, config: &T) -> CollectionResult<T>
where
Self: Sized + Serialize + DeserializeOwned + Merge,
{
update_config(config, self)
}
fn from_full(full: &T) -> CollectionResult<Self>
where
Self: Sized + Serialize + DeserializeOwned,
|
}
#[derive(
Debug,
Default,
Deserialize,
Serialize,
JsonSchema,
Validate,
Copy,
Clone,
PartialEq,
Eq,
Merge,
Hash,
)]
#[serde(rename_all = "snake_case")]
pub struct HnswConfigDiff {
/// Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.
#[serde(skip_serializing_if = "Option::is_none")]
pub m: Option<usize>,
/// Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index.
#[validate(range(min = 4))]
#[serde(skip_serializing_if = "Option::is_none")]
pub ef_construct: Option<usize>,
/// Minimal size (in kilobytes) of vectors for additional payload-based indexing.
/// If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used -
/// in this case full-scan search should be preferred by query planner and additional indexing is not required.
/// Note: 1Kb = 1 vector of size 256
#[serde(
alias = "full_scan_threshold_kb",
default,
skip_serializing_if = "Option::is_none"
)]
#[validate(range(min = 10))]
pub full_scan_threshold: Option<usize>,
/// Number of parallel threads used for background index building. If 0 - auto selection.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub max_indexing_threads: Option<usize>,
/// Store HNSW index on disk. If set to false, the index will be stored in RAM. Default: false
#[serde(default, skip_serializing_if = "Option::is_none")]
pub on_disk: Option<bool>,
/// Custom M param for additional payload-aware HNSW links. If not set, default M will be used.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub payload_m: Option<usize>,
}
#[derive(
Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge, PartialEq, Eq, Hash,
)]
pub struct WalConfigDiff {
/// Size of a single WAL segment in MB
#[validate(range(min = 1))]
pub wal_capacity_mb: Option<usize>,
/// Number of WAL segments to create ahead of actually used ones
pub wal_segments_ahead: Option<usize>,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Merge, PartialEq, Eq, Hash)]
pub struct CollectionParamsDiff {
/// Number of replicas for each shard
pub replication_factor: Option<NonZeroU32>,
/// Minimal number successful responses from replicas to consider operation successful
pub write_consistency_factor: Option<NonZeroU32>,
/// If true - point's payload will not be stored in memory.
/// It will be read from the disk every time it is requested.
/// This setting saves RAM by (slightly) increasing the response time.
/// Note: those payload values that are involved in filtering and are indexed - remain in RAM.
#[serde(default)]
pub on_disk_payload: Option<bool>,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge)]
pub struct OptimizersConfigDiff {
/// The minimal fraction of deleted vectors in a segment, required to perform segment optimization
pub deleted_threshold: Option<f64>,
/// The minimal number of vectors in a segment, required to perform segment optimization
pub vacuum_min_vector_number: Option<usize>,
/// Target amount of segments optimizer will try to keep.
/// Real amount of segments may vary depending on multiple parameters:
/// - Amount of stored points
/// - Current write RPS
///
/// It is recommended to select default number of segments as a factor of the number of search threads,
/// so that each segment would be handled evenly by one of the threads
/// If `default_segment_number = 0`, will be automatically selected by the number of available CPUs
pub default_segment_number: Option<usize>,
/// Do not create segments larger this size (in kilobytes).
/// Large segments might require disproportionately long indexation times,
/// therefore it makes sense to limit the size of segments.
///
/// If indexation speed have more priority for your - make this parameter lower.
/// If search speed is more important - make this parameter higher.
/// Note: 1Kb = 1 vector of size 256
#[serde(alias = "max_segment_size_kb")]
pub max_segment_size: Option<usize>,
/// Maximum size (in kilobytes) of vectors to store in-memory per segment.
/// Segments larger than this threshold will be stored as read-only memmaped file.
///
/// Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value.
///
/// To disable memmap storage, set this to `0`.
///
/// Note: 1Kb = 1 vector of size 256
#[serde(alias = "memmap_threshold_kb")]
pub memmap_threshold: Option<usize>,
/// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing
///
/// Default value is 20,000, based on <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>.
///
/// To disable vector indexing, set to `0`.
///
/// Note: 1kB = 1 vector of size 256.
#[serde(alias = "indexing_threshold_kb")]
pub indexing_threshold: Option<usize>,
/// Minimum interval between forced flushes.
pub flush_interval_sec: Option<u64>,
/// Maximum available threads for optimization workers
pub max_optimization_threads: Option<usize>,
}
impl std::hash::Hash for OptimizersConfigDiff {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.deleted_threshold.map(f64::to_le_bytes).hash(state);
self.vacuum_min_vector_number.hash(state);
self.default_segment_number.hash(state);
self.max_segment_size.hash(state);
self.memmap_threshold.hash(state);
self.indexing_threshold.hash(state);
self.flush_interval_sec.hash(state);
self.max_optimization_threads.hash(state);
}
}
impl PartialEq for OptimizersConfigDiff {
fn eq(&self, other: &Self) -> bool {
self.deleted_threshold.map(f64::to_le_bytes)
== other.deleted_threshold.map(f64::to_le_bytes)
&& self.vacuum_min_vector_number == other.vacuum_min_vector_number
&& self.default_segment_number == other.default_segment_number
&& self.max_segment_size == other.max_segment_size
&& self.memmap_threshold == other.memmap_threshold
&& self.indexing_threshold == other.indexing_threshold
&& self.flush_interval_sec == other.flush_interval_sec
&& self.max_optimization_threads == other.max_optimization_threads
}
}
impl Eq for OptimizersConfigDiff {}
impl DiffConfig<HnswConfig> for HnswConfigDiff {}
impl DiffConfig<HnswConfigDiff> for HnswConfigDiff {}
impl DiffConfig<OptimizersConfig> for OptimizersConfigDiff {}
impl DiffConfig<WalConfig> for WalConfigDiff {}
impl DiffConfig<CollectionParams> for CollectionParamsDiff {}
impl From<HnswConfig> for HnswConfigDiff {
fn from(config: HnswConfig) -> Self {
HnswConfigDiff::from_full(&config).unwrap()
}
}
impl From<OptimizersConfig> for OptimizersConfigDiff {
fn from(config: OptimizersConfig) -> Self {
OptimizersConfigDiff::from_full(&config).unwrap()
}
}
impl From<WalConfig> for WalConfigDiff {
fn from(config: WalConfig) -> Self {
WalConfigDiff::from_full(&config).unwrap()
}
}
impl From<CollectionParams> for CollectionParamsDiff {
fn from(config: CollectionParams) -> Self {
CollectionParamsDiff::from_full(&config).unwrap()
}
}
pub fn from_full<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize>(
full_config: &T,
) -> CollectionResult<Y> {
let json = serde_json::to_value(full_config)?;
let res = serde_json::from_value(json)?;
Ok(res)
}
/// Merge first level of JSON values, if diff values present explicitly
///
/// Example:
///
/// base: {"a": 1, "b": 2}
/// diff: {"a": 3}
/// result: {"a": 3, "b": 2}
///
/// base: {"a": 1, "b": 2}
/// diff: {"a": null}
/// result: {"a": 1, "b": 2}
fn merge_level_0(base: &mut Value, diff: Value) {
match (base, diff) {
(base @ &mut Value::Object(_), Value::Object(diff)) => {
let base = base.as_object_mut().unwrap();
for (k, v) in diff {
if !v.is_null() {
base.insert(k, v);
}
}
}
(_base, _diff) => {}
}
}
/// Hacky way to update configuration structures with diff-updates.
/// Intended to only be used in non critical for speed places.
/// TODO: replace with proc macro
pub fn update_config<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize + Merge>(
config: &T,
update: Y,
) -> CollectionResult<T> {
let mut config_values = serde_json::to_value(config)?;
let diff_values = serde_json::to_value(&update)?;
merge_level_0(&mut config_values, diff_values);
let res = serde_json::from_value(config_values)?;
Ok(res)
}
/// Hacky way to figure out if the given configuration is considered empty
///
/// The following types are considered empty:
/// - Null
/// - Empty string
/// - Array or object with zero items
///
/// Intended to only be used in non critical for speed places.
pub fn is_empty<T: Serialize>(config: &T) -> CollectionResult<bool> {
let config_values = serde_json::to_value(config)?;
Ok(match config_values {
Value::Null => true,
Value::String(value) => value.is_empty(),
Value::Array(values) => values.is_empty(),
Value::Object(values) => values.is_empty(),
Value::Bool(_) | Value::Number(_) => false,
})
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)]
pub enum Disabled {
Disabled,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)]
#[serde(rename_all = "snake_case")]
#[serde(untagged)]
pub enum QuantizationConfigDiff {
Scalar(ScalarQuantization),
Product(ProductQuantization),
Disabled(Disabled),
}
impl QuantizationConfigDiff {
pub fn new_disabled() -> Self {
QuantizationConfigDiff::Disabled(Disabled::Disabled)
}
}
impl Validate for QuantizationConfigDiff {
fn validate(&self) -> Result<(), ValidationErrors> {
match self {
QuantizationConfigDiff::Scalar(scalar) => scalar.validate(),
QuantizationConfigDiff::Product(product) => product.validate(),
QuantizationConfigDiff::Disabled(_) => Ok(()),
}
}
}
#[cfg(test)]
mod tests {
use std::num::NonZeroU64;
use segment::types::{Distance, HnswConfig};
use super::*;
use crate::operations::types::VectorParams;
use crate::optimizers_builder::OptimizersConfig;
#[test]
fn test_update_collection_params() {
let params = CollectionParams {
vectors: VectorParams {
size: NonZeroU64::new(128).unwrap(),
distance: Distance::Cosine,
hnsw_config: None,
quantization_config: None,
on_disk: None,
}
.into(),
shard_number: NonZeroU32::new(1).unwrap(),
replication_factor: NonZeroU32::new(1).unwrap(),
write_consistency_factor: NonZeroU32::new(1).unwrap(),
on_disk_payload: false,
};
let diff = CollectionParamsDiff {
replication_factor: None,
write_consistency_factor: Some(NonZeroU32::new(2).unwrap()),
on_disk_payload: None,
};
let new_params = diff.update(¶ms).unwrap();
assert_eq!(new_params.replication_factor.get(), 1);
assert_eq!(new_params.write_consistency_factor.get(), 2);
assert!(!new_params.on_disk_payload);
}
#[test]
fn test_hnsw_update() {
let base_config = HnswConfig::default();
let update: HnswConfigDiff = serde_json::from_str(r#"{ "m": 32 }"#).unwrap();
let new_config = update.update(&base_config).unwrap();
assert_eq!(new_config.m, 32)
}
#[test]
fn test_optimizer_update() {
let base_config = OptimizersConfig {
deleted_threshold: 0.9,
vacuum_min_vector_number: 1000,
default_segment_number: 10,
max_segment_size: None,
memmap_threshold: None,
indexing_threshold: Some(50_000),
flush_interval_sec: 30,
max_optimization_threads: 1,
};
let update: OptimizersConfigDiff =
serde_json::from_str(r#"{ "indexing_threshold": 10000 }"#).unwrap();
let new_config = update.update(&base_config).unwrap();
assert_eq!(new_config.indexing_threshold, Some(10000))
}
#[test]
fn test_wal_config() {
let base_config = WalConfig::default();
let update: WalConfigDiff = serde_json::from_str(r#"{ "wal_segments_ahead": 2 }"#).unwrap();
let new_config = update.update(&base_config).unwrap();
assert_eq!(new_config.wal_segments_ahead, 2)
}
}
| {
from_full(full)
} | identifier_body |
config_diff.rs | use std::num::NonZeroU32;
use merge::Merge;
use schemars::JsonSchema;
use segment::types::{HnswConfig, ProductQuantization, ScalarQuantization};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use validator::{Validate, ValidationErrors};
use crate::config::{CollectionParams, WalConfig};
use crate::operations::types::CollectionResult;
use crate::optimizers_builder::OptimizersConfig;
// Structures for partial update of collection params
// TODO: make auto-generated somehow...
pub trait DiffConfig<T: DeserializeOwned + Serialize> {
/// Update the given `config` with fields in this diff
///
/// This clones, modifies and returns `config`.
///
/// This diff has higher priority, meaning that fields specified in this diff will always be in
/// the returned object.
fn update(self, config: &T) -> CollectionResult<T>
where
Self: Sized + Serialize + DeserializeOwned + Merge,
{
update_config(config, self)
}
fn from_full(full: &T) -> CollectionResult<Self>
where
Self: Sized + Serialize + DeserializeOwned,
{
from_full(full)
}
}
#[derive(
Debug,
Default,
Deserialize,
Serialize,
JsonSchema,
Validate,
Copy,
Clone,
PartialEq,
Eq,
Merge,
Hash,
)]
#[serde(rename_all = "snake_case")]
pub struct HnswConfigDiff {
/// Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.
#[serde(skip_serializing_if = "Option::is_none")]
pub m: Option<usize>,
/// Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index.
#[validate(range(min = 4))]
#[serde(skip_serializing_if = "Option::is_none")]
pub ef_construct: Option<usize>,
/// Minimal size (in kilobytes) of vectors for additional payload-based indexing.
/// If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used -
/// in this case full-scan search should be preferred by query planner and additional indexing is not required.
/// Note: 1Kb = 1 vector of size 256
#[serde(
alias = "full_scan_threshold_kb",
default,
skip_serializing_if = "Option::is_none"
)]
#[validate(range(min = 10))]
pub full_scan_threshold: Option<usize>,
/// Number of parallel threads used for background index building. If 0 - auto selection.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub max_indexing_threads: Option<usize>,
/// Store HNSW index on disk. If set to false, the index will be stored in RAM. Default: false
#[serde(default, skip_serializing_if = "Option::is_none")]
pub on_disk: Option<bool>,
/// Custom M param for additional payload-aware HNSW links. If not set, default M will be used.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub payload_m: Option<usize>,
}
#[derive(
Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge, PartialEq, Eq, Hash,
)]
pub struct WalConfigDiff {
/// Size of a single WAL segment in MB
#[validate(range(min = 1))]
pub wal_capacity_mb: Option<usize>,
/// Number of WAL segments to create ahead of actually used ones
pub wal_segments_ahead: Option<usize>,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Merge, PartialEq, Eq, Hash)]
pub struct CollectionParamsDiff {
/// Number of replicas for each shard
pub replication_factor: Option<NonZeroU32>,
/// Minimal number successful responses from replicas to consider operation successful
pub write_consistency_factor: Option<NonZeroU32>,
/// If true - point's payload will not be stored in memory.
/// It will be read from the disk every time it is requested.
/// This setting saves RAM by (slightly) increasing the response time.
/// Note: those payload values that are involved in filtering and are indexed - remain in RAM.
#[serde(default)]
pub on_disk_payload: Option<bool>,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge)]
pub struct OptimizersConfigDiff {
/// The minimal fraction of deleted vectors in a segment, required to perform segment optimization
pub deleted_threshold: Option<f64>,
/// The minimal number of vectors in a segment, required to perform segment optimization
pub vacuum_min_vector_number: Option<usize>,
/// Target amount of segments optimizer will try to keep.
/// Real amount of segments may vary depending on multiple parameters:
/// - Amount of stored points
/// - Current write RPS
///
/// It is recommended to select default number of segments as a factor of the number of search threads,
/// so that each segment would be handled evenly by one of the threads
/// If `default_segment_number = 0`, will be automatically selected by the number of available CPUs
pub default_segment_number: Option<usize>,
/// Do not create segments larger this size (in kilobytes).
/// Large segments might require disproportionately long indexation times,
/// therefore it makes sense to limit the size of segments.
///
/// If indexation speed have more priority for your - make this parameter lower.
/// If search speed is more important - make this parameter higher.
/// Note: 1Kb = 1 vector of size 256
#[serde(alias = "max_segment_size_kb")]
pub max_segment_size: Option<usize>,
/// Maximum size (in kilobytes) of vectors to store in-memory per segment.
/// Segments larger than this threshold will be stored as read-only memmaped file.
///
/// Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value.
///
/// To disable memmap storage, set this to `0`.
///
/// Note: 1Kb = 1 vector of size 256
#[serde(alias = "memmap_threshold_kb")]
pub memmap_threshold: Option<usize>,
/// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing
///
/// Default value is 20,000, based on <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>.
///
/// To disable vector indexing, set to `0`.
///
/// Note: 1kB = 1 vector of size 256.
#[serde(alias = "indexing_threshold_kb")]
pub indexing_threshold: Option<usize>,
/// Minimum interval between forced flushes.
pub flush_interval_sec: Option<u64>,
/// Maximum available threads for optimization workers
pub max_optimization_threads: Option<usize>,
}
impl std::hash::Hash for OptimizersConfigDiff {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.deleted_threshold.map(f64::to_le_bytes).hash(state);
self.vacuum_min_vector_number.hash(state);
self.default_segment_number.hash(state);
self.max_segment_size.hash(state);
self.memmap_threshold.hash(state);
self.indexing_threshold.hash(state);
self.flush_interval_sec.hash(state);
self.max_optimization_threads.hash(state);
}
}
impl PartialEq for OptimizersConfigDiff {
fn eq(&self, other: &Self) -> bool {
self.deleted_threshold.map(f64::to_le_bytes)
== other.deleted_threshold.map(f64::to_le_bytes)
&& self.vacuum_min_vector_number == other.vacuum_min_vector_number
&& self.default_segment_number == other.default_segment_number
&& self.max_segment_size == other.max_segment_size
&& self.memmap_threshold == other.memmap_threshold
&& self.indexing_threshold == other.indexing_threshold
&& self.flush_interval_sec == other.flush_interval_sec
&& self.max_optimization_threads == other.max_optimization_threads
}
}
impl Eq for OptimizersConfigDiff {}
impl DiffConfig<HnswConfig> for HnswConfigDiff {}
impl DiffConfig<HnswConfigDiff> for HnswConfigDiff {}
impl DiffConfig<OptimizersConfig> for OptimizersConfigDiff {}
impl DiffConfig<WalConfig> for WalConfigDiff {}
impl DiffConfig<CollectionParams> for CollectionParamsDiff {}
impl From<HnswConfig> for HnswConfigDiff {
fn from(config: HnswConfig) -> Self {
HnswConfigDiff::from_full(&config).unwrap()
}
}
impl From<OptimizersConfig> for OptimizersConfigDiff {
fn from(config: OptimizersConfig) -> Self {
OptimizersConfigDiff::from_full(&config).unwrap()
}
}
impl From<WalConfig> for WalConfigDiff {
fn from(config: WalConfig) -> Self {
WalConfigDiff::from_full(&config).unwrap()
}
}
impl From<CollectionParams> for CollectionParamsDiff {
fn from(config: CollectionParams) -> Self {
CollectionParamsDiff::from_full(&config).unwrap()
}
}
pub fn from_full<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize>(
full_config: &T,
) -> CollectionResult<Y> {
let json = serde_json::to_value(full_config)?;
let res = serde_json::from_value(json)?;
Ok(res)
}
/// Merge first level of JSON values, if diff values present explicitly
///
/// Example:
///
/// base: {"a": 1, "b": 2}
/// diff: {"a": 3}
/// result: {"a": 3, "b": 2}
///
/// base: {"a": 1, "b": 2} | /// result: {"a": 1, "b": 2}
fn merge_level_0(base: &mut Value, diff: Value) {
match (base, diff) {
(base @ &mut Value::Object(_), Value::Object(diff)) => {
let base = base.as_object_mut().unwrap();
for (k, v) in diff {
if !v.is_null() {
base.insert(k, v);
}
}
}
(_base, _diff) => {}
}
}
/// Hacky way to update configuration structures with diff-updates.
/// Intended to only be used in non critical for speed places.
/// TODO: replace with proc macro
pub fn update_config<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize + Merge>(
config: &T,
update: Y,
) -> CollectionResult<T> {
let mut config_values = serde_json::to_value(config)?;
let diff_values = serde_json::to_value(&update)?;
merge_level_0(&mut config_values, diff_values);
let res = serde_json::from_value(config_values)?;
Ok(res)
}
/// Hacky way to figure out if the given configuration is considered empty
///
/// The following types are considered empty:
/// - Null
/// - Empty string
/// - Array or object with zero items
///
/// Intended to only be used in non critical for speed places.
pub fn is_empty<T: Serialize>(config: &T) -> CollectionResult<bool> {
let config_values = serde_json::to_value(config)?;
Ok(match config_values {
Value::Null => true,
Value::String(value) => value.is_empty(),
Value::Array(values) => values.is_empty(),
Value::Object(values) => values.is_empty(),
Value::Bool(_) | Value::Number(_) => false,
})
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)]
pub enum Disabled {
Disabled,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)]
#[serde(rename_all = "snake_case")]
#[serde(untagged)]
pub enum QuantizationConfigDiff {
Scalar(ScalarQuantization),
Product(ProductQuantization),
Disabled(Disabled),
}
impl QuantizationConfigDiff {
pub fn new_disabled() -> Self {
QuantizationConfigDiff::Disabled(Disabled::Disabled)
}
}
impl Validate for QuantizationConfigDiff {
fn validate(&self) -> Result<(), ValidationErrors> {
match self {
QuantizationConfigDiff::Scalar(scalar) => scalar.validate(),
QuantizationConfigDiff::Product(product) => product.validate(),
QuantizationConfigDiff::Disabled(_) => Ok(()),
}
}
}
#[cfg(test)]
mod tests {
use std::num::NonZeroU64;
use segment::types::{Distance, HnswConfig};
use super::*;
use crate::operations::types::VectorParams;
use crate::optimizers_builder::OptimizersConfig;
#[test]
fn test_update_collection_params() {
let params = CollectionParams {
vectors: VectorParams {
size: NonZeroU64::new(128).unwrap(),
distance: Distance::Cosine,
hnsw_config: None,
quantization_config: None,
on_disk: None,
}
.into(),
shard_number: NonZeroU32::new(1).unwrap(),
replication_factor: NonZeroU32::new(1).unwrap(),
write_consistency_factor: NonZeroU32::new(1).unwrap(),
on_disk_payload: false,
};
let diff = CollectionParamsDiff {
replication_factor: None,
write_consistency_factor: Some(NonZeroU32::new(2).unwrap()),
on_disk_payload: None,
};
let new_params = diff.update(¶ms).unwrap();
assert_eq!(new_params.replication_factor.get(), 1);
assert_eq!(new_params.write_consistency_factor.get(), 2);
assert!(!new_params.on_disk_payload);
}
#[test]
fn test_hnsw_update() {
let base_config = HnswConfig::default();
let update: HnswConfigDiff = serde_json::from_str(r#"{ "m": 32 }"#).unwrap();
let new_config = update.update(&base_config).unwrap();
assert_eq!(new_config.m, 32)
}
#[test]
fn test_optimizer_update() {
let base_config = OptimizersConfig {
deleted_threshold: 0.9,
vacuum_min_vector_number: 1000,
default_segment_number: 10,
max_segment_size: None,
memmap_threshold: None,
indexing_threshold: Some(50_000),
flush_interval_sec: 30,
max_optimization_threads: 1,
};
let update: OptimizersConfigDiff =
serde_json::from_str(r#"{ "indexing_threshold": 10000 }"#).unwrap();
let new_config = update.update(&base_config).unwrap();
assert_eq!(new_config.indexing_threshold, Some(10000))
}
#[test]
fn test_wal_config() {
let base_config = WalConfig::default();
let update: WalConfigDiff = serde_json::from_str(r#"{ "wal_segments_ahead": 2 }"#).unwrap();
let new_config = update.update(&base_config).unwrap();
assert_eq!(new_config.wal_segments_ahead, 2)
}
} | /// diff: {"a": null} | random_line_split |
config_diff.rs | use std::num::NonZeroU32;
use merge::Merge;
use schemars::JsonSchema;
use segment::types::{HnswConfig, ProductQuantization, ScalarQuantization};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use validator::{Validate, ValidationErrors};
use crate::config::{CollectionParams, WalConfig};
use crate::operations::types::CollectionResult;
use crate::optimizers_builder::OptimizersConfig;
// Structures for partial update of collection params
// TODO: make auto-generated somehow...
pub trait DiffConfig<T: DeserializeOwned + Serialize> {
/// Update the given `config` with fields in this diff
///
/// This clones, modifies and returns `config`.
///
/// This diff has higher priority, meaning that fields specified in this diff will always be in
/// the returned object.
fn update(self, config: &T) -> CollectionResult<T>
where
Self: Sized + Serialize + DeserializeOwned + Merge,
{
update_config(config, self)
}
fn from_full(full: &T) -> CollectionResult<Self>
where
Self: Sized + Serialize + DeserializeOwned,
{
from_full(full)
}
}
#[derive(
Debug,
Default,
Deserialize,
Serialize,
JsonSchema,
Validate,
Copy,
Clone,
PartialEq,
Eq,
Merge,
Hash,
)]
#[serde(rename_all = "snake_case")]
pub struct HnswConfigDiff {
/// Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.
#[serde(skip_serializing_if = "Option::is_none")]
pub m: Option<usize>,
/// Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index.
#[validate(range(min = 4))]
#[serde(skip_serializing_if = "Option::is_none")]
pub ef_construct: Option<usize>,
/// Minimal size (in kilobytes) of vectors for additional payload-based indexing.
/// If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used -
/// in this case full-scan search should be preferred by query planner and additional indexing is not required.
/// Note: 1Kb = 1 vector of size 256
#[serde(
alias = "full_scan_threshold_kb",
default,
skip_serializing_if = "Option::is_none"
)]
#[validate(range(min = 10))]
pub full_scan_threshold: Option<usize>,
/// Number of parallel threads used for background index building. If 0 - auto selection.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub max_indexing_threads: Option<usize>,
/// Store HNSW index on disk. If set to false, the index will be stored in RAM. Default: false
#[serde(default, skip_serializing_if = "Option::is_none")]
pub on_disk: Option<bool>,
/// Custom M param for additional payload-aware HNSW links. If not set, default M will be used.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub payload_m: Option<usize>,
}
#[derive(
Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge, PartialEq, Eq, Hash,
)]
pub struct WalConfigDiff {
/// Size of a single WAL segment in MB
#[validate(range(min = 1))]
pub wal_capacity_mb: Option<usize>,
/// Number of WAL segments to create ahead of actually used ones
pub wal_segments_ahead: Option<usize>,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Merge, PartialEq, Eq, Hash)]
pub struct CollectionParamsDiff {
/// Number of replicas for each shard
pub replication_factor: Option<NonZeroU32>,
/// Minimal number successful responses from replicas to consider operation successful
pub write_consistency_factor: Option<NonZeroU32>,
/// If true - point's payload will not be stored in memory.
/// It will be read from the disk every time it is requested.
/// This setting saves RAM by (slightly) increasing the response time.
/// Note: those payload values that are involved in filtering and are indexed - remain in RAM.
#[serde(default)]
pub on_disk_payload: Option<bool>,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge)]
pub struct OptimizersConfigDiff {
/// The minimal fraction of deleted vectors in a segment, required to perform segment optimization
pub deleted_threshold: Option<f64>,
/// The minimal number of vectors in a segment, required to perform segment optimization
pub vacuum_min_vector_number: Option<usize>,
/// Target amount of segments optimizer will try to keep.
/// Real amount of segments may vary depending on multiple parameters:
/// - Amount of stored points
/// - Current write RPS
///
/// It is recommended to select default number of segments as a factor of the number of search threads,
/// so that each segment would be handled evenly by one of the threads
/// If `default_segment_number = 0`, will be automatically selected by the number of available CPUs
pub default_segment_number: Option<usize>,
/// Do not create segments larger this size (in kilobytes).
/// Large segments might require disproportionately long indexation times,
/// therefore it makes sense to limit the size of segments.
///
/// If indexation speed have more priority for your - make this parameter lower.
/// If search speed is more important - make this parameter higher.
/// Note: 1Kb = 1 vector of size 256
#[serde(alias = "max_segment_size_kb")]
pub max_segment_size: Option<usize>,
/// Maximum size (in kilobytes) of vectors to store in-memory per segment.
/// Segments larger than this threshold will be stored as read-only memmaped file.
///
/// Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value.
///
/// To disable memmap storage, set this to `0`.
///
/// Note: 1Kb = 1 vector of size 256
#[serde(alias = "memmap_threshold_kb")]
pub memmap_threshold: Option<usize>,
/// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing
///
/// Default value is 20,000, based on <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>.
///
/// To disable vector indexing, set to `0`.
///
/// Note: 1kB = 1 vector of size 256.
#[serde(alias = "indexing_threshold_kb")]
pub indexing_threshold: Option<usize>,
/// Minimum interval between forced flushes.
pub flush_interval_sec: Option<u64>,
/// Maximum available threads for optimization workers
pub max_optimization_threads: Option<usize>,
}
impl std::hash::Hash for OptimizersConfigDiff {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.deleted_threshold.map(f64::to_le_bytes).hash(state);
self.vacuum_min_vector_number.hash(state);
self.default_segment_number.hash(state);
self.max_segment_size.hash(state);
self.memmap_threshold.hash(state);
self.indexing_threshold.hash(state);
self.flush_interval_sec.hash(state);
self.max_optimization_threads.hash(state);
}
}
impl PartialEq for OptimizersConfigDiff {
fn eq(&self, other: &Self) -> bool {
self.deleted_threshold.map(f64::to_le_bytes)
== other.deleted_threshold.map(f64::to_le_bytes)
&& self.vacuum_min_vector_number == other.vacuum_min_vector_number
&& self.default_segment_number == other.default_segment_number
&& self.max_segment_size == other.max_segment_size
&& self.memmap_threshold == other.memmap_threshold
&& self.indexing_threshold == other.indexing_threshold
&& self.flush_interval_sec == other.flush_interval_sec
&& self.max_optimization_threads == other.max_optimization_threads
}
}
impl Eq for OptimizersConfigDiff {}
impl DiffConfig<HnswConfig> for HnswConfigDiff {}
impl DiffConfig<HnswConfigDiff> for HnswConfigDiff {}
impl DiffConfig<OptimizersConfig> for OptimizersConfigDiff {}
impl DiffConfig<WalConfig> for WalConfigDiff {}
impl DiffConfig<CollectionParams> for CollectionParamsDiff {}
impl From<HnswConfig> for HnswConfigDiff {
fn from(config: HnswConfig) -> Self {
HnswConfigDiff::from_full(&config).unwrap()
}
}
impl From<OptimizersConfig> for OptimizersConfigDiff {
fn from(config: OptimizersConfig) -> Self {
OptimizersConfigDiff::from_full(&config).unwrap()
}
}
impl From<WalConfig> for WalConfigDiff {
fn from(config: WalConfig) -> Self {
WalConfigDiff::from_full(&config).unwrap()
}
}
impl From<CollectionParams> for CollectionParamsDiff {
fn from(config: CollectionParams) -> Self {
CollectionParamsDiff::from_full(&config).unwrap()
}
}
pub fn from_full<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize>(
full_config: &T,
) -> CollectionResult<Y> {
let json = serde_json::to_value(full_config)?;
let res = serde_json::from_value(json)?;
Ok(res)
}
/// Merge first level of JSON values, if diff values present explicitly
///
/// Example:
///
/// base: {"a": 1, "b": 2}
/// diff: {"a": 3}
/// result: {"a": 3, "b": 2}
///
/// base: {"a": 1, "b": 2}
/// diff: {"a": null}
/// result: {"a": 1, "b": 2}
fn merge_level_0(base: &mut Value, diff: Value) {
match (base, diff) {
(base @ &mut Value::Object(_), Value::Object(diff)) => {
let base = base.as_object_mut().unwrap();
for (k, v) in diff {
if !v.is_null() {
base.insert(k, v);
}
}
}
(_base, _diff) => {}
}
}
/// Hacky way to update configuration structures with diff-updates.
/// Intended to only be used in non critical for speed places.
/// TODO: replace with proc macro
pub fn update_config<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize + Merge>(
config: &T,
update: Y,
) -> CollectionResult<T> {
let mut config_values = serde_json::to_value(config)?;
let diff_values = serde_json::to_value(&update)?;
merge_level_0(&mut config_values, diff_values);
let res = serde_json::from_value(config_values)?;
Ok(res)
}
/// Hacky way to figure out if the given configuration is considered empty
///
/// The following types are considered empty:
/// - Null
/// - Empty string
/// - Array or object with zero items
///
/// Intended to only be used in non critical for speed places.
pub fn is_empty<T: Serialize>(config: &T) -> CollectionResult<bool> {
let config_values = serde_json::to_value(config)?;
Ok(match config_values {
Value::Null => true,
Value::String(value) => value.is_empty(),
Value::Array(values) => values.is_empty(),
Value::Object(values) => values.is_empty(),
Value::Bool(_) | Value::Number(_) => false,
})
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)]
pub enum Disabled {
Disabled,
}
#[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)]
#[serde(rename_all = "snake_case")]
#[serde(untagged)]
pub enum QuantizationConfigDiff {
Scalar(ScalarQuantization),
Product(ProductQuantization),
Disabled(Disabled),
}
impl QuantizationConfigDiff {
pub fn | () -> Self {
QuantizationConfigDiff::Disabled(Disabled::Disabled)
}
}
impl Validate for QuantizationConfigDiff {
fn validate(&self) -> Result<(), ValidationErrors> {
match self {
QuantizationConfigDiff::Scalar(scalar) => scalar.validate(),
QuantizationConfigDiff::Product(product) => product.validate(),
QuantizationConfigDiff::Disabled(_) => Ok(()),
}
}
}
#[cfg(test)]
mod tests {
use std::num::NonZeroU64;
use segment::types::{Distance, HnswConfig};
use super::*;
use crate::operations::types::VectorParams;
use crate::optimizers_builder::OptimizersConfig;
#[test]
fn test_update_collection_params() {
let params = CollectionParams {
vectors: VectorParams {
size: NonZeroU64::new(128).unwrap(),
distance: Distance::Cosine,
hnsw_config: None,
quantization_config: None,
on_disk: None,
}
.into(),
shard_number: NonZeroU32::new(1).unwrap(),
replication_factor: NonZeroU32::new(1).unwrap(),
write_consistency_factor: NonZeroU32::new(1).unwrap(),
on_disk_payload: false,
};
let diff = CollectionParamsDiff {
replication_factor: None,
write_consistency_factor: Some(NonZeroU32::new(2).unwrap()),
on_disk_payload: None,
};
let new_params = diff.update(¶ms).unwrap();
assert_eq!(new_params.replication_factor.get(), 1);
assert_eq!(new_params.write_consistency_factor.get(), 2);
assert!(!new_params.on_disk_payload);
}
#[test]
fn test_hnsw_update() {
let base_config = HnswConfig::default();
let update: HnswConfigDiff = serde_json::from_str(r#"{ "m": 32 }"#).unwrap();
let new_config = update.update(&base_config).unwrap();
assert_eq!(new_config.m, 32)
}
#[test]
fn test_optimizer_update() {
let base_config = OptimizersConfig {
deleted_threshold: 0.9,
vacuum_min_vector_number: 1000,
default_segment_number: 10,
max_segment_size: None,
memmap_threshold: None,
indexing_threshold: Some(50_000),
flush_interval_sec: 30,
max_optimization_threads: 1,
};
let update: OptimizersConfigDiff =
serde_json::from_str(r#"{ "indexing_threshold": 10000 }"#).unwrap();
let new_config = update.update(&base_config).unwrap();
assert_eq!(new_config.indexing_threshold, Some(10000))
}
#[test]
fn test_wal_config() {
let base_config = WalConfig::default();
let update: WalConfigDiff = serde_json::from_str(r#"{ "wal_segments_ahead": 2 }"#).unwrap();
let new_config = update.update(&base_config).unwrap();
assert_eq!(new_config.wal_segments_ahead, 2)
}
}
| new_disabled | identifier_name |
reporter.py | import functools
import json
import logging
import re
import sys
import types
from contextlib import suppress
from datetime import datetime, date, timezone
from html import escape
from pathlib import Path
from pprint import pformat, saferepr
import platform
import jinja2
from exception_reports.traceback import get_logger_traceback, TracebackFrameProxy
from exception_reports.utils import force_text, gen_error_filename
logger = logging.getLogger(__name__)
@functools.lru_cache()
def _report_template():
"""get the report template"""
current_dir = Path(__file__).parent
with open(current_dir / "report_template.html", "r") as f:
template = f.read()
template = re.sub(r"\s{2,}", " ", template)
template = re.sub(r"\n", "", template)
template = re.sub(r"> <", "><", template)
return template
def render_exception_html(exception_data, report_template=None):
"""Render exception_data as an html report"""
report_template = report_template or _report_template()
jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=["jinja2.ext.autoescape"])
exception_data["repr"] = repr
return jinja_env.from_string(report_template).render(exception_data)
def render_exception_json(exception_data):
"""Render exception_data as a json object"""
return json.dumps(exception_data, default=_json_serializer)
def _json_serializer(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat(sep=" ")
if isinstance(obj, (types.TracebackType, TracebackFrameProxy)):
return "<Traceback object>"
return saferepr(obj)
def get_exception_data(exc_type=None, exc_value=None, tb=None, get_full_tb=False, max_var_length=4096 + 2048):
"""
Return a dictionary containing exception information.
if exc_type, exc_value, and tb are not provided they will be supplied by sys.exc_info()
max_var_length: how long a variable's output can be before it's truncated
"""
head_var_length = int(max_var_length / 2)
tail_var_length = max_var_length - head_var_length
if not tb:
exc_type, exc_value, tb = sys.exc_info()
frames = get_traceback_frames(exc_value=exc_value, tb=tb, get_full_tb=get_full_tb)
for i, frame in enumerate(frames):
if "vars" in frame:
frame_vars = []
for k, v in frame["vars"]:
try:
v = pformat(v)
except Exception as e:
try:
v = saferepr(e)
except Exception:
v = "An error occurred rendering the exception of type: " + repr(e.__class__)
# The force_escape filter assume unicode, make sure that works
if isinstance(v, bytes):
v = v.decode("utf-8", "replace") # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > max_var_length:
v = f"{v[0:head_var_length]}... \n\n<trimmed {len(v)} bytes string>\n\n ...{v[-tail_var_length:]}"
frame_vars.append((k, escape(v)))
frame["vars"] = frame_vars
frames[i] = frame
unicode_hint = ""
if exc_type and issubclass(exc_type, UnicodeError):
start = getattr(exc_value, "start", None)
end = getattr(exc_value, "end", None)
if start is not None and end is not None:
unicode_str = exc_value.args[1]
unicode_hint = force_text(unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))], "ascii", errors="replace")
try:
unicode_hint.encode("utf8")
except UnicodeEncodeError:
unicode_hint = unicode_hint.encode("utf8", "surrogateescape")
c = {
"unicode_hint": unicode_hint,
"frames": frames,
"sys_executable": sys.executable,
"sys_version_info": "%d.%d.%d" % sys.version_info[0:3],
"server_time": datetime.now(timezone.utc),
"sys_path": sys.path,
"platform": platform.uname()._asdict(),
}
# Check whether exception info is available
if exc_type:
c["exception_type"] = exc_type.__name__
if exc_value:
c["exception_value"] = force_text(exc_value, errors="replace")
if frames:
c["lastframe"] = frames[-1]
return c
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
with suppress(ImportError):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
with suppress(OSError, IOError):
with open(filename, "rb") as fp:
source = fp.read().splitlines()
if source is None:
return None, [], None, []
try:
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = "ascii"
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br"coding[:=]\s*([-\w.]+)", line)
if match:
encoding = match.group(1).decode("ascii")
break
source = [str(sline, encoding, "replace") for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1 : upper_bound]
return lower_bound, pre_context, context_line, post_context
except Exception as e:
try:
context_line = f'<There was an error displaying the source file: "{repr(e)}" The loaded source has {len(source)} lines.>'
except Exception:
context_line = "<There was an error displaying the source file. Further, there was an error displaying that error>"
return lineno, [], context_line, []
def get_traceback_frames(exc_value=None, tb=None, get_full_tb=True):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, "__cause__", None)
implicit = getattr(exc_value, "__context__", None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = tb if not exceptions else exc_value.__traceback__
added_full_tb = False
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get("__traceback_hide__"):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get("__loader__")
module_name = tb.tb_frame.f_globals.get("__name__") or ""
pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is None:
pre_context_lineno = lineno
pre_context = []
context_line = "<source code not available>"
post_context = []
frames.append(
{
"exc_cause": explicit_or_implicit_cause(exc_value),
"exc_cause_explicit": getattr(exc_value, "__cause__", True),
"is_full_stack_trace": getattr(exc_value, "is_full_stack_trace", False),
"tb": tb,
"type": "django" if module_name.startswith("django.") else "user",
"filename": filename,
"function": function,
"lineno": lineno + 1,
"vars": list(tb.tb_frame.f_locals.items()),
"id": id(tb),
"pre_context": pre_context,
"context_line": context_line,
"post_context": post_context,
"pre_context_lineno": pre_context_lineno + 1,
}
)
# If the traceback for current exception is consumed, try the
# other exception.
if not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
if get_full_tb and tb is None and not added_full_tb:
exc_value = Exception("Full Stack Trace")
exc_value.is_full_stack_trace = True
exc_value.__cause__ = Exception("Full Stack Trace")
tb = get_logger_traceback()
added_full_tb = True
return frames
def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):
"""
Create an exception report and return its location
"""
exception_data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)
if data_processor:
exception_data = data_processor(exception_data)
if output_format == "html":
text = render_exception_html(exception_data)
elif output_format == "json":
text = render_exception_json(exception_data)
else:
raise TypeError("Exception report format not correctly specified")
filename = gen_error_filename(extension=output_format)
report_location = storage_backend.write(filename, text)
return report_location
def append_to_exception_message(e, tb, added_message):
ExceptionType = type(e)
if ExceptionType.__module__ == "builtins":
# this way of altering the message isn't as good but it works for builtin exception types
e = ExceptionType(f"{str(e)} {added_message}").with_traceback(tb)
else:
|
return e
| def my_str(self):
m = ExceptionType.__str__(self)
return f"{m} {added_message}"
NewExceptionType = type(ExceptionType.__name__, (ExceptionType,), {"__str__": my_str})
e.__class__ = NewExceptionType | conditional_block |
reporter.py | import functools
import json
import logging
import re
import sys
import types
from contextlib import suppress
from datetime import datetime, date, timezone
from html import escape
from pathlib import Path
from pprint import pformat, saferepr
import platform
import jinja2
from exception_reports.traceback import get_logger_traceback, TracebackFrameProxy
from exception_reports.utils import force_text, gen_error_filename
logger = logging.getLogger(__name__)
@functools.lru_cache()
def _report_template():
"""get the report template"""
current_dir = Path(__file__).parent
with open(current_dir / "report_template.html", "r") as f:
template = f.read()
template = re.sub(r"\s{2,}", " ", template)
template = re.sub(r"\n", "", template)
template = re.sub(r"> <", "><", template)
return template
def render_exception_html(exception_data, report_template=None):
"""Render exception_data as an html report"""
report_template = report_template or _report_template()
jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=["jinja2.ext.autoescape"])
exception_data["repr"] = repr
return jinja_env.from_string(report_template).render(exception_data)
def render_exception_json(exception_data):
"""Render exception_data as a json object"""
return json.dumps(exception_data, default=_json_serializer)
def _json_serializer(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat(sep=" ")
if isinstance(obj, (types.TracebackType, TracebackFrameProxy)):
return "<Traceback object>"
return saferepr(obj)
def get_exception_data(exc_type=None, exc_value=None, tb=None, get_full_tb=False, max_var_length=4096 + 2048):
"""
Return a dictionary containing exception information.
if exc_type, exc_value, and tb are not provided they will be supplied by sys.exc_info()
max_var_length: how long a variable's output can be before it's truncated
"""
head_var_length = int(max_var_length / 2)
tail_var_length = max_var_length - head_var_length
if not tb:
exc_type, exc_value, tb = sys.exc_info() | if "vars" in frame:
frame_vars = []
for k, v in frame["vars"]:
try:
v = pformat(v)
except Exception as e:
try:
v = saferepr(e)
except Exception:
v = "An error occurred rendering the exception of type: " + repr(e.__class__)
# The force_escape filter assume unicode, make sure that works
if isinstance(v, bytes):
v = v.decode("utf-8", "replace") # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > max_var_length:
v = f"{v[0:head_var_length]}... \n\n<trimmed {len(v)} bytes string>\n\n ...{v[-tail_var_length:]}"
frame_vars.append((k, escape(v)))
frame["vars"] = frame_vars
frames[i] = frame
unicode_hint = ""
if exc_type and issubclass(exc_type, UnicodeError):
start = getattr(exc_value, "start", None)
end = getattr(exc_value, "end", None)
if start is not None and end is not None:
unicode_str = exc_value.args[1]
unicode_hint = force_text(unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))], "ascii", errors="replace")
try:
unicode_hint.encode("utf8")
except UnicodeEncodeError:
unicode_hint = unicode_hint.encode("utf8", "surrogateescape")
c = {
"unicode_hint": unicode_hint,
"frames": frames,
"sys_executable": sys.executable,
"sys_version_info": "%d.%d.%d" % sys.version_info[0:3],
"server_time": datetime.now(timezone.utc),
"sys_path": sys.path,
"platform": platform.uname()._asdict(),
}
# Check whether exception info is available
if exc_type:
c["exception_type"] = exc_type.__name__
if exc_value:
c["exception_value"] = force_text(exc_value, errors="replace")
if frames:
c["lastframe"] = frames[-1]
return c
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
with suppress(ImportError):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
with suppress(OSError, IOError):
with open(filename, "rb") as fp:
source = fp.read().splitlines()
if source is None:
return None, [], None, []
try:
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = "ascii"
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br"coding[:=]\s*([-\w.]+)", line)
if match:
encoding = match.group(1).decode("ascii")
break
source = [str(sline, encoding, "replace") for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1 : upper_bound]
return lower_bound, pre_context, context_line, post_context
except Exception as e:
try:
context_line = f'<There was an error displaying the source file: "{repr(e)}" The loaded source has {len(source)} lines.>'
except Exception:
context_line = "<There was an error displaying the source file. Further, there was an error displaying that error>"
return lineno, [], context_line, []
def get_traceback_frames(exc_value=None, tb=None, get_full_tb=True):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, "__cause__", None)
implicit = getattr(exc_value, "__context__", None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = tb if not exceptions else exc_value.__traceback__
added_full_tb = False
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get("__traceback_hide__"):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get("__loader__")
module_name = tb.tb_frame.f_globals.get("__name__") or ""
pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is None:
pre_context_lineno = lineno
pre_context = []
context_line = "<source code not available>"
post_context = []
frames.append(
{
"exc_cause": explicit_or_implicit_cause(exc_value),
"exc_cause_explicit": getattr(exc_value, "__cause__", True),
"is_full_stack_trace": getattr(exc_value, "is_full_stack_trace", False),
"tb": tb,
"type": "django" if module_name.startswith("django.") else "user",
"filename": filename,
"function": function,
"lineno": lineno + 1,
"vars": list(tb.tb_frame.f_locals.items()),
"id": id(tb),
"pre_context": pre_context,
"context_line": context_line,
"post_context": post_context,
"pre_context_lineno": pre_context_lineno + 1,
}
)
# If the traceback for current exception is consumed, try the
# other exception.
if not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
if get_full_tb and tb is None and not added_full_tb:
exc_value = Exception("Full Stack Trace")
exc_value.is_full_stack_trace = True
exc_value.__cause__ = Exception("Full Stack Trace")
tb = get_logger_traceback()
added_full_tb = True
return frames
def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):
"""
Create an exception report and return its location
"""
exception_data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)
if data_processor:
exception_data = data_processor(exception_data)
if output_format == "html":
text = render_exception_html(exception_data)
elif output_format == "json":
text = render_exception_json(exception_data)
else:
raise TypeError("Exception report format not correctly specified")
filename = gen_error_filename(extension=output_format)
report_location = storage_backend.write(filename, text)
return report_location
def append_to_exception_message(e, tb, added_message):
ExceptionType = type(e)
if ExceptionType.__module__ == "builtins":
# this way of altering the message isn't as good but it works for builtin exception types
e = ExceptionType(f"{str(e)} {added_message}").with_traceback(tb)
else:
def my_str(self):
m = ExceptionType.__str__(self)
return f"{m} {added_message}"
NewExceptionType = type(ExceptionType.__name__, (ExceptionType,), {"__str__": my_str})
e.__class__ = NewExceptionType
return e |
frames = get_traceback_frames(exc_value=exc_value, tb=tb, get_full_tb=get_full_tb)
for i, frame in enumerate(frames): | random_line_split |
reporter.py | import functools
import json
import logging
import re
import sys
import types
from contextlib import suppress
from datetime import datetime, date, timezone
from html import escape
from pathlib import Path
from pprint import pformat, saferepr
import platform
import jinja2
from exception_reports.traceback import get_logger_traceback, TracebackFrameProxy
from exception_reports.utils import force_text, gen_error_filename
logger = logging.getLogger(__name__)
@functools.lru_cache()
def _report_template():
"""get the report template"""
current_dir = Path(__file__).parent
with open(current_dir / "report_template.html", "r") as f:
template = f.read()
template = re.sub(r"\s{2,}", " ", template)
template = re.sub(r"\n", "", template)
template = re.sub(r"> <", "><", template)
return template
def | (exception_data, report_template=None):
"""Render exception_data as an html report"""
report_template = report_template or _report_template()
jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=["jinja2.ext.autoescape"])
exception_data["repr"] = repr
return jinja_env.from_string(report_template).render(exception_data)
def render_exception_json(exception_data):
"""Render exception_data as a json object"""
return json.dumps(exception_data, default=_json_serializer)
def _json_serializer(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat(sep=" ")
if isinstance(obj, (types.TracebackType, TracebackFrameProxy)):
return "<Traceback object>"
return saferepr(obj)
def get_exception_data(exc_type=None, exc_value=None, tb=None, get_full_tb=False, max_var_length=4096 + 2048):
"""
Return a dictionary containing exception information.
if exc_type, exc_value, and tb are not provided they will be supplied by sys.exc_info()
max_var_length: how long a variable's output can be before it's truncated
"""
head_var_length = int(max_var_length / 2)
tail_var_length = max_var_length - head_var_length
if not tb:
exc_type, exc_value, tb = sys.exc_info()
frames = get_traceback_frames(exc_value=exc_value, tb=tb, get_full_tb=get_full_tb)
for i, frame in enumerate(frames):
if "vars" in frame:
frame_vars = []
for k, v in frame["vars"]:
try:
v = pformat(v)
except Exception as e:
try:
v = saferepr(e)
except Exception:
v = "An error occurred rendering the exception of type: " + repr(e.__class__)
# The force_escape filter assume unicode, make sure that works
if isinstance(v, bytes):
v = v.decode("utf-8", "replace") # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > max_var_length:
v = f"{v[0:head_var_length]}... \n\n<trimmed {len(v)} bytes string>\n\n ...{v[-tail_var_length:]}"
frame_vars.append((k, escape(v)))
frame["vars"] = frame_vars
frames[i] = frame
unicode_hint = ""
if exc_type and issubclass(exc_type, UnicodeError):
start = getattr(exc_value, "start", None)
end = getattr(exc_value, "end", None)
if start is not None and end is not None:
unicode_str = exc_value.args[1]
unicode_hint = force_text(unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))], "ascii", errors="replace")
try:
unicode_hint.encode("utf8")
except UnicodeEncodeError:
unicode_hint = unicode_hint.encode("utf8", "surrogateescape")
c = {
"unicode_hint": unicode_hint,
"frames": frames,
"sys_executable": sys.executable,
"sys_version_info": "%d.%d.%d" % sys.version_info[0:3],
"server_time": datetime.now(timezone.utc),
"sys_path": sys.path,
"platform": platform.uname()._asdict(),
}
# Check whether exception info is available
if exc_type:
c["exception_type"] = exc_type.__name__
if exc_value:
c["exception_value"] = force_text(exc_value, errors="replace")
if frames:
c["lastframe"] = frames[-1]
return c
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
with suppress(ImportError):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
with suppress(OSError, IOError):
with open(filename, "rb") as fp:
source = fp.read().splitlines()
if source is None:
return None, [], None, []
try:
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = "ascii"
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br"coding[:=]\s*([-\w.]+)", line)
if match:
encoding = match.group(1).decode("ascii")
break
source = [str(sline, encoding, "replace") for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1 : upper_bound]
return lower_bound, pre_context, context_line, post_context
except Exception as e:
try:
context_line = f'<There was an error displaying the source file: "{repr(e)}" The loaded source has {len(source)} lines.>'
except Exception:
context_line = "<There was an error displaying the source file. Further, there was an error displaying that error>"
return lineno, [], context_line, []
def get_traceback_frames(exc_value=None, tb=None, get_full_tb=True):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, "__cause__", None)
implicit = getattr(exc_value, "__context__", None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = tb if not exceptions else exc_value.__traceback__
added_full_tb = False
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get("__traceback_hide__"):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get("__loader__")
module_name = tb.tb_frame.f_globals.get("__name__") or ""
pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is None:
pre_context_lineno = lineno
pre_context = []
context_line = "<source code not available>"
post_context = []
frames.append(
{
"exc_cause": explicit_or_implicit_cause(exc_value),
"exc_cause_explicit": getattr(exc_value, "__cause__", True),
"is_full_stack_trace": getattr(exc_value, "is_full_stack_trace", False),
"tb": tb,
"type": "django" if module_name.startswith("django.") else "user",
"filename": filename,
"function": function,
"lineno": lineno + 1,
"vars": list(tb.tb_frame.f_locals.items()),
"id": id(tb),
"pre_context": pre_context,
"context_line": context_line,
"post_context": post_context,
"pre_context_lineno": pre_context_lineno + 1,
}
)
# If the traceback for current exception is consumed, try the
# other exception.
if not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
if get_full_tb and tb is None and not added_full_tb:
exc_value = Exception("Full Stack Trace")
exc_value.is_full_stack_trace = True
exc_value.__cause__ = Exception("Full Stack Trace")
tb = get_logger_traceback()
added_full_tb = True
return frames
def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):
"""
Create an exception report and return its location
"""
exception_data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)
if data_processor:
exception_data = data_processor(exception_data)
if output_format == "html":
text = render_exception_html(exception_data)
elif output_format == "json":
text = render_exception_json(exception_data)
else:
raise TypeError("Exception report format not correctly specified")
filename = gen_error_filename(extension=output_format)
report_location = storage_backend.write(filename, text)
return report_location
def append_to_exception_message(e, tb, added_message):
ExceptionType = type(e)
if ExceptionType.__module__ == "builtins":
# this way of altering the message isn't as good but it works for builtin exception types
e = ExceptionType(f"{str(e)} {added_message}").with_traceback(tb)
else:
def my_str(self):
m = ExceptionType.__str__(self)
return f"{m} {added_message}"
NewExceptionType = type(ExceptionType.__name__, (ExceptionType,), {"__str__": my_str})
e.__class__ = NewExceptionType
return e
| render_exception_html | identifier_name |
reporter.py | import functools
import json
import logging
import re
import sys
import types
from contextlib import suppress
from datetime import datetime, date, timezone
from html import escape
from pathlib import Path
from pprint import pformat, saferepr
import platform
import jinja2
from exception_reports.traceback import get_logger_traceback, TracebackFrameProxy
from exception_reports.utils import force_text, gen_error_filename
logger = logging.getLogger(__name__)
@functools.lru_cache()
def _report_template():
"""get the report template"""
current_dir = Path(__file__).parent
with open(current_dir / "report_template.html", "r") as f:
template = f.read()
template = re.sub(r"\s{2,}", " ", template)
template = re.sub(r"\n", "", template)
template = re.sub(r"> <", "><", template)
return template
def render_exception_html(exception_data, report_template=None):
"""Render exception_data as an html report"""
report_template = report_template or _report_template()
jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=["jinja2.ext.autoescape"])
exception_data["repr"] = repr
return jinja_env.from_string(report_template).render(exception_data)
def render_exception_json(exception_data):
"""Render exception_data as a json object"""
return json.dumps(exception_data, default=_json_serializer)
def _json_serializer(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat(sep=" ")
if isinstance(obj, (types.TracebackType, TracebackFrameProxy)):
return "<Traceback object>"
return saferepr(obj)
def get_exception_data(exc_type=None, exc_value=None, tb=None, get_full_tb=False, max_var_length=4096 + 2048):
"""
Return a dictionary containing exception information.
if exc_type, exc_value, and tb are not provided they will be supplied by sys.exc_info()
max_var_length: how long a variable's output can be before it's truncated
"""
head_var_length = int(max_var_length / 2)
tail_var_length = max_var_length - head_var_length
if not tb:
exc_type, exc_value, tb = sys.exc_info()
frames = get_traceback_frames(exc_value=exc_value, tb=tb, get_full_tb=get_full_tb)
for i, frame in enumerate(frames):
if "vars" in frame:
frame_vars = []
for k, v in frame["vars"]:
try:
v = pformat(v)
except Exception as e:
try:
v = saferepr(e)
except Exception:
v = "An error occurred rendering the exception of type: " + repr(e.__class__)
# The force_escape filter assume unicode, make sure that works
if isinstance(v, bytes):
v = v.decode("utf-8", "replace") # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > max_var_length:
v = f"{v[0:head_var_length]}... \n\n<trimmed {len(v)} bytes string>\n\n ...{v[-tail_var_length:]}"
frame_vars.append((k, escape(v)))
frame["vars"] = frame_vars
frames[i] = frame
unicode_hint = ""
if exc_type and issubclass(exc_type, UnicodeError):
start = getattr(exc_value, "start", None)
end = getattr(exc_value, "end", None)
if start is not None and end is not None:
unicode_str = exc_value.args[1]
unicode_hint = force_text(unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))], "ascii", errors="replace")
try:
unicode_hint.encode("utf8")
except UnicodeEncodeError:
unicode_hint = unicode_hint.encode("utf8", "surrogateescape")
c = {
"unicode_hint": unicode_hint,
"frames": frames,
"sys_executable": sys.executable,
"sys_version_info": "%d.%d.%d" % sys.version_info[0:3],
"server_time": datetime.now(timezone.utc),
"sys_path": sys.path,
"platform": platform.uname()._asdict(),
}
# Check whether exception info is available
if exc_type:
c["exception_type"] = exc_type.__name__
if exc_value:
c["exception_value"] = force_text(exc_value, errors="replace")
if frames:
c["lastframe"] = frames[-1]
return c
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
|
def get_traceback_frames(exc_value=None, tb=None, get_full_tb=True):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, "__cause__", None)
implicit = getattr(exc_value, "__context__", None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = tb if not exceptions else exc_value.__traceback__
added_full_tb = False
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get("__traceback_hide__"):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get("__loader__")
module_name = tb.tb_frame.f_globals.get("__name__") or ""
pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is None:
pre_context_lineno = lineno
pre_context = []
context_line = "<source code not available>"
post_context = []
frames.append(
{
"exc_cause": explicit_or_implicit_cause(exc_value),
"exc_cause_explicit": getattr(exc_value, "__cause__", True),
"is_full_stack_trace": getattr(exc_value, "is_full_stack_trace", False),
"tb": tb,
"type": "django" if module_name.startswith("django.") else "user",
"filename": filename,
"function": function,
"lineno": lineno + 1,
"vars": list(tb.tb_frame.f_locals.items()),
"id": id(tb),
"pre_context": pre_context,
"context_line": context_line,
"post_context": post_context,
"pre_context_lineno": pre_context_lineno + 1,
}
)
# If the traceback for current exception is consumed, try the
# other exception.
if not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
if get_full_tb and tb is None and not added_full_tb:
exc_value = Exception("Full Stack Trace")
exc_value.is_full_stack_trace = True
exc_value.__cause__ = Exception("Full Stack Trace")
tb = get_logger_traceback()
added_full_tb = True
return frames
def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):
"""
Create an exception report and return its location
"""
exception_data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)
if data_processor:
exception_data = data_processor(exception_data)
if output_format == "html":
text = render_exception_html(exception_data)
elif output_format == "json":
text = render_exception_json(exception_data)
else:
raise TypeError("Exception report format not correctly specified")
filename = gen_error_filename(extension=output_format)
report_location = storage_backend.write(filename, text)
return report_location
def append_to_exception_message(e, tb, added_message):
ExceptionType = type(e)
if ExceptionType.__module__ == "builtins":
# this way of altering the message isn't as good but it works for builtin exception types
e = ExceptionType(f"{str(e)} {added_message}").with_traceback(tb)
else:
def my_str(self):
m = ExceptionType.__str__(self)
return f"{m} {added_message}"
NewExceptionType = type(ExceptionType.__name__, (ExceptionType,), {"__str__": my_str})
e.__class__ = NewExceptionType
return e
| """
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
with suppress(ImportError):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
with suppress(OSError, IOError):
with open(filename, "rb") as fp:
source = fp.read().splitlines()
if source is None:
return None, [], None, []
try:
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = "ascii"
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br"coding[:=]\s*([-\w.]+)", line)
if match:
encoding = match.group(1).decode("ascii")
break
source = [str(sline, encoding, "replace") for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1 : upper_bound]
return lower_bound, pre_context, context_line, post_context
except Exception as e:
try:
context_line = f'<There was an error displaying the source file: "{repr(e)}" The loaded source has {len(source)} lines.>'
except Exception:
context_line = "<There was an error displaying the source file. Further, there was an error displaying that error>"
return lineno, [], context_line, [] | identifier_body |
jsds.js |
import Base from './base';
import _ from 'lodash';
import fs from 'fs';
import gm from 'gm';
import uuid from 'node-uuid';
import tesseract from 'node-tesseract';
import cheerio from 'cheerio';
import md5File from 'md5-file';
export default class extends Base {
fetchCaptcha(){
let codePath = 'code_' + uuid.v1() + '.jpg';
return new Promise((resolve, reject)=>{
let stream = gm(this.httpClient.get('http://www.jsds.gov.cn/index/fujia2.jsp'))
.operator('gray', 'threshold', 50, true).stream();
stream.pipe(fs.createWriteStream(codePath));
stream.on('end', ()=>{
tesseract.process(codePath, (error, text)=>{
fs.unlink(codePath);
error ? reject(error) : resolve(_.trim(text));
});
});
});
}
async login(username, password){
let captcha = await this.fetchCaptcha();
while(captcha.length != 4){
captcha = await this.fetchCaptcha();
}
let ret = await this.httpPost('http://www.jsds.gov.cn/LoginAction.do', {
form: { jsonData: JSON.stringify({
handleCode:'baseLogin',
data:{zh:username, zhPassWord:password, zhYzm: captcha}
})}
});
let {code:errno,msg:errmsg,data} = JSON.parse(ret.body);
if(errno == '0'){
errmsg = '';
this._logininfo = data;
}
else{
if(errno == '999904'){
errno = 'ERR_01';
errmsg = '代码错误';
}else if(errno == '999902'){
errno = 'ERR_02';
errmsg = '密码错误';
}else if(errno == '999901'){
errno = 'ERR_03';
errmsg = '验证码错误';
}else{
errno = 'SYS_' + errno;
}
}
return {errno, errmsg};
}
async fetch_nsrjbxx(){
let { sessionId } = this._logininfo;
let res = await this.httpGet('http://www.jsds.gov.cn/NsrjbxxAction.do', {
qs:{
sessionId, dealMethod:'queryData', jsonData:JSON.stringify({
data:{gnfldm:'CXFW',sqzldm:'',ssxmdm:''}
})
}
});
let $ = cheerio.load(res.body);
let info_tbl = $('table').eq(0);
let info_tr = $('tr', info_tbl);
let tzfxx_tr = $('#t_tzfxx tr').toArray().slice(1);
let tzfxx = _.map(tzfxx_tr, o=>({
tzfmc: $('[name=tzfxxvo_tzfmc]', o).val(),
zjzl: $('[name=tzfxxvo_zjzl]', o).val(),
zjhm: $('[name=tzfxxvo_zjhm]', o).val(),
tzbl: $('[name=tzfxxvo_tzbl]', o).val()
}));
let ret = {
nsrmc: $('td', info_tr.eq(1)).eq(1).text(),
nsrsbh: $('td', info_tr.eq(0)).eq(3).text(),
scjyqx: $('td', info_tr.eq(4)).eq(3).text(),
zcdz: $('td', info_tr.eq(6)).eq(1).text(),
zcdyzbm: $('td', info_tr.eq(7)).eq(1).text(),
zcdlxdh: $('td', info_tr.eq(7)).eq(3).text(),
scjydz: $('td', info_tr.eq(8)).eq(1).text(),
scdyzbm: $('td', info_tr.eq(9)).eq(1).text(),
scdlxdh: $('td', info_tr.eq(9)).eq(3).text(),
cyrs: $('td', info_tr.eq(11)).eq(1).text(),
wjrs: $('input', info_tr.eq(11)).val(),
jyfw: $('td', info_tr.eq(13)).eq(1).text(),
zzhm: $('td', info_tr.eq(5)).eq(3).text(),
};
if(tzfxx.length){
ret.tzfxx = tzfxx;
}
return ret;
}
// 缴款信息查询
async fetch_jkxx(sbsjq, sbsjz, sbbzl){
l | onId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/JkxxcxAction.do', {
form: {
sbsjq,sbsjz,sbbzl,
errorMessage:'',handleDesc:'查询缴款信息',handleCode:'queryData',
cqSb:'0',sessionId
}
});
let $ = cheerio.load(ret.body);
let $trList = $('#querytb tr').toArray().slice(1);
return _.map($trList, (o,i)=>{
let $tdList = $('td', o);
return {
sbbzl: _.trim($tdList.eq(1).text()),
sbrq: _.trim($tdList.eq(2).text()),
skssqq: _.trim($tdList.eq(3).text()),
skssqz: _.trim($tdList.eq(4).text()),
yjkje: _.trim($tdList.eq(5).text()),
wjkje: _.trim($tdList.eq(6).text()),
dkje: _.trim($tdList.eq(7).text()),
hxje: _.trim($tdList.eq(8).text()),
}
});
}
// 电子交款凭证查询打印
async fetch_dzjk(sbrqq, sbrqz, kkrqq, kkrqz, lbzt){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/QykkxxCxAction.do', {
qs: {sessionId},
form: {
sbrqq,sbrqz,kkrqq,kkrqz,lbzt,
errorMessage:'',sucessMsg:'',handleDesc:'扣款数据查询',handleCode:'queryData',
cqSb:'0',sessionId
}
});
let $ = cheerio.load(ret.body);
return _.map($('#queryTb tr').toArray().slice(1), o=>{
let $td = $('td', o);
return {
sbblx: _.trim($td.eq(1).text()),
sbrq: _.trim($td.eq(2).text()),
skhj: _.trim($td.eq(3).text()),
jkfs: _.trim($td.eq(4).text()),
sbfs: _.trim($td.eq(5).text()),
kkrq: _.trim($td.eq(6).text()),
rkrq: _.trim($td.eq(7).text()),
}
});
}
async fetch_cwbb(sbnf){
console.log('获取财务报表:'+sbnf);
let { sessionId } = this._logininfo;
let swglm = sessionId.split(';')[0];
let cwbbjdqx = 'Y01_120';
let res = await this.httpPost('http://www.jsds.gov.cn/wb032_WBcwbbListAction.do', {
qs: {sessionId},
form: {
sbnf,cwbbErrzt:'1',cwbbdldm:'CKL',errorMessage:'',
swglm,curpzxh:'',handleDesc:'',handleCode:'submitSave',
cwbbjdqxmc:'年度终了后4月内',cwbbjdqx
}
})
console.log('获取财务报表step1');
console.log(res.body);
let $ = cheerio.load(res.body);
let cwbbList = _.map($('#queryTb tr').toArray().slice(1), o=>{
let $td = $('td', o);
console.log('debug1');
let deal_args = $td.eq(6).find('input').attr('onclick');
if(!deal_args) return null;
console.log('debug2');
console.log(deal_args);
deal_args = deal_args.substring(deal_args.indexOf('(')+1,deal_args.lastIndexOf(')'));
console.log('debug3');
deal_args = _.map(deal_args.split(','),o=>o.substr(1,o.length-2));
console.log('debug4');
let ret = {
sbnf,
bbzl: $td.eq(1).text().replace(/\s/g,''),
url:deal_args[0],
ssq:deal_args[1],
pzxh:deal_args[2],
czzt:deal_args[3],
zt:deal_args[4],
editzt:deal_args[5],
ypzxh:deal_args[6],
swglm:deal_args[7],
sqssq:deal_args[8],
bsqxdm:deal_args[9],
};
if(ret.pzxh){
ret.href = ret.url + "?sessionId=" + sessionId + "&pzxh=" + ret.pzxh + "&ssq=" + encodeURI(ret.ssq) + "&BBZT="
+ ret.czzt + "&zt=" + ret.zt + "&editzt=" + ret.editzt + "&swglm=" + ret.swglm
+ "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx=" + cwbbjdqx;
}else{
if (ypzxh != '') {
ret.href = ret.url + "?sessionId=" +sessionId+ "&ssq=" + encodeURI(ret.ssq) + "&BBZT=" + ret.zt
+ "&ypzxh=" + ret.ypzxh + "&swglm=" + ret.swglm + "&sqssq="
+ encodeURI(ret.sqssq) + "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx="+cwbbjdqx;
} else {
ret.href = ret.url + "?sessionId=" +sessionId+ "&ssq=" + encodeURI(ret.ssq) + "&BBZT=" + ret.zt
+ "&swglm=" + ret.swglm + "&sqssq=" + encodeURI(ret.sqssq)
+ "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx="+cwbbjdqx;
}
}
console.log(ret.href);
return ret;
});
console.log('获取财务报表step2');
cwbbList = _.compact(cwbbList);
for(let i in cwbbList){
let res = await this.httpGet('http://www.jsds.gov.cn'+cwbbList[i].href);
let $ = cheerio.load(res.body);
let table = $('input').toArray();
table = _.mapKeys(table, o=>$(o).attr('id'));
table = _.mapValues(table, o=>$(o).val());
cwbbList[i].table = table;
}
console.log('获取财务报表step3');
return cwbbList;
}
async data(){
console.log('地税:获取数据中...');
let { sessionId } = this._logininfo;
// 纳税人基本信息
let nsrjbxx = await this.fetch_nsrjbxx();
await this.httpGet('http://www.jsds.gov.cn/MainAction.do', {qs:{sessionId}});
//let jkxx = await this.fetch_jkxx('2015-01-01','2016-12-31','');
console.log('地税:获取电子缴款...');
let dzjk = [
...(await this.fetch_dzjk('2013-01-01','2016-12-31','','','1')),
...(await this.fetch_dzjk('2013-01-01','2016-12-31','','','2'))
];
console.log('地税:获取财务报表...');
let cwbb = [
...await this.fetch_cwbb('2016'),
...await this.fetch_cwbb('2015'),
...await this.fetch_cwbb('2014'),
...await this.fetch_cwbb('2013')
];
console.log('地税:获取财务报表完毕');
let taxList = _.map(dzjk, o=>({
name:o.sbblx,money:o.skhj,time:o.kkrq,remark:'地税-电子缴款'
}));
let taxMoneyList = _.map(cwbb, o=>({
year:o.sbnf,
capital:(parseFloat(o.table.fzqmye27 || '0') + parseFloat(o.table.fzncye27 || '0'))/2,
assets:(parseFloat(o.table.zcqmye32 || '0') + parseFloat(o.table.zcncye32 || '0'))/2,
equity:(parseFloat(o.table.fzqmye31 || '0') + parseFloat(o.table.fzncye31 || '0'))/2,
interest: o.table.bnljje18,
liability:(parseFloat(o.table.fzqmye19 || '0') + parseFloat(o.table.fzncye19 || '0'))/2,
revenue:parseFloat(o.table.bnljje1 || '0') + parseFloat(o.table.bnljje22 || '0'),
}));
let {nsrmc, ...oth} = nsrjbxx;
let info = {
name: nsrmc,
...oth
}
if(nsrjbxx.nsrsbh.length == 18) info.uscc = nsrjbxx.nsrsbh;
let cwbbList = _.map(cwbb, o=>{
let tempFile = 'runtime/jsds_'+uuid.v1()+'.txt';
let t = o.table;
fs.writeFileSync(tempFile,[
'资产负债表',
'资产\t\t期末余额\t\t月初余额',
`流动资产\t\t${t.zcqmye1}\t\t${t.zcncye1}`,
` 货币资金\t\t${t.zcqmye2}\t\t${t.zcncye2}`,//1
` 短期投资\t\t${t.zcqmye3}\t\t${t.zcncye3}`,//2
` 应收票据\t\t${t.zcqmye4}\t\t${t.zcncye4}`,//3
` 应收账款\t\t${t.zcqmye5}\t\t${t.zcncye5}`,//4
` 预付账款\t\t${t.zcqmye6}\t\t${t.zcncye6}`,//5
` 应收股利\t\t${t.zcqmye7}\t\t${t.zcncye7}`,//6
` 应收利息\t\t${t.zcqmye8}\t\t${t.zcncye8}`,//7
` 其他应收款\t\t${t.zcqmye9}\t\t${t.zcncye9}`,//8
` 存货\t\t${t.zcqmye10}\t\t${t.zcncye10}`,//9
' 其中:',
` 原材料\t\t${t.zcqmye11}\t\t${t.zcncye11}`,//10
` 在产品\t\t${t.zcqmye12}\t\t${t.zcncye12}`,//11
` 库存商品\t\t${t.zcqmye13}\t\t${t.zcncye13}`,//12
` 周转材料\t\t${t.zcqmye14}\t\t${t.zcncye14}`,//13
` 其他流动资产\t\t${t.zcqmye15}\t\t${t.zcncye15}`,//14
` 流动资产合计\t\t${t.zcqmye16}\t\t${t.zcncye16}`,//15
`非流动资产\t\t${t.zcqmye17}\t\t${t.zcncye17}`,
` 长期债券投资\t\t${t.zcqmye18}\t\t${t.zcncye18}`,//16
` 长期股权投资\t\t${t.zcqmye19}\t\t${t.zcncye19}`,//17
` 固定资产原价\t\t${t.zcqmye20}\t\t${t.zcncye20}`,//18
` 减:累计折旧\t\t${t.zcqmye21}\t\t${t.zcncye21}`,//19
` 固定资产账面价值\t\t${t.zcqmye22}\t\t${t.zcncye22}`,//20
` 在建工程\t\t${t.zcqmye23}\t\t${t.zcncye23}`,//21
` 工程物资\t\t${t.zcqmye24}\t\t${t.zcncye24}`,//22
` 固定资产清理\t\t${t.zcqmye25}\t\t${t.zcncye25}`,//23
` 生产性生物资产\t\t${t.zcqmye26}\t\t${t.zcncye26}`,//24
` 无形资产\t\t${t.zcqmye7}\t\t${t.zcncye27}`,//25
` 开发支出\t\t${t.zcqmye28}\t\t${t.zcncye28}`,//26
` 长期待摊费用\t\t${t.zcqmye29}\t\t${t.zcncye29}`,//27
` 其他非流动资产\t\t${t.zcqmye30}\t\t${t.zcncye30}`,//28
` 非流动资产合计\t\t${t.zcqmye31}\t\t${t.zcncye31}`,//29
` 资产合计\t\t${t.zcqmye32}\t\t${t.zcncye32}`,//30
'',
'负债和所有者权益\t\t期末余额\t\t月初余额',
`流动负债\t\t${t.fzqmye1}\t\t${t.fzncye1}`,
` 短期借款\t\t${t.fzqmye2}\t\t${t.fzncye2}`,//31
` 应付票据\t\t${t.fzqmye3}\t\t${t.fzncye3}`,//32
` 应付账款\t\t${t.fzqmye4}\t\t${t.fzncye4}`,//33
` 预收账款\t\t${t.fzqmye5}\t\t${t.fzncye5}`,//34
` 应付职工薪酬\t\t${t.fzqmye6}\t\t${t.fzncye6}`,//35
` 应交税费\t\t${t.fzqmye7}\t\t${t.fzncye7}`,//36
` 应付利息\t\t${t.fzqmye8}\t\t${t.fzncye8}`,//37
` 应付利润\t\t${t.fzqmye9}\t\t${t.fzncye9}`,//38
` 其他应付款\t\t${t.fzqmye10}\t\t${t.fzncye10}`,//39
` 其他流动负债\t\t${t.fzqmye11}\t\t${t.fzncye11}`,//40
` 流动负债合计\t\t${t.fzqmye12}\t\t${t.fzncye12}`,//41
`非流动负债\t\t${t.fzqmye13}\t\t${t.fzncye13}`,
` 长期借款\t\t${t.fzqmye14}\t\t${t.fzncye14}`,//42
` 长期应付款\t\t${t.fzqmye15}\t\t${t.fzncye15}`,//43
` 递延收益\t\t${t.fzqmye16}\t\t${t.fzncye16}`,//44
` 其他非流动负债\t\t${t.fzqmye17}\t\t${t.fzncye17}`,//45
` 非流动负债合计\t\t${t.fzqmye18}\t\t${t.fzncye18}`,//46
` 负债合计\t\t${t.fzqmye19}\t\t${t.fzncye19}`,//47
`所有者权益(或股东权益)\t\t${t.fzqmye26}\t\t${t.fzncye26}`,
` 实收资本(或股本)\t\t${t.fzqmye27}\t\t${t.fzncye27}`,//48
` 资本公积\t\t${t.fzqmye28}\t\t${t.fzncye28}`,//49
` 盈余公积\t\t${t.fzqmye29}\t\t${t.fzncye29}`,//50
` 未分配利润\t\t${t.fzqmye30}\t\t${t.fzncye30}`,//51
` 所有者权益(或股东权益)合计\t\t${t.fzqmye31}\t\t${t.fzncye31}`,//52
` 负债和所有者权益(或股东权益)总计\t\t${t.fzqmye32}\t\t${t.fzncye32}`,//53
'',
'',
'利润表',
'项目\t\t本年累计金额\t\t本月金额',
`一、营业收入\t\t${t.bnljje1}\t\t${t.byje1}`,//1
`减:营业成本\t\t${t.bnljje2}\t\t${t.byje2}`,//2
` 营业税金及附加\t\t${t.bnljje3}\t\t${t.byje3}`,//3
' 其中:',
` 消费税\t\t${t.bnljje4}\t\t${t.byje4}`,//4
` 营业税\t\t${t.bnljje5}\t\t${t.byje5}`,//5
` 城市维护建设税\t\t${t.bnljje6}\t\t${t.byje6}`,//6
` 资源税\t\t${t.bnljje7}\t\t${t.byje7}`,//7
` 土地增值税\t\t${t.bnljje8}\t\t${t.byje8}`,//8
` 城镇土地使用税、房产税、车船税、印花税\t\t${t.bnljje9}\t\t${t.byje9}`,//9
` 教育费附加、矿产资源补偿费、排污费\t\t${t.bnljje10}\t\t${t.byje10}`,//10
` 销售费用\t\t${t.bnljje11}\t\t${t.byje11}`,//11
' 其中:',
` 商品维修费\t\t${t.bnljje12}\t\t${t.byje12}`,//12
` 广告费和业务宣传费\t\t${t.bnljje13}\t\t${t.byje13}`,//13
` 管理费用\t\t${t.bnljje14}\t\t${t.byje14}`,//14
' 其中:',
` 开办费\t\t${t.bnljje15}\t\t${t.byje15}`,//15
` 业务招待费\t\t${t.bnljje16}\t\t${t.byje16}`,//16
` 研究费用\t\t${t.bnljje17}\t\t${t.byje17}`,//17
` 财务费用\t\t${t.bnljje18}\t\t${t.byje18}`,//18
` 其中:利息费用(收入以“—”号填列)\t\t${t.bnljje19}\t\t${t.byje19}`,//19
`加:投资收益(亏损以–填列)\t\t${t.bnljje20}\t\t${t.byje20}`,//20
'',
`二、营业利润(亏损以“-”号填列)\t\t${t.bnljje21}\t\t${t.byje21}`,//21
`加:营业外收入\t\t${t.bnljje22}\t\t${t.byje22}`,//22
` 其中:政府补助\t\t${t.bnljje23}\t\t${t.byje23}`,//23
`减:营业外支出\t\t${t.bnljje24}\t\t${t.byje24}`,//24
' 其中:',
` 坏账损失\t\t${t.bnljje25}\t\t${t.byje25}`,//25
` 无法收回的长期债券投资损失\t\t${t.bnljje26}\t\t${t.byje26}`,//26
` 无法收回的长期股权投资损失\t\t${t.bnljje27}\t\t${t.byje27}`,//27
` 自然灾害等不可抗力因素造成的损失\t\t${t.bnljje28}\t\t${t.byje28}`,//28
` 税收滞纳金\t\t${t.bnljje29}\t\t${t.byje29}`,//29
'',
`三、利润总额(亏损总额以“-”号填列)\t\t${t.bnljje30}\t\t${t.byje30}`,//30
`减:所得税费用\t\t${t.bnljje31}\t\t${t.byje31}`,//31
'',
`四、净利润(净亏损以“-”号填列)\t\t${t.bnljje32}\t\t${t.byje32}`,//32
'',
'',
'现金流量表',
'项目\t\t本年累计金额\t\t本月金额',
`一、经营活动产生的现金流量\t\t${t.xjbnljje1}\t\t${t.xjbyje1}`,
` 销售产成品、商品、提供劳务收到的现金\t\t${t.xjbnljje2}\t\t${t.xjbyje2}`,//1
` 收到的其他与经营活动有关的现金\t\t${t.xjbnljje3}\t\t${t.xjbyje3}`,//2
` 购买原材料、商品、接受劳务支付的现金\t\t${t.xjbnljje4}\t\t${t.xjbyje4}`,//3
` 支付的职工薪酬\t\t${t.xjbnljje5}\t\t${t.xjbyje5}`,//4
` 支付的税费\t\t${t.xjbnljje6}\t\t${t.xjbyje6}`,//5
` 支付的其他与经营活动有关的现金\t\t${t.xjbnljje7}\t\t${t.xjbyje7}`,//6
` 经营活动产生的现金流量净额\t\t${t.xjbnljje8}\t\t${t.xjbyje8}`,//7
`二、投资活动产生的现金流量:\t\t${t.xjbnljje9}\t\t${t.xjbyje9}`,
` 收回短期投资、长期债券投资和长期股权投资收到的现金\t\t${t.xjbnljje10}\t\t${t.xjbyje10}`,//8
` 取得投资收益收到的现金\t\t${t.xjbnljje11}\t\t${t.xjbyje11}`,//9
` 处置固定资产、无形资产和其他非流动资产收回的现金净额\t\t${t.xjbnljje12}\t\t${t.xjbyje12}`,//10
` 短期投资、长期债券投资和长期股权投资支付的现金\t\t${t.xjbnljje13}\t\t${t.xjbyje13}`,//11
` 购建固定资产、无形资产和其他非流动资产支付的现金\t\t${t.xjbnljje14}\t\t${t.xjbyje14}`,//12
` 投资活动产生的现金流量净额\t\t${t.xjbnljje15}\t\t${t.xjbyje15}`,//13
`三、筹资活动产生的现金流量:\t\t${t.xjbnljje16}\t\t${t.xjbyje16}`,
` 取得借款收到的现金\t\t${t.xjbnljje17}\t\t${t.xjbyje17}`,//14
` 吸收投资者投资收到的现金\t\t${t.xjbnljje18}\t\t${t.xjbyje18}`,//15
` 偿还借款本金支付的现金\t\t${t.xjbnljje19}\t\t${t.xjbyje19}`,//16
` 偿还借款利息支付的现金\t\t${t.xjbnljje20}\t\t${t.xjbyje20}`,//17
` 分配利润支付的现金\t\t${t.xjbnljje21}\t\t${t.xjbyje21}`,//18
` 筹资活动产生的现金流量净额\t\t${t.xjbnljje22}\t\t${t.xjbyje22}`,//19
`四、现金净增加额\t\t${t.xjbnljje23}\t\t${t.xjbyje23}`,//20
`加:期初现金余额\t\t${t.xjbnljje24}\t\t${t.xjbyje24}`,//21
`五、期末现金余额\t\t${t.xjbnljje25}\t\t${t.xjbyje25}`,//22
].join('\n'));
let hash = md5File.sync(tempFile);
fs.renameSync(tempFile, think.RUNTIME_PATH + '/archive/' + hash);
return {
name: o.bbzl,
time: o.sbnf,
archiveList:[{name:'报表',hash,type:'txt'}],
remark: '地税'
}
});
let ckzhList = await this.fetch_ckzh();
info.ckzhList = ckzhList;
return {nsrjbxx,dzjk,taxList,taxMoneyList,info, cwbb,cwbbList};
}
async fetch_ckzh(){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/NsrkhyhAction.do', {
qs: {
handleCode:'',sqzldm:'null',ssxmdm:'null',gnfldm:'null', sessionId,
jsonData:JSON.stringify({
data:{ gnfldm:'CXFW', sqzldm:'',ssxmdm:''}
})
}
});
let $ = cheerio.load(ret.body);
let trList = $('#querytb>tr').toArray();
return _.map(trList, o=>{
let tdList = $('td', o);
return {
yhhb: _.trim(tdList.eq(3).text()),
yhdm: _.trim(tdList.eq(4).text()),
yhzh: _.trim(tdList.eq(5).text())
}
});
}
}
| et { sessi | identifier_name |
jsds.js |
import Base from './base';
import _ from 'lodash';
import fs from 'fs';
import gm from 'gm';
import uuid from 'node-uuid';
import tesseract from 'node-tesseract';
import cheerio from 'cheerio';
import md5File from 'md5-file';
export default class extends Base {
fetchCaptcha(){
let codePath = 'code_' + uuid.v1() + '.jpg';
return new Promise((resolve, reject)=>{
let stream = gm(this.httpClient.get('http://www.jsds.gov.cn/index/fujia2.jsp'))
.operator('gray', 'threshold', 50, true).stream();
stream.pipe(fs.createWriteStream(codePath));
stream.on('end', ()=>{
tesseract.process(codePath, (error, text)=>{
fs.unlink(codePath);
error ? reject(error) : resolve(_.trim(text));
});
});
});
}
async login(username, password){
let captcha = await this.fetchCaptcha();
while(captcha.length != 4){
captcha = await this.fetchCaptcha();
}
let ret = await this.httpPost('http://www.jsds.gov.cn/LoginAction.do', {
form: { jsonData: JSON.stringify({
handleCode:'baseLogin',
data:{zh:username, zhPassWord:password, zhYzm: captcha}
})}
});
let {code:errno,msg:errmsg,data} = JSON.parse(ret.body);
if(errno == '0'){
errmsg = '';
this._logininfo = data;
}
else{
if(errno == '999904'){
errno = 'ERR_01';
errmsg = '代码错误';
}else if(errno == '999902'){
errno = 'ERR_02';
errmsg = '密码错误';
}else if(errno == '999901'){
errno = 'ERR_03';
errmsg = '验证码错误';
}else{
errno = 'SYS_' + errno;
}
}
return {errno, errmsg};
}
async fetch_nsrjbxx(){
let { sessionId } = this._logininfo;
let res = await this.httpGet('http://www.jsds.gov.cn/NsrjbxxAction.do', {
qs:{
sessionId, dealMethod:'queryData', jsonData:JSON.stringify({
data:{gnfldm:'CXFW',sqzldm:'',ssxmdm:''}
})
}
});
let $ = cheerio.load(res.body);
let info_tbl = $('table').eq(0);
let info_tr = $('tr', info_tbl);
let tzfxx_tr = $('#t_tzfxx tr').toArray().slice(1);
let tzfxx = _.map(tzfxx_tr, o=>({
tzfmc: $('[name=tzfxxvo_tzfmc]', o).val(),
zjzl: $('[name=tzfxxvo_zjzl]', o).val(),
zjhm: $('[name=tzfxxvo_zjhm]', o).val(),
tzbl: $('[name=tzfxxvo_tzbl]', o).val()
}));
let ret = {
nsrmc: $('td', info_tr.eq(1)).eq(1).text(),
nsrsbh: $('td', info_tr.eq(0)).eq(3).text(),
scjyqx: $('td', info_tr.eq(4)).eq(3).text(),
zcdz: $('td', info_tr.eq(6)).eq(1).text(),
zcdyzbm: $('td', info_tr.eq(7)).eq(1).text(),
zcdlxdh: $('td', info_tr.eq(7)).eq(3).text(),
scjydz: $('td', info_tr.eq(8)).eq(1).text(),
scdyzbm: $('td', info_tr.eq(9)).eq(1).text(),
scdlxdh: $('td', info_tr.eq(9)).eq(3).text(),
cyrs: $('td', info_tr.eq(11)).eq(1).text(),
wjrs: $('input', info_tr.eq(11)).val(),
jyfw: $('td', info_tr.eq(13)).eq(1).text(),
zzhm: $('td', info_tr.eq(5)).eq(3).text(),
};
if(tzfxx.length){
ret.tzfxx = tzfxx;
}
return ret;
}
// 缴款信息查询
async fetch_jkxx(sbsjq, sbsjz, sbbzl){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/JkxxcxAction.do', {
form: {
sbsjq,sbsjz,sbbzl,
errorMessage:'',handleDesc:'查询缴款信息',handleCode:'queryData',
cqSb:'0',sessionId
}
});
let $ = cheerio.load(ret.body);
let $trList = $('#querytb tr').toArray().slice(1);
return _.map($trList, (o,i)=>{
let $tdList = $('td', o);
return {
sbbzl: _.trim($tdList.eq(1).text()),
sbrq: _.trim($tdList.eq(2).text()),
skssqq: _.trim($tdList.eq(3).text()),
skssqz: _.trim($tdList.eq(4).text()),
yjkje: _.trim($tdList.eq(5).text()),
wjkje: _.trim($tdList.eq(6).text()),
dkje: _.trim($tdList.eq(7).text()),
hxje: _.trim($tdList.eq(8).text()),
}
});
}
// 电子交款凭证查询打印
async fetch_dzjk(sbrqq, sbrqz, kkrqq, kkrqz, lbzt){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/QykkxxCxAction.do', {
qs: {sessionId},
form: {
sbrqq,sbrqz,kkrqq,kkrqz,lbzt,
errorMessage:'',sucessMsg:'',handleDesc:'扣款数据查询',handleCode:'queryData',
cqSb:'0',sessionId
}
});
let $ = cheerio.load(ret.body);
return _.map($('#queryTb tr').toArray().slice(1), o=>{
let $td = $('td', o);
return {
sbblx: _.trim($td.eq(1).text()),
sbrq: _.trim($td.eq(2).text()),
skhj: _.trim($td.eq(3).text()),
jkfs: _.trim($td.eq(4).text()),
sbfs: _.trim($td.eq(5).text()),
kkrq: _.trim($td.eq(6).text()),
rkrq: _.trim($td.eq(7).text()),
}
});
}
async fetch_cwbb(sbnf){
console.log('获取财务报表:'+sbnf);
let { sessionId } = this._logininfo;
le | rjbxx();
await this.httpGet('http://www.jsds.gov.cn/MainAction.do', {qs:{sessionId}});
//let jkxx = await this.fetch_jkxx('2015-01-01','2016-12-31','');
console.log('地税:获取电子缴款...');
let dzjk = [
...(await this.fetch_dzjk('2013-01-01','2016-12-31','','','1')),
...(await this.fetch_dzjk('2013-01-01','2016-12-31','','','2'))
];
console.log('地税:获取财务报表...');
let cwbb = [
...await this.fetch_cwbb('2016'),
...await this.fetch_cwbb('2015'),
...await this.fetch_cwbb('2014'),
...await this.fetch_cwbb('2013')
];
console.log('地税:获取财务报表完毕');
let taxList = _.map(dzjk, o=>({
name:o.sbblx,money:o.skhj,time:o.kkrq,remark:'地税-电子缴款'
}));
let taxMoneyList = _.map(cwbb, o=>({
year:o.sbnf,
capital:(parseFloat(o.table.fzqmye27 || '0') + parseFloat(o.table.fzncye27 || '0'))/2,
assets:(parseFloat(o.table.zcqmye32 || '0') + parseFloat(o.table.zcncye32 || '0'))/2,
equity:(parseFloat(o.table.fzqmye31 || '0') + parseFloat(o.table.fzncye31 || '0'))/2,
interest: o.table.bnljje18,
liability:(parseFloat(o.table.fzqmye19 || '0') + parseFloat(o.table.fzncye19 || '0'))/2,
revenue:parseFloat(o.table.bnljje1 || '0') + parseFloat(o.table.bnljje22 || '0'),
}));
let {nsrmc, ...oth} = nsrjbxx;
let info = {
name: nsrmc,
...oth
}
if(nsrjbxx.nsrsbh.length == 18) info.uscc = nsrjbxx.nsrsbh;
let cwbbList = _.map(cwbb, o=>{
let tempFile = 'runtime/jsds_'+uuid.v1()+'.txt';
let t = o.table;
fs.writeFileSync(tempFile,[
'资产负债表',
'资产\t\t期末余额\t\t月初余额',
`流动资产\t\t${t.zcqmye1}\t\t${t.zcncye1}`,
` 货币资金\t\t${t.zcqmye2}\t\t${t.zcncye2}`,//1
` 短期投资\t\t${t.zcqmye3}\t\t${t.zcncye3}`,//2
` 应收票据\t\t${t.zcqmye4}\t\t${t.zcncye4}`,//3
` 应收账款\t\t${t.zcqmye5}\t\t${t.zcncye5}`,//4
` 预付账款\t\t${t.zcqmye6}\t\t${t.zcncye6}`,//5
` 应收股利\t\t${t.zcqmye7}\t\t${t.zcncye7}`,//6
` 应收利息\t\t${t.zcqmye8}\t\t${t.zcncye8}`,//7
` 其他应收款\t\t${t.zcqmye9}\t\t${t.zcncye9}`,//8
` 存货\t\t${t.zcqmye10}\t\t${t.zcncye10}`,//9
' 其中:',
` 原材料\t\t${t.zcqmye11}\t\t${t.zcncye11}`,//10
` 在产品\t\t${t.zcqmye12}\t\t${t.zcncye12}`,//11
` 库存商品\t\t${t.zcqmye13}\t\t${t.zcncye13}`,//12
` 周转材料\t\t${t.zcqmye14}\t\t${t.zcncye14}`,//13
` 其他流动资产\t\t${t.zcqmye15}\t\t${t.zcncye15}`,//14
` 流动资产合计\t\t${t.zcqmye16}\t\t${t.zcncye16}`,//15
`非流动资产\t\t${t.zcqmye17}\t\t${t.zcncye17}`,
` 长期债券投资\t\t${t.zcqmye18}\t\t${t.zcncye18}`,//16
` 长期股权投资\t\t${t.zcqmye19}\t\t${t.zcncye19}`,//17
` 固定资产原价\t\t${t.zcqmye20}\t\t${t.zcncye20}`,//18
` 减:累计折旧\t\t${t.zcqmye21}\t\t${t.zcncye21}`,//19
` 固定资产账面价值\t\t${t.zcqmye22}\t\t${t.zcncye22}`,//20
` 在建工程\t\t${t.zcqmye23}\t\t${t.zcncye23}`,//21
` 工程物资\t\t${t.zcqmye24}\t\t${t.zcncye24}`,//22
` 固定资产清理\t\t${t.zcqmye25}\t\t${t.zcncye25}`,//23
` 生产性生物资产\t\t${t.zcqmye26}\t\t${t.zcncye26}`,//24
` 无形资产\t\t${t.zcqmye7}\t\t${t.zcncye27}`,//25
` 开发支出\t\t${t.zcqmye28}\t\t${t.zcncye28}`,//26
` 长期待摊费用\t\t${t.zcqmye29}\t\t${t.zcncye29}`,//27
` 其他非流动资产\t\t${t.zcqmye30}\t\t${t.zcncye30}`,//28
` 非流动资产合计\t\t${t.zcqmye31}\t\t${t.zcncye31}`,//29
` 资产合计\t\t${t.zcqmye32}\t\t${t.zcncye32}`,//30
'',
'负债和所有者权益\t\t期末余额\t\t月初余额',
`流动负债\t\t${t.fzqmye1}\t\t${t.fzncye1}`,
` 短期借款\t\t${t.fzqmye2}\t\t${t.fzncye2}`,//31
` 应付票据\t\t${t.fzqmye3}\t\t${t.fzncye3}`,//32
` 应付账款\t\t${t.fzqmye4}\t\t${t.fzncye4}`,//33
` 预收账款\t\t${t.fzqmye5}\t\t${t.fzncye5}`,//34
` 应付职工薪酬\t\t${t.fzqmye6}\t\t${t.fzncye6}`,//35
` 应交税费\t\t${t.fzqmye7}\t\t${t.fzncye7}`,//36
` 应付利息\t\t${t.fzqmye8}\t\t${t.fzncye8}`,//37
` 应付利润\t\t${t.fzqmye9}\t\t${t.fzncye9}`,//38
` 其他应付款\t\t${t.fzqmye10}\t\t${t.fzncye10}`,//39
` 其他流动负债\t\t${t.fzqmye11}\t\t${t.fzncye11}`,//40
` 流动负债合计\t\t${t.fzqmye12}\t\t${t.fzncye12}`,//41
`非流动负债\t\t${t.fzqmye13}\t\t${t.fzncye13}`,
` 长期借款\t\t${t.fzqmye14}\t\t${t.fzncye14}`,//42
` 长期应付款\t\t${t.fzqmye15}\t\t${t.fzncye15}`,//43
` 递延收益\t\t${t.fzqmye16}\t\t${t.fzncye16}`,//44
` 其他非流动负债\t\t${t.fzqmye17}\t\t${t.fzncye17}`,//45
` 非流动负债合计\t\t${t.fzqmye18}\t\t${t.fzncye18}`,//46
` 负债合计\t\t${t.fzqmye19}\t\t${t.fzncye19}`,//47
`所有者权益(或股东权益)\t\t${t.fzqmye26}\t\t${t.fzncye26}`,
` 实收资本(或股本)\t\t${t.fzqmye27}\t\t${t.fzncye27}`,//48
` 资本公积\t\t${t.fzqmye28}\t\t${t.fzncye28}`,//49
` 盈余公积\t\t${t.fzqmye29}\t\t${t.fzncye29}`,//50
` 未分配利润\t\t${t.fzqmye30}\t\t${t.fzncye30}`,//51
` 所有者权益(或股东权益)合计\t\t${t.fzqmye31}\t\t${t.fzncye31}`,//52
` 负债和所有者权益(或股东权益)总计\t\t${t.fzqmye32}\t\t${t.fzncye32}`,//53
'',
'',
'利润表',
'项目\t\t本年累计金额\t\t本月金额',
`一、营业收入\t\t${t.bnljje1}\t\t${t.byje1}`,//1
`减:营业成本\t\t${t.bnljje2}\t\t${t.byje2}`,//2
` 营业税金及附加\t\t${t.bnljje3}\t\t${t.byje3}`,//3
' 其中:',
` 消费税\t\t${t.bnljje4}\t\t${t.byje4}`,//4
` 营业税\t\t${t.bnljje5}\t\t${t.byje5}`,//5
` 城市维护建设税\t\t${t.bnljje6}\t\t${t.byje6}`,//6
` 资源税\t\t${t.bnljje7}\t\t${t.byje7}`,//7
` 土地增值税\t\t${t.bnljje8}\t\t${t.byje8}`,//8
` 城镇土地使用税、房产税、车船税、印花税\t\t${t.bnljje9}\t\t${t.byje9}`,//9
` 教育费附加、矿产资源补偿费、排污费\t\t${t.bnljje10}\t\t${t.byje10}`,//10
` 销售费用\t\t${t.bnljje11}\t\t${t.byje11}`,//11
' 其中:',
` 商品维修费\t\t${t.bnljje12}\t\t${t.byje12}`,//12
` 广告费和业务宣传费\t\t${t.bnljje13}\t\t${t.byje13}`,//13
` 管理费用\t\t${t.bnljje14}\t\t${t.byje14}`,//14
' 其中:',
` 开办费\t\t${t.bnljje15}\t\t${t.byje15}`,//15
` 业务招待费\t\t${t.bnljje16}\t\t${t.byje16}`,//16
` 研究费用\t\t${t.bnljje17}\t\t${t.byje17}`,//17
` 财务费用\t\t${t.bnljje18}\t\t${t.byje18}`,//18
` 其中:利息费用(收入以“—”号填列)\t\t${t.bnljje19}\t\t${t.byje19}`,//19
`加:投资收益(亏损以–填列)\t\t${t.bnljje20}\t\t${t.byje20}`,//20
'',
`二、营业利润(亏损以“-”号填列)\t\t${t.bnljje21}\t\t${t.byje21}`,//21
`加:营业外收入\t\t${t.bnljje22}\t\t${t.byje22}`,//22
` 其中:政府补助\t\t${t.bnljje23}\t\t${t.byje23}`,//23
`减:营业外支出\t\t${t.bnljje24}\t\t${t.byje24}`,//24
' 其中:',
` 坏账损失\t\t${t.bnljje25}\t\t${t.byje25}`,//25
` 无法收回的长期债券投资损失\t\t${t.bnljje26}\t\t${t.byje26}`,//26
` 无法收回的长期股权投资损失\t\t${t.bnljje27}\t\t${t.byje27}`,//27
` 自然灾害等不可抗力因素造成的损失\t\t${t.bnljje28}\t\t${t.byje28}`,//28
` 税收滞纳金\t\t${t.bnljje29}\t\t${t.byje29}`,//29
'',
`三、利润总额(亏损总额以“-”号填列)\t\t${t.bnljje30}\t\t${t.byje30}`,//30
`减:所得税费用\t\t${t.bnljje31}\t\t${t.byje31}`,//31
'',
`四、净利润(净亏损以“-”号填列)\t\t${t.bnljje32}\t\t${t.byje32}`,//32
'',
'',
'现金流量表',
'项目\t\t本年累计金额\t\t本月金额',
`一、经营活动产生的现金流量\t\t${t.xjbnljje1}\t\t${t.xjbyje1}`,
` 销售产成品、商品、提供劳务收到的现金\t\t${t.xjbnljje2}\t\t${t.xjbyje2}`,//1
` 收到的其他与经营活动有关的现金\t\t${t.xjbnljje3}\t\t${t.xjbyje3}`,//2
` 购买原材料、商品、接受劳务支付的现金\t\t${t.xjbnljje4}\t\t${t.xjbyje4}`,//3
` 支付的职工薪酬\t\t${t.xjbnljje5}\t\t${t.xjbyje5}`,//4
` 支付的税费\t\t${t.xjbnljje6}\t\t${t.xjbyje6}`,//5
` 支付的其他与经营活动有关的现金\t\t${t.xjbnljje7}\t\t${t.xjbyje7}`,//6
` 经营活动产生的现金流量净额\t\t${t.xjbnljje8}\t\t${t.xjbyje8}`,//7
`二、投资活动产生的现金流量:\t\t${t.xjbnljje9}\t\t${t.xjbyje9}`,
` 收回短期投资、长期债券投资和长期股权投资收到的现金\t\t${t.xjbnljje10}\t\t${t.xjbyje10}`,//8
` 取得投资收益收到的现金\t\t${t.xjbnljje11}\t\t${t.xjbyje11}`,//9
` 处置固定资产、无形资产和其他非流动资产收回的现金净额\t\t${t.xjbnljje12}\t\t${t.xjbyje12}`,//10
` 短期投资、长期债券投资和长期股权投资支付的现金\t\t${t.xjbnljje13}\t\t${t.xjbyje13}`,//11
` 购建固定资产、无形资产和其他非流动资产支付的现金\t\t${t.xjbnljje14}\t\t${t.xjbyje14}`,//12
` 投资活动产生的现金流量净额\t\t${t.xjbnljje15}\t\t${t.xjbyje15}`,//13
`三、筹资活动产生的现金流量:\t\t${t.xjbnljje16}\t\t${t.xjbyje16}`,
` 取得借款收到的现金\t\t${t.xjbnljje17}\t\t${t.xjbyje17}`,//14
` 吸收投资者投资收到的现金\t\t${t.xjbnljje18}\t\t${t.xjbyje18}`,//15
` 偿还借款本金支付的现金\t\t${t.xjbnljje19}\t\t${t.xjbyje19}`,//16
` 偿还借款利息支付的现金\t\t${t.xjbnljje20}\t\t${t.xjbyje20}`,//17
` 分配利润支付的现金\t\t${t.xjbnljje21}\t\t${t.xjbyje21}`,//18
` 筹资活动产生的现金流量净额\t\t${t.xjbnljje22}\t\t${t.xjbyje22}`,//19
`四、现金净增加额\t\t${t.xjbnljje23}\t\t${t.xjbyje23}`,//20
`加:期初现金余额\t\t${t.xjbnljje24}\t\t${t.xjbyje24}`,//21
`五、期末现金余额\t\t${t.xjbnljje25}\t\t${t.xjbyje25}`,//22
].join('\n'));
let hash = md5File.sync(tempFile);
fs.renameSync(tempFile, think.RUNTIME_PATH + '/archive/' + hash);
return {
name: o.bbzl,
time: o.sbnf,
archiveList:[{name:'报表',hash,type:'txt'}],
remark: '地税'
}
});
let ckzhList = await this.fetch_ckzh();
info.ckzhList = ckzhList;
return {nsrjbxx,dzjk,taxList,taxMoneyList,info, cwbb,cwbbList};
}
async fetch_ckzh(){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/NsrkhyhAction.do', {
qs: {
handleCode:'',sqzldm:'null',ssxmdm:'null',gnfldm:'null', sessionId,
jsonData:JSON.stringify({
data:{ gnfldm:'CXFW', sqzldm:'',ssxmdm:''}
})
}
});
let $ = cheerio.load(ret.body);
let trList = $('#querytb>tr').toArray();
return _.map(trList, o=>{
let tdList = $('td', o);
return {
yhhb: _.trim(tdList.eq(3).text()),
yhdm: _.trim(tdList.eq(4).text()),
yhzh: _.trim(tdList.eq(5).text())
}
});
}
}
| t swglm = sessionId.split(';')[0];
let cwbbjdqx = 'Y01_120';
let res = await this.httpPost('http://www.jsds.gov.cn/wb032_WBcwbbListAction.do', {
qs: {sessionId},
form: {
sbnf,cwbbErrzt:'1',cwbbdldm:'CKL',errorMessage:'',
swglm,curpzxh:'',handleDesc:'',handleCode:'submitSave',
cwbbjdqxmc:'年度终了后4月内',cwbbjdqx
}
})
console.log('获取财务报表step1');
console.log(res.body);
let $ = cheerio.load(res.body);
let cwbbList = _.map($('#queryTb tr').toArray().slice(1), o=>{
let $td = $('td', o);
console.log('debug1');
let deal_args = $td.eq(6).find('input').attr('onclick');
if(!deal_args) return null;
console.log('debug2');
console.log(deal_args);
deal_args = deal_args.substring(deal_args.indexOf('(')+1,deal_args.lastIndexOf(')'));
console.log('debug3');
deal_args = _.map(deal_args.split(','),o=>o.substr(1,o.length-2));
console.log('debug4');
let ret = {
sbnf,
bbzl: $td.eq(1).text().replace(/\s/g,''),
url:deal_args[0],
ssq:deal_args[1],
pzxh:deal_args[2],
czzt:deal_args[3],
zt:deal_args[4],
editzt:deal_args[5],
ypzxh:deal_args[6],
swglm:deal_args[7],
sqssq:deal_args[8],
bsqxdm:deal_args[9],
};
if(ret.pzxh){
ret.href = ret.url + "?sessionId=" + sessionId + "&pzxh=" + ret.pzxh + "&ssq=" + encodeURI(ret.ssq) + "&BBZT="
+ ret.czzt + "&zt=" + ret.zt + "&editzt=" + ret.editzt + "&swglm=" + ret.swglm
+ "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx=" + cwbbjdqx;
}else{
if (ypzxh != '') {
ret.href = ret.url + "?sessionId=" +sessionId+ "&ssq=" + encodeURI(ret.ssq) + "&BBZT=" + ret.zt
+ "&ypzxh=" + ret.ypzxh + "&swglm=" + ret.swglm + "&sqssq="
+ encodeURI(ret.sqssq) + "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx="+cwbbjdqx;
} else {
ret.href = ret.url + "?sessionId=" +sessionId+ "&ssq=" + encodeURI(ret.ssq) + "&BBZT=" + ret.zt
+ "&swglm=" + ret.swglm + "&sqssq=" + encodeURI(ret.sqssq)
+ "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx="+cwbbjdqx;
}
}
console.log(ret.href);
return ret;
});
console.log('获取财务报表step2');
cwbbList = _.compact(cwbbList);
for(let i in cwbbList){
let res = await this.httpGet('http://www.jsds.gov.cn'+cwbbList[i].href);
let $ = cheerio.load(res.body);
let table = $('input').toArray();
table = _.mapKeys(table, o=>$(o).attr('id'));
table = _.mapValues(table, o=>$(o).val());
cwbbList[i].table = table;
}
console.log('获取财务报表step3');
return cwbbList;
}
async data(){
console.log('地税:获取数据中...');
let { sessionId } = this._logininfo;
// 纳税人基本信息
let nsrjbxx = await this.fetch_ns | identifier_body |
jsds.js |
import Base from './base';
import _ from 'lodash';
import fs from 'fs';
import gm from 'gm';
import uuid from 'node-uuid';
import tesseract from 'node-tesseract';
import cheerio from 'cheerio';
import md5File from 'md5-file';
export default class extends Base {
fetchCaptcha(){
let codePath = 'code_' + uuid.v1() + '.jpg';
return new Promise((resolve, reject)=>{
let stream = gm(this.httpClient.get('http://www.jsds.gov.cn/index/fujia2.jsp'))
.operator('gray', 'threshold', 50, true).stream();
stream.pipe(fs.createWriteStream(codePath));
stream.on('end', ()=>{
tesseract.process(codePath, (error, text)=>{
fs.unlink(codePath);
error ? reject(error) : resolve(_.trim(text));
});
});
});
}
async login(username, password){
let captcha = await this.fetchCaptcha();
while(captcha.length != 4){
captcha = await this.fetchCaptcha();
}
let ret = await this.httpPost('http://www.jsds.gov.cn/LoginAction.do', {
form: { jsonData: JSON.stringify({
handleCode:'baseLogin',
data:{zh:username, zhPassWord:password, zhYzm: captcha}
})}
});
let {code:errno,msg:errmsg,data} = JSON.parse(ret.body);
if(errno == '0'){
errmsg = '';
this._logininfo = data;
}
else{
if(errno == '999904'){
errno = 'ERR_01';
errmsg = '代码错误';
}else if(errno == '999902'){
errno = 'ERR_02';
errmsg = '密码错误';
}else if(errno == '999901'){
errno = 'ERR_03';
errmsg = '验证码错误';
}else{
errno = 'SYS_' + errno;
}
}
return {errno, errmsg};
}
async fetch_nsrjbxx(){
let { sessionId } = this._logininfo;
let res = await this.httpGet('http://www.jsds.gov.cn/NsrjbxxAction.do', {
qs:{
sessionId, dealMethod:'queryData', jsonData:JSON.stringify({
data:{gnfldm:'CXFW',sqzldm:'',ssxmdm:''}
})
}
});
let $ = cheerio.load(res.body);
let info_tbl = $('table').eq(0);
let info_tr = $('tr', info_tbl);
let tzfxx_tr = $('#t_tzfxx tr').toArray().slice(1);
let tzfxx = _.map(tzfxx_tr, o=>({
tzfmc: $('[name=tzfxxvo_tzfmc]', o).val(),
zjzl: $('[name=tzfxxvo_zjzl]', o).val(),
zjhm: $('[name=tzfxxvo_zjhm]', o).val(),
tzbl: $('[name=tzfxxvo_tzbl]', o).val()
}));
let ret = {
nsrmc: $('td', info_tr.eq(1)).eq(1).text(),
nsrsbh: $('td', info_tr.eq(0)).eq(3).text(),
scjyqx: $('td', info_tr.eq(4)).eq(3).text(),
zcdz: $('td', info_tr.eq(6)).eq(1).text(),
zcdyzbm: $('td', info_tr.eq(7)).eq(1).text(),
zcdlxdh: $('td', info_tr.eq(7)).eq(3).text(),
scjydz: $('td', info_tr.eq(8)).eq(1).text(),
scdyzbm: $('td', info_tr.eq(9)).eq(1).text(),
scdlxdh: $('td', info_tr.eq(9)).eq(3).text(),
cyrs: $('td', info_tr.eq(11)).eq(1).text(),
wjrs: $('input', info_tr.eq(11)).val(),
jyfw: $('td', info_tr.eq(13)).eq(1).text(),
zzhm: $('td', info_tr.eq(5)).eq(3).text(),
};
if(tzfxx.length){
ret.tzfxx = tzfxx;
}
return ret;
}
// 缴款信息查询
async fetch_jkxx(sbsjq, sbsjz, sbbzl){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/JkxxcxAction.do', {
form: {
sbsjq,sbsjz,sbbzl,
errorMessage:'',handleDesc:'查询缴款信息',handleCode:'queryData',
cqSb:'0',sessionId
}
});
let $ = cheerio.load(ret.body);
let $trList = $('#querytb tr').toArray().slice(1);
return _.map($trList, (o,i)=>{
let $tdList = $('td', o);
return {
sbbzl: _.trim($tdList.eq(1).text()),
sbrq: _.trim($tdList.eq(2).text()),
skssqq: _.trim($tdList.eq(3).text()),
skssqz: _.trim($tdList.eq(4).text()),
yjkje: _.trim($tdList.eq(5).text()),
wjkje: _.trim($tdList.eq(6).text()),
dkje: _.trim($tdList.eq(7).text()),
hxje: _.trim($tdList.eq(8).text()),
}
});
}
// 电子交款凭证查询打印
async fetch_dzjk(sbrqq, sbrqz, kkrqq, kkrqz, lbzt){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/QykkxxCxAction.do', {
qs: {sessionId},
form: {
sbrqq,sbrqz,kkrqq,kkrqz,lbzt,
errorMessage:'',sucessMsg:'',handleDesc:'扣款数据查询',handleCode:'queryData',
cqSb:'0',sessionId
}
});
let $ = cheerio.load(ret.body);
return _.map($('#queryTb tr').toArray().slice(1), o=>{
let $td = $('td', o);
return {
sbblx: _.trim($td.eq(1).text()),
sbrq: _.trim($td.eq(2).text()),
skhj: _.trim($td.eq(3).text()),
jkfs: _.trim($td.eq(4).text()),
sbfs: _.trim($td.eq(5).text()),
kkrq: _.trim($td.eq(6).text()),
rkrq: _.trim($td.eq(7).text()),
}
});
}
async fetch_cwbb(sbnf){
console.log('获取财务报表:'+sbnf);
let { sessionId } = this._logininfo;
let swglm = sessionId.split(';')[0];
let cwbbjdqx = 'Y01_120';
let res = await this.httpPost('http://www.jsds.gov.cn/wb032_WBcwbbListAction.do', {
qs: {sessionId},
form: {
sbnf,cwbbErrzt:'1',cwbbdldm:'CKL',errorMessage:'',
swglm,curpzxh:'',handleDesc:'',handleCode:'submitSave',
cwbbjdqxmc:'年度终了后4月内',cwbbjdqx
}
})
console.log('获取财务报表step1');
console.log(res.body);
let $ = cheerio.load(res.body);
let cwbbList = _.map($('#queryTb tr').toArray().slice(1), o=>{
let $td = $('td', o);
console.log('debug1');
let deal_args = $td.eq(6).find('input').attr('onclick');
if(!deal_args) return null;
console.log('debug2');
console.log(deal_args);
deal_args = deal_args.substring(deal_args.indexOf('(')+1,deal_args.lastIndexOf(')'));
console.log('debug3');
deal_args = _.map(deal_args.split(','),o=>o.substr(1,o.length-2));
console.log('debug4');
let ret = {
sbnf,
bbzl: $td.eq(1).text().replace(/\s/g,''),
url:deal_args[0],
ssq:deal_args[1],
pzxh:deal_args[2],
czzt:deal_args[3],
zt:deal_args[4],
editzt:deal_args[5],
ypzxh:deal_args[6],
swglm:deal_args[7],
sqssq:deal_args[8],
bsqxdm:deal_args[9],
};
if(ret.pzxh){
ret.href = ret.url + "?sessionId=" + sessionId + "&pzxh=" + ret.pzxh + "&ssq=" + encodeURI(ret.ssq) + "&BBZT="
+ ret.czzt + "&zt=" + ret.zt + "&editzt=" + ret.editzt + "&swglm=" + ret.swglm
+ "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx=" + cwbbjdqx;
}else{
if (ypzxh != '') {
ret.href = ret.url + "?sessionId=" +sessionId+ "&ssq=" + encodeURI(ret.ssq) + "&BBZT=" + ret.zt
+ "&yp | + "&swglm=" + ret.swglm + "&sqssq=" + encodeURI(ret.sqssq)
+ "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx="+cwbbjdqx;
}
}
console.log(ret.href);
return ret;
});
console.log('获取财务报表step2');
cwbbList = _.compact(cwbbList);
for(let i in cwbbList){
let res = await this.httpGet('http://www.jsds.gov.cn'+cwbbList[i].href);
let $ = cheerio.load(res.body);
let table = $('input').toArray();
table = _.mapKeys(table, o=>$(o).attr('id'));
table = _.mapValues(table, o=>$(o).val());
cwbbList[i].table = table;
}
console.log('获取财务报表step3');
return cwbbList;
}
async data(){
console.log('地税:获取数据中...');
let { sessionId } = this._logininfo;
// 纳税人基本信息
let nsrjbxx = await this.fetch_nsrjbxx();
await this.httpGet('http://www.jsds.gov.cn/MainAction.do', {qs:{sessionId}});
//let jkxx = await this.fetch_jkxx('2015-01-01','2016-12-31','');
console.log('地税:获取电子缴款...');
let dzjk = [
...(await this.fetch_dzjk('2013-01-01','2016-12-31','','','1')),
...(await this.fetch_dzjk('2013-01-01','2016-12-31','','','2'))
];
console.log('地税:获取财务报表...');
let cwbb = [
...await this.fetch_cwbb('2016'),
...await this.fetch_cwbb('2015'),
...await this.fetch_cwbb('2014'),
...await this.fetch_cwbb('2013')
];
console.log('地税:获取财务报表完毕');
let taxList = _.map(dzjk, o=>({
name:o.sbblx,money:o.skhj,time:o.kkrq,remark:'地税-电子缴款'
}));
let taxMoneyList = _.map(cwbb, o=>({
year:o.sbnf,
capital:(parseFloat(o.table.fzqmye27 || '0') + parseFloat(o.table.fzncye27 || '0'))/2,
assets:(parseFloat(o.table.zcqmye32 || '0') + parseFloat(o.table.zcncye32 || '0'))/2,
equity:(parseFloat(o.table.fzqmye31 || '0') + parseFloat(o.table.fzncye31 || '0'))/2,
interest: o.table.bnljje18,
liability:(parseFloat(o.table.fzqmye19 || '0') + parseFloat(o.table.fzncye19 || '0'))/2,
revenue:parseFloat(o.table.bnljje1 || '0') + parseFloat(o.table.bnljje22 || '0'),
}));
let {nsrmc, ...oth} = nsrjbxx;
let info = {
name: nsrmc,
...oth
}
if(nsrjbxx.nsrsbh.length == 18) info.uscc = nsrjbxx.nsrsbh;
let cwbbList = _.map(cwbb, o=>{
let tempFile = 'runtime/jsds_'+uuid.v1()+'.txt';
let t = o.table;
fs.writeFileSync(tempFile,[
'资产负债表',
'资产\t\t期末余额\t\t月初余额',
`流动资产\t\t${t.zcqmye1}\t\t${t.zcncye1}`,
` 货币资金\t\t${t.zcqmye2}\t\t${t.zcncye2}`,//1
` 短期投资\t\t${t.zcqmye3}\t\t${t.zcncye3}`,//2
` 应收票据\t\t${t.zcqmye4}\t\t${t.zcncye4}`,//3
` 应收账款\t\t${t.zcqmye5}\t\t${t.zcncye5}`,//4
` 预付账款\t\t${t.zcqmye6}\t\t${t.zcncye6}`,//5
` 应收股利\t\t${t.zcqmye7}\t\t${t.zcncye7}`,//6
` 应收利息\t\t${t.zcqmye8}\t\t${t.zcncye8}`,//7
` 其他应收款\t\t${t.zcqmye9}\t\t${t.zcncye9}`,//8
` 存货\t\t${t.zcqmye10}\t\t${t.zcncye10}`,//9
' 其中:',
` 原材料\t\t${t.zcqmye11}\t\t${t.zcncye11}`,//10
` 在产品\t\t${t.zcqmye12}\t\t${t.zcncye12}`,//11
` 库存商品\t\t${t.zcqmye13}\t\t${t.zcncye13}`,//12
` 周转材料\t\t${t.zcqmye14}\t\t${t.zcncye14}`,//13
` 其他流动资产\t\t${t.zcqmye15}\t\t${t.zcncye15}`,//14
` 流动资产合计\t\t${t.zcqmye16}\t\t${t.zcncye16}`,//15
`非流动资产\t\t${t.zcqmye17}\t\t${t.zcncye17}`,
` 长期债券投资\t\t${t.zcqmye18}\t\t${t.zcncye18}`,//16
` 长期股权投资\t\t${t.zcqmye19}\t\t${t.zcncye19}`,//17
` 固定资产原价\t\t${t.zcqmye20}\t\t${t.zcncye20}`,//18
` 减:累计折旧\t\t${t.zcqmye21}\t\t${t.zcncye21}`,//19
` 固定资产账面价值\t\t${t.zcqmye22}\t\t${t.zcncye22}`,//20
` 在建工程\t\t${t.zcqmye23}\t\t${t.zcncye23}`,//21
` 工程物资\t\t${t.zcqmye24}\t\t${t.zcncye24}`,//22
` 固定资产清理\t\t${t.zcqmye25}\t\t${t.zcncye25}`,//23
` 生产性生物资产\t\t${t.zcqmye26}\t\t${t.zcncye26}`,//24
` 无形资产\t\t${t.zcqmye7}\t\t${t.zcncye27}`,//25
` 开发支出\t\t${t.zcqmye28}\t\t${t.zcncye28}`,//26
` 长期待摊费用\t\t${t.zcqmye29}\t\t${t.zcncye29}`,//27
` 其他非流动资产\t\t${t.zcqmye30}\t\t${t.zcncye30}`,//28
` 非流动资产合计\t\t${t.zcqmye31}\t\t${t.zcncye31}`,//29
` 资产合计\t\t${t.zcqmye32}\t\t${t.zcncye32}`,//30
'',
'负债和所有者权益\t\t期末余额\t\t月初余额',
`流动负债\t\t${t.fzqmye1}\t\t${t.fzncye1}`,
` 短期借款\t\t${t.fzqmye2}\t\t${t.fzncye2}`,//31
` 应付票据\t\t${t.fzqmye3}\t\t${t.fzncye3}`,//32
` 应付账款\t\t${t.fzqmye4}\t\t${t.fzncye4}`,//33
` 预收账款\t\t${t.fzqmye5}\t\t${t.fzncye5}`,//34
` 应付职工薪酬\t\t${t.fzqmye6}\t\t${t.fzncye6}`,//35
` 应交税费\t\t${t.fzqmye7}\t\t${t.fzncye7}`,//36
` 应付利息\t\t${t.fzqmye8}\t\t${t.fzncye8}`,//37
` 应付利润\t\t${t.fzqmye9}\t\t${t.fzncye9}`,//38
` 其他应付款\t\t${t.fzqmye10}\t\t${t.fzncye10}`,//39
` 其他流动负债\t\t${t.fzqmye11}\t\t${t.fzncye11}`,//40
` 流动负债合计\t\t${t.fzqmye12}\t\t${t.fzncye12}`,//41
`非流动负债\t\t${t.fzqmye13}\t\t${t.fzncye13}`,
` 长期借款\t\t${t.fzqmye14}\t\t${t.fzncye14}`,//42
` 长期应付款\t\t${t.fzqmye15}\t\t${t.fzncye15}`,//43
` 递延收益\t\t${t.fzqmye16}\t\t${t.fzncye16}`,//44
` 其他非流动负债\t\t${t.fzqmye17}\t\t${t.fzncye17}`,//45
` 非流动负债合计\t\t${t.fzqmye18}\t\t${t.fzncye18}`,//46
` 负债合计\t\t${t.fzqmye19}\t\t${t.fzncye19}`,//47
`所有者权益(或股东权益)\t\t${t.fzqmye26}\t\t${t.fzncye26}`,
` 实收资本(或股本)\t\t${t.fzqmye27}\t\t${t.fzncye27}`,//48
` 资本公积\t\t${t.fzqmye28}\t\t${t.fzncye28}`,//49
` 盈余公积\t\t${t.fzqmye29}\t\t${t.fzncye29}`,//50
` 未分配利润\t\t${t.fzqmye30}\t\t${t.fzncye30}`,//51
` 所有者权益(或股东权益)合计\t\t${t.fzqmye31}\t\t${t.fzncye31}`,//52
` 负债和所有者权益(或股东权益)总计\t\t${t.fzqmye32}\t\t${t.fzncye32}`,//53
'',
'',
'利润表',
'项目\t\t本年累计金额\t\t本月金额',
`一、营业收入\t\t${t.bnljje1}\t\t${t.byje1}`,//1
`减:营业成本\t\t${t.bnljje2}\t\t${t.byje2}`,//2
` 营业税金及附加\t\t${t.bnljje3}\t\t${t.byje3}`,//3
' 其中:',
` 消费税\t\t${t.bnljje4}\t\t${t.byje4}`,//4
` 营业税\t\t${t.bnljje5}\t\t${t.byje5}`,//5
` 城市维护建设税\t\t${t.bnljje6}\t\t${t.byje6}`,//6
` 资源税\t\t${t.bnljje7}\t\t${t.byje7}`,//7
` 土地增值税\t\t${t.bnljje8}\t\t${t.byje8}`,//8
` 城镇土地使用税、房产税、车船税、印花税\t\t${t.bnljje9}\t\t${t.byje9}`,//9
` 教育费附加、矿产资源补偿费、排污费\t\t${t.bnljje10}\t\t${t.byje10}`,//10
` 销售费用\t\t${t.bnljje11}\t\t${t.byje11}`,//11
' 其中:',
` 商品维修费\t\t${t.bnljje12}\t\t${t.byje12}`,//12
` 广告费和业务宣传费\t\t${t.bnljje13}\t\t${t.byje13}`,//13
` 管理费用\t\t${t.bnljje14}\t\t${t.byje14}`,//14
' 其中:',
` 开办费\t\t${t.bnljje15}\t\t${t.byje15}`,//15
` 业务招待费\t\t${t.bnljje16}\t\t${t.byje16}`,//16
` 研究费用\t\t${t.bnljje17}\t\t${t.byje17}`,//17
` 财务费用\t\t${t.bnljje18}\t\t${t.byje18}`,//18
` 其中:利息费用(收入以“—”号填列)\t\t${t.bnljje19}\t\t${t.byje19}`,//19
`加:投资收益(亏损以–填列)\t\t${t.bnljje20}\t\t${t.byje20}`,//20
'',
`二、营业利润(亏损以“-”号填列)\t\t${t.bnljje21}\t\t${t.byje21}`,//21
`加:营业外收入\t\t${t.bnljje22}\t\t${t.byje22}`,//22
` 其中:政府补助\t\t${t.bnljje23}\t\t${t.byje23}`,//23
`减:营业外支出\t\t${t.bnljje24}\t\t${t.byje24}`,//24
' 其中:',
` 坏账损失\t\t${t.bnljje25}\t\t${t.byje25}`,//25
` 无法收回的长期债券投资损失\t\t${t.bnljje26}\t\t${t.byje26}`,//26
` 无法收回的长期股权投资损失\t\t${t.bnljje27}\t\t${t.byje27}`,//27
` 自然灾害等不可抗力因素造成的损失\t\t${t.bnljje28}\t\t${t.byje28}`,//28
` 税收滞纳金\t\t${t.bnljje29}\t\t${t.byje29}`,//29
'',
`三、利润总额(亏损总额以“-”号填列)\t\t${t.bnljje30}\t\t${t.byje30}`,//30
`减:所得税费用\t\t${t.bnljje31}\t\t${t.byje31}`,//31
'',
`四、净利润(净亏损以“-”号填列)\t\t${t.bnljje32}\t\t${t.byje32}`,//32
'',
'',
'现金流量表',
'项目\t\t本年累计金额\t\t本月金额',
`一、经营活动产生的现金流量\t\t${t.xjbnljje1}\t\t${t.xjbyje1}`,
` 销售产成品、商品、提供劳务收到的现金\t\t${t.xjbnljje2}\t\t${t.xjbyje2}`,//1
` 收到的其他与经营活动有关的现金\t\t${t.xjbnljje3}\t\t${t.xjbyje3}`,//2
` 购买原材料、商品、接受劳务支付的现金\t\t${t.xjbnljje4}\t\t${t.xjbyje4}`,//3
` 支付的职工薪酬\t\t${t.xjbnljje5}\t\t${t.xjbyje5}`,//4
` 支付的税费\t\t${t.xjbnljje6}\t\t${t.xjbyje6}`,//5
` 支付的其他与经营活动有关的现金\t\t${t.xjbnljje7}\t\t${t.xjbyje7}`,//6
` 经营活动产生的现金流量净额\t\t${t.xjbnljje8}\t\t${t.xjbyje8}`,//7
`二、投资活动产生的现金流量:\t\t${t.xjbnljje9}\t\t${t.xjbyje9}`,
` 收回短期投资、长期债券投资和长期股权投资收到的现金\t\t${t.xjbnljje10}\t\t${t.xjbyje10}`,//8
` 取得投资收益收到的现金\t\t${t.xjbnljje11}\t\t${t.xjbyje11}`,//9
` 处置固定资产、无形资产和其他非流动资产收回的现金净额\t\t${t.xjbnljje12}\t\t${t.xjbyje12}`,//10
` 短期投资、长期债券投资和长期股权投资支付的现金\t\t${t.xjbnljje13}\t\t${t.xjbyje13}`,//11
` 购建固定资产、无形资产和其他非流动资产支付的现金\t\t${t.xjbnljje14}\t\t${t.xjbyje14}`,//12
` 投资活动产生的现金流量净额\t\t${t.xjbnljje15}\t\t${t.xjbyje15}`,//13
`三、筹资活动产生的现金流量:\t\t${t.xjbnljje16}\t\t${t.xjbyje16}`,
` 取得借款收到的现金\t\t${t.xjbnljje17}\t\t${t.xjbyje17}`,//14
` 吸收投资者投资收到的现金\t\t${t.xjbnljje18}\t\t${t.xjbyje18}`,//15
` 偿还借款本金支付的现金\t\t${t.xjbnljje19}\t\t${t.xjbyje19}`,//16
` 偿还借款利息支付的现金\t\t${t.xjbnljje20}\t\t${t.xjbyje20}`,//17
` 分配利润支付的现金\t\t${t.xjbnljje21}\t\t${t.xjbyje21}`,//18
` 筹资活动产生的现金流量净额\t\t${t.xjbnljje22}\t\t${t.xjbyje22}`,//19
`四、现金净增加额\t\t${t.xjbnljje23}\t\t${t.xjbyje23}`,//20
`加:期初现金余额\t\t${t.xjbnljje24}\t\t${t.xjbyje24}`,//21
`五、期末现金余额\t\t${t.xjbnljje25}\t\t${t.xjbyje25}`,//22
].join('\n'));
let hash = md5File.sync(tempFile);
fs.renameSync(tempFile, think.RUNTIME_PATH + '/archive/' + hash);
return {
name: o.bbzl,
time: o.sbnf,
archiveList:[{name:'报表',hash,type:'txt'}],
remark: '地税'
}
});
let ckzhList = await this.fetch_ckzh();
info.ckzhList = ckzhList;
return {nsrjbxx,dzjk,taxList,taxMoneyList,info, cwbb,cwbbList};
}
async fetch_ckzh(){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/NsrkhyhAction.do', {
qs: {
handleCode:'',sqzldm:'null',ssxmdm:'null',gnfldm:'null', sessionId,
jsonData:JSON.stringify({
data:{ gnfldm:'CXFW', sqzldm:'',ssxmdm:''}
})
}
});
let $ = cheerio.load(ret.body);
let trList = $('#querytb>tr').toArray();
return _.map(trList, o=>{
let tdList = $('td', o);
return {
yhhb: _.trim(tdList.eq(3).text()),
yhdm: _.trim(tdList.eq(4).text()),
yhzh: _.trim(tdList.eq(5).text())
}
});
}
}
| zxh=" + ret.ypzxh + "&swglm=" + ret.swglm + "&sqssq="
+ encodeURI(ret.sqssq) + "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx="+cwbbjdqx;
} else {
ret.href = ret.url + "?sessionId=" +sessionId+ "&ssq=" + encodeURI(ret.ssq) + "&BBZT=" + ret.zt
| conditional_block |
jsds.js | import Base from './base';
import _ from 'lodash';
import fs from 'fs';
import gm from 'gm';
import uuid from 'node-uuid';
import tesseract from 'node-tesseract';
import cheerio from 'cheerio';
import md5File from 'md5-file';
export default class extends Base {
fetchCaptcha(){
let codePath = 'code_' + uuid.v1() + '.jpg';
return new Promise((resolve, reject)=>{
let stream = gm(this.httpClient.get('http://www.jsds.gov.cn/index/fujia2.jsp'))
.operator('gray', 'threshold', 50, true).stream();
stream.pipe(fs.createWriteStream(codePath));
stream.on('end', ()=>{
tesseract.process(codePath, (error, text)=>{
fs.unlink(codePath);
error ? reject(error) : resolve(_.trim(text));
});
});
});
}
async login(username, password){
let captcha = await this.fetchCaptcha();
while(captcha.length != 4){
captcha = await this.fetchCaptcha();
}
let ret = await this.httpPost('http://www.jsds.gov.cn/LoginAction.do', {
form: { jsonData: JSON.stringify({
handleCode:'baseLogin',
data:{zh:username, zhPassWord:password, zhYzm: captcha}
})}
});
let {code:errno,msg:errmsg,data} = JSON.parse(ret.body);
if(errno == '0'){
errmsg = '';
this._logininfo = data;
}
else{
if(errno == '999904'){
errno = 'ERR_01';
errmsg = '代码错误';
}else if(errno == '999902'){
errno = 'ERR_02';
errmsg = '密码错误';
}else if(errno == '999901'){
errno = 'ERR_03';
errmsg = '验证码错误';
}else{
errno = 'SYS_' + errno;
}
}
return {errno, errmsg};
}
async fetch_nsrjbxx(){
let { sessionId } = this._logininfo; | }
});
let $ = cheerio.load(res.body);
let info_tbl = $('table').eq(0);
let info_tr = $('tr', info_tbl);
let tzfxx_tr = $('#t_tzfxx tr').toArray().slice(1);
let tzfxx = _.map(tzfxx_tr, o=>({
tzfmc: $('[name=tzfxxvo_tzfmc]', o).val(),
zjzl: $('[name=tzfxxvo_zjzl]', o).val(),
zjhm: $('[name=tzfxxvo_zjhm]', o).val(),
tzbl: $('[name=tzfxxvo_tzbl]', o).val()
}));
let ret = {
nsrmc: $('td', info_tr.eq(1)).eq(1).text(),
nsrsbh: $('td', info_tr.eq(0)).eq(3).text(),
scjyqx: $('td', info_tr.eq(4)).eq(3).text(),
zcdz: $('td', info_tr.eq(6)).eq(1).text(),
zcdyzbm: $('td', info_tr.eq(7)).eq(1).text(),
zcdlxdh: $('td', info_tr.eq(7)).eq(3).text(),
scjydz: $('td', info_tr.eq(8)).eq(1).text(),
scdyzbm: $('td', info_tr.eq(9)).eq(1).text(),
scdlxdh: $('td', info_tr.eq(9)).eq(3).text(),
cyrs: $('td', info_tr.eq(11)).eq(1).text(),
wjrs: $('input', info_tr.eq(11)).val(),
jyfw: $('td', info_tr.eq(13)).eq(1).text(),
zzhm: $('td', info_tr.eq(5)).eq(3).text(),
};
if(tzfxx.length){
ret.tzfxx = tzfxx;
}
return ret;
}
// 缴款信息查询
async fetch_jkxx(sbsjq, sbsjz, sbbzl){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/JkxxcxAction.do', {
form: {
sbsjq,sbsjz,sbbzl,
errorMessage:'',handleDesc:'查询缴款信息',handleCode:'queryData',
cqSb:'0',sessionId
}
});
let $ = cheerio.load(ret.body);
let $trList = $('#querytb tr').toArray().slice(1);
return _.map($trList, (o,i)=>{
let $tdList = $('td', o);
return {
sbbzl: _.trim($tdList.eq(1).text()),
sbrq: _.trim($tdList.eq(2).text()),
skssqq: _.trim($tdList.eq(3).text()),
skssqz: _.trim($tdList.eq(4).text()),
yjkje: _.trim($tdList.eq(5).text()),
wjkje: _.trim($tdList.eq(6).text()),
dkje: _.trim($tdList.eq(7).text()),
hxje: _.trim($tdList.eq(8).text()),
}
});
}
// 电子交款凭证查询打印
async fetch_dzjk(sbrqq, sbrqz, kkrqq, kkrqz, lbzt){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/QykkxxCxAction.do', {
qs: {sessionId},
form: {
sbrqq,sbrqz,kkrqq,kkrqz,lbzt,
errorMessage:'',sucessMsg:'',handleDesc:'扣款数据查询',handleCode:'queryData',
cqSb:'0',sessionId
}
});
let $ = cheerio.load(ret.body);
return _.map($('#queryTb tr').toArray().slice(1), o=>{
let $td = $('td', o);
return {
sbblx: _.trim($td.eq(1).text()),
sbrq: _.trim($td.eq(2).text()),
skhj: _.trim($td.eq(3).text()),
jkfs: _.trim($td.eq(4).text()),
sbfs: _.trim($td.eq(5).text()),
kkrq: _.trim($td.eq(6).text()),
rkrq: _.trim($td.eq(7).text()),
}
});
}
async fetch_cwbb(sbnf){
console.log('获取财务报表:'+sbnf);
let { sessionId } = this._logininfo;
let swglm = sessionId.split(';')[0];
let cwbbjdqx = 'Y01_120';
let res = await this.httpPost('http://www.jsds.gov.cn/wb032_WBcwbbListAction.do', {
qs: {sessionId},
form: {
sbnf,cwbbErrzt:'1',cwbbdldm:'CKL',errorMessage:'',
swglm,curpzxh:'',handleDesc:'',handleCode:'submitSave',
cwbbjdqxmc:'年度终了后4月内',cwbbjdqx
}
})
console.log('获取财务报表step1');
console.log(res.body);
let $ = cheerio.load(res.body);
let cwbbList = _.map($('#queryTb tr').toArray().slice(1), o=>{
let $td = $('td', o);
console.log('debug1');
let deal_args = $td.eq(6).find('input').attr('onclick');
if(!deal_args) return null;
console.log('debug2');
console.log(deal_args);
deal_args = deal_args.substring(deal_args.indexOf('(')+1,deal_args.lastIndexOf(')'));
console.log('debug3');
deal_args = _.map(deal_args.split(','),o=>o.substr(1,o.length-2));
console.log('debug4');
let ret = {
sbnf,
bbzl: $td.eq(1).text().replace(/\s/g,''),
url:deal_args[0],
ssq:deal_args[1],
pzxh:deal_args[2],
czzt:deal_args[3],
zt:deal_args[4],
editzt:deal_args[5],
ypzxh:deal_args[6],
swglm:deal_args[7],
sqssq:deal_args[8],
bsqxdm:deal_args[9],
};
if(ret.pzxh){
ret.href = ret.url + "?sessionId=" + sessionId + "&pzxh=" + ret.pzxh + "&ssq=" + encodeURI(ret.ssq) + "&BBZT="
+ ret.czzt + "&zt=" + ret.zt + "&editzt=" + ret.editzt + "&swglm=" + ret.swglm
+ "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx=" + cwbbjdqx;
}else{
if (ypzxh != '') {
ret.href = ret.url + "?sessionId=" +sessionId+ "&ssq=" + encodeURI(ret.ssq) + "&BBZT=" + ret.zt
+ "&ypzxh=" + ret.ypzxh + "&swglm=" + ret.swglm + "&sqssq="
+ encodeURI(ret.sqssq) + "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx="+cwbbjdqx;
} else {
ret.href = ret.url + "?sessionId=" +sessionId+ "&ssq=" + encodeURI(ret.ssq) + "&BBZT=" + ret.zt
+ "&swglm=" + ret.swglm + "&sqssq=" + encodeURI(ret.sqssq)
+ "&bsqxdm=" + ret.bsqxdm+"&cwbbjdqx="+cwbbjdqx;
}
}
console.log(ret.href);
return ret;
});
console.log('获取财务报表step2');
cwbbList = _.compact(cwbbList);
for(let i in cwbbList){
let res = await this.httpGet('http://www.jsds.gov.cn'+cwbbList[i].href);
let $ = cheerio.load(res.body);
let table = $('input').toArray();
table = _.mapKeys(table, o=>$(o).attr('id'));
table = _.mapValues(table, o=>$(o).val());
cwbbList[i].table = table;
}
console.log('获取财务报表step3');
return cwbbList;
}
async data(){
console.log('地税:获取数据中...');
let { sessionId } = this._logininfo;
// 纳税人基本信息
let nsrjbxx = await this.fetch_nsrjbxx();
await this.httpGet('http://www.jsds.gov.cn/MainAction.do', {qs:{sessionId}});
//let jkxx = await this.fetch_jkxx('2015-01-01','2016-12-31','');
console.log('地税:获取电子缴款...');
let dzjk = [
...(await this.fetch_dzjk('2013-01-01','2016-12-31','','','1')),
...(await this.fetch_dzjk('2013-01-01','2016-12-31','','','2'))
];
console.log('地税:获取财务报表...');
let cwbb = [
...await this.fetch_cwbb('2016'),
...await this.fetch_cwbb('2015'),
...await this.fetch_cwbb('2014'),
...await this.fetch_cwbb('2013')
];
console.log('地税:获取财务报表完毕');
let taxList = _.map(dzjk, o=>({
name:o.sbblx,money:o.skhj,time:o.kkrq,remark:'地税-电子缴款'
}));
let taxMoneyList = _.map(cwbb, o=>({
year:o.sbnf,
capital:(parseFloat(o.table.fzqmye27 || '0') + parseFloat(o.table.fzncye27 || '0'))/2,
assets:(parseFloat(o.table.zcqmye32 || '0') + parseFloat(o.table.zcncye32 || '0'))/2,
equity:(parseFloat(o.table.fzqmye31 || '0') + parseFloat(o.table.fzncye31 || '0'))/2,
interest: o.table.bnljje18,
liability:(parseFloat(o.table.fzqmye19 || '0') + parseFloat(o.table.fzncye19 || '0'))/2,
revenue:parseFloat(o.table.bnljje1 || '0') + parseFloat(o.table.bnljje22 || '0'),
}));
let {nsrmc, ...oth} = nsrjbxx;
let info = {
name: nsrmc,
...oth
}
if(nsrjbxx.nsrsbh.length == 18) info.uscc = nsrjbxx.nsrsbh;
let cwbbList = _.map(cwbb, o=>{
let tempFile = 'runtime/jsds_'+uuid.v1()+'.txt';
let t = o.table;
fs.writeFileSync(tempFile,[
'资产负债表',
'资产\t\t期末余额\t\t月初余额',
`流动资产\t\t${t.zcqmye1}\t\t${t.zcncye1}`,
` 货币资金\t\t${t.zcqmye2}\t\t${t.zcncye2}`,//1
` 短期投资\t\t${t.zcqmye3}\t\t${t.zcncye3}`,//2
` 应收票据\t\t${t.zcqmye4}\t\t${t.zcncye4}`,//3
` 应收账款\t\t${t.zcqmye5}\t\t${t.zcncye5}`,//4
` 预付账款\t\t${t.zcqmye6}\t\t${t.zcncye6}`,//5
` 应收股利\t\t${t.zcqmye7}\t\t${t.zcncye7}`,//6
` 应收利息\t\t${t.zcqmye8}\t\t${t.zcncye8}`,//7
` 其他应收款\t\t${t.zcqmye9}\t\t${t.zcncye9}`,//8
` 存货\t\t${t.zcqmye10}\t\t${t.zcncye10}`,//9
' 其中:',
` 原材料\t\t${t.zcqmye11}\t\t${t.zcncye11}`,//10
` 在产品\t\t${t.zcqmye12}\t\t${t.zcncye12}`,//11
` 库存商品\t\t${t.zcqmye13}\t\t${t.zcncye13}`,//12
` 周转材料\t\t${t.zcqmye14}\t\t${t.zcncye14}`,//13
` 其他流动资产\t\t${t.zcqmye15}\t\t${t.zcncye15}`,//14
` 流动资产合计\t\t${t.zcqmye16}\t\t${t.zcncye16}`,//15
`非流动资产\t\t${t.zcqmye17}\t\t${t.zcncye17}`,
` 长期债券投资\t\t${t.zcqmye18}\t\t${t.zcncye18}`,//16
` 长期股权投资\t\t${t.zcqmye19}\t\t${t.zcncye19}`,//17
` 固定资产原价\t\t${t.zcqmye20}\t\t${t.zcncye20}`,//18
` 减:累计折旧\t\t${t.zcqmye21}\t\t${t.zcncye21}`,//19
` 固定资产账面价值\t\t${t.zcqmye22}\t\t${t.zcncye22}`,//20
` 在建工程\t\t${t.zcqmye23}\t\t${t.zcncye23}`,//21
` 工程物资\t\t${t.zcqmye24}\t\t${t.zcncye24}`,//22
` 固定资产清理\t\t${t.zcqmye25}\t\t${t.zcncye25}`,//23
` 生产性生物资产\t\t${t.zcqmye26}\t\t${t.zcncye26}`,//24
` 无形资产\t\t${t.zcqmye7}\t\t${t.zcncye27}`,//25
` 开发支出\t\t${t.zcqmye28}\t\t${t.zcncye28}`,//26
` 长期待摊费用\t\t${t.zcqmye29}\t\t${t.zcncye29}`,//27
` 其他非流动资产\t\t${t.zcqmye30}\t\t${t.zcncye30}`,//28
` 非流动资产合计\t\t${t.zcqmye31}\t\t${t.zcncye31}`,//29
` 资产合计\t\t${t.zcqmye32}\t\t${t.zcncye32}`,//30
'',
'负债和所有者权益\t\t期末余额\t\t月初余额',
`流动负债\t\t${t.fzqmye1}\t\t${t.fzncye1}`,
` 短期借款\t\t${t.fzqmye2}\t\t${t.fzncye2}`,//31
` 应付票据\t\t${t.fzqmye3}\t\t${t.fzncye3}`,//32
` 应付账款\t\t${t.fzqmye4}\t\t${t.fzncye4}`,//33
` 预收账款\t\t${t.fzqmye5}\t\t${t.fzncye5}`,//34
` 应付职工薪酬\t\t${t.fzqmye6}\t\t${t.fzncye6}`,//35
` 应交税费\t\t${t.fzqmye7}\t\t${t.fzncye7}`,//36
` 应付利息\t\t${t.fzqmye8}\t\t${t.fzncye8}`,//37
` 应付利润\t\t${t.fzqmye9}\t\t${t.fzncye9}`,//38
` 其他应付款\t\t${t.fzqmye10}\t\t${t.fzncye10}`,//39
` 其他流动负债\t\t${t.fzqmye11}\t\t${t.fzncye11}`,//40
` 流动负债合计\t\t${t.fzqmye12}\t\t${t.fzncye12}`,//41
`非流动负债\t\t${t.fzqmye13}\t\t${t.fzncye13}`,
` 长期借款\t\t${t.fzqmye14}\t\t${t.fzncye14}`,//42
` 长期应付款\t\t${t.fzqmye15}\t\t${t.fzncye15}`,//43
` 递延收益\t\t${t.fzqmye16}\t\t${t.fzncye16}`,//44
` 其他非流动负债\t\t${t.fzqmye17}\t\t${t.fzncye17}`,//45
` 非流动负债合计\t\t${t.fzqmye18}\t\t${t.fzncye18}`,//46
` 负债合计\t\t${t.fzqmye19}\t\t${t.fzncye19}`,//47
`所有者权益(或股东权益)\t\t${t.fzqmye26}\t\t${t.fzncye26}`,
` 实收资本(或股本)\t\t${t.fzqmye27}\t\t${t.fzncye27}`,//48
` 资本公积\t\t${t.fzqmye28}\t\t${t.fzncye28}`,//49
` 盈余公积\t\t${t.fzqmye29}\t\t${t.fzncye29}`,//50
` 未分配利润\t\t${t.fzqmye30}\t\t${t.fzncye30}`,//51
` 所有者权益(或股东权益)合计\t\t${t.fzqmye31}\t\t${t.fzncye31}`,//52
` 负债和所有者权益(或股东权益)总计\t\t${t.fzqmye32}\t\t${t.fzncye32}`,//53
'',
'',
'利润表',
'项目\t\t本年累计金额\t\t本月金额',
`一、营业收入\t\t${t.bnljje1}\t\t${t.byje1}`,//1
`减:营业成本\t\t${t.bnljje2}\t\t${t.byje2}`,//2
` 营业税金及附加\t\t${t.bnljje3}\t\t${t.byje3}`,//3
' 其中:',
` 消费税\t\t${t.bnljje4}\t\t${t.byje4}`,//4
` 营业税\t\t${t.bnljje5}\t\t${t.byje5}`,//5
` 城市维护建设税\t\t${t.bnljje6}\t\t${t.byje6}`,//6
` 资源税\t\t${t.bnljje7}\t\t${t.byje7}`,//7
` 土地增值税\t\t${t.bnljje8}\t\t${t.byje8}`,//8
` 城镇土地使用税、房产税、车船税、印花税\t\t${t.bnljje9}\t\t${t.byje9}`,//9
` 教育费附加、矿产资源补偿费、排污费\t\t${t.bnljje10}\t\t${t.byje10}`,//10
` 销售费用\t\t${t.bnljje11}\t\t${t.byje11}`,//11
' 其中:',
` 商品维修费\t\t${t.bnljje12}\t\t${t.byje12}`,//12
` 广告费和业务宣传费\t\t${t.bnljje13}\t\t${t.byje13}`,//13
` 管理费用\t\t${t.bnljje14}\t\t${t.byje14}`,//14
' 其中:',
` 开办费\t\t${t.bnljje15}\t\t${t.byje15}`,//15
` 业务招待费\t\t${t.bnljje16}\t\t${t.byje16}`,//16
` 研究费用\t\t${t.bnljje17}\t\t${t.byje17}`,//17
` 财务费用\t\t${t.bnljje18}\t\t${t.byje18}`,//18
` 其中:利息费用(收入以“—”号填列)\t\t${t.bnljje19}\t\t${t.byje19}`,//19
`加:投资收益(亏损以–填列)\t\t${t.bnljje20}\t\t${t.byje20}`,//20
'',
`二、营业利润(亏损以“-”号填列)\t\t${t.bnljje21}\t\t${t.byje21}`,//21
`加:营业外收入\t\t${t.bnljje22}\t\t${t.byje22}`,//22
` 其中:政府补助\t\t${t.bnljje23}\t\t${t.byje23}`,//23
`减:营业外支出\t\t${t.bnljje24}\t\t${t.byje24}`,//24
' 其中:',
` 坏账损失\t\t${t.bnljje25}\t\t${t.byje25}`,//25
` 无法收回的长期债券投资损失\t\t${t.bnljje26}\t\t${t.byje26}`,//26
` 无法收回的长期股权投资损失\t\t${t.bnljje27}\t\t${t.byje27}`,//27
` 自然灾害等不可抗力因素造成的损失\t\t${t.bnljje28}\t\t${t.byje28}`,//28
` 税收滞纳金\t\t${t.bnljje29}\t\t${t.byje29}`,//29
'',
`三、利润总额(亏损总额以“-”号填列)\t\t${t.bnljje30}\t\t${t.byje30}`,//30
`减:所得税费用\t\t${t.bnljje31}\t\t${t.byje31}`,//31
'',
`四、净利润(净亏损以“-”号填列)\t\t${t.bnljje32}\t\t${t.byje32}`,//32
'',
'',
'现金流量表',
'项目\t\t本年累计金额\t\t本月金额',
`一、经营活动产生的现金流量\t\t${t.xjbnljje1}\t\t${t.xjbyje1}`,
` 销售产成品、商品、提供劳务收到的现金\t\t${t.xjbnljje2}\t\t${t.xjbyje2}`,//1
` 收到的其他与经营活动有关的现金\t\t${t.xjbnljje3}\t\t${t.xjbyje3}`,//2
` 购买原材料、商品、接受劳务支付的现金\t\t${t.xjbnljje4}\t\t${t.xjbyje4}`,//3
` 支付的职工薪酬\t\t${t.xjbnljje5}\t\t${t.xjbyje5}`,//4
` 支付的税费\t\t${t.xjbnljje6}\t\t${t.xjbyje6}`,//5
` 支付的其他与经营活动有关的现金\t\t${t.xjbnljje7}\t\t${t.xjbyje7}`,//6
` 经营活动产生的现金流量净额\t\t${t.xjbnljje8}\t\t${t.xjbyje8}`,//7
`二、投资活动产生的现金流量:\t\t${t.xjbnljje9}\t\t${t.xjbyje9}`,
` 收回短期投资、长期债券投资和长期股权投资收到的现金\t\t${t.xjbnljje10}\t\t${t.xjbyje10}`,//8
` 取得投资收益收到的现金\t\t${t.xjbnljje11}\t\t${t.xjbyje11}`,//9
` 处置固定资产、无形资产和其他非流动资产收回的现金净额\t\t${t.xjbnljje12}\t\t${t.xjbyje12}`,//10
` 短期投资、长期债券投资和长期股权投资支付的现金\t\t${t.xjbnljje13}\t\t${t.xjbyje13}`,//11
` 购建固定资产、无形资产和其他非流动资产支付的现金\t\t${t.xjbnljje14}\t\t${t.xjbyje14}`,//12
` 投资活动产生的现金流量净额\t\t${t.xjbnljje15}\t\t${t.xjbyje15}`,//13
`三、筹资活动产生的现金流量:\t\t${t.xjbnljje16}\t\t${t.xjbyje16}`,
` 取得借款收到的现金\t\t${t.xjbnljje17}\t\t${t.xjbyje17}`,//14
` 吸收投资者投资收到的现金\t\t${t.xjbnljje18}\t\t${t.xjbyje18}`,//15
` 偿还借款本金支付的现金\t\t${t.xjbnljje19}\t\t${t.xjbyje19}`,//16
` 偿还借款利息支付的现金\t\t${t.xjbnljje20}\t\t${t.xjbyje20}`,//17
` 分配利润支付的现金\t\t${t.xjbnljje21}\t\t${t.xjbyje21}`,//18
` 筹资活动产生的现金流量净额\t\t${t.xjbnljje22}\t\t${t.xjbyje22}`,//19
`四、现金净增加额\t\t${t.xjbnljje23}\t\t${t.xjbyje23}`,//20
`加:期初现金余额\t\t${t.xjbnljje24}\t\t${t.xjbyje24}`,//21
`五、期末现金余额\t\t${t.xjbnljje25}\t\t${t.xjbyje25}`,//22
].join('\n'));
let hash = md5File.sync(tempFile);
fs.renameSync(tempFile, think.RUNTIME_PATH + '/archive/' + hash);
return {
name: o.bbzl,
time: o.sbnf,
archiveList:[{name:'报表',hash,type:'txt'}],
remark: '地税'
}
});
let ckzhList = await this.fetch_ckzh();
info.ckzhList = ckzhList;
return {nsrjbxx,dzjk,taxList,taxMoneyList,info, cwbb,cwbbList};
}
async fetch_ckzh(){
let { sessionId } = this._logininfo;
let ret = await this.httpPost('http://www.jsds.gov.cn/NsrkhyhAction.do', {
qs: {
handleCode:'',sqzldm:'null',ssxmdm:'null',gnfldm:'null', sessionId,
jsonData:JSON.stringify({
data:{ gnfldm:'CXFW', sqzldm:'',ssxmdm:''}
})
}
});
let $ = cheerio.load(ret.body);
let trList = $('#querytb>tr').toArray();
return _.map(trList, o=>{
let tdList = $('td', o);
return {
yhhb: _.trim(tdList.eq(3).text()),
yhdm: _.trim(tdList.eq(4).text()),
yhzh: _.trim(tdList.eq(5).text())
}
});
}
} | let res = await this.httpGet('http://www.jsds.gov.cn/NsrjbxxAction.do', {
qs:{
sessionId, dealMethod:'queryData', jsonData:JSON.stringify({
data:{gnfldm:'CXFW',sqzldm:'',ssxmdm:''}
}) | random_line_split |
instanceservice.go | /**
* @Author: lzw5399
* @Date: 2021/1/16 22:58
* @Desc: 流程实例服务
*/
package service
import (
"errors"
"fmt"
. "github.com/ahmetb/go-linq/v3"
"workflow/src/global"
"workflow/src/global/constant"
"workflow/src/global/shared"
"workflow/src/model"
"workflow/src/model/dto"
"workflow/src/model/request"
"workflow/src/model/response"
"workflow/src/service/engine"
"workflow/src/util"
)
type InstanceService interface {
CreateProcessInstance(*request.ProcessInstanceRequest, uint, uint) (*model.ProcessInstance, error)
GetProcessInstance(*request.GetInstanceRequest, uint, uint) (*response.ProcessInstanceResponse, error)
ListProcessInstance(*request.InstanceListRequest, uint, uint) (*response.PagingResponse, error)
HandleProcessInstance(*request.HandleInstancesRequest, uint, uint) (*model.ProcessInstance, error)
GetProcessTrain(pi *model.ProcessInstance, instanceId uint, tenantId uint) ([]response.ProcessChainNode, error)
DenyProcessInstance(*request.DenyInstanceRequest, uint, uint) (*model.ProcessInstance, error)
}
type instanceService struct {
}
func NewInstanceService() *instanceService {
return &instanceService{}
}
// 创建实例
func (i *instanceService) CreateProcessInstance(r *request.ProcessInstanceRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
processDefinition model.ProcessDefinition // 流程模板
tx = global.BankDb.Begin() // 开启事务
)
// 检查变量是否合法
err := validateVariables(r.Variables)
if err != nil {
return nil, util.BadRequest.New(err)
}
// 查询对应的流程模板
err = global.BankDb.
Where("id = ?", r.ProcessDefinitionId).
Where("tenant_id = ?", tenantId).
First(&processDefinition).
Error
if err != nil {
return nil, err
}
// 初始化流程引擎
instanceEngine, err := engine.NewProcessEngine(processDefinition, r.ToProcessInstance(currentUserId, tenantId), currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 将初始状态赋值给当前的流程实例
if currentInstanceState, err := instanceEngine.GetInstanceInitialState(); err != nil {
return nil, err
} else {
instanceEngine.ProcessInstance.State = currentInstanceState
}
// TODO 这里判断下一步是排他网关等情况
// 更新instance的关联人
instanceEngine.UpdateRelatedPerson()
// 创建
err = instanceEngine.CreateProcessInstance()
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &instanceEngine.ProcessInstance, err
}
// 获取单个ProcessInstance
func (i *instanceService) GetProcessInstance(r *request.GetInstanceRequest, currentUserId uint, tenantId uint) (*response.ProcessInstanceResponse, error) {
var instance model.ProcessInstance
err := global.BankDb.
Where("id=?", r.Id).
Where("tenant_id = ?", tenantId).
First(&instance).
Error
if err != nil {
return nil, err
}
// 必须是相关的才能看到
exist := From(instance.RelatedPerson).AnyWith(func(i interface{}) bool {
return i.(int64) == int64(currentUserId)
})
if !exist {
return nil, util.NotFound.New("记录不存在")
}
resp := response.ProcessInstanceResponse{
ProcessInstance: instance,
}
// 包括流程链路
if r.IncludeProcessTrain {
trainNodes, err := i.GetProcessTrain(&instance, instance.Id, tenantId)
if err != nil {
return nil, err
}
resp.ProcessChainNodes = trainNodes
}
return &resp, nil
}
// 获取ProcessInstance列表
func (i *instanceService) ListProcessInstance(r *request.InstanceListRequest, currentUserId uint, tenantId uint) (*response.PagingResponse, error) {
var instances []model.ProcessInstance
db := global.BankDb.Model(&model.ProcessInstance{}).Where("tenant_id = ?", tenantId)
// 根据type的不同有不同的逻辑
switch r.Type {
case constant.I_MyToDo:
db = db.Joins("cross join jsonb_array_elements(state) as elem").Where(fmt.Sprintf("elem -> 'processor' @> '%v'", currentUserId))
break
case constant.I_ICreated:
db = db.Where("create_by=?", currentUserId)
break
case constant.I_IRelated:
db = db.Where(fmt.Sprintf("related_person @> '%v'", currentUserId))
break
case constant.I_All:
break
default:
return nil, errors.New("type不合法")
}
if r.Keyword != "" {
db = db.Where("title ~ ?", r.Keyword)
}
var count int64
db.Count(&count)
db = shared.ApplyPaging(db, &r.PagingRequest)
err := db.Find(&instances).Error
return &response.PagingResponse{
TotalCount: count,
CurrentCount: int64(len(instances)),
Data: &instances,
}, err
}
// 处理/审批ProcessInstance
func (i *instanceService) HandleProcessInstance(r *request.HandleInstancesRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
tx = global.BankDb.Begin() // 开启事务
)
// 验证变量是否符合要求
err := validateVariables(r.Variables)
if err != nil {
return nil, err
}
// 流程实例引擎
processEngine, err := engine.NewProcessEngineByInstanceId(r.ProcessInstanceId, currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 验证合法性(1.edgeId是否合法 2.当前用户是否有权限处理)
err = processEngine.ValidateHandleRequest(r)
if err != nil {
return nil, err
}
// 合并最新的变量
processEngine.MergeVariables(r.Variables)
// 处理操作, 判断这里的原因是因为上面都不会进行数据库改动操作
err = processEngine.Handle(r)
if err != nil {
tx.Rol | .Commit()
}
return &processEngine.ProcessInstance, err
}
// 否决流程
func (i *instanceService) DenyProcessInstance(r *request.DenyInstanceRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
tx = global.BankDb.Begin() // 开启事务
)
// 流程实例引擎
instanceEngine, err := engine.NewProcessEngineByInstanceId(r.ProcessInstanceId, currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 验证当前用户是否有权限处理
err = instanceEngine.ValidateDenyRequest(r)
if err != nil {
return nil, err
}
// 处理
err = instanceEngine.Deny(r)
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &instanceEngine.ProcessInstance, err
}
// 获取流程链(用于展示)
func (i *instanceService) GetProcessTrain(pi *model.ProcessInstance, instanceId uint, tenantId uint) ([]response.ProcessChainNode, error) {
// 1. 获取流程实例(如果为空)
var instance model.ProcessInstance
if pi == nil {
err := global.BankDb.
Where("id=?", instanceId).
Where("tenant_id = ?", tenantId).
First(&instance).
Error
if err != nil {
}
} else {
instance = *pi
}
// 2. 获取流程模板
var definition model.ProcessDefinition
err := global.BankDb.
Where("id=?", instance.ProcessDefinitionId).
Where("tenant_id = ?", tenantId).
First(&definition).
Error
if err != nil {
return nil, errors.New("当前流程对应的模板为空")
}
// 3. 获取实例的当前nodeId列表
currentNodeIds := make([]string, len(instance.State))
for i, state := range instance.State {
currentNodeIds[i] = state.Id
}
// 4. 获取所有的显示节点
shownNodes := make([]dto.Node, 0)
currentNodeSortRange := make([]int, 0) // 当前节点的顺序区间, 在这个区间内的顺序都当作当前节点
initialNodeId := ""
for _, node := range definition.Structure.Nodes {
// 隐藏节点就跳过
if node.IsHideNode {
continue
}
// 获取当前节点的顺序
if util.SliceAnyString(currentNodeIds, node.Id) {
currentNodeSortRange = append(currentNodeSortRange, util.StringToInt(node.Sort))
}
// 找出开始节点的id
if node.Clazz == constant.START {
initialNodeId = node.Id
}
shownNodes = append(shownNodes, node)
}
// 5. 遍历出可能的流程链路
possibleTrainNodesList := make([][]string, 0, util.Pow(len(definition.Structure.Nodes), 2))
getPossibleTrainNode(definition.Structure, initialNodeId, []string{}, &possibleTrainNodesList)
// 6. 遍历获取当前显示的节点是否必须显示的
// 具体实现方法是遍历possibleTrainNodesList中每一个变量,然后看当前变量的hitCount是否等于len(possibleTrainNodesList)
// 等于的话,说明在数组每个元素里面都出现了, 那么肯定是必须的
hitCount := make(map[string]int, len(definition.Structure.Nodes))
for _, possibleTrainNodes := range possibleTrainNodesList {
for _, trainNode := range possibleTrainNodes {
hitCount[trainNode] += 1
}
}
// 7. 获取当前节点的排序
// 由于当前节点可能有多个,排序也相应的有多个,多以会有一个当前节点排序的最大值和最小值
// 这个范围内圈起来的都被当作当前节点
currentNodeMinSort, currentNodeMaxSort := util.SliceMinMax(currentNodeSortRange)
// 8. 最后将shownNodes映射成model返回
var trainNodes []response.ProcessChainNode
From(shownNodes).Select(func(i interface{}) interface{} {
node := i.(dto.Node)
currentNodeSort := util.StringToInt(node.Sort)
var status constant.ChainNodeStatus
switch {
case currentNodeSort < currentNodeMinSort:
status = 1 // 已处理
case currentNodeSort > currentNodeMaxSort:
status = 3 // 未处理的后续节点
default:
// 如果是结束节点,则不显示为当前节点,显示为已处理
if node.Clazz == constant.End {
status = 1
} else { // 其他的等于情况显示为当前节点
status = 2 // 当前节点
}
}
var nodeType int
switch node.Clazz {
case constant.START:
nodeType = 1
case constant.UserTask:
nodeType = 2
case constant.ExclusiveGateway:
nodeType = 3
case constant.End:
nodeType = 4
}
return response.ProcessChainNode{
Name: node.Label,
Id: node.Id,
Obligatory: hitCount[node.Id] == len(possibleTrainNodesList),
Status: status,
Sort: currentNodeSort,
NodeType: nodeType,
}
}).OrderBy(func(i interface{}) interface{} {
return i.(response.ProcessChainNode).Sort
}).ToSlice(&trainNodes)
return trainNodes, nil
}
// 检查变量是否合法
func validateVariables(variables []model.InstanceVariable) error {
checkedVariables := make(map[string]model.InstanceVariable, 0)
for _, v := range variables {
illegalValueError := fmt.Errorf("当前变量:%s 的类型对应的值不合法,请检查", v.Name)
// 检查类型
switch v.Type {
case constant.VariableNumber:
_, succeed := v.Value.(float64)
if !succeed {
return illegalValueError
}
case constant.VariableString:
_, succeed := v.Value.(string)
if !succeed {
return illegalValueError
}
case constant.VariableBool:
_, succeed := v.Value.(bool)
if !succeed {
return illegalValueError
}
default:
return fmt.Errorf("当前变量:%s 的类型不合法,请检查", v.Name)
}
// 检查是否重名
if _, present := checkedVariables[v.Name]; present {
return fmt.Errorf("当前变量名:%s 重复, 请检查", v.Name)
}
checkedVariables[v.Name] = v
}
return nil
}
// 获取所有的可能的流程链路
// definitionStructure: 流程模板的结构
// currentNodes: 当前需要遍历的节点
// dependencies: 依赖项
// possibleTrainNodes: 最终返回的可能的流程链路
func getPossibleTrainNode(definitionStructure dto.Structure, currentNodeId string, dependencies []string, possibleTrainNodes *[][]string) {
targetNodeIds := make([]string, 0)
// 当前节点添加到依赖中
dependencies = append(dependencies, currentNodeId)
for _, edge := range definitionStructure.Edges {
// 找到edge的source是当前nodeId的edge
if edge.Source == currentNodeId && edge.FlowProperties != "0" {
targetNodeIds = append(targetNodeIds, edge.Target)
}
}
// 已经是最终节点了
if len(targetNodeIds) == 0 {
*possibleTrainNodes = append(*possibleTrainNodes, dependencies)
} else {
// 不是最终节点,继续递归遍历
for _, targetNodeId := range targetNodeIds {
getPossibleTrainNode(definitionStructure, targetNodeId, dependencies, possibleTrainNodes)
}
}
}
| lback()
} else {
tx | conditional_block |
instanceservice.go | /**
* @Author: lzw5399
* @Date: 2021/1/16 22:58
* @Desc: 流程实例服务
*/
package service
import (
"errors"
"fmt"
. "github.com/ahmetb/go-linq/v3"
"workflow/src/global"
"workflow/src/global/constant"
"workflow/src/global/shared"
"workflow/src/model"
"workflow/src/model/dto"
"workflow/src/model/request"
"workflow/src/model/response"
"workflow/src/service/engine"
"workflow/src/util"
)
type InstanceService interface {
CreateProcessInstance(*request.ProcessInstanceRequest, uint, uint) (*model.ProcessInstance, error)
GetProcessInstance(*request.GetInstanceRequest, uint, uint) (*response.ProcessInstanceResponse, error)
ListProcessInstance(*request.InstanceListRequest, uint, uint) (*response.PagingResponse, error)
HandleProcessInstance(*request.HandleInstancesRequest, uint, uint) (*model.ProcessInstance, error)
GetProcessTrain(pi *model.ProcessInstance, instanceId uint, tenantId uint) ([]response.ProcessChainNode, error)
DenyProcessInstance(*request.DenyInstanceRequest, uint, uint) (*model.ProcessInstance, error)
}
type instanceService struct {
}
func NewInstanceService() *instanceService {
return &instanceService{}
}
// 创建实例
func (i *instanceService) CreateProcessInstance(r *request.ProcessInstanceRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
processDefinition model.ProcessDefinition // 流程模板
tx = global.BankDb.Begin() // 开启事务
)
// 检查变量是否合法
err := validateVariables(r.Variables)
if err != nil {
return nil, util.BadRequest.New(err)
}
// 查询对应的流程模板
err = global.BankDb.
Where("id = ?", r.ProcessDefinitionId).
Where("tenant_id = ?", tenantId).
First(&processDefinition).
Error
if err != nil {
return nil, err
}
// 初始化流程引擎
instanceEngine, err := engine.NewProcessEngine(processDefinition, r.ToProcessInstance(currentUserId, tenantId), currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 将初始状态赋值给当前的流程实例
if currentInstanceState, err := instanceEngine.GetInstanceInitialState(); err != nil {
return nil, err
} else {
instanceEngine.ProcessInstance.State = currentInstanceState
}
// TODO 这里判断下一步是排他网关等情况
// 更新instance的关联人
instanceEngine.UpdateRelatedPerson()
// 创建
err = instanceEngine.CreateProcessInstance()
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &instanceEngine.ProcessInstance, err
}
// 获取单个ProcessInstance
func (i *instanceService) GetProcessInstance(r *request.GetInstanceRequest, currentUserId uint, tenantId uint) (*response.ProcessInstanceResponse, error) {
var instance model.ProcessInstance
err := global.BankDb.
Where("id=?", r.Id).
Where("tenant_id = ?", tenantId).
First(&instance).
Error
if err != nil {
return nil, err
}
// 必须是相关的才能看到
exist := From(instance.RelatedPerson).AnyWith(func(i interface{}) bool {
return i.(int64) == int64(currentUserId)
})
if !exist {
return nil, util.NotFound.New("记录不存在")
}
resp := response.ProcessInstanceResponse{
ProcessInstance: instance,
}
// 包括流程链路
if r.IncludeProcessTrain {
trainNodes, err := i.GetProcessTrain(&instance, instance.Id, tenantId)
if err != nil {
return nil, err
}
resp.ProcessChainNodes = trainNodes
}
return &resp, nil
}
// 获取ProcessInstance列表
func (i *instanceService) ListProcessInstance(r *request.InstanceListRequest, currentUserId uint, tenantId uint) (*response.PagingResponse, error) {
var instances []model.ProcessInstance
db := global.BankDb.Model(&model.ProcessInstance{}).Where("tenant_id = ?", tenantId)
// 根据type的不同有不同的逻辑
switch r.Type {
case constant.I_MyToDo:
db = db.Joins("cross join jsonb_array_elements(state) as elem").Where(fmt.Sprintf("elem -> 'processor' @> '%v'", currentUserId))
break
case constant.I_ICreated:
db = db.Where("create_by=?", currentUserId)
break
case constant.I_IRelated:
db = db.Where(fmt.Sprintf("related_person @> '%v'", currentUserId))
break
case constant.I_All:
break
default:
return nil, errors.New("type不合法")
}
if r.Keyword != "" {
db = db.Where("title ~ ?", r.Keyword)
}
var count int64
db.Count(&count)
db = shared.ApplyPaging(db, &r.PagingRequest)
err := db.Find(&instances).Error
return &response.PagingResponse{
TotalCount: count,
CurrentCount: int64(len(instances)),
Data: &instances,
}, err
}
// 处理/审批ProcessInstance
func (i *instanceService) HandleProcessInstance(r *request.HandleInstancesRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
tx = global.BankDb.Begin() // 开启事务
)
// 验证变量是否符合要求
err := validateVariables(r.Variables)
if err != nil {
return nil, err
}
// 流程实例引擎
processEngine, err := engine.NewProcessEngineByInstanceId(r.ProcessInstanceId, currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 验证合法性(1.edgeId是否合法 2.当前用户是否有权限处理)
err = processEngine.ValidateHandleRequest(r)
if err != nil {
return nil, err
}
// 合并最新的变量
processEngine.MergeVariables(r.Variables)
// 处理操作, 判断这里的原因是因为上面都不会进行数据库改动操作
err = processEngine.Handle(r)
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &processEngine.ProcessInstance, err
}
// 否决流程
func (i *instanceService) DenyProcessInstance(r *request.DenyInstanceRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
tx = global.BankDb.Begin() // 开启事务
)
// 流程实例引擎
instanceEngine, err := engine.NewProcessEngineByInstanceId(r.ProcessInstanceId, currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 验证当前用户是否有权限处理
err = instanceEngine.ValidateDenyRequest(r)
if err != nil {
return nil, err
}
// 处理
err = instanceEngine.Deny(r)
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &instanceEngine.ProcessInstance, err
}
// 获取流程链(用于展示)
func (i *instanceService) GetProcessTrain(pi *model.ProcessInstance, instanceId uint, tenantId uint) ([]response.ProcessChainNode, error) {
// 1. 获取流程实例(如果为空)
var instance model.ProcessInstance
if pi == nil {
err := global.BankDb.
Where("id=?", instanceId).
Where("tenant_id = ?", tenantId).
First(&instance).
Error
if err != nil {
}
} else {
instance = *pi
}
// 2. 获取流程模板
var definition model.ProcessDefinition
err := global.BankDb.
Where("id=?", instance.ProcessDefinitionId).
Where("tenant_id = ?", tenantId).
First(&definition).
Error
if err != nil {
return nil, errors.New("当前流程对应的模板为空")
}
// 3. 获取实例的当前nodeId列表
currentNodeIds := make([]string, len(instance.State))
for i, state := range instance.State {
currentNodeIds[i] = state.Id
}
// 4. 获取所有的显示节点
shownNodes := make([]dto.Node, 0)
currentNodeSortRange := make([]int, 0) // 当前节点的顺序区间, 在这个区间内的顺序都当作当前节点
initialNodeId := ""
for _, node := range definition.Structure.Nodes {
// 隐藏节点就跳过
if node.IsHideNode {
continue
}
// 获取当前节点的顺序
if util.SliceAnyString(currentNodeIds, node.Id) {
currentNodeSortRange = append(currentNodeSortRange, util.StringToInt(node.Sort))
}
// 找出开始节点的id
if node.Clazz == constant.START {
initialNodeId = node.Id
}
shownNodes = append(shownNodes, node)
}
// 5. 遍历出可能的流程链路
possibleTrainNodesList := make([][]string, 0, util.Pow(len(definition.Structure.Nodes), 2))
getPossibleTrainNode(definition.Structure, initialNodeId, []string{}, &possibleTrainNodesList)
// 6. 遍历获取当前显示的节点是否必须显示的
// 具体实现方法是遍历possibleTrainNodesList中每一个变量,然后看当前变量的hitCount是否等于len(possibleTrainNodesList)
// 等于的话,说明在数组每个元素里面都出现了, 那么肯定是必须的
hitCount := make(map[string]int, len(definition.Structure.Nodes))
for _, possibleTrainNodes := range possibleTrainNodesList {
for _, trainNode := range possibleTrainNodes {
hitCount[trainNode] += 1
}
}
// 7. 获取当前节点的排序
// 由于当前节点可能有多个,排序也相应的有多个,多以会有一个当前节点排序的最大值和最小值
// 这个范围内圈起来的都被当作当前节点
currentNodeMinSort, currentNodeMaxSort := util.SliceMinMax(currentNodeSortRange)
// 8. 最后将shownNodes映射成model返回
var trainNodes []response.ProcessChainNode
From(shownNodes).Select(func(i interface{}) interface{} {
node := i.(dto.Node)
currentNodeSort := util.StringToInt(node.Sort)
var status constant.ChainNodeStatus
switch {
case currentNodeSort < currentNodeMinSort:
status = 1 // 已处理
case currentNodeSort > currentNodeMaxSort:
status = 3 // 未处理的后续节点
default:
// 如果是结束节点,则不显示为当前节点,显示为已处理
if node.Clazz == constant.End {
status = 1
} else { // 其他的等于情况显示为当前节点
status = 2 // 当前节点
}
}
var nodeType int
switch node.Clazz {
case constant.START:
nodeType = 1
case constant.UserTask:
nodeType = 2
case constant.ExclusiveGateway:
nodeType = 3
case constant.End:
nodeType = 4
}
return response.ProcessChainNode{
Name: node.Label,
Id: node.Id,
Obligatory: hitCount[node.Id] == len(possibleTrainNodesList),
Status: status,
Sort: currentNodeSort,
NodeType: nodeType,
}
}).OrderBy(func(i interface{}) interface{} {
return i.(response.ProcessChainNode).Sort
}).ToSlice(&trainNodes)
return trainNodes, nil
}
// 检查变量是否合法
func validateVariables(variables []model.InstanceVariable) error {
checkedVariables := make(map[string]model.InstanceVariable, 0)
for _, v := range variables {
illegalValueError := fmt.Errorf("当前变量:%s 的类型对应的值不合法,请检查", v.Name)
// 检查类型
switch v.Type {
case constant.VariableNumber:
_, succeed := v.Value.(float64)
if !succeed {
return illegalValueError
}
case constant.VariableString:
_, succeed := v.Value.(string)
if !succeed {
return illegalValueError
}
case constant.VariableBool:
_, succeed := v.Value.(bool)
if !succeed {
return illegalValueError
}
default:
return fmt.Errorf("当前变量:%s 的类型不合法,请检查", v.Name)
}
// 检查是否重名
if _, present := checkedVariables[v.Name]; present {
return fmt.Errorf("当前变量名:%s 重复, 请检查", v.Name)
}
checkedVariables[v.Name] = v
}
return nil
}
// 获取所有的可能的流程链路
// definitionStructure: 流程模板的结构
// currentNodes: 当前需要遍历的节点 | dependencies = append(dependencies, currentNodeId)
for _, edge := range definitionStructure.Edges {
// 找到edge的source是当前nodeId的edge
if edge.Source == currentNodeId && edge.FlowProperties != "0" {
targetNodeIds = append(targetNodeIds, edge.Target)
}
}
// 已经是最终节点了
if len(targetNodeIds) == 0 {
*possibleTrainNodes = append(*possibleTrainNodes, dependencies)
} else {
// 不是最终节点,继续递归遍历
for _, targetNodeId := range targetNodeIds {
getPossibleTrainNode(definitionStructure, targetNodeId, dependencies, possibleTrainNodes)
}
}
} | // dependencies: 依赖项
// possibleTrainNodes: 最终返回的可能的流程链路
func getPossibleTrainNode(definitionStructure dto.Structure, currentNodeId string, dependencies []string, possibleTrainNodes *[][]string) {
targetNodeIds := make([]string, 0)
// 当前节点添加到依赖中 | random_line_split |
instanceservice.go | /**
* @Author: lzw5399
* @Date: 2021/1/16 22:58
* @Desc: 流程实例服务
*/
package service
import (
"errors"
"fmt"
. "github.com/ahmetb/go-linq/v3"
"workflow/src/global"
"workflow/src/global/constant"
"workflow/src/global/shared"
"workflow/src/model"
"workflow/src/model/dto"
"workflow/src/model/request"
"workflow/src/model/response"
"workflow/src/service/engine"
"workflow/src/util"
)
type InstanceService interface {
CreateProcessInstance(*request.ProcessInstanceRequest, uint, uint) (*model.ProcessInstance, error)
GetProcessInstance(*request.GetInstanceRequest, uint, uint) (*response.ProcessInstanceResponse, error)
ListProcessInstance(*request.InstanceListRequest, uint, uint) (*response.PagingResponse, error)
HandleProcessInstance(*request.HandleInstancesRequest, uint, uint) (*model.ProcessInstance, error)
GetProcessTrain(pi *model.ProcessInstance, instanceId uint, tenantId uint) ([]response.ProcessChainNode, error)
DenyProcessInstance(*request.DenyInstanceRequest, uint, uint) (*model.ProcessInstance, error)
}
type instanceService struct {
}
func NewInstanceService() *instanceService {
return &instanceService{}
}
// 创建实例
func (i *instanceService) CreateProcessInstance(r *request.ProcessInstanceRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
processDefinition model.ProcessDefinition // 流程模板
tx = global.BankDb.Begin() // 开启事务
)
// 检查变量是否合法
err := validateVariables(r.Variables)
if err != nil {
return nil, util.BadRequest.New(err)
}
// 查询对应的流程模板
err = global.BankDb.
Where("id = ?", r.ProcessDefinitionId).
Where("tenant_id = ?", tenantId).
First(&processDefinition).
Error
if err != nil {
return nil, err
}
// 初始化流程引擎
instanceEngine, err := engine.NewProcessEngine(processDefinition, r.ToProcessInstance(currentUserId, tenantId), currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 将初始状态赋值给当前的流程实例
if currentInstanceState, err := instanceEngine.GetInstanceInitialState(); err != nil {
return nil, err
} else {
instanceEngine.ProcessInstance.State = currentInstanceState
}
// TODO 这里判断下一步是排他网关等情况
// 更新instance的关联人
instanceEngine.UpdateRelatedPerson()
// 创建
err = instanceEngine.CreateProcessInstance()
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &instanceEngine.ProcessInstance, err
}
// 获取单个ProcessInstance
func (i *instanceService) GetProcessInstance(r *request.GetInstanceRequest, currentUserId uint, tenantId uint) (*response.ProcessInstanceResponse, error) {
var instance model.ProcessInstance
err := global.BankDb.
Where("id=?", r.Id).
Where("tenant_id = ?", tenantId).
First(&instance).
Error
if err != nil {
return nil, err
}
// 必须是相关的才能看到
exist := From(instance.RelatedPerson).AnyWith(func(i interface{}) bool {
return i.(int64) == int64(currentUserId)
})
if !exist {
return nil, util.NotFound.New("记录不存在")
}
resp := response.ProcessInstanceResponse{
ProcessInstance: instance,
}
// 包括流程链路
if r.IncludeProcessTrain {
trainNodes, err := i.GetProcessTrain(&instance, instance.Id, tenantId)
if err != nil {
return nil, err
}
resp.ProcessChainNodes = trainNodes
}
return &resp, nil
}
// 获取ProcessInstance列表
func (i *instanceService) ListProcessInstance(r *request.InstanceListRequest, currentUserId uint, tenantId uint) (*response.PagingResponse, error) {
var instances []model.ProcessInstance
db := global.BankDb.Model(&model.ProcessInstance{}).Where("tenant_id = ?", tenantId)
// 根据type的不同有不同的逻辑
switch r.Type {
case constant.I_MyToDo:
db = db.Joins("cross join jsonb_array_elements(state) as elem").Where(fmt.Sprintf("elem -> 'processor' @> '%v'", currentUserId))
break
case constant.I_ICreated:
db = db.Where("create_by=?", currentUserId)
break
case constant.I_IRelated:
db = db.Where(fmt.Sprintf("related_person @> '%v'", currentUserId))
break
case constant.I_All:
break
default:
return nil, errors.New("type不合法")
}
if r.Keyword != "" {
db = db.Where("title ~ ?", r.Keyword)
}
var count int64
db.Count(&count)
db = shared.ApplyPaging(db, &r.PagingRequest)
err := db.Find(&instances).Error
return &response.PagingResponse{
TotalCount: count,
CurrentCount: int64(len(instances)),
Data: &instances,
}, err
}
// 处理/审批ProcessInstance
func (i *instanceService) HandleProcessInstance(r *request.HandleInstancesRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
tx = global.BankDb.Begin() // 开启事务
)
// 验证变量是否符合要求
err := validateVariables(r.Variables)
if err != nil {
return nil, err
}
// 流程实例引擎
processEngine, err := engine.NewProcessEngineByInstanceId(r.ProcessInstanceId, currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 验证合法性(1.edgeId是否合法 2.当前用户是否有权限处理)
err = processEngine.ValidateHandleRequest(r)
if err != nil {
return nil, err
}
// 合并最新的变量
processEngine.MergeVariables(r.Variables)
// 处理操作, 判断这里的原因是因为上面都不会进行数据库改动操作
err = processEngine.Handle(r)
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &processEngine.ProcessInstance, err
}
// 否决流程
func (i *instanceService) DenyProcessInstance(r *request.DenyInstanceRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
tx = global.BankDb.Begin() // 开启事务
)
// 流程实例引擎
instanceEngine, err := engine.NewProcessEngineByInstanceId(r.ProcessInstanceId, currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 验证当前用户是否有权限处理
err = instanceEngine.ValidateDenyRequest(r)
if err != nil {
return nil, err
}
// 处理
err = instanceEngine.Deny(r)
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &instanceEngine.ProcessInstance, err
}
// 获取流程链(用于展示)
func (i *instanceService) GetProcessTrain(pi *model.ProcessInstance, instanceId uint, tenantId uint) ([]response.ProcessChainNode, error) {
// 1. 获取流程实例(如果为空)
var instance model.ProcessInstance
if pi == nil {
err := global.BankDb.
Where("id=?", instanceId).
Where("tenant_id = ?", tenantId).
First(&instance).
Error
if err != nil {
}
} else {
instance = *pi
}
// 2. 获取流程模板
var definition model.ProcessDefinition
err := global.BankDb.
Where("id=?", instance.ProcessDefinitionId).
Where("tenant_id = ?", tenantId).
First(&definition).
Error
if err != nil {
return nil, errors.New( | g, possibleTrainNodes *[][]string) {
targetNodeIds := make([]string, 0)
// 当前节点添加到依赖中
dependencies = append(dependencies, currentNodeId)
for _, edge := range definitionStructure.Edges {
// 找到edge的source是当前nodeId的edge
if edge.Source == currentNodeId && edge.FlowProperties != "0" {
targetNodeIds = append(targetNodeIds, edge.Target)
}
}
// 已经是最终节点了
if len(targetNodeIds) == 0 {
*possibleTrainNodes = append(*possibleTrainNodes, dependencies)
} else {
// 不是最终节点,继续递归遍历
for _, targetNodeId := range targetNodeIds {
getPossibleTrainNode(definitionStructure, targetNodeId, dependencies, possibleTrainNodes)
}
}
}
| "当前流程对应的模板为空")
}
// 3. 获取实例的当前nodeId列表
currentNodeIds := make([]string, len(instance.State))
for i, state := range instance.State {
currentNodeIds[i] = state.Id
}
// 4. 获取所有的显示节点
shownNodes := make([]dto.Node, 0)
currentNodeSortRange := make([]int, 0) // 当前节点的顺序区间, 在这个区间内的顺序都当作当前节点
initialNodeId := ""
for _, node := range definition.Structure.Nodes {
// 隐藏节点就跳过
if node.IsHideNode {
continue
}
// 获取当前节点的顺序
if util.SliceAnyString(currentNodeIds, node.Id) {
currentNodeSortRange = append(currentNodeSortRange, util.StringToInt(node.Sort))
}
// 找出开始节点的id
if node.Clazz == constant.START {
initialNodeId = node.Id
}
shownNodes = append(shownNodes, node)
}
// 5. 遍历出可能的流程链路
possibleTrainNodesList := make([][]string, 0, util.Pow(len(definition.Structure.Nodes), 2))
getPossibleTrainNode(definition.Structure, initialNodeId, []string{}, &possibleTrainNodesList)
// 6. 遍历获取当前显示的节点是否必须显示的
// 具体实现方法是遍历possibleTrainNodesList中每一个变量,然后看当前变量的hitCount是否等于len(possibleTrainNodesList)
// 等于的话,说明在数组每个元素里面都出现了, 那么肯定是必须的
hitCount := make(map[string]int, len(definition.Structure.Nodes))
for _, possibleTrainNodes := range possibleTrainNodesList {
for _, trainNode := range possibleTrainNodes {
hitCount[trainNode] += 1
}
}
// 7. 获取当前节点的排序
// 由于当前节点可能有多个,排序也相应的有多个,多以会有一个当前节点排序的最大值和最小值
// 这个范围内圈起来的都被当作当前节点
currentNodeMinSort, currentNodeMaxSort := util.SliceMinMax(currentNodeSortRange)
// 8. 最后将shownNodes映射成model返回
var trainNodes []response.ProcessChainNode
From(shownNodes).Select(func(i interface{}) interface{} {
node := i.(dto.Node)
currentNodeSort := util.StringToInt(node.Sort)
var status constant.ChainNodeStatus
switch {
case currentNodeSort < currentNodeMinSort:
status = 1 // 已处理
case currentNodeSort > currentNodeMaxSort:
status = 3 // 未处理的后续节点
default:
// 如果是结束节点,则不显示为当前节点,显示为已处理
if node.Clazz == constant.End {
status = 1
} else { // 其他的等于情况显示为当前节点
status = 2 // 当前节点
}
}
var nodeType int
switch node.Clazz {
case constant.START:
nodeType = 1
case constant.UserTask:
nodeType = 2
case constant.ExclusiveGateway:
nodeType = 3
case constant.End:
nodeType = 4
}
return response.ProcessChainNode{
Name: node.Label,
Id: node.Id,
Obligatory: hitCount[node.Id] == len(possibleTrainNodesList),
Status: status,
Sort: currentNodeSort,
NodeType: nodeType,
}
}).OrderBy(func(i interface{}) interface{} {
return i.(response.ProcessChainNode).Sort
}).ToSlice(&trainNodes)
return trainNodes, nil
}
// 检查变量是否合法
func validateVariables(variables []model.InstanceVariable) error {
checkedVariables := make(map[string]model.InstanceVariable, 0)
for _, v := range variables {
illegalValueError := fmt.Errorf("当前变量:%s 的类型对应的值不合法,请检查", v.Name)
// 检查类型
switch v.Type {
case constant.VariableNumber:
_, succeed := v.Value.(float64)
if !succeed {
return illegalValueError
}
case constant.VariableString:
_, succeed := v.Value.(string)
if !succeed {
return illegalValueError
}
case constant.VariableBool:
_, succeed := v.Value.(bool)
if !succeed {
return illegalValueError
}
default:
return fmt.Errorf("当前变量:%s 的类型不合法,请检查", v.Name)
}
// 检查是否重名
if _, present := checkedVariables[v.Name]; present {
return fmt.Errorf("当前变量名:%s 重复, 请检查", v.Name)
}
checkedVariables[v.Name] = v
}
return nil
}
// 获取所有的可能的流程链路
// definitionStructure: 流程模板的结构
// currentNodes: 当前需要遍历的节点
// dependencies: 依赖项
// possibleTrainNodes: 最终返回的可能的流程链路
func getPossibleTrainNode(definitionStructure dto.Structure, currentNodeId string, dependencies []strin | identifier_body |
instanceservice.go | /**
* @Author: lzw5399
* @Date: 2021/1/16 22:58
* @Desc: 流程实例服务
*/
package service
import (
"errors"
"fmt"
. "github.com/ahmetb/go-linq/v3"
"workflow/src/global"
"workflow/src/global/constant"
"workflow/src/global/shared"
"workflow/src/model"
"workflow/src/model/dto"
"workflow/src/model/request"
"workflow/src/model/response"
"workflow/src/service/engine"
"workflow/src/util"
)
type InstanceService interface {
CreateProcessInstance(*request.ProcessInstanceRequest, uint, uint) (*model.ProcessInstance, error)
GetProcessInstance(*request.GetInstanceRequest, uint, uint) (*response.ProcessInstanceResponse, error)
ListProcessInstance(*request.InstanceListRequest, uint, uint) (*response.PagingResponse, error)
HandleProcessInstance(*request.HandleInstancesRequest, uint, uint) (*model.ProcessInstance, error)
GetProcessTrain(pi *model.ProcessInstance, instanceId uint, tenantId uint) ([]response.ProcessChainNode, error)
DenyProcessInstance(*request.DenyInstanceRequest, uint, uint) (*model.ProcessInstance, error)
}
type instanceService struct {
}
func NewInstanceService() *instanceService {
return &instanceService{}
}
// 创建实例
func (i *instanceService) CreateProcessInstance(r *request.ProcessInstanceRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
processDefinition model.ProcessDefinition // 流程模板
tx = global.BankDb.Begin() // 开启事务
)
// 检查变量是否合法
err := validateVariables(r.Variables)
if err != nil {
return nil, util.BadRequest.New(err)
}
// 查询对应的流程模板
err = global.BankDb.
Where("id = ?", r.ProcessDefinitionId).
Where("tenant_id = ?", tenantId).
First(&processDefinition).
Error
if err != nil {
return nil, err
}
// 初始化流程引擎
instanceEngine, err := engine.NewProcessEngine(processDefinition, r.ToProcessInstance(currentUserId, tenantId), currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 将初始状态赋值给当前的流程实例
if currentInstanceState, err := instanceEngine.GetInstanceInitialState(); err != nil {
return nil, err
} else {
instanceEngine.ProcessInstance.State = currentInstanceState
}
// TODO 这里判断下一步是排他网关等情况
// 更新instance的关联人
instanceEngine.UpdateRelatedPerson()
// 创建
err = instanceEngine.CreateProcessInstance()
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &instanceEngine.ProcessInstance, err
}
// 获取单个ProcessInstance
func (i *instanceService) GetProcessInstance(r *request.GetInstanceRequest, currentUserId uint, tenantId uint) (*response.ProcessInstanceResponse, error) {
var instance model.ProcessInstance
err := global.BankDb.
Where("id=?", r.Id).
Where("tenant_id = ?", tenantId).
First(&instance).
Error
if err != nil {
return nil, err
}
// 必须是相关的才能看到
exist := From(instance.RelatedPerson).AnyWith(func(i interface{}) bool {
return i.(int64) == int64(currentUserId)
})
if !exist {
return nil, util.NotFound.New("记录不存在")
}
resp := response.ProcessInstanceResponse{
ProcessInstance: instance,
}
// 包括流程链路
if r.IncludeProcessTrain {
trainNodes, err := i.GetProcessTrain(&instance, instance.Id, tenantId)
if err != nil {
return nil, err
}
resp.ProcessChainNodes = trainNodes
}
return &resp, nil
}
// 获取ProcessInstance列表
func (i *instanceService) ListProcessInstance(r *request.InstanceListRequest, currentUserId uint, tenantId uint) (*response.PagingResponse, error) {
var instances []model.ProcessInstance
db := global.BankDb.Model(&model.ProcessInstance{}).Where("tenant_id = ?", tenantId)
// 根据type的不同有不同的逻辑
switch r.Type {
case constant.I_MyToDo:
db = db.Joins("cross join jsonb_array_elements(state) as elem").Where(fmt.Sprintf("elem -> 'processor' @> '%v'", currentUserId))
break
case constant.I_ICreated:
db = db.Where("create_by=?", currentUserId)
break
case constant.I_IRelated:
db = db.Where(fmt.Sprintf("related_person @> '%v'", currentUserId))
break
case constant.I_All:
break
default:
return nil, errors.New("type不合法")
}
if r.Keyword != "" {
db = db.Where("title ~ ?", r.Keyword)
}
var count int64
db.Count(&count)
db = shared.ApplyPaging(db, &r.PagingRequest)
err := db.Find(&instances).Error
return &response.PagingResponse{
TotalCount: count,
CurrentCount: int64(len(instances)),
Data: &instances,
}, err
}
// 处理/审批ProcessInstance
func (i *instanceService) HandleProcessInstance(r *request.HandleInstancesRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
tx = global.BankDb.Begin() // 开启事务
)
// 验证变量是否符合要求
err := validateVariables(r.Variables)
if err != nil {
return nil, err
}
// 流程实例引擎
processEngine, err := engine.NewProcessEngineByInstanceId(r.ProcessInstanceId, currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 验证合法性(1.edgeId是否合法 2.当前用户是否有权限处理)
err = processEngine.ValidateHandleRequest(r)
if err != nil {
return nil, err
}
// 合并最新的变量
processEngine.MergeVariables(r.Variables)
// 处理操作, 判断这里的原因是因为上面都不会进行数据库改动操作
err = processEngine.Handle(r)
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &processEngine.ProcessInstance, err
}
// 否决流程
func (i *instanceService) DenyProcessInstance(r *request.DenyInstanceRequest, currentUserId uint, tenantId uint) (*model.ProcessInstance, error) {
var (
tx = global.BankDb.Begin() // 开启事务
)
// 流程实例引擎
instanceEngine, err := engine.NewProcessEngineByInstanceId(r.ProcessInstanceId, currentUserId, tenantId, tx)
if err != nil {
return nil, err
}
// 验证当前用户是否有权限处理
err = instanceEngine.ValidateDenyRequest(r)
if err != nil {
return nil, err
}
// 处理
err = instanceEngine.Deny(r)
if err != nil {
tx.Rollback()
} else {
tx.Commit()
}
return &instanceEngine.ProcessInstance, err
}
// 获取流程链(用于展示)
func (i *instanceService) GetProcessTrain(pi *model.ProcessInstance, instanceId uint, tenantId uint) ([]response.ProcessChainNode, error) {
// 1. 获取流程实例(如果为空)
var instance model.ProcessInstance
if pi == nil {
err := global.BankDb.
Where("id=?", instanceId).
Where("tenant_id = ?", tenantId).
First(&instance).
Error
if err != nil {
}
} else {
instance = *pi
}
// 2. 获取流程模板
var definition model.ProcessDefinition
err := global.BankDb.
Where("id=?", instance.ProcessDefinitionId).
Where("tenant_id = ?", tenantId).
First(&definition).
Error
if err != nil {
return nil, errors.New("当前流程对应的模板为空")
}
// 3. 获取实例的当前nodeId列表
currentNodeIds := make([]string, len(instance.State))
for i, state := range instance.State {
currentNodeIds[i] = state.Id
}
// 4. 获取所有的显示节点
shownNodes := make([]dto.Node, 0)
currentNodeSortRange := make([]int, 0) // 当前节点的顺序区间, 在这个区间内的顺序都当作当前节点
initialNodeId := ""
for _, node := range definition.Structure.Nodes {
// 隐藏节点就跳过
if node.IsHideNode {
continue
}
// 获取当前节点的顺序
if util.SliceAnyString(currentNodeIds, node.Id) {
currentNodeSortRange = append(currentNodeSortRange, util.StringToInt(node.Sort))
}
// 找出开始节点的id
if node.Clazz == constant.START {
initialNodeId = node.Id
}
shownNodes = append(shownNodes, node)
}
// 5. 遍历出可能的流程链路
possibleTrainNodesList := make([][]string, 0, util.Pow(len(definition.Structure.Nodes), 2))
getPossibleTrainNode(definition.Structure, initialNodeId, []string{}, &possibleTrainNodesList)
// 6. 遍历获取当前显示的节点是否必须显示的
// 具体实现方法是遍历possibleTrainNodesList中每一个变量,然后看当前变量的hitCount是否等于len(possibleTrainNodesList)
// 等于的话,说明在数组每个元素里面都出现了, 那么肯定是必须的
hitCount := make(map[string]int, len(definition.Structure.Nodes))
for _, possibleTrainNodes := range possibleTrainNodesList {
for _, trainNode := range possibleTrainNodes {
hitCount[trainNode] += 1
}
}
// 7. 获取当前节点的排序
// 由于当前节点可能有多个,排序也相应的有多个,多以会有一个当前节点排序的最大值和最小值
// 这个范围内圈起来的都被当作当前节点
currentNodeMinSort, currentNodeMaxSort := util.SliceMinMax(currentNodeSortRange)
// 8. 最后将shownNodes映射成model返回
var trainNodes []response.ProcessChainNode
From(shownNodes).Select(func(i interface{}) interface{} {
node := i.(dto.Node)
currentNodeSort := util.StringToInt(node.Sort)
var status constant.ChainNodeStatus
switch {
case currentNodeSort < currentNodeMinSort:
status = 1 // 已处理
case currentNodeSort > currentNodeMaxSort:
status = 3 // 未处理的后续节点
default:
// 如果是结束节点,则不显示为当前节点,显示为已处理
if node.Clazz == constant.End {
status = 1
} else { // 其他的等于情况显示为当前节点
status = 2 // 当前节点
}
}
var nodeType int
switch node.Clazz {
case constant.START:
nodeType = 1
case constant.UserTask:
nodeType = 2
case constant.ExclusiveGateway:
nodeType = 3
case constant.End:
nodeType = 4
}
return response.ProcessChainNode{
Name: node.Label,
Id: node.Id,
Obligatory: hitCount[node.Id] == len(possibleTrainNodesList),
Status: status,
Sort: currentNodeSort,
NodeType: nodeType,
}
}).OrderBy(func(i interface{}) interface{} {
return i.(response.ProcessChainNode).Sort
}).ToSlice(&trainNodes)
return trainNodes, nil
}
// 检查变量是否合法
func validateVariables(variables []model.InstanceVariable) error {
checkedVariables := make(map[string]model.InstanceVariable, 0)
for _, v := range variables {
illegalValueError := fmt.Errorf("当前变量:%s 的类型对应的值不合法,请检查", v.Name)
// 检查类型
switch v.Type {
case constant.VariableNumber:
_, succeed := v.Value.(float64)
if !succeed {
return illegalValueError
}
case constant.VariableString:
_, succeed := v.Value.(string)
if !succeed {
return illegalValueError
}
case constant.VariableBool:
_, succeed := v.Value.(bool)
if !succeed {
return illegalValueError
}
default:
return fmt.Errorf("当前变量:%s 的类型不合法,请检查", v.Name)
}
// 检查是否重名
if _, present := checkedVariables[v.Name]; present {
return fmt.Errorf("当前变量名:%s 重复, 请检查", v.Name)
}
checkedVariables[v.Name] = v
}
return nil
}
// 获取所有的可能的流程链路
// definitionStructure: 流程模板的结构
// currentNodes: 当前需要遍历的节点
// dependencies: 依赖项
// possibleTrainNodes: 最终返回的可能的流程链路
func getPossibleTrainNode(definitionStructure dto.Structure, currentNodeId string, dependencies []string, possibleTrainNodes *[][]string) | := make([]string, 0)
// 当前节点添加到依赖中
dependencies = append(dependencies, currentNodeId)
for _, edge := range definitionStructure.Edges {
// 找到edge的source是当前nodeId的edge
if edge.Source == currentNodeId && edge.FlowProperties != "0" {
targetNodeIds = append(targetNodeIds, edge.Target)
}
}
// 已经是最终节点了
if len(targetNodeIds) == 0 {
*possibleTrainNodes = append(*possibleTrainNodes, dependencies)
} else {
// 不是最终节点,继续递归遍历
for _, targetNodeId := range targetNodeIds {
getPossibleTrainNode(definitionStructure, targetNodeId, dependencies, possibleTrainNodes)
}
}
}
| {
targetNodeIds | identifier_name |
script.js |
// An IIFE ("Iffy") - see the notes in mycourses
(function(){
"use strict";
let NUM_SAMPLES = 128;
let backgroundColor = "#ffffff";
let color = "#eb4034";
let audioElement;
let analyserNode;
let canvasBob;
let canvas,ctx;
let grad, songName;
let bassFilter, trebleFilter;
let bass=0, treble=0;
let mouth, leftEar, rightEar, nose, head, leftEye, rightEye, leftCheek, rightCheek, leftEyebrow, rightEyebrow;
function init(){
// set up canvas stuff
canvas = document.querySelector('canvas');
ctx = canvas.getContext("2d");
// get reference to <audio> element on page
audioElement = document.querySelector('audio');
// call our helper function and get an analyser node
analyserNode = createWebAudioContextWithAnalyserNode(audioElement);
// Find facial features
mouth = new Image();
mouth.src = "media/mouth-image.png";
leftEar = new Image();
leftEar.src = "media/left-ear.png";
rightEar = new Image();
rightEar.src = "media/right-ear.png";
nose = new Image();
nose.src = "media/nose-image.png";
head = new Image();
head.src = "media/head.png";
leftEye = new Image();
leftEye.src = "media/left-eye.png";
rightEye = new Image();
rightEye.src = "media/right-eye.png";
leftCheek = new Image();
leftCheek.src = "media/left-cheek.png";
rightCheek = new Image();
rightCheek.src = "media/right-cheek.png";
leftEyebrow = new Image();
leftEyebrow.src = "media/left-eyebrow.png";
rightEyebrow = new Image();
rightEyebrow.src = "media/right-eyebrow.png";
// get sound track <select> and Full Screen button working
setupUI();
// start animation loop
update();
}
function createWebAudioContextWithAnalyserNode(audioElement) {
let audioCtx, analyserNode, sourceNode;
// create new AudioContext
// The || is because WebAudio has not been standardized across browsers yet
// http://webaudio.github.io/web-audio-api/#the-audiocontext-interface
audioCtx = new (window.AudioContext || window.webkitAudioContext);
// create an analyser node
analyserNode = audioCtx.createAnalyser();
/*
We will request NUM_SAMPLES number of samples or "bins" spaced equally
across the sound spectrum.
If NUM_SAMPLES (fftSize) is 256, then the first bin is 0 Hz, the second is 172 Hz,
the third is 344Hz. Each bin contains a number between 0-255 representing
the amplitude of that frequency.
*/
// fft stands for Fast Fourier Transform
analyserNode.fftSize = NUM_SAMPLES;
// this is where we hook up the <audio> element to the analyserNode
sourceNode = audioCtx.createMediaElementSource(audioElement);
//add bass boost
bassFilter = audioCtx.createBiquadFilter();
bassFilter.type = "lowshelf";
bassFilter.frequency.value = 200;
bassFilter.gain.value = bass;
//add treble boost
trebleFilter = audioCtx.createBiquadFilter();
trebleFilter.type = "highshelf";
trebleFilter.frequency.value = 2000;
trebleFilter.gain.value = treble;
sourceNode.connect(bassFilter);
bassFilter.connect(trebleFilter);
trebleFilter.connect(analyserNode);
// here we connect to the destination i.e. speakers
analyserNode.connect(audioCtx.destination);
return analyserNode;
}
function setupUI(){
document.querySelector("#bassBoost").onchange = function(e){
bass = e.target.value;
bassFilter.gain.value = bass;
}
document.querySelector("#trebleBoost").onchange = function(e){
treble = e.target.value;
trebleFilter.gain.value = treble;
}
// get reference to file input and listen for changes
document.querySelector('#file').onchange = function(e){
var sound = document.getElementById('sound');
sound.src = URL.createObjectURL(this.files[0]);
//document.querySelector("#status").innerHTML = "Now playing: " + e.target.value;
audioElement.volume = 0.2;
audioElement.play();
sound.onend = function(e){
URL.revokeObjectURL(this.src);
}
}
}
function update() {
// this schedules a call to the update() method in 1/60 seconds
requestAnimationFrame(update);
/*
Nyquist Theorem
http://whatis.techtarget.com/definition/Nyquist-Theorem
The array of data we get back is 1/2 the size of the sample rate
*/
// create a new array of 8-bit integers (0-255), array of 64 data points
let data = new Uint8Array(NUM_SAMPLES/2);
let waveData = new Uint8Array(NUM_SAMPLES/2);
// populate the array with the frequency data
// notice these arrays can be passed "by reference"
analyserNode.getByteFrequencyData(data); //frequency data
analyserNode.getByteTimeDomainData(waveData); // waveform data
// mouth uses first third of audio range
let mouthData = 0;
for (var i = 0; i < 20; i++){
mouthData = mouthData + data[i];
}
mouthData = mouthData / 20;
// uses second third of audio range
let earData = 0;
for (var i = 20; i < 40; i++){
earData = earData + data[i];
}
earData = earData / 20;
// nose uses last third of audio range
let noseData = 0;
for (var i = 40; i < 46; i++){
noseData = noseData + data[i];
}
noseData = noseData / 6;
// clear screen
ctx.fillStyle = backgroundColor;
ctx.fillRect(0, 0, 1080, 850);
ctx.save();
ctx.fillStyle = backgroundColor;
ctx.strokeStyle ="rgba(221, 221, 221, 0.4)";
ctx.lineWidth = 2;
ctx.restore();
//Draw Ears
ctx.save();
ctx.translate(390,200);
ctx.rotate(-(earData*Math.PI/180)/15);
ctx.drawImage(leftEar,(-2 * leftEar.width/3),(-2 * leftEar.width/3));
ctx.restore();
ctx.save();
ctx.translate(685,200);
ctx.rotate((earData*Math.PI/180)/15);
ctx.drawImage(rightEar,(-1 * rightEar.width/3),(-2 * rightEar.width/3));
ctx.restore();
//Draw Face
ctx.drawImage(head, 220, 150); // drw image with scaled width and height
//Draw Eyes
ctx.drawImage(leftEye, 370, 200);
ctx.drawImage(rightEye, 590, 200);
//Draw Eyes
ctx.drawImage(leftEyebrow, 365, 180);
ctx.drawImage(rightEyebrow, 570, 190);
//Draw Cheeks
ctx.drawImage(leftCheek, 80, 510);
ctx.drawImage(rightCheek, 630, 510);
//Draw Mouth
ctx.save();
// scale the image and make sure it isn't too small
var mouthScale = mouthData / 100;
var mouthHeight;
if (mouth.height > (mouth.height * mouthScale)){
mouthHeight = mouth.height;
} else {
mouthHeight = mouth.height * mouthScale;
}
var mouthWidth;
if (mouth.width > (mouth.width * mouthScale)){
mouthWidth = mouth.width;
} else {
mouthWidth = mouth.width * mouthScale;
}
var x = (ctx.canvas.width - mouthWidth) / 2;
var y = ((ctx.canvas.height - mouthHeight) / 2) + 215;
ctx.drawImage(mouth, x, y, mouthWidth, mouthHeight); // drw image with scaled width and height
//ctx.drawImage(mouth, 525+mouth.width/2, 450+mouth.height/2, ((100 + mouthData * 0.2)), ((100 + mouthData * 0.2)));
ctx.restore();
//Draw Nose
ctx.save();
// scale the image and make sure it isn't too small
var noseScale = noseData / 60;
var noseHeight;
if (nose.height > (nose.height * noseScale)){
noseHeight = nose.height;
} else {
noseHeight = nose.height * noseScale;
}
var noseWidth;
if (nose.width > (nose.width * noseScale)){
noseWidth = nose.width;
} else |
var x = (ctx.canvas.width - noseWidth) / 2;
var y = ((ctx.canvas.height - noseHeight) / 2) + 85;
ctx.drawImage(nose, x, y, noseWidth, noseHeight); // drw image with scaled width and height
//ctx.drawImage(mouth, 525+mouth.width/2, 450+mouth.height/2, ((100 + mouthData * 0.2)), ((100 + mouthData * 0.2)));
ctx.restore();
canvasBob = earData / 100;
canvasBob = canvasBob + 10;
document.getElementById('canvas-container').style.top = canvasBob + '%';
//Draw Song name, if we want to draw text?
/*
ctx.save();
ctx.font = "25px Indie Flower";
ctx.fillStyle = "rgba(235, 64, 52, 1)";
ctx.fillText("BeeKoo Mix", 50, 50);
ctx.restore();
*/
}
window.addEventListener("load",init);
/*
// FULL SCREEN MODE - Do we want this?
function requestFullscreen(element) {
if (element.requestFullscreen) {
element.requestFullscreen();
} else if (element.mozRequestFullscreen) {
element.mozRequestFullscreen();
} else if (element.mozRequestFullScreen) { // camel-cased 'S' was changed to 's' in spec
element.mozRequestFullScreen();
} else if (element.webkitRequestFullscreen) {
element.webkitRequestFullscreen();
}
// .. and do nothing if the method is not supported
}; */
}());
function navigateToMeowsician(){
window.location.href = 'meowsician.html';
} | {
noseWidth = nose.width * noseScale;
} | conditional_block |
script.js |
// An IIFE ("Iffy") - see the notes in mycourses
(function(){
"use strict";
let NUM_SAMPLES = 128;
let backgroundColor = "#ffffff";
let color = "#eb4034";
let audioElement;
let analyserNode;
let canvasBob;
let canvas,ctx;
let grad, songName;
let bassFilter, trebleFilter;
let bass=0, treble=0;
let mouth, leftEar, rightEar, nose, head, leftEye, rightEye, leftCheek, rightCheek, leftEyebrow, rightEyebrow;
function | (){
// set up canvas stuff
canvas = document.querySelector('canvas');
ctx = canvas.getContext("2d");
// get reference to <audio> element on page
audioElement = document.querySelector('audio');
// call our helper function and get an analyser node
analyserNode = createWebAudioContextWithAnalyserNode(audioElement);
// Find facial features
mouth = new Image();
mouth.src = "media/mouth-image.png";
leftEar = new Image();
leftEar.src = "media/left-ear.png";
rightEar = new Image();
rightEar.src = "media/right-ear.png";
nose = new Image();
nose.src = "media/nose-image.png";
head = new Image();
head.src = "media/head.png";
leftEye = new Image();
leftEye.src = "media/left-eye.png";
rightEye = new Image();
rightEye.src = "media/right-eye.png";
leftCheek = new Image();
leftCheek.src = "media/left-cheek.png";
rightCheek = new Image();
rightCheek.src = "media/right-cheek.png";
leftEyebrow = new Image();
leftEyebrow.src = "media/left-eyebrow.png";
rightEyebrow = new Image();
rightEyebrow.src = "media/right-eyebrow.png";
// get sound track <select> and Full Screen button working
setupUI();
// start animation loop
update();
}
function createWebAudioContextWithAnalyserNode(audioElement) {
let audioCtx, analyserNode, sourceNode;
// create new AudioContext
// The || is because WebAudio has not been standardized across browsers yet
// http://webaudio.github.io/web-audio-api/#the-audiocontext-interface
audioCtx = new (window.AudioContext || window.webkitAudioContext);
// create an analyser node
analyserNode = audioCtx.createAnalyser();
/*
We will request NUM_SAMPLES number of samples or "bins" spaced equally
across the sound spectrum.
If NUM_SAMPLES (fftSize) is 256, then the first bin is 0 Hz, the second is 172 Hz,
the third is 344Hz. Each bin contains a number between 0-255 representing
the amplitude of that frequency.
*/
// fft stands for Fast Fourier Transform
analyserNode.fftSize = NUM_SAMPLES;
// this is where we hook up the <audio> element to the analyserNode
sourceNode = audioCtx.createMediaElementSource(audioElement);
//add bass boost
bassFilter = audioCtx.createBiquadFilter();
bassFilter.type = "lowshelf";
bassFilter.frequency.value = 200;
bassFilter.gain.value = bass;
//add treble boost
trebleFilter = audioCtx.createBiquadFilter();
trebleFilter.type = "highshelf";
trebleFilter.frequency.value = 2000;
trebleFilter.gain.value = treble;
sourceNode.connect(bassFilter);
bassFilter.connect(trebleFilter);
trebleFilter.connect(analyserNode);
// here we connect to the destination i.e. speakers
analyserNode.connect(audioCtx.destination);
return analyserNode;
}
function setupUI(){
document.querySelector("#bassBoost").onchange = function(e){
bass = e.target.value;
bassFilter.gain.value = bass;
}
document.querySelector("#trebleBoost").onchange = function(e){
treble = e.target.value;
trebleFilter.gain.value = treble;
}
// get reference to file input and listen for changes
document.querySelector('#file').onchange = function(e){
var sound = document.getElementById('sound');
sound.src = URL.createObjectURL(this.files[0]);
//document.querySelector("#status").innerHTML = "Now playing: " + e.target.value;
audioElement.volume = 0.2;
audioElement.play();
sound.onend = function(e){
URL.revokeObjectURL(this.src);
}
}
}
function update() {
// this schedules a call to the update() method in 1/60 seconds
requestAnimationFrame(update);
/*
Nyquist Theorem
http://whatis.techtarget.com/definition/Nyquist-Theorem
The array of data we get back is 1/2 the size of the sample rate
*/
// create a new array of 8-bit integers (0-255), array of 64 data points
let data = new Uint8Array(NUM_SAMPLES/2);
let waveData = new Uint8Array(NUM_SAMPLES/2);
// populate the array with the frequency data
// notice these arrays can be passed "by reference"
analyserNode.getByteFrequencyData(data); //frequency data
analyserNode.getByteTimeDomainData(waveData); // waveform data
// mouth uses first third of audio range
let mouthData = 0;
for (var i = 0; i < 20; i++){
mouthData = mouthData + data[i];
}
mouthData = mouthData / 20;
// uses second third of audio range
let earData = 0;
for (var i = 20; i < 40; i++){
earData = earData + data[i];
}
earData = earData / 20;
// nose uses last third of audio range
let noseData = 0;
for (var i = 40; i < 46; i++){
noseData = noseData + data[i];
}
noseData = noseData / 6;
// clear screen
ctx.fillStyle = backgroundColor;
ctx.fillRect(0, 0, 1080, 850);
ctx.save();
ctx.fillStyle = backgroundColor;
ctx.strokeStyle ="rgba(221, 221, 221, 0.4)";
ctx.lineWidth = 2;
ctx.restore();
//Draw Ears
ctx.save();
ctx.translate(390,200);
ctx.rotate(-(earData*Math.PI/180)/15);
ctx.drawImage(leftEar,(-2 * leftEar.width/3),(-2 * leftEar.width/3));
ctx.restore();
ctx.save();
ctx.translate(685,200);
ctx.rotate((earData*Math.PI/180)/15);
ctx.drawImage(rightEar,(-1 * rightEar.width/3),(-2 * rightEar.width/3));
ctx.restore();
//Draw Face
ctx.drawImage(head, 220, 150); // drw image with scaled width and height
//Draw Eyes
ctx.drawImage(leftEye, 370, 200);
ctx.drawImage(rightEye, 590, 200);
//Draw Eyes
ctx.drawImage(leftEyebrow, 365, 180);
ctx.drawImage(rightEyebrow, 570, 190);
//Draw Cheeks
ctx.drawImage(leftCheek, 80, 510);
ctx.drawImage(rightCheek, 630, 510);
//Draw Mouth
ctx.save();
// scale the image and make sure it isn't too small
var mouthScale = mouthData / 100;
var mouthHeight;
if (mouth.height > (mouth.height * mouthScale)){
mouthHeight = mouth.height;
} else {
mouthHeight = mouth.height * mouthScale;
}
var mouthWidth;
if (mouth.width > (mouth.width * mouthScale)){
mouthWidth = mouth.width;
} else {
mouthWidth = mouth.width * mouthScale;
}
var x = (ctx.canvas.width - mouthWidth) / 2;
var y = ((ctx.canvas.height - mouthHeight) / 2) + 215;
ctx.drawImage(mouth, x, y, mouthWidth, mouthHeight); // drw image with scaled width and height
//ctx.drawImage(mouth, 525+mouth.width/2, 450+mouth.height/2, ((100 + mouthData * 0.2)), ((100 + mouthData * 0.2)));
ctx.restore();
//Draw Nose
ctx.save();
// scale the image and make sure it isn't too small
var noseScale = noseData / 60;
var noseHeight;
if (nose.height > (nose.height * noseScale)){
noseHeight = nose.height;
} else {
noseHeight = nose.height * noseScale;
}
var noseWidth;
if (nose.width > (nose.width * noseScale)){
noseWidth = nose.width;
} else {
noseWidth = nose.width * noseScale;
}
var x = (ctx.canvas.width - noseWidth) / 2;
var y = ((ctx.canvas.height - noseHeight) / 2) + 85;
ctx.drawImage(nose, x, y, noseWidth, noseHeight); // drw image with scaled width and height
//ctx.drawImage(mouth, 525+mouth.width/2, 450+mouth.height/2, ((100 + mouthData * 0.2)), ((100 + mouthData * 0.2)));
ctx.restore();
canvasBob = earData / 100;
canvasBob = canvasBob + 10;
document.getElementById('canvas-container').style.top = canvasBob + '%';
//Draw Song name, if we want to draw text?
/*
ctx.save();
ctx.font = "25px Indie Flower";
ctx.fillStyle = "rgba(235, 64, 52, 1)";
ctx.fillText("BeeKoo Mix", 50, 50);
ctx.restore();
*/
}
window.addEventListener("load",init);
/*
// FULL SCREEN MODE - Do we want this?
function requestFullscreen(element) {
if (element.requestFullscreen) {
element.requestFullscreen();
} else if (element.mozRequestFullscreen) {
element.mozRequestFullscreen();
} else if (element.mozRequestFullScreen) { // camel-cased 'S' was changed to 's' in spec
element.mozRequestFullScreen();
} else if (element.webkitRequestFullscreen) {
element.webkitRequestFullscreen();
}
// .. and do nothing if the method is not supported
}; */
}());
function navigateToMeowsician(){
window.location.href = 'meowsician.html';
} | init | identifier_name |
script.js | // An IIFE ("Iffy") - see the notes in mycourses
(function(){
"use strict";
let NUM_SAMPLES = 128;
let backgroundColor = "#ffffff";
let color = "#eb4034";
let audioElement;
let analyserNode;
let canvasBob;
let canvas,ctx;
let grad, songName;
let bassFilter, trebleFilter;
let bass=0, treble=0;
let mouth, leftEar, rightEar, nose, head, leftEye, rightEye, leftCheek, rightCheek, leftEyebrow, rightEyebrow;
function init(){
// set up canvas stuff
canvas = document.querySelector('canvas');
ctx = canvas.getContext("2d");
// get reference to <audio> element on page
audioElement = document.querySelector('audio');
// call our helper function and get an analyser node
analyserNode = createWebAudioContextWithAnalyserNode(audioElement);
// Find facial features
mouth = new Image();
mouth.src = "media/mouth-image.png";
leftEar = new Image();
leftEar.src = "media/left-ear.png";
rightEar = new Image();
rightEar.src = "media/right-ear.png";
nose = new Image();
nose.src = "media/nose-image.png";
head = new Image();
head.src = "media/head.png";
leftEye = new Image();
leftEye.src = "media/left-eye.png";
rightEye = new Image();
rightEye.src = "media/right-eye.png";
leftCheek = new Image();
leftCheek.src = "media/left-cheek.png";
rightCheek = new Image();
rightCheek.src = "media/right-cheek.png";
leftEyebrow = new Image();
leftEyebrow.src = "media/left-eyebrow.png";
rightEyebrow = new Image();
rightEyebrow.src = "media/right-eyebrow.png";
// get sound track <select> and Full Screen button working
setupUI();
// start animation loop
update();
}
function createWebAudioContextWithAnalyserNode(audioElement) {
let audioCtx, analyserNode, sourceNode;
// create new AudioContext
// The || is because WebAudio has not been standardized across browsers yet
// http://webaudio.github.io/web-audio-api/#the-audiocontext-interface
audioCtx = new (window.AudioContext || window.webkitAudioContext);
// create an analyser node
analyserNode = audioCtx.createAnalyser();
/*
We will request NUM_SAMPLES number of samples or "bins" spaced equally
across the sound spectrum.
If NUM_SAMPLES (fftSize) is 256, then the first bin is 0 Hz, the second is 172 Hz,
the third is 344Hz. Each bin contains a number between 0-255 representing
the amplitude of that frequency.
*/
// fft stands for Fast Fourier Transform
analyserNode.fftSize = NUM_SAMPLES;
// this is where we hook up the <audio> element to the analyserNode
sourceNode = audioCtx.createMediaElementSource(audioElement);
//add bass boost
bassFilter = audioCtx.createBiquadFilter();
bassFilter.type = "lowshelf";
bassFilter.frequency.value = 200;
bassFilter.gain.value = bass;
//add treble boost
trebleFilter = audioCtx.createBiquadFilter();
trebleFilter.type = "highshelf";
trebleFilter.frequency.value = 2000;
trebleFilter.gain.value = treble;
sourceNode.connect(bassFilter);
bassFilter.connect(trebleFilter);
trebleFilter.connect(analyserNode);
// here we connect to the destination i.e. speakers
analyserNode.connect(audioCtx.destination);
return analyserNode;
}
function setupUI(){
document.querySelector("#bassBoost").onchange = function(e){
bass = e.target.value;
bassFilter.gain.value = bass;
}
document.querySelector("#trebleBoost").onchange = function(e){
treble = e.target.value;
trebleFilter.gain.value = treble;
}
// get reference to file input and listen for changes
document.querySelector('#file').onchange = function(e){
var sound = document.getElementById('sound');
sound.src = URL.createObjectURL(this.files[0]);
//document.querySelector("#status").innerHTML = "Now playing: " + e.target.value;
audioElement.volume = 0.2;
audioElement.play();
sound.onend = function(e){
URL.revokeObjectURL(this.src);
}
}
}
function update() {
// this schedules a call to the update() method in 1/60 seconds
requestAnimationFrame(update);
/*
Nyquist Theorem
http://whatis.techtarget.com/definition/Nyquist-Theorem
The array of data we get back is 1/2 the size of the sample rate
*/
// create a new array of 8-bit integers (0-255), array of 64 data points
let data = new Uint8Array(NUM_SAMPLES/2);
let waveData = new Uint8Array(NUM_SAMPLES/2);
// populate the array with the frequency data
// notice these arrays can be passed "by reference"
analyserNode.getByteFrequencyData(data); //frequency data
analyserNode.getByteTimeDomainData(waveData); // waveform data
// mouth uses first third of audio range
let mouthData = 0;
for (var i = 0; i < 20; i++){
mouthData = mouthData + data[i];
}
mouthData = mouthData / 20;
// uses second third of audio range
let earData = 0;
for (var i = 20; i < 40; i++){
earData = earData + data[i];
}
earData = earData / 20;
// nose uses last third of audio range
let noseData = 0;
for (var i = 40; i < 46; i++){
noseData = noseData + data[i];
}
noseData = noseData / 6;
// clear screen
ctx.fillStyle = backgroundColor;
ctx.fillRect(0, 0, 1080, 850);
ctx.save();
ctx.fillStyle = backgroundColor;
ctx.strokeStyle ="rgba(221, 221, 221, 0.4)";
ctx.lineWidth = 2;
ctx.restore();
//Draw Ears
ctx.save();
ctx.translate(390,200);
ctx.rotate(-(earData*Math.PI/180)/15);
ctx.drawImage(leftEar,(-2 * leftEar.width/3),(-2 * leftEar.width/3));
ctx.restore();
ctx.save();
ctx.translate(685,200);
ctx.rotate((earData*Math.PI/180)/15);
ctx.drawImage(rightEar,(-1 * rightEar.width/3),(-2 * rightEar.width/3));
ctx.restore();
//Draw Face
ctx.drawImage(head, 220, 150); // drw image with scaled width and height
//Draw Eyes
ctx.drawImage(leftEye, 370, 200);
ctx.drawImage(rightEye, 590, 200);
//Draw Eyes
ctx.drawImage(leftEyebrow, 365, 180);
ctx.drawImage(rightEyebrow, 570, 190);
//Draw Cheeks
ctx.drawImage(leftCheek, 80, 510); | ctx.drawImage(rightCheek, 630, 510);
//Draw Mouth
ctx.save();
// scale the image and make sure it isn't too small
var mouthScale = mouthData / 100;
var mouthHeight;
if (mouth.height > (mouth.height * mouthScale)){
mouthHeight = mouth.height;
} else {
mouthHeight = mouth.height * mouthScale;
}
var mouthWidth;
if (mouth.width > (mouth.width * mouthScale)){
mouthWidth = mouth.width;
} else {
mouthWidth = mouth.width * mouthScale;
}
var x = (ctx.canvas.width - mouthWidth) / 2;
var y = ((ctx.canvas.height - mouthHeight) / 2) + 215;
ctx.drawImage(mouth, x, y, mouthWidth, mouthHeight); // drw image with scaled width and height
//ctx.drawImage(mouth, 525+mouth.width/2, 450+mouth.height/2, ((100 + mouthData * 0.2)), ((100 + mouthData * 0.2)));
ctx.restore();
//Draw Nose
ctx.save();
// scale the image and make sure it isn't too small
var noseScale = noseData / 60;
var noseHeight;
if (nose.height > (nose.height * noseScale)){
noseHeight = nose.height;
} else {
noseHeight = nose.height * noseScale;
}
var noseWidth;
if (nose.width > (nose.width * noseScale)){
noseWidth = nose.width;
} else {
noseWidth = nose.width * noseScale;
}
var x = (ctx.canvas.width - noseWidth) / 2;
var y = ((ctx.canvas.height - noseHeight) / 2) + 85;
ctx.drawImage(nose, x, y, noseWidth, noseHeight); // drw image with scaled width and height
//ctx.drawImage(mouth, 525+mouth.width/2, 450+mouth.height/2, ((100 + mouthData * 0.2)), ((100 + mouthData * 0.2)));
ctx.restore();
canvasBob = earData / 100;
canvasBob = canvasBob + 10;
document.getElementById('canvas-container').style.top = canvasBob + '%';
//Draw Song name, if we want to draw text?
/*
ctx.save();
ctx.font = "25px Indie Flower";
ctx.fillStyle = "rgba(235, 64, 52, 1)";
ctx.fillText("BeeKoo Mix", 50, 50);
ctx.restore();
*/
}
window.addEventListener("load",init);
/*
// FULL SCREEN MODE - Do we want this?
function requestFullscreen(element) {
if (element.requestFullscreen) {
element.requestFullscreen();
} else if (element.mozRequestFullscreen) {
element.mozRequestFullscreen();
} else if (element.mozRequestFullScreen) { // camel-cased 'S' was changed to 's' in spec
element.mozRequestFullScreen();
} else if (element.webkitRequestFullscreen) {
element.webkitRequestFullscreen();
}
// .. and do nothing if the method is not supported
}; */
}());
function navigateToMeowsician(){
window.location.href = 'meowsician.html';
} | random_line_split | |
script.js |
// An IIFE ("Iffy") - see the notes in mycourses
(function(){
"use strict";
let NUM_SAMPLES = 128;
let backgroundColor = "#ffffff";
let color = "#eb4034";
let audioElement;
let analyserNode;
let canvasBob;
let canvas,ctx;
let grad, songName;
let bassFilter, trebleFilter;
let bass=0, treble=0;
let mouth, leftEar, rightEar, nose, head, leftEye, rightEye, leftCheek, rightCheek, leftEyebrow, rightEyebrow;
function init(){
// set up canvas stuff
canvas = document.querySelector('canvas');
ctx = canvas.getContext("2d");
// get reference to <audio> element on page
audioElement = document.querySelector('audio');
// call our helper function and get an analyser node
analyserNode = createWebAudioContextWithAnalyserNode(audioElement);
// Find facial features
mouth = new Image();
mouth.src = "media/mouth-image.png";
leftEar = new Image();
leftEar.src = "media/left-ear.png";
rightEar = new Image();
rightEar.src = "media/right-ear.png";
nose = new Image();
nose.src = "media/nose-image.png";
head = new Image();
head.src = "media/head.png";
leftEye = new Image();
leftEye.src = "media/left-eye.png";
rightEye = new Image();
rightEye.src = "media/right-eye.png";
leftCheek = new Image();
leftCheek.src = "media/left-cheek.png";
rightCheek = new Image();
rightCheek.src = "media/right-cheek.png";
leftEyebrow = new Image();
leftEyebrow.src = "media/left-eyebrow.png";
rightEyebrow = new Image();
rightEyebrow.src = "media/right-eyebrow.png";
// get sound track <select> and Full Screen button working
setupUI();
// start animation loop
update();
}
function createWebAudioContextWithAnalyserNode(audioElement) {
let audioCtx, analyserNode, sourceNode;
// create new AudioContext
// The || is because WebAudio has not been standardized across browsers yet
// http://webaudio.github.io/web-audio-api/#the-audiocontext-interface
audioCtx = new (window.AudioContext || window.webkitAudioContext);
// create an analyser node
analyserNode = audioCtx.createAnalyser();
/*
We will request NUM_SAMPLES number of samples or "bins" spaced equally
across the sound spectrum.
If NUM_SAMPLES (fftSize) is 256, then the first bin is 0 Hz, the second is 172 Hz,
the third is 344Hz. Each bin contains a number between 0-255 representing
the amplitude of that frequency.
*/
// fft stands for Fast Fourier Transform
analyserNode.fftSize = NUM_SAMPLES;
// this is where we hook up the <audio> element to the analyserNode
sourceNode = audioCtx.createMediaElementSource(audioElement);
//add bass boost
bassFilter = audioCtx.createBiquadFilter();
bassFilter.type = "lowshelf";
bassFilter.frequency.value = 200;
bassFilter.gain.value = bass;
//add treble boost
trebleFilter = audioCtx.createBiquadFilter();
trebleFilter.type = "highshelf";
trebleFilter.frequency.value = 2000;
trebleFilter.gain.value = treble;
sourceNode.connect(bassFilter);
bassFilter.connect(trebleFilter);
trebleFilter.connect(analyserNode);
// here we connect to the destination i.e. speakers
analyserNode.connect(audioCtx.destination);
return analyserNode;
}
function setupUI(){
document.querySelector("#bassBoost").onchange = function(e){
bass = e.target.value;
bassFilter.gain.value = bass;
}
document.querySelector("#trebleBoost").onchange = function(e){
treble = e.target.value;
trebleFilter.gain.value = treble;
}
// get reference to file input and listen for changes
document.querySelector('#file').onchange = function(e){
var sound = document.getElementById('sound');
sound.src = URL.createObjectURL(this.files[0]);
//document.querySelector("#status").innerHTML = "Now playing: " + e.target.value;
audioElement.volume = 0.2;
audioElement.play();
sound.onend = function(e){
URL.revokeObjectURL(this.src);
}
}
}
function update() {
// this schedules a call to the update() method in 1/60 seconds
requestAnimationFrame(update);
/*
Nyquist Theorem
http://whatis.techtarget.com/definition/Nyquist-Theorem
The array of data we get back is 1/2 the size of the sample rate
*/
// create a new array of 8-bit integers (0-255), array of 64 data points
let data = new Uint8Array(NUM_SAMPLES/2);
let waveData = new Uint8Array(NUM_SAMPLES/2);
// populate the array with the frequency data
// notice these arrays can be passed "by reference"
analyserNode.getByteFrequencyData(data); //frequency data
analyserNode.getByteTimeDomainData(waveData); // waveform data
// mouth uses first third of audio range
let mouthData = 0;
for (var i = 0; i < 20; i++){
mouthData = mouthData + data[i];
}
mouthData = mouthData / 20;
// uses second third of audio range
let earData = 0;
for (var i = 20; i < 40; i++){
earData = earData + data[i];
}
earData = earData / 20;
// nose uses last third of audio range
let noseData = 0;
for (var i = 40; i < 46; i++){
noseData = noseData + data[i];
}
noseData = noseData / 6;
// clear screen
ctx.fillStyle = backgroundColor;
ctx.fillRect(0, 0, 1080, 850);
ctx.save();
ctx.fillStyle = backgroundColor;
ctx.strokeStyle ="rgba(221, 221, 221, 0.4)";
ctx.lineWidth = 2;
ctx.restore();
//Draw Ears
ctx.save();
ctx.translate(390,200);
ctx.rotate(-(earData*Math.PI/180)/15);
ctx.drawImage(leftEar,(-2 * leftEar.width/3),(-2 * leftEar.width/3));
ctx.restore();
ctx.save();
ctx.translate(685,200);
ctx.rotate((earData*Math.PI/180)/15);
ctx.drawImage(rightEar,(-1 * rightEar.width/3),(-2 * rightEar.width/3));
ctx.restore();
//Draw Face
ctx.drawImage(head, 220, 150); // drw image with scaled width and height
//Draw Eyes
ctx.drawImage(leftEye, 370, 200);
ctx.drawImage(rightEye, 590, 200);
//Draw Eyes
ctx.drawImage(leftEyebrow, 365, 180);
ctx.drawImage(rightEyebrow, 570, 190);
//Draw Cheeks
ctx.drawImage(leftCheek, 80, 510);
ctx.drawImage(rightCheek, 630, 510);
//Draw Mouth
ctx.save();
// scale the image and make sure it isn't too small
var mouthScale = mouthData / 100;
var mouthHeight;
if (mouth.height > (mouth.height * mouthScale)){
mouthHeight = mouth.height;
} else {
mouthHeight = mouth.height * mouthScale;
}
var mouthWidth;
if (mouth.width > (mouth.width * mouthScale)){
mouthWidth = mouth.width;
} else {
mouthWidth = mouth.width * mouthScale;
}
var x = (ctx.canvas.width - mouthWidth) / 2;
var y = ((ctx.canvas.height - mouthHeight) / 2) + 215;
ctx.drawImage(mouth, x, y, mouthWidth, mouthHeight); // drw image with scaled width and height
//ctx.drawImage(mouth, 525+mouth.width/2, 450+mouth.height/2, ((100 + mouthData * 0.2)), ((100 + mouthData * 0.2)));
ctx.restore();
//Draw Nose
ctx.save();
// scale the image and make sure it isn't too small
var noseScale = noseData / 60;
var noseHeight;
if (nose.height > (nose.height * noseScale)){
noseHeight = nose.height;
} else {
noseHeight = nose.height * noseScale;
}
var noseWidth;
if (nose.width > (nose.width * noseScale)){
noseWidth = nose.width;
} else {
noseWidth = nose.width * noseScale;
}
var x = (ctx.canvas.width - noseWidth) / 2;
var y = ((ctx.canvas.height - noseHeight) / 2) + 85;
ctx.drawImage(nose, x, y, noseWidth, noseHeight); // drw image with scaled width and height
//ctx.drawImage(mouth, 525+mouth.width/2, 450+mouth.height/2, ((100 + mouthData * 0.2)), ((100 + mouthData * 0.2)));
ctx.restore();
canvasBob = earData / 100;
canvasBob = canvasBob + 10;
document.getElementById('canvas-container').style.top = canvasBob + '%';
//Draw Song name, if we want to draw text?
/*
ctx.save();
ctx.font = "25px Indie Flower";
ctx.fillStyle = "rgba(235, 64, 52, 1)";
ctx.fillText("BeeKoo Mix", 50, 50);
ctx.restore();
*/
}
window.addEventListener("load",init);
/*
// FULL SCREEN MODE - Do we want this?
function requestFullscreen(element) {
if (element.requestFullscreen) {
element.requestFullscreen();
} else if (element.mozRequestFullscreen) {
element.mozRequestFullscreen();
} else if (element.mozRequestFullScreen) { // camel-cased 'S' was changed to 's' in spec
element.mozRequestFullScreen();
} else if (element.webkitRequestFullscreen) {
element.webkitRequestFullscreen();
}
// .. and do nothing if the method is not supported
}; */
}());
function navigateToMeowsician() | {
window.location.href = 'meowsician.html';
} | identifier_body | |
tos.py | # Copyright (c) 2008 Johns Hopkins University.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written
# agreement is hereby granted, provided that the above copyright
# notice, the (updated) modification history and the author appear in
# all copies of this source code.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, LOSS OF USE, DATA,
# OR PROFITS) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# @author Razvan Musaloiu-E. <razvanm@cs.jhu.edu>
# @author David Purdy <david@radioretail.co.za>
"""A library that implements the T2 serial communication.
This library has two parts: one that deals with sending and receiving
packets using the serial format from T2 (TEP113) and a second one that
tries to simplifies the work with arbitrary packets.
"""
import sys, struct, time, serial, socket, operator, thread
from Queue import Queue
from threading import Lock, Condition
__version__ = "$Id: tos.py,v 1.2 2008/07/20 22:16:50 razvanm Exp $"
__all__ = ['Serial', 'AM',
'Packet', 'RawPacket',
'AckFrame', 'DataFrame', 'NoAckDataFrame',
'ActiveMessage']
ACK_WAIT = 0.2 # Maximum amount of time to wait for an ack
ACK_WARN = 0.2 # Warn if acks take longer than this to arrive
def list2hex(v):
return " ".join(["%02x" % p for p in v])
class Error(Exception):
"""Base error class for this module"""
pass
class TimeoutError(Error):
"""Thrown when a serial operation times out"""
pass
class ReadError(Error):
"""Base class for read error exceptions"""
pass
class WriteError(Error):
"""Base class for write error exceptions"""
pass
class ReadTimeoutError(TimeoutError, ReadError):
"""Thrown when a serial read operation times out"""
pass
class ReadCRCError(ReadError):
"""Thrown when a read packet fails a CRC check"""
pass
class BadAckSeqnoError(ReadError):
"""Thrown if an ack packet has an unexpected sequenc number"""
pass
class WriteTimeoutError(TimeoutError, WriteError):
"""Thrown when a serial write operation times out"""
pass
class SimpleSerial:
"""
A SimpleSerial object offers a way to send and data using a HDLC-like
formating.
Use SimpleSerial objects for basic low-level serial communications. Use
Serial objects for higher level logic (retry sends, log printfs, etc).
"""
HDLC_FLAG_BYTE = 0x7e
HDLC_CTLESC_BYTE = 0x7d
TOS_SERIAL_ACTIVE_MESSAGE_ID = 0
TOS_SERIAL_CC1000_ID = 1
TOS_SERIAL_802_15_4_ID = 2
TOS_SERIAL_UNKNOWN_ID = 255
SERIAL_PROTO_ACK = 67
SERIAL_PROTO_PACKET_ACK = 68
SERIAL_PROTO_PACKET_NOACK = 69
SERIAL_PROTO_PACKET_UNKNOWN = 255
def __init__(self, port, baudrate, flush=False, debug=False, qsize=10,
timeout=None):
self._debug = debug
self._in_queue = []
self._qsize = qsize
self._ack = None
self._write_counter = 0
self._write_counter_failures = 0
self._read_counter = 0
self._ts = None
self.timeout = timeout # Public attribute
self._received_packet_filters = [] # filter functions for received packets
# Remember sent (and unacknowledged) seqno numbers for 15 seconds:
self._unacked_seqnos = SeqTracker(15.0)
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=0.5)
self._s.flushInput()
if flush:
print >>sys.stdout, "Flushing the serial port",
endtime = time.time() + 1
while time.time() < endtime:
try:
self._read()
except ReadError:
pass
sys.stdout.write(".")
if not self._debug:
sys.stdout.write("\n")
self._s.close()
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=timeout)
# Add a filter for received 'write ack' packets
self.add_received_packet_filter(self._write_ack_filter)
# Returns the next incoming serial packet
def _read(self, timeout=None):
"""Wait for a packet and return it as a RawPacket.
Throws:
- ReadCRCError if a CRC check fails
- ReadTimeoutError if the timeout expires.
"""
# Developer notes:
#
# Packet data read from Serial is in this format:
# [HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
#
# [Escaped data] is encoded so that [HDLC_FLAG_BYTE] byte
# values cannot occur within it. When [Escaped data] has been
# unescaped, the last 2 bytes are a 16-bit CRC of the earlier
# part of the packet (excluding the initial HDLC_FLAG_BYTE
# byte)
#
# It's also possible that the serial device was half-way
# through transmitting a packet when this function was called
# (app was just started). So we also neeed to handle this case:
#
# [Incomplete escaped data][HDLC_FLAG_BYTE][HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
#
# In this case we skip over the first (incomplete) packet.
#
if self._s.timeout != timeout and timeout != None:
if self._debug:
print "Set the timeout to %s, previous one was %s" % (timeout, self._s.timeout)
self._s.timeout = timeout
try:
# Read bytes until we get to a HDLC_FLAG_BYTE value
# (either the end of a packet, or the start of a new one)
d = self._get_byte(timeout)
ts = time.time()
if self._debug and d != self.HDLC_FLAG_BYTE:
print "Skipping incomplete packet"
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte(timeout)
ts = time.time()
# Store HDLC_FLAG_BYTE at the start of the retrieved packet
# data:
packet = [d]
# Is the next byte also HDLC_FLAG_BYTE?
d = self._get_byte(timeout)
if d == self.HDLC_FLAG_BYTE:
# Yes. This means that the previous byte was for
# the end of the previous packet, and this byte is for
# the start of the next packet.
# Get the 2nd byte of the new packet:
d = self._get_byte(timeout)
ts = time.time()
# We are now on the 2nd byte of the packet. Add it to
# our retrieved packet data:
packet.append(d)
# Read bytes from serial until we read another
# HDLC_FLAG_BYTE value (end of the current packet):
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte(timeout)
packet.append(d)
# Done reading a whole packet from serial
if self._debug:
print "SimpleSerial:_read: unescaped", packet
# Decode the packet, and check CRC:
packet = self._unescape(packet)
crc = self._crc16(0, packet[1:-3])
packet_crc = self._decode(packet[-3:-1])
if crc != packet_crc:
print "Warning: wrong CRC! %x != %x %s" % (crc, packet_crc, ["%2x" % i for i in packet])
raise ReadCRCError
if self._debug:
if self._ts == None:
self._ts = ts
else:
print "Serial:_read: %.4f (%.4f) Recv:" % (ts, ts - self._ts), self._format_packet(packet[1:-3])
self._ts = ts
# Packet was successfully retrieved, so return it in a
# RawPacket wrapper object (but leave out the
# HDLC_FLAG_BYTE and CRC bytes)
return RawPacket(ts, packet[1:-3])
except socket.timeout:
raise ReadTimeoutError
def _write_ack_filter(self, packet):
"""Filter for recieved write acknowledgement packets"""
ack = AckFrame(packet.data)
if ack.protocol == self.SERIAL_PROTO_ACK:
if self._debug:
print "_filter_read: got an ack:", ack
self._ack = ack
packet = None # No further processing of received ack packet
return packet
def _filter_read(self, timeout=None):
"""Read a packet from the serial device, perform filtering, and return
the packet if it hasn't been processed yet.
"""
p = self._read(timeout)
self._read_counter += 1
if self._debug:
print "_filter_read: got a packet(%d): %s" % (self._read_counter, p)
# Pass the received packet through the filter functions:
if p is not None:
for filter_func in self._received_packet_filters:
p = filter_func(p)
# Stop now if the packet doesn't need further processing:
if p is None:
break
# Return the packet (if there was no timeout and it wasn't filtered)
return p
def _get_ack(self, timeout, expected_seqno):
"""Get the next ack packet
Read packets from the serial device until we get the next ack (which
then gets stored in self._ack), or the timeout expires. non-ack packets
are buffered.
Throws:
- ReadTimeoutError if the timeout expires.
- BadAckSeqnoError if an ack with a bad sequence number is received
"""
endtime = time.time() + timeout
while time.time() < endtime:
# Read the a packet over serial
self._ack = None
remaining = endtime - time.time()
p = self._filter_read(timeout)
# Was the packet filtered?
if p:
# Got an unfiltered packet
if len(self._in_queue) >= self._qsize:
print "Warning: Buffer overflow"
self._in_queue.pop(0)
self._in_queue.append(p)
else:
# Packet was filtered. Was it an ack?
if self._ack is not None:
# The packet was an ack, so remove it from our
# 'unacknowledged seqnos' list (or raise a BadAckSeqnoError
# error if it isn't in the list)
self._unacked_seqnos.seqno_acked(self._ack.seqno)
# Stop reading packets if it's the ack we are waiting for:
if self._ack.seqno == expected_seqno:
return
# Timed out
raise ReadTimeoutError
def close(self):
"""Close the serial device"""
self._s.close()
def read(self, timeout=None):
"""Read a packet, either from the input buffer or from the serial
device.
Returns a RawPacket object, otherwise None if the packet was filtered
(by eg: Serial's printf-filtering function)
Does not retry reads if the first one fails. Use Serial.read() for
that.
"""
if self._in_queue:
return self._in_queue.pop(0)
else:
return self._filter_read(timeout)
def write(self, payload, seqno, timeout=0.2):
"""
Write a packet. If the payload argument is a list, it is
assumed to be exactly the payload. Otherwise the payload is
assume to be a Packet and the real payload is obtain by
calling the .payload().
Only attempts to write once, and times out if an ack packet is not
received within [timeout] seconds. Use Serial.write() if you want
automatic write retries.
seqno should be an integer between 0 and 99 which changes each time you
send a new packet. The value should remain the same when you are
retrying a packet write that just failed.
Raises WriteTimeoutError if the write times out (ack packet doesn't
arrive within [timeout] seconds).
"""
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
packet = DataFrame();
packet.protocol = self.SERIAL_PROTO_PACKET_ACK
packet.seqno = seqno
packet.dispatch = 0
packet.data = payload
packet = packet.payload()
crc = self._crc16(0, packet)
packet.append(crc & 0xff)
packet.append((crc >> 8) & 0xff)
packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]
# Write the packet:
self._unacked_seqnos.seqno_sent(seqno) # Keep track of sent seqno's
self._put_bytes(packet)
self._write_counter += 1
# Wait for an ack packet:
if self._debug:
print "Send(%d/%d): %s" % (self._write_counter, self._write_counter_failures, packet)
print "Wait for ack %d ..." % (seqno)
try:
self._get_ack(timeout, seqno)
except ReadTimeoutError:
# Re-raise read timeouts (of ack packets) as write timeouts (of
# the write operation)
self._write_counter_failures += 1
raise WriteTimeoutError
# Received an ack packet, with the expected sequence number
if self._debug:
print "Wait for ack %d done. Latest ack:" % (seqno), self._ack
print "The packet was acked."
print "Returning from SimpleSerial.write..."
def add_received_packet_filter(self, filter_func):
"""Register a received packet-filtering callback function
_filter_read() calls all of the registered filter functions for each
packet received over serial. Registered filter functions are called in
the order they were registered.
Filter functions are called like this: filter_func(packet)
When a filter function recognises and handles a received packet it
should return a None value to indicate that no further processing
is required for the packet.
When a filter function skips a packet (or for some reason you want
further processing to happen on a packet you've just processed), the
function should return the packet that was passed to it as an argument.
"""
self._received_packet_filters.append(filter_func)
def remove_received_packet_filter(self, filter_func):
"""Remove a filter function added with add_received_packet_filter()"""
self._received_packet_filters.remove(filter_func)
def _format_packet(self, payload):
f = NoAckDataFrame(payload)
if f.protocol == self.SERIAL_PROTO_ACK:
rpacket = AckFrame(payload)
return "Ack seqno: %d" % (rpacket.seqno)
else:
rpacket = ActiveMessage(f.data)
return "D: %04x S: %04x L: %02x G: %02x T: %02x | %s" % \
(rpacket.destination, rpacket.source,
rpacket.length, rpacket.group, rpacket.type,
list2hex(rpacket.data))
def _crc16(self, base_crc, frame_data):
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def _decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
def _get_byte(self, timeout=None):
# old_timeout = self._s.timeout
# if timeout is not None:
# self._s.timeout = timeout
try:
r = struct.unpack("B", self._s.read())[0]
return r
except struct.error:
# Serial port read timeout
raise socket.timeout
# finally:
# self._s.timeout = old_timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
def debug(self, debug):
self._debug = debug
class SeqTracker:
"""Class for keeping track of unacknowledged packet sequence numbers.
SeqTracker is used by SimpleSerial to keep track of sequence numbers which
have been sent with write packets, but not yet acknowledged by received
write ack packets.
"""
def __init__(self, keep_for):
"""Initialise a SeqTracker object.
args:
- keep_for is the length of time for which unacknowledged sequence
numbers should be remembered. After this period has elapsed, the
sequence numbers should be forgotten. If the sequence number is
acknowledged later, it will be treated as unkown
"""
self._keep_for = keep_for
self._queue = []
def seqno_sent(self, seqno):
"""Register that a packet with the specified sequence number was just
sent."""
self._gc()
self._queue.append((seqno, time.time()))
def seqno_acked(self, seqno):
"""Register that a sequence number was just acknowledged.
Find the oldest-known occurance of seqno in the queue and remove it. If
not found then raise a BadAckSeqnoError to inform applications that
the sequence number is not known.
"""
self._gc()
for item in self._queue:
if item[0] == seqno:
# Found seqno
self._queue.remove(item)
return
# seqno not found!
raise BadAckSeqnoError
def get_seqno_sent_times(self, seqno):
"""Return the times when packets with the given sequence number were
sent."""
self._gc()
return [item[1] for item in self._queue if item[0] == seqno]
def __contains__(self, seqno):
"""Return True if the seqno was sent recently (and not acknowledged
yet)"""
self._gc()
for item in self._queue:
if item[0] == seqno:
return True
return False
def _gc(self):
"""Remove old items from the queue"""
remove_before = time.time() - self._keep_for
for item in self._queue:
# Time for the sequence to be removed?
if item[1] < remove_before:
# Sequence data is old, so remove it
self._queue.remove(item)
else:
# Sequence number was added recently, so don't remove it. Also
# stop processing the queue because all later items will be
# newer
break
class Serial:
"""
Wraps a SimpleSerial object, and provides some higher-level functionality
like retrying writes and logging printf packets.
"""
def __init__(self, port, baudrate, flush=False, debug=False, qsize=10,
timeout=None):
"""Initialise a Serial object"""
self._debug = debug
self.timeout = timeout # Public attribute
self._seqno = 0
self._simple_serial = SimpleSerial(port, baudrate, flush, debug, qsize,
timeout)
# Setup automatic logging of received printf packets:
self._printf_msg = ""
self._simple_serial.add_received_packet_filter(self._printf_filter)
def close(self):
"""Close the serial device"""
self._simple_serial.close()
def read(self, timeout=None):
"""Read a packet from the serial port.
Retries packet reads until the timeout expires.
Throws ReadTimeoutError if a a packet can't be read within the timeout.
"""
if timeout is None:
timeout = self.timeout
endtime = None
if timeout is not None:
endtime = time.time() + timeout
while endtime is None or time.time() < endtime:
remaining = None
if endtime is not None:
remaining = endtime - time.time()
try:
p = self._simple_serial.read(remaining)
except ReadError:
if self._debug:
print "Packet read failed. Try again."
else:
# Was the packet filtered?
if p is not None:
# Not filtered, so return it.
# In the current TinyOS the packets from the mote are
# always NoAckDataFrame
return NoAckDataFrame(p.data)
# Read timeout expired
raise ReadTimeoutError
def write(self, payload, timeout=None):
"""Write a packet to the serial port
Keeps retrying endlessly, unless a timeout is set. If the timeout
expires then WriteTimeoutError is thrown.
"""
if timeout is None:
timeout = self.timeout
endtime = None
if timeout is not None:
endtime = time.time() + timeout
# Generate the next sequence number:
self._seqno = (self._seqno + 1) % 100
while endtime is None or time.time() < endtime:
try:
ackwait = ACK_WAIT
if endtime is not None:
remaining = endtime - time.time()
ackwait = min(ACK_WAIT, remaining)
before = time.time()
self._simple_serial.write(payload, self._seqno, ackwait)
length = time.time() - before
if length >= ACK_WARN:
print "Warning: Packet write took %.3fs!" % (length)
return True
except Error:
if self._debug:
print "The packet was not acked. Try again."
# Write operation timed out
raise WriteTimeoutError
def _printf_filter(self, packet):
"""Filter for recieved printf packets"""
ampkt = ActiveMessage(NoAckDataFrame(packet.data).data)
if ampkt.type == 100:
self._printf_msg += "".join([chr(i) for i in ampkt.data]).strip('\0')
# Split printf data on newline character:
# (last string in the split list doesn't have a newline after
# it, so we keep it until next time)
lines = self._printf_msg.split('\n')
for line in lines[:-1]:
print "PRINTF:", line
self._printf_msg = lines[-1]
packet = None # No further processing for the printf packet
return packet
class SFClient:
def __init__(self, host, port, qsize=10):
self._in_queue = Queue(qsize)
self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
data = self._s.recv(2)
if data != 'U ':
print "Wrong handshake"
self._s.send("U ")
print "Connected"
thread.start_new_thread(self.run, ())
def run(self):
while True:
length = ord(self._s.recv(1))
data = self._s.recv(length)
data = [ord(c) for c in data][1:]
#print "Recv %d bytes" % (length), ActiveMessage(data)
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
p = RawPacket()
p.data = data
self._in_queue.put(p, block=False)
def read(self, timeout=0):
return self._in_queue.get()
def write(self, payload):
print "SFClient: write:", payload
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
payload = [0] + payload
self._s.send(chr(len(payload)))
self._s.send(''.join([chr(c) for c in payload]))
return True
class AM:
def __init__(self, s):
self._s = s
def read(self, timeout=None):
return ActiveMessage(self._s.read(timeout).data)
def write(self, packet, amid, timeout=None):
return self._s.write(ActiveMessage(packet, amid=amid), timeout=timeout)
class SimpleSerialAM(SimpleSerial):
"""A derived class of SimpleSerial so that apps can read and write using
higher-level packet structures.
Serves a simalar purpose to the AM class, but for SimpleSerial objects
instead instead of Serial.
"""
def read_am(self, timeout=None):
"""Read a RawPacket object (or None), convert it to ActiveMessage
(or None), and return to the caller"""
# Get a tos.Rawpacket (or None, if filtered) object
p = self.read(timeout)
if p is not None:
assert isinstance(p, RawPacket)
# Convert tos.RawPacket object into an ActiveMessage:
p = NoAckDataFrame(p.data)
p = ActiveMessage(p.data)
# Return the ActiveMessage (or None) packet:
return p
def write_am(self, packet, amid, seqno, timeout=2.0):
"""Convert app packet format to ActiveMessage, and write the
ActiveMessage packet to serial"""
# Convert from app-specific packet to ActiveMessage:
p = ActiveMessage(packet, amid=amid)
# Write to the serial device
self.write(p, seqno, timeout)
class Packet:
"""
The Packet class offers a handy way to build pack and unpack
binary data based on a given pattern.
"""
def _decode(self, v):
r = long(0)
for i in v:
r = (r << 8) + i
return r
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(int(val & 0xFF))
val = val >> 8
output.reverse()
return output
def __init__(self, desc, packet = None):
offset = 0
boffset = 0
sum = 0
for i in range(len(desc)-1, -1, -1):
(n, t, s) = desc[i]
if s == None:
if sum > 0:
desc[i] = (n, t, -sum)
break
sum += s
self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]
self.__dict__['_names'] = [n for (n, t, s) in desc]
self.__dict__['_values'] = []
if type(packet) == type([]):
for (t, s) in self._schema:
if t == 'int':
self._values.append(self._decode(packet[offset:offset + s]))
offset += s
elif t == 'bint':
doffset = 8 - (boffset + s)
self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))
boffset += s
if boffset == 8:
offset += 1
boffset = 0
elif t == 'string':
self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))
offset += s
elif t == 'blob':
if s:
if s > 0:
self._values.append(packet[offset:offset + s])
offset += s
else:
self._values.append(packet[offset:s])
offset = len(packet) + s
else:
self._values.append(packet[offset:])
elif type(packet) == type(()):
for i in packet:
self._values.append(i)
else:
for v in self._schema:
self._values.append(None)
def __repr__(self):
return self._values.__repr__()
def __str__(self):
r = ""
for i in range(len(self._names)):
r += "%s: %s " % (self._names[i], self._values[i])
for i in range(len(self._names), len(self._values)):
r += "%s" % self._values[i]
return r
# return self._values.__str__()
# Implement the map behavior
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __len__(self):
return len(self._values)
def keys(self):
return self._names
def values(self):
return self._names
# Implement the struct behavior
def __getattr__(self, name):
#print "DEBUG: __getattr__", name
if type(name) == type(0):
return self._names[name]
else:
return self._values[self._names.index(name)]
def __setattr__(self, name, value):
if type(name) == type(0):
self._values[name] = value
else:
self._values[self._names.index(name)] = value
def __ne__(self, other):
if other.__class__ == self.__class__:
return self._values != other._values
else:
return True
def __eq__(self, other):
if other.__class__ == self.__class__:
return self._values == other._values
else:
return False
def __nonzero__(self):
return True;
# Custom
def names(self):
return self._names
def sizes(self):
return self._schema
def payload(self):
r = []
boffset = 0
for i in range(len(self._schema)):
(t, s) = self._schema[i]
if t == 'int':
r += self._encode(self._values[i], s)
boffset = 0
elif t == 'bint':
doffset = 8 - (boffset + s)
if boffset == 0:
r += [self._values[i] << doffset]
else:
r[-1] |= self._values[i] << doffset
boffset += s
if boffset == 8:
boffset = 0
elif self._values[i] != []:
r += self._values[i]
for i in self._values[len(self._schema):]:
r += i
return r
class RawPacket(Packet):
def | (self, ts = None, data = None):
Packet.__init__(self,
[('ts' , 'int', 4),
('data', 'blob', None)],
None)
self.ts = ts;
self.data = data
class AckFrame(Packet):
def __init__(self, payload = None):
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1)],
payload)
class DataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
# Assume is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class NoAckDataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
# Assume is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class ActiveMessage(Packet):
def __init__(self, gpacket = None, amid = 0x00, dest = 0xFFFF):
if type(gpacket) == type([]):
payload = gpacket
else:
# Assume this will be derived from Packet
payload = None
Packet.__init__(self,
[('destination', 'int', 2),
('source', 'int', 2),
('length', 'int', 1),
('group', 'int', 1),
('type', 'int', 1),
('data', 'blob', None)],
payload)
if payload == None:
self.destination = dest
self.source = 0x0000
self.group = 0x00
self.type = amid
self.data = []
if gpacket:
self.data = gpacket.payload()
self.length = len(self.data)
| __init__ | identifier_name |
tos.py | # Copyright (c) 2008 Johns Hopkins University.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written
# agreement is hereby granted, provided that the above copyright
# notice, the (updated) modification history and the author appear in
# all copies of this source code.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, LOSS OF USE, DATA,
# OR PROFITS) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# @author Razvan Musaloiu-E. <razvanm@cs.jhu.edu>
# @author David Purdy <david@radioretail.co.za>
"""A library that implements the T2 serial communication.
This library has two parts: one that deals with sending and receiving
packets using the serial format from T2 (TEP113) and a second one that
tries to simplifies the work with arbitrary packets.
"""
import sys, struct, time, serial, socket, operator, thread
from Queue import Queue
from threading import Lock, Condition
__version__ = "$Id: tos.py,v 1.2 2008/07/20 22:16:50 razvanm Exp $"
__all__ = ['Serial', 'AM',
'Packet', 'RawPacket',
'AckFrame', 'DataFrame', 'NoAckDataFrame',
'ActiveMessage']
ACK_WAIT = 0.2 # Maximum amount of time to wait for an ack
ACK_WARN = 0.2 # Warn if acks take longer than this to arrive
def list2hex(v):
return " ".join(["%02x" % p for p in v])
class Error(Exception):
"""Base error class for this module"""
pass
class TimeoutError(Error):
"""Thrown when a serial operation times out"""
pass
class ReadError(Error):
"""Base class for read error exceptions"""
pass
class WriteError(Error):
"""Base class for write error exceptions"""
pass
class ReadTimeoutError(TimeoutError, ReadError):
"""Thrown when a serial read operation times out"""
pass
class ReadCRCError(ReadError):
"""Thrown when a read packet fails a CRC check"""
pass
class BadAckSeqnoError(ReadError):
"""Thrown if an ack packet has an unexpected sequenc number"""
pass
class WriteTimeoutError(TimeoutError, WriteError):
"""Thrown when a serial write operation times out"""
pass
class SimpleSerial:
"""
A SimpleSerial object offers a way to send and data using a HDLC-like
formating.
Use SimpleSerial objects for basic low-level serial communications. Use
Serial objects for higher level logic (retry sends, log printfs, etc).
"""
HDLC_FLAG_BYTE = 0x7e
HDLC_CTLESC_BYTE = 0x7d
TOS_SERIAL_ACTIVE_MESSAGE_ID = 0
TOS_SERIAL_CC1000_ID = 1
TOS_SERIAL_802_15_4_ID = 2
TOS_SERIAL_UNKNOWN_ID = 255
SERIAL_PROTO_ACK = 67
SERIAL_PROTO_PACKET_ACK = 68
SERIAL_PROTO_PACKET_NOACK = 69
SERIAL_PROTO_PACKET_UNKNOWN = 255
def __init__(self, port, baudrate, flush=False, debug=False, qsize=10,
timeout=None):
self._debug = debug
self._in_queue = []
self._qsize = qsize
self._ack = None
self._write_counter = 0
self._write_counter_failures = 0
self._read_counter = 0
self._ts = None
self.timeout = timeout # Public attribute
self._received_packet_filters = [] # filter functions for received packets
# Remember sent (and unacknowledged) seqno numbers for 15 seconds:
self._unacked_seqnos = SeqTracker(15.0)
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=0.5)
self._s.flushInput()
if flush:
print >>sys.stdout, "Flushing the serial port",
endtime = time.time() + 1
while time.time() < endtime:
try:
self._read()
except ReadError:
pass
sys.stdout.write(".")
if not self._debug:
sys.stdout.write("\n")
self._s.close()
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=timeout)
# Add a filter for received 'write ack' packets
self.add_received_packet_filter(self._write_ack_filter)
# Returns the next incoming serial packet
def _read(self, timeout=None):
"""Wait for a packet and return it as a RawPacket.
Throws:
- ReadCRCError if a CRC check fails
- ReadTimeoutError if the timeout expires.
"""
# Developer notes:
#
# Packet data read from Serial is in this format:
# [HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
#
# [Escaped data] is encoded so that [HDLC_FLAG_BYTE] byte
# values cannot occur within it. When [Escaped data] has been
# unescaped, the last 2 bytes are a 16-bit CRC of the earlier
# part of the packet (excluding the initial HDLC_FLAG_BYTE
# byte)
#
# It's also possible that the serial device was half-way
# through transmitting a packet when this function was called
# (app was just started). So we also neeed to handle this case:
#
# [Incomplete escaped data][HDLC_FLAG_BYTE][HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
#
# In this case we skip over the first (incomplete) packet.
#
if self._s.timeout != timeout and timeout != None:
if self._debug:
print "Set the timeout to %s, previous one was %s" % (timeout, self._s.timeout)
self._s.timeout = timeout
try:
# Read bytes until we get to a HDLC_FLAG_BYTE value
# (either the end of a packet, or the start of a new one)
d = self._get_byte(timeout)
ts = time.time()
if self._debug and d != self.HDLC_FLAG_BYTE:
print "Skipping incomplete packet"
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte(timeout)
ts = time.time()
# Store HDLC_FLAG_BYTE at the start of the retrieved packet
# data:
packet = [d]
# Is the next byte also HDLC_FLAG_BYTE?
d = self._get_byte(timeout)
if d == self.HDLC_FLAG_BYTE:
# Yes. This means that the previous byte was for
# the end of the previous packet, and this byte is for
# the start of the next packet.
# Get the 2nd byte of the new packet:
d = self._get_byte(timeout)
ts = time.time()
# We are now on the 2nd byte of the packet. Add it to
# our retrieved packet data:
packet.append(d)
# Read bytes from serial until we read another
# HDLC_FLAG_BYTE value (end of the current packet):
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte(timeout)
packet.append(d)
# Done reading a whole packet from serial
if self._debug:
print "SimpleSerial:_read: unescaped", packet
# Decode the packet, and check CRC:
packet = self._unescape(packet)
crc = self._crc16(0, packet[1:-3])
packet_crc = self._decode(packet[-3:-1])
if crc != packet_crc:
print "Warning: wrong CRC! %x != %x %s" % (crc, packet_crc, ["%2x" % i for i in packet])
raise ReadCRCError
if self._debug:
if self._ts == None:
self._ts = ts
else:
print "Serial:_read: %.4f (%.4f) Recv:" % (ts, ts - self._ts), self._format_packet(packet[1:-3])
self._ts = ts
# Packet was successfully retrieved, so return it in a
# RawPacket wrapper object (but leave out the
# HDLC_FLAG_BYTE and CRC bytes)
return RawPacket(ts, packet[1:-3])
except socket.timeout:
raise ReadTimeoutError
def _write_ack_filter(self, packet):
"""Filter for recieved write acknowledgement packets"""
ack = AckFrame(packet.data)
if ack.protocol == self.SERIAL_PROTO_ACK:
if self._debug:
print "_filter_read: got an ack:", ack
self._ack = ack
packet = None # No further processing of received ack packet
return packet
def _filter_read(self, timeout=None):
"""Read a packet from the serial device, perform filtering, and return
the packet if it hasn't been processed yet.
"""
p = self._read(timeout)
self._read_counter += 1
if self._debug:
print "_filter_read: got a packet(%d): %s" % (self._read_counter, p)
# Pass the received packet through the filter functions:
if p is not None:
for filter_func in self._received_packet_filters:
p = filter_func(p)
# Stop now if the packet doesn't need further processing:
if p is None:
break
# Return the packet (if there was no timeout and it wasn't filtered)
return p
def _get_ack(self, timeout, expected_seqno):
"""Get the next ack packet
Read packets from the serial device until we get the next ack (which
then gets stored in self._ack), or the timeout expires. non-ack packets
are buffered.
Throws:
- ReadTimeoutError if the timeout expires.
- BadAckSeqnoError if an ack with a bad sequence number is received
"""
endtime = time.time() + timeout
while time.time() < endtime:
# Read the a packet over serial
self._ack = None
remaining = endtime - time.time()
p = self._filter_read(timeout)
# Was the packet filtered?
if p:
# Got an unfiltered packet
if len(self._in_queue) >= self._qsize:
print "Warning: Buffer overflow"
self._in_queue.pop(0)
self._in_queue.append(p)
else:
# Packet was filtered. Was it an ack?
if self._ack is not None:
# The packet was an ack, so remove it from our
# 'unacknowledged seqnos' list (or raise a BadAckSeqnoError
# error if it isn't in the list)
self._unacked_seqnos.seqno_acked(self._ack.seqno)
# Stop reading packets if it's the ack we are waiting for:
if self._ack.seqno == expected_seqno:
return
# Timed out
raise ReadTimeoutError
def close(self):
    """Close the underlying serial device."""
    self._s.close()
def read(self, timeout=None):
    """Return one packet, preferring the buffered backlog.

    Packets received while waiting for an ack are queued in
    self._in_queue; those are handed out first (oldest first). Only when
    the backlog is empty is the serial device touched. Returns a
    RawPacket object, or None if the packet was consumed by a filter
    function (e.g. Serial's printf filter). Does not retry failed
    reads; use Serial.read() for that.
    """
    if not self._in_queue:
        return self._filter_read(timeout)
    return self._in_queue.pop(0)
def write(self, payload, seqno, timeout=0.2):
    """Write one framed packet and wait for its acknowledgement.

    If the payload argument is a list it is taken to be exactly the
    payload bytes; otherwise it is assumed to be a Packet and the real
    payload is obtained by calling its .payload() method.

    Only attempts the write once, and times out if an ack packet is not
    received within [timeout] seconds. Use Serial.write() if you want
    automatic write retries.

    seqno should be an integer between 0 and 99 which changes each time
    you send a new packet. The value should remain the same when you are
    retrying a packet write that just failed.

    Raises WriteTimeoutError if the write times out (ack packet doesn't
    arrive within [timeout] seconds).
    """
    if type(payload) != type([]):
        # Assume this will be derived from Packet
        payload = payload.payload()
    # Build the serial data frame around the payload.
    packet = DataFrame();
    packet.protocol = self.SERIAL_PROTO_PACKET_ACK
    packet.seqno = seqno
    packet.dispatch = 0
    packet.data = payload
    packet = packet.payload()
    # Append the 16-bit CRC, least-significant byte first.
    crc = self._crc16(0, packet)
    packet.append(crc & 0xff)
    packet.append((crc >> 8) & 0xff)
    # HDLC-style framing: escape the body and delimit with flag bytes.
    packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]
    # Write the packet:
    self._unacked_seqnos.seqno_sent(seqno) # Keep track of sent seqno's
    self._put_bytes(packet)
    self._write_counter += 1
    # Wait for an ack packet:
    if self._debug:
        print "Send(%d/%d): %s" % (self._write_counter, self._write_counter_failures, packet)
        print "Wait for ack %d ..." % (seqno)
    try:
        self._get_ack(timeout, seqno)
    except ReadTimeoutError:
        # Re-raise read timeouts (of ack packets) as write timeouts (of
        # the write operation)
        self._write_counter_failures += 1
        raise WriteTimeoutError
    # Received an ack packet, with the expected sequence number
    if self._debug:
        print "Wait for ack %d done. Latest ack:" % (seqno), self._ack
        print "The packet was acked."
        print "Returning from SimpleSerial.write..."
def add_received_packet_filter(self, filter_func):
    """Register a callback used to screen every received packet.

    _filter_read() hands each packet read over serial to the registered
    callbacks, in registration order, as filter_func(packet). A callback
    that recognises and fully handles a packet returns None so no
    further processing happens; otherwise it returns the packet it was
    given so the remaining callbacks (and the caller) still see it.
    """
    self._received_packet_filters += [filter_func]
def remove_received_packet_filter(self, filter_func):
    """Unregister a callback added via add_received_packet_filter().

    Raises ValueError when the callback was never registered.
    """
    registered = self._received_packet_filters
    registered.remove(filter_func)
def _format_packet(self, payload):
    """Return a human-readable one-line summary of a raw payload
    (used by the debug output in _read)."""
    f = NoAckDataFrame(payload)
    if f.protocol == self.SERIAL_PROTO_ACK:
        # Ack frames have their own, shorter layout.
        rpacket = AckFrame(payload)
        return "Ack seqno: %d" % (rpacket.seqno)
    else:
        # Everything else is displayed as an active message header plus
        # hex-dumped data.
        rpacket = ActiveMessage(f.data)
        return "D: %04x S: %04x L: %02x G: %02x T: %02x | %s" % \
            (rpacket.destination, rpacket.source,
             rpacket.length, rpacket.group, rpacket.type,
             list2hex(rpacket.data))
def _crc16(self, base_crc, frame_data):
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def _decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
def _get_byte(self, timeout=None):
    """Read a single byte value from the serial device.

    Raises socket.timeout when struct.unpack fails on the read data —
    presumably because a timed-out serial read returns too few bytes;
    confirm against the pyserial timeout behaviour.
    """
    # old_timeout = self._s.timeout
    # if timeout is not None:
    #     self._s.timeout = timeout
    try:
        r = struct.unpack("B", self._s.read())[0]
        return r
    except struct.error:
        # Serial port read timeout
        raise socket.timeout
    # finally:
    #     self._s.timeout = old_timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
def debug(self, debug):
    """Enable or disable debug printing for this object."""
    self._debug = debug
class SeqTracker:
    """Class for keeping track of unacknowledged packet sequence numbers.

    SeqTracker is used by SimpleSerial to keep track of sequence numbers
    which have been sent with write packets, but not yet acknowledged by
    received write ack packets.
    """

    def __init__(self, keep_for):
        """Initialise a SeqTracker object.

        args:
        - keep_for is the length of time (in seconds) for which
          unacknowledged sequence numbers should be remembered. After
          this period has elapsed the sequence numbers are forgotten;
          if such a sequence number is acknowledged later it is treated
          as unknown.
        """
        self._keep_for = keep_for
        # Queue of (seqno, send_time) tuples, oldest first.
        self._queue = []

    def seqno_sent(self, seqno):
        """Register that a packet with the specified sequence number was
        just sent."""
        self._gc()
        self._queue.append((seqno, time.time()))

    def seqno_acked(self, seqno):
        """Register that a sequence number was just acknowledged.

        Find the oldest-known occurrence of seqno in the queue and
        remove it. If not found then raise a BadAckSeqnoError to inform
        applications that the sequence number is not known.
        """
        self._gc()
        for item in self._queue:
            if item[0] == seqno:
                # Found seqno
                self._queue.remove(item)
                return
        # seqno not found!
        raise BadAckSeqnoError

    def get_seqno_sent_times(self, seqno):
        """Return the times when packets with the given sequence number
        were sent."""
        self._gc()
        return [item[1] for item in self._queue if item[0] == seqno]

    def __contains__(self, seqno):
        """Return True if the seqno was sent recently (and not
        acknowledged yet)."""
        self._gc()
        for item in self._queue:
            if item[0] == seqno:
                return True
        return False

    def _gc(self):
        """Remove expired items from the front of the queue.

        Bug fix: the original removed items from the list while
        iterating over it, which skips the element following each
        removal and can leave expired entries behind. The queue is
        ordered oldest-first, so popping from the front until a fresh
        entry is met is the correct equivalent.
        """
        remove_before = time.time() - self._keep_for
        while self._queue and self._queue[0][1] < remove_before:
            self._queue.pop(0)
class Serial:
    """
    Wraps a SimpleSerial object, and provides some higher-level functionality
    like retrying writes and logging printf packets.
    """

    def __init__(self, port, baudrate, flush=False, debug=False, qsize=10,
                 timeout=None):
        """Initialise a Serial object"""
        self._debug = debug
        self.timeout = timeout # Public attribute
        self._seqno = 0
        self._simple_serial = SimpleSerial(port, baudrate, flush, debug, qsize,
                                           timeout)
        # Setup automatic logging of received printf packets:
        self._printf_msg = ""
        self._simple_serial.add_received_packet_filter(self._printf_filter)

    def close(self):
        """Close the serial device"""
        self._simple_serial.close()

    def read(self, timeout=None):
        """Read a packet from the serial port.

        Retries packet reads until the timeout expires.
        Throws ReadTimeoutError if a packet can't be read within the timeout.
        """
        if timeout is None:
            timeout = self.timeout
        # An endtime of None means 'retry forever'.
        endtime = None
        if timeout is not None:
            endtime = time.time() + timeout
        while endtime is None or time.time() < endtime:
            remaining = None
            if endtime is not None:
                remaining = endtime - time.time()
            try:
                p = self._simple_serial.read(remaining)
            except ReadError:
                if self._debug:
                    print "Packet read failed. Try again."
            else:
                # Was the packet filtered?
                if p is not None:
                    # Not filtered, so return it.
                    # In the current TinyOS the packets from the mote are
                    # always NoAckDataFrame
                    return NoAckDataFrame(p.data)
        # Read timeout expired
        raise ReadTimeoutError

    def write(self, payload, timeout=None):
        """Write a packet to the serial port.

        Keeps retrying endlessly, unless a timeout is set. If the timeout
        expires then WriteTimeoutError is thrown.
        """
        if timeout is None:
            timeout = self.timeout
        endtime = None
        if timeout is not None:
            endtime = time.time() + timeout
        # Generate the next sequence number:
        self._seqno = (self._seqno + 1) % 100
        while endtime is None or time.time() < endtime:
            try:
                # Never wait longer for an ack than the overall deadline.
                ackwait = ACK_WAIT
                if endtime is not None:
                    remaining = endtime - time.time()
                    ackwait = min(ACK_WAIT, remaining)
                before = time.time()
                self._simple_serial.write(payload, self._seqno, ackwait)
                length = time.time() - before
                if length >= ACK_WARN:
                    print "Warning: Packet write took %.3fs!" % (length)
                return True
            except Error:
                if self._debug:
                    print "The packet was not acked. Try again."
        # Write operation timed out
        raise WriteTimeoutError

    def _printf_filter(self, packet):
        """Filter for received printf packets"""
        ampkt = ActiveMessage(NoAckDataFrame(packet.data).data)
        # AM type 100 selects the printf channel. NOTE(review): inferred
        # from this check only — confirm against the mote-side code.
        if ampkt.type == 100:
            self._printf_msg += "".join([chr(i) for i in ampkt.data]).strip('\0')
            # Split printf data on newline character:
            # (last string in the split list doesn't have a newline after
            # it, so we keep it until next time)
            lines = self._printf_msg.split('\n')
            for line in lines[:-1]:
                print "PRINTF:", line
            self._printf_msg = lines[-1]
            packet = None # No further processing for the printf packet
        return packet
class SFClient:
    """Client for a serial-forwarder (SF) TCP server.

    Speaks the two-byte 'U ' handshake and length-prefixed frame format
    over a socket, and exposes read()/write() in the same shape as the
    serial classes above.
    """

    def __init__(self, host, port, qsize=10):
        self._in_queue = Queue(qsize)
        self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._s.connect((host, port))
        # Handshake: both sides exchange the protocol banner 'U '.
        data = self._s.recv(2)
        if data != 'U ':
            print "Wrong handshake"
        self._s.send("U ")
        print "Connected"
        # Receive frames on a background thread so reads are buffered.
        thread.start_new_thread(self.run, ())

    def run(self):
        """Background loop: read length-prefixed frames into the queue."""
        while True:
            length = ord(self._s.recv(1))
            data = self._s.recv(length)
            # Drop the leading byte (presumably the dispatch byte,
            # mirroring the [0] prepended in write() — TODO confirm).
            data = [ord(c) for c in data][1:]
            #print "Recv %d bytes" % (length), ActiveMessage(data)
            if self._in_queue.full():
                print "Warning: Buffer overflow"
                self._in_queue.get()
            p = RawPacket()
            p.data = data
            self._in_queue.put(p, block=False)

    def read(self, timeout=0):
        """Return the next buffered packet. Blocks until one arrives;
        the timeout argument is currently ignored."""
        return self._in_queue.get()

    def write(self, payload):
        """Send a payload (list of bytes, or a Packet) as one frame."""
        print "SFClient: write:", payload
        if type(payload) != type([]):
            # Assume this will be derived from Packet
            payload = payload.payload()
        # Prepend the dispatch byte and send as a length-prefixed frame.
        payload = [0] + payload
        self._s.send(chr(len(payload)))
        self._s.send(''.join([chr(c) for c in payload]))
        return True
class AM:
    """Thin adapter that reads and writes ActiveMessage packets through
    an underlying Serial-like object."""

    def __init__(self, s):
        # s is any object exposing read(timeout) and write(packet, timeout).
        self._s = s

    def read(self, timeout=None):
        """Read one packet and wrap its data as an ActiveMessage."""
        return ActiveMessage(self._s.read(timeout).data)

    def write(self, packet, amid, timeout=None):
        """Wrap packet in an ActiveMessage with AM id amid and send it."""
        return self._s.write(ActiveMessage(packet, amid=amid), timeout=timeout)
class SimpleSerialAM(SimpleSerial):
    """A derived class of SimpleSerial so that apps can read and write
    using higher-level packet structures.

    Serves a similar purpose to the AM class, but for SimpleSerial
    objects instead of Serial.
    """

    def read_am(self, timeout=None):
        """Read a RawPacket object (or None), convert it to ActiveMessage
        (or None), and return to the caller"""
        # Get a tos.RawPacket (or None, if filtered) object
        p = self.read(timeout)
        if p is not None:
            assert isinstance(p, RawPacket)
            # Convert tos.RawPacket object into an ActiveMessage:
            p = NoAckDataFrame(p.data)
            p = ActiveMessage(p.data)
        # Return the ActiveMessage (or None) packet:
        return p

    def write_am(self, packet, amid, seqno, timeout=2.0):
        """Convert app packet format to ActiveMessage, and write the
        ActiveMessage packet to serial"""
        # Convert from app-specific packet to ActiveMessage:
        p = ActiveMessage(packet, amid=amid)
        # Write to the serial device
        self.write(p, seqno, timeout)
class Packet:
"""
The Packet class offers a handy way to build, pack and unpack
binary data based on a given pattern.
"""
def _decode(self, v):
r = long(0)
for i in v:
r = (r << 8) + i
return r
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(int(val & 0xFF))
val = val >> 8
output.reverse()
return output
def __init__(self, desc, packet = None):
    """Build a Packet from a schema description and optional raw data.

    desc is a list of (name, type, size) field descriptors, where type
    is one of 'int' (big-endian integer), 'bint' (bit field packed
    MSB-first), 'string' or 'blob'. A size of None marks a
    variable-length field. packet may be a list of raw byte values to
    parse, a tuple of ready-made field values, or None for an empty
    (all-None) packet.
    """
    offset = 0
    boffset = 0
    sum = 0
    # Walk the descriptors backwards so a variable-length field (size
    # None) followed by fixed fields can be rewritten with a negative
    # size: the total size of the fixed fields after it.
    for i in range(len(desc)-1, -1, -1):
        (n, t, s) = desc[i]
        if s == None:
            if sum > 0:
                desc[i] = (n, t, -sum)
            break
        sum += s
    # Assign via __dict__ to bypass this class's own __setattr__.
    self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]
    self.__dict__['_names'] = [n for (n, t, s) in desc]
    self.__dict__['_values'] = []
    if type(packet) == type([]):
        # A list supplies raw bytes: parse them against the schema.
        for (t, s) in self._schema:
            if t == 'int':
                self._values.append(self._decode(packet[offset:offset + s]))
                offset += s
            elif t == 'bint':
                # Bit fields fill each byte from the most-significant
                # bit downwards.
                doffset = 8 - (boffset + s)
                self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))
                boffset += s
                if boffset == 8:
                    offset += 1
                    boffset = 0
            elif t == 'string':
                self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))
                offset += s
            elif t == 'blob':
                if s:
                    if s > 0:
                        self._values.append(packet[offset:offset + s])
                        offset += s
                    else:
                        # Negative size: variable-length blob that ends
                        # -s bytes before the end of the packet.
                        self._values.append(packet[offset:s])
                        offset = len(packet) + s
                else:
                    # Trailing blob: take everything that is left.
                    self._values.append(packet[offset:])
    elif type(packet) == type(()):
        # A tuple supplies the field values directly.
        for i in packet:
            self._values.append(i)
    else:
        # No data: initialise every field to None.
        for v in self._schema:
            self._values.append(None)
def __repr__(self):
return self._values.__repr__()
def __str__(self):
r = ""
for i in range(len(self._names)):
r += "%s: %s " % (self._names[i], self._values[i])
for i in range(len(self._names), len(self._values)):
r += "%s" % self._values[i]
return r
# return self._values.__str__()
# Implement the map behavior
def __getitem__(self, key):
    # Delegate to __getattr__ so p['field'] and p.field behave alike.
    return self.__getattr__(key)

def __setitem__(self, key, value):
    self.__setattr__(key, value)

def __len__(self):
    # Number of field values (may exceed len(self._names) when callers
    # append extra values).
    return len(self._values)

def keys(self):
    """Return the field names, dict-style."""
    return self._names
def values(self):
    """Return the packet's field values, dict-style.

    Bug fix: this previously returned self._names (a copy-paste of
    keys()), so the dict-style values() accessor yielded field names
    instead of field values, disagreeing with keys()/__getitem__.
    """
    return self._values
# Implement the struct behavior
def __getattr__(self, name):
    #print "DEBUG: __getattr__", name
    # An integer 'name' indexes into the field names; a string looks up
    # the corresponding field value.
    if type(name) == type(0):
        return self._names[name]
    else:
        return self._values[self._names.index(name)]
def __setattr__(self, name, value):
if type(name) == type(0):
|
else:
self._values[self._names.index(name)] = value
def __ne__(self, other):
if other.__class__ == self.__class__:
return self._values != other._values
else:
return True
def __eq__(self, other):
if other.__class__ == self.__class__:
return self._values == other._values
else:
return False
def __nonzero__(self):
    # Always truthy (Python 2 truth protocol); without this, __len__
    # would make an empty packet evaluate as False.
    return True;
# Custom
def names(self):
    """Return the list of field names."""
    return self._names

def sizes(self):
    """Return the schema as a list of (type, size) tuples."""
    return self._schema
def payload(self):
    """Serialise the packet back into a flat list of byte values.

    Inverse of the list-parsing branch of __init__: ints are emitted
    big-endian, bit fields are packed MSB-first into shared bytes, and
    blob/string values are appended as-is. Values beyond the schema are
    appended at the end.
    """
    r = []
    boffset = 0
    for i in range(len(self._schema)):
        (t, s) = self._schema[i]
        if t == 'int':
            r += self._encode(self._values[i], s)
            boffset = 0
        elif t == 'bint':
            doffset = 8 - (boffset + s)
            if boffset == 0:
                # First bit field of a fresh byte.
                r += [self._values[i] << doffset]
            else:
                # OR into the byte started by the previous bit field.
                r[-1] |= self._values[i] << doffset
            boffset += s
            if boffset == 8:
                boffset = 0
        elif self._values[i] != []:
            r += self._values[i]
    # Extra values appended by callers beyond the schema:
    for i in self._values[len(self._schema):]:
        r += i
    return r
class RawPacket(Packet):
    """A timestamped blob of raw packet bytes: ts is the receive time,
    data the list of byte values."""

    def __init__(self, ts = None, data = None):
        Packet.__init__(self,
                        [('ts' , 'int', 4),
                         ('data', 'blob', None)],
                        None)
        self.ts = ts;
        self.data = data
class AckFrame(Packet):
    """Serial ack frame: a protocol byte followed by the acked seqno."""

    def __init__(self, payload = None):
        Packet.__init__(self,
                        [('protocol', 'int', 1),
                         ('seqno', 'int', 1)],
                        payload)
class DataFrame(Packet):
    """Acknowledged serial data frame: protocol, seqno, dispatch byte,
    then the payload."""

    def __init__(self, payload = None):
        if payload != None and type(payload) != type([]):
            # Assume is a Packet
            payload = payload.payload()
        Packet.__init__(self,
                        [('protocol', 'int', 1),
                         ('seqno', 'int', 1),
                         ('dispatch', 'int', 1),
                         ('data', 'blob', None)],
                        payload)
class NoAckDataFrame(Packet):
    """Unacknowledged serial data frame: protocol and dispatch bytes
    (no seqno), then the payload."""

    def __init__(self, payload = None):
        if payload != None and type(payload) != type([]):
            # Assume is a Packet
            payload = payload.payload()
        Packet.__init__(self,
                        [('protocol', 'int', 1),
                         ('dispatch', 'int', 1),
                         ('data', 'blob', None)],
                        payload)
class ActiveMessage(Packet):
    """Active Message packet: destination, source, length, group and AM
    type header fields followed by the payload bytes."""

    def __init__(self, gpacket = None, amid = 0x00, dest = 0xFFFF):
        if type(gpacket) == type([]):
            # A byte list is parsed directly.
            payload = gpacket
        else:
            # Assume this will be derived from Packet
            payload = None
        Packet.__init__(self,
                        [('destination', 'int', 2),
                         ('source', 'int', 2),
                         ('length', 'int', 1),
                         ('group', 'int', 1),
                         ('type', 'int', 1),
                         ('data', 'blob', None)],
                        payload)
        if payload == None:
            # Building a fresh message (not parsing): fill in the header
            # fields and take the payload from the wrapped packet.
            self.destination = dest
            self.source = 0x0000
            self.group = 0x00
            self.type = amid
            self.data = []
            if gpacket:
                self.data = gpacket.payload()
            self.length = len(self.data)
| self._values[name] = value | conditional_block |
tos.py | # Copyright (c) 2008 Johns Hopkins University.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written
# agreement is hereby granted, provided that the above copyright
# notice, the (updated) modification history and the author appear in
# all copies of this source code.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, LOSS OF USE, DATA,
# OR PROFITS) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# @author Razvan Musaloiu-E. <razvanm@cs.jhu.edu>
# @author David Purdy <david@radioretail.co.za>
"""A library that implements the T2 serial communication.
This library has two parts: one that deals with sending and receiving
packets using the serial format from T2 (TEP113) and a second one that
tries to simplify the work with arbitrary packets.
"""
import sys, struct, time, serial, socket, operator, thread
from Queue import Queue
from threading import Lock, Condition
__version__ = "$Id: tos.py,v 1.2 2008/07/20 22:16:50 razvanm Exp $"
__all__ = ['Serial', 'AM',
'Packet', 'RawPacket',
'AckFrame', 'DataFrame', 'NoAckDataFrame',
'ActiveMessage']
ACK_WAIT = 0.2 # Maximum amount of time to wait for an ack
ACK_WARN = 0.2 # Warn if acks take longer than this to arrive
def list2hex(v):
    """Render a byte list as space-separated two-digit hex,
    e.g. [1, 255] -> '01 ff'."""
    return " ".join("%02x" % byte for byte in v)
class Error(Exception):
    """Base error class for this module"""
    pass

class TimeoutError(Error):
    """Thrown when a serial operation times out"""
    pass

class ReadError(Error):
    """Base class for read error exceptions"""
    pass

class WriteError(Error):
    """Base class for write error exceptions"""
    pass

class ReadTimeoutError(TimeoutError, ReadError):
    """Thrown when a serial read operation times out"""
    pass

class ReadCRCError(ReadError):
    """Thrown when a read packet fails a CRC check"""
    pass

class BadAckSeqnoError(ReadError):
    """Thrown if an ack packet has an unexpected sequence number"""
    pass

class WriteTimeoutError(TimeoutError, WriteError):
    """Thrown when a serial write operation times out"""
    pass
class SimpleSerial:
"""
A SimpleSerial object offers a way to send and data using a HDLC-like
formating.
Use SimpleSerial objects for basic low-level serial communications. Use
Serial objects for higher level logic (retry sends, log printfs, etc).
"""
HDLC_FLAG_BYTE = 0x7e
HDLC_CTLESC_BYTE = 0x7d
TOS_SERIAL_ACTIVE_MESSAGE_ID = 0
TOS_SERIAL_CC1000_ID = 1
TOS_SERIAL_802_15_4_ID = 2
TOS_SERIAL_UNKNOWN_ID = 255
SERIAL_PROTO_ACK = 67
SERIAL_PROTO_PACKET_ACK = 68
SERIAL_PROTO_PACKET_NOACK = 69
SERIAL_PROTO_PACKET_UNKNOWN = 255
def __init__(self, port, baudrate, flush=False, debug=False, qsize=10,
timeout=None):
self._debug = debug
self._in_queue = []
self._qsize = qsize
self._ack = None
self._write_counter = 0
self._write_counter_failures = 0
self._read_counter = 0
self._ts = None
self.timeout = timeout # Public attribute
self._received_packet_filters = [] # filter functions for received packets
# Remember sent (and unacknowledged) seqno numbers for 15 seconds:
self._unacked_seqnos = SeqTracker(15.0)
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=0.5)
self._s.flushInput()
if flush:
print >>sys.stdout, "Flushing the serial port",
endtime = time.time() + 1
while time.time() < endtime:
try:
self._read()
except ReadError:
pass
sys.stdout.write(".")
if not self._debug:
sys.stdout.write("\n")
self._s.close()
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=timeout)
# Add a filter for received 'write ack' packets
self.add_received_packet_filter(self._write_ack_filter)
# Returns the next incoming serial packet
def _read(self, timeout=None):
"""Wait for a packet and return it as a RawPacket.
Throws:
- ReadCRCError if a CRC check fails
- ReadTimeoutError if the timeout expires.
"""
# Developer notes:
#
# Packet data read from Serial is in this format:
# [HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
#
# [Escaped data] is encoded so that [HDLC_FLAG_BYTE] byte
# values cannot occur within it. When [Escaped data] has been
# unescaped, the last 2 bytes are a 16-bit CRC of the earlier
# part of the packet (excluding the initial HDLC_FLAG_BYTE
# byte)
#
# It's also possible that the serial device was half-way
# through transmitting a packet when this function was called
# (app was just started). So we also neeed to handle this case:
#
# [Incomplete escaped data][HDLC_FLAG_BYTE][HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
#
# In this case we skip over the first (incomplete) packet.
#
if self._s.timeout != timeout and timeout != None:
if self._debug:
print "Set the timeout to %s, previous one was %s" % (timeout, self._s.timeout)
self._s.timeout = timeout
try:
# Read bytes until we get to a HDLC_FLAG_BYTE value
# (either the end of a packet, or the start of a new one)
d = self._get_byte(timeout)
ts = time.time()
if self._debug and d != self.HDLC_FLAG_BYTE:
print "Skipping incomplete packet"
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte(timeout)
ts = time.time()
# Store HDLC_FLAG_BYTE at the start of the retrieved packet
# data:
packet = [d]
# Is the next byte also HDLC_FLAG_BYTE?
d = self._get_byte(timeout)
if d == self.HDLC_FLAG_BYTE:
# Yes. This means that the previous byte was for
# the end of the previous packet, and this byte is for
# the start of the next packet.
# Get the 2nd byte of the new packet:
d = self._get_byte(timeout)
ts = time.time()
# We are now on the 2nd byte of the packet. Add it to
# our retrieved packet data:
packet.append(d)
# Read bytes from serial until we read another
# HDLC_FLAG_BYTE value (end of the current packet):
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte(timeout)
packet.append(d)
# Done reading a whole packet from serial
if self._debug:
print "SimpleSerial:_read: unescaped", packet
# Decode the packet, and check CRC:
packet = self._unescape(packet)
crc = self._crc16(0, packet[1:-3])
packet_crc = self._decode(packet[-3:-1])
if crc != packet_crc:
print "Warning: wrong CRC! %x != %x %s" % (crc, packet_crc, ["%2x" % i for i in packet])
raise ReadCRCError
if self._debug:
if self._ts == None:
self._ts = ts
else:
print "Serial:_read: %.4f (%.4f) Recv:" % (ts, ts - self._ts), self._format_packet(packet[1:-3])
self._ts = ts
# Packet was successfully retrieved, so return it in a
# RawPacket wrapper object (but leave out the
# HDLC_FLAG_BYTE and CRC bytes)
return RawPacket(ts, packet[1:-3])
except socket.timeout:
raise ReadTimeoutError
def _write_ack_filter(self, packet):
"""Filter for recieved write acknowledgement packets"""
ack = AckFrame(packet.data)
if ack.protocol == self.SERIAL_PROTO_ACK:
if self._debug:
print "_filter_read: got an ack:", ack
self._ack = ack
packet = None # No further processing of received ack packet
return packet
def _filter_read(self, timeout=None):
"""Read a packet from the serial device, perform filtering, and return
the packet if it hasn't been processed yet.
"""
p = self._read(timeout)
self._read_counter += 1
if self._debug:
print "_filter_read: got a packet(%d): %s" % (self._read_counter, p)
# Pass the received packet through the filter functions:
if p is not None:
for filter_func in self._received_packet_filters:
p = filter_func(p)
# Stop now if the packet doesn't need further processing:
if p is None:
break
# Return the packet (if there was no timeout and it wasn't filtered)
return p
def _get_ack(self, timeout, expected_seqno):
"""Get the next ack packet
Read packets from the serial device until we get the next ack (which
then gets stored in self._ack), or the timeout expires. non-ack packets
are buffered.
Throws:
- ReadTimeoutError if the timeout expires.
- BadAckSeqnoError if an ack with a bad sequence number is received
"""
endtime = time.time() + timeout
while time.time() < endtime:
# Read the a packet over serial
self._ack = None
remaining = endtime - time.time()
p = self._filter_read(timeout)
# Was the packet filtered?
if p:
# Got an unfiltered packet
if len(self._in_queue) >= self._qsize:
print "Warning: Buffer overflow"
self._in_queue.pop(0)
self._in_queue.append(p)
else:
# Packet was filtered. Was it an ack?
if self._ack is not None:
# The packet was an ack, so remove it from our
# 'unacknowledged seqnos' list (or raise a BadAckSeqnoError
# error if it isn't in the list)
self._unacked_seqnos.seqno_acked(self._ack.seqno)
# Stop reading packets if it's the ack we are waiting for:
if self._ack.seqno == expected_seqno:
return
# Timed out
raise ReadTimeoutError
def close(self):
"""Close the serial device"""
self._s.close()
def read(self, timeout=None):
"""Read a packet, either from the input buffer or from the serial
device.
Returns a RawPacket object, otherwise None if the packet was filtered
(by eg: Serial's printf-filtering function)
Does not retry reads if the first one fails. Use Serial.read() for
that.
"""
if self._in_queue:
return self._in_queue.pop(0)
else:
return self._filter_read(timeout)
def write(self, payload, seqno, timeout=0.2):
"""
Write a packet. If the payload argument is a list, it is
assumed to be exactly the payload. Otherwise the payload is
assume to be a Packet and the real payload is obtain by
calling the .payload().
Only attempts to write once, and times out if an ack packet is not
received within [timeout] seconds. Use Serial.write() if you want
automatic write retries.
seqno should be an integer between 0 and 99 which changes each time you
send a new packet. The value should remain the same when you are
retrying a packet write that just failed.
Raises WriteTimeoutError if the write times out (ack packet doesn't
arrive within [timeout] seconds).
"""
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
packet = DataFrame();
packet.protocol = self.SERIAL_PROTO_PACKET_ACK
packet.seqno = seqno
packet.dispatch = 0
packet.data = payload
packet = packet.payload()
crc = self._crc16(0, packet)
packet.append(crc & 0xff)
packet.append((crc >> 8) & 0xff)
packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]
# Write the packet:
self._unacked_seqnos.seqno_sent(seqno) # Keep track of sent seqno's
self._put_bytes(packet)
self._write_counter += 1
# Wait for an ack packet:
if self._debug:
print "Send(%d/%d): %s" % (self._write_counter, self._write_counter_failures, packet)
print "Wait for ack %d ..." % (seqno)
try:
self._get_ack(timeout, seqno)
except ReadTimeoutError:
# Re-raise read timeouts (of ack packets) as write timeouts (of
# the write operation)
self._write_counter_failures += 1
raise WriteTimeoutError
# Received an ack packet, with the expected sequence number
if self._debug:
print "Wait for ack %d done. Latest ack:" % (seqno), self._ack
print "The packet was acked."
print "Returning from SimpleSerial.write..."
def add_received_packet_filter(self, filter_func):
"""Register a received packet-filtering callback function
_filter_read() calls all of the registered filter functions for each
packet received over serial. Registered filter functions are called in
the order they were registered.
Filter functions are called like this: filter_func(packet)
When a filter function recognises and handles a received packet it
should return a None value to indicate that no further processing
is required for the packet.
When a filter function skips a packet (or for some reason you want
further processing to happen on a packet you've just processed), the
function should return the packet that was passed to it as an argument.
"""
self._received_packet_filters.append(filter_func)
def remove_received_packet_filter(self, filter_func):
"""Remove a filter function added with add_received_packet_filter()"""
self._received_packet_filters.remove(filter_func)
def _format_packet(self, payload):
f = NoAckDataFrame(payload)
if f.protocol == self.SERIAL_PROTO_ACK:
rpacket = AckFrame(payload)
return "Ack seqno: %d" % (rpacket.seqno)
else:
rpacket = ActiveMessage(f.data)
return "D: %04x S: %04x L: %02x G: %02x T: %02x | %s" % \
(rpacket.destination, rpacket.source,
rpacket.length, rpacket.group, rpacket.type,
list2hex(rpacket.data))
def _crc16(self, base_crc, frame_data):
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def _decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
def _get_byte(self, timeout=None):
# old_timeout = self._s.timeout
# if timeout is not None:
# self._s.timeout = timeout
try:
r = struct.unpack("B", self._s.read())[0]
return r
except struct.error:
# Serial port read timeout
raise socket.timeout
# finally:
# self._s.timeout = old_timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
def debug(self, debug):
self._debug = debug
class SeqTracker:
    """Class for keeping track of unacknowledged packet sequence numbers.

    SeqTracker is used by SimpleSerial to keep track of sequence numbers
    which have been sent with write packets, but not yet acknowledged by
    received write ack packets.
    """

    def __init__(self, keep_for):
        """Initialise a SeqTracker object.

        args:
        - keep_for is the length of time (in seconds) for which
          unacknowledged sequence numbers should be remembered. After
          this period has elapsed the sequence numbers are forgotten;
          if such a sequence number is acknowledged later it is treated
          as unknown.
        """
        self._keep_for = keep_for
        # Queue of (seqno, send_time) tuples, oldest first.
        self._queue = []

    def seqno_sent(self, seqno):
        """Register that a packet with the specified sequence number was
        just sent."""
        self._gc()
        self._queue.append((seqno, time.time()))

    def seqno_acked(self, seqno):
        """Register that a sequence number was just acknowledged.

        Find the oldest-known occurrence of seqno in the queue and
        remove it. If not found then raise a BadAckSeqnoError to inform
        applications that the sequence number is not known.
        """
        self._gc()
        for item in self._queue:
            if item[0] == seqno:
                # Found seqno
                self._queue.remove(item)
                return
        # seqno not found!
        raise BadAckSeqnoError

    def get_seqno_sent_times(self, seqno):
        """Return the times when packets with the given sequence number
        were sent."""
        self._gc()
        return [item[1] for item in self._queue if item[0] == seqno]

    def __contains__(self, seqno):
        """Return True if the seqno was sent recently (and not
        acknowledged yet)."""
        self._gc()
        for item in self._queue:
            if item[0] == seqno:
                return True
        return False

    def _gc(self):
        """Remove expired items from the front of the queue.

        Bug fix: the original removed items from the list while
        iterating over it, which skips the element following each
        removal and can leave expired entries behind. The queue is
        ordered oldest-first, so popping from the front until a fresh
        entry is met is the correct equivalent.
        """
        remove_before = time.time() - self._keep_for
        while self._queue and self._queue[0][1] < remove_before:
            self._queue.pop(0)
class Serial:
"""
Wraps a SimpleSerial object, and provides some higher-level functionality
like retrying writes and logging printf packets.
"""
def __init__(self, port, baudrate, flush=False, debug=False, qsize=10,
timeout=None):
"""Initialise a Serial object"""
self._debug = debug
self.timeout = timeout # Public attribute
self._seqno = 0
self._simple_serial = SimpleSerial(port, baudrate, flush, debug, qsize,
timeout)
# Setup automatic logging of received printf packets:
self._printf_msg = ""
self._simple_serial.add_received_packet_filter(self._printf_filter)
def close(self):
"""Close the serial device"""
self._simple_serial.close()
def read(self, timeout=None):
"""Read a packet from the serial port.
Retries packet reads until the timeout expires.
Throws ReadTimeoutError if a a packet can't be read within the timeout.
"""
if timeout is None:
timeout = self.timeout
endtime = None
if timeout is not None:
endtime = time.time() + timeout
while endtime is None or time.time() < endtime:
remaining = None
if endtime is not None:
remaining = endtime - time.time()
try:
p = self._simple_serial.read(remaining)
except ReadError:
if self._debug:
print "Packet read failed. Try again."
else:
# Was the packet filtered?
if p is not None:
# Not filtered, so return it.
# In the current TinyOS the packets from the mote are
# always NoAckDataFrame
return NoAckDataFrame(p.data)
# Read timeout expired
raise ReadTimeoutError
def write(self, payload, timeout=None):
"""Write a packet to the serial port
Keeps retrying endlessly, unless a timeout is set. If the timeout
expires then WriteTimeoutError is thrown.
"""
if timeout is None:
timeout = self.timeout
endtime = None
if timeout is not None:
endtime = time.time() + timeout
# Generate the next sequence number:
self._seqno = (self._seqno + 1) % 100
while endtime is None or time.time() < endtime:
try:
ackwait = ACK_WAIT
if endtime is not None:
remaining = endtime - time.time()
ackwait = min(ACK_WAIT, remaining)
before = time.time()
self._simple_serial.write(payload, self._seqno, ackwait)
length = time.time() - before
if length >= ACK_WARN:
print "Warning: Packet write took %.3fs!" % (length)
return True
except Error:
if self._debug:
print "The packet was not acked. Try again."
# Write operation timed out
raise WriteTimeoutError
def _printf_filter(self, packet):
"""Filter for recieved printf packets"""
ampkt = ActiveMessage(NoAckDataFrame(packet.data).data)
if ampkt.type == 100:
self._printf_msg += "".join([chr(i) for i in ampkt.data]).strip('\0')
# Split printf data on newline character:
# (last string in the split list doesn't have a newline after
# it, so we keep it until next time)
lines = self._printf_msg.split('\n')
for line in lines[:-1]:
print "PRINTF:", line
self._printf_msg = lines[-1]
packet = None # No further processing for the printf packet
return packet
class SFClient:
def __init__(self, host, port, qsize=10):
self._in_queue = Queue(qsize)
self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
data = self._s.recv(2)
if data != 'U ':
print "Wrong handshake"
self._s.send("U ")
print "Connected"
thread.start_new_thread(self.run, ())
def run(self):
while True:
length = ord(self._s.recv(1))
data = self._s.recv(length)
data = [ord(c) for c in data][1:]
#print "Recv %d bytes" % (length), ActiveMessage(data)
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
p = RawPacket()
p.data = data
self._in_queue.put(p, block=False)
def read(self, timeout=0):
return self._in_queue.get()
def write(self, payload):
print "SFClient: write:", payload
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
payload = [0] + payload
self._s.send(chr(len(payload)))
self._s.send(''.join([chr(c) for c in payload]))
return True
class AM:
def __init__(self, s):
self._s = s
def read(self, timeout=None):
return ActiveMessage(self._s.read(timeout).data)
def write(self, packet, amid, timeout=None):
return self._s.write(ActiveMessage(packet, amid=amid), timeout=timeout)
class SimpleSerialAM(SimpleSerial):
"""A derived class of SimpleSerial so that apps can read and write using
higher-level packet structures.
Serves a simalar purpose to the AM class, but for SimpleSerial objects
instead instead of Serial.
"""
def read_am(self, timeout=None):
"""Read a RawPacket object (or None), convert it to ActiveMessage
(or None), and return to the caller"""
# Get a tos.Rawpacket (or None, if filtered) object
p = self.read(timeout)
if p is not None:
assert isinstance(p, RawPacket)
# Convert tos.RawPacket object into an ActiveMessage:
p = NoAckDataFrame(p.data)
p = ActiveMessage(p.data)
# Return the ActiveMessage (or None) packet:
return p
def write_am(self, packet, amid, seqno, timeout=2.0):
"""Convert app packet format to ActiveMessage, and write the
ActiveMessage packet to serial"""
# Convert from app-specific packet to ActiveMessage:
p = ActiveMessage(packet, amid=amid)
# Write to the serial device
self.write(p, seqno, timeout)
class Packet:
"""
The Packet class offers a handy way to build pack and unpack
binary data based on a given pattern.
"""
def _decode(self, v):
r = long(0)
for i in v:
r = (r << 8) + i
return r
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(int(val & 0xFF))
val = val >> 8
output.reverse()
return output
def __init__(self, desc, packet = None):
offset = 0
boffset = 0
sum = 0
for i in range(len(desc)-1, -1, -1):
(n, t, s) = desc[i]
if s == None:
if sum > 0:
desc[i] = (n, t, -sum)
break
sum += s
self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]
self.__dict__['_names'] = [n for (n, t, s) in desc]
self.__dict__['_values'] = []
if type(packet) == type([]):
for (t, s) in self._schema:
if t == 'int':
self._values.append(self._decode(packet[offset:offset + s]))
offset += s
elif t == 'bint':
doffset = 8 - (boffset + s)
self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))
boffset += s
if boffset == 8:
offset += 1
boffset = 0
elif t == 'string':
self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))
offset += s
elif t == 'blob':
if s:
if s > 0:
self._values.append(packet[offset:offset + s])
offset += s
else:
self._values.append(packet[offset:s])
offset = len(packet) + s
else:
self._values.append(packet[offset:])
elif type(packet) == type(()):
for i in packet:
self._values.append(i)
else:
for v in self._schema:
self._values.append(None)
def __repr__(self):
return self._values.__repr__()
def __str__(self):
r = ""
for i in range(len(self._names)):
r += "%s: %s " % (self._names[i], self._values[i])
for i in range(len(self._names), len(self._values)):
r += "%s" % self._values[i]
return r
# return self._values.__str__()
# Implement the map behavior
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __len__(self):
return len(self._values)
def keys(self):
return self._names
def values(self):
return self._names
# Implement the struct behavior
def __getattr__(self, name):
#print "DEBUG: __getattr__", name
if type(name) == type(0):
return self._names[name]
else:
return self._values[self._names.index(name)]
def __setattr__(self, name, value):
if type(name) == type(0):
self._values[name] = value
else:
self._values[self._names.index(name)] = value
def __ne__(self, other):
if other.__class__ == self.__class__:
return self._values != other._values
else:
return True
def __eq__(self, other):
if other.__class__ == self.__class__:
return self._values == other._values
else:
return False
def __nonzero__(self):
return True;
# Custom
def names(self):
return self._names
def sizes(self):
return self._schema
def payload(self):
r = []
boffset = 0
for i in range(len(self._schema)):
(t, s) = self._schema[i]
if t == 'int':
r += self._encode(self._values[i], s)
boffset = 0
elif t == 'bint':
doffset = 8 - (boffset + s)
if boffset == 0:
r += [self._values[i] << doffset]
else:
r[-1] |= self._values[i] << doffset
boffset += s
if boffset == 8:
boffset = 0
elif self._values[i] != []:
r += self._values[i]
for i in self._values[len(self._schema):]:
r += i
return r
class RawPacket(Packet):
def __init__(self, ts = None, data = None):
Packet.__init__(self,
[('ts' , 'int', 4),
('data', 'blob', None)],
None)
self.ts = ts;
self.data = data
class AckFrame(Packet):
def __init__(self, payload = None):
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1)],
payload)
class DataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
# Assume is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class NoAckDataFrame(Packet):
|
class ActiveMessage(Packet):
def __init__(self, gpacket = None, amid = 0x00, dest = 0xFFFF):
if type(gpacket) == type([]):
payload = gpacket
else:
# Assume this will be derived from Packet
payload = None
Packet.__init__(self,
[('destination', 'int', 2),
('source', 'int', 2),
('length', 'int', 1),
('group', 'int', 1),
('type', 'int', 1),
('data', 'blob', None)],
payload)
if payload == None:
self.destination = dest
self.source = 0x0000
self.group = 0x00
self.type = amid
self.data = []
if gpacket:
self.data = gpacket.payload()
self.length = len(self.data)
| def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
# Assume is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload) | identifier_body |
tos.py | # Copyright (c) 2008 Johns Hopkins University.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written
# agreement is hereby granted, provided that the above copyright
# notice, the (updated) modification history and the author appear in
# all copies of this source code.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, LOSS OF USE, DATA,
# OR PROFITS) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# @author Razvan Musaloiu-E. <razvanm@cs.jhu.edu>
# @author David Purdy <david@radioretail.co.za>
"""A library that implements the T2 serial communication.
This library has two parts: one that deals with sending and receiving
packets using the serial format from T2 (TEP113) and a second one that
tries to simplifies the work with arbitrary packets.
"""
import sys, struct, time, serial, socket, operator, thread
from Queue import Queue
from threading import Lock, Condition
__version__ = "$Id: tos.py,v 1.2 2008/07/20 22:16:50 razvanm Exp $"
__all__ = ['Serial', 'AM',
'Packet', 'RawPacket',
'AckFrame', 'DataFrame', 'NoAckDataFrame',
'ActiveMessage']
ACK_WAIT = 0.2 # Maximum amount of time to wait for an ack
ACK_WARN = 0.2 # Warn if acks take longer than this to arrive
def list2hex(v):
return " ".join(["%02x" % p for p in v])
class Error(Exception):
"""Base error class for this module"""
pass
class TimeoutError(Error):
"""Thrown when a serial operation times out"""
pass
class ReadError(Error):
"""Base class for read error exceptions"""
pass
class WriteError(Error):
"""Base class for write error exceptions"""
pass
class ReadTimeoutError(TimeoutError, ReadError):
"""Thrown when a serial read operation times out"""
pass
class ReadCRCError(ReadError):
"""Thrown when a read packet fails a CRC check"""
pass
class BadAckSeqnoError(ReadError):
"""Thrown if an ack packet has an unexpected sequenc number"""
pass
class WriteTimeoutError(TimeoutError, WriteError):
"""Thrown when a serial write operation times out"""
pass
class SimpleSerial:
"""
A SimpleSerial object offers a way to send and data using a HDLC-like
formating.
Use SimpleSerial objects for basic low-level serial communications. Use
Serial objects for higher level logic (retry sends, log printfs, etc).
"""
HDLC_FLAG_BYTE = 0x7e
HDLC_CTLESC_BYTE = 0x7d
TOS_SERIAL_ACTIVE_MESSAGE_ID = 0
TOS_SERIAL_CC1000_ID = 1
TOS_SERIAL_802_15_4_ID = 2
TOS_SERIAL_UNKNOWN_ID = 255
SERIAL_PROTO_ACK = 67
SERIAL_PROTO_PACKET_ACK = 68
SERIAL_PROTO_PACKET_NOACK = 69
SERIAL_PROTO_PACKET_UNKNOWN = 255
def __init__(self, port, baudrate, flush=False, debug=False, qsize=10,
timeout=None):
self._debug = debug
self._in_queue = []
self._qsize = qsize
self._ack = None
self._write_counter = 0
self._write_counter_failures = 0
self._read_counter = 0
self._ts = None
self.timeout = timeout # Public attribute
self._received_packet_filters = [] # filter functions for received packets
# Remember sent (and unacknowledged) seqno numbers for 15 seconds:
self._unacked_seqnos = SeqTracker(15.0)
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=0.5)
self._s.flushInput()
if flush:
print >>sys.stdout, "Flushing the serial port",
endtime = time.time() + 1
while time.time() < endtime:
try:
self._read()
except ReadError:
pass
sys.stdout.write(".")
if not self._debug:
sys.stdout.write("\n")
self._s.close()
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=timeout)
# Add a filter for received 'write ack' packets
self.add_received_packet_filter(self._write_ack_filter)
# Returns the next incoming serial packet
def _read(self, timeout=None):
"""Wait for a packet and return it as a RawPacket.
Throws:
- ReadCRCError if a CRC check fails
- ReadTimeoutError if the timeout expires.
"""
# Developer notes:
#
# Packet data read from Serial is in this format:
# [HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
#
# [Escaped data] is encoded so that [HDLC_FLAG_BYTE] byte
# values cannot occur within it. When [Escaped data] has been
# unescaped, the last 2 bytes are a 16-bit CRC of the earlier
# part of the packet (excluding the initial HDLC_FLAG_BYTE
# byte)
#
# It's also possible that the serial device was half-way
# through transmitting a packet when this function was called
# (app was just started). So we also neeed to handle this case:
#
# [Incomplete escaped data][HDLC_FLAG_BYTE][HDLC_FLAG_BYTE][Escaped data][HDLC_FLAG_BYTE]
#
# In this case we skip over the first (incomplete) packet.
#
if self._s.timeout != timeout and timeout != None:
if self._debug:
print "Set the timeout to %s, previous one was %s" % (timeout, self._s.timeout)
self._s.timeout = timeout
try:
# Read bytes until we get to a HDLC_FLAG_BYTE value
# (either the end of a packet, or the start of a new one)
d = self._get_byte(timeout)
ts = time.time()
if self._debug and d != self.HDLC_FLAG_BYTE:
print "Skipping incomplete packet"
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte(timeout)
ts = time.time()
# Store HDLC_FLAG_BYTE at the start of the retrieved packet
# data:
packet = [d]
# Is the next byte also HDLC_FLAG_BYTE?
d = self._get_byte(timeout)
if d == self.HDLC_FLAG_BYTE:
# Yes. This means that the previous byte was for
# the end of the previous packet, and this byte is for
# the start of the next packet.
# Get the 2nd byte of the new packet:
d = self._get_byte(timeout)
ts = time.time()
# We are now on the 2nd byte of the packet. Add it to
# our retrieved packet data:
packet.append(d)
# Read bytes from serial until we read another
# HDLC_FLAG_BYTE value (end of the current packet):
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte(timeout)
packet.append(d)
# Done reading a whole packet from serial
if self._debug:
print "SimpleSerial:_read: unescaped", packet
# Decode the packet, and check CRC:
packet = self._unescape(packet)
crc = self._crc16(0, packet[1:-3])
packet_crc = self._decode(packet[-3:-1])
if crc != packet_crc:
print "Warning: wrong CRC! %x != %x %s" % (crc, packet_crc, ["%2x" % i for i in packet])
raise ReadCRCError
if self._debug:
if self._ts == None:
self._ts = ts
else:
print "Serial:_read: %.4f (%.4f) Recv:" % (ts, ts - self._ts), self._format_packet(packet[1:-3])
self._ts = ts
# Packet was successfully retrieved, so return it in a
# RawPacket wrapper object (but leave out the
# HDLC_FLAG_BYTE and CRC bytes)
return RawPacket(ts, packet[1:-3])
except socket.timeout:
raise ReadTimeoutError
def _write_ack_filter(self, packet):
"""Filter for recieved write acknowledgement packets"""
ack = AckFrame(packet.data)
if ack.protocol == self.SERIAL_PROTO_ACK:
if self._debug:
print "_filter_read: got an ack:", ack
self._ack = ack
packet = None # No further processing of received ack packet
return packet
def _filter_read(self, timeout=None):
"""Read a packet from the serial device, perform filtering, and return
the packet if it hasn't been processed yet.
"""
p = self._read(timeout)
self._read_counter += 1
if self._debug:
print "_filter_read: got a packet(%d): %s" % (self._read_counter, p)
# Pass the received packet through the filter functions:
if p is not None:
for filter_func in self._received_packet_filters:
p = filter_func(p)
# Stop now if the packet doesn't need further processing:
if p is None:
break
# Return the packet (if there was no timeout and it wasn't filtered)
return p
def _get_ack(self, timeout, expected_seqno):
"""Get the next ack packet
Read packets from the serial device until we get the next ack (which
then gets stored in self._ack), or the timeout expires. non-ack packets
are buffered.
Throws:
- ReadTimeoutError if the timeout expires.
- BadAckSeqnoError if an ack with a bad sequence number is received
"""
endtime = time.time() + timeout
while time.time() < endtime:
# Read the a packet over serial
self._ack = None
remaining = endtime - time.time()
p = self._filter_read(timeout)
# Was the packet filtered?
if p:
# Got an unfiltered packet
if len(self._in_queue) >= self._qsize:
print "Warning: Buffer overflow"
self._in_queue.pop(0)
self._in_queue.append(p)
else:
# Packet was filtered. Was it an ack?
if self._ack is not None:
# The packet was an ack, so remove it from our
# 'unacknowledged seqnos' list (or raise a BadAckSeqnoError
# error if it isn't in the list)
self._unacked_seqnos.seqno_acked(self._ack.seqno)
# Stop reading packets if it's the ack we are waiting for:
if self._ack.seqno == expected_seqno:
return
# Timed out
raise ReadTimeoutError
def close(self):
"""Close the serial device"""
self._s.close()
def read(self, timeout=None):
"""Read a packet, either from the input buffer or from the serial
device.
Returns a RawPacket object, otherwise None if the packet was filtered
(by eg: Serial's printf-filtering function)
Does not retry reads if the first one fails. Use Serial.read() for
that.
"""
if self._in_queue:
return self._in_queue.pop(0)
else:
return self._filter_read(timeout)
def write(self, payload, seqno, timeout=0.2):
"""
Write a packet. If the payload argument is a list, it is
assumed to be exactly the payload. Otherwise the payload is
assume to be a Packet and the real payload is obtain by
calling the .payload().
Only attempts to write once, and times out if an ack packet is not
received within [timeout] seconds. Use Serial.write() if you want
automatic write retries.
seqno should be an integer between 0 and 99 which changes each time you
send a new packet. The value should remain the same when you are
retrying a packet write that just failed.
Raises WriteTimeoutError if the write times out (ack packet doesn't
arrive within [timeout] seconds).
"""
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
packet = DataFrame();
packet.protocol = self.SERIAL_PROTO_PACKET_ACK
packet.seqno = seqno
packet.dispatch = 0
packet.data = payload
packet = packet.payload()
crc = self._crc16(0, packet)
packet.append(crc & 0xff)
packet.append((crc >> 8) & 0xff)
packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]
# Write the packet:
self._unacked_seqnos.seqno_sent(seqno) # Keep track of sent seqno's
self._put_bytes(packet)
self._write_counter += 1
# Wait for an ack packet:
if self._debug: | print "Wait for ack %d ..." % (seqno)
try:
self._get_ack(timeout, seqno)
except ReadTimeoutError:
# Re-raise read timeouts (of ack packets) as write timeouts (of
# the write operation)
self._write_counter_failures += 1
raise WriteTimeoutError
# Received an ack packet, with the expected sequence number
if self._debug:
print "Wait for ack %d done. Latest ack:" % (seqno), self._ack
print "The packet was acked."
print "Returning from SimpleSerial.write..."
def add_received_packet_filter(self, filter_func):
"""Register a received packet-filtering callback function
_filter_read() calls all of the registered filter functions for each
packet received over serial. Registered filter functions are called in
the order they were registered.
Filter functions are called like this: filter_func(packet)
When a filter function recognises and handles a received packet it
should return a None value to indicate that no further processing
is required for the packet.
When a filter function skips a packet (or for some reason you want
further processing to happen on a packet you've just processed), the
function should return the packet that was passed to it as an argument.
"""
self._received_packet_filters.append(filter_func)
def remove_received_packet_filter(self, filter_func):
"""Remove a filter function added with add_received_packet_filter()"""
self._received_packet_filters.remove(filter_func)
def _format_packet(self, payload):
f = NoAckDataFrame(payload)
if f.protocol == self.SERIAL_PROTO_ACK:
rpacket = AckFrame(payload)
return "Ack seqno: %d" % (rpacket.seqno)
else:
rpacket = ActiveMessage(f.data)
return "D: %04x S: %04x L: %02x G: %02x T: %02x | %s" % \
(rpacket.destination, rpacket.source,
rpacket.length, rpacket.group, rpacket.type,
list2hex(rpacket.data))
def _crc16(self, base_crc, frame_data):
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def _decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
def _get_byte(self, timeout=None):
# old_timeout = self._s.timeout
# if timeout is not None:
# self._s.timeout = timeout
try:
r = struct.unpack("B", self._s.read())[0]
return r
except struct.error:
# Serial port read timeout
raise socket.timeout
# finally:
# self._s.timeout = old_timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
def debug(self, debug):
self._debug = debug
class SeqTracker:
"""Class for keeping track of unacknowledged packet sequence numbers.
SeqTracker is used by SimpleSerial to keep track of sequence numbers which
have been sent with write packets, but not yet acknowledged by received
write ack packets.
"""
def __init__(self, keep_for):
"""Initialise a SeqTracker object.
args:
- keep_for is the length of time for which unacknowledged sequence
numbers should be remembered. After this period has elapsed, the
sequence numbers should be forgotten. If the sequence number is
acknowledged later, it will be treated as unkown
"""
self._keep_for = keep_for
self._queue = []
def seqno_sent(self, seqno):
"""Register that a packet with the specified sequence number was just
sent."""
self._gc()
self._queue.append((seqno, time.time()))
def seqno_acked(self, seqno):
"""Register that a sequence number was just acknowledged.
Find the oldest-known occurance of seqno in the queue and remove it. If
not found then raise a BadAckSeqnoError to inform applications that
the sequence number is not known.
"""
self._gc()
for item in self._queue:
if item[0] == seqno:
# Found seqno
self._queue.remove(item)
return
# seqno not found!
raise BadAckSeqnoError
def get_seqno_sent_times(self, seqno):
"""Return the times when packets with the given sequence number were
sent."""
self._gc()
return [item[1] for item in self._queue if item[0] == seqno]
def __contains__(self, seqno):
"""Return True if the seqno was sent recently (and not acknowledged
yet)"""
self._gc()
for item in self._queue:
if item[0] == seqno:
return True
return False
def _gc(self):
"""Remove old items from the queue"""
remove_before = time.time() - self._keep_for
for item in self._queue:
# Time for the sequence to be removed?
if item[1] < remove_before:
# Sequence data is old, so remove it
self._queue.remove(item)
else:
# Sequence number was added recently, so don't remove it. Also
# stop processing the queue because all later items will be
# newer
break
class Serial:
"""
Wraps a SimpleSerial object, and provides some higher-level functionality
like retrying writes and logging printf packets.
"""
def __init__(self, port, baudrate, flush=False, debug=False, qsize=10,
timeout=None):
"""Initialise a Serial object"""
self._debug = debug
self.timeout = timeout # Public attribute
self._seqno = 0
self._simple_serial = SimpleSerial(port, baudrate, flush, debug, qsize,
timeout)
# Setup automatic logging of received printf packets:
self._printf_msg = ""
self._simple_serial.add_received_packet_filter(self._printf_filter)
def close(self):
"""Close the serial device"""
self._simple_serial.close()
def read(self, timeout=None):
"""Read a packet from the serial port.
Retries packet reads until the timeout expires.
Throws ReadTimeoutError if a a packet can't be read within the timeout.
"""
if timeout is None:
timeout = self.timeout
endtime = None
if timeout is not None:
endtime = time.time() + timeout
while endtime is None or time.time() < endtime:
remaining = None
if endtime is not None:
remaining = endtime - time.time()
try:
p = self._simple_serial.read(remaining)
except ReadError:
if self._debug:
print "Packet read failed. Try again."
else:
# Was the packet filtered?
if p is not None:
# Not filtered, so return it.
# In the current TinyOS the packets from the mote are
# always NoAckDataFrame
return NoAckDataFrame(p.data)
# Read timeout expired
raise ReadTimeoutError
def write(self, payload, timeout=None):
"""Write a packet to the serial port
Keeps retrying endlessly, unless a timeout is set. If the timeout
expires then WriteTimeoutError is thrown.
"""
if timeout is None:
timeout = self.timeout
endtime = None
if timeout is not None:
endtime = time.time() + timeout
# Generate the next sequence number:
self._seqno = (self._seqno + 1) % 100
while endtime is None or time.time() < endtime:
try:
ackwait = ACK_WAIT
if endtime is not None:
remaining = endtime - time.time()
ackwait = min(ACK_WAIT, remaining)
before = time.time()
self._simple_serial.write(payload, self._seqno, ackwait)
length = time.time() - before
if length >= ACK_WARN:
print "Warning: Packet write took %.3fs!" % (length)
return True
except Error:
if self._debug:
print "The packet was not acked. Try again."
# Write operation timed out
raise WriteTimeoutError
def _printf_filter(self, packet):
"""Filter for recieved printf packets"""
ampkt = ActiveMessage(NoAckDataFrame(packet.data).data)
if ampkt.type == 100:
self._printf_msg += "".join([chr(i) for i in ampkt.data]).strip('\0')
# Split printf data on newline character:
# (last string in the split list doesn't have a newline after
# it, so we keep it until next time)
lines = self._printf_msg.split('\n')
for line in lines[:-1]:
print "PRINTF:", line
self._printf_msg = lines[-1]
packet = None # No further processing for the printf packet
return packet
class SFClient:
def __init__(self, host, port, qsize=10):
self._in_queue = Queue(qsize)
self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
data = self._s.recv(2)
if data != 'U ':
print "Wrong handshake"
self._s.send("U ")
print "Connected"
thread.start_new_thread(self.run, ())
def run(self):
while True:
length = ord(self._s.recv(1))
data = self._s.recv(length)
data = [ord(c) for c in data][1:]
#print "Recv %d bytes" % (length), ActiveMessage(data)
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
p = RawPacket()
p.data = data
self._in_queue.put(p, block=False)
def read(self, timeout=0):
return self._in_queue.get()
def write(self, payload):
print "SFClient: write:", payload
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
payload = [0] + payload
self._s.send(chr(len(payload)))
self._s.send(''.join([chr(c) for c in payload]))
return True
class AM:
def __init__(self, s):
self._s = s
def read(self, timeout=None):
return ActiveMessage(self._s.read(timeout).data)
def write(self, packet, amid, timeout=None):
return self._s.write(ActiveMessage(packet, amid=amid), timeout=timeout)
class SimpleSerialAM(SimpleSerial):
"""A derived class of SimpleSerial so that apps can read and write using
higher-level packet structures.
Serves a simalar purpose to the AM class, but for SimpleSerial objects
instead instead of Serial.
"""
def read_am(self, timeout=None):
"""Read a RawPacket object (or None), convert it to ActiveMessage
(or None), and return to the caller"""
# Get a tos.Rawpacket (or None, if filtered) object
p = self.read(timeout)
if p is not None:
assert isinstance(p, RawPacket)
# Convert tos.RawPacket object into an ActiveMessage:
p = NoAckDataFrame(p.data)
p = ActiveMessage(p.data)
# Return the ActiveMessage (or None) packet:
return p
def write_am(self, packet, amid, seqno, timeout=2.0):
"""Convert app packet format to ActiveMessage, and write the
ActiveMessage packet to serial"""
# Convert from app-specific packet to ActiveMessage:
p = ActiveMessage(packet, amid=amid)
# Write to the serial device
self.write(p, seqno, timeout)
class Packet:
"""
The Packet class offers a handy way to build pack and unpack
binary data based on a given pattern.
"""
def _decode(self, v):
r = long(0)
for i in v:
r = (r << 8) + i
return r
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(int(val & 0xFF))
val = val >> 8
output.reverse()
return output
def __init__(self, desc, packet = None):
offset = 0
boffset = 0
sum = 0
for i in range(len(desc)-1, -1, -1):
(n, t, s) = desc[i]
if s == None:
if sum > 0:
desc[i] = (n, t, -sum)
break
sum += s
self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]
self.__dict__['_names'] = [n for (n, t, s) in desc]
self.__dict__['_values'] = []
if type(packet) == type([]):
for (t, s) in self._schema:
if t == 'int':
self._values.append(self._decode(packet[offset:offset + s]))
offset += s
elif t == 'bint':
doffset = 8 - (boffset + s)
self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))
boffset += s
if boffset == 8:
offset += 1
boffset = 0
elif t == 'string':
self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))
offset += s
elif t == 'blob':
if s:
if s > 0:
self._values.append(packet[offset:offset + s])
offset += s
else:
self._values.append(packet[offset:s])
offset = len(packet) + s
else:
self._values.append(packet[offset:])
elif type(packet) == type(()):
for i in packet:
self._values.append(i)
else:
for v in self._schema:
self._values.append(None)
def __repr__(self):
return self._values.__repr__()
def __str__(self):
r = ""
for i in range(len(self._names)):
r += "%s: %s " % (self._names[i], self._values[i])
for i in range(len(self._names), len(self._values)):
r += "%s" % self._values[i]
return r
# return self._values.__str__()
# Implement the map behavior
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __len__(self):
return len(self._values)
def keys(self):
return self._names
def values(self):
return self._names
# Implement the struct behavior
def __getattr__(self, name):
#print "DEBUG: __getattr__", name
if type(name) == type(0):
return self._names[name]
else:
return self._values[self._names.index(name)]
def __setattr__(self, name, value):
if type(name) == type(0):
self._values[name] = value
else:
self._values[self._names.index(name)] = value
def __ne__(self, other):
if other.__class__ == self.__class__:
return self._values != other._values
else:
return True
def __eq__(self, other):
if other.__class__ == self.__class__:
return self._values == other._values
else:
return False
def __nonzero__(self):
return True;
# Custom
def names(self):
return self._names
def sizes(self):
return self._schema
def payload(self):
r = []
boffset = 0
for i in range(len(self._schema)):
(t, s) = self._schema[i]
if t == 'int':
r += self._encode(self._values[i], s)
boffset = 0
elif t == 'bint':
doffset = 8 - (boffset + s)
if boffset == 0:
r += [self._values[i] << doffset]
else:
r[-1] |= self._values[i] << doffset
boffset += s
if boffset == 8:
boffset = 0
elif self._values[i] != []:
r += self._values[i]
for i in self._values[len(self._schema):]:
r += i
return r
class RawPacket(Packet):
def __init__(self, ts = None, data = None):
Packet.__init__(self,
[('ts' , 'int', 4),
('data', 'blob', None)],
None)
self.ts = ts;
self.data = data
class AckFrame(Packet):
def __init__(self, payload = None):
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1)],
payload)
class DataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
# Assume is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class NoAckDataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
# Assume is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class ActiveMessage(Packet):
def __init__(self, gpacket = None, amid = 0x00, dest = 0xFFFF):
if type(gpacket) == type([]):
payload = gpacket
else:
# Assume this will be derived from Packet
payload = None
Packet.__init__(self,
[('destination', 'int', 2),
('source', 'int', 2),
('length', 'int', 1),
('group', 'int', 1),
('type', 'int', 1),
('data', 'blob', None)],
payload)
if payload == None:
self.destination = dest
self.source = 0x0000
self.group = 0x00
self.type = amid
self.data = []
if gpacket:
self.data = gpacket.payload()
self.length = len(self.data) | print "Send(%d/%d): %s" % (self._write_counter, self._write_counter_failures, packet) | random_line_split |
derivatives.py |
import numpy as np
import scipy.sparse as spsp
import pyamg
from pyamg.gallery import stencil_grid
from pysit.util.derivatives.fdweight import *
from pysit.util.matrix_helpers import make_diag_mtx
__all__ = ['build_derivative_matrix','build_derivative_matrix_VDA', 'build_heterogenous_matrices','build_permutation_matrix','_build_staggered_first_derivative_matrix_part', 'build_linear_interpolation_matrix_part']
def build_derivative_matrix(mesh,
derivative, order_accuracy,
**kwargs):
if mesh.type == 'structured-cartesian':
return _build_derivative_matrix_structured_cartesian(mesh, derivative, order_accuracy, **kwargs)
else:
raise NotImplementedError('Derivative matrix builder not available (yet) for {0} meshes.'.format(mesh.discretization))
def build_derivative_matrix_VDA(mesh, derivative, order_accuracy, alpha = None, **kwargs): #variable density acoustic
if mesh.type == 'structured-cartesian':
return _build_derivative_matrix_staggered_structured_cartesian(mesh, derivative, order_accuracy, alpha=alpha, **kwargs)
else:
raise NotImplementedError('Derivative matrix builder not available (yet) for {0} meshes.'.format(mesh.discretization))
def | (bc):
if bc.type == 'pml':
return bc.boundary_type
elif bc.type == 'ghost':
return ('ghost', bc.n)
else:
return bc.type
def _build_derivative_matrix_structured_cartesian(mesh,
derivative, order_accuracy,
dimension='all',
use_shifted_differences=False,
return_1D_matrix=False,
**kwargs):
dims = list()
if type(dimension) is str:
dimension = [dimension]
if 'all' in dimension:
if mesh.dim > 1:
dims.append('x')
if mesh.dim > 2:
dims.append('y')
dims.append('z')
else:
for d in dimension:
dims.append(d)
# sh[-1] is always 'z'
# sh[0] is always 'x' if in 2 or 3d
# sh[1] is always 'y' if dim > 2
sh = mesh.shape(include_bc = True, as_grid = True)
if mesh.dim > 1:
if 'x' in dims:
lbc = _set_bc(mesh.x.lbc)
rbc = _set_bc(mesh.x.rbc)
delta = mesh.x.delta
Dx = _build_derivative_matrix_part(sh[0], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dx = spsp.csr_matrix((sh[0],sh[0]))
if mesh.dim > 2:
if 'y' in dims:
lbc = _set_bc(mesh.y.lbc)
rbc = _set_bc(mesh.y.rbc)
delta = mesh.y.delta
Dy = _build_derivative_matrix_part(sh[1], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dy = spsp.csr_matrix((sh[1],sh[1]))
if 'z' in dims:
lbc = _set_bc(mesh.z.lbc)
rbc = _set_bc(mesh.z.rbc)
delta = mesh.z.delta
Dz = _build_derivative_matrix_part(sh[-1], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dz = spsp.csr_matrix((sh[-1],sh[-1]))
if return_1D_matrix and 'all' not in dims:
if 'z' in dims:
mtx = Dz
elif 'y' in dims:
mtx = Dy
elif 'x' in dims:
mtx = Dx
else:
if mesh.dim == 1:
mtx = Dz.tocsr()
if mesh.dim == 2:
# kronsum in this order because wavefields are stored with 'z' in row
# and 'x' in columns, then vectorized in 'C' order
mtx = spsp.kronsum(Dz, Dx, format='csr')
if mesh.dim == 3:
mtx = spsp.kronsum(Dz, spsp.kronsum(Dy,Dx, format='csr'), format='csr')
return mtx
def _build_derivative_matrix_part(npoints, derivative, order_accuracy, h=1.0, lbc='d', rbc='d', use_shifted_differences=False):
if order_accuracy%2:
raise ValueError('Only even accuracy orders supported.')
centered_coeffs = centered_difference(derivative, order_accuracy)/(h**derivative)
mtx = stencil_grid(centered_coeffs, (npoints, ), format='lil')
max_shift= order_accuracy//2
if use_shifted_differences:
# Left side
odd_even_offset = 1-derivative%2
for i in range(0, max_shift):
coeffs = shifted_difference(derivative, order_accuracy, -(max_shift+odd_even_offset)+i)
mtx[i,0:len(coeffs)] = coeffs/(h**derivative)
# Right side
for i in range(-1, -max_shift-1,-1):
coeffs = shifted_difference(derivative, order_accuracy, max_shift+i+odd_even_offset)
mtx[i,slice(-1, -(len(coeffs)+1),-1)] = coeffs[::-1]/(h**derivative)
if 'd' in lbc: #dirichlet
mtx[0,:] = 0
mtx[0,0] = 1.0
elif 'n' in lbc: #neumann
mtx[0,:] = 0
coeffs = shifted_difference(1, order_accuracy, -max_shift)/h
coeffs /= (-1*coeffs[0])
coeffs[0] = 0.0
mtx[0,0:len(coeffs)] = coeffs
elif type(lbc) is tuple and 'g' in lbc[0]: #ghost
n_ghost_points = int(lbc[1])
mtx[0:n_ghost_points,:] = 0
for i in range(n_ghost_points):
mtx[i,i] = 1.0
if 'd' in rbc:
mtx[-1,:] = 0
mtx[-1,-1] = 1.0
elif 'n' in rbc:
mtx[-1,:] = 0
coeffs = shifted_difference(1, order_accuracy, max_shift)/h
coeffs /= (-1*coeffs[-1])
coeffs[-1] = 0.0
mtx[-1,slice(-1, -(len(coeffs)+1),-1)] = coeffs[::-1]
elif type(rbc) is tuple and 'g' in rbc[0]:
n_ghost_points = int(rbc[1])
mtx[slice(-1,-(n_ghost_points+1), -1),:] = 0
for i in range(n_ghost_points):
mtx[-i-1,-i-1] = 1.0
return mtx.tocsr()
def _build_derivative_matrix_staggered_structured_cartesian(mesh,
derivative, order_accuracy,
dimension='all',
alpha = None,
return_1D_matrix=False,
**kwargs):
#Some of the operators could be cached the same way I did to make 'build_permutation_matrix' faster.
#Could be considered if the current speed is ever considered to be insufficient.
import time
tt = time.time()
if return_1D_matrix:
raise Exception('Not yet implemented')
if derivative < 1 or derivative > 2:
raise ValueError('Only defined for first and second order right now')
if derivative == 1 and dimension not in ['x', 'y', 'z']:
raise ValueError('First derivative requires a direciton')
sh = mesh.shape(include_bc = True, as_grid = True) #Will include PML padding
if len(sh) != 2: raise Exception('currently hardcoded 2D implementation, relatively straight-forward to change. Look at the function build_derivative_matrix to get a more general function.')
nx = sh[0]
nz = sh[-1]
#Currently I am working with density input on the regular grid.
#In the derivation of the variable density solver we only require density at the stagger points
#For now I am just interpolating density defined on regular points towards the stagger points and use that as 'density model'.
#Later it is probably better to define the density directly on the stagger points (and evaluate density gradient there to update directly at these points?)
if type(alpha) == None: #If no alpha is given, we set it to a uniform vector. The result should be the homogeneous Laplacian.
alpha = np.ones(nx*nz)
alpha = alpha.flatten() #make 1D
dx = mesh.x.delta
dz = mesh.z.delta
#Get 1D linear interpolation matrices
Jx_1d = build_linear_interpolation_matrix_part(nx)
Jz_1d = build_linear_interpolation_matrix_part(nz)
#Get 1D derivative matrix for first spatial derivative using the desired order of accuracy
lbc_x = _set_bc(mesh.x.lbc)
rbc_x = _set_bc(mesh.x.rbc)
lbc_z = _set_bc(mesh.z.lbc)
rbc_z = _set_bc(mesh.z.rbc)
Dx_1d = _build_staggered_first_derivative_matrix_part(nx, order_accuracy, h=dx, lbc = lbc_x, rbc = rbc_x)
Dz_1d = _build_staggered_first_derivative_matrix_part(nz, order_accuracy, h=dz, lbc = lbc_z, rbc = rbc_z)
#Some empty matrices of the right shape so we can use kronsum to get the proper 2D matrices for the operations we want.
#The same is used in the homogeneous 'build_derivative_matrix' function.
Ix = spsp.eye(nx)
Iz = spsp.eye(nz)
Dx_2d = spsp.kron(Dx_1d, Iz, format='csr')
if dimension == 'x' and derivative == 1:
return Dx_2d
Dz_2d = spsp.kron(Ix, Dz_1d, format='csr')
if dimension == 'z' and derivative == 1:
return Dz_2d
#If we are evaluating this we want to make the heterogeneous Laplacian
Jx_2d = spsp.kron(Jx_1d, Iz, format='csr')
Jz_2d = spsp.kron(Ix, Jz_1d, format='csr')
#alpha interpolated to x stagger points. Make diag mat
diag_alpha_x = make_diag_mtx(Jx_2d*alpha)
#alpha interpolated to z stagger points. Make diag mat
diag_alpha_z = make_diag_mtx(Jz_2d*alpha)
#Create laplacian components
#The negative transpose of Dx and Dz takes care of the divergence term of the heterogeneous laplacian
Dxx_2d = -Dx_2d.T*diag_alpha_x*Dx_2d
Dzz_2d = -Dz_2d.T*diag_alpha_z*Dz_2d
#Correct the Laplacian around the boundary. This is also done in the homogeneous Laplacian
#I want the heterogeneous Laplacian to be the same as the homogeneous Laplacian when alpha is uniform
#This is the only part of the Laplacian that deviates from symmetry, just as in the homogeneous case.
#But because of these conditions on the dirichlet boundary the wavefield will always equal 0 there and this deviation from symmetry is fine.
#For indexing, get list of all boundary node numbers
left_node_nrs = np.arange(nz)
right_node_nrs = np.arange((nx-1)*nz,nx*nz)
top_node_nrs = np.arange(nz,(nx-1)*nz,nz) #does not include left and right top node
bot_node_nrs = top_node_nrs + nz - 1 #does not include left and right top node
all_boundary_node_nrs = np.concatenate((left_node_nrs, right_node_nrs, top_node_nrs, bot_node_nrs))
nb = all_boundary_node_nrs.size
L = Dxx_2d + Dzz_2d
all_node_numbers = np.arange(0,(nx*nz), dtype='int32')
internal_node_numbers = list(set(all_node_numbers) - set(all_boundary_node_nrs))
L = L.tocsr() #so we can extract rows efficiently
#Operation below fixes the boundary rows quite efficiently.
L_fixed = _turn_sparse_rows_to_identity(L, internal_node_numbers, all_boundary_node_nrs)
return L_fixed.tocsr()
def _turn_sparse_rows_to_identity(A, rows_to_keep, rows_to_change):
#Convenience function for removing some rows from the sparse laplacian
#Had some major performance problems by simply slicing in all the matrix formats I tried.
nr,nc = A.shape
if nr != nc:
raise Exception('assuming square matrix')
#Create diagonal matrix. When we multiply A by this matrix we can remove rows
rows_to_keep_diag = np.zeros(nr, dtype='int32')
rows_to_keep_diag[rows_to_keep] = 1
diag_mat_remove_rows = make_diag_mtx(rows_to_keep_diag)
#The matrix below has the rows we want to turn into identity turned to 0
A_with_rows_removed = diag_mat_remove_rows*A
#Make diag matrix that has diagonal entries in the rows we want to be identity
rows_to_change_diag = np.zeros(nr, dtype='int32')
rows_to_change_diag[rows_to_change] = 1
A_with_identity_rows = make_diag_mtx(rows_to_change_diag)
A_modified = A_with_rows_removed + A_with_identity_rows
return A_modified
def _build_staggered_first_derivative_matrix_part(npoints, order_accuracy, h=1.0, lbc='d', rbc='d'):
#npoints is the number of regular grid points.
if order_accuracy%2:
raise ValueError('Only even accuracy orders supported.')
#coefficients for the first derivative evaluated in between two regular grid points.
stagger_coeffs = staggered_difference(1, order_accuracy)/h
#Use the old 'stencil_grid' routine.
#Because we do a staggered grid we need to shift the coeffs one entry and the matrix will not be square
incorrect_mtx = stencil_grid(np.insert(stagger_coeffs,0,0), (npoints, ), format='lil')
#Get rid of the last row which we dont want in our staggered approach
mtx = incorrect_mtx[0:-1,:]
if 'n' in lbc or 'n' in rbc:
raise ValueError('Did not yet implement Neumann boundaries. Perhaps looking at the centered grid implementation would be a good start?')
if 'g' in lbc or 'g' in rbc:
raise ValueError('Did not yet implement this boundary condition yet. Perhaps looking at the centered grid implementation would be a good start?')
#For dirichlet we don't need to alter the matrix for the first derivative for the boundary nodes as is done in the centered approach
#The reason is that the first staggered point we evaluate at is in the interior of the domain.
return mtx.tocsr()
def build_linear_interpolation_matrix_part(npoints):
#same logic as in function 'build_staggered_first_derivative_matrix_part
coeffs = np.array([0.5, 0.5])
incorrect_mtx = stencil_grid(np.insert(coeffs,0,0), (npoints, ), format='lil')
mtx = incorrect_mtx[0:-1,:]
return mtx.tocsr()
def apply_derivative(mesh, derivative, order_accuracy, vector, **kwargs):
A = build_derivative_matrix(mesh, derivative, order_accuracy, **kwargs)
return A*vector
def build_permutation_matrix(nz,nx):
# This creates a permutation matrix which transforms a column vector of nx
# "component" columns of size nz, to the corresponding column vector of nz
# "component" columns of size nx.
def generate_matrix(nz, nx): #local function
P = spsp.lil_matrix((nz*nx,nz*nx))
for i in range(nz): #Looping is not efficient, but we only need to do it once as setup
for j in range(nx):
P[nx*i+j,i+j*nz]=1
return P.tocsr()
#Start body of code for 'build_permutation_matrix'
try: #See if there are already stored results from previous calls to this function
current_storage_dict = build_permutation_matrix.storage_dict
except: #If not, initialize
current_storage_dict = dict()
build_permutation_matrix.storage_dict = current_storage_dict
if (nz,nx) not in list(current_storage_dict.keys()): #Have not precomputed this!
mat = generate_matrix(nz,nx)
current_storage_dict[nz,nx] = mat
return current_storage_dict[nz,nx]
def build_offcentered_alpha(sh,alpha):
# This computes the midpoints of alpha which will be used in the heterogenous laplacian
nz=sh[-1]
nx=sh[0]
v1z,v2z,v3z=np.ones(nz),np.ones(nz-1),np.zeros(nz)
v1z[-1],v3z[0]=2.0,2.0
v1x,v2x,v3x=np.ones(nx),np.ones(nx-1),np.zeros(nx)
v1x[-1],v3x[0]=2.0,2.0
v3z=v3z.reshape(1,nz)
v3x=v3x.reshape(1,nx)
Lz1=np.array(spsp.diags([v1z,v2z],[0,1]).todense())
Lx1=np.array(spsp.diags([v1x,v2x],[0,1]).todense())
Lz=np.matrix(0.5*np.concatenate((v3z,Lz1),axis=0))
Lx=np.matrix(0.5*np.concatenate((v3x,Lx1),axis=0))
# Lz and Lx simply (of length nz and nx respectively) act on a vector and return one which is one entry larger than before,
# with each entry being a weighted sum of the two adjacent entries. Boundary values are preserved.
P=build_permutation_matrix(nz,nx)
alpha_perm=P*alpha
alpha_z,alpha_x=list(),list()
for i in range(nx):
alpha_z.append(Lz*alpha[nz*i:nz*(i+1)])
for i in range(nz):
alpha_x.append(Lx*alpha_perm[nx*i:nx*(i+1)])
return alpha_x, alpha_z
def build_heterogenous_matrices(sh,deltas,alpha=None,rp=None):
# This builds 1st order, forward and backward derivative matrices.
# alpha is a vector which goes inside of the operator, div (alpha grad)
# It can also build a hetergenous laplacian (if rp is not None),which differs from the above
# heterogenous laplacian only in its boundary conditions.
nz=sh[-1]
nx=sh[0]
#builds z derivative matrix
v=-np.ones(nx*nz)/deltas[-1]
v1=np.ones(nx*nz-1)/deltas[-1]
v1[list(range(nz-1,nz*nx-1,nz))]=0.0 # repair boundary terms.
D2=spsp.diags([v,v1],[0,1])
D2_tilda=-1.0*D2.T
#builds x derivative matrix
p=-np.ones(nx*nz)/deltas[0]
p1=np.ones(nx*nz-1)/deltas[0]
#p[range(nx-1,nz*nx,nx)]=-1.0
p1[list(range(nx-1,nz*nx-1,nx))]=0.0
D1=spsp.diags([p,p1],[0,1])
D1_tilda=-1.0*D1.T
P=build_permutation_matrix(nz,nx)
P_inv=build_permutation_matrix(nx,nz)
#builds exact adjoint gradient for z.
v=-np.ones(nx*nz)/deltas[-1]
v1=np.ones(nx*nz-1)/deltas[-1]
v1[list(range(nz-1,nz*nx-1,nz))]=0.0
v1[list(range(0,nz*nx-1,nz))]=0.0
D2_adj=spsp.diags([v,v1],[0,1])
#builds exact adjoint gradient for x.
p=-np.ones(nx*nz)/deltas[0]
p1=np.ones(nx*nz-1)/deltas[0]
p1[list(range(nx-1,nz*nx-1,nx))]=0.0
p1[list(range(0,nz*nx-1,nx))]=0.0
D1_adj=spsp.diags([p,p1],[0,1])
if rp is not None:
A=spsp.diags([alpha],[0])
Lap = D2_tilda*A*D2+P_inv*D1_tilda*P*A*P_inv*D1*P
return Lap
else:
D1=P_inv*D1*P
D1_adj=P_inv*D1_adj*P
return [D1,D1_adj],[D2,D2_adj]
if __name__ == '__main__':
from pysit import *
from pysit.gallery import horizontal_reflector
bc = Dirichlet()
dim = 2
deriv = 1 # 2
order = 4
if dim == 1:
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(z_config)
m = CartesianMesh(d, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
Dz = build_derivative_matrix(m, deriv, order, dimension='z').todense()
if dim == 2:
x_config = (0.0, 7.0, bc, bc)
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(x_config, z_config)
m = CartesianMesh(d, 7, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
Dx = build_derivative_matrix(m, deriv, order, dimension='x').todense()
Dz = build_derivative_matrix(m, deriv, order, dimension='z').todense()
if dim == 3:
x_config = (0.0, 7.0, bc, bc)
y_config = (0.0, 7.0, bc, bc)
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(x_config, x_config, z_config)
m = CartesianMesh(d, 7, 7, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
sh = m.shape(as_grid=True)
Dx = build_derivative_matrix(m, deriv, order, dimension=['x']).todense()
Dy = build_derivative_matrix(m, deriv, order, dimension=['y']).todense()
Dz = build_derivative_matrix(m, deriv, order, dimension=['z']).todense()
x=(Dx*C).reshape(sh)
y=(Dy*C).reshape(sh)
z=(Dz*C).reshape(sh)
print(x[:,:,0]) # should have ones all in first and last rows
print(y[:,:,0]) # should have ones all in first and last columns
print(z[0,0,:]) # should have ones at the ends
| _set_bc | identifier_name |
derivatives.py |
import numpy as np
import scipy.sparse as spsp
import pyamg
from pyamg.gallery import stencil_grid
from pysit.util.derivatives.fdweight import *
from pysit.util.matrix_helpers import make_diag_mtx
__all__ = ['build_derivative_matrix','build_derivative_matrix_VDA', 'build_heterogenous_matrices','build_permutation_matrix','_build_staggered_first_derivative_matrix_part', 'build_linear_interpolation_matrix_part']
def build_derivative_matrix(mesh,
derivative, order_accuracy,
**kwargs):
if mesh.type == 'structured-cartesian':
return _build_derivative_matrix_structured_cartesian(mesh, derivative, order_accuracy, **kwargs)
else:
raise NotImplementedError('Derivative matrix builder not available (yet) for {0} meshes.'.format(mesh.discretization))
def build_derivative_matrix_VDA(mesh, derivative, order_accuracy, alpha = None, **kwargs): #variable density acoustic
if mesh.type == 'structured-cartesian':
return _build_derivative_matrix_staggered_structured_cartesian(mesh, derivative, order_accuracy, alpha=alpha, **kwargs)
else:
raise NotImplementedError('Derivative matrix builder not available (yet) for {0} meshes.'.format(mesh.discretization))
def _set_bc(bc):
if bc.type == 'pml':
return bc.boundary_type
elif bc.type == 'ghost':
return ('ghost', bc.n)
else:
return bc.type
def _build_derivative_matrix_structured_cartesian(mesh,
derivative, order_accuracy,
dimension='all',
use_shifted_differences=False,
return_1D_matrix=False,
**kwargs):
dims = list()
if type(dimension) is str:
dimension = [dimension]
if 'all' in dimension:
if mesh.dim > 1:
dims.append('x')
if mesh.dim > 2:
dims.append('y')
dims.append('z')
else:
for d in dimension:
dims.append(d)
# sh[-1] is always 'z'
# sh[0] is always 'x' if in 2 or 3d
# sh[1] is always 'y' if dim > 2
sh = mesh.shape(include_bc = True, as_grid = True)
if mesh.dim > 1:
if 'x' in dims:
lbc = _set_bc(mesh.x.lbc)
rbc = _set_bc(mesh.x.rbc)
delta = mesh.x.delta
Dx = _build_derivative_matrix_part(sh[0], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dx = spsp.csr_matrix((sh[0],sh[0]))
if mesh.dim > 2:
if 'y' in dims:
lbc = _set_bc(mesh.y.lbc)
rbc = _set_bc(mesh.y.rbc)
delta = mesh.y.delta
Dy = _build_derivative_matrix_part(sh[1], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dy = spsp.csr_matrix((sh[1],sh[1]))
if 'z' in dims:
lbc = _set_bc(mesh.z.lbc)
rbc = _set_bc(mesh.z.rbc)
delta = mesh.z.delta
Dz = _build_derivative_matrix_part(sh[-1], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dz = spsp.csr_matrix((sh[-1],sh[-1]))
if return_1D_matrix and 'all' not in dims:
if 'z' in dims:
mtx = Dz
elif 'y' in dims:
mtx = Dy
elif 'x' in dims:
mtx = Dx
else:
if mesh.dim == 1:
mtx = Dz.tocsr()
if mesh.dim == 2:
# kronsum in this order because wavefields are stored with 'z' in row
# and 'x' in columns, then vectorized in 'C' order
mtx = spsp.kronsum(Dz, Dx, format='csr')
if mesh.dim == 3:
mtx = spsp.kronsum(Dz, spsp.kronsum(Dy,Dx, format='csr'), format='csr')
return mtx
def _build_derivative_matrix_part(npoints, derivative, order_accuracy, h=1.0, lbc='d', rbc='d', use_shifted_differences=False):
if order_accuracy%2:
raise ValueError('Only even accuracy orders supported.')
centered_coeffs = centered_difference(derivative, order_accuracy)/(h**derivative)
mtx = stencil_grid(centered_coeffs, (npoints, ), format='lil')
max_shift= order_accuracy//2
if use_shifted_differences:
# Left side
odd_even_offset = 1-derivative%2
for i in range(0, max_shift):
coeffs = shifted_difference(derivative, order_accuracy, -(max_shift+odd_even_offset)+i)
mtx[i,0:len(coeffs)] = coeffs/(h**derivative)
# Right side
for i in range(-1, -max_shift-1,-1):
coeffs = shifted_difference(derivative, order_accuracy, max_shift+i+odd_even_offset)
mtx[i,slice(-1, -(len(coeffs)+1),-1)] = coeffs[::-1]/(h**derivative)
if 'd' in lbc: #dirichlet
mtx[0,:] = 0
mtx[0,0] = 1.0
elif 'n' in lbc: #neumann
mtx[0,:] = 0
coeffs = shifted_difference(1, order_accuracy, -max_shift)/h
coeffs /= (-1*coeffs[0])
coeffs[0] = 0.0
mtx[0,0:len(coeffs)] = coeffs
elif type(lbc) is tuple and 'g' in lbc[0]: #ghost
n_ghost_points = int(lbc[1])
mtx[0:n_ghost_points,:] = 0
for i in range(n_ghost_points):
mtx[i,i] = 1.0
if 'd' in rbc:
mtx[-1,:] = 0
mtx[-1,-1] = 1.0
elif 'n' in rbc:
mtx[-1,:] = 0
coeffs = shifted_difference(1, order_accuracy, max_shift)/h
coeffs /= (-1*coeffs[-1])
coeffs[-1] = 0.0
mtx[-1,slice(-1, -(len(coeffs)+1),-1)] = coeffs[::-1]
elif type(rbc) is tuple and 'g' in rbc[0]:
n_ghost_points = int(rbc[1])
mtx[slice(-1,-(n_ghost_points+1), -1),:] = 0
for i in range(n_ghost_points):
mtx[-i-1,-i-1] = 1.0
return mtx.tocsr()
def _build_derivative_matrix_staggered_structured_cartesian(mesh,
derivative, order_accuracy,
dimension='all',
alpha = None,
return_1D_matrix=False,
**kwargs):
#Some of the operators could be cached the same way I did to make 'build_permutation_matrix' faster.
#Could be considered if the current speed is ever considered to be insufficient.
import time
tt = time.time()
if return_1D_matrix:
raise Exception('Not yet implemented')
if derivative < 1 or derivative > 2:
raise ValueError('Only defined for first and second order right now')
if derivative == 1 and dimension not in ['x', 'y', 'z']:
|
sh = mesh.shape(include_bc = True, as_grid = True) #Will include PML padding
if len(sh) != 2: raise Exception('currently hardcoded 2D implementation, relatively straight-forward to change. Look at the function build_derivative_matrix to get a more general function.')
nx = sh[0]
nz = sh[-1]
#Currently I am working with density input on the regular grid.
#In the derivation of the variable density solver we only require density at the stagger points
#For now I am just interpolating density defined on regular points towards the stagger points and use that as 'density model'.
#Later it is probably better to define the density directly on the stagger points (and evaluate density gradient there to update directly at these points?)
if type(alpha) == None: #If no alpha is given, we set it to a uniform vector. The result should be the homogeneous Laplacian.
alpha = np.ones(nx*nz)
alpha = alpha.flatten() #make 1D
dx = mesh.x.delta
dz = mesh.z.delta
#Get 1D linear interpolation matrices
Jx_1d = build_linear_interpolation_matrix_part(nx)
Jz_1d = build_linear_interpolation_matrix_part(nz)
#Get 1D derivative matrix for first spatial derivative using the desired order of accuracy
lbc_x = _set_bc(mesh.x.lbc)
rbc_x = _set_bc(mesh.x.rbc)
lbc_z = _set_bc(mesh.z.lbc)
rbc_z = _set_bc(mesh.z.rbc)
Dx_1d = _build_staggered_first_derivative_matrix_part(nx, order_accuracy, h=dx, lbc = lbc_x, rbc = rbc_x)
Dz_1d = _build_staggered_first_derivative_matrix_part(nz, order_accuracy, h=dz, lbc = lbc_z, rbc = rbc_z)
#Some empty matrices of the right shape so we can use kronsum to get the proper 2D matrices for the operations we want.
#The same is used in the homogeneous 'build_derivative_matrix' function.
Ix = spsp.eye(nx)
Iz = spsp.eye(nz)
Dx_2d = spsp.kron(Dx_1d, Iz, format='csr')
if dimension == 'x' and derivative == 1:
return Dx_2d
Dz_2d = spsp.kron(Ix, Dz_1d, format='csr')
if dimension == 'z' and derivative == 1:
return Dz_2d
#If we are evaluating this we want to make the heterogeneous Laplacian
Jx_2d = spsp.kron(Jx_1d, Iz, format='csr')
Jz_2d = spsp.kron(Ix, Jz_1d, format='csr')
#alpha interpolated to x stagger points. Make diag mat
diag_alpha_x = make_diag_mtx(Jx_2d*alpha)
#alpha interpolated to z stagger points. Make diag mat
diag_alpha_z = make_diag_mtx(Jz_2d*alpha)
#Create laplacian components
#The negative transpose of Dx and Dz takes care of the divergence term of the heterogeneous laplacian
Dxx_2d = -Dx_2d.T*diag_alpha_x*Dx_2d
Dzz_2d = -Dz_2d.T*diag_alpha_z*Dz_2d
#Correct the Laplacian around the boundary. This is also done in the homogeneous Laplacian
#I want the heterogeneous Laplacian to be the same as the homogeneous Laplacian when alpha is uniform
#This is the only part of the Laplacian that deviates from symmetry, just as in the homogeneous case.
#But because of these conditions on the dirichlet boundary the wavefield will always equal 0 there and this deviation from symmetry is fine.
#For indexing, get list of all boundary node numbers
left_node_nrs = np.arange(nz)
right_node_nrs = np.arange((nx-1)*nz,nx*nz)
top_node_nrs = np.arange(nz,(nx-1)*nz,nz) #does not include left and right top node
bot_node_nrs = top_node_nrs + nz - 1 #does not include left and right top node
all_boundary_node_nrs = np.concatenate((left_node_nrs, right_node_nrs, top_node_nrs, bot_node_nrs))
nb = all_boundary_node_nrs.size
L = Dxx_2d + Dzz_2d
all_node_numbers = np.arange(0,(nx*nz), dtype='int32')
internal_node_numbers = list(set(all_node_numbers) - set(all_boundary_node_nrs))
L = L.tocsr() #so we can extract rows efficiently
#Operation below fixes the boundary rows quite efficiently.
L_fixed = _turn_sparse_rows_to_identity(L, internal_node_numbers, all_boundary_node_nrs)
return L_fixed.tocsr()
def _turn_sparse_rows_to_identity(A, rows_to_keep, rows_to_change):
#Convenience function for removing some rows from the sparse laplacian
#Had some major performance problems by simply slicing in all the matrix formats I tried.
nr,nc = A.shape
if nr != nc:
raise Exception('assuming square matrix')
#Create diagonal matrix. When we multiply A by this matrix we can remove rows
rows_to_keep_diag = np.zeros(nr, dtype='int32')
rows_to_keep_diag[rows_to_keep] = 1
diag_mat_remove_rows = make_diag_mtx(rows_to_keep_diag)
#The matrix below has the rows we want to turn into identity turned to 0
A_with_rows_removed = diag_mat_remove_rows*A
#Make diag matrix that has diagonal entries in the rows we want to be identity
rows_to_change_diag = np.zeros(nr, dtype='int32')
rows_to_change_diag[rows_to_change] = 1
A_with_identity_rows = make_diag_mtx(rows_to_change_diag)
A_modified = A_with_rows_removed + A_with_identity_rows
return A_modified
def _build_staggered_first_derivative_matrix_part(npoints, order_accuracy, h=1.0, lbc='d', rbc='d'):
#npoints is the number of regular grid points.
if order_accuracy%2:
raise ValueError('Only even accuracy orders supported.')
#coefficients for the first derivative evaluated in between two regular grid points.
stagger_coeffs = staggered_difference(1, order_accuracy)/h
#Use the old 'stencil_grid' routine.
#Because we do a staggered grid we need to shift the coeffs one entry and the matrix will not be square
incorrect_mtx = stencil_grid(np.insert(stagger_coeffs,0,0), (npoints, ), format='lil')
#Get rid of the last row which we dont want in our staggered approach
mtx = incorrect_mtx[0:-1,:]
if 'n' in lbc or 'n' in rbc:
raise ValueError('Did not yet implement Neumann boundaries. Perhaps looking at the centered grid implementation would be a good start?')
if 'g' in lbc or 'g' in rbc:
raise ValueError('Did not yet implement this boundary condition yet. Perhaps looking at the centered grid implementation would be a good start?')
#For dirichlet we don't need to alter the matrix for the first derivative for the boundary nodes as is done in the centered approach
#The reason is that the first staggered point we evaluate at is in the interior of the domain.
return mtx.tocsr()
def build_linear_interpolation_matrix_part(npoints):
#same logic as in function 'build_staggered_first_derivative_matrix_part
coeffs = np.array([0.5, 0.5])
incorrect_mtx = stencil_grid(np.insert(coeffs,0,0), (npoints, ), format='lil')
mtx = incorrect_mtx[0:-1,:]
return mtx.tocsr()
def apply_derivative(mesh, derivative, order_accuracy, vector, **kwargs):
A = build_derivative_matrix(mesh, derivative, order_accuracy, **kwargs)
return A*vector
def build_permutation_matrix(nz,nx):
# This creates a permutation matrix which transforms a column vector of nx
# "component" columns of size nz, to the corresponding column vector of nz
# "component" columns of size nx.
def generate_matrix(nz, nx): #local function
P = spsp.lil_matrix((nz*nx,nz*nx))
for i in range(nz): #Looping is not efficient, but we only need to do it once as setup
for j in range(nx):
P[nx*i+j,i+j*nz]=1
return P.tocsr()
#Start body of code for 'build_permutation_matrix'
try: #See if there are already stored results from previous calls to this function
current_storage_dict = build_permutation_matrix.storage_dict
except: #If not, initialize
current_storage_dict = dict()
build_permutation_matrix.storage_dict = current_storage_dict
if (nz,nx) not in list(current_storage_dict.keys()): #Have not precomputed this!
mat = generate_matrix(nz,nx)
current_storage_dict[nz,nx] = mat
return current_storage_dict[nz,nx]
def build_offcentered_alpha(sh,alpha):
# This computes the midpoints of alpha which will be used in the heterogenous laplacian
nz=sh[-1]
nx=sh[0]
v1z,v2z,v3z=np.ones(nz),np.ones(nz-1),np.zeros(nz)
v1z[-1],v3z[0]=2.0,2.0
v1x,v2x,v3x=np.ones(nx),np.ones(nx-1),np.zeros(nx)
v1x[-1],v3x[0]=2.0,2.0
v3z=v3z.reshape(1,nz)
v3x=v3x.reshape(1,nx)
Lz1=np.array(spsp.diags([v1z,v2z],[0,1]).todense())
Lx1=np.array(spsp.diags([v1x,v2x],[0,1]).todense())
Lz=np.matrix(0.5*np.concatenate((v3z,Lz1),axis=0))
Lx=np.matrix(0.5*np.concatenate((v3x,Lx1),axis=0))
# Lz and Lx simply (of length nz and nx respectively) act on a vector and return one which is one entry larger than before,
# with each entry being a weighted sum of the two adjacent entries. Boundary values are preserved.
P=build_permutation_matrix(nz,nx)
alpha_perm=P*alpha
alpha_z,alpha_x=list(),list()
for i in range(nx):
alpha_z.append(Lz*alpha[nz*i:nz*(i+1)])
for i in range(nz):
alpha_x.append(Lx*alpha_perm[nx*i:nx*(i+1)])
return alpha_x, alpha_z
def build_heterogenous_matrices(sh,deltas,alpha=None,rp=None):
# This builds 1st order, forward and backward derivative matrices.
# alpha is a vector which goes inside of the operator, div (alpha grad)
# It can also build a hetergenous laplacian (if rp is not None),which differs from the above
# heterogenous laplacian only in its boundary conditions.
nz=sh[-1]
nx=sh[0]
#builds z derivative matrix
v=-np.ones(nx*nz)/deltas[-1]
v1=np.ones(nx*nz-1)/deltas[-1]
v1[list(range(nz-1,nz*nx-1,nz))]=0.0 # repair boundary terms.
D2=spsp.diags([v,v1],[0,1])
D2_tilda=-1.0*D2.T
#builds x derivative matrix
p=-np.ones(nx*nz)/deltas[0]
p1=np.ones(nx*nz-1)/deltas[0]
#p[range(nx-1,nz*nx,nx)]=-1.0
p1[list(range(nx-1,nz*nx-1,nx))]=0.0
D1=spsp.diags([p,p1],[0,1])
D1_tilda=-1.0*D1.T
P=build_permutation_matrix(nz,nx)
P_inv=build_permutation_matrix(nx,nz)
#builds exact adjoint gradient for z.
v=-np.ones(nx*nz)/deltas[-1]
v1=np.ones(nx*nz-1)/deltas[-1]
v1[list(range(nz-1,nz*nx-1,nz))]=0.0
v1[list(range(0,nz*nx-1,nz))]=0.0
D2_adj=spsp.diags([v,v1],[0,1])
#builds exact adjoint gradient for x.
p=-np.ones(nx*nz)/deltas[0]
p1=np.ones(nx*nz-1)/deltas[0]
p1[list(range(nx-1,nz*nx-1,nx))]=0.0
p1[list(range(0,nz*nx-1,nx))]=0.0
D1_adj=spsp.diags([p,p1],[0,1])
if rp is not None:
A=spsp.diags([alpha],[0])
Lap = D2_tilda*A*D2+P_inv*D1_tilda*P*A*P_inv*D1*P
return Lap
else:
D1=P_inv*D1*P
D1_adj=P_inv*D1_adj*P
return [D1,D1_adj],[D2,D2_adj]
if __name__ == '__main__':
from pysit import *
from pysit.gallery import horizontal_reflector
bc = Dirichlet()
dim = 2
deriv = 1 # 2
order = 4
if dim == 1:
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(z_config)
m = CartesianMesh(d, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
Dz = build_derivative_matrix(m, deriv, order, dimension='z').todense()
if dim == 2:
x_config = (0.0, 7.0, bc, bc)
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(x_config, z_config)
m = CartesianMesh(d, 7, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
Dx = build_derivative_matrix(m, deriv, order, dimension='x').todense()
Dz = build_derivative_matrix(m, deriv, order, dimension='z').todense()
if dim == 3:
x_config = (0.0, 7.0, bc, bc)
y_config = (0.0, 7.0, bc, bc)
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(x_config, x_config, z_config)
m = CartesianMesh(d, 7, 7, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
sh = m.shape(as_grid=True)
Dx = build_derivative_matrix(m, deriv, order, dimension=['x']).todense()
Dy = build_derivative_matrix(m, deriv, order, dimension=['y']).todense()
Dz = build_derivative_matrix(m, deriv, order, dimension=['z']).todense()
x=(Dx*C).reshape(sh)
y=(Dy*C).reshape(sh)
z=(Dz*C).reshape(sh)
print(x[:,:,0]) # should have ones all in first and last rows
print(y[:,:,0]) # should have ones all in first and last columns
print(z[0,0,:]) # should have ones at the ends
| raise ValueError('First derivative requires a direciton') | conditional_block |
derivatives.py | import numpy as np
import scipy.sparse as spsp
import pyamg
from pyamg.gallery import stencil_grid
from pysit.util.derivatives.fdweight import *
from pysit.util.matrix_helpers import make_diag_mtx
__all__ = ['build_derivative_matrix','build_derivative_matrix_VDA', 'build_heterogenous_matrices','build_permutation_matrix','_build_staggered_first_derivative_matrix_part', 'build_linear_interpolation_matrix_part']
def build_derivative_matrix(mesh,
derivative, order_accuracy,
**kwargs):
if mesh.type == 'structured-cartesian':
return _build_derivative_matrix_structured_cartesian(mesh, derivative, order_accuracy, **kwargs)
else:
raise NotImplementedError('Derivative matrix builder not available (yet) for {0} meshes.'.format(mesh.discretization))
def build_derivative_matrix_VDA(mesh, derivative, order_accuracy, alpha = None, **kwargs): #variable density acoustic
if mesh.type == 'structured-cartesian':
return _build_derivative_matrix_staggered_structured_cartesian(mesh, derivative, order_accuracy, alpha=alpha, **kwargs)
else:
raise NotImplementedError('Derivative matrix builder not available (yet) for {0} meshes.'.format(mesh.discretization))
def _set_bc(bc):
if bc.type == 'pml':
return bc.boundary_type
elif bc.type == 'ghost':
return ('ghost', bc.n)
else:
return bc.type
def _build_derivative_matrix_structured_cartesian(mesh,
derivative, order_accuracy,
dimension='all',
use_shifted_differences=False,
return_1D_matrix=False,
**kwargs):
dims = list()
if type(dimension) is str:
dimension = [dimension]
if 'all' in dimension:
if mesh.dim > 1:
dims.append('x')
if mesh.dim > 2:
dims.append('y')
dims.append('z')
else:
for d in dimension:
dims.append(d)
# sh[-1] is always 'z'
# sh[0] is always 'x' if in 2 or 3d
# sh[1] is always 'y' if dim > 2
sh = mesh.shape(include_bc = True, as_grid = True)
if mesh.dim > 1:
if 'x' in dims:
lbc = _set_bc(mesh.x.lbc)
rbc = _set_bc(mesh.x.rbc)
delta = mesh.x.delta
Dx = _build_derivative_matrix_part(sh[0], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dx = spsp.csr_matrix((sh[0],sh[0]))
if mesh.dim > 2:
if 'y' in dims:
lbc = _set_bc(mesh.y.lbc)
rbc = _set_bc(mesh.y.rbc)
delta = mesh.y.delta
Dy = _build_derivative_matrix_part(sh[1], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dy = spsp.csr_matrix((sh[1],sh[1]))
if 'z' in dims:
lbc = _set_bc(mesh.z.lbc)
rbc = _set_bc(mesh.z.rbc)
delta = mesh.z.delta
Dz = _build_derivative_matrix_part(sh[-1], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dz = spsp.csr_matrix((sh[-1],sh[-1]))
if return_1D_matrix and 'all' not in dims:
if 'z' in dims:
mtx = Dz
elif 'y' in dims:
mtx = Dy
elif 'x' in dims:
mtx = Dx
else:
if mesh.dim == 1:
mtx = Dz.tocsr()
if mesh.dim == 2:
# kronsum in this order because wavefields are stored with 'z' in row
# and 'x' in columns, then vectorized in 'C' order
mtx = spsp.kronsum(Dz, Dx, format='csr')
if mesh.dim == 3:
mtx = spsp.kronsum(Dz, spsp.kronsum(Dy,Dx, format='csr'), format='csr')
return mtx
def _build_derivative_matrix_part(npoints, derivative, order_accuracy, h=1.0, lbc='d', rbc='d', use_shifted_differences=False):
if order_accuracy%2:
raise ValueError('Only even accuracy orders supported.')
centered_coeffs = centered_difference(derivative, order_accuracy)/(h**derivative)
mtx = stencil_grid(centered_coeffs, (npoints, ), format='lil')
max_shift= order_accuracy//2
if use_shifted_differences:
# Left side
odd_even_offset = 1-derivative%2
for i in range(0, max_shift):
coeffs = shifted_difference(derivative, order_accuracy, -(max_shift+odd_even_offset)+i)
mtx[i,0:len(coeffs)] = coeffs/(h**derivative)
# Right side
for i in range(-1, -max_shift-1,-1):
coeffs = shifted_difference(derivative, order_accuracy, max_shift+i+odd_even_offset)
mtx[i,slice(-1, -(len(coeffs)+1),-1)] = coeffs[::-1]/(h**derivative)
if 'd' in lbc: #dirichlet
mtx[0,:] = 0
mtx[0,0] = 1.0
elif 'n' in lbc: #neumann
mtx[0,:] = 0
coeffs = shifted_difference(1, order_accuracy, -max_shift)/h
coeffs /= (-1*coeffs[0])
coeffs[0] = 0.0
mtx[0,0:len(coeffs)] = coeffs
elif type(lbc) is tuple and 'g' in lbc[0]: #ghost
n_ghost_points = int(lbc[1])
mtx[0:n_ghost_points,:] = 0
for i in range(n_ghost_points):
mtx[i,i] = 1.0
if 'd' in rbc:
mtx[-1,:] = 0
mtx[-1,-1] = 1.0
elif 'n' in rbc:
mtx[-1,:] = 0
coeffs = shifted_difference(1, order_accuracy, max_shift)/h
coeffs /= (-1*coeffs[-1])
coeffs[-1] = 0.0
mtx[-1,slice(-1, -(len(coeffs)+1),-1)] = coeffs[::-1]
elif type(rbc) is tuple and 'g' in rbc[0]:
n_ghost_points = int(rbc[1])
mtx[slice(-1,-(n_ghost_points+1), -1),:] = 0
for i in range(n_ghost_points):
mtx[-i-1,-i-1] = 1.0
return mtx.tocsr()
def _build_derivative_matrix_staggered_structured_cartesian(mesh,
derivative, order_accuracy,
dimension='all',
alpha = None,
return_1D_matrix=False,
**kwargs):
#Some of the operators could be cached the same way I did to make 'build_permutation_matrix' faster.
#Could be considered if the current speed is ever considered to be insufficient.
import time
tt = time.time()
if return_1D_matrix:
raise Exception('Not yet implemented')
if derivative < 1 or derivative > 2:
raise ValueError('Only defined for first and second order right now')
if derivative == 1 and dimension not in ['x', 'y', 'z']:
raise ValueError('First derivative requires a direciton') | sh = mesh.shape(include_bc = True, as_grid = True) #Will include PML padding
if len(sh) != 2: raise Exception('currently hardcoded 2D implementation, relatively straight-forward to change. Look at the function build_derivative_matrix to get a more general function.')
nx = sh[0]
nz = sh[-1]
#Currently I am working with density input on the regular grid.
#In the derivation of the variable density solver we only require density at the stagger points
#For now I am just interpolating density defined on regular points towards the stagger points and use that as 'density model'.
#Later it is probably better to define the density directly on the stagger points (and evaluate density gradient there to update directly at these points?)
if type(alpha) == None: #If no alpha is given, we set it to a uniform vector. The result should be the homogeneous Laplacian.
alpha = np.ones(nx*nz)
alpha = alpha.flatten() #make 1D
dx = mesh.x.delta
dz = mesh.z.delta
#Get 1D linear interpolation matrices
Jx_1d = build_linear_interpolation_matrix_part(nx)
Jz_1d = build_linear_interpolation_matrix_part(nz)
#Get 1D derivative matrix for first spatial derivative using the desired order of accuracy
lbc_x = _set_bc(mesh.x.lbc)
rbc_x = _set_bc(mesh.x.rbc)
lbc_z = _set_bc(mesh.z.lbc)
rbc_z = _set_bc(mesh.z.rbc)
Dx_1d = _build_staggered_first_derivative_matrix_part(nx, order_accuracy, h=dx, lbc = lbc_x, rbc = rbc_x)
Dz_1d = _build_staggered_first_derivative_matrix_part(nz, order_accuracy, h=dz, lbc = lbc_z, rbc = rbc_z)
#Some empty matrices of the right shape so we can use kronsum to get the proper 2D matrices for the operations we want.
#The same is used in the homogeneous 'build_derivative_matrix' function.
Ix = spsp.eye(nx)
Iz = spsp.eye(nz)
Dx_2d = spsp.kron(Dx_1d, Iz, format='csr')
if dimension == 'x' and derivative == 1:
return Dx_2d
Dz_2d = spsp.kron(Ix, Dz_1d, format='csr')
if dimension == 'z' and derivative == 1:
return Dz_2d
#If we are evaluating this we want to make the heterogeneous Laplacian
Jx_2d = spsp.kron(Jx_1d, Iz, format='csr')
Jz_2d = spsp.kron(Ix, Jz_1d, format='csr')
#alpha interpolated to x stagger points. Make diag mat
diag_alpha_x = make_diag_mtx(Jx_2d*alpha)
#alpha interpolated to z stagger points. Make diag mat
diag_alpha_z = make_diag_mtx(Jz_2d*alpha)
#Create laplacian components
#The negative transpose of Dx and Dz takes care of the divergence term of the heterogeneous laplacian
Dxx_2d = -Dx_2d.T*diag_alpha_x*Dx_2d
Dzz_2d = -Dz_2d.T*diag_alpha_z*Dz_2d
#Correct the Laplacian around the boundary. This is also done in the homogeneous Laplacian
#I want the heterogeneous Laplacian to be the same as the homogeneous Laplacian when alpha is uniform
#This is the only part of the Laplacian that deviates from symmetry, just as in the homogeneous case.
#But because of these conditions on the dirichlet boundary the wavefield will always equal 0 there and this deviation from symmetry is fine.
#For indexing, get list of all boundary node numbers
left_node_nrs = np.arange(nz)
right_node_nrs = np.arange((nx-1)*nz,nx*nz)
top_node_nrs = np.arange(nz,(nx-1)*nz,nz) #does not include left and right top node
bot_node_nrs = top_node_nrs + nz - 1 #does not include left and right top node
all_boundary_node_nrs = np.concatenate((left_node_nrs, right_node_nrs, top_node_nrs, bot_node_nrs))
nb = all_boundary_node_nrs.size
L = Dxx_2d + Dzz_2d
all_node_numbers = np.arange(0,(nx*nz), dtype='int32')
internal_node_numbers = list(set(all_node_numbers) - set(all_boundary_node_nrs))
L = L.tocsr() #so we can extract rows efficiently
#Operation below fixes the boundary rows quite efficiently.
L_fixed = _turn_sparse_rows_to_identity(L, internal_node_numbers, all_boundary_node_nrs)
return L_fixed.tocsr()
def _turn_sparse_rows_to_identity(A, rows_to_keep, rows_to_change):
#Convenience function for removing some rows from the sparse laplacian
#Had some major performance problems by simply slicing in all the matrix formats I tried.
nr,nc = A.shape
if nr != nc:
raise Exception('assuming square matrix')
#Create diagonal matrix. When we multiply A by this matrix we can remove rows
rows_to_keep_diag = np.zeros(nr, dtype='int32')
rows_to_keep_diag[rows_to_keep] = 1
diag_mat_remove_rows = make_diag_mtx(rows_to_keep_diag)
#The matrix below has the rows we want to turn into identity turned to 0
A_with_rows_removed = diag_mat_remove_rows*A
#Make diag matrix that has diagonal entries in the rows we want to be identity
rows_to_change_diag = np.zeros(nr, dtype='int32')
rows_to_change_diag[rows_to_change] = 1
A_with_identity_rows = make_diag_mtx(rows_to_change_diag)
A_modified = A_with_rows_removed + A_with_identity_rows
return A_modified
def _build_staggered_first_derivative_matrix_part(npoints, order_accuracy, h=1.0, lbc='d', rbc='d'):
#npoints is the number of regular grid points.
if order_accuracy%2:
raise ValueError('Only even accuracy orders supported.')
#coefficients for the first derivative evaluated in between two regular grid points.
stagger_coeffs = staggered_difference(1, order_accuracy)/h
#Use the old 'stencil_grid' routine.
#Because we do a staggered grid we need to shift the coeffs one entry and the matrix will not be square
incorrect_mtx = stencil_grid(np.insert(stagger_coeffs,0,0), (npoints, ), format='lil')
#Get rid of the last row which we dont want in our staggered approach
mtx = incorrect_mtx[0:-1,:]
if 'n' in lbc or 'n' in rbc:
raise ValueError('Did not yet implement Neumann boundaries. Perhaps looking at the centered grid implementation would be a good start?')
if 'g' in lbc or 'g' in rbc:
raise ValueError('Did not yet implement this boundary condition yet. Perhaps looking at the centered grid implementation would be a good start?')
#For dirichlet we don't need to alter the matrix for the first derivative for the boundary nodes as is done in the centered approach
#The reason is that the first staggered point we evaluate at is in the interior of the domain.
return mtx.tocsr()
def build_linear_interpolation_matrix_part(npoints):
#same logic as in function 'build_staggered_first_derivative_matrix_part
coeffs = np.array([0.5, 0.5])
incorrect_mtx = stencil_grid(np.insert(coeffs,0,0), (npoints, ), format='lil')
mtx = incorrect_mtx[0:-1,:]
return mtx.tocsr()
def apply_derivative(mesh, derivative, order_accuracy, vector, **kwargs):
A = build_derivative_matrix(mesh, derivative, order_accuracy, **kwargs)
return A*vector
def build_permutation_matrix(nz,nx):
# This creates a permutation matrix which transforms a column vector of nx
# "component" columns of size nz, to the corresponding column vector of nz
# "component" columns of size nx.
def generate_matrix(nz, nx): #local function
P = spsp.lil_matrix((nz*nx,nz*nx))
for i in range(nz): #Looping is not efficient, but we only need to do it once as setup
for j in range(nx):
P[nx*i+j,i+j*nz]=1
return P.tocsr()
#Start body of code for 'build_permutation_matrix'
try: #See if there are already stored results from previous calls to this function
current_storage_dict = build_permutation_matrix.storage_dict
except: #If not, initialize
current_storage_dict = dict()
build_permutation_matrix.storage_dict = current_storage_dict
if (nz,nx) not in list(current_storage_dict.keys()): #Have not precomputed this!
mat = generate_matrix(nz,nx)
current_storage_dict[nz,nx] = mat
return current_storage_dict[nz,nx]
def build_offcentered_alpha(sh,alpha):
# This computes the midpoints of alpha which will be used in the heterogenous laplacian
nz=sh[-1]
nx=sh[0]
v1z,v2z,v3z=np.ones(nz),np.ones(nz-1),np.zeros(nz)
v1z[-1],v3z[0]=2.0,2.0
v1x,v2x,v3x=np.ones(nx),np.ones(nx-1),np.zeros(nx)
v1x[-1],v3x[0]=2.0,2.0
v3z=v3z.reshape(1,nz)
v3x=v3x.reshape(1,nx)
Lz1=np.array(spsp.diags([v1z,v2z],[0,1]).todense())
Lx1=np.array(spsp.diags([v1x,v2x],[0,1]).todense())
Lz=np.matrix(0.5*np.concatenate((v3z,Lz1),axis=0))
Lx=np.matrix(0.5*np.concatenate((v3x,Lx1),axis=0))
# Lz and Lx simply (of length nz and nx respectively) act on a vector and return one which is one entry larger than before,
# with each entry being a weighted sum of the two adjacent entries. Boundary values are preserved.
P=build_permutation_matrix(nz,nx)
alpha_perm=P*alpha
alpha_z,alpha_x=list(),list()
for i in range(nx):
alpha_z.append(Lz*alpha[nz*i:nz*(i+1)])
for i in range(nz):
alpha_x.append(Lx*alpha_perm[nx*i:nx*(i+1)])
return alpha_x, alpha_z
def build_heterogenous_matrices(sh,deltas,alpha=None,rp=None):
# This builds 1st order, forward and backward derivative matrices.
# alpha is a vector which goes inside of the operator, div (alpha grad)
# It can also build a hetergenous laplacian (if rp is not None),which differs from the above
# heterogenous laplacian only in its boundary conditions.
nz=sh[-1]
nx=sh[0]
#builds z derivative matrix
v=-np.ones(nx*nz)/deltas[-1]
v1=np.ones(nx*nz-1)/deltas[-1]
v1[list(range(nz-1,nz*nx-1,nz))]=0.0 # repair boundary terms.
D2=spsp.diags([v,v1],[0,1])
D2_tilda=-1.0*D2.T
#builds x derivative matrix
p=-np.ones(nx*nz)/deltas[0]
p1=np.ones(nx*nz-1)/deltas[0]
#p[range(nx-1,nz*nx,nx)]=-1.0
p1[list(range(nx-1,nz*nx-1,nx))]=0.0
D1=spsp.diags([p,p1],[0,1])
D1_tilda=-1.0*D1.T
P=build_permutation_matrix(nz,nx)
P_inv=build_permutation_matrix(nx,nz)
#builds exact adjoint gradient for z.
v=-np.ones(nx*nz)/deltas[-1]
v1=np.ones(nx*nz-1)/deltas[-1]
v1[list(range(nz-1,nz*nx-1,nz))]=0.0
v1[list(range(0,nz*nx-1,nz))]=0.0
D2_adj=spsp.diags([v,v1],[0,1])
#builds exact adjoint gradient for x.
p=-np.ones(nx*nz)/deltas[0]
p1=np.ones(nx*nz-1)/deltas[0]
p1[list(range(nx-1,nz*nx-1,nx))]=0.0
p1[list(range(0,nz*nx-1,nx))]=0.0
D1_adj=spsp.diags([p,p1],[0,1])
if rp is not None:
A=spsp.diags([alpha],[0])
Lap = D2_tilda*A*D2+P_inv*D1_tilda*P*A*P_inv*D1*P
return Lap
else:
D1=P_inv*D1*P
D1_adj=P_inv*D1_adj*P
return [D1,D1_adj],[D2,D2_adj]
if __name__ == '__main__':
from pysit import *
from pysit.gallery import horizontal_reflector
bc = Dirichlet()
dim = 2
deriv = 1 # 2
order = 4
if dim == 1:
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(z_config)
m = CartesianMesh(d, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
Dz = build_derivative_matrix(m, deriv, order, dimension='z').todense()
if dim == 2:
x_config = (0.0, 7.0, bc, bc)
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(x_config, z_config)
m = CartesianMesh(d, 7, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
Dx = build_derivative_matrix(m, deriv, order, dimension='x').todense()
Dz = build_derivative_matrix(m, deriv, order, dimension='z').todense()
if dim == 3:
x_config = (0.0, 7.0, bc, bc)
y_config = (0.0, 7.0, bc, bc)
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(x_config, x_config, z_config)
m = CartesianMesh(d, 7, 7, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
sh = m.shape(as_grid=True)
Dx = build_derivative_matrix(m, deriv, order, dimension=['x']).todense()
Dy = build_derivative_matrix(m, deriv, order, dimension=['y']).todense()
Dz = build_derivative_matrix(m, deriv, order, dimension=['z']).todense()
x=(Dx*C).reshape(sh)
y=(Dy*C).reshape(sh)
z=(Dz*C).reshape(sh)
print(x[:,:,0]) # should have ones all in first and last rows
print(y[:,:,0]) # should have ones all in first and last columns
print(z[0,0,:]) # should have ones at the ends | random_line_split | |
derivatives.py |
import numpy as np
import scipy.sparse as spsp
import pyamg
from pyamg.gallery import stencil_grid
from pysit.util.derivatives.fdweight import *
from pysit.util.matrix_helpers import make_diag_mtx
__all__ = ['build_derivative_matrix','build_derivative_matrix_VDA', 'build_heterogenous_matrices','build_permutation_matrix','_build_staggered_first_derivative_matrix_part', 'build_linear_interpolation_matrix_part']
def build_derivative_matrix(mesh,
derivative, order_accuracy,
**kwargs):
if mesh.type == 'structured-cartesian':
return _build_derivative_matrix_structured_cartesian(mesh, derivative, order_accuracy, **kwargs)
else:
raise NotImplementedError('Derivative matrix builder not available (yet) for {0} meshes.'.format(mesh.discretization))
def build_derivative_matrix_VDA(mesh, derivative, order_accuracy, alpha = None, **kwargs): #variable density acoustic
if mesh.type == 'structured-cartesian':
return _build_derivative_matrix_staggered_structured_cartesian(mesh, derivative, order_accuracy, alpha=alpha, **kwargs)
else:
raise NotImplementedError('Derivative matrix builder not available (yet) for {0} meshes.'.format(mesh.discretization))
def _set_bc(bc):
if bc.type == 'pml':
return bc.boundary_type
elif bc.type == 'ghost':
return ('ghost', bc.n)
else:
return bc.type
def _build_derivative_matrix_structured_cartesian(mesh,
derivative, order_accuracy,
dimension='all',
use_shifted_differences=False,
return_1D_matrix=False,
**kwargs):
dims = list()
if type(dimension) is str:
dimension = [dimension]
if 'all' in dimension:
if mesh.dim > 1:
dims.append('x')
if mesh.dim > 2:
dims.append('y')
dims.append('z')
else:
for d in dimension:
dims.append(d)
# sh[-1] is always 'z'
# sh[0] is always 'x' if in 2 or 3d
# sh[1] is always 'y' if dim > 2
sh = mesh.shape(include_bc = True, as_grid = True)
if mesh.dim > 1:
if 'x' in dims:
lbc = _set_bc(mesh.x.lbc)
rbc = _set_bc(mesh.x.rbc)
delta = mesh.x.delta
Dx = _build_derivative_matrix_part(sh[0], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dx = spsp.csr_matrix((sh[0],sh[0]))
if mesh.dim > 2:
if 'y' in dims:
lbc = _set_bc(mesh.y.lbc)
rbc = _set_bc(mesh.y.rbc)
delta = mesh.y.delta
Dy = _build_derivative_matrix_part(sh[1], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dy = spsp.csr_matrix((sh[1],sh[1]))
if 'z' in dims:
lbc = _set_bc(mesh.z.lbc)
rbc = _set_bc(mesh.z.rbc)
delta = mesh.z.delta
Dz = _build_derivative_matrix_part(sh[-1], derivative, order_accuracy, h=delta, lbc=lbc, rbc=rbc, use_shifted_differences=use_shifted_differences)
else:
Dz = spsp.csr_matrix((sh[-1],sh[-1]))
if return_1D_matrix and 'all' not in dims:
if 'z' in dims:
mtx = Dz
elif 'y' in dims:
mtx = Dy
elif 'x' in dims:
mtx = Dx
else:
if mesh.dim == 1:
mtx = Dz.tocsr()
if mesh.dim == 2:
# kronsum in this order because wavefields are stored with 'z' in row
# and 'x' in columns, then vectorized in 'C' order
mtx = spsp.kronsum(Dz, Dx, format='csr')
if mesh.dim == 3:
mtx = spsp.kronsum(Dz, spsp.kronsum(Dy,Dx, format='csr'), format='csr')
return mtx
def _build_derivative_matrix_part(npoints, derivative, order_accuracy, h=1.0, lbc='d', rbc='d', use_shifted_differences=False):
if order_accuracy%2:
raise ValueError('Only even accuracy orders supported.')
centered_coeffs = centered_difference(derivative, order_accuracy)/(h**derivative)
mtx = stencil_grid(centered_coeffs, (npoints, ), format='lil')
max_shift= order_accuracy//2
if use_shifted_differences:
# Left side
odd_even_offset = 1-derivative%2
for i in range(0, max_shift):
coeffs = shifted_difference(derivative, order_accuracy, -(max_shift+odd_even_offset)+i)
mtx[i,0:len(coeffs)] = coeffs/(h**derivative)
# Right side
for i in range(-1, -max_shift-1,-1):
coeffs = shifted_difference(derivative, order_accuracy, max_shift+i+odd_even_offset)
mtx[i,slice(-1, -(len(coeffs)+1),-1)] = coeffs[::-1]/(h**derivative)
if 'd' in lbc: #dirichlet
mtx[0,:] = 0
mtx[0,0] = 1.0
elif 'n' in lbc: #neumann
mtx[0,:] = 0
coeffs = shifted_difference(1, order_accuracy, -max_shift)/h
coeffs /= (-1*coeffs[0])
coeffs[0] = 0.0
mtx[0,0:len(coeffs)] = coeffs
elif type(lbc) is tuple and 'g' in lbc[0]: #ghost
n_ghost_points = int(lbc[1])
mtx[0:n_ghost_points,:] = 0
for i in range(n_ghost_points):
mtx[i,i] = 1.0
if 'd' in rbc:
mtx[-1,:] = 0
mtx[-1,-1] = 1.0
elif 'n' in rbc:
mtx[-1,:] = 0
coeffs = shifted_difference(1, order_accuracy, max_shift)/h
coeffs /= (-1*coeffs[-1])
coeffs[-1] = 0.0
mtx[-1,slice(-1, -(len(coeffs)+1),-1)] = coeffs[::-1]
elif type(rbc) is tuple and 'g' in rbc[0]:
n_ghost_points = int(rbc[1])
mtx[slice(-1,-(n_ghost_points+1), -1),:] = 0
for i in range(n_ghost_points):
mtx[-i-1,-i-1] = 1.0
return mtx.tocsr()
def _build_derivative_matrix_staggered_structured_cartesian(mesh,
derivative, order_accuracy,
dimension='all',
alpha = None,
return_1D_matrix=False,
**kwargs):
#Some of the operators could be cached the same way I did to make 'build_permutation_matrix' faster.
#Could be considered if the current speed is ever considered to be insufficient.
|
def _turn_sparse_rows_to_identity(A, rows_to_keep, rows_to_change):
#Convenience function for removing some rows from the sparse laplacian
#Had some major performance problems by simply slicing in all the matrix formats I tried.
nr,nc = A.shape
if nr != nc:
raise Exception('assuming square matrix')
#Create diagonal matrix. When we multiply A by this matrix we can remove rows
rows_to_keep_diag = np.zeros(nr, dtype='int32')
rows_to_keep_diag[rows_to_keep] = 1
diag_mat_remove_rows = make_diag_mtx(rows_to_keep_diag)
#The matrix below has the rows we want to turn into identity turned to 0
A_with_rows_removed = diag_mat_remove_rows*A
#Make diag matrix that has diagonal entries in the rows we want to be identity
rows_to_change_diag = np.zeros(nr, dtype='int32')
rows_to_change_diag[rows_to_change] = 1
A_with_identity_rows = make_diag_mtx(rows_to_change_diag)
A_modified = A_with_rows_removed + A_with_identity_rows
return A_modified
def _build_staggered_first_derivative_matrix_part(npoints, order_accuracy, h=1.0, lbc='d', rbc='d'):
#npoints is the number of regular grid points.
if order_accuracy%2:
raise ValueError('Only even accuracy orders supported.')
#coefficients for the first derivative evaluated in between two regular grid points.
stagger_coeffs = staggered_difference(1, order_accuracy)/h
#Use the old 'stencil_grid' routine.
#Because we do a staggered grid we need to shift the coeffs one entry and the matrix will not be square
incorrect_mtx = stencil_grid(np.insert(stagger_coeffs,0,0), (npoints, ), format='lil')
#Get rid of the last row which we dont want in our staggered approach
mtx = incorrect_mtx[0:-1,:]
if 'n' in lbc or 'n' in rbc:
raise ValueError('Did not yet implement Neumann boundaries. Perhaps looking at the centered grid implementation would be a good start?')
if 'g' in lbc or 'g' in rbc:
raise ValueError('Did not yet implement this boundary condition yet. Perhaps looking at the centered grid implementation would be a good start?')
#For dirichlet we don't need to alter the matrix for the first derivative for the boundary nodes as is done in the centered approach
#The reason is that the first staggered point we evaluate at is in the interior of the domain.
return mtx.tocsr()
def build_linear_interpolation_matrix_part(npoints):
#same logic as in function 'build_staggered_first_derivative_matrix_part
coeffs = np.array([0.5, 0.5])
incorrect_mtx = stencil_grid(np.insert(coeffs,0,0), (npoints, ), format='lil')
mtx = incorrect_mtx[0:-1,:]
return mtx.tocsr()
def apply_derivative(mesh, derivative, order_accuracy, vector, **kwargs):
A = build_derivative_matrix(mesh, derivative, order_accuracy, **kwargs)
return A*vector
def build_permutation_matrix(nz,nx):
# This creates a permutation matrix which transforms a column vector of nx
# "component" columns of size nz, to the corresponding column vector of nz
# "component" columns of size nx.
def generate_matrix(nz, nx): #local function
P = spsp.lil_matrix((nz*nx,nz*nx))
for i in range(nz): #Looping is not efficient, but we only need to do it once as setup
for j in range(nx):
P[nx*i+j,i+j*nz]=1
return P.tocsr()
#Start body of code for 'build_permutation_matrix'
try: #See if there are already stored results from previous calls to this function
current_storage_dict = build_permutation_matrix.storage_dict
except: #If not, initialize
current_storage_dict = dict()
build_permutation_matrix.storage_dict = current_storage_dict
if (nz,nx) not in list(current_storage_dict.keys()): #Have not precomputed this!
mat = generate_matrix(nz,nx)
current_storage_dict[nz,nx] = mat
return current_storage_dict[nz,nx]
def build_offcentered_alpha(sh,alpha):
# This computes the midpoints of alpha which will be used in the heterogenous laplacian
nz=sh[-1]
nx=sh[0]
v1z,v2z,v3z=np.ones(nz),np.ones(nz-1),np.zeros(nz)
v1z[-1],v3z[0]=2.0,2.0
v1x,v2x,v3x=np.ones(nx),np.ones(nx-1),np.zeros(nx)
v1x[-1],v3x[0]=2.0,2.0
v3z=v3z.reshape(1,nz)
v3x=v3x.reshape(1,nx)
Lz1=np.array(spsp.diags([v1z,v2z],[0,1]).todense())
Lx1=np.array(spsp.diags([v1x,v2x],[0,1]).todense())
Lz=np.matrix(0.5*np.concatenate((v3z,Lz1),axis=0))
Lx=np.matrix(0.5*np.concatenate((v3x,Lx1),axis=0))
# Lz and Lx simply (of length nz and nx respectively) act on a vector and return one which is one entry larger than before,
# with each entry being a weighted sum of the two adjacent entries. Boundary values are preserved.
P=build_permutation_matrix(nz,nx)
alpha_perm=P*alpha
alpha_z,alpha_x=list(),list()
for i in range(nx):
alpha_z.append(Lz*alpha[nz*i:nz*(i+1)])
for i in range(nz):
alpha_x.append(Lx*alpha_perm[nx*i:nx*(i+1)])
return alpha_x, alpha_z
def build_heterogenous_matrices(sh,deltas,alpha=None,rp=None):
# This builds 1st order, forward and backward derivative matrices.
# alpha is a vector which goes inside of the operator, div (alpha grad)
# It can also build a hetergenous laplacian (if rp is not None),which differs from the above
# heterogenous laplacian only in its boundary conditions.
nz=sh[-1]
nx=sh[0]
#builds z derivative matrix
v=-np.ones(nx*nz)/deltas[-1]
v1=np.ones(nx*nz-1)/deltas[-1]
v1[list(range(nz-1,nz*nx-1,nz))]=0.0 # repair boundary terms.
D2=spsp.diags([v,v1],[0,1])
D2_tilda=-1.0*D2.T
#builds x derivative matrix
p=-np.ones(nx*nz)/deltas[0]
p1=np.ones(nx*nz-1)/deltas[0]
#p[range(nx-1,nz*nx,nx)]=-1.0
p1[list(range(nx-1,nz*nx-1,nx))]=0.0
D1=spsp.diags([p,p1],[0,1])
D1_tilda=-1.0*D1.T
P=build_permutation_matrix(nz,nx)
P_inv=build_permutation_matrix(nx,nz)
#builds exact adjoint gradient for z.
v=-np.ones(nx*nz)/deltas[-1]
v1=np.ones(nx*nz-1)/deltas[-1]
v1[list(range(nz-1,nz*nx-1,nz))]=0.0
v1[list(range(0,nz*nx-1,nz))]=0.0
D2_adj=spsp.diags([v,v1],[0,1])
#builds exact adjoint gradient for x.
p=-np.ones(nx*nz)/deltas[0]
p1=np.ones(nx*nz-1)/deltas[0]
p1[list(range(nx-1,nz*nx-1,nx))]=0.0
p1[list(range(0,nz*nx-1,nx))]=0.0
D1_adj=spsp.diags([p,p1],[0,1])
if rp is not None:
A=spsp.diags([alpha],[0])
Lap = D2_tilda*A*D2+P_inv*D1_tilda*P*A*P_inv*D1*P
return Lap
else:
D1=P_inv*D1*P
D1_adj=P_inv*D1_adj*P
return [D1,D1_adj],[D2,D2_adj]
if __name__ == '__main__':
from pysit import *
from pysit.gallery import horizontal_reflector
bc = Dirichlet()
dim = 2
deriv = 1 # 2
order = 4
if dim == 1:
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(z_config)
m = CartesianMesh(d, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
Dz = build_derivative_matrix(m, deriv, order, dimension='z').todense()
if dim == 2:
x_config = (0.0, 7.0, bc, bc)
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(x_config, z_config)
m = CartesianMesh(d, 7, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
Dx = build_derivative_matrix(m, deriv, order, dimension='x').todense()
Dz = build_derivative_matrix(m, deriv, order, dimension='z').todense()
if dim == 3:
x_config = (0.0, 7.0, bc, bc)
y_config = (0.0, 7.0, bc, bc)
z_config = (0.0, 7.0, bc, bc)
d = RectangularDomain(x_config, x_config, z_config)
m = CartesianMesh(d, 7, 7, 7)
# Generate true wave speed
C, C0 = horizontal_reflector(m)
D = build_derivative_matrix(m, deriv, order, dimension='all').todense()
sh = m.shape(as_grid=True)
Dx = build_derivative_matrix(m, deriv, order, dimension=['x']).todense()
Dy = build_derivative_matrix(m, deriv, order, dimension=['y']).todense()
Dz = build_derivative_matrix(m, deriv, order, dimension=['z']).todense()
x=(Dx*C).reshape(sh)
y=(Dy*C).reshape(sh)
z=(Dz*C).reshape(sh)
print(x[:,:,0]) # should have ones all in first and last rows
print(y[:,:,0]) # should have ones all in first and last columns
print(z[0,0,:]) # should have ones at the ends
| import time
tt = time.time()
if return_1D_matrix:
raise Exception('Not yet implemented')
if derivative < 1 or derivative > 2:
raise ValueError('Only defined for first and second order right now')
if derivative == 1 and dimension not in ['x', 'y', 'z']:
raise ValueError('First derivative requires a direciton')
sh = mesh.shape(include_bc = True, as_grid = True) #Will include PML padding
if len(sh) != 2: raise Exception('currently hardcoded 2D implementation, relatively straight-forward to change. Look at the function build_derivative_matrix to get a more general function.')
nx = sh[0]
nz = sh[-1]
#Currently I am working with density input on the regular grid.
#In the derivation of the variable density solver we only require density at the stagger points
#For now I am just interpolating density defined on regular points towards the stagger points and use that as 'density model'.
#Later it is probably better to define the density directly on the stagger points (and evaluate density gradient there to update directly at these points?)
if type(alpha) == None: #If no alpha is given, we set it to a uniform vector. The result should be the homogeneous Laplacian.
alpha = np.ones(nx*nz)
alpha = alpha.flatten() #make 1D
dx = mesh.x.delta
dz = mesh.z.delta
#Get 1D linear interpolation matrices
Jx_1d = build_linear_interpolation_matrix_part(nx)
Jz_1d = build_linear_interpolation_matrix_part(nz)
#Get 1D derivative matrix for first spatial derivative using the desired order of accuracy
lbc_x = _set_bc(mesh.x.lbc)
rbc_x = _set_bc(mesh.x.rbc)
lbc_z = _set_bc(mesh.z.lbc)
rbc_z = _set_bc(mesh.z.rbc)
Dx_1d = _build_staggered_first_derivative_matrix_part(nx, order_accuracy, h=dx, lbc = lbc_x, rbc = rbc_x)
Dz_1d = _build_staggered_first_derivative_matrix_part(nz, order_accuracy, h=dz, lbc = lbc_z, rbc = rbc_z)
#Some empty matrices of the right shape so we can use kronsum to get the proper 2D matrices for the operations we want.
#The same is used in the homogeneous 'build_derivative_matrix' function.
Ix = spsp.eye(nx)
Iz = spsp.eye(nz)
Dx_2d = spsp.kron(Dx_1d, Iz, format='csr')
if dimension == 'x' and derivative == 1:
return Dx_2d
Dz_2d = spsp.kron(Ix, Dz_1d, format='csr')
if dimension == 'z' and derivative == 1:
return Dz_2d
#If we are evaluating this we want to make the heterogeneous Laplacian
Jx_2d = spsp.kron(Jx_1d, Iz, format='csr')
Jz_2d = spsp.kron(Ix, Jz_1d, format='csr')
#alpha interpolated to x stagger points. Make diag mat
diag_alpha_x = make_diag_mtx(Jx_2d*alpha)
#alpha interpolated to z stagger points. Make diag mat
diag_alpha_z = make_diag_mtx(Jz_2d*alpha)
#Create laplacian components
#The negative transpose of Dx and Dz takes care of the divergence term of the heterogeneous laplacian
Dxx_2d = -Dx_2d.T*diag_alpha_x*Dx_2d
Dzz_2d = -Dz_2d.T*diag_alpha_z*Dz_2d
#Correct the Laplacian around the boundary. This is also done in the homogeneous Laplacian
#I want the heterogeneous Laplacian to be the same as the homogeneous Laplacian when alpha is uniform
#This is the only part of the Laplacian that deviates from symmetry, just as in the homogeneous case.
#But because of these conditions on the dirichlet boundary the wavefield will always equal 0 there and this deviation from symmetry is fine.
#For indexing, get list of all boundary node numbers
left_node_nrs = np.arange(nz)
right_node_nrs = np.arange((nx-1)*nz,nx*nz)
top_node_nrs = np.arange(nz,(nx-1)*nz,nz) #does not include left and right top node
bot_node_nrs = top_node_nrs + nz - 1 #does not include left and right top node
all_boundary_node_nrs = np.concatenate((left_node_nrs, right_node_nrs, top_node_nrs, bot_node_nrs))
nb = all_boundary_node_nrs.size
L = Dxx_2d + Dzz_2d
all_node_numbers = np.arange(0,(nx*nz), dtype='int32')
internal_node_numbers = list(set(all_node_numbers) - set(all_boundary_node_nrs))
L = L.tocsr() #so we can extract rows efficiently
#Operation below fixes the boundary rows quite efficiently.
L_fixed = _turn_sparse_rows_to_identity(L, internal_node_numbers, all_boundary_node_nrs)
return L_fixed.tocsr() | identifier_body |
analyse_magnetic_linecuts_3D.py | import json
import numpy as np
from scipy.optimize import least_squares
from scipy.interpolate import interp1d
from scipy.integrate import trapz
# import matplotlib.pyplot as plt
from tqdm import tqdm
from skspatial.objects import Plane
from skspatial.objects import Points
from skspatial.plotting import plot_3d
from skspatial.plotting import plt
from util import Image, perpendicular_linecuts, load_magnetic_data, normalised_gaussian, hist_and_fit_gauss
RESAMPLE_FACTOR = 20
def magnetic_edge(x, x0, Ms, theta, phi, d_x, d_z, t):
u = x-x0
u2 = u**2
return 2 * Ms * t * ( np.sin(theta) * np.cos(phi) * d_x / (u**2 + d_x**2) - np.cos(theta) * u / (u**2 + d_z**2) )
def evaluate_cuts(params, lcx, lcy, t):
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(params)
flcx = magnetic_edge(lcx, x0_x, Ms, theta, phi, d_x, d_z, t) + c_x
flcy = magnetic_edge(lcy, x0_y, Ms, theta, phi + ((-1)**int(rot))*np.pi/2, d_x, d_z, t) + c_y
return flcx, flcy
def evaluate_gaussian_cuts(params, lcx, lcy, fwhm, t):
RESAMPLE_FACTOR = 10 # to interpolate and then decimate by
flcx, flcy = evaluate_cuts(params, lcx, lcy, t)
fx = interp1d(lcx, flcx)
fy = interp1d(lcy, flcy)
x_smooth = np.linspace(lcx[0], lcx[-1], lcx.shape[0] * RESAMPLE_FACTOR)
y_smooth = np.linspace(lcy[0], lcy[-1], lcy.shape[0] * RESAMPLE_FACTOR)
dx = x_smooth[1] - x_smooth[0]
dy = y_smooth[1] - y_smooth[0]
kernel_x = normalised_gaussian(x_smooth-(lcx[0]+lcx[-1])/2, fwhm)
kernel_y = normalised_gaussian(y_smooth-(lcy[0]+lcy[-1])/2, fwhm)
cflcx_smooth = np.convolve(fx(x_smooth), kernel_x, mode='same') * dx
cflcy_smooth = np.convolve(fy(y_smooth), kernel_y, mode='same') * dy
# from scipy.integrate import trapezoid
# print(trapezoid(x_smooth, kernel_x))
# fig, axes = plt.subplots(2, 3)
# axes[0][0].plot(fx(x_smooth), label="edge")
# axes[0][1].plot(cflcx_smooth, label="edge*G")
# axes[0][2].plot(x_smooth, kernel_x, label="G")
# axes[1][0].plot(fy(y_smooth), label="edge")
# axes[1][1].plot(cflcy_smooth, label="edge*G")
# axes[1][2].plot(y_smooth, kernel_y, label="G")
# [[ax.legend() for ax in row] for row in axes]
# plt.show()
# quit()
# now decimate
cflcx = cflcx_smooth[::RESAMPLE_FACTOR]
cflcy = cflcy_smooth[::RESAMPLE_FACTOR]
return cflcx, cflcy
def evaluate_gaussian_layer_cuts(params, lcx, lcy, fwhm, t, NVt):
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(params)
zx = np.linspace(d_x, d_x+NVt, 10)
zy = np.linspace(d_z, d_z+NVt, 10)
fx = np.zeros((zx.size, lcx.size))
fy = np.zeros((zy.size, lcy.size))
for i in range(len(zx)):
_params = compact_params(x0_x, x0_y, Ms, theta, phi, rot, zx[i], zy[i], c_x, c_y)
fx[i], fy[i] = evaluate_gaussian_cuts(_params, lcx, lcy, fwhm, t)
return trapz(fx, zx, axis=0) / NVt, trapz(fy, zy, axis=0) / NVt
def extract_params(params):
x0_x = params[0]
x0_y = params[1]
Ms = params[2]
theta = params[3]
phi = params[4]
rot = params[5]
d_x = params[6]
d_z = params[7]
c_x = params[8]
c_y = params[9]
return x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y
def compact_params(x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y):
return [x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y]
def two_cut_residual(params, lcx, lcxv, lcy, lcyv, t):
flcx, flcy = evaluate_cuts(params, lcx, lcy, t)
return np.concatenate([lcxv-flcx, lcyv-flcy])
def two_cut_gaussian_residual(params, lcx, lcxv, lcy, lcyv, fwhm, t):
cflcx, cflcy = evaluate_gaussian_cuts(params, lcx, lcy, fwhm, t)
return np.concatenate([lcxv-cflcx, lcyv-cflcy])
def two_cut_gaussian_layer_residual(params, lcx, lcxv, lcy, lcyv, fwhm, t, NVt):
icflcx, icflcy = evaluate_gaussian_layer_cuts(params, lcx, lcy, fwhm, t, NVt)
return np.concatenate([lcxv-icflcx, lcyv-icflcy])
def get_bounds(lcx, lcy):
lower_bounds = compact_params(
x0_x=lcx[0],
x0_y=lcy[0],
Ms=-1,
theta=0,
phi=-180*np.pi/180,
rot=-np.inf,
d_x=0,
d_z=0,
c_x=-np.inf,
c_y=-np.inf
)
upper_bounds = compact_params(
x0_x=lcx[-1],
x0_y=lcy[-1],
Ms=1,
theta=90*np.pi/180,
phi=180*np.pi/180,
rot=np.inf,
d_x=np.inf,
d_z=np.inf,
c_x=np.inf,
c_y=np.inf
)
bounds = np.zeros((2, len(lower_bounds)))
bounds[0] = lower_bounds
bounds[1] = upper_bounds
return bounds
def get_x_guess(lcx, lcy):
return compact_params(
x0_x=lcx[len(lcx)//2],
x0_y=lcy[len(lcy)//2],
Ms=1e-2,
theta=30*np.pi/180,
phi=0,
rot=0,
d_x=3e-6,
d_z=3e-6,
c_x=0,
c_y=0
)
def fit_magnetic_edge(lcx, lcxv, lcy, lcyv, t):
result = least_squares(two_cut_residual, args=(
lcx, lcxv, lcy, lcyv, t), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
def fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, fwhm, t):
result = least_squares(two_cut_gaussian_residual, args=(
lcx, lcxv, lcy, lcyv, fwhm, t), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
def fit_magnetic_edge_with_gaussian_layer(lcx, lcxv, lcy, lcyv, fwhm, t, NVt):
result = least_squares(two_cut_gaussian_layer_residual, args=(
lcx, lcxv, lcy, lcyv, fwhm, t, NVt), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
######################################################################################
def main():
magnetic_layer_thickness = 1e-9
NV_layer_thickness = 1e-6
N_linecuts = 10
linecut_width = 20e-6
img, optical_fwhm, p1, p2, p3, p4 = load_magnetic_data("magnetic_20x.json")
optical_fwhm_px = optical_fwhm / img.px_to_m(1) * RESAMPLE_FACTOR
assert optical_fwhm_px > 10
d_x_vals = []
d_y_vals = []
Ms_vals = []
theta_vals = []
plot_every = N_linecuts**2 // 5
d_pts = np.zeros((2, N_linecuts, 2))
d_map = np.zeros((2, N_linecuts))
pbar = tqdm(total=N_linecuts)
iterx = perpendicular_linecuts(img, p1, p2, linecut_width, N_linecuts, return_center=True)
itery = perpendicular_linecuts(img, p3, p4, linecut_width, N_linecuts, return_center=True)
for ((lcx, lcxv), cx), ((lcy, lcyv), cz) in zip(iterx, itery):
pbar.update()
result = fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness)
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(result.x)
if 0 < d_x < 40e-6 and 0 < d_z < 40e-6:
|
d_pts = d_pts.reshape(2*N_linecuts, 2)
# plt.scatter(pts[:, 0], pts[:, 1], c=d_map.ravel())
# fig = plt.figure()
# ax = fig.add_subplot(projection='3d')
# ax.scatter(pts[:, 0], pts[:, 1], d_map.ravel())
# plt.show()
pts = np.zeros((2*N_linecuts, 3))
pts[:,0] = d_pts[:,0]
pts[:,1] = d_pts[:,1]
pts[:,2] = d_map.ravel()
pts = pts[np.where(pts[:,2] != 0)]
pts = Points(pts)
print(pts)
plane = Plane.best_fit(pts)
xlim, ylim = img.get_size_m()
print(xlim, ylim)
plot_3d(
pts.plotter(c='k', s=50, depthshade=False),
# plane.plotter(alpha=0.2, lims_x=(-xlim/2, xlim/2), lims_y=(-ylim/2, ylim/2)),
)
plt.xlim(0, xlim)
plt.ylim(0, ylim)
ax = plt.gca()
ax.set_zlim(0, 5e-6)
X, Y, Z = plane.to_mesh(lims_x=(-ylim/2, ylim/2), lims_y=(-ylim/2, ylim/2))
x = np.linspace(0, xlim, img.data.shape[0])
y = np.linspace(0, ylim, img.data.shape[1])
xx, yy = np.meshgrid(x, y)
zz = np.linspace(start=np.linspace(Z[0][0],Z[0][1], img.data.shape[0]), stop=np.linspace(Z[1][0],Z[1][1], img.data.shape[0]), num=img.data.shape[1])
# zz = # np.ones_like(xx) * np.max(d_map)
# ax.contourf(xx, yy, img.data, 0, zdir='z', vmin=-20e-6, vmax=20e-6, cmap="BrBG")
data = np.clip((img.data+20e-6)/40e-6, 0, 1)
ax.plot_surface(xx, yy, zz, rstride=16, cstride=16, facecolors=plt.cm.bwr(data), shade=False, alpha=0.2)
xx, yy = np.meshgrid([x[0],x[-1]], [y[0],y[-1]])
zz = np.zeros_like(xx)
ax.plot_surface(xx, yy, zz)
# plt.figure()
# plt.imshow(data)
# plt.colorbar()
print(f"angle = {np.arccos(np.dot([0, 0, 1], plane.normal))}")
plt.show()
# for lcx, lcxv, cx in perpendicular_linecuts(img, p1, p2, linecut_width, N_linecuts, return_center=True):
# for lcy, lcyv, cz in perpendicular_linecuts(img, p3, p4, linecut_width, N_linecuts, return_center=True):
# pbar.update()
# # result = fit_magnetic_edge(lcx, lcxv, lcy, lcyv, magnetic_layer_thickness)
# # flcx, flcy = evaluate_cuts(result.x, lcx, lcy)
# result = fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness)
# # result = fit_magnetic_edge_with_gaussian_layer(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness, NV_layer_thickness)
# x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(result.x)
# if d_x < 10e-6 and d_z < 10e-6:
# Ms_vals.append(abs(Ms))
# d_x_vals.append(d_x)
# d_y_vals.append(d_z)
# theta_vals.append(theta)
# if pbar.n % plot_every == 0:
# flcx, flcy = evaluate_gaussian_cuts(result.x, lcx, lcy, optical_fwhm, magnetic_layer_thickness)
# # flcx, flcy = evaluate_gaussian_layer_cuts(result.x, lcx, lcy, optical_fwhm, magnetic_layer_thickness, NV_layer_thickness)
# fig, axes = plt.subplots(1, 2)
# axes[0].plot(lcx*1e6, lcxv, 'x')
# axes[1].plot(lcy*1e6, lcyv, 'x')
# axes[0].set_xlabel('x (um)')
# axes[1].set_xlabel('y (um)')
# axes[0].plot(lcx*1e6, flcx)
# axes[1].plot(lcy*1e6, flcy)
# # print()
# # print(d)
# # plt.show()
# # quit()
# print()
# print(f"mean d_x = {np.mean(d_x_vals)*1e6:.2f}um")
# print(f"std d_x = {np.std(d_x_vals)*1e6:.2f}um")
# print(f"mean d_z = {np.mean(d_y_vals)*1e6:.2f}um")
# print(f"std d_z = {np.std(d_y_vals)*1e6:.2f}um")
# print(f"mean Ms = {np.mean(Ms_vals) * 1e7 / 1e6:.2e} MA/m")
# print(f"std Ms = {np.std(Ms_vals) * 1e7 / 1e6:.2e} MA/m")
# print(f"mean theta = {np.mean(theta_vals)*180/np.pi:.2f} deg")
# print(f"std theta = {np.std(theta_vals)*180/np.pi:.2f} deg")
# print()
# fit_d_x, std_d_x = hist_and_fit_gauss(np.array(d_x_vals), plot=True, title="d_x")
# fit_d_y, std_d_y = hist_and_fit_gauss(np.array(d_y_vals), plot=True, title="d_z")
# fit_theta, std_theta = hist_and_fit_gauss(np.array(theta_vals), plot=True, title="theta")
# fit_Ms, std_Ms = hist_and_fit_gauss(np.array(Ms_vals), plot=True, logplot=True, title="Ms")
# print(f"fit d_x = {fit_d_x*1e6:.2f} +/- {std_d_x*1e6:.2f} um")
# print(f"fit d_z = {fit_d_y*1e6:.2f} +/- {std_d_y*1e6:.2f} um")
# print(f"fit theta = {fit_theta*180/np.pi:.2f} +/- {std_theta*180/np.pi:.2f} um")
# print(f"fit Ms = {fit_Ms*1e7/1e6:.2f} +/- {std_Ms*1e7/1e6:.2f} MA/m")
# plt.show()
if __name__ == "__main__":
main() | d_pts[0,pbar.n-1] = cx
d_pts[1,pbar.n-1] = cz
d_map[0,pbar.n-1] = d_x
d_map[1,pbar.n-1] = d_z | conditional_block |
analyse_magnetic_linecuts_3D.py | import json
import numpy as np
from scipy.optimize import least_squares
from scipy.interpolate import interp1d
from scipy.integrate import trapz
# import matplotlib.pyplot as plt
from tqdm import tqdm
from skspatial.objects import Plane
from skspatial.objects import Points
from skspatial.plotting import plot_3d
from skspatial.plotting import plt
from util import Image, perpendicular_linecuts, load_magnetic_data, normalised_gaussian, hist_and_fit_gauss
RESAMPLE_FACTOR = 20
def magnetic_edge(x, x0, Ms, theta, phi, d_x, d_z, t):
u = x-x0
u2 = u**2
return 2 * Ms * t * ( np.sin(theta) * np.cos(phi) * d_x / (u**2 + d_x**2) - np.cos(theta) * u / (u**2 + d_z**2) )
def evaluate_cuts(params, lcx, lcy, t):
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(params)
flcx = magnetic_edge(lcx, x0_x, Ms, theta, phi, d_x, d_z, t) + c_x
flcy = magnetic_edge(lcy, x0_y, Ms, theta, phi + ((-1)**int(rot))*np.pi/2, d_x, d_z, t) + c_y
return flcx, flcy
def evaluate_gaussian_cuts(params, lcx, lcy, fwhm, t):
RESAMPLE_FACTOR = 10 # to interpolate and then decimate by
flcx, flcy = evaluate_cuts(params, lcx, lcy, t)
fx = interp1d(lcx, flcx)
fy = interp1d(lcy, flcy)
x_smooth = np.linspace(lcx[0], lcx[-1], lcx.shape[0] * RESAMPLE_FACTOR)
y_smooth = np.linspace(lcy[0], lcy[-1], lcy.shape[0] * RESAMPLE_FACTOR)
dx = x_smooth[1] - x_smooth[0]
dy = y_smooth[1] - y_smooth[0]
kernel_x = normalised_gaussian(x_smooth-(lcx[0]+lcx[-1])/2, fwhm)
kernel_y = normalised_gaussian(y_smooth-(lcy[0]+lcy[-1])/2, fwhm)
cflcx_smooth = np.convolve(fx(x_smooth), kernel_x, mode='same') * dx
cflcy_smooth = np.convolve(fy(y_smooth), kernel_y, mode='same') * dy
# from scipy.integrate import trapezoid
# print(trapezoid(x_smooth, kernel_x))
# fig, axes = plt.subplots(2, 3)
# axes[0][0].plot(fx(x_smooth), label="edge")
# axes[0][1].plot(cflcx_smooth, label="edge*G")
# axes[0][2].plot(x_smooth, kernel_x, label="G")
# axes[1][0].plot(fy(y_smooth), label="edge")
# axes[1][1].plot(cflcy_smooth, label="edge*G")
# axes[1][2].plot(y_smooth, kernel_y, label="G")
# [[ax.legend() for ax in row] for row in axes]
# plt.show()
# quit()
# now decimate
cflcx = cflcx_smooth[::RESAMPLE_FACTOR]
cflcy = cflcy_smooth[::RESAMPLE_FACTOR]
return cflcx, cflcy
def evaluate_gaussian_layer_cuts(params, lcx, lcy, fwhm, t, NVt):
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(params)
zx = np.linspace(d_x, d_x+NVt, 10)
zy = np.linspace(d_z, d_z+NVt, 10)
fx = np.zeros((zx.size, lcx.size))
fy = np.zeros((zy.size, lcy.size))
for i in range(len(zx)):
_params = compact_params(x0_x, x0_y, Ms, theta, phi, rot, zx[i], zy[i], c_x, c_y)
fx[i], fy[i] = evaluate_gaussian_cuts(_params, lcx, lcy, fwhm, t)
return trapz(fx, zx, axis=0) / NVt, trapz(fy, zy, axis=0) / NVt
def extract_params(params):
x0_x = params[0]
x0_y = params[1]
Ms = params[2]
theta = params[3]
phi = params[4]
rot = params[5]
d_x = params[6]
d_z = params[7]
c_x = params[8]
c_y = params[9]
return x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y
def compact_params(x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y):
return [x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y]
def two_cut_residual(params, lcx, lcxv, lcy, lcyv, t):
flcx, flcy = evaluate_cuts(params, lcx, lcy, t)
return np.concatenate([lcxv-flcx, lcyv-flcy])
def two_cut_gaussian_residual(params, lcx, lcxv, lcy, lcyv, fwhm, t):
cflcx, cflcy = evaluate_gaussian_cuts(params, lcx, lcy, fwhm, t)
return np.concatenate([lcxv-cflcx, lcyv-cflcy])
def two_cut_gaussian_layer_residual(params, lcx, lcxv, lcy, lcyv, fwhm, t, NVt):
icflcx, icflcy = evaluate_gaussian_layer_cuts(params, lcx, lcy, fwhm, t, NVt)
return np.concatenate([lcxv-icflcx, lcyv-icflcy])
def get_bounds(lcx, lcy):
lower_bounds = compact_params(
x0_x=lcx[0],
x0_y=lcy[0],
Ms=-1,
theta=0,
phi=-180*np.pi/180,
rot=-np.inf,
d_x=0,
d_z=0,
c_x=-np.inf,
c_y=-np.inf
)
upper_bounds = compact_params(
x0_x=lcx[-1],
x0_y=lcy[-1],
Ms=1,
theta=90*np.pi/180,
phi=180*np.pi/180,
rot=np.inf,
d_x=np.inf,
d_z=np.inf,
c_x=np.inf,
c_y=np.inf
)
bounds = np.zeros((2, len(lower_bounds)))
bounds[0] = lower_bounds
bounds[1] = upper_bounds
return bounds
def get_x_guess(lcx, lcy):
return compact_params(
x0_x=lcx[len(lcx)//2],
x0_y=lcy[len(lcy)//2],
Ms=1e-2,
theta=30*np.pi/180,
phi=0,
rot=0,
d_x=3e-6,
d_z=3e-6,
c_x=0,
c_y=0
)
def fit_magnetic_edge(lcx, lcxv, lcy, lcyv, t):
result = least_squares(two_cut_residual, args=(
lcx, lcxv, lcy, lcyv, t), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
def | (lcx, lcxv, lcy, lcyv, fwhm, t):
result = least_squares(two_cut_gaussian_residual, args=(
lcx, lcxv, lcy, lcyv, fwhm, t), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
def fit_magnetic_edge_with_gaussian_layer(lcx, lcxv, lcy, lcyv, fwhm, t, NVt):
result = least_squares(two_cut_gaussian_layer_residual, args=(
lcx, lcxv, lcy, lcyv, fwhm, t, NVt), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
######################################################################################
def main():
magnetic_layer_thickness = 1e-9
NV_layer_thickness = 1e-6
N_linecuts = 10
linecut_width = 20e-6
img, optical_fwhm, p1, p2, p3, p4 = load_magnetic_data("magnetic_20x.json")
optical_fwhm_px = optical_fwhm / img.px_to_m(1) * RESAMPLE_FACTOR
assert optical_fwhm_px > 10
d_x_vals = []
d_y_vals = []
Ms_vals = []
theta_vals = []
plot_every = N_linecuts**2 // 5
d_pts = np.zeros((2, N_linecuts, 2))
d_map = np.zeros((2, N_linecuts))
pbar = tqdm(total=N_linecuts)
iterx = perpendicular_linecuts(img, p1, p2, linecut_width, N_linecuts, return_center=True)
itery = perpendicular_linecuts(img, p3, p4, linecut_width, N_linecuts, return_center=True)
for ((lcx, lcxv), cx), ((lcy, lcyv), cz) in zip(iterx, itery):
pbar.update()
result = fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness)
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(result.x)
if 0 < d_x < 40e-6 and 0 < d_z < 40e-6:
d_pts[0,pbar.n-1] = cx
d_pts[1,pbar.n-1] = cz
d_map[0,pbar.n-1] = d_x
d_map[1,pbar.n-1] = d_z
d_pts = d_pts.reshape(2*N_linecuts, 2)
# plt.scatter(pts[:, 0], pts[:, 1], c=d_map.ravel())
# fig = plt.figure()
# ax = fig.add_subplot(projection='3d')
# ax.scatter(pts[:, 0], pts[:, 1], d_map.ravel())
# plt.show()
pts = np.zeros((2*N_linecuts, 3))
pts[:,0] = d_pts[:,0]
pts[:,1] = d_pts[:,1]
pts[:,2] = d_map.ravel()
pts = pts[np.where(pts[:,2] != 0)]
pts = Points(pts)
print(pts)
plane = Plane.best_fit(pts)
xlim, ylim = img.get_size_m()
print(xlim, ylim)
plot_3d(
pts.plotter(c='k', s=50, depthshade=False),
# plane.plotter(alpha=0.2, lims_x=(-xlim/2, xlim/2), lims_y=(-ylim/2, ylim/2)),
)
plt.xlim(0, xlim)
plt.ylim(0, ylim)
ax = plt.gca()
ax.set_zlim(0, 5e-6)
X, Y, Z = plane.to_mesh(lims_x=(-ylim/2, ylim/2), lims_y=(-ylim/2, ylim/2))
x = np.linspace(0, xlim, img.data.shape[0])
y = np.linspace(0, ylim, img.data.shape[1])
xx, yy = np.meshgrid(x, y)
zz = np.linspace(start=np.linspace(Z[0][0],Z[0][1], img.data.shape[0]), stop=np.linspace(Z[1][0],Z[1][1], img.data.shape[0]), num=img.data.shape[1])
# zz = # np.ones_like(xx) * np.max(d_map)
# ax.contourf(xx, yy, img.data, 0, zdir='z', vmin=-20e-6, vmax=20e-6, cmap="BrBG")
data = np.clip((img.data+20e-6)/40e-6, 0, 1)
ax.plot_surface(xx, yy, zz, rstride=16, cstride=16, facecolors=plt.cm.bwr(data), shade=False, alpha=0.2)
xx, yy = np.meshgrid([x[0],x[-1]], [y[0],y[-1]])
zz = np.zeros_like(xx)
ax.plot_surface(xx, yy, zz)
# plt.figure()
# plt.imshow(data)
# plt.colorbar()
print(f"angle = {np.arccos(np.dot([0, 0, 1], plane.normal))}")
plt.show()
# for lcx, lcxv, cx in perpendicular_linecuts(img, p1, p2, linecut_width, N_linecuts, return_center=True):
# for lcy, lcyv, cz in perpendicular_linecuts(img, p3, p4, linecut_width, N_linecuts, return_center=True):
# pbar.update()
# # result = fit_magnetic_edge(lcx, lcxv, lcy, lcyv, magnetic_layer_thickness)
# # flcx, flcy = evaluate_cuts(result.x, lcx, lcy)
# result = fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness)
# # result = fit_magnetic_edge_with_gaussian_layer(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness, NV_layer_thickness)
# x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(result.x)
# if d_x < 10e-6 and d_z < 10e-6:
# Ms_vals.append(abs(Ms))
# d_x_vals.append(d_x)
# d_y_vals.append(d_z)
# theta_vals.append(theta)
# if pbar.n % plot_every == 0:
# flcx, flcy = evaluate_gaussian_cuts(result.x, lcx, lcy, optical_fwhm, magnetic_layer_thickness)
# # flcx, flcy = evaluate_gaussian_layer_cuts(result.x, lcx, lcy, optical_fwhm, magnetic_layer_thickness, NV_layer_thickness)
# fig, axes = plt.subplots(1, 2)
# axes[0].plot(lcx*1e6, lcxv, 'x')
# axes[1].plot(lcy*1e6, lcyv, 'x')
# axes[0].set_xlabel('x (um)')
# axes[1].set_xlabel('y (um)')
# axes[0].plot(lcx*1e6, flcx)
# axes[1].plot(lcy*1e6, flcy)
# # print()
# # print(d)
# # plt.show()
# # quit()
# print()
# print(f"mean d_x = {np.mean(d_x_vals)*1e6:.2f}um")
# print(f"std d_x = {np.std(d_x_vals)*1e6:.2f}um")
# print(f"mean d_z = {np.mean(d_y_vals)*1e6:.2f}um")
# print(f"std d_z = {np.std(d_y_vals)*1e6:.2f}um")
# print(f"mean Ms = {np.mean(Ms_vals) * 1e7 / 1e6:.2e} MA/m")
# print(f"std Ms = {np.std(Ms_vals) * 1e7 / 1e6:.2e} MA/m")
# print(f"mean theta = {np.mean(theta_vals)*180/np.pi:.2f} deg")
# print(f"std theta = {np.std(theta_vals)*180/np.pi:.2f} deg")
# print()
# fit_d_x, std_d_x = hist_and_fit_gauss(np.array(d_x_vals), plot=True, title="d_x")
# fit_d_y, std_d_y = hist_and_fit_gauss(np.array(d_y_vals), plot=True, title="d_z")
# fit_theta, std_theta = hist_and_fit_gauss(np.array(theta_vals), plot=True, title="theta")
# fit_Ms, std_Ms = hist_and_fit_gauss(np.array(Ms_vals), plot=True, logplot=True, title="Ms")
# print(f"fit d_x = {fit_d_x*1e6:.2f} +/- {std_d_x*1e6:.2f} um")
# print(f"fit d_z = {fit_d_y*1e6:.2f} +/- {std_d_y*1e6:.2f} um")
# print(f"fit theta = {fit_theta*180/np.pi:.2f} +/- {std_theta*180/np.pi:.2f} um")
# print(f"fit Ms = {fit_Ms*1e7/1e6:.2f} +/- {std_Ms*1e7/1e6:.2f} MA/m")
# plt.show()
if __name__ == "__main__":
main() | fit_magnetic_edge_with_gaussian | identifier_name |
analyse_magnetic_linecuts_3D.py | import json
import numpy as np
from scipy.optimize import least_squares
from scipy.interpolate import interp1d
from scipy.integrate import trapz
# import matplotlib.pyplot as plt
from tqdm import tqdm
from skspatial.objects import Plane
from skspatial.objects import Points
from skspatial.plotting import plot_3d
from skspatial.plotting import plt
from util import Image, perpendicular_linecuts, load_magnetic_data, normalised_gaussian, hist_and_fit_gauss
RESAMPLE_FACTOR = 20
def magnetic_edge(x, x0, Ms, theta, phi, d_x, d_z, t):
u = x-x0
u2 = u**2
return 2 * Ms * t * ( np.sin(theta) * np.cos(phi) * d_x / (u**2 + d_x**2) - np.cos(theta) * u / (u**2 + d_z**2) )
def evaluate_cuts(params, lcx, lcy, t):
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(params)
flcx = magnetic_edge(lcx, x0_x, Ms, theta, phi, d_x, d_z, t) + c_x
flcy = magnetic_edge(lcy, x0_y, Ms, theta, phi + ((-1)**int(rot))*np.pi/2, d_x, d_z, t) + c_y
return flcx, flcy
def evaluate_gaussian_cuts(params, lcx, lcy, fwhm, t):
RESAMPLE_FACTOR = 10 # to interpolate and then decimate by
flcx, flcy = evaluate_cuts(params, lcx, lcy, t)
fx = interp1d(lcx, flcx)
fy = interp1d(lcy, flcy)
x_smooth = np.linspace(lcx[0], lcx[-1], lcx.shape[0] * RESAMPLE_FACTOR)
y_smooth = np.linspace(lcy[0], lcy[-1], lcy.shape[0] * RESAMPLE_FACTOR)
dx = x_smooth[1] - x_smooth[0]
dy = y_smooth[1] - y_smooth[0]
kernel_x = normalised_gaussian(x_smooth-(lcx[0]+lcx[-1])/2, fwhm)
kernel_y = normalised_gaussian(y_smooth-(lcy[0]+lcy[-1])/2, fwhm)
cflcx_smooth = np.convolve(fx(x_smooth), kernel_x, mode='same') * dx
cflcy_smooth = np.convolve(fy(y_smooth), kernel_y, mode='same') * dy
# from scipy.integrate import trapezoid
# print(trapezoid(x_smooth, kernel_x))
# fig, axes = plt.subplots(2, 3)
# axes[0][0].plot(fx(x_smooth), label="edge")
# axes[0][1].plot(cflcx_smooth, label="edge*G")
# axes[0][2].plot(x_smooth, kernel_x, label="G")
# axes[1][0].plot(fy(y_smooth), label="edge")
# axes[1][1].plot(cflcy_smooth, label="edge*G")
# axes[1][2].plot(y_smooth, kernel_y, label="G")
# [[ax.legend() for ax in row] for row in axes]
# plt.show()
# quit()
# now decimate
cflcx = cflcx_smooth[::RESAMPLE_FACTOR]
cflcy = cflcy_smooth[::RESAMPLE_FACTOR]
return cflcx, cflcy
def evaluate_gaussian_layer_cuts(params, lcx, lcy, fwhm, t, NVt):
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(params)
zx = np.linspace(d_x, d_x+NVt, 10)
zy = np.linspace(d_z, d_z+NVt, 10)
fx = np.zeros((zx.size, lcx.size))
fy = np.zeros((zy.size, lcy.size))
for i in range(len(zx)):
_params = compact_params(x0_x, x0_y, Ms, theta, phi, rot, zx[i], zy[i], c_x, c_y)
fx[i], fy[i] = evaluate_gaussian_cuts(_params, lcx, lcy, fwhm, t)
return trapz(fx, zx, axis=0) / NVt, trapz(fy, zy, axis=0) / NVt
def extract_params(params):
x0_x = params[0]
x0_y = params[1]
Ms = params[2]
theta = params[3]
phi = params[4]
rot = params[5]
d_x = params[6]
d_z = params[7]
c_x = params[8]
c_y = params[9]
return x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y
def compact_params(x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y):
return [x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y] |
def two_cut_residual(params, lcx, lcxv, lcy, lcyv, t):
flcx, flcy = evaluate_cuts(params, lcx, lcy, t)
return np.concatenate([lcxv-flcx, lcyv-flcy])
def two_cut_gaussian_residual(params, lcx, lcxv, lcy, lcyv, fwhm, t):
cflcx, cflcy = evaluate_gaussian_cuts(params, lcx, lcy, fwhm, t)
return np.concatenate([lcxv-cflcx, lcyv-cflcy])
def two_cut_gaussian_layer_residual(params, lcx, lcxv, lcy, lcyv, fwhm, t, NVt):
icflcx, icflcy = evaluate_gaussian_layer_cuts(params, lcx, lcy, fwhm, t, NVt)
return np.concatenate([lcxv-icflcx, lcyv-icflcy])
def get_bounds(lcx, lcy):
lower_bounds = compact_params(
x0_x=lcx[0],
x0_y=lcy[0],
Ms=-1,
theta=0,
phi=-180*np.pi/180,
rot=-np.inf,
d_x=0,
d_z=0,
c_x=-np.inf,
c_y=-np.inf
)
upper_bounds = compact_params(
x0_x=lcx[-1],
x0_y=lcy[-1],
Ms=1,
theta=90*np.pi/180,
phi=180*np.pi/180,
rot=np.inf,
d_x=np.inf,
d_z=np.inf,
c_x=np.inf,
c_y=np.inf
)
bounds = np.zeros((2, len(lower_bounds)))
bounds[0] = lower_bounds
bounds[1] = upper_bounds
return bounds
def get_x_guess(lcx, lcy):
return compact_params(
x0_x=lcx[len(lcx)//2],
x0_y=lcy[len(lcy)//2],
Ms=1e-2,
theta=30*np.pi/180,
phi=0,
rot=0,
d_x=3e-6,
d_z=3e-6,
c_x=0,
c_y=0
)
def fit_magnetic_edge(lcx, lcxv, lcy, lcyv, t):
result = least_squares(two_cut_residual, args=(
lcx, lcxv, lcy, lcyv, t), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
def fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, fwhm, t):
result = least_squares(two_cut_gaussian_residual, args=(
lcx, lcxv, lcy, lcyv, fwhm, t), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
def fit_magnetic_edge_with_gaussian_layer(lcx, lcxv, lcy, lcyv, fwhm, t, NVt):
result = least_squares(two_cut_gaussian_layer_residual, args=(
lcx, lcxv, lcy, lcyv, fwhm, t, NVt), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
######################################################################################
def main():
magnetic_layer_thickness = 1e-9
NV_layer_thickness = 1e-6
N_linecuts = 10
linecut_width = 20e-6
img, optical_fwhm, p1, p2, p3, p4 = load_magnetic_data("magnetic_20x.json")
optical_fwhm_px = optical_fwhm / img.px_to_m(1) * RESAMPLE_FACTOR
assert optical_fwhm_px > 10
d_x_vals = []
d_y_vals = []
Ms_vals = []
theta_vals = []
plot_every = N_linecuts**2 // 5
d_pts = np.zeros((2, N_linecuts, 2))
d_map = np.zeros((2, N_linecuts))
pbar = tqdm(total=N_linecuts)
iterx = perpendicular_linecuts(img, p1, p2, linecut_width, N_linecuts, return_center=True)
itery = perpendicular_linecuts(img, p3, p4, linecut_width, N_linecuts, return_center=True)
for ((lcx, lcxv), cx), ((lcy, lcyv), cz) in zip(iterx, itery):
pbar.update()
result = fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness)
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(result.x)
if 0 < d_x < 40e-6 and 0 < d_z < 40e-6:
d_pts[0,pbar.n-1] = cx
d_pts[1,pbar.n-1] = cz
d_map[0,pbar.n-1] = d_x
d_map[1,pbar.n-1] = d_z
d_pts = d_pts.reshape(2*N_linecuts, 2)
# plt.scatter(pts[:, 0], pts[:, 1], c=d_map.ravel())
# fig = plt.figure()
# ax = fig.add_subplot(projection='3d')
# ax.scatter(pts[:, 0], pts[:, 1], d_map.ravel())
# plt.show()
pts = np.zeros((2*N_linecuts, 3))
pts[:,0] = d_pts[:,0]
pts[:,1] = d_pts[:,1]
pts[:,2] = d_map.ravel()
pts = pts[np.where(pts[:,2] != 0)]
pts = Points(pts)
print(pts)
plane = Plane.best_fit(pts)
xlim, ylim = img.get_size_m()
print(xlim, ylim)
plot_3d(
pts.plotter(c='k', s=50, depthshade=False),
# plane.plotter(alpha=0.2, lims_x=(-xlim/2, xlim/2), lims_y=(-ylim/2, ylim/2)),
)
plt.xlim(0, xlim)
plt.ylim(0, ylim)
ax = plt.gca()
ax.set_zlim(0, 5e-6)
X, Y, Z = plane.to_mesh(lims_x=(-ylim/2, ylim/2), lims_y=(-ylim/2, ylim/2))
x = np.linspace(0, xlim, img.data.shape[0])
y = np.linspace(0, ylim, img.data.shape[1])
xx, yy = np.meshgrid(x, y)
zz = np.linspace(start=np.linspace(Z[0][0],Z[0][1], img.data.shape[0]), stop=np.linspace(Z[1][0],Z[1][1], img.data.shape[0]), num=img.data.shape[1])
# zz = # np.ones_like(xx) * np.max(d_map)
# ax.contourf(xx, yy, img.data, 0, zdir='z', vmin=-20e-6, vmax=20e-6, cmap="BrBG")
data = np.clip((img.data+20e-6)/40e-6, 0, 1)
ax.plot_surface(xx, yy, zz, rstride=16, cstride=16, facecolors=plt.cm.bwr(data), shade=False, alpha=0.2)
xx, yy = np.meshgrid([x[0],x[-1]], [y[0],y[-1]])
zz = np.zeros_like(xx)
ax.plot_surface(xx, yy, zz)
# plt.figure()
# plt.imshow(data)
# plt.colorbar()
print(f"angle = {np.arccos(np.dot([0, 0, 1], plane.normal))}")
plt.show()
# for lcx, lcxv, cx in perpendicular_linecuts(img, p1, p2, linecut_width, N_linecuts, return_center=True):
# for lcy, lcyv, cz in perpendicular_linecuts(img, p3, p4, linecut_width, N_linecuts, return_center=True):
# pbar.update()
# # result = fit_magnetic_edge(lcx, lcxv, lcy, lcyv, magnetic_layer_thickness)
# # flcx, flcy = evaluate_cuts(result.x, lcx, lcy)
# result = fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness)
# # result = fit_magnetic_edge_with_gaussian_layer(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness, NV_layer_thickness)
# x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(result.x)
# if d_x < 10e-6 and d_z < 10e-6:
# Ms_vals.append(abs(Ms))
# d_x_vals.append(d_x)
# d_y_vals.append(d_z)
# theta_vals.append(theta)
# if pbar.n % plot_every == 0:
# flcx, flcy = evaluate_gaussian_cuts(result.x, lcx, lcy, optical_fwhm, magnetic_layer_thickness)
# # flcx, flcy = evaluate_gaussian_layer_cuts(result.x, lcx, lcy, optical_fwhm, magnetic_layer_thickness, NV_layer_thickness)
# fig, axes = plt.subplots(1, 2)
# axes[0].plot(lcx*1e6, lcxv, 'x')
# axes[1].plot(lcy*1e6, lcyv, 'x')
# axes[0].set_xlabel('x (um)')
# axes[1].set_xlabel('y (um)')
# axes[0].plot(lcx*1e6, flcx)
# axes[1].plot(lcy*1e6, flcy)
# # print()
# # print(d)
# # plt.show()
# # quit()
# print()
# print(f"mean d_x = {np.mean(d_x_vals)*1e6:.2f}um")
# print(f"std d_x = {np.std(d_x_vals)*1e6:.2f}um")
# print(f"mean d_z = {np.mean(d_y_vals)*1e6:.2f}um")
# print(f"std d_z = {np.std(d_y_vals)*1e6:.2f}um")
# print(f"mean Ms = {np.mean(Ms_vals) * 1e7 / 1e6:.2e} MA/m")
# print(f"std Ms = {np.std(Ms_vals) * 1e7 / 1e6:.2e} MA/m")
# print(f"mean theta = {np.mean(theta_vals)*180/np.pi:.2f} deg")
# print(f"std theta = {np.std(theta_vals)*180/np.pi:.2f} deg")
# print()
# fit_d_x, std_d_x = hist_and_fit_gauss(np.array(d_x_vals), plot=True, title="d_x")
# fit_d_y, std_d_y = hist_and_fit_gauss(np.array(d_y_vals), plot=True, title="d_z")
# fit_theta, std_theta = hist_and_fit_gauss(np.array(theta_vals), plot=True, title="theta")
# fit_Ms, std_Ms = hist_and_fit_gauss(np.array(Ms_vals), plot=True, logplot=True, title="Ms")
# print(f"fit d_x = {fit_d_x*1e6:.2f} +/- {std_d_x*1e6:.2f} um")
# print(f"fit d_z = {fit_d_y*1e6:.2f} +/- {std_d_y*1e6:.2f} um")
# print(f"fit theta = {fit_theta*180/np.pi:.2f} +/- {std_theta*180/np.pi:.2f} um")
# print(f"fit Ms = {fit_Ms*1e7/1e6:.2f} +/- {std_Ms*1e7/1e6:.2f} MA/m")
# plt.show()
if __name__ == "__main__":
main() | random_line_split | |
analyse_magnetic_linecuts_3D.py | import json
import numpy as np
from scipy.optimize import least_squares
from scipy.interpolate import interp1d
from scipy.integrate import trapz
# import matplotlib.pyplot as plt
from tqdm import tqdm
from skspatial.objects import Plane
from skspatial.objects import Points
from skspatial.plotting import plot_3d
from skspatial.plotting import plt
from util import Image, perpendicular_linecuts, load_magnetic_data, normalised_gaussian, hist_and_fit_gauss
RESAMPLE_FACTOR = 20
def magnetic_edge(x, x0, Ms, theta, phi, d_x, d_z, t):
u = x-x0
u2 = u**2
return 2 * Ms * t * ( np.sin(theta) * np.cos(phi) * d_x / (u**2 + d_x**2) - np.cos(theta) * u / (u**2 + d_z**2) )
def evaluate_cuts(params, lcx, lcy, t):
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(params)
flcx = magnetic_edge(lcx, x0_x, Ms, theta, phi, d_x, d_z, t) + c_x
flcy = magnetic_edge(lcy, x0_y, Ms, theta, phi + ((-1)**int(rot))*np.pi/2, d_x, d_z, t) + c_y
return flcx, flcy
def evaluate_gaussian_cuts(params, lcx, lcy, fwhm, t):
RESAMPLE_FACTOR = 10 # to interpolate and then decimate by
flcx, flcy = evaluate_cuts(params, lcx, lcy, t)
fx = interp1d(lcx, flcx)
fy = interp1d(lcy, flcy)
x_smooth = np.linspace(lcx[0], lcx[-1], lcx.shape[0] * RESAMPLE_FACTOR)
y_smooth = np.linspace(lcy[0], lcy[-1], lcy.shape[0] * RESAMPLE_FACTOR)
dx = x_smooth[1] - x_smooth[0]
dy = y_smooth[1] - y_smooth[0]
kernel_x = normalised_gaussian(x_smooth-(lcx[0]+lcx[-1])/2, fwhm)
kernel_y = normalised_gaussian(y_smooth-(lcy[0]+lcy[-1])/2, fwhm)
cflcx_smooth = np.convolve(fx(x_smooth), kernel_x, mode='same') * dx
cflcy_smooth = np.convolve(fy(y_smooth), kernel_y, mode='same') * dy
# from scipy.integrate import trapezoid
# print(trapezoid(x_smooth, kernel_x))
# fig, axes = plt.subplots(2, 3)
# axes[0][0].plot(fx(x_smooth), label="edge")
# axes[0][1].plot(cflcx_smooth, label="edge*G")
# axes[0][2].plot(x_smooth, kernel_x, label="G")
# axes[1][0].plot(fy(y_smooth), label="edge")
# axes[1][1].plot(cflcy_smooth, label="edge*G")
# axes[1][2].plot(y_smooth, kernel_y, label="G")
# [[ax.legend() for ax in row] for row in axes]
# plt.show()
# quit()
# now decimate
cflcx = cflcx_smooth[::RESAMPLE_FACTOR]
cflcy = cflcy_smooth[::RESAMPLE_FACTOR]
return cflcx, cflcy
def evaluate_gaussian_layer_cuts(params, lcx, lcy, fwhm, t, NVt):
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(params)
zx = np.linspace(d_x, d_x+NVt, 10)
zy = np.linspace(d_z, d_z+NVt, 10)
fx = np.zeros((zx.size, lcx.size))
fy = np.zeros((zy.size, lcy.size))
for i in range(len(zx)):
_params = compact_params(x0_x, x0_y, Ms, theta, phi, rot, zx[i], zy[i], c_x, c_y)
fx[i], fy[i] = evaluate_gaussian_cuts(_params, lcx, lcy, fwhm, t)
return trapz(fx, zx, axis=0) / NVt, trapz(fy, zy, axis=0) / NVt
def extract_params(params):
x0_x = params[0]
x0_y = params[1]
Ms = params[2]
theta = params[3]
phi = params[4]
rot = params[5]
d_x = params[6]
d_z = params[7]
c_x = params[8]
c_y = params[9]
return x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y
def compact_params(x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y):
return [x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y]
def two_cut_residual(params, lcx, lcxv, lcy, lcyv, t):
flcx, flcy = evaluate_cuts(params, lcx, lcy, t)
return np.concatenate([lcxv-flcx, lcyv-flcy])
def two_cut_gaussian_residual(params, lcx, lcxv, lcy, lcyv, fwhm, t):
cflcx, cflcy = evaluate_gaussian_cuts(params, lcx, lcy, fwhm, t)
return np.concatenate([lcxv-cflcx, lcyv-cflcy])
def two_cut_gaussian_layer_residual(params, lcx, lcxv, lcy, lcyv, fwhm, t, NVt):
icflcx, icflcy = evaluate_gaussian_layer_cuts(params, lcx, lcy, fwhm, t, NVt)
return np.concatenate([lcxv-icflcx, lcyv-icflcy])
def get_bounds(lcx, lcy):
lower_bounds = compact_params(
x0_x=lcx[0],
x0_y=lcy[0],
Ms=-1,
theta=0,
phi=-180*np.pi/180,
rot=-np.inf,
d_x=0,
d_z=0,
c_x=-np.inf,
c_y=-np.inf
)
upper_bounds = compact_params(
x0_x=lcx[-1],
x0_y=lcy[-1],
Ms=1,
theta=90*np.pi/180,
phi=180*np.pi/180,
rot=np.inf,
d_x=np.inf,
d_z=np.inf,
c_x=np.inf,
c_y=np.inf
)
bounds = np.zeros((2, len(lower_bounds)))
bounds[0] = lower_bounds
bounds[1] = upper_bounds
return bounds
def get_x_guess(lcx, lcy):
|
def fit_magnetic_edge(lcx, lcxv, lcy, lcyv, t):
result = least_squares(two_cut_residual, args=(
lcx, lcxv, lcy, lcyv, t), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
def fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, fwhm, t):
result = least_squares(two_cut_gaussian_residual, args=(
lcx, lcxv, lcy, lcyv, fwhm, t), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
def fit_magnetic_edge_with_gaussian_layer(lcx, lcxv, lcy, lcyv, fwhm, t, NVt):
result = least_squares(two_cut_gaussian_layer_residual, args=(
lcx, lcxv, lcy, lcyv, fwhm, t, NVt), x0=get_x_guess(lcx, lcy), bounds=get_bounds(lcx, lcy))
return result
######################################################################################
def main():
magnetic_layer_thickness = 1e-9
NV_layer_thickness = 1e-6
N_linecuts = 10
linecut_width = 20e-6
img, optical_fwhm, p1, p2, p3, p4 = load_magnetic_data("magnetic_20x.json")
optical_fwhm_px = optical_fwhm / img.px_to_m(1) * RESAMPLE_FACTOR
assert optical_fwhm_px > 10
d_x_vals = []
d_y_vals = []
Ms_vals = []
theta_vals = []
plot_every = N_linecuts**2 // 5
d_pts = np.zeros((2, N_linecuts, 2))
d_map = np.zeros((2, N_linecuts))
pbar = tqdm(total=N_linecuts)
iterx = perpendicular_linecuts(img, p1, p2, linecut_width, N_linecuts, return_center=True)
itery = perpendicular_linecuts(img, p3, p4, linecut_width, N_linecuts, return_center=True)
for ((lcx, lcxv), cx), ((lcy, lcyv), cz) in zip(iterx, itery):
pbar.update()
result = fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness)
x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(result.x)
if 0 < d_x < 40e-6 and 0 < d_z < 40e-6:
d_pts[0,pbar.n-1] = cx
d_pts[1,pbar.n-1] = cz
d_map[0,pbar.n-1] = d_x
d_map[1,pbar.n-1] = d_z
d_pts = d_pts.reshape(2*N_linecuts, 2)
# plt.scatter(pts[:, 0], pts[:, 1], c=d_map.ravel())
# fig = plt.figure()
# ax = fig.add_subplot(projection='3d')
# ax.scatter(pts[:, 0], pts[:, 1], d_map.ravel())
# plt.show()
pts = np.zeros((2*N_linecuts, 3))
pts[:,0] = d_pts[:,0]
pts[:,1] = d_pts[:,1]
pts[:,2] = d_map.ravel()
pts = pts[np.where(pts[:,2] != 0)]
pts = Points(pts)
print(pts)
plane = Plane.best_fit(pts)
xlim, ylim = img.get_size_m()
print(xlim, ylim)
plot_3d(
pts.plotter(c='k', s=50, depthshade=False),
# plane.plotter(alpha=0.2, lims_x=(-xlim/2, xlim/2), lims_y=(-ylim/2, ylim/2)),
)
plt.xlim(0, xlim)
plt.ylim(0, ylim)
ax = plt.gca()
ax.set_zlim(0, 5e-6)
X, Y, Z = plane.to_mesh(lims_x=(-ylim/2, ylim/2), lims_y=(-ylim/2, ylim/2))
x = np.linspace(0, xlim, img.data.shape[0])
y = np.linspace(0, ylim, img.data.shape[1])
xx, yy = np.meshgrid(x, y)
zz = np.linspace(start=np.linspace(Z[0][0],Z[0][1], img.data.shape[0]), stop=np.linspace(Z[1][0],Z[1][1], img.data.shape[0]), num=img.data.shape[1])
# zz = # np.ones_like(xx) * np.max(d_map)
# ax.contourf(xx, yy, img.data, 0, zdir='z', vmin=-20e-6, vmax=20e-6, cmap="BrBG")
data = np.clip((img.data+20e-6)/40e-6, 0, 1)
ax.plot_surface(xx, yy, zz, rstride=16, cstride=16, facecolors=plt.cm.bwr(data), shade=False, alpha=0.2)
xx, yy = np.meshgrid([x[0],x[-1]], [y[0],y[-1]])
zz = np.zeros_like(xx)
ax.plot_surface(xx, yy, zz)
# plt.figure()
# plt.imshow(data)
# plt.colorbar()
print(f"angle = {np.arccos(np.dot([0, 0, 1], plane.normal))}")
plt.show()
# for lcx, lcxv, cx in perpendicular_linecuts(img, p1, p2, linecut_width, N_linecuts, return_center=True):
# for lcy, lcyv, cz in perpendicular_linecuts(img, p3, p4, linecut_width, N_linecuts, return_center=True):
# pbar.update()
# # result = fit_magnetic_edge(lcx, lcxv, lcy, lcyv, magnetic_layer_thickness)
# # flcx, flcy = evaluate_cuts(result.x, lcx, lcy)
# result = fit_magnetic_edge_with_gaussian(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness)
# # result = fit_magnetic_edge_with_gaussian_layer(lcx, lcxv, lcy, lcyv, optical_fwhm, magnetic_layer_thickness, NV_layer_thickness)
# x0_x, x0_y, Ms, theta, phi, rot, d_x, d_z, c_x, c_y = extract_params(result.x)
# if d_x < 10e-6 and d_z < 10e-6:
# Ms_vals.append(abs(Ms))
# d_x_vals.append(d_x)
# d_y_vals.append(d_z)
# theta_vals.append(theta)
# if pbar.n % plot_every == 0:
# flcx, flcy = evaluate_gaussian_cuts(result.x, lcx, lcy, optical_fwhm, magnetic_layer_thickness)
# # flcx, flcy = evaluate_gaussian_layer_cuts(result.x, lcx, lcy, optical_fwhm, magnetic_layer_thickness, NV_layer_thickness)
# fig, axes = plt.subplots(1, 2)
# axes[0].plot(lcx*1e6, lcxv, 'x')
# axes[1].plot(lcy*1e6, lcyv, 'x')
# axes[0].set_xlabel('x (um)')
# axes[1].set_xlabel('y (um)')
# axes[0].plot(lcx*1e6, flcx)
# axes[1].plot(lcy*1e6, flcy)
# # print()
# # print(d)
# # plt.show()
# # quit()
# print()
# print(f"mean d_x = {np.mean(d_x_vals)*1e6:.2f}um")
# print(f"std d_x = {np.std(d_x_vals)*1e6:.2f}um")
# print(f"mean d_z = {np.mean(d_y_vals)*1e6:.2f}um")
# print(f"std d_z = {np.std(d_y_vals)*1e6:.2f}um")
# print(f"mean Ms = {np.mean(Ms_vals) * 1e7 / 1e6:.2e} MA/m")
# print(f"std Ms = {np.std(Ms_vals) * 1e7 / 1e6:.2e} MA/m")
# print(f"mean theta = {np.mean(theta_vals)*180/np.pi:.2f} deg")
# print(f"std theta = {np.std(theta_vals)*180/np.pi:.2f} deg")
# print()
# fit_d_x, std_d_x = hist_and_fit_gauss(np.array(d_x_vals), plot=True, title="d_x")
# fit_d_y, std_d_y = hist_and_fit_gauss(np.array(d_y_vals), plot=True, title="d_z")
# fit_theta, std_theta = hist_and_fit_gauss(np.array(theta_vals), plot=True, title="theta")
# fit_Ms, std_Ms = hist_and_fit_gauss(np.array(Ms_vals), plot=True, logplot=True, title="Ms")
# print(f"fit d_x = {fit_d_x*1e6:.2f} +/- {std_d_x*1e6:.2f} um")
# print(f"fit d_z = {fit_d_y*1e6:.2f} +/- {std_d_y*1e6:.2f} um")
# print(f"fit theta = {fit_theta*180/np.pi:.2f} +/- {std_theta*180/np.pi:.2f} um")
# print(f"fit Ms = {fit_Ms*1e7/1e6:.2f} +/- {std_Ms*1e7/1e6:.2f} MA/m")
# plt.show()
if __name__ == "__main__":
main() | return compact_params(
x0_x=lcx[len(lcx)//2],
x0_y=lcy[len(lcy)//2],
Ms=1e-2,
theta=30*np.pi/180,
phi=0,
rot=0,
d_x=3e-6,
d_z=3e-6,
c_x=0,
c_y=0
) | identifier_body |
upcean_reader.go | package oned
import (
"strconv"
"github.com/makiuchi-d/gozxing"
)
const (
// These two values are critical for determining how permissive the decoding will be.
// We've arrived at these values through a lot of trial and error. Setting them any higher
// lets false positives creep in quickly.
UPCEANReader_MAX_AVG_VARIANCE = 0.48
UPCEANReader_MAX_INDIVIDUAL_VARIANCE = 0.7
)
var (
// Start/end guard pattern.
UPCEANReader_START_END_PATTERN = []int{1, 1, 1}
// Pattern marking the middle of a UPC/EAN pattern, separating the two halves.
UPCEANReader_MIDDLE_PATTERN = []int{1, 1, 1, 1, 1}
// end guard pattern.
UPCEANReader_END_PATTERN = []int{1, 1, 1, 1, 1, 1}
// "Odd", or "L" patterns used to encode UPC/EAN digits.
UPCEANReader_L_PATTERNS = [][]int{
{3, 2, 1, 1}, // 0
{2, 2, 2, 1}, // 1
{2, 1, 2, 2}, // 2
{1, 4, 1, 1}, // 3
{1, 1, 3, 2}, // 4
{1, 2, 3, 1}, // 5
{1, 1, 1, 4}, // 6
{1, 3, 1, 2}, // 7
{1, 2, 1, 3}, // 8
{3, 1, 1, 2}, // 9
}
// As above but also including the "even", or "G" patterns used to encode UPC/EAN digits.
UPCEANReader_L_AND_G_PATTERNS [][]int
)
func init() {
UPCEANReader_L_AND_G_PATTERNS = make([][]int, 20)
copy(UPCEANReader_L_AND_G_PATTERNS, UPCEANReader_L_PATTERNS)
for i := 10; i < 20; i++ {
widths := UPCEANReader_L_PATTERNS[i-10]
reversedWidths := make([]int, len(widths))
for j := 0; j < len(widths); j++ {
reversedWidths[j] = widths[len(widths)-j-1]
}
UPCEANReader_L_AND_G_PATTERNS[i] = reversedWidths
}
}
type upceanRowDecoder interface {
RowDecoder
// getBarcodeFormat Get the format of this decoder.
// @return The 1D format.
getBarcodeFormat() gozxing.BarcodeFormat
// decodeMiddle Subclasses override this to decode the portion of a barcode between the start
// and end guard patterns.
//
// @param row row of black/white values to search
// @param startRange start/end offset of start guard pattern
// @param resultString {@link StringBuilder} to append decoded chars to
// @return horizontal offset of first pixel after the "middle" that was decoded
// @throws NotFoundException if decoding could not complete successfully
decodeMiddle(row *gozxing.BitArray, startRange []int, result []byte) (int, []byte, error)
decodeEnd(row *gozxing.BitArray, endStart int) ([]int, error)
// checkChecksum Check checksum
// @param s string of digits to check
// @return {@link #checkStandardUPCEANChecksum(CharSequence)}
// @throws FormatException if the string does not contain only digits
checkChecksum(s string) (bool, error)
}
type upceanReader struct {
upceanRowDecoder
*OneDReader
decodeRowStringBuffer []byte
extensionReader *UPCEANExtensionSupport
}
func newUPCEANReader(rowDecoder upceanRowDecoder) *upceanReader {
this := &upceanReader{
upceanRowDecoder: rowDecoder,
decodeRowStringBuffer: make([]byte, 13),
extensionReader: NewUPCEANExtensionSupport(),
}
this.OneDReader = NewOneDReader(rowDecoder)
return this
}
func upceanReader_findStartGuardPattern(row *gozxing.BitArray) ([]int, error) {
foundStart := false
var startRange []int
nextStart := 0
counters := make([]int, len(UPCEANReader_START_END_PATTERN))
for !foundStart {
for i := range counters {
counters[i] = 0
}
var e error
startRange, e = upceanReader_findGuardPatternWithCounters(
row, nextStart, false, UPCEANReader_START_END_PATTERN, counters)
if e != nil {
return nil, e
}
start := startRange[0]
nextStart = startRange[1]
// Make sure there is a quiet zone at least as big as the start pattern before the barcode.
// If this check would run off the left edge of the image, do not accept this barcode,
// as it is very likely to be a false positive.
quietStart := start - (nextStart - start)
if quietStart >= 0 {
foundStart, _ = row.IsRange(quietStart, start, false)
}
}
return startRange, nil
}
func (this *upceanReader) DecodeRow(rowNumber int, row *gozxing.BitArray, hints map[gozxing.DecodeHintType]interface{}) (*gozxing.Result, error) {
start, e := upceanReader_findStartGuardPattern(row)
if e != nil {
return nil, gozxing.WrapNotFoundException(e)
}
return this.decodeRowWithStartRange(rowNumber, row, start, hints)
}
// decodeRowWithStartRange Like {@link #decodeRow(int, BitArray, Map)}, but
// allows caller to inform method about where the UPC/EAN start pattern is
// found. This allows this to be computed once and reused across many implementations.</p>
//
// @param rowNumber row index into the image
// @param row encoding of the row of the barcode image
// @param startGuardRange start/end column where the opening start pattern was found
// @param hints optional hints that influence decoding
// @return {@link Result} encapsulating the result of decoding a barcode in the row
// @throws NotFoundException if no potential barcode is found
// @throws ChecksumException if a potential barcode is found but does not pass its checksum
// @throws FormatException if a potential barcode is found but format is invalid
func (this *upceanReader) decodeRowWithStartRange(
rowNumber int, row *gozxing.BitArray, startGuardRange []int,
hints map[gozxing.DecodeHintType]interface{}) (*gozxing.Result, error) {
var resultPointCallback gozxing.ResultPointCallback
if hint, ok := hints[gozxing.DecodeHintType_NEED_RESULT_POINT_CALLBACK]; ok {
resultPointCallback = hint.(gozxing.ResultPointCallback)
}
symbologyIdentifier := 0
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(
float64(startGuardRange[0]+startGuardRange[1])/2.0, float64(rowNumber)))
}
result := this.decodeRowStringBuffer[:0]
endStart, result, e := this.decodeMiddle(row, startGuardRange, result)
if e != nil {
return nil, e
}
rowNumberf := float64(rowNumber)
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(float64(endStart), rowNumberf))
}
endRange, e := this.decodeEnd(row, endStart)
if e != nil {
return nil, gozxing.WrapNotFoundException(e)
}
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(
float64(endRange[0]+endRange[1])/2.0, rowNumberf))
}
// Make sure there is a quiet zone at least as big as the end pattern after the barcode. The
// spec might want more whitespace, but in practice this is the maximum we can count on.
end := endRange[1]
quietEnd := end + (end - endRange[0])
if quietEnd >= row.GetSize() {
return nil, gozxing.NewNotFoundException("quietEnd=%v, row size=%v", quietEnd, row.GetSize())
}
rowIsRange, _ := row.IsRange(end, quietEnd, false)
if !rowIsRange {
return nil, gozxing.NewNotFoundException("raw is not range")
}
this.decodeRowStringBuffer = result
resultString := string(result)
// UPC/EAN should never be less than 8 chars anyway
if len(resultString) < 8 {
return nil, gozxing.NewFormatException("len(resultString) = %v", len(resultString))
}
ok, e := this.checkChecksum(resultString)
if e != nil {
return nil, gozxing.WrapChecksumException(e)
}
if !ok {
return nil, gozxing.NewChecksumException()
} | resultString,
nil, // no natural byte representation for these barcodes
[]gozxing.ResultPoint{
gozxing.NewResultPoint(left, float64(rowNumber)),
gozxing.NewResultPoint(right, float64(rowNumber)),
},
format)
extensionLength := 0
extensionResult, e := this.extensionReader.decodeRow(rowNumber, row, endRange[1])
if e == nil {
decodeResult.PutMetadata(gozxing.ResultMetadataType_UPC_EAN_EXTENSION, extensionResult.GetText())
decodeResult.PutAllMetadata(extensionResult.GetResultMetadata())
decodeResult.AddResultPoints(extensionResult.GetResultPoints())
extensionLength = len(extensionResult.GetText())
} else {
// ignore ReaderException
if _, ok := e.(gozxing.ReaderException); !ok {
return nil, gozxing.WrapReaderException(e)
}
}
if hint, ok := hints[gozxing.DecodeHintType_ALLOWED_EAN_EXTENSIONS]; ok {
allowedExtensions, ok := hint.([]int)
if ok {
valid := false
for _, length := range allowedExtensions {
if extensionLength == length {
valid = true
break
}
}
if !valid {
return nil, gozxing.NewNotFoundException()
}
}
}
if format == gozxing.BarcodeFormat_EAN_13 || format == gozxing.BarcodeFormat_UPC_A {
countryID := eanManufacturerOrgSupportLookupCountryIdentifier(resultString)
if countryID != "" {
decodeResult.PutMetadata(gozxing.ResultMetadataType_POSSIBLE_COUNTRY, countryID)
}
}
if format == gozxing.BarcodeFormat_EAN_8 {
symbologyIdentifier = 4
}
decodeResult.PutMetadata(
gozxing.ResultMetadataType_SYMBOLOGY_IDENTIFIER, "]E"+strconv.Itoa(symbologyIdentifier))
return decodeResult, nil
}
// checkChecksum Check checksum
// @param s string of digits to check
// @return {@link #checkStandardUPCEANChecksum(CharSequence)}
// @throws FormatException if the string does not contain only digits
func upceanReader_checkChecksum(s string) (bool, error) {
return upceanReader_checkStandardUPCEANChecksum(s)
}
// checkStandardUPCEANChecksum Computes the UPC/EAN checksum on a string of digits,
// and reports whether the checksum is correct or not.
//
// @param s string of digits to check
/// @return true iff string of digits passes the UPC/EAN checksum algorithm
// @throws FormatException if the string does not contain only digits
func upceanReader_checkStandardUPCEANChecksum(s string) (bool, error) {
length := len(s)
if length == 0 {
return false, nil
}
check := int(s[length-1] - '0')
sum, e := upceanReader_getStandardUPCEANChecksum(s[:length-1])
if e != nil {
return false, e
}
return sum == check, nil
}
func upceanReader_getStandardUPCEANChecksum(s string) (int, error) {
length := len(s)
sum := 0
for i := length - 1; i >= 0; i -= 2 {
digit := int(s[i] - '0')
if digit < 0 || digit > 9 {
return 0, gozxing.NewFormatException("0x%02x is not digit", s[i])
}
sum += digit
}
sum *= 3
for i := length - 2; i >= 0; i -= 2 {
digit := int(s[i] - '0')
if digit < 0 || digit > 9 {
return 0, gozxing.NewFormatException("0x%02x is not digit", s[i])
}
sum += digit
}
return (1000 - sum) % 10, nil
}
func upceanReader_decodeEnd(row *gozxing.BitArray, endStart int) ([]int, error) {
return upceanReader_findGuardPattern(row, endStart, false, UPCEANReader_START_END_PATTERN)
}
func upceanReader_findGuardPattern(row *gozxing.BitArray, rowOffset int, whiteFirst bool, pattern []int) ([]int, error) {
counters := make([]int, len(pattern))
return upceanReader_findGuardPatternWithCounters(row, rowOffset, whiteFirst, pattern, counters)
}
// UPCEANReader_findGuardPatternWithCounters Find guard pattern
// @param row row of black/white values to search
// @param rowOffset position to start search
// @param whiteFirst if true, indicates that the pattern specifies white/black/white/...
// pixel counts, otherwise, it is interpreted as black/white/black/...
// @param pattern pattern of counts of number of black and white pixels that are being
// searched for as a pattern
// @param counters array of counters, as long as pattern, to re-use
// @return start/end horizontal offset of guard pattern, as an array of two ints
// @throws NotFoundException if pattern is not found
func upceanReader_findGuardPatternWithCounters(
row *gozxing.BitArray, rowOffset int, whiteFirst bool, pattern, counters []int) ([]int, error) {
width := row.GetSize()
if whiteFirst {
rowOffset = row.GetNextUnset(rowOffset)
} else {
rowOffset = row.GetNextSet(rowOffset)
}
counterPosition := 0
patternStart := rowOffset
patternLength := len(pattern)
isWhite := whiteFirst
for x := rowOffset; x < width; x++ {
if row.Get(x) != isWhite {
counters[counterPosition]++
} else {
if counterPosition == patternLength-1 {
if PatternMatchVariance(counters, pattern, UPCEANReader_MAX_INDIVIDUAL_VARIANCE) < UPCEANReader_MAX_AVG_VARIANCE {
return []int{patternStart, x}, nil
}
patternStart += counters[0] + counters[1]
copy(counters[:counterPosition-1], counters[2:counterPosition+1])
counters[counterPosition-1] = 0
counters[counterPosition] = 0
counterPosition--
} else {
counterPosition++
}
counters[counterPosition] = 1
isWhite = !isWhite
}
}
return nil, gozxing.NewNotFoundException()
}
// UPCEANReader_decodeDigit Attempts to decode a single UPC/EAN-encoded digit.
//
// @param row row of black/white values to decode
// @param counters the counts of runs of observed black/white/black/... values
// @param rowOffset horizontal offset to start decoding from
// @param patterns the set of patterns to use to decode -- sometimes different encodings
// for the digits 0-9 are used, and this indicates the encodings for 0 to 9 that should
// be used
// @return horizontal offset of first pixel beyond the decoded digit
// @throws NotFoundException if digit cannot be decoded
func upceanReader_decodeDigit(row *gozxing.BitArray, counters []int, rowOffset int, patterns [][]int) (int, error) {
e := RecordPattern(row, rowOffset, counters)
if e != nil {
return 0, e
}
bestVariance := UPCEANReader_MAX_AVG_VARIANCE // worst variance we'll accept
bestMatch := -1
max := len(patterns)
for i := 0; i < max; i++ {
pattern := patterns[i]
variance := PatternMatchVariance(counters, pattern, UPCEANReader_MAX_INDIVIDUAL_VARIANCE)
if variance < bestVariance {
bestVariance = variance
bestMatch = i
}
}
if bestMatch < 0 {
return 0, gozxing.NewNotFoundException()
}
return bestMatch, nil
} |
left := float64(startGuardRange[1]+startGuardRange[0]) / 2.0
right := float64(endRange[1]+endRange[0]) / 2.0
format := this.getBarcodeFormat()
decodeResult := gozxing.NewResult( | random_line_split |
upcean_reader.go | package oned
import (
"strconv"
"github.com/makiuchi-d/gozxing"
)
const (
// These two values are critical for determining how permissive the decoding will be.
// We've arrived at these values through a lot of trial and error. Setting them any higher
// lets false positives creep in quickly.
UPCEANReader_MAX_AVG_VARIANCE = 0.48
UPCEANReader_MAX_INDIVIDUAL_VARIANCE = 0.7
)
var (
// Start/end guard pattern.
UPCEANReader_START_END_PATTERN = []int{1, 1, 1}
// Pattern marking the middle of a UPC/EAN pattern, separating the two halves.
UPCEANReader_MIDDLE_PATTERN = []int{1, 1, 1, 1, 1}
// end guard pattern.
UPCEANReader_END_PATTERN = []int{1, 1, 1, 1, 1, 1}
// "Odd", or "L" patterns used to encode UPC/EAN digits.
UPCEANReader_L_PATTERNS = [][]int{
{3, 2, 1, 1}, // 0
{2, 2, 2, 1}, // 1
{2, 1, 2, 2}, // 2
{1, 4, 1, 1}, // 3
{1, 1, 3, 2}, // 4
{1, 2, 3, 1}, // 5
{1, 1, 1, 4}, // 6
{1, 3, 1, 2}, // 7
{1, 2, 1, 3}, // 8
{3, 1, 1, 2}, // 9
}
// As above but also including the "even", or "G" patterns used to encode UPC/EAN digits.
UPCEANReader_L_AND_G_PATTERNS [][]int
)
func init() {
UPCEANReader_L_AND_G_PATTERNS = make([][]int, 20)
copy(UPCEANReader_L_AND_G_PATTERNS, UPCEANReader_L_PATTERNS)
for i := 10; i < 20; i++ {
widths := UPCEANReader_L_PATTERNS[i-10]
reversedWidths := make([]int, len(widths))
for j := 0; j < len(widths); j++ {
reversedWidths[j] = widths[len(widths)-j-1]
}
UPCEANReader_L_AND_G_PATTERNS[i] = reversedWidths
}
}
type upceanRowDecoder interface {
RowDecoder
// getBarcodeFormat Get the format of this decoder.
// @return The 1D format.
getBarcodeFormat() gozxing.BarcodeFormat
// decodeMiddle Subclasses override this to decode the portion of a barcode between the start
// and end guard patterns.
//
// @param row row of black/white values to search
// @param startRange start/end offset of start guard pattern
// @param resultString {@link StringBuilder} to append decoded chars to
// @return horizontal offset of first pixel after the "middle" that was decoded
// @throws NotFoundException if decoding could not complete successfully
decodeMiddle(row *gozxing.BitArray, startRange []int, result []byte) (int, []byte, error)
decodeEnd(row *gozxing.BitArray, endStart int) ([]int, error)
// checkChecksum Check checksum
// @param s string of digits to check
// @return {@link #checkStandardUPCEANChecksum(CharSequence)}
// @throws FormatException if the string does not contain only digits
checkChecksum(s string) (bool, error)
}
type upceanReader struct {
upceanRowDecoder
*OneDReader
decodeRowStringBuffer []byte
extensionReader *UPCEANExtensionSupport
}
func newUPCEANReader(rowDecoder upceanRowDecoder) *upceanReader {
this := &upceanReader{
upceanRowDecoder: rowDecoder,
decodeRowStringBuffer: make([]byte, 13),
extensionReader: NewUPCEANExtensionSupport(),
}
this.OneDReader = NewOneDReader(rowDecoder)
return this
}
func upceanReader_findStartGuardPattern(row *gozxing.BitArray) ([]int, error) {
foundStart := false
var startRange []int
nextStart := 0
counters := make([]int, len(UPCEANReader_START_END_PATTERN))
for !foundStart {
for i := range counters {
counters[i] = 0
}
var e error
startRange, e = upceanReader_findGuardPatternWithCounters(
row, nextStart, false, UPCEANReader_START_END_PATTERN, counters)
if e != nil {
return nil, e
}
start := startRange[0]
nextStart = startRange[1]
// Make sure there is a quiet zone at least as big as the start pattern before the barcode.
// If this check would run off the left edge of the image, do not accept this barcode,
// as it is very likely to be a false positive.
quietStart := start - (nextStart - start)
if quietStart >= 0 {
foundStart, _ = row.IsRange(quietStart, start, false)
}
}
return startRange, nil
}
func (this *upceanReader) DecodeRow(rowNumber int, row *gozxing.BitArray, hints map[gozxing.DecodeHintType]interface{}) (*gozxing.Result, error) {
start, e := upceanReader_findStartGuardPattern(row)
if e != nil {
return nil, gozxing.WrapNotFoundException(e)
}
return this.decodeRowWithStartRange(rowNumber, row, start, hints)
}
// decodeRowWithStartRange Like {@link #decodeRow(int, BitArray, Map)}, but
// allows caller to inform method about where the UPC/EAN start pattern is
// found. This allows this to be computed once and reused across many implementations.</p>
//
// @param rowNumber row index into the image
// @param row encoding of the row of the barcode image
// @param startGuardRange start/end column where the opening start pattern was found
// @param hints optional hints that influence decoding
// @return {@link Result} encapsulating the result of decoding a barcode in the row
// @throws NotFoundException if no potential barcode is found
// @throws ChecksumException if a potential barcode is found but does not pass its checksum
// @throws FormatException if a potential barcode is found but format is invalid
func (this *upceanReader) decodeRowWithStartRange(
rowNumber int, row *gozxing.BitArray, startGuardRange []int,
hints map[gozxing.DecodeHintType]interface{}) (*gozxing.Result, error) {
var resultPointCallback gozxing.ResultPointCallback
if hint, ok := hints[gozxing.DecodeHintType_NEED_RESULT_POINT_CALLBACK]; ok {
resultPointCallback = hint.(gozxing.ResultPointCallback)
}
symbologyIdentifier := 0
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(
float64(startGuardRange[0]+startGuardRange[1])/2.0, float64(rowNumber)))
}
result := this.decodeRowStringBuffer[:0]
endStart, result, e := this.decodeMiddle(row, startGuardRange, result)
if e != nil {
return nil, e
}
rowNumberf := float64(rowNumber)
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(float64(endStart), rowNumberf))
}
endRange, e := this.decodeEnd(row, endStart)
if e != nil {
return nil, gozxing.WrapNotFoundException(e)
}
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(
float64(endRange[0]+endRange[1])/2.0, rowNumberf))
}
// Make sure there is a quiet zone at least as big as the end pattern after the barcode. The
// spec might want more whitespace, but in practice this is the maximum we can count on.
end := endRange[1]
quietEnd := end + (end - endRange[0])
if quietEnd >= row.GetSize() {
return nil, gozxing.NewNotFoundException("quietEnd=%v, row size=%v", quietEnd, row.GetSize())
}
rowIsRange, _ := row.IsRange(end, quietEnd, false)
if !rowIsRange {
return nil, gozxing.NewNotFoundException("raw is not range")
}
this.decodeRowStringBuffer = result
resultString := string(result)
// UPC/EAN should never be less than 8 chars anyway
if len(resultString) < 8 {
return nil, gozxing.NewFormatException("len(resultString) = %v", len(resultString))
}
ok, e := this.checkChecksum(resultString)
if e != nil {
return nil, gozxing.WrapChecksumException(e)
}
if !ok {
return nil, gozxing.NewChecksumException()
}
left := float64(startGuardRange[1]+startGuardRange[0]) / 2.0
right := float64(endRange[1]+endRange[0]) / 2.0
format := this.getBarcodeFormat()
decodeResult := gozxing.NewResult(
resultString,
nil, // no natural byte representation for these barcodes
[]gozxing.ResultPoint{
gozxing.NewResultPoint(left, float64(rowNumber)),
gozxing.NewResultPoint(right, float64(rowNumber)),
},
format)
extensionLength := 0
extensionResult, e := this.extensionReader.decodeRow(rowNumber, row, endRange[1])
if e == nil {
decodeResult.PutMetadata(gozxing.ResultMetadataType_UPC_EAN_EXTENSION, extensionResult.GetText())
decodeResult.PutAllMetadata(extensionResult.GetResultMetadata())
decodeResult.AddResultPoints(extensionResult.GetResultPoints())
extensionLength = len(extensionResult.GetText())
} else {
// ignore ReaderException
if _, ok := e.(gozxing.ReaderException); !ok {
return nil, gozxing.WrapReaderException(e)
}
}
if hint, ok := hints[gozxing.DecodeHintType_ALLOWED_EAN_EXTENSIONS]; ok {
allowedExtensions, ok := hint.([]int)
if ok {
valid := false
for _, length := range allowedExtensions {
if extensionLength == length {
valid = true
break
}
}
if !valid {
return nil, gozxing.NewNotFoundException()
}
}
}
if format == gozxing.BarcodeFormat_EAN_13 || format == gozxing.BarcodeFormat_UPC_A {
countryID := eanManufacturerOrgSupportLookupCountryIdentifier(resultString)
if countryID != "" {
decodeResult.PutMetadata(gozxing.ResultMetadataType_POSSIBLE_COUNTRY, countryID)
}
}
if format == gozxing.BarcodeFormat_EAN_8 {
symbologyIdentifier = 4
}
decodeResult.PutMetadata(
gozxing.ResultMetadataType_SYMBOLOGY_IDENTIFIER, "]E"+strconv.Itoa(symbologyIdentifier))
return decodeResult, nil
}
// checkChecksum Check checksum
// @param s string of digits to check
// @return {@link #checkStandardUPCEANChecksum(CharSequence)}
// @throws FormatException if the string does not contain only digits
func upceanReader_checkChecksum(s string) (bool, error) {
return upceanReader_checkStandardUPCEANChecksum(s)
}
// checkStandardUPCEANChecksum Computes the UPC/EAN checksum on a string of digits,
// and reports whether the checksum is correct or not.
//
// @param s string of digits to check
/// @return true iff string of digits passes the UPC/EAN checksum algorithm
// @throws FormatException if the string does not contain only digits
func upceanReader_checkStandardUPCEANChecksum(s string) (bool, error) {
length := len(s)
if length == 0 {
return false, nil
}
check := int(s[length-1] - '0')
sum, e := upceanReader_getStandardUPCEANChecksum(s[:length-1])
if e != nil {
return false, e
}
return sum == check, nil
}
func upceanReader_getStandardUPCEANChecksum(s string) (int, error) {
length := len(s)
sum := 0
for i := length - 1; i >= 0; i -= 2 {
digit := int(s[i] - '0')
if digit < 0 || digit > 9 {
return 0, gozxing.NewFormatException("0x%02x is not digit", s[i])
}
sum += digit
}
sum *= 3
for i := length - 2; i >= 0; i -= 2 {
digit := int(s[i] - '0')
if digit < 0 || digit > 9 {
return 0, gozxing.NewFormatException("0x%02x is not digit", s[i])
}
sum += digit
}
return (1000 - sum) % 10, nil
}
func upceanReader_decodeEnd(row *gozxing.BitArray, endStart int) ([]int, error) {
return upceanReader_findGuardPattern(row, endStart, false, UPCEANReader_START_END_PATTERN)
}
func upceanReader_findGuardPattern(row *gozxing.BitArray, rowOffset int, whiteFirst bool, pattern []int) ([]int, error) |
// UPCEANReader_findGuardPatternWithCounters Find guard pattern
// @param row row of black/white values to search
// @param rowOffset position to start search
// @param whiteFirst if true, indicates that the pattern specifies white/black/white/...
// pixel counts, otherwise, it is interpreted as black/white/black/...
// @param pattern pattern of counts of number of black and white pixels that are being
// searched for as a pattern
// @param counters array of counters, as long as pattern, to re-use
// @return start/end horizontal offset of guard pattern, as an array of two ints
// @throws NotFoundException if pattern is not found
func upceanReader_findGuardPatternWithCounters(
row *gozxing.BitArray, rowOffset int, whiteFirst bool, pattern, counters []int) ([]int, error) {
width := row.GetSize()
if whiteFirst {
rowOffset = row.GetNextUnset(rowOffset)
} else {
rowOffset = row.GetNextSet(rowOffset)
}
counterPosition := 0
patternStart := rowOffset
patternLength := len(pattern)
isWhite := whiteFirst
for x := rowOffset; x < width; x++ {
if row.Get(x) != isWhite {
counters[counterPosition]++
} else {
if counterPosition == patternLength-1 {
if PatternMatchVariance(counters, pattern, UPCEANReader_MAX_INDIVIDUAL_VARIANCE) < UPCEANReader_MAX_AVG_VARIANCE {
return []int{patternStart, x}, nil
}
patternStart += counters[0] + counters[1]
copy(counters[:counterPosition-1], counters[2:counterPosition+1])
counters[counterPosition-1] = 0
counters[counterPosition] = 0
counterPosition--
} else {
counterPosition++
}
counters[counterPosition] = 1
isWhite = !isWhite
}
}
return nil, gozxing.NewNotFoundException()
}
// UPCEANReader_decodeDigit Attempts to decode a single UPC/EAN-encoded digit.
//
// @param row row of black/white values to decode
// @param counters the counts of runs of observed black/white/black/... values
// @param rowOffset horizontal offset to start decoding from
// @param patterns the set of patterns to use to decode -- sometimes different encodings
// for the digits 0-9 are used, and this indicates the encodings for 0 to 9 that should
// be used
// @return horizontal offset of first pixel beyond the decoded digit
// @throws NotFoundException if digit cannot be decoded
func upceanReader_decodeDigit(row *gozxing.BitArray, counters []int, rowOffset int, patterns [][]int) (int, error) {
e := RecordPattern(row, rowOffset, counters)
if e != nil {
return 0, e
}
bestVariance := UPCEANReader_MAX_AVG_VARIANCE // worst variance we'll accept
bestMatch := -1
max := len(patterns)
for i := 0; i < max; i++ {
pattern := patterns[i]
variance := PatternMatchVariance(counters, pattern, UPCEANReader_MAX_INDIVIDUAL_VARIANCE)
if variance < bestVariance {
bestVariance = variance
bestMatch = i
}
}
if bestMatch < 0 {
return 0, gozxing.NewNotFoundException()
}
return bestMatch, nil
}
| {
counters := make([]int, len(pattern))
return upceanReader_findGuardPatternWithCounters(row, rowOffset, whiteFirst, pattern, counters)
} | identifier_body |
upcean_reader.go | package oned
import (
"strconv"
"github.com/makiuchi-d/gozxing"
)
const (
// These two values are critical for determining how permissive the decoding will be.
// We've arrived at these values through a lot of trial and error. Setting them any higher
// lets false positives creep in quickly.
UPCEANReader_MAX_AVG_VARIANCE = 0.48
UPCEANReader_MAX_INDIVIDUAL_VARIANCE = 0.7
)
var (
// Start/end guard pattern.
UPCEANReader_START_END_PATTERN = []int{1, 1, 1}
// Pattern marking the middle of a UPC/EAN pattern, separating the two halves.
UPCEANReader_MIDDLE_PATTERN = []int{1, 1, 1, 1, 1}
// end guard pattern.
UPCEANReader_END_PATTERN = []int{1, 1, 1, 1, 1, 1}
// "Odd", or "L" patterns used to encode UPC/EAN digits.
UPCEANReader_L_PATTERNS = [][]int{
{3, 2, 1, 1}, // 0
{2, 2, 2, 1}, // 1
{2, 1, 2, 2}, // 2
{1, 4, 1, 1}, // 3
{1, 1, 3, 2}, // 4
{1, 2, 3, 1}, // 5
{1, 1, 1, 4}, // 6
{1, 3, 1, 2}, // 7
{1, 2, 1, 3}, // 8
{3, 1, 1, 2}, // 9
}
// As above but also including the "even", or "G" patterns used to encode UPC/EAN digits.
UPCEANReader_L_AND_G_PATTERNS [][]int
)
func init() {
UPCEANReader_L_AND_G_PATTERNS = make([][]int, 20)
copy(UPCEANReader_L_AND_G_PATTERNS, UPCEANReader_L_PATTERNS)
for i := 10; i < 20; i++ {
widths := UPCEANReader_L_PATTERNS[i-10]
reversedWidths := make([]int, len(widths))
for j := 0; j < len(widths); j++ {
reversedWidths[j] = widths[len(widths)-j-1]
}
UPCEANReader_L_AND_G_PATTERNS[i] = reversedWidths
}
}
type upceanRowDecoder interface {
RowDecoder
// getBarcodeFormat Get the format of this decoder.
// @return The 1D format.
getBarcodeFormat() gozxing.BarcodeFormat
// decodeMiddle Subclasses override this to decode the portion of a barcode between the start
// and end guard patterns.
//
// @param row row of black/white values to search
// @param startRange start/end offset of start guard pattern
// @param resultString {@link StringBuilder} to append decoded chars to
// @return horizontal offset of first pixel after the "middle" that was decoded
// @throws NotFoundException if decoding could not complete successfully
decodeMiddle(row *gozxing.BitArray, startRange []int, result []byte) (int, []byte, error)
decodeEnd(row *gozxing.BitArray, endStart int) ([]int, error)
// checkChecksum Check checksum
// @param s string of digits to check
// @return {@link #checkStandardUPCEANChecksum(CharSequence)}
// @throws FormatException if the string does not contain only digits
checkChecksum(s string) (bool, error)
}
type upceanReader struct {
upceanRowDecoder
*OneDReader
decodeRowStringBuffer []byte
extensionReader *UPCEANExtensionSupport
}
func newUPCEANReader(rowDecoder upceanRowDecoder) *upceanReader {
this := &upceanReader{
upceanRowDecoder: rowDecoder,
decodeRowStringBuffer: make([]byte, 13),
extensionReader: NewUPCEANExtensionSupport(),
}
this.OneDReader = NewOneDReader(rowDecoder)
return this
}
func upceanReader_findStartGuardPattern(row *gozxing.BitArray) ([]int, error) {
foundStart := false
var startRange []int
nextStart := 0
counters := make([]int, len(UPCEANReader_START_END_PATTERN))
for !foundStart {
for i := range counters {
counters[i] = 0
}
var e error
startRange, e = upceanReader_findGuardPatternWithCounters(
row, nextStart, false, UPCEANReader_START_END_PATTERN, counters)
if e != nil {
return nil, e
}
start := startRange[0]
nextStart = startRange[1]
// Make sure there is a quiet zone at least as big as the start pattern before the barcode.
// If this check would run off the left edge of the image, do not accept this barcode,
// as it is very likely to be a false positive.
quietStart := start - (nextStart - start)
if quietStart >= 0 {
foundStart, _ = row.IsRange(quietStart, start, false)
}
}
return startRange, nil
}
func (this *upceanReader) DecodeRow(rowNumber int, row *gozxing.BitArray, hints map[gozxing.DecodeHintType]interface{}) (*gozxing.Result, error) {
start, e := upceanReader_findStartGuardPattern(row)
if e != nil {
return nil, gozxing.WrapNotFoundException(e)
}
return this.decodeRowWithStartRange(rowNumber, row, start, hints)
}
// decodeRowWithStartRange Like {@link #decodeRow(int, BitArray, Map)}, but
// allows caller to inform method about where the UPC/EAN start pattern is
// found. This allows this to be computed once and reused across many implementations.</p>
//
// @param rowNumber row index into the image
// @param row encoding of the row of the barcode image
// @param startGuardRange start/end column where the opening start pattern was found
// @param hints optional hints that influence decoding
// @return {@link Result} encapsulating the result of decoding a barcode in the row
// @throws NotFoundException if no potential barcode is found
// @throws ChecksumException if a potential barcode is found but does not pass its checksum
// @throws FormatException if a potential barcode is found but format is invalid
func (this *upceanReader) decodeRowWithStartRange(
rowNumber int, row *gozxing.BitArray, startGuardRange []int,
hints map[gozxing.DecodeHintType]interface{}) (*gozxing.Result, error) {
var resultPointCallback gozxing.ResultPointCallback
if hint, ok := hints[gozxing.DecodeHintType_NEED_RESULT_POINT_CALLBACK]; ok {
resultPointCallback = hint.(gozxing.ResultPointCallback)
}
symbologyIdentifier := 0
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(
float64(startGuardRange[0]+startGuardRange[1])/2.0, float64(rowNumber)))
}
result := this.decodeRowStringBuffer[:0]
endStart, result, e := this.decodeMiddle(row, startGuardRange, result)
if e != nil {
return nil, e
}
rowNumberf := float64(rowNumber)
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(float64(endStart), rowNumberf))
}
endRange, e := this.decodeEnd(row, endStart)
if e != nil {
return nil, gozxing.WrapNotFoundException(e)
}
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(
float64(endRange[0]+endRange[1])/2.0, rowNumberf))
}
// Make sure there is a quiet zone at least as big as the end pattern after the barcode. The
// spec might want more whitespace, but in practice this is the maximum we can count on.
end := endRange[1]
quietEnd := end + (end - endRange[0])
if quietEnd >= row.GetSize() {
return nil, gozxing.NewNotFoundException("quietEnd=%v, row size=%v", quietEnd, row.GetSize())
}
rowIsRange, _ := row.IsRange(end, quietEnd, false)
if !rowIsRange {
return nil, gozxing.NewNotFoundException("raw is not range")
}
this.decodeRowStringBuffer = result
resultString := string(result)
// UPC/EAN should never be less than 8 chars anyway
if len(resultString) < 8 {
return nil, gozxing.NewFormatException("len(resultString) = %v", len(resultString))
}
ok, e := this.checkChecksum(resultString)
if e != nil {
return nil, gozxing.WrapChecksumException(e)
}
if !ok {
return nil, gozxing.NewChecksumException()
}
left := float64(startGuardRange[1]+startGuardRange[0]) / 2.0
right := float64(endRange[1]+endRange[0]) / 2.0
format := this.getBarcodeFormat()
decodeResult := gozxing.NewResult(
resultString,
nil, // no natural byte representation for these barcodes
[]gozxing.ResultPoint{
gozxing.NewResultPoint(left, float64(rowNumber)),
gozxing.NewResultPoint(right, float64(rowNumber)),
},
format)
extensionLength := 0
extensionResult, e := this.extensionReader.decodeRow(rowNumber, row, endRange[1])
if e == nil {
decodeResult.PutMetadata(gozxing.ResultMetadataType_UPC_EAN_EXTENSION, extensionResult.GetText())
decodeResult.PutAllMetadata(extensionResult.GetResultMetadata())
decodeResult.AddResultPoints(extensionResult.GetResultPoints())
extensionLength = len(extensionResult.GetText())
} else {
// ignore ReaderException
if _, ok := e.(gozxing.ReaderException); !ok {
return nil, gozxing.WrapReaderException(e)
}
}
if hint, ok := hints[gozxing.DecodeHintType_ALLOWED_EAN_EXTENSIONS]; ok {
allowedExtensions, ok := hint.([]int)
if ok {
valid := false
for _, length := range allowedExtensions {
if extensionLength == length {
valid = true
break
}
}
if !valid {
return nil, gozxing.NewNotFoundException()
}
}
}
if format == gozxing.BarcodeFormat_EAN_13 || format == gozxing.BarcodeFormat_UPC_A {
countryID := eanManufacturerOrgSupportLookupCountryIdentifier(resultString)
if countryID != "" {
decodeResult.PutMetadata(gozxing.ResultMetadataType_POSSIBLE_COUNTRY, countryID)
}
}
if format == gozxing.BarcodeFormat_EAN_8 {
symbologyIdentifier = 4
}
decodeResult.PutMetadata(
gozxing.ResultMetadataType_SYMBOLOGY_IDENTIFIER, "]E"+strconv.Itoa(symbologyIdentifier))
return decodeResult, nil
}
// checkChecksum Check checksum
// @param s string of digits to check
// @return {@link #checkStandardUPCEANChecksum(CharSequence)}
// @throws FormatException if the string does not contain only digits
func upceanReader_checkChecksum(s string) (bool, error) {
return upceanReader_checkStandardUPCEANChecksum(s)
}
// checkStandardUPCEANChecksum Computes the UPC/EAN checksum on a string of digits,
// and reports whether the checksum is correct or not.
//
// @param s string of digits to check
/// @return true iff string of digits passes the UPC/EAN checksum algorithm
// @throws FormatException if the string does not contain only digits
func upceanReader_checkStandardUPCEANChecksum(s string) (bool, error) {
length := len(s)
if length == 0 {
return false, nil
}
check := int(s[length-1] - '0')
sum, e := upceanReader_getStandardUPCEANChecksum(s[:length-1])
if e != nil {
return false, e
}
return sum == check, nil
}
func upceanReader_getStandardUPCEANChecksum(s string) (int, error) {
length := len(s)
sum := 0
for i := length - 1; i >= 0; i -= 2 {
digit := int(s[i] - '0')
if digit < 0 || digit > 9 {
return 0, gozxing.NewFormatException("0x%02x is not digit", s[i])
}
sum += digit
}
sum *= 3
for i := length - 2; i >= 0; i -= 2 {
digit := int(s[i] - '0')
if digit < 0 || digit > 9 {
return 0, gozxing.NewFormatException("0x%02x is not digit", s[i])
}
sum += digit
}
return (1000 - sum) % 10, nil
}
func upceanReader_decodeEnd(row *gozxing.BitArray, endStart int) ([]int, error) {
return upceanReader_findGuardPattern(row, endStart, false, UPCEANReader_START_END_PATTERN)
}
func upceanReader_findGuardPattern(row *gozxing.BitArray, rowOffset int, whiteFirst bool, pattern []int) ([]int, error) {
counters := make([]int, len(pattern))
return upceanReader_findGuardPatternWithCounters(row, rowOffset, whiteFirst, pattern, counters)
}
// UPCEANReader_findGuardPatternWithCounters Find guard pattern
// @param row row of black/white values to search
// @param rowOffset position to start search
// @param whiteFirst if true, indicates that the pattern specifies white/black/white/...
// pixel counts, otherwise, it is interpreted as black/white/black/...
// @param pattern pattern of counts of number of black and white pixels that are being
// searched for as a pattern
// @param counters array of counters, as long as pattern, to re-use
// @return start/end horizontal offset of guard pattern, as an array of two ints
// @throws NotFoundException if pattern is not found
func upceanReader_findGuardPatternWithCounters(
row *gozxing.BitArray, rowOffset int, whiteFirst bool, pattern, counters []int) ([]int, error) {
width := row.GetSize()
if whiteFirst {
rowOffset = row.GetNextUnset(rowOffset)
} else {
rowOffset = row.GetNextSet(rowOffset)
}
counterPosition := 0
patternStart := rowOffset
patternLength := len(pattern)
isWhite := whiteFirst
for x := rowOffset; x < width; x++ {
if row.Get(x) != isWhite {
counters[counterPosition]++
} else {
if counterPosition == patternLength-1 | else {
counterPosition++
}
counters[counterPosition] = 1
isWhite = !isWhite
}
}
return nil, gozxing.NewNotFoundException()
}
// UPCEANReader_decodeDigit Attempts to decode a single UPC/EAN-encoded digit.
//
// @param row row of black/white values to decode
// @param counters the counts of runs of observed black/white/black/... values
// @param rowOffset horizontal offset to start decoding from
// @param patterns the set of patterns to use to decode -- sometimes different encodings
// for the digits 0-9 are used, and this indicates the encodings for 0 to 9 that should
// be used
// @return horizontal offset of first pixel beyond the decoded digit
// @throws NotFoundException if digit cannot be decoded
func upceanReader_decodeDigit(row *gozxing.BitArray, counters []int, rowOffset int, patterns [][]int) (int, error) {
e := RecordPattern(row, rowOffset, counters)
if e != nil {
return 0, e
}
bestVariance := UPCEANReader_MAX_AVG_VARIANCE // worst variance we'll accept
bestMatch := -1
max := len(patterns)
for i := 0; i < max; i++ {
pattern := patterns[i]
variance := PatternMatchVariance(counters, pattern, UPCEANReader_MAX_INDIVIDUAL_VARIANCE)
if variance < bestVariance {
bestVariance = variance
bestMatch = i
}
}
if bestMatch < 0 {
return 0, gozxing.NewNotFoundException()
}
return bestMatch, nil
}
| {
if PatternMatchVariance(counters, pattern, UPCEANReader_MAX_INDIVIDUAL_VARIANCE) < UPCEANReader_MAX_AVG_VARIANCE {
return []int{patternStart, x}, nil
}
patternStart += counters[0] + counters[1]
copy(counters[:counterPosition-1], counters[2:counterPosition+1])
counters[counterPosition-1] = 0
counters[counterPosition] = 0
counterPosition--
} | conditional_block |
upcean_reader.go | package oned
import (
"strconv"
"github.com/makiuchi-d/gozxing"
)
const (
// These two values are critical for determining how permissive the decoding will be.
// We've arrived at these values through a lot of trial and error. Setting them any higher
// lets false positives creep in quickly.
UPCEANReader_MAX_AVG_VARIANCE = 0.48
UPCEANReader_MAX_INDIVIDUAL_VARIANCE = 0.7
)
var (
// Start/end guard pattern.
UPCEANReader_START_END_PATTERN = []int{1, 1, 1}
// Pattern marking the middle of a UPC/EAN pattern, separating the two halves.
UPCEANReader_MIDDLE_PATTERN = []int{1, 1, 1, 1, 1}
// end guard pattern.
UPCEANReader_END_PATTERN = []int{1, 1, 1, 1, 1, 1}
// "Odd", or "L" patterns used to encode UPC/EAN digits.
UPCEANReader_L_PATTERNS = [][]int{
{3, 2, 1, 1}, // 0
{2, 2, 2, 1}, // 1
{2, 1, 2, 2}, // 2
{1, 4, 1, 1}, // 3
{1, 1, 3, 2}, // 4
{1, 2, 3, 1}, // 5
{1, 1, 1, 4}, // 6
{1, 3, 1, 2}, // 7
{1, 2, 1, 3}, // 8
{3, 1, 1, 2}, // 9
}
// As above but also including the "even", or "G" patterns used to encode UPC/EAN digits.
UPCEANReader_L_AND_G_PATTERNS [][]int
)
func init() {
UPCEANReader_L_AND_G_PATTERNS = make([][]int, 20)
copy(UPCEANReader_L_AND_G_PATTERNS, UPCEANReader_L_PATTERNS)
for i := 10; i < 20; i++ {
widths := UPCEANReader_L_PATTERNS[i-10]
reversedWidths := make([]int, len(widths))
for j := 0; j < len(widths); j++ {
reversedWidths[j] = widths[len(widths)-j-1]
}
UPCEANReader_L_AND_G_PATTERNS[i] = reversedWidths
}
}
type upceanRowDecoder interface {
RowDecoder
// getBarcodeFormat Get the format of this decoder.
// @return The 1D format.
getBarcodeFormat() gozxing.BarcodeFormat
// decodeMiddle Subclasses override this to decode the portion of a barcode between the start
// and end guard patterns.
//
// @param row row of black/white values to search
// @param startRange start/end offset of start guard pattern
// @param resultString {@link StringBuilder} to append decoded chars to
// @return horizontal offset of first pixel after the "middle" that was decoded
// @throws NotFoundException if decoding could not complete successfully
decodeMiddle(row *gozxing.BitArray, startRange []int, result []byte) (int, []byte, error)
decodeEnd(row *gozxing.BitArray, endStart int) ([]int, error)
// checkChecksum Check checksum
// @param s string of digits to check
// @return {@link #checkStandardUPCEANChecksum(CharSequence)}
// @throws FormatException if the string does not contain only digits
checkChecksum(s string) (bool, error)
}
type upceanReader struct {
upceanRowDecoder
*OneDReader
decodeRowStringBuffer []byte
extensionReader *UPCEANExtensionSupport
}
func newUPCEANReader(rowDecoder upceanRowDecoder) *upceanReader {
this := &upceanReader{
upceanRowDecoder: rowDecoder,
decodeRowStringBuffer: make([]byte, 13),
extensionReader: NewUPCEANExtensionSupport(),
}
this.OneDReader = NewOneDReader(rowDecoder)
return this
}
func upceanReader_findStartGuardPattern(row *gozxing.BitArray) ([]int, error) {
foundStart := false
var startRange []int
nextStart := 0
counters := make([]int, len(UPCEANReader_START_END_PATTERN))
for !foundStart {
for i := range counters {
counters[i] = 0
}
var e error
startRange, e = upceanReader_findGuardPatternWithCounters(
row, nextStart, false, UPCEANReader_START_END_PATTERN, counters)
if e != nil {
return nil, e
}
start := startRange[0]
nextStart = startRange[1]
// Make sure there is a quiet zone at least as big as the start pattern before the barcode.
// If this check would run off the left edge of the image, do not accept this barcode,
// as it is very likely to be a false positive.
quietStart := start - (nextStart - start)
if quietStart >= 0 {
foundStart, _ = row.IsRange(quietStart, start, false)
}
}
return startRange, nil
}
func (this *upceanReader) DecodeRow(rowNumber int, row *gozxing.BitArray, hints map[gozxing.DecodeHintType]interface{}) (*gozxing.Result, error) {
start, e := upceanReader_findStartGuardPattern(row)
if e != nil {
return nil, gozxing.WrapNotFoundException(e)
}
return this.decodeRowWithStartRange(rowNumber, row, start, hints)
}
// decodeRowWithStartRange Like {@link #decodeRow(int, BitArray, Map)}, but
// allows caller to inform method about where the UPC/EAN start pattern is
// found. This allows this to be computed once and reused across many implementations.</p>
//
// @param rowNumber row index into the image
// @param row encoding of the row of the barcode image
// @param startGuardRange start/end column where the opening start pattern was found
// @param hints optional hints that influence decoding
// @return {@link Result} encapsulating the result of decoding a barcode in the row
// @throws NotFoundException if no potential barcode is found
// @throws ChecksumException if a potential barcode is found but does not pass its checksum
// @throws FormatException if a potential barcode is found but format is invalid
func (this *upceanReader) decodeRowWithStartRange(
rowNumber int, row *gozxing.BitArray, startGuardRange []int,
hints map[gozxing.DecodeHintType]interface{}) (*gozxing.Result, error) {
var resultPointCallback gozxing.ResultPointCallback
if hint, ok := hints[gozxing.DecodeHintType_NEED_RESULT_POINT_CALLBACK]; ok {
resultPointCallback = hint.(gozxing.ResultPointCallback)
}
symbologyIdentifier := 0
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(
float64(startGuardRange[0]+startGuardRange[1])/2.0, float64(rowNumber)))
}
result := this.decodeRowStringBuffer[:0]
endStart, result, e := this.decodeMiddle(row, startGuardRange, result)
if e != nil {
return nil, e
}
rowNumberf := float64(rowNumber)
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(float64(endStart), rowNumberf))
}
endRange, e := this.decodeEnd(row, endStart)
if e != nil {
return nil, gozxing.WrapNotFoundException(e)
}
if resultPointCallback != nil {
resultPointCallback(gozxing.NewResultPoint(
float64(endRange[0]+endRange[1])/2.0, rowNumberf))
}
// Make sure there is a quiet zone at least as big as the end pattern after the barcode. The
// spec might want more whitespace, but in practice this is the maximum we can count on.
end := endRange[1]
quietEnd := end + (end - endRange[0])
if quietEnd >= row.GetSize() {
return nil, gozxing.NewNotFoundException("quietEnd=%v, row size=%v", quietEnd, row.GetSize())
}
rowIsRange, _ := row.IsRange(end, quietEnd, false)
if !rowIsRange {
return nil, gozxing.NewNotFoundException("raw is not range")
}
this.decodeRowStringBuffer = result
resultString := string(result)
// UPC/EAN should never be less than 8 chars anyway
if len(resultString) < 8 {
return nil, gozxing.NewFormatException("len(resultString) = %v", len(resultString))
}
ok, e := this.checkChecksum(resultString)
if e != nil {
return nil, gozxing.WrapChecksumException(e)
}
if !ok {
return nil, gozxing.NewChecksumException()
}
left := float64(startGuardRange[1]+startGuardRange[0]) / 2.0
right := float64(endRange[1]+endRange[0]) / 2.0
format := this.getBarcodeFormat()
decodeResult := gozxing.NewResult(
resultString,
nil, // no natural byte representation for these barcodes
[]gozxing.ResultPoint{
gozxing.NewResultPoint(left, float64(rowNumber)),
gozxing.NewResultPoint(right, float64(rowNumber)),
},
format)
extensionLength := 0
extensionResult, e := this.extensionReader.decodeRow(rowNumber, row, endRange[1])
if e == nil {
decodeResult.PutMetadata(gozxing.ResultMetadataType_UPC_EAN_EXTENSION, extensionResult.GetText())
decodeResult.PutAllMetadata(extensionResult.GetResultMetadata())
decodeResult.AddResultPoints(extensionResult.GetResultPoints())
extensionLength = len(extensionResult.GetText())
} else {
// ignore ReaderException
if _, ok := e.(gozxing.ReaderException); !ok {
return nil, gozxing.WrapReaderException(e)
}
}
if hint, ok := hints[gozxing.DecodeHintType_ALLOWED_EAN_EXTENSIONS]; ok {
allowedExtensions, ok := hint.([]int)
if ok {
valid := false
for _, length := range allowedExtensions {
if extensionLength == length {
valid = true
break
}
}
if !valid {
return nil, gozxing.NewNotFoundException()
}
}
}
if format == gozxing.BarcodeFormat_EAN_13 || format == gozxing.BarcodeFormat_UPC_A {
countryID := eanManufacturerOrgSupportLookupCountryIdentifier(resultString)
if countryID != "" {
decodeResult.PutMetadata(gozxing.ResultMetadataType_POSSIBLE_COUNTRY, countryID)
}
}
if format == gozxing.BarcodeFormat_EAN_8 {
symbologyIdentifier = 4
}
decodeResult.PutMetadata(
gozxing.ResultMetadataType_SYMBOLOGY_IDENTIFIER, "]E"+strconv.Itoa(symbologyIdentifier))
return decodeResult, nil
}
// checkChecksum Check checksum
// @param s string of digits to check
// @return {@link #checkStandardUPCEANChecksum(CharSequence)}
// @throws FormatException if the string does not contain only digits
func upceanReader_checkChecksum(s string) (bool, error) {
return upceanReader_checkStandardUPCEANChecksum(s)
}
// checkStandardUPCEANChecksum Computes the UPC/EAN checksum on a string of digits,
// and reports whether the checksum is correct or not.
//
// @param s string of digits to check
/// @return true iff string of digits passes the UPC/EAN checksum algorithm
// @throws FormatException if the string does not contain only digits
func upceanReader_checkStandardUPCEANChecksum(s string) (bool, error) {
length := len(s)
if length == 0 {
return false, nil
}
check := int(s[length-1] - '0')
sum, e := upceanReader_getStandardUPCEANChecksum(s[:length-1])
if e != nil {
return false, e
}
return sum == check, nil
}
func upceanReader_getStandardUPCEANChecksum(s string) (int, error) {
length := len(s)
sum := 0
for i := length - 1; i >= 0; i -= 2 {
digit := int(s[i] - '0')
if digit < 0 || digit > 9 {
return 0, gozxing.NewFormatException("0x%02x is not digit", s[i])
}
sum += digit
}
sum *= 3
for i := length - 2; i >= 0; i -= 2 {
digit := int(s[i] - '0')
if digit < 0 || digit > 9 {
return 0, gozxing.NewFormatException("0x%02x is not digit", s[i])
}
sum += digit
}
return (1000 - sum) % 10, nil
}
func upceanReader_decodeEnd(row *gozxing.BitArray, endStart int) ([]int, error) {
return upceanReader_findGuardPattern(row, endStart, false, UPCEANReader_START_END_PATTERN)
}
func upceanReader_findGuardPattern(row *gozxing.BitArray, rowOffset int, whiteFirst bool, pattern []int) ([]int, error) {
counters := make([]int, len(pattern))
return upceanReader_findGuardPatternWithCounters(row, rowOffset, whiteFirst, pattern, counters)
}
// UPCEANReader_findGuardPatternWithCounters Find guard pattern
// @param row row of black/white values to search
// @param rowOffset position to start search
// @param whiteFirst if true, indicates that the pattern specifies white/black/white/...
// pixel counts, otherwise, it is interpreted as black/white/black/...
// @param pattern pattern of counts of number of black and white pixels that are being
// searched for as a pattern
// @param counters array of counters, as long as pattern, to re-use
// @return start/end horizontal offset of guard pattern, as an array of two ints
// @throws NotFoundException if pattern is not found
func | (
row *gozxing.BitArray, rowOffset int, whiteFirst bool, pattern, counters []int) ([]int, error) {
width := row.GetSize()
if whiteFirst {
rowOffset = row.GetNextUnset(rowOffset)
} else {
rowOffset = row.GetNextSet(rowOffset)
}
counterPosition := 0
patternStart := rowOffset
patternLength := len(pattern)
isWhite := whiteFirst
for x := rowOffset; x < width; x++ {
if row.Get(x) != isWhite {
counters[counterPosition]++
} else {
if counterPosition == patternLength-1 {
if PatternMatchVariance(counters, pattern, UPCEANReader_MAX_INDIVIDUAL_VARIANCE) < UPCEANReader_MAX_AVG_VARIANCE {
return []int{patternStart, x}, nil
}
patternStart += counters[0] + counters[1]
copy(counters[:counterPosition-1], counters[2:counterPosition+1])
counters[counterPosition-1] = 0
counters[counterPosition] = 0
counterPosition--
} else {
counterPosition++
}
counters[counterPosition] = 1
isWhite = !isWhite
}
}
return nil, gozxing.NewNotFoundException()
}
// UPCEANReader_decodeDigit Attempts to decode a single UPC/EAN-encoded digit.
//
// @param row row of black/white values to decode
// @param counters the counts of runs of observed black/white/black/... values
// @param rowOffset horizontal offset to start decoding from
// @param patterns the set of patterns to use to decode -- sometimes different encodings
// for the digits 0-9 are used, and this indicates the encodings for 0 to 9 that should
// be used
// @return horizontal offset of first pixel beyond the decoded digit
// @throws NotFoundException if digit cannot be decoded
func upceanReader_decodeDigit(row *gozxing.BitArray, counters []int, rowOffset int, patterns [][]int) (int, error) {
e := RecordPattern(row, rowOffset, counters)
if e != nil {
return 0, e
}
bestVariance := UPCEANReader_MAX_AVG_VARIANCE // worst variance we'll accept
bestMatch := -1
max := len(patterns)
for i := 0; i < max; i++ {
pattern := patterns[i]
variance := PatternMatchVariance(counters, pattern, UPCEANReader_MAX_INDIVIDUAL_VARIANCE)
if variance < bestVariance {
bestVariance = variance
bestMatch = i
}
}
if bestMatch < 0 {
return 0, gozxing.NewNotFoundException()
}
return bestMatch, nil
}
| upceanReader_findGuardPatternWithCounters | identifier_name |
timeseries.ts | //import 'gscript-mocks.ts';
import { DateUtils, toObject, KeyValueMap } from './utils';
import { SheetUtils, SpreadsheetApp, ISheet, Logging } from './utils-google';
import { Prognoser, createPrognosis } from './prognoser';
import { Aggregation } from './aggregation';
function getAccountIdsToExclude(data: any[][]): any[] {
var header = data[0];
var activeCol = header.indexOf('Active');
var accountIdCol = header.indexOf('AccountId');
var result = data
.slice(1)
.filter(r => r[activeCol] == 'x')
.map(r => r[accountIdCol]);
return result;
}
function getTransactionsToExclude(data: any[][]) |
export function run() {
var ss = SpreadsheetApp.getActiveSpreadsheet();
var sheet = ss.getSheets()[0];
var columns = SheetUtils.getHeaderColumnsAsObject(sheet);
var rxFilter = SheetUtils.getColumnRegexFilter(sheet, columns.AccountId);
var filters = Timeseries.createFilters(
columns,
rxFilter,
ss.getSheetByName('filter_accounts'),
ss.getSheetByName('filter_tx')
);
var inYear = Timeseries.recalc(
sheet,
2,
ss.getSheetByName('prognosis_spec'),
'2020-12-01',
filters
);
//Logger.log("Fill sheet");
SheetUtils.fillSheet(SheetUtils.getOrCreateSheet('aggregated', true), inYear);
////Reverse order
//aggregated = aggregated.slice(0, 1).concat([].concat.apply([], tmp).reverse());
//accumulate(aggregated.slice(1), aggColums.Sum, Object.keys(aggColums).length);
//aggregated = aggregated.slice(0, 1).concat(aggregated.slice(1).reverse());
//Logger.log("Done");
}
// function getColumnRegexFilter(
// sheet: ISheet,
// columnIndex: number
// ): RegExp | null {
// var filter = sheet.getFilter();
// var filterCriteria = filter.getColumnFilterCriteria(columnIndex + 1);
// if (filterCriteria && filterCriteria.getCriteriaType() == 'CUSTOM_FORMULA') {
// var critVals = filterCriteria.getCriteriaValues();
// if (critVals.length == 1 && critVals[0].indexOf('REGEXMATCH') >= 0) {
// //REGEXMATCH =REGEXMATCH(TEXT(E:E, 0), "^43")
// var rxMatch = /\"(.+)\"/.exec(critVals[0]);
// if (rxMatch && rxMatch.length >= 2) {
// return new RegExp(rxMatch[1]);
// } else Logger.log('filter criteria regex no match' + critVals);
// } else
// Logger.log(
// 'filter criteria not regex: ' +
// critVals +
// ' ' +
// critVals.indexOf('REGEXMATCH')
// );
// } else Logger.log('filter criteria: N/A');
// return null;
// }
export class Timeseries {
static applyColumnFilter(rx: RegExp, rows: string[][], columnIndex: number) {
var visibleRows = [];
if (!rows) throw 'No rows'; // || !rows.length
if (!rx) throw 'No regex';
if (rows.length > 0) {
if (rows[0].length <= columnIndex)
throw 'Incorrect column ' + columnIndex + ': ' + rows[0];
for (var j = 0; j < rows.length; j++) {
if (rx.test((rows[j][columnIndex] || '').toString()))
visibleRows.push(rows[j]);
}
}
return visibleRows;
}
static createFilters(
columns: KeyValueMap<number>,
rxAccountIdColumnFilter?: RegExp | null,
sheetFilterAccounts?: ISheet,
sheetFilterTransactions?: ISheet
): Array<(data: any[][]) => any[][]> {
var result: Array<(data: any[][]) => any[][]> = [];
if (rxAccountIdColumnFilter) {
result.push(data =>
Timeseries.applyColumnFilter(rxAccountIdColumnFilter, data, columns.AccountId)
);
//Logger.log('after column filter: ' + data.length);
}
result.push(
data =>
//Filter out rows with missing transaction:
data.filter(
r =>
r[columns.Missing] !== 'TRX' &&
new Date(r[columns.Date]).getFullYear() >= 2016
)
//Logger.log('after TRX filter: ' + data.length);
);
//Filter transactions booked on specified accounts:
if (sheetFilterAccounts) {
var accountIdsToExclude = getAccountIdsToExclude(
sheetFilterAccounts.getDataRange().getValues()
);
if (accountIdsToExclude && accountIdsToExclude.length) {
result.push(
data =>
data.filter(
r => accountIdsToExclude.indexOf(r[columns.AccountId]) < 0
)
//Logger.log('after getAccountIdsToExclude: ' + data.length);
);
}
}
//Filter out specific transactions:
if (sheetFilterTransactions) {
var transactionsToExclude = getTransactionsToExclude(
sheetFilterTransactions.getDataRange().getValues()
);
if (transactionsToExclude && transactionsToExclude.length) {
result.push(
data =>
data.filter(r => {
var found = transactionsToExclude.filter(
o => o.Date.valueOf() == r[columns.Date].valueOf()
);
if (found.length) {
//TODO: could be many to exclude from same date
var matches = Object.keys(found[0])
.filter(k => k != 'Date')
.map(k => found[0][k] == r[columns[k]]);
if (matches.indexOf(false) < 0) {
//TODO: remove them from
var tmpRemoveIndex = transactionsToExclude.indexOf(found[0]);
if (tmpRemoveIndex >= 0)
transactionsToExclude.splice(tmpRemoveIndex, 1);
return false;
}
}
return true;
})
//Logger.log('after getTransactionsToExclude: ' + data.length);
);
}
}
return result;
}
static recalc(
sheet: ISheet,
numYearsLookbackAvg?: number,
sheetPrognosisSpec?: ISheet,
prognosisUntil?: string | Date,
funcFilters?: Function[]
) {
numYearsLookbackAvg = numYearsLookbackAvg == null ? 2 : numYearsLookbackAvg;
prognosisUntil = new Date(prognosisUntil || '2020-01-01');
var data = sheet.getDataRange().getValues();
//var header = data[0];
var columns = SheetUtils.getHeaderColumnsAsObject(sheet); //KeyValueMap<number> = toObject(header, function (v, i) { return [v, i]; });
var applyFilters = (dataToFilter: any[][]) => {
if (funcFilters) {
// Logger.log('filtering started: ' + dataToFilter.length);
funcFilters.forEach(f => (dataToFilter = f(dataToFilter)));
// Logger.log('after filtering: ' + dataToFilter.length);
}
return dataToFilter;
};
data = applyFilters(data);
var funcFilterAndConcat = (dataToConcat: any[][]) =>
data.concat(applyFilters(dataToConcat));
if (numYearsLookbackAvg > 0) {
Logging.log('prognosis started');
var added = createPrognosis(
data,
columns,
new Date(),
prognosisUntil,
numYearsLookbackAvg
);
data = funcFilterAndConcat(added);
if (sheetPrognosisSpec) {
Logging.log('prognosis modification started');
var progger = new Prognoser();
var progcolumns = {
Date: columns.Date,
Amount: columns.Amount,
Supplier: columns.Supplier,
Account: columns.AccountId
};
added = progger.createPrognosis(
sheetPrognosisSpec.getDataRange().getValues(),
prognosisUntil,
progcolumns
);
if (true) {
//copy modified prognosis to future year, so we can compare with and without
var laterThan = new Date(new Date().getFullYear(), 0, 1).valueOf();
var copy = data
.filter(r => r[columns.Date].valueOf() > laterThan)
.map(r => r.slice());
copy = copy.concat(added);
copy.forEach(r => {
var d = r[columns.Date];
var year = d.getFullYear();
r[columns.Date] = new Date(year + 100, d.getMonth(), d.getDate());
});
data = funcFilterAndConcat(copy); //data.concat(copy);
} else {
data = funcFilterAndConcat(added); //data.concat(added);
}
}
}
//Perform aggregation:
Logging.log('Aggregation started');
var groupingDefs = [
{
col: columns.Date,
name: 'Period',
func: (v: any) => DateUtils.getDateStr(v)
}
];
var aggregateDef = {
col: columns.Amount,
name: 'Sum',
func: (v: any, p: any) => (parseInt(v, 10) || 0) + (p || 0)
};
var aggregated = Aggregation.aggregateIntoRows(
data,
groupingDefs,
aggregateDef,
false
);
var aggColums: KeyValueMap<number> = toObject(aggregated[0], (v, i) => [
v,
i
]);
aggregated = Timeseries.sortRowsByColumn(aggregated, aggColums.Period, true, true);
aggregated[0].push('Accumulated');
//Sort rows, one list per year:
var byYear: KeyValueMap<any[]> = {};
aggregated.slice(1).forEach(row => {
var year = new Date(row[aggColums.Period]).getFullYear();
var inYear = byYear[year];
if (!inYear) {
inYear = [];
byYear[year] = inYear;
}
inYear.push(row);
});
//Create column with accumulated values per year (so each year starts with 0):
var colAcc = Object.keys(aggColums).length;
// var tmp = Object.keys(byYear).sort().map(key => {
// var list = byYear[key];
// accumulate(list, aggColums.Sum, colAcc);
// return list;
// });
Logging.log('Create per-year table');
//Create table with one row per day in year, and one column per year with accumulated values
var sortedYears = Object.keys(byYear).sort();
var inYear = [['Date'].concat(sortedYears)];
var curr = new Date(2000, 0, 1).valueOf();
var lastValues: KeyValueMap<number> = {};
sortedYears.forEach(o => (lastValues[o] = 0));
var byDayInYear: any[][] = Array.apply(null, new Array(366)).map(() => []);
sortedYears.forEach(k => {
byYear[k].forEach(r =>
byDayInYear[DateUtils.getDayInYear(new Date(r[0]))].push(r)
);
});
var lastRow: any[] = Array.apply(null, new Array(sortedYears.length)).map(
() => 0
);
for (var day = 0; day < byDayInYear.length; day++) {
var dateStr = DateUtils.getDateStr(new Date(curr));
var row = lastRow.slice();
var inDay = byDayInYear[day];
inDay.forEach(r => {
var year = new Date(r[0]).getFullYear();
row[sortedYears.indexOf(year.toString())] = r[colAcc];
});
lastRow = row;
inYear.push([dateStr].concat(row));
curr += 1000 * 60 * 60 * 24;
}
return inYear;
}
static sortRowsByColumn(
list: any[][],
column: number,
hasHeader: boolean,
reverse: boolean
): any[][] {
const sortVal = reverse ? -1 : 1;
const srt = (l: any) =>
l.sort((a: any, b: any) => (a[column] > b[column] ? sortVal : -sortVal));
// var srt = reverse
// ? (l: any) => l.sort((a, b) => a[column] > b[column] ? 1 : -1)
// : l => l.sort((a, b) => a[column] < b[column] ? 1 : -1);
return hasHeader ? list.slice(0, 1).concat(srt(list.slice(1))) : srt(list);
}
}
export function accumulate(
list: any[][],
columnToAcc: number,
columnForAccResult: number
) {
var total = 0;
list.forEach(row => {
total += parseFloat(row[columnToAcc]);
row[columnForAccResult] = total;
});
}
| {
var header = data[0];
data = data.slice(1);
var result = data.map(r => {
var tmp = r.map((_v, i) => [header[i], r[i]]).filter(o => !!o[1]);
return toObject(tmp, v => v);
});
return result;
} | identifier_body |
timeseries.ts | //import 'gscript-mocks.ts';
import { DateUtils, toObject, KeyValueMap } from './utils';
import { SheetUtils, SpreadsheetApp, ISheet, Logging } from './utils-google';
import { Prognoser, createPrognosis } from './prognoser';
import { Aggregation } from './aggregation';
function getAccountIdsToExclude(data: any[][]): any[] {
var header = data[0];
var activeCol = header.indexOf('Active');
var accountIdCol = header.indexOf('AccountId');
var result = data
.slice(1)
.filter(r => r[activeCol] == 'x')
.map(r => r[accountIdCol]);
return result;
}
function getTransactionsToExclude(data: any[][]) {
var header = data[0];
data = data.slice(1);
var result = data.map(r => {
var tmp = r.map((_v, i) => [header[i], r[i]]).filter(o => !!o[1]);
return toObject(tmp, v => v);
});
return result;
}
export function run() {
var ss = SpreadsheetApp.getActiveSpreadsheet();
var sheet = ss.getSheets()[0];
var columns = SheetUtils.getHeaderColumnsAsObject(sheet);
var rxFilter = SheetUtils.getColumnRegexFilter(sheet, columns.AccountId);
var filters = Timeseries.createFilters(
columns,
rxFilter,
ss.getSheetByName('filter_accounts'),
ss.getSheetByName('filter_tx')
);
var inYear = Timeseries.recalc(
sheet,
2,
ss.getSheetByName('prognosis_spec'),
'2020-12-01',
filters
);
//Logger.log("Fill sheet");
SheetUtils.fillSheet(SheetUtils.getOrCreateSheet('aggregated', true), inYear);
////Reverse order
//aggregated = aggregated.slice(0, 1).concat([].concat.apply([], tmp).reverse());
//accumulate(aggregated.slice(1), aggColums.Sum, Object.keys(aggColums).length);
//aggregated = aggregated.slice(0, 1).concat(aggregated.slice(1).reverse());
//Logger.log("Done");
}
// function getColumnRegexFilter(
// sheet: ISheet,
// columnIndex: number
// ): RegExp | null {
// var filter = sheet.getFilter();
// var filterCriteria = filter.getColumnFilterCriteria(columnIndex + 1);
// if (filterCriteria && filterCriteria.getCriteriaType() == 'CUSTOM_FORMULA') {
// var critVals = filterCriteria.getCriteriaValues();
// if (critVals.length == 1 && critVals[0].indexOf('REGEXMATCH') >= 0) {
// //REGEXMATCH =REGEXMATCH(TEXT(E:E, 0), "^43")
// var rxMatch = /\"(.+)\"/.exec(critVals[0]);
// if (rxMatch && rxMatch.length >= 2) {
// return new RegExp(rxMatch[1]);
// } else Logger.log('filter criteria regex no match' + critVals);
// } else
// Logger.log(
// 'filter criteria not regex: ' +
// critVals +
// ' ' +
// critVals.indexOf('REGEXMATCH')
// );
// } else Logger.log('filter criteria: N/A');
// return null;
// }
export class Timeseries {
static applyColumnFilter(rx: RegExp, rows: string[][], columnIndex: number) {
var visibleRows = [];
if (!rows) throw 'No rows'; // || !rows.length
if (!rx) throw 'No regex';
if (rows.length > 0) {
if (rows[0].length <= columnIndex)
throw 'Incorrect column ' + columnIndex + ': ' + rows[0];
for (var j = 0; j < rows.length; j++) {
if (rx.test((rows[j][columnIndex] || '').toString()))
visibleRows.push(rows[j]);
}
}
return visibleRows;
}
static createFilters(
columns: KeyValueMap<number>,
rxAccountIdColumnFilter?: RegExp | null,
sheetFilterAccounts?: ISheet,
sheetFilterTransactions?: ISheet
): Array<(data: any[][]) => any[][]> {
var result: Array<(data: any[][]) => any[][]> = [];
if (rxAccountIdColumnFilter) {
result.push(data =>
Timeseries.applyColumnFilter(rxAccountIdColumnFilter, data, columns.AccountId)
);
//Logger.log('after column filter: ' + data.length);
}
result.push(
data =>
//Filter out rows with missing transaction:
data.filter(
r =>
r[columns.Missing] !== 'TRX' &&
new Date(r[columns.Date]).getFullYear() >= 2016
)
//Logger.log('after TRX filter: ' + data.length);
);
//Filter transactions booked on specified accounts:
if (sheetFilterAccounts) |
//Filter out specific transactions:
if (sheetFilterTransactions) {
var transactionsToExclude = getTransactionsToExclude(
sheetFilterTransactions.getDataRange().getValues()
);
if (transactionsToExclude && transactionsToExclude.length) {
result.push(
data =>
data.filter(r => {
var found = transactionsToExclude.filter(
o => o.Date.valueOf() == r[columns.Date].valueOf()
);
if (found.length) {
//TODO: could be many to exclude from same date
var matches = Object.keys(found[0])
.filter(k => k != 'Date')
.map(k => found[0][k] == r[columns[k]]);
if (matches.indexOf(false) < 0) {
//TODO: remove them from
var tmpRemoveIndex = transactionsToExclude.indexOf(found[0]);
if (tmpRemoveIndex >= 0)
transactionsToExclude.splice(tmpRemoveIndex, 1);
return false;
}
}
return true;
})
//Logger.log('after getTransactionsToExclude: ' + data.length);
);
}
}
return result;
}
static recalc(
sheet: ISheet,
numYearsLookbackAvg?: number,
sheetPrognosisSpec?: ISheet,
prognosisUntil?: string | Date,
funcFilters?: Function[]
) {
numYearsLookbackAvg = numYearsLookbackAvg == null ? 2 : numYearsLookbackAvg;
prognosisUntil = new Date(prognosisUntil || '2020-01-01');
var data = sheet.getDataRange().getValues();
//var header = data[0];
var columns = SheetUtils.getHeaderColumnsAsObject(sheet); //KeyValueMap<number> = toObject(header, function (v, i) { return [v, i]; });
var applyFilters = (dataToFilter: any[][]) => {
if (funcFilters) {
// Logger.log('filtering started: ' + dataToFilter.length);
funcFilters.forEach(f => (dataToFilter = f(dataToFilter)));
// Logger.log('after filtering: ' + dataToFilter.length);
}
return dataToFilter;
};
data = applyFilters(data);
var funcFilterAndConcat = (dataToConcat: any[][]) =>
data.concat(applyFilters(dataToConcat));
if (numYearsLookbackAvg > 0) {
Logging.log('prognosis started');
var added = createPrognosis(
data,
columns,
new Date(),
prognosisUntil,
numYearsLookbackAvg
);
data = funcFilterAndConcat(added);
if (sheetPrognosisSpec) {
Logging.log('prognosis modification started');
var progger = new Prognoser();
var progcolumns = {
Date: columns.Date,
Amount: columns.Amount,
Supplier: columns.Supplier,
Account: columns.AccountId
};
added = progger.createPrognosis(
sheetPrognosisSpec.getDataRange().getValues(),
prognosisUntil,
progcolumns
);
if (true) {
//copy modified prognosis to future year, so we can compare with and without
var laterThan = new Date(new Date().getFullYear(), 0, 1).valueOf();
var copy = data
.filter(r => r[columns.Date].valueOf() > laterThan)
.map(r => r.slice());
copy = copy.concat(added);
copy.forEach(r => {
var d = r[columns.Date];
var year = d.getFullYear();
r[columns.Date] = new Date(year + 100, d.getMonth(), d.getDate());
});
data = funcFilterAndConcat(copy); //data.concat(copy);
} else {
data = funcFilterAndConcat(added); //data.concat(added);
}
}
}
//Perform aggregation:
Logging.log('Aggregation started');
var groupingDefs = [
{
col: columns.Date,
name: 'Period',
func: (v: any) => DateUtils.getDateStr(v)
}
];
var aggregateDef = {
col: columns.Amount,
name: 'Sum',
func: (v: any, p: any) => (parseInt(v, 10) || 0) + (p || 0)
};
var aggregated = Aggregation.aggregateIntoRows(
data,
groupingDefs,
aggregateDef,
false
);
var aggColums: KeyValueMap<number> = toObject(aggregated[0], (v, i) => [
v,
i
]);
aggregated = Timeseries.sortRowsByColumn(aggregated, aggColums.Period, true, true);
aggregated[0].push('Accumulated');
//Sort rows, one list per year:
var byYear: KeyValueMap<any[]> = {};
aggregated.slice(1).forEach(row => {
var year = new Date(row[aggColums.Period]).getFullYear();
var inYear = byYear[year];
if (!inYear) {
inYear = [];
byYear[year] = inYear;
}
inYear.push(row);
});
//Create column with accumulated values per year (so each year starts with 0):
var colAcc = Object.keys(aggColums).length;
// var tmp = Object.keys(byYear).sort().map(key => {
// var list = byYear[key];
// accumulate(list, aggColums.Sum, colAcc);
// return list;
// });
Logging.log('Create per-year table');
//Create table with one row per day in year, and one column per year with accumulated values
var sortedYears = Object.keys(byYear).sort();
var inYear = [['Date'].concat(sortedYears)];
var curr = new Date(2000, 0, 1).valueOf();
var lastValues: KeyValueMap<number> = {};
sortedYears.forEach(o => (lastValues[o] = 0));
var byDayInYear: any[][] = Array.apply(null, new Array(366)).map(() => []);
sortedYears.forEach(k => {
byYear[k].forEach(r =>
byDayInYear[DateUtils.getDayInYear(new Date(r[0]))].push(r)
);
});
var lastRow: any[] = Array.apply(null, new Array(sortedYears.length)).map(
() => 0
);
for (var day = 0; day < byDayInYear.length; day++) {
var dateStr = DateUtils.getDateStr(new Date(curr));
var row = lastRow.slice();
var inDay = byDayInYear[day];
inDay.forEach(r => {
var year = new Date(r[0]).getFullYear();
row[sortedYears.indexOf(year.toString())] = r[colAcc];
});
lastRow = row;
inYear.push([dateStr].concat(row));
curr += 1000 * 60 * 60 * 24;
}
return inYear;
}
static sortRowsByColumn(
list: any[][],
column: number,
hasHeader: boolean,
reverse: boolean
): any[][] {
const sortVal = reverse ? -1 : 1;
const srt = (l: any) =>
l.sort((a: any, b: any) => (a[column] > b[column] ? sortVal : -sortVal));
// var srt = reverse
// ? (l: any) => l.sort((a, b) => a[column] > b[column] ? 1 : -1)
// : l => l.sort((a, b) => a[column] < b[column] ? 1 : -1);
return hasHeader ? list.slice(0, 1).concat(srt(list.slice(1))) : srt(list);
}
}
export function accumulate(
list: any[][],
columnToAcc: number,
columnForAccResult: number
) {
var total = 0;
list.forEach(row => {
total += parseFloat(row[columnToAcc]);
row[columnForAccResult] = total;
});
}
| {
var accountIdsToExclude = getAccountIdsToExclude(
sheetFilterAccounts.getDataRange().getValues()
);
if (accountIdsToExclude && accountIdsToExclude.length) {
result.push(
data =>
data.filter(
r => accountIdsToExclude.indexOf(r[columns.AccountId]) < 0
)
//Logger.log('after getAccountIdsToExclude: ' + data.length);
);
}
} | conditional_block |
timeseries.ts | //import 'gscript-mocks.ts';
import { DateUtils, toObject, KeyValueMap } from './utils';
import { SheetUtils, SpreadsheetApp, ISheet, Logging } from './utils-google';
import { Prognoser, createPrognosis } from './prognoser';
import { Aggregation } from './aggregation';
function getAccountIdsToExclude(data: any[][]): any[] {
var header = data[0];
var activeCol = header.indexOf('Active');
var accountIdCol = header.indexOf('AccountId');
var result = data
.slice(1)
.filter(r => r[activeCol] == 'x')
.map(r => r[accountIdCol]);
return result;
}
function getTransactionsToExclude(data: any[][]) {
var header = data[0];
data = data.slice(1);
var result = data.map(r => {
var tmp = r.map((_v, i) => [header[i], r[i]]).filter(o => !!o[1]);
return toObject(tmp, v => v);
});
return result;
}
export function run() {
var ss = SpreadsheetApp.getActiveSpreadsheet();
var sheet = ss.getSheets()[0];
var columns = SheetUtils.getHeaderColumnsAsObject(sheet);
var rxFilter = SheetUtils.getColumnRegexFilter(sheet, columns.AccountId);
var filters = Timeseries.createFilters(
columns,
rxFilter,
ss.getSheetByName('filter_accounts'),
ss.getSheetByName('filter_tx')
);
var inYear = Timeseries.recalc(
sheet,
2,
ss.getSheetByName('prognosis_spec'),
'2020-12-01',
filters
);
//Logger.log("Fill sheet");
SheetUtils.fillSheet(SheetUtils.getOrCreateSheet('aggregated', true), inYear);
////Reverse order
//aggregated = aggregated.slice(0, 1).concat([].concat.apply([], tmp).reverse());
//accumulate(aggregated.slice(1), aggColums.Sum, Object.keys(aggColums).length);
//aggregated = aggregated.slice(0, 1).concat(aggregated.slice(1).reverse());
//Logger.log("Done");
}
// function getColumnRegexFilter(
// sheet: ISheet,
// columnIndex: number
// ): RegExp | null {
// var filter = sheet.getFilter();
// var filterCriteria = filter.getColumnFilterCriteria(columnIndex + 1);
// if (filterCriteria && filterCriteria.getCriteriaType() == 'CUSTOM_FORMULA') {
// var critVals = filterCriteria.getCriteriaValues();
// if (critVals.length == 1 && critVals[0].indexOf('REGEXMATCH') >= 0) {
// //REGEXMATCH =REGEXMATCH(TEXT(E:E, 0), "^43")
// var rxMatch = /\"(.+)\"/.exec(critVals[0]);
// if (rxMatch && rxMatch.length >= 2) {
// return new RegExp(rxMatch[1]);
// } else Logger.log('filter criteria regex no match' + critVals);
// } else
// Logger.log(
// 'filter criteria not regex: ' +
// critVals +
// ' ' +
// critVals.indexOf('REGEXMATCH')
// );
// } else Logger.log('filter criteria: N/A');
// return null;
// }
export class Timeseries {
static applyColumnFilter(rx: RegExp, rows: string[][], columnIndex: number) {
var visibleRows = [];
if (!rows) throw 'No rows'; // || !rows.length
if (!rx) throw 'No regex';
if (rows.length > 0) {
if (rows[0].length <= columnIndex)
throw 'Incorrect column ' + columnIndex + ': ' + rows[0];
for (var j = 0; j < rows.length; j++) {
if (rx.test((rows[j][columnIndex] || '').toString()))
visibleRows.push(rows[j]);
}
}
return visibleRows;
}
static createFilters(
columns: KeyValueMap<number>,
rxAccountIdColumnFilter?: RegExp | null,
sheetFilterAccounts?: ISheet,
sheetFilterTransactions?: ISheet
): Array<(data: any[][]) => any[][]> {
var result: Array<(data: any[][]) => any[][]> = [];
if (rxAccountIdColumnFilter) {
result.push(data =>
Timeseries.applyColumnFilter(rxAccountIdColumnFilter, data, columns.AccountId)
);
//Logger.log('after column filter: ' + data.length);
}
result.push(
data =>
//Filter out rows with missing transaction:
data.filter(
r =>
r[columns.Missing] !== 'TRX' &&
new Date(r[columns.Date]).getFullYear() >= 2016
)
//Logger.log('after TRX filter: ' + data.length);
);
//Filter transactions booked on specified accounts:
if (sheetFilterAccounts) {
var accountIdsToExclude = getAccountIdsToExclude(
sheetFilterAccounts.getDataRange().getValues()
);
if (accountIdsToExclude && accountIdsToExclude.length) {
result.push(
data =>
data.filter(
r => accountIdsToExclude.indexOf(r[columns.AccountId]) < 0
)
//Logger.log('after getAccountIdsToExclude: ' + data.length);
);
}
}
//Filter out specific transactions:
if (sheetFilterTransactions) {
var transactionsToExclude = getTransactionsToExclude(
sheetFilterTransactions.getDataRange().getValues()
);
if (transactionsToExclude && transactionsToExclude.length) {
result.push(
data =>
data.filter(r => {
var found = transactionsToExclude.filter(
o => o.Date.valueOf() == r[columns.Date].valueOf()
);
if (found.length) {
//TODO: could be many to exclude from same date
var matches = Object.keys(found[0])
.filter(k => k != 'Date')
.map(k => found[0][k] == r[columns[k]]);
if (matches.indexOf(false) < 0) {
//TODO: remove them from
var tmpRemoveIndex = transactionsToExclude.indexOf(found[0]);
if (tmpRemoveIndex >= 0)
transactionsToExclude.splice(tmpRemoveIndex, 1);
return false;
}
}
return true;
})
//Logger.log('after getTransactionsToExclude: ' + data.length);
);
}
}
return result;
}
static | (
sheet: ISheet,
numYearsLookbackAvg?: number,
sheetPrognosisSpec?: ISheet,
prognosisUntil?: string | Date,
funcFilters?: Function[]
) {
numYearsLookbackAvg = numYearsLookbackAvg == null ? 2 : numYearsLookbackAvg;
prognosisUntil = new Date(prognosisUntil || '2020-01-01');
var data = sheet.getDataRange().getValues();
//var header = data[0];
var columns = SheetUtils.getHeaderColumnsAsObject(sheet); //KeyValueMap<number> = toObject(header, function (v, i) { return [v, i]; });
var applyFilters = (dataToFilter: any[][]) => {
if (funcFilters) {
// Logger.log('filtering started: ' + dataToFilter.length);
funcFilters.forEach(f => (dataToFilter = f(dataToFilter)));
// Logger.log('after filtering: ' + dataToFilter.length);
}
return dataToFilter;
};
data = applyFilters(data);
var funcFilterAndConcat = (dataToConcat: any[][]) =>
data.concat(applyFilters(dataToConcat));
if (numYearsLookbackAvg > 0) {
Logging.log('prognosis started');
var added = createPrognosis(
data,
columns,
new Date(),
prognosisUntil,
numYearsLookbackAvg
);
data = funcFilterAndConcat(added);
if (sheetPrognosisSpec) {
Logging.log('prognosis modification started');
var progger = new Prognoser();
var progcolumns = {
Date: columns.Date,
Amount: columns.Amount,
Supplier: columns.Supplier,
Account: columns.AccountId
};
added = progger.createPrognosis(
sheetPrognosisSpec.getDataRange().getValues(),
prognosisUntil,
progcolumns
);
if (true) {
//copy modified prognosis to future year, so we can compare with and without
var laterThan = new Date(new Date().getFullYear(), 0, 1).valueOf();
var copy = data
.filter(r => r[columns.Date].valueOf() > laterThan)
.map(r => r.slice());
copy = copy.concat(added);
copy.forEach(r => {
var d = r[columns.Date];
var year = d.getFullYear();
r[columns.Date] = new Date(year + 100, d.getMonth(), d.getDate());
});
data = funcFilterAndConcat(copy); //data.concat(copy);
} else {
data = funcFilterAndConcat(added); //data.concat(added);
}
}
}
//Perform aggregation:
Logging.log('Aggregation started');
var groupingDefs = [
{
col: columns.Date,
name: 'Period',
func: (v: any) => DateUtils.getDateStr(v)
}
];
var aggregateDef = {
col: columns.Amount,
name: 'Sum',
func: (v: any, p: any) => (parseInt(v, 10) || 0) + (p || 0)
};
var aggregated = Aggregation.aggregateIntoRows(
data,
groupingDefs,
aggregateDef,
false
);
var aggColums: KeyValueMap<number> = toObject(aggregated[0], (v, i) => [
v,
i
]);
aggregated = Timeseries.sortRowsByColumn(aggregated, aggColums.Period, true, true);
aggregated[0].push('Accumulated');
//Sort rows, one list per year:
var byYear: KeyValueMap<any[]> = {};
aggregated.slice(1).forEach(row => {
var year = new Date(row[aggColums.Period]).getFullYear();
var inYear = byYear[year];
if (!inYear) {
inYear = [];
byYear[year] = inYear;
}
inYear.push(row);
});
//Create column with accumulated values per year (so each year starts with 0):
var colAcc = Object.keys(aggColums).length;
// var tmp = Object.keys(byYear).sort().map(key => {
// var list = byYear[key];
// accumulate(list, aggColums.Sum, colAcc);
// return list;
// });
Logging.log('Create per-year table');
//Create table with one row per day in year, and one column per year with accumulated values
var sortedYears = Object.keys(byYear).sort();
var inYear = [['Date'].concat(sortedYears)];
var curr = new Date(2000, 0, 1).valueOf();
var lastValues: KeyValueMap<number> = {};
sortedYears.forEach(o => (lastValues[o] = 0));
var byDayInYear: any[][] = Array.apply(null, new Array(366)).map(() => []);
sortedYears.forEach(k => {
byYear[k].forEach(r =>
byDayInYear[DateUtils.getDayInYear(new Date(r[0]))].push(r)
);
});
var lastRow: any[] = Array.apply(null, new Array(sortedYears.length)).map(
() => 0
);
for (var day = 0; day < byDayInYear.length; day++) {
var dateStr = DateUtils.getDateStr(new Date(curr));
var row = lastRow.slice();
var inDay = byDayInYear[day];
inDay.forEach(r => {
var year = new Date(r[0]).getFullYear();
row[sortedYears.indexOf(year.toString())] = r[colAcc];
});
lastRow = row;
inYear.push([dateStr].concat(row));
curr += 1000 * 60 * 60 * 24;
}
return inYear;
}
static sortRowsByColumn(
list: any[][],
column: number,
hasHeader: boolean,
reverse: boolean
): any[][] {
const sortVal = reverse ? -1 : 1;
const srt = (l: any) =>
l.sort((a: any, b: any) => (a[column] > b[column] ? sortVal : -sortVal));
// var srt = reverse
// ? (l: any) => l.sort((a, b) => a[column] > b[column] ? 1 : -1)
// : l => l.sort((a, b) => a[column] < b[column] ? 1 : -1);
return hasHeader ? list.slice(0, 1).concat(srt(list.slice(1))) : srt(list);
}
}
export function accumulate(
list: any[][],
columnToAcc: number,
columnForAccResult: number
) {
var total = 0;
list.forEach(row => {
total += parseFloat(row[columnToAcc]);
row[columnForAccResult] = total;
});
}
| recalc | identifier_name |
timeseries.ts | //import 'gscript-mocks.ts';
import { DateUtils, toObject, KeyValueMap } from './utils';
import { SheetUtils, SpreadsheetApp, ISheet, Logging } from './utils-google';
import { Prognoser, createPrognosis } from './prognoser';
import { Aggregation } from './aggregation';
function getAccountIdsToExclude(data: any[][]): any[] {
var header = data[0];
var activeCol = header.indexOf('Active');
var accountIdCol = header.indexOf('AccountId');
var result = data
.slice(1)
.filter(r => r[activeCol] == 'x')
.map(r => r[accountIdCol]);
return result;
}
function getTransactionsToExclude(data: any[][]) {
var header = data[0];
data = data.slice(1);
var result = data.map(r => {
var tmp = r.map((_v, i) => [header[i], r[i]]).filter(o => !!o[1]);
return toObject(tmp, v => v);
});
return result;
}
export function run() {
var ss = SpreadsheetApp.getActiveSpreadsheet();
var sheet = ss.getSheets()[0];
var columns = SheetUtils.getHeaderColumnsAsObject(sheet);
var rxFilter = SheetUtils.getColumnRegexFilter(sheet, columns.AccountId);
var filters = Timeseries.createFilters(
columns,
rxFilter,
ss.getSheetByName('filter_accounts'),
ss.getSheetByName('filter_tx')
);
var inYear = Timeseries.recalc(
sheet,
2,
ss.getSheetByName('prognosis_spec'),
'2020-12-01',
filters
);
//Logger.log("Fill sheet");
SheetUtils.fillSheet(SheetUtils.getOrCreateSheet('aggregated', true), inYear);
////Reverse order
//aggregated = aggregated.slice(0, 1).concat([].concat.apply([], tmp).reverse());
//accumulate(aggregated.slice(1), aggColums.Sum, Object.keys(aggColums).length);
//aggregated = aggregated.slice(0, 1).concat(aggregated.slice(1).reverse());
//Logger.log("Done");
}
// function getColumnRegexFilter(
// sheet: ISheet,
// columnIndex: number
// ): RegExp | null {
// var filter = sheet.getFilter();
// var filterCriteria = filter.getColumnFilterCriteria(columnIndex + 1);
// if (filterCriteria && filterCriteria.getCriteriaType() == 'CUSTOM_FORMULA') {
// var critVals = filterCriteria.getCriteriaValues();
// if (critVals.length == 1 && critVals[0].indexOf('REGEXMATCH') >= 0) {
// //REGEXMATCH =REGEXMATCH(TEXT(E:E, 0), "^43")
// var rxMatch = /\"(.+)\"/.exec(critVals[0]);
// if (rxMatch && rxMatch.length >= 2) {
// return new RegExp(rxMatch[1]);
// } else Logger.log('filter criteria regex no match' + critVals);
// } else
// Logger.log(
// 'filter criteria not regex: ' +
// critVals +
// ' ' +
// critVals.indexOf('REGEXMATCH')
// );
// } else Logger.log('filter criteria: N/A');
// return null;
// }
export class Timeseries {
static applyColumnFilter(rx: RegExp, rows: string[][], columnIndex: number) {
var visibleRows = [];
if (!rows) throw 'No rows'; // || !rows.length
if (!rx) throw 'No regex';
if (rows.length > 0) {
if (rows[0].length <= columnIndex)
throw 'Incorrect column ' + columnIndex + ': ' + rows[0];
for (var j = 0; j < rows.length; j++) {
if (rx.test((rows[j][columnIndex] || '').toString())) | }
return visibleRows;
}
static createFilters(
columns: KeyValueMap<number>,
rxAccountIdColumnFilter?: RegExp | null,
sheetFilterAccounts?: ISheet,
sheetFilterTransactions?: ISheet
): Array<(data: any[][]) => any[][]> {
var result: Array<(data: any[][]) => any[][]> = [];
if (rxAccountIdColumnFilter) {
result.push(data =>
Timeseries.applyColumnFilter(rxAccountIdColumnFilter, data, columns.AccountId)
);
//Logger.log('after column filter: ' + data.length);
}
result.push(
data =>
//Filter out rows with missing transaction:
data.filter(
r =>
r[columns.Missing] !== 'TRX' &&
new Date(r[columns.Date]).getFullYear() >= 2016
)
//Logger.log('after TRX filter: ' + data.length);
);
//Filter transactions booked on specified accounts:
if (sheetFilterAccounts) {
var accountIdsToExclude = getAccountIdsToExclude(
sheetFilterAccounts.getDataRange().getValues()
);
if (accountIdsToExclude && accountIdsToExclude.length) {
result.push(
data =>
data.filter(
r => accountIdsToExclude.indexOf(r[columns.AccountId]) < 0
)
//Logger.log('after getAccountIdsToExclude: ' + data.length);
);
}
}
//Filter out specific transactions:
if (sheetFilterTransactions) {
var transactionsToExclude = getTransactionsToExclude(
sheetFilterTransactions.getDataRange().getValues()
);
if (transactionsToExclude && transactionsToExclude.length) {
result.push(
data =>
data.filter(r => {
var found = transactionsToExclude.filter(
o => o.Date.valueOf() == r[columns.Date].valueOf()
);
if (found.length) {
//TODO: could be many to exclude from same date
var matches = Object.keys(found[0])
.filter(k => k != 'Date')
.map(k => found[0][k] == r[columns[k]]);
if (matches.indexOf(false) < 0) {
//TODO: remove them from
var tmpRemoveIndex = transactionsToExclude.indexOf(found[0]);
if (tmpRemoveIndex >= 0)
transactionsToExclude.splice(tmpRemoveIndex, 1);
return false;
}
}
return true;
})
//Logger.log('after getTransactionsToExclude: ' + data.length);
);
}
}
return result;
}
static recalc(
sheet: ISheet,
numYearsLookbackAvg?: number,
sheetPrognosisSpec?: ISheet,
prognosisUntil?: string | Date,
funcFilters?: Function[]
) {
numYearsLookbackAvg = numYearsLookbackAvg == null ? 2 : numYearsLookbackAvg;
prognosisUntil = new Date(prognosisUntil || '2020-01-01');
var data = sheet.getDataRange().getValues();
//var header = data[0];
var columns = SheetUtils.getHeaderColumnsAsObject(sheet); //KeyValueMap<number> = toObject(header, function (v, i) { return [v, i]; });
var applyFilters = (dataToFilter: any[][]) => {
if (funcFilters) {
// Logger.log('filtering started: ' + dataToFilter.length);
funcFilters.forEach(f => (dataToFilter = f(dataToFilter)));
// Logger.log('after filtering: ' + dataToFilter.length);
}
return dataToFilter;
};
data = applyFilters(data);
var funcFilterAndConcat = (dataToConcat: any[][]) =>
data.concat(applyFilters(dataToConcat));
if (numYearsLookbackAvg > 0) {
Logging.log('prognosis started');
var added = createPrognosis(
data,
columns,
new Date(),
prognosisUntil,
numYearsLookbackAvg
);
data = funcFilterAndConcat(added);
if (sheetPrognosisSpec) {
Logging.log('prognosis modification started');
var progger = new Prognoser();
var progcolumns = {
Date: columns.Date,
Amount: columns.Amount,
Supplier: columns.Supplier,
Account: columns.AccountId
};
added = progger.createPrognosis(
sheetPrognosisSpec.getDataRange().getValues(),
prognosisUntil,
progcolumns
);
if (true) {
//copy modified prognosis to future year, so we can compare with and without
var laterThan = new Date(new Date().getFullYear(), 0, 1).valueOf();
var copy = data
.filter(r => r[columns.Date].valueOf() > laterThan)
.map(r => r.slice());
copy = copy.concat(added);
copy.forEach(r => {
var d = r[columns.Date];
var year = d.getFullYear();
r[columns.Date] = new Date(year + 100, d.getMonth(), d.getDate());
});
data = funcFilterAndConcat(copy); //data.concat(copy);
} else {
data = funcFilterAndConcat(added); //data.concat(added);
}
}
}
//Perform aggregation:
Logging.log('Aggregation started');
var groupingDefs = [
{
col: columns.Date,
name: 'Period',
func: (v: any) => DateUtils.getDateStr(v)
}
];
var aggregateDef = {
col: columns.Amount,
name: 'Sum',
func: (v: any, p: any) => (parseInt(v, 10) || 0) + (p || 0)
};
var aggregated = Aggregation.aggregateIntoRows(
data,
groupingDefs,
aggregateDef,
false
);
var aggColums: KeyValueMap<number> = toObject(aggregated[0], (v, i) => [
v,
i
]);
aggregated = Timeseries.sortRowsByColumn(aggregated, aggColums.Period, true, true);
aggregated[0].push('Accumulated');
//Sort rows, one list per year:
var byYear: KeyValueMap<any[]> = {};
aggregated.slice(1).forEach(row => {
var year = new Date(row[aggColums.Period]).getFullYear();
var inYear = byYear[year];
if (!inYear) {
inYear = [];
byYear[year] = inYear;
}
inYear.push(row);
});
//Create column with accumulated values per year (so each year starts with 0):
var colAcc = Object.keys(aggColums).length;
// var tmp = Object.keys(byYear).sort().map(key => {
// var list = byYear[key];
// accumulate(list, aggColums.Sum, colAcc);
// return list;
// });
Logging.log('Create per-year table');
//Create table with one row per day in year, and one column per year with accumulated values
var sortedYears = Object.keys(byYear).sort();
var inYear = [['Date'].concat(sortedYears)];
var curr = new Date(2000, 0, 1).valueOf();
var lastValues: KeyValueMap<number> = {};
sortedYears.forEach(o => (lastValues[o] = 0));
var byDayInYear: any[][] = Array.apply(null, new Array(366)).map(() => []);
sortedYears.forEach(k => {
byYear[k].forEach(r =>
byDayInYear[DateUtils.getDayInYear(new Date(r[0]))].push(r)
);
});
var lastRow: any[] = Array.apply(null, new Array(sortedYears.length)).map(
() => 0
);
for (var day = 0; day < byDayInYear.length; day++) {
var dateStr = DateUtils.getDateStr(new Date(curr));
var row = lastRow.slice();
var inDay = byDayInYear[day];
inDay.forEach(r => {
var year = new Date(r[0]).getFullYear();
row[sortedYears.indexOf(year.toString())] = r[colAcc];
});
lastRow = row;
inYear.push([dateStr].concat(row));
curr += 1000 * 60 * 60 * 24;
}
return inYear;
}
static sortRowsByColumn(
list: any[][],
column: number,
hasHeader: boolean,
reverse: boolean
): any[][] {
const sortVal = reverse ? -1 : 1;
const srt = (l: any) =>
l.sort((a: any, b: any) => (a[column] > b[column] ? sortVal : -sortVal));
// var srt = reverse
// ? (l: any) => l.sort((a, b) => a[column] > b[column] ? 1 : -1)
// : l => l.sort((a, b) => a[column] < b[column] ? 1 : -1);
return hasHeader ? list.slice(0, 1).concat(srt(list.slice(1))) : srt(list);
}
}
export function accumulate(
list: any[][],
columnToAcc: number,
columnForAccResult: number
) {
var total = 0;
list.forEach(row => {
total += parseFloat(row[columnToAcc]);
row[columnForAccResult] = total;
});
} | visibleRows.push(rows[j]);
} | random_line_split |
runtime.rs | use crate::durability::Durability;
use crate::hash::*;
use crate::plumbing::CycleRecoveryStrategy;
use crate::revision::{AtomicRevision, Revision};
use crate::{Cancelled, Cycle, Database, DatabaseKeyIndex, Event, EventKind};
use log::debug;
use parking_lot::lock_api::{RawRwLock, RawRwLockRecursive};
use parking_lot::{Mutex, RwLock};
use std::hash::Hash;
use std::panic::panic_any;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
mod dependency_graph;
use dependency_graph::DependencyGraph;
pub(crate) mod local_state;
use local_state::LocalState;
use self::local_state::{ActiveQueryGuard, QueryInputs, QueryRevisions};
/// The salsa runtime stores the storage for all queries as well as
/// tracking the query stack and dependencies between cycles.
///
/// Each new runtime you create (e.g., via `Runtime::new` or
/// `Runtime::default`) will have an independent set of query storage
/// associated with it. Normally, therefore, you only do this once, at
/// the start of your application.
pub struct Runtime {
/// Our unique runtime id.
id: RuntimeId,
/// If this is a "forked" runtime, then the `revision_guard` will
/// be `Some`; this guard holds a read-lock on the global query
/// lock.
revision_guard: Option<RevisionGuard>,
/// Local state that is specific to this runtime (thread).
local_state: LocalState,
/// Shared state that is accessible via all runtimes.
shared_state: Arc<SharedState>,
}
#[derive(Clone, Debug)]
pub(crate) enum WaitResult {
Completed,
Panicked,
Cycle(Cycle),
}
impl Default for Runtime {
fn default() -> Self {
Runtime {
id: RuntimeId { counter: 0 },
revision_guard: None,
shared_state: Default::default(),
local_state: Default::default(),
}
}
}
impl std::fmt::Debug for Runtime {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fmt.debug_struct("Runtime")
.field("id", &self.id())
.field("forked", &self.revision_guard.is_some())
.field("shared_state", &self.shared_state)
.finish()
}
}
impl Runtime {
/// Create a new runtime; equivalent to `Self::default`. This is
/// used when creating a new database.
pub fn new() -> Self {
Self::default()
}
/// See [`crate::storage::Storage::snapshot`].
pub(crate) fn snapshot(&self) -> Self {
if self.local_state.query_in_progress() {
panic!("it is not legal to `snapshot` during a query (see salsa-rs/salsa#80)");
}
let revision_guard = RevisionGuard::new(&self.shared_state);
let id = RuntimeId {
counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst),
};
Runtime {
id,
revision_guard: Some(revision_guard),
shared_state: self.shared_state.clone(),
local_state: Default::default(),
}
}
/// A "synthetic write" causes the system to act *as though* some
/// input of durability `durability` has changed. This is mostly
/// useful for profiling scenarios.
///
/// **WARNING:** Just like an ordinary write, this method triggers
/// cancellation. If you invoke it while a snapshot exists, it
/// will block until that snapshot is dropped -- if that snapshot
/// is owned by the current thread, this could trigger deadlock.
pub fn synthetic_write(&mut self, durability: Durability) {
self.with_incremented_revision(|_next_revision| Some(durability));
}
/// The unique identifier attached to this `SalsaRuntime`. Each
/// snapshotted runtime has a distinct identifier.
#[inline]
pub fn id(&self) -> RuntimeId {
self.id
}
/// Returns the database-key for the query that this thread is
/// actively executing (if any).
pub fn active_query(&self) -> Option<DatabaseKeyIndex> {
self.local_state.active_query()
}
/// Read current value of the revision counter.
#[inline]
pub(crate) fn current_revision(&self) -> Revision {
self.shared_state.revisions[0].load()
}
/// The revision in which values with durability `d` may have last
/// changed. For D0, this is just the current revision. But for
/// higher levels of durability, this value may lag behind the
/// current revision. If we encounter a value of durability Di,
/// then, we can check this function to get a "bound" on when the
/// value may have changed, which allows us to skip walking its
/// dependencies.
#[inline]
pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision {
self.shared_state.revisions[d.index()].load()
}
/// Read current value of the revision counter.
#[inline]
pub(crate) fn pending_revision(&self) -> Revision {
self.shared_state.pending_revision.load()
}
#[cold]
pub(crate) fn unwind_cancelled(&self) {
self.report_untracked_read();
Cancelled::PendingWrite.throw();
}
/// Acquires the **global query write lock** (ensuring that no queries are
/// executing) and then increments the current revision counter; invokes
/// `op` with the global query write lock still held.
///
/// While we wait to acquire the global query write lock, this method will
/// also increment `pending_revision_increments`, thus signalling to queries
/// that their results are "cancelled" and they should abort as expeditiously
/// as possible.
///
/// The `op` closure should actually perform the writes needed. It is given
/// the new revision as an argument, and its return value indicates whether
/// any pre-existing value was modified:
///
/// - returning `None` means that no pre-existing value was modified (this
/// could occur e.g. when setting some key on an input that was never set
/// before)
/// - returning `Some(d)` indicates that a pre-existing value was modified
/// and it had the durability `d`. This will update the records for when
/// values with each durability were modified.
///
/// Note that, given our writer model, we can assume that only one thread is
/// attempting to increment the global revision at a time.
pub(crate) fn with_incremented_revision<F>(&mut self, op: F)
where
F: FnOnce(Revision) -> Option<Durability>,
{
log::debug!("increment_revision()");
if !self.permits_increment() {
panic!("increment_revision invoked during a query computation");
}
// Set the `pending_revision` field so that people
// know current revision is cancelled.
let current_revision = self.shared_state.pending_revision.fetch_then_increment();
// To modify the revision, we need the lock.
let shared_state = self.shared_state.clone();
let _lock = shared_state.query_lock.write();
let old_revision = self.shared_state.revisions[0].fetch_then_increment();
assert_eq!(current_revision, old_revision);
let new_revision = current_revision.next();
debug!("increment_revision: incremented to {:?}", new_revision);
if let Some(d) = op(new_revision) {
for rev in &self.shared_state.revisions[1..=d.index()] {
rev.store(new_revision);
}
}
}
pub(crate) fn permits_increment(&self) -> bool {
self.revision_guard.is_none() && !self.local_state.query_in_progress()
}
#[inline]
pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> {
self.local_state.push_query(database_key_index)
}
/// Reports that the currently active query read the result from
/// another query.
///
/// Also checks whether the "cycle participant" flag is set on
/// the current stack frame -- if so, panics with `CycleParticipant`
/// value, which should be caught by the code executing the query.
///
/// # Parameters
///
/// - `database_key`: the query whose result was read
/// - `changed_revision`: the last revision in which the result of that
/// query had changed
pub(crate) fn report_query_read_and_unwind_if_cycle_resulted(
&self,
input: DatabaseKeyIndex,
durability: Durability,
changed_at: Revision,
) {
self.local_state
.report_query_read_and_unwind_if_cycle_resulted(input, durability, changed_at);
}
/// Reports that the query depends on some state unknown to salsa.
///
/// Queries which report untracked reads will be re-executed in the next
/// revision.
pub fn report_untracked_read(&self) {
self.local_state
.report_untracked_read(self.current_revision());
}
/// Acts as though the current query had read an input with the given durability; this will force the current query's durability to be at most `durability`.
///
/// This is mostly useful to control the durability level for [on-demand inputs](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html).
pub fn report_synthetic_read(&self, durability: Durability) {
let changed_at = self.last_changed_revision(durability);
self.local_state
.report_synthetic_read(durability, changed_at);
}
/// Handles a cycle in the dependency graph that was detected when the
/// current thread tried to block on `database_key_index` which is being
/// executed by `to_id`. If this function returns, then `to_id` no longer
/// depends on the current thread, and so we should continue executing
/// as normal. Otherwise, the function will throw a `Cycle` which is expected
/// to be caught by some frame on our stack. This occurs either if there is
/// a frame on our stack with cycle recovery (possibly the top one!) or if there
/// is no cycle recovery at all.
fn unblock_cycle_and_maybe_throw(
&self,
db: &dyn Database,
dg: &mut DependencyGraph,
database_key_index: DatabaseKeyIndex,
to_id: RuntimeId,
) {
debug!(
"unblock_cycle_and_maybe_throw(database_key={:?})",
database_key_index
);
let mut from_stack = self.local_state.take_query_stack();
let from_id = self.id();
// Make a "dummy stack frame". As we iterate through the cycle, we will collect the
// inputs from each participant. Then, if we are participating in cycle recovery, we
// will propagate those results to all participants.
let mut cycle_query = ActiveQuery::new(database_key_index);
// Identify the cycle participants:
let cycle = {
let mut v = vec![];
dg.for_each_cycle_participant(
from_id,
&mut from_stack,
database_key_index,
to_id,
|aqs| {
aqs.iter_mut().for_each(|aq| {
cycle_query.add_from(aq);
v.push(aq.database_key_index);
});
},
);
// We want to give the participants in a deterministic order
// (at least for this execution, not necessarily across executions),
// no matter where it started on the stack. Find the minimum
// key and rotate it to the front.
let min = v.iter().min().unwrap();
let index = v.iter().position(|p| p == min).unwrap();
v.rotate_left(index);
// No need to store extra memory.
v.shrink_to_fit();
Cycle::new(Arc::new(v))
};
debug!(
"cycle {:?}, cycle_query {:#?}",
cycle.debug(db),
cycle_query,
);
// We can remove the cycle participants from the list of dependencies;
// they are a strongly connected component (SCC) and we only care about
// dependencies to things outside the SCC that control whether it will
// form again.
cycle_query.remove_cycle_participants(&cycle);
// Mark each cycle participant that has recovery set, along with
// any frames that come after them on the same thread. Those frames
// are going to be unwound so that fallback can occur.
dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| {
aqs.iter_mut()
.skip_while(
|aq| match db.cycle_recovery_strategy(aq.database_key_index) {
CycleRecoveryStrategy::Panic => true,
CycleRecoveryStrategy::Fallback => false,
},
)
.for_each(|aq| {
debug!("marking {:?} for fallback", aq.database_key_index.debug(db));
aq.take_inputs_from(&cycle_query);
assert!(aq.cycle.is_none());
aq.cycle = Some(cycle.clone());
});
});
// Unblock every thread that has cycle recovery with a `WaitResult::Cycle`.
// They will throw the cycle, which will be caught by the frame that has
// cycle recovery so that it can execute that recovery.
let (me_recovered, others_recovered) =
dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id);
self.local_state.restore_query_stack(from_stack);
if me_recovered {
// If the current thread has recovery, we want to throw
// so that it can begin.
cycle.throw()
} else if others_recovered {
// If other threads have recovery but we didn't: return and we will block on them.
} else {
// if nobody has recover, then we panic
panic_any(cycle);
}
}
/// Block until `other_id` completes executing `database_key`;
/// panic or unwind in the case of a cycle.
///
/// `query_mutex_guard` is the guard for the current query's state;
/// it will be dropped after we have successfully registered the
/// dependency.
///
/// # Propagating panics
///
/// If the thread `other_id` panics, then our thread is considered
/// cancelled, so this function will panic with a `Cancelled` value.
///
/// # Cycle handling
///
/// If the thread `other_id` already depends on the current thread,
/// and hence there is a cycle in the query graph, then this function
/// will unwind instead of returning normally. The method of unwinding
/// depends on the [`Self::mutual_cycle_recovery_strategy`]
/// of the cycle participants:
///
/// * [`CycleRecoveryStrategy::Panic`]: panic with the [`Cycle`] as the value.
/// * [`CycleRecoveryStrategy::Fallback`]: initiate unwinding with [`CycleParticipant::unwind`].
pub(crate) fn block_on_or_unwind<QueryMutexGuard>(
&self,
db: &dyn Database,
database_key: DatabaseKeyIndex,
other_id: RuntimeId,
query_mutex_guard: QueryMutexGuard,
) {
let mut dg = self.shared_state.dependency_graph.lock();
if dg.depends_on(other_id, self.id()) {
self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id);
// If the above fn returns, then (via cycle recovery) it has unblocked the
// cycle, so we can continue.
assert!(!dg.depends_on(other_id, self.id()));
}
db.salsa_event(Event {
runtime_id: self.id(),
kind: EventKind::WillBlockOn {
other_runtime_id: other_id,
database_key,
},
});
let stack = self.local_state.take_query_stack();
let (stack, result) = DependencyGraph::block_on(
dg,
self.id(),
database_key,
other_id,
stack,
query_mutex_guard,
);
self.local_state.restore_query_stack(stack);
match result {
WaitResult::Completed => (),
// If the other thread panicked, then we consider this thread
// cancelled. The assumption is that the panic will be detected
// by the other thread and responded to appropriately.
WaitResult::Panicked => Cancelled::PropagatedPanic.throw(),
WaitResult::Cycle(c) => c.throw(),
}
}
/// Invoked when this runtime completed computing `database_key` with
/// the given result `wait_result` (`wait_result` should be `None` if
/// computing `database_key` panicked and could not complete).
/// This function unblocks any dependent queries and allows them
/// to continue executing.
pub(crate) fn unblock_queries_blocked_on(
&self,
database_key: DatabaseKeyIndex,
wait_result: WaitResult,
) {
self.shared_state
.dependency_graph
.lock()
.unblock_runtimes_blocked_on(database_key, wait_result);
}
}
/// State that will be common to all threads (when we support multiple threads)
struct SharedState {
/// Stores the next id to use for a snapshotted runtime (starts at 1).
next_id: AtomicUsize,
/// Whenever derived queries are executing, they acquire this lock
/// in read mode. Mutating inputs (and thus creating a new
/// revision) requires a write lock (thus guaranteeing that no
/// derived queries are in progress). Note that this is not needed
/// to prevent **race conditions** -- the revision counter itself
/// is stored in an `AtomicUsize` so it can be cheaply read
/// without acquiring the lock. Rather, the `query_lock` is used
/// to ensure a higher-level consistency property.
query_lock: RwLock<()>,
/// This is typically equal to `revision` -- set to `revision+1`
/// when a new revision is pending (which implies that the current
/// revision is cancelled).
pending_revision: AtomicRevision,
/// Stores the "last change" revision for values of each duration.
/// This vector is always of length at least 1 (for Durability 0)
/// but its total length depends on the number of durations. The
/// element at index 0 is special as it represents the "current
/// revision". In general, we have the invariant that revisions
/// in here are *declining* -- that is, `revisions[i] >=
/// revisions[i + 1]`, for all `i`. This is because when you
/// modify a value with durability D, that implies that values
/// with durability less than D may have changed too.
revisions: Vec<AtomicRevision>,
/// The dependency graph tracks which runtimes are blocked on one
/// another, waiting for queries to terminate.
dependency_graph: Mutex<DependencyGraph>,
}
impl SharedState {
fn with_durabilities(durabilities: usize) -> Self {
SharedState {
next_id: AtomicUsize::new(1),
query_lock: Default::default(),
revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(),
pending_revision: AtomicRevision::start(),
dependency_graph: Default::default(),
}
}
}
impl std::panic::RefUnwindSafe for SharedState {}
impl Default for SharedState {
fn default() -> Self {
Self::with_durabilities(Durability::LEN)
}
}
impl std::fmt::Debug for SharedState {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let query_lock = if self.query_lock.try_write().is_some() {
"<unlocked>"
} else if self.query_lock.try_read().is_some() {
"<rlocked>"
} else {
"<wlocked>"
};
fmt.debug_struct("SharedState")
.field("query_lock", &query_lock)
.field("revisions", &self.revisions)
.field("pending_revision", &self.pending_revision)
.finish()
}
}
#[derive(Debug)]
struct ActiveQuery {
/// What query is executing
database_key_index: DatabaseKeyIndex,
/// Minimum durability of inputs observed so far.
durability: Durability,
/// Maximum revision of all inputs observed. If we observe an
/// untracked read, this will be set to the most recent revision.
changed_at: Revision,
/// Set of subqueries that were accessed thus far, or `None` if
/// there was an untracked the read.
dependencies: Option<FxIndexSet<DatabaseKeyIndex>>,
/// Stores the entire cycle, if one is found and this query is part of it.
cycle: Option<Cycle>,
}
impl ActiveQuery {
fn new(database_key_index: DatabaseKeyIndex) -> Self {
ActiveQuery {
database_key_index,
durability: Durability::MAX,
changed_at: Revision::start(),
dependencies: Some(FxIndexSet::default()),
cycle: None,
}
}
fn add_read(&mut self, input: DatabaseKeyIndex, durability: Durability, revision: Revision) {
if let Some(set) = &mut self.dependencies {
set.insert(input);
}
self.durability = self.durability.min(durability);
self.changed_at = self.changed_at.max(revision);
}
fn add_untracked_read(&mut self, changed_at: Revision) {
self.dependencies = None;
self.durability = Durability::LOW;
self.changed_at = changed_at;
}
fn add_synthetic_read(&mut self, durability: Durability, revision: Revision) {
self.dependencies = None;
self.durability = self.durability.min(durability);
self.changed_at = self.changed_at.max(revision);
}
pub(crate) fn revisions(&self) -> QueryRevisions {
let inputs = match &self.dependencies {
None => QueryInputs::Untracked,
Some(dependencies) => {
if dependencies.is_empty() {
QueryInputs::NoInputs
} else {
QueryInputs::Tracked {
inputs: dependencies.iter().copied().collect(),
}
}
}
};
QueryRevisions {
changed_at: self.changed_at,
inputs,
durability: self.durability,
}
}
/// Adds any dependencies from `other` into `self`.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
fn add_from(&mut self, other: &ActiveQuery) {
self.changed_at = self.changed_at.max(other.changed_at);
self.durability = self.durability.min(other.durability);
if let Some(other_dependencies) = &other.dependencies {
if let Some(my_dependencies) = &mut self.dependencies {
my_dependencies.extend(other_dependencies.iter().copied());
}
} else {
self.dependencies = None;
}
}
/// Removes the participants in `cycle` from my dependencies.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
fn remove_cycle_participants(&mut self, cycle: &Cycle) {
if let Some(my_dependencies) = &mut self.dependencies {
for p in cycle.participant_keys() {
my_dependencies.remove(&p);
}
}
}
/// Copy the changed-at, durability, and dependencies from `cycle_query`.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
pub(crate) fn take_inputs_from(&mut self, cycle_query: &ActiveQuery) {
self.changed_at = cycle_query.changed_at;
self.durability = cycle_query.durability;
self.dependencies = cycle_query.dependencies.clone();
}
}
/// A unique identifier for a particular runtime. Each time you create
/// a snapshot, a fresh `RuntimeId` is generated. Once a snapshot is
/// complete, its `RuntimeId` may potentially be re-used.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct RuntimeId {
counter: usize,
}
#[derive(Clone, Debug)]
pub(crate) struct StampedValue<V> {
pub(crate) value: V,
pub(crate) durability: Durability,
pub(crate) changed_at: Revision,
}
struct RevisionGuard {
shared_state: Arc<SharedState>,
}
impl RevisionGuard {
fn new(shared_state: &Arc<SharedState>) -> Self {
// Subtle: we use a "recursive" lock here so that it is not an
// error to acquire a read-lock when one is already held (this
// happens when a query uses `snapshot` to spawn off parallel
// workers, for example).
//
// This has the side-effect that we are responsible to ensure
// that people contending for the write lock do not starve,
// but this is what we achieve via the cancellation mechanism.
//
// (In particular, since we only ever have one "mutating
// handle" to the database, the only contention for the global
// query lock occurs when there are "futures" evaluating
// queries in parallel, and those futures hold a read-lock
// already, so the starvation problem is more about them bring
// themselves to a close, versus preventing other people from
// *starting* work).
unsafe {
shared_state.query_lock.raw().lock_shared_recursive();
}
Self {
shared_state: shared_state.clone(),
}
}
}
impl Drop for RevisionGuard {
fn drop(&mut self) {
// Release our read-lock without using RAII. As documented in
// `Snapshot::new` above, this requires the unsafe keyword.
unsafe {
self.shared_state.query_lock.raw().unlock_shared(); | } | }
} | random_line_split |
runtime.rs | use crate::durability::Durability;
use crate::hash::*;
use crate::plumbing::CycleRecoveryStrategy;
use crate::revision::{AtomicRevision, Revision};
use crate::{Cancelled, Cycle, Database, DatabaseKeyIndex, Event, EventKind};
use log::debug;
use parking_lot::lock_api::{RawRwLock, RawRwLockRecursive};
use parking_lot::{Mutex, RwLock};
use std::hash::Hash;
use std::panic::panic_any;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
mod dependency_graph;
use dependency_graph::DependencyGraph;
pub(crate) mod local_state;
use local_state::LocalState;
use self::local_state::{ActiveQueryGuard, QueryInputs, QueryRevisions};
/// The salsa runtime stores the storage for all queries as well as
/// tracking the query stack and dependencies between cycles.
///
/// Each new runtime you create (e.g., via `Runtime::new` or
/// `Runtime::default`) will have an independent set of query storage
/// associated with it. Normally, therefore, you only do this once, at
/// the start of your application.
pub struct Runtime {
/// Our unique runtime id.
id: RuntimeId,
/// If this is a "forked" runtime, then the `revision_guard` will
/// be `Some`; this guard holds a read-lock on the global query
/// lock.
revision_guard: Option<RevisionGuard>,
/// Local state that is specific to this runtime (thread).
local_state: LocalState,
/// Shared state that is accessible via all runtimes.
shared_state: Arc<SharedState>,
}
#[derive(Clone, Debug)]
pub(crate) enum WaitResult {
Completed,
Panicked,
Cycle(Cycle),
}
impl Default for Runtime {
fn default() -> Self {
Runtime {
id: RuntimeId { counter: 0 },
revision_guard: None,
shared_state: Default::default(),
local_state: Default::default(),
}
}
}
impl std::fmt::Debug for Runtime {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fmt.debug_struct("Runtime")
.field("id", &self.id())
.field("forked", &self.revision_guard.is_some())
.field("shared_state", &self.shared_state)
.finish()
}
}
impl Runtime {
/// Create a new runtime; equivalent to `Self::default`. This is
/// used when creating a new database.
pub fn new() -> Self {
Self::default()
}
/// See [`crate::storage::Storage::snapshot`].
pub(crate) fn snapshot(&self) -> Self {
if self.local_state.query_in_progress() {
panic!("it is not legal to `snapshot` during a query (see salsa-rs/salsa#80)");
}
let revision_guard = RevisionGuard::new(&self.shared_state);
let id = RuntimeId {
counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst),
};
Runtime {
id,
revision_guard: Some(revision_guard),
shared_state: self.shared_state.clone(),
local_state: Default::default(),
}
}
/// A "synthetic write" causes the system to act *as though* some
/// input of durability `durability` has changed. This is mostly
/// useful for profiling scenarios.
///
/// **WARNING:** Just like an ordinary write, this method triggers
/// cancellation. If you invoke it while a snapshot exists, it
/// will block until that snapshot is dropped -- if that snapshot
/// is owned by the current thread, this could trigger deadlock.
pub fn synthetic_write(&mut self, durability: Durability) {
self.with_incremented_revision(|_next_revision| Some(durability));
}
/// The unique identifier attached to this `SalsaRuntime`. Each
/// snapshotted runtime has a distinct identifier.
#[inline]
pub fn id(&self) -> RuntimeId {
self.id
}
/// Returns the database-key for the query that this thread is
/// actively executing (if any).
pub fn active_query(&self) -> Option<DatabaseKeyIndex> {
self.local_state.active_query()
}
/// Read current value of the revision counter.
#[inline]
pub(crate) fn current_revision(&self) -> Revision {
self.shared_state.revisions[0].load()
}
/// The revision in which values with durability `d` may have last
/// changed. For D0, this is just the current revision. But for
/// higher levels of durability, this value may lag behind the
/// current revision. If we encounter a value of durability Di,
/// then, we can check this function to get a "bound" on when the
/// value may have changed, which allows us to skip walking its
/// dependencies.
#[inline]
pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision {
self.shared_state.revisions[d.index()].load()
}
/// Read current value of the revision counter.
#[inline]
pub(crate) fn pending_revision(&self) -> Revision {
self.shared_state.pending_revision.load()
}
#[cold]
pub(crate) fn unwind_cancelled(&self) {
self.report_untracked_read();
Cancelled::PendingWrite.throw();
}
/// Acquires the **global query write lock** (ensuring that no queries are
/// executing) and then increments the current revision counter; invokes
/// `op` with the global query write lock still held.
///
/// While we wait to acquire the global query write lock, this method will
/// also increment `pending_revision_increments`, thus signalling to queries
/// that their results are "cancelled" and they should abort as expeditiously
/// as possible.
///
/// The `op` closure should actually perform the writes needed. It is given
/// the new revision as an argument, and its return value indicates whether
/// any pre-existing value was modified:
///
/// - returning `None` means that no pre-existing value was modified (this
/// could occur e.g. when setting some key on an input that was never set
/// before)
/// - returning `Some(d)` indicates that a pre-existing value was modified
/// and it had the durability `d`. This will update the records for when
/// values with each durability were modified.
///
/// Note that, given our writer model, we can assume that only one thread is
/// attempting to increment the global revision at a time.
pub(crate) fn with_incremented_revision<F>(&mut self, op: F)
where
F: FnOnce(Revision) -> Option<Durability>,
{
log::debug!("increment_revision()");
if !self.permits_increment() {
panic!("increment_revision invoked during a query computation");
}
// Set the `pending_revision` field so that people
// know current revision is cancelled.
let current_revision = self.shared_state.pending_revision.fetch_then_increment();
// To modify the revision, we need the lock.
let shared_state = self.shared_state.clone();
let _lock = shared_state.query_lock.write();
let old_revision = self.shared_state.revisions[0].fetch_then_increment();
assert_eq!(current_revision, old_revision);
let new_revision = current_revision.next();
debug!("increment_revision: incremented to {:?}", new_revision);
if let Some(d) = op(new_revision) {
for rev in &self.shared_state.revisions[1..=d.index()] {
rev.store(new_revision);
}
}
}
pub(crate) fn permits_increment(&self) -> bool {
self.revision_guard.is_none() && !self.local_state.query_in_progress()
}
#[inline]
pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> {
self.local_state.push_query(database_key_index)
}
/// Reports that the currently active query read the result from
/// another query.
///
/// Also checks whether the "cycle participant" flag is set on
/// the current stack frame -- if so, panics with `CycleParticipant`
/// value, which should be caught by the code executing the query.
///
/// # Parameters
///
/// - `database_key`: the query whose result was read
/// - `changed_revision`: the last revision in which the result of that
/// query had changed
pub(crate) fn report_query_read_and_unwind_if_cycle_resulted(
&self,
input: DatabaseKeyIndex,
durability: Durability,
changed_at: Revision,
) {
self.local_state
.report_query_read_and_unwind_if_cycle_resulted(input, durability, changed_at);
}
/// Reports that the query depends on some state unknown to salsa.
///
/// Queries which report untracked reads will be re-executed in the next
/// revision.
pub fn | (&self) {
self.local_state
.report_untracked_read(self.current_revision());
}
/// Acts as though the current query had read an input with the given durability; this will force the current query's durability to be at most `durability`.
///
/// This is mostly useful to control the durability level for [on-demand inputs](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html).
pub fn report_synthetic_read(&self, durability: Durability) {
let changed_at = self.last_changed_revision(durability);
self.local_state
.report_synthetic_read(durability, changed_at);
}
/// Handles a cycle in the dependency graph that was detected when the
/// current thread tried to block on `database_key_index` which is being
/// executed by `to_id`. If this function returns, then `to_id` no longer
/// depends on the current thread, and so we should continue executing
/// as normal. Otherwise, the function will throw a `Cycle` which is expected
/// to be caught by some frame on our stack. This occurs either if there is
/// a frame on our stack with cycle recovery (possibly the top one!) or if there
/// is no cycle recovery at all.
fn unblock_cycle_and_maybe_throw(
&self,
db: &dyn Database,
dg: &mut DependencyGraph,
database_key_index: DatabaseKeyIndex,
to_id: RuntimeId,
) {
debug!(
"unblock_cycle_and_maybe_throw(database_key={:?})",
database_key_index
);
let mut from_stack = self.local_state.take_query_stack();
let from_id = self.id();
// Make a "dummy stack frame". As we iterate through the cycle, we will collect the
// inputs from each participant. Then, if we are participating in cycle recovery, we
// will propagate those results to all participants.
let mut cycle_query = ActiveQuery::new(database_key_index);
// Identify the cycle participants:
let cycle = {
let mut v = vec![];
dg.for_each_cycle_participant(
from_id,
&mut from_stack,
database_key_index,
to_id,
|aqs| {
aqs.iter_mut().for_each(|aq| {
cycle_query.add_from(aq);
v.push(aq.database_key_index);
});
},
);
// We want to give the participants in a deterministic order
// (at least for this execution, not necessarily across executions),
// no matter where it started on the stack. Find the minimum
// key and rotate it to the front.
let min = v.iter().min().unwrap();
let index = v.iter().position(|p| p == min).unwrap();
v.rotate_left(index);
// No need to store extra memory.
v.shrink_to_fit();
Cycle::new(Arc::new(v))
};
debug!(
"cycle {:?}, cycle_query {:#?}",
cycle.debug(db),
cycle_query,
);
// We can remove the cycle participants from the list of dependencies;
// they are a strongly connected component (SCC) and we only care about
// dependencies to things outside the SCC that control whether it will
// form again.
cycle_query.remove_cycle_participants(&cycle);
// Mark each cycle participant that has recovery set, along with
// any frames that come after them on the same thread. Those frames
// are going to be unwound so that fallback can occur.
dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| {
aqs.iter_mut()
.skip_while(
|aq| match db.cycle_recovery_strategy(aq.database_key_index) {
CycleRecoveryStrategy::Panic => true,
CycleRecoveryStrategy::Fallback => false,
},
)
.for_each(|aq| {
debug!("marking {:?} for fallback", aq.database_key_index.debug(db));
aq.take_inputs_from(&cycle_query);
assert!(aq.cycle.is_none());
aq.cycle = Some(cycle.clone());
});
});
// Unblock every thread that has cycle recovery with a `WaitResult::Cycle`.
// They will throw the cycle, which will be caught by the frame that has
// cycle recovery so that it can execute that recovery.
let (me_recovered, others_recovered) =
dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id);
self.local_state.restore_query_stack(from_stack);
if me_recovered {
// If the current thread has recovery, we want to throw
// so that it can begin.
cycle.throw()
} else if others_recovered {
// If other threads have recovery but we didn't: return and we will block on them.
} else {
// if nobody has recover, then we panic
panic_any(cycle);
}
}
/// Block until `other_id` completes executing `database_key`;
/// panic or unwind in the case of a cycle.
///
/// `query_mutex_guard` is the guard for the current query's state;
/// it will be dropped after we have successfully registered the
/// dependency.
///
/// # Propagating panics
///
/// If the thread `other_id` panics, then our thread is considered
/// cancelled, so this function will panic with a `Cancelled` value.
///
/// # Cycle handling
///
/// If the thread `other_id` already depends on the current thread,
/// and hence there is a cycle in the query graph, then this function
/// will unwind instead of returning normally. The method of unwinding
/// depends on the [`Self::mutual_cycle_recovery_strategy`]
/// of the cycle participants:
///
/// * [`CycleRecoveryStrategy::Panic`]: panic with the [`Cycle`] as the value.
/// * [`CycleRecoveryStrategy::Fallback`]: initiate unwinding with [`CycleParticipant::unwind`].
pub(crate) fn block_on_or_unwind<QueryMutexGuard>(
&self,
db: &dyn Database,
database_key: DatabaseKeyIndex,
other_id: RuntimeId,
query_mutex_guard: QueryMutexGuard,
) {
let mut dg = self.shared_state.dependency_graph.lock();
if dg.depends_on(other_id, self.id()) {
self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id);
// If the above fn returns, then (via cycle recovery) it has unblocked the
// cycle, so we can continue.
assert!(!dg.depends_on(other_id, self.id()));
}
db.salsa_event(Event {
runtime_id: self.id(),
kind: EventKind::WillBlockOn {
other_runtime_id: other_id,
database_key,
},
});
let stack = self.local_state.take_query_stack();
let (stack, result) = DependencyGraph::block_on(
dg,
self.id(),
database_key,
other_id,
stack,
query_mutex_guard,
);
self.local_state.restore_query_stack(stack);
match result {
WaitResult::Completed => (),
// If the other thread panicked, then we consider this thread
// cancelled. The assumption is that the panic will be detected
// by the other thread and responded to appropriately.
WaitResult::Panicked => Cancelled::PropagatedPanic.throw(),
WaitResult::Cycle(c) => c.throw(),
}
}
/// Invoked when this runtime completed computing `database_key` with
/// the given result `wait_result` (`wait_result` should be `None` if
/// computing `database_key` panicked and could not complete).
/// This function unblocks any dependent queries and allows them
/// to continue executing.
pub(crate) fn unblock_queries_blocked_on(
&self,
database_key: DatabaseKeyIndex,
wait_result: WaitResult,
) {
self.shared_state
.dependency_graph
.lock()
.unblock_runtimes_blocked_on(database_key, wait_result);
}
}
/// State that will be common to all threads (when we support multiple threads)
struct SharedState {
/// Stores the next id to use for a snapshotted runtime (starts at 1).
next_id: AtomicUsize,
/// Whenever derived queries are executing, they acquire this lock
/// in read mode. Mutating inputs (and thus creating a new
/// revision) requires a write lock (thus guaranteeing that no
/// derived queries are in progress). Note that this is not needed
/// to prevent **race conditions** -- the revision counter itself
/// is stored in an `AtomicUsize` so it can be cheaply read
/// without acquiring the lock. Rather, the `query_lock` is used
/// to ensure a higher-level consistency property.
query_lock: RwLock<()>,
/// This is typically equal to `revision` -- set to `revision+1`
/// when a new revision is pending (which implies that the current
/// revision is cancelled).
pending_revision: AtomicRevision,
/// Stores the "last change" revision for values of each duration.
/// This vector is always of length at least 1 (for Durability 0)
/// but its total length depends on the number of durations. The
/// element at index 0 is special as it represents the "current
/// revision". In general, we have the invariant that revisions
/// in here are *declining* -- that is, `revisions[i] >=
/// revisions[i + 1]`, for all `i`. This is because when you
/// modify a value with durability D, that implies that values
/// with durability less than D may have changed too.
revisions: Vec<AtomicRevision>,
/// The dependency graph tracks which runtimes are blocked on one
/// another, waiting for queries to terminate.
dependency_graph: Mutex<DependencyGraph>,
}
impl SharedState {
fn with_durabilities(durabilities: usize) -> Self {
SharedState {
next_id: AtomicUsize::new(1),
query_lock: Default::default(),
revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(),
pending_revision: AtomicRevision::start(),
dependency_graph: Default::default(),
}
}
}
impl std::panic::RefUnwindSafe for SharedState {}
impl Default for SharedState {
fn default() -> Self {
Self::with_durabilities(Durability::LEN)
}
}
impl std::fmt::Debug for SharedState {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let query_lock = if self.query_lock.try_write().is_some() {
"<unlocked>"
} else if self.query_lock.try_read().is_some() {
"<rlocked>"
} else {
"<wlocked>"
};
fmt.debug_struct("SharedState")
.field("query_lock", &query_lock)
.field("revisions", &self.revisions)
.field("pending_revision", &self.pending_revision)
.finish()
}
}
#[derive(Debug)]
struct ActiveQuery {
/// What query is executing
database_key_index: DatabaseKeyIndex,
/// Minimum durability of inputs observed so far.
durability: Durability,
/// Maximum revision of all inputs observed. If we observe an
/// untracked read, this will be set to the most recent revision.
changed_at: Revision,
/// Set of subqueries that were accessed thus far, or `None` if
/// there was an untracked the read.
dependencies: Option<FxIndexSet<DatabaseKeyIndex>>,
/// Stores the entire cycle, if one is found and this query is part of it.
cycle: Option<Cycle>,
}
impl ActiveQuery {
fn new(database_key_index: DatabaseKeyIndex) -> Self {
ActiveQuery {
database_key_index,
durability: Durability::MAX,
changed_at: Revision::start(),
dependencies: Some(FxIndexSet::default()),
cycle: None,
}
}
fn add_read(&mut self, input: DatabaseKeyIndex, durability: Durability, revision: Revision) {
if let Some(set) = &mut self.dependencies {
set.insert(input);
}
self.durability = self.durability.min(durability);
self.changed_at = self.changed_at.max(revision);
}
fn add_untracked_read(&mut self, changed_at: Revision) {
self.dependencies = None;
self.durability = Durability::LOW;
self.changed_at = changed_at;
}
fn add_synthetic_read(&mut self, durability: Durability, revision: Revision) {
self.dependencies = None;
self.durability = self.durability.min(durability);
self.changed_at = self.changed_at.max(revision);
}
pub(crate) fn revisions(&self) -> QueryRevisions {
let inputs = match &self.dependencies {
None => QueryInputs::Untracked,
Some(dependencies) => {
if dependencies.is_empty() {
QueryInputs::NoInputs
} else {
QueryInputs::Tracked {
inputs: dependencies.iter().copied().collect(),
}
}
}
};
QueryRevisions {
changed_at: self.changed_at,
inputs,
durability: self.durability,
}
}
/// Adds any dependencies from `other` into `self`.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
fn add_from(&mut self, other: &ActiveQuery) {
self.changed_at = self.changed_at.max(other.changed_at);
self.durability = self.durability.min(other.durability);
if let Some(other_dependencies) = &other.dependencies {
if let Some(my_dependencies) = &mut self.dependencies {
my_dependencies.extend(other_dependencies.iter().copied());
}
} else {
self.dependencies = None;
}
}
/// Removes the participants in `cycle` from my dependencies.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
fn remove_cycle_participants(&mut self, cycle: &Cycle) {
if let Some(my_dependencies) = &mut self.dependencies {
for p in cycle.participant_keys() {
my_dependencies.remove(&p);
}
}
}
/// Copy the changed-at, durability, and dependencies from `cycle_query`.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
pub(crate) fn take_inputs_from(&mut self, cycle_query: &ActiveQuery) {
self.changed_at = cycle_query.changed_at;
self.durability = cycle_query.durability;
self.dependencies = cycle_query.dependencies.clone();
}
}
/// A unique identifier for a particular runtime. Each time you create
/// a snapshot, a fresh `RuntimeId` is generated. Once a snapshot is
/// complete, its `RuntimeId` may potentially be re-used.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct RuntimeId {
counter: usize,
}
#[derive(Clone, Debug)]
pub(crate) struct StampedValue<V> {
pub(crate) value: V,
pub(crate) durability: Durability,
pub(crate) changed_at: Revision,
}
struct RevisionGuard {
shared_state: Arc<SharedState>,
}
impl RevisionGuard {
fn new(shared_state: &Arc<SharedState>) -> Self {
// Subtle: we use a "recursive" lock here so that it is not an
// error to acquire a read-lock when one is already held (this
// happens when a query uses `snapshot` to spawn off parallel
// workers, for example).
//
// This has the side-effect that we are responsible to ensure
// that people contending for the write lock do not starve,
// but this is what we achieve via the cancellation mechanism.
//
// (In particular, since we only ever have one "mutating
// handle" to the database, the only contention for the global
// query lock occurs when there are "futures" evaluating
// queries in parallel, and those futures hold a read-lock
// already, so the starvation problem is more about them bring
// themselves to a close, versus preventing other people from
// *starting* work).
unsafe {
shared_state.query_lock.raw().lock_shared_recursive();
}
Self {
shared_state: shared_state.clone(),
}
}
}
impl Drop for RevisionGuard {
fn drop(&mut self) {
// Release our read-lock without using RAII. As documented in
// `Snapshot::new` above, this requires the unsafe keyword.
unsafe {
self.shared_state.query_lock.raw().unlock_shared();
}
}
}
| report_untracked_read | identifier_name |
runtime.rs | use crate::durability::Durability;
use crate::hash::*;
use crate::plumbing::CycleRecoveryStrategy;
use crate::revision::{AtomicRevision, Revision};
use crate::{Cancelled, Cycle, Database, DatabaseKeyIndex, Event, EventKind};
use log::debug;
use parking_lot::lock_api::{RawRwLock, RawRwLockRecursive};
use parking_lot::{Mutex, RwLock};
use std::hash::Hash;
use std::panic::panic_any;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
mod dependency_graph;
use dependency_graph::DependencyGraph;
pub(crate) mod local_state;
use local_state::LocalState;
use self::local_state::{ActiveQueryGuard, QueryInputs, QueryRevisions};
/// The salsa runtime stores the storage for all queries as well as
/// tracking the query stack and dependencies between cycles.
///
/// Each new runtime you create (e.g., via `Runtime::new` or
/// `Runtime::default`) will have an independent set of query storage
/// associated with it. Normally, therefore, you only do this once, at
/// the start of your application.
pub struct Runtime {
/// Our unique runtime id.
id: RuntimeId,
/// If this is a "forked" runtime, then the `revision_guard` will
/// be `Some`; this guard holds a read-lock on the global query
/// lock.
revision_guard: Option<RevisionGuard>,
/// Local state that is specific to this runtime (thread).
local_state: LocalState,
/// Shared state that is accessible via all runtimes.
shared_state: Arc<SharedState>,
}
#[derive(Clone, Debug)]
pub(crate) enum WaitResult {
Completed,
Panicked,
Cycle(Cycle),
}
impl Default for Runtime {
fn default() -> Self {
Runtime {
id: RuntimeId { counter: 0 },
revision_guard: None,
shared_state: Default::default(),
local_state: Default::default(),
}
}
}
impl std::fmt::Debug for Runtime {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fmt.debug_struct("Runtime")
.field("id", &self.id())
.field("forked", &self.revision_guard.is_some())
.field("shared_state", &self.shared_state)
.finish()
}
}
impl Runtime {
/// Create a new runtime; equivalent to `Self::default`. This is
/// used when creating a new database.
pub fn new() -> Self {
Self::default()
}
/// See [`crate::storage::Storage::snapshot`].
pub(crate) fn snapshot(&self) -> Self {
if self.local_state.query_in_progress() {
panic!("it is not legal to `snapshot` during a query (see salsa-rs/salsa#80)");
}
let revision_guard = RevisionGuard::new(&self.shared_state);
let id = RuntimeId {
counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst),
};
Runtime {
id,
revision_guard: Some(revision_guard),
shared_state: self.shared_state.clone(),
local_state: Default::default(),
}
}
/// A "synthetic write" causes the system to act *as though* some
/// input of durability `durability` has changed. This is mostly
/// useful for profiling scenarios.
///
/// **WARNING:** Just like an ordinary write, this method triggers
/// cancellation. If you invoke it while a snapshot exists, it
/// will block until that snapshot is dropped -- if that snapshot
/// is owned by the current thread, this could trigger deadlock.
pub fn synthetic_write(&mut self, durability: Durability) {
self.with_incremented_revision(|_next_revision| Some(durability));
}
/// The unique identifier attached to this `SalsaRuntime`. Each
/// snapshotted runtime has a distinct identifier.
#[inline]
pub fn id(&self) -> RuntimeId {
self.id
}
/// Returns the database-key for the query that this thread is
/// actively executing (if any).
pub fn active_query(&self) -> Option<DatabaseKeyIndex> {
self.local_state.active_query()
}
/// Read current value of the revision counter.
#[inline]
pub(crate) fn current_revision(&self) -> Revision {
self.shared_state.revisions[0].load()
}
/// The revision in which values with durability `d` may have last
/// changed. For D0, this is just the current revision. But for
/// higher levels of durability, this value may lag behind the
/// current revision. If we encounter a value of durability Di,
/// then, we can check this function to get a "bound" on when the
/// value may have changed, which allows us to skip walking its
/// dependencies.
#[inline]
pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision {
self.shared_state.revisions[d.index()].load()
}
/// Read current value of the revision counter.
#[inline]
pub(crate) fn pending_revision(&self) -> Revision {
self.shared_state.pending_revision.load()
}
#[cold]
pub(crate) fn unwind_cancelled(&self) {
self.report_untracked_read();
Cancelled::PendingWrite.throw();
}
/// Acquires the **global query write lock** (ensuring that no queries are
/// executing) and then increments the current revision counter; invokes
/// `op` with the global query write lock still held.
///
/// While we wait to acquire the global query write lock, this method will
/// also increment `pending_revision_increments`, thus signalling to queries
/// that their results are "cancelled" and they should abort as expeditiously
/// as possible.
///
/// The `op` closure should actually perform the writes needed. It is given
/// the new revision as an argument, and its return value indicates whether
/// any pre-existing value was modified:
///
/// - returning `None` means that no pre-existing value was modified (this
/// could occur e.g. when setting some key on an input that was never set
/// before)
/// - returning `Some(d)` indicates that a pre-existing value was modified
/// and it had the durability `d`. This will update the records for when
/// values with each durability were modified.
///
/// Note that, given our writer model, we can assume that only one thread is
/// attempting to increment the global revision at a time.
pub(crate) fn with_incremented_revision<F>(&mut self, op: F)
where
F: FnOnce(Revision) -> Option<Durability>,
{
log::debug!("increment_revision()");
if !self.permits_increment() {
panic!("increment_revision invoked during a query computation");
}
// Set the `pending_revision` field so that people
// know current revision is cancelled.
let current_revision = self.shared_state.pending_revision.fetch_then_increment();
// To modify the revision, we need the lock.
let shared_state = self.shared_state.clone();
let _lock = shared_state.query_lock.write();
let old_revision = self.shared_state.revisions[0].fetch_then_increment();
assert_eq!(current_revision, old_revision);
let new_revision = current_revision.next();
debug!("increment_revision: incremented to {:?}", new_revision);
if let Some(d) = op(new_revision) |
}
pub(crate) fn permits_increment(&self) -> bool {
self.revision_guard.is_none() && !self.local_state.query_in_progress()
}
#[inline]
pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> {
self.local_state.push_query(database_key_index)
}
/// Reports that the currently active query read the result from
/// another query.
///
/// Also checks whether the "cycle participant" flag is set on
/// the current stack frame -- if so, panics with `CycleParticipant`
/// value, which should be caught by the code executing the query.
///
/// # Parameters
///
/// - `database_key`: the query whose result was read
/// - `changed_revision`: the last revision in which the result of that
/// query had changed
pub(crate) fn report_query_read_and_unwind_if_cycle_resulted(
&self,
input: DatabaseKeyIndex,
durability: Durability,
changed_at: Revision,
) {
self.local_state
.report_query_read_and_unwind_if_cycle_resulted(input, durability, changed_at);
}
/// Reports that the query depends on some state unknown to salsa.
///
/// Queries which report untracked reads will be re-executed in the next
/// revision.
pub fn report_untracked_read(&self) {
self.local_state
.report_untracked_read(self.current_revision());
}
/// Acts as though the current query had read an input with the given durability; this will force the current query's durability to be at most `durability`.
///
/// This is mostly useful to control the durability level for [on-demand inputs](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html).
pub fn report_synthetic_read(&self, durability: Durability) {
let changed_at = self.last_changed_revision(durability);
self.local_state
.report_synthetic_read(durability, changed_at);
}
/// Handles a cycle in the dependency graph that was detected when the
/// current thread tried to block on `database_key_index` which is being
/// executed by `to_id`. If this function returns, then `to_id` no longer
/// depends on the current thread, and so we should continue executing
/// as normal. Otherwise, the function will throw a `Cycle` which is expected
/// to be caught by some frame on our stack. This occurs either if there is
/// a frame on our stack with cycle recovery (possibly the top one!) or if there
/// is no cycle recovery at all.
fn unblock_cycle_and_maybe_throw(
&self,
db: &dyn Database,
dg: &mut DependencyGraph,
database_key_index: DatabaseKeyIndex,
to_id: RuntimeId,
) {
debug!(
"unblock_cycle_and_maybe_throw(database_key={:?})",
database_key_index
);
let mut from_stack = self.local_state.take_query_stack();
let from_id = self.id();
// Make a "dummy stack frame". As we iterate through the cycle, we will collect the
// inputs from each participant. Then, if we are participating in cycle recovery, we
// will propagate those results to all participants.
let mut cycle_query = ActiveQuery::new(database_key_index);
// Identify the cycle participants:
let cycle = {
let mut v = vec![];
dg.for_each_cycle_participant(
from_id,
&mut from_stack,
database_key_index,
to_id,
|aqs| {
aqs.iter_mut().for_each(|aq| {
cycle_query.add_from(aq);
v.push(aq.database_key_index);
});
},
);
// We want to give the participants in a deterministic order
// (at least for this execution, not necessarily across executions),
// no matter where it started on the stack. Find the minimum
// key and rotate it to the front.
let min = v.iter().min().unwrap();
let index = v.iter().position(|p| p == min).unwrap();
v.rotate_left(index);
// No need to store extra memory.
v.shrink_to_fit();
Cycle::new(Arc::new(v))
};
debug!(
"cycle {:?}, cycle_query {:#?}",
cycle.debug(db),
cycle_query,
);
// We can remove the cycle participants from the list of dependencies;
// they are a strongly connected component (SCC) and we only care about
// dependencies to things outside the SCC that control whether it will
// form again.
cycle_query.remove_cycle_participants(&cycle);
// Mark each cycle participant that has recovery set, along with
// any frames that come after them on the same thread. Those frames
// are going to be unwound so that fallback can occur.
dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| {
aqs.iter_mut()
.skip_while(
|aq| match db.cycle_recovery_strategy(aq.database_key_index) {
CycleRecoveryStrategy::Panic => true,
CycleRecoveryStrategy::Fallback => false,
},
)
.for_each(|aq| {
debug!("marking {:?} for fallback", aq.database_key_index.debug(db));
aq.take_inputs_from(&cycle_query);
assert!(aq.cycle.is_none());
aq.cycle = Some(cycle.clone());
});
});
// Unblock every thread that has cycle recovery with a `WaitResult::Cycle`.
// They will throw the cycle, which will be caught by the frame that has
// cycle recovery so that it can execute that recovery.
let (me_recovered, others_recovered) =
dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id);
self.local_state.restore_query_stack(from_stack);
if me_recovered {
// If the current thread has recovery, we want to throw
// so that it can begin.
cycle.throw()
} else if others_recovered {
// If other threads have recovery but we didn't: return and we will block on them.
} else {
// if nobody has recover, then we panic
panic_any(cycle);
}
}
/// Block until `other_id` completes executing `database_key`;
/// panic or unwind in the case of a cycle.
///
/// `query_mutex_guard` is the guard for the current query's state;
/// it will be dropped after we have successfully registered the
/// dependency.
///
/// # Propagating panics
///
/// If the thread `other_id` panics, then our thread is considered
/// cancelled, so this function will panic with a `Cancelled` value.
///
/// # Cycle handling
///
/// If the thread `other_id` already depends on the current thread,
/// and hence there is a cycle in the query graph, then this function
/// will unwind instead of returning normally. The method of unwinding
/// depends on the [`Self::mutual_cycle_recovery_strategy`]
/// of the cycle participants:
///
/// * [`CycleRecoveryStrategy::Panic`]: panic with the [`Cycle`] as the value.
/// * [`CycleRecoveryStrategy::Fallback`]: initiate unwinding with [`CycleParticipant::unwind`].
pub(crate) fn block_on_or_unwind<QueryMutexGuard>(
&self,
db: &dyn Database,
database_key: DatabaseKeyIndex,
other_id: RuntimeId,
query_mutex_guard: QueryMutexGuard,
) {
let mut dg = self.shared_state.dependency_graph.lock();
if dg.depends_on(other_id, self.id()) {
self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id);
// If the above fn returns, then (via cycle recovery) it has unblocked the
// cycle, so we can continue.
assert!(!dg.depends_on(other_id, self.id()));
}
db.salsa_event(Event {
runtime_id: self.id(),
kind: EventKind::WillBlockOn {
other_runtime_id: other_id,
database_key,
},
});
let stack = self.local_state.take_query_stack();
let (stack, result) = DependencyGraph::block_on(
dg,
self.id(),
database_key,
other_id,
stack,
query_mutex_guard,
);
self.local_state.restore_query_stack(stack);
match result {
WaitResult::Completed => (),
// If the other thread panicked, then we consider this thread
// cancelled. The assumption is that the panic will be detected
// by the other thread and responded to appropriately.
WaitResult::Panicked => Cancelled::PropagatedPanic.throw(),
WaitResult::Cycle(c) => c.throw(),
}
}
/// Invoked when this runtime completed computing `database_key` with
/// the given result `wait_result` (`wait_result` should be `None` if
/// computing `database_key` panicked and could not complete).
/// This function unblocks any dependent queries and allows them
/// to continue executing.
pub(crate) fn unblock_queries_blocked_on(
&self,
database_key: DatabaseKeyIndex,
wait_result: WaitResult,
) {
self.shared_state
.dependency_graph
.lock()
.unblock_runtimes_blocked_on(database_key, wait_result);
}
}
/// State that will be common to all threads (when we support multiple threads)
struct SharedState {
/// Stores the next id to use for a snapshotted runtime (starts at 1).
next_id: AtomicUsize,
/// Whenever derived queries are executing, they acquire this lock
/// in read mode. Mutating inputs (and thus creating a new
/// revision) requires a write lock (thus guaranteeing that no
/// derived queries are in progress). Note that this is not needed
/// to prevent **race conditions** -- the revision counter itself
/// is stored in an `AtomicUsize` so it can be cheaply read
/// without acquiring the lock. Rather, the `query_lock` is used
/// to ensure a higher-level consistency property.
query_lock: RwLock<()>,
/// This is typically equal to `revision` -- set to `revision+1`
/// when a new revision is pending (which implies that the current
/// revision is cancelled).
pending_revision: AtomicRevision,
/// Stores the "last change" revision for values of each duration.
/// This vector is always of length at least 1 (for Durability 0)
/// but its total length depends on the number of durations. The
/// element at index 0 is special as it represents the "current
/// revision". In general, we have the invariant that revisions
/// in here are *declining* -- that is, `revisions[i] >=
/// revisions[i + 1]`, for all `i`. This is because when you
/// modify a value with durability D, that implies that values
/// with durability less than D may have changed too.
revisions: Vec<AtomicRevision>,
/// The dependency graph tracks which runtimes are blocked on one
/// another, waiting for queries to terminate.
dependency_graph: Mutex<DependencyGraph>,
}
impl SharedState {
fn with_durabilities(durabilities: usize) -> Self {
SharedState {
next_id: AtomicUsize::new(1),
query_lock: Default::default(),
revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(),
pending_revision: AtomicRevision::start(),
dependency_graph: Default::default(),
}
}
}
impl std::panic::RefUnwindSafe for SharedState {}
impl Default for SharedState {
fn default() -> Self {
Self::with_durabilities(Durability::LEN)
}
}
impl std::fmt::Debug for SharedState {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let query_lock = if self.query_lock.try_write().is_some() {
"<unlocked>"
} else if self.query_lock.try_read().is_some() {
"<rlocked>"
} else {
"<wlocked>"
};
fmt.debug_struct("SharedState")
.field("query_lock", &query_lock)
.field("revisions", &self.revisions)
.field("pending_revision", &self.pending_revision)
.finish()
}
}
#[derive(Debug)]
struct ActiveQuery {
/// What query is executing
database_key_index: DatabaseKeyIndex,
/// Minimum durability of inputs observed so far.
durability: Durability,
/// Maximum revision of all inputs observed. If we observe an
/// untracked read, this will be set to the most recent revision.
changed_at: Revision,
/// Set of subqueries that were accessed thus far, or `None` if
/// there was an untracked the read.
dependencies: Option<FxIndexSet<DatabaseKeyIndex>>,
/// Stores the entire cycle, if one is found and this query is part of it.
cycle: Option<Cycle>,
}
impl ActiveQuery {
fn new(database_key_index: DatabaseKeyIndex) -> Self {
ActiveQuery {
database_key_index,
durability: Durability::MAX,
changed_at: Revision::start(),
dependencies: Some(FxIndexSet::default()),
cycle: None,
}
}
fn add_read(&mut self, input: DatabaseKeyIndex, durability: Durability, revision: Revision) {
if let Some(set) = &mut self.dependencies {
set.insert(input);
}
self.durability = self.durability.min(durability);
self.changed_at = self.changed_at.max(revision);
}
fn add_untracked_read(&mut self, changed_at: Revision) {
self.dependencies = None;
self.durability = Durability::LOW;
self.changed_at = changed_at;
}
fn add_synthetic_read(&mut self, durability: Durability, revision: Revision) {
self.dependencies = None;
self.durability = self.durability.min(durability);
self.changed_at = self.changed_at.max(revision);
}
pub(crate) fn revisions(&self) -> QueryRevisions {
let inputs = match &self.dependencies {
None => QueryInputs::Untracked,
Some(dependencies) => {
if dependencies.is_empty() {
QueryInputs::NoInputs
} else {
QueryInputs::Tracked {
inputs: dependencies.iter().copied().collect(),
}
}
}
};
QueryRevisions {
changed_at: self.changed_at,
inputs,
durability: self.durability,
}
}
/// Adds any dependencies from `other` into `self`.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
fn add_from(&mut self, other: &ActiveQuery) {
self.changed_at = self.changed_at.max(other.changed_at);
self.durability = self.durability.min(other.durability);
if let Some(other_dependencies) = &other.dependencies {
if let Some(my_dependencies) = &mut self.dependencies {
my_dependencies.extend(other_dependencies.iter().copied());
}
} else {
self.dependencies = None;
}
}
/// Removes the participants in `cycle` from my dependencies.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
fn remove_cycle_participants(&mut self, cycle: &Cycle) {
if let Some(my_dependencies) = &mut self.dependencies {
for p in cycle.participant_keys() {
my_dependencies.remove(&p);
}
}
}
/// Copy the changed-at, durability, and dependencies from `cycle_query`.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
pub(crate) fn take_inputs_from(&mut self, cycle_query: &ActiveQuery) {
self.changed_at = cycle_query.changed_at;
self.durability = cycle_query.durability;
self.dependencies = cycle_query.dependencies.clone();
}
}
/// A unique identifier for a particular runtime. Each time you create
/// a snapshot, a fresh `RuntimeId` is generated. Once a snapshot is
/// complete, its `RuntimeId` may potentially be re-used.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct RuntimeId {
counter: usize,
}
#[derive(Clone, Debug)]
pub(crate) struct StampedValue<V> {
pub(crate) value: V,
pub(crate) durability: Durability,
pub(crate) changed_at: Revision,
}
struct RevisionGuard {
shared_state: Arc<SharedState>,
}
impl RevisionGuard {
fn new(shared_state: &Arc<SharedState>) -> Self {
// Subtle: we use a "recursive" lock here so that it is not an
// error to acquire a read-lock when one is already held (this
// happens when a query uses `snapshot` to spawn off parallel
// workers, for example).
//
// This has the side-effect that we are responsible to ensure
// that people contending for the write lock do not starve,
// but this is what we achieve via the cancellation mechanism.
//
// (In particular, since we only ever have one "mutating
// handle" to the database, the only contention for the global
// query lock occurs when there are "futures" evaluating
// queries in parallel, and those futures hold a read-lock
// already, so the starvation problem is more about them bring
// themselves to a close, versus preventing other people from
// *starting* work).
unsafe {
shared_state.query_lock.raw().lock_shared_recursive();
}
Self {
shared_state: shared_state.clone(),
}
}
}
impl Drop for RevisionGuard {
fn drop(&mut self) {
// Release our read-lock without using RAII. As documented in
// `Snapshot::new` above, this requires the unsafe keyword.
unsafe {
self.shared_state.query_lock.raw().unlock_shared();
}
}
}
| {
for rev in &self.shared_state.revisions[1..=d.index()] {
rev.store(new_revision);
}
} | conditional_block |
runtime.rs | use crate::durability::Durability;
use crate::hash::*;
use crate::plumbing::CycleRecoveryStrategy;
use crate::revision::{AtomicRevision, Revision};
use crate::{Cancelled, Cycle, Database, DatabaseKeyIndex, Event, EventKind};
use log::debug;
use parking_lot::lock_api::{RawRwLock, RawRwLockRecursive};
use parking_lot::{Mutex, RwLock};
use std::hash::Hash;
use std::panic::panic_any;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
mod dependency_graph;
use dependency_graph::DependencyGraph;
pub(crate) mod local_state;
use local_state::LocalState;
use self::local_state::{ActiveQueryGuard, QueryInputs, QueryRevisions};
/// The salsa runtime stores the storage for all queries as well as
/// tracking the query stack and dependencies between cycles.
///
/// Each new runtime you create (e.g., via `Runtime::new` or
/// `Runtime::default`) will have an independent set of query storage
/// associated with it. Normally, therefore, you only do this once, at
/// the start of your application.
pub struct Runtime {
/// Our unique runtime id.
id: RuntimeId,
/// If this is a "forked" runtime, then the `revision_guard` will
/// be `Some`; this guard holds a read-lock on the global query
/// lock.
revision_guard: Option<RevisionGuard>,
/// Local state that is specific to this runtime (thread).
local_state: LocalState,
/// Shared state that is accessible via all runtimes.
shared_state: Arc<SharedState>,
}
#[derive(Clone, Debug)]
pub(crate) enum WaitResult {
Completed,
Panicked,
Cycle(Cycle),
}
impl Default for Runtime {
fn default() -> Self {
Runtime {
id: RuntimeId { counter: 0 },
revision_guard: None,
shared_state: Default::default(),
local_state: Default::default(),
}
}
}
impl std::fmt::Debug for Runtime {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fmt.debug_struct("Runtime")
.field("id", &self.id())
.field("forked", &self.revision_guard.is_some())
.field("shared_state", &self.shared_state)
.finish()
}
}
impl Runtime {
/// Create a new runtime; equivalent to `Self::default`. This is
/// used when creating a new database.
pub fn new() -> Self {
Self::default()
}
/// See [`crate::storage::Storage::snapshot`].
pub(crate) fn snapshot(&self) -> Self {
if self.local_state.query_in_progress() {
panic!("it is not legal to `snapshot` during a query (see salsa-rs/salsa#80)");
}
let revision_guard = RevisionGuard::new(&self.shared_state);
let id = RuntimeId {
counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst),
};
Runtime {
id,
revision_guard: Some(revision_guard),
shared_state: self.shared_state.clone(),
local_state: Default::default(),
}
}
/// A "synthetic write" causes the system to act *as though* some
/// input of durability `durability` has changed. This is mostly
/// useful for profiling scenarios.
///
/// **WARNING:** Just like an ordinary write, this method triggers
/// cancellation. If you invoke it while a snapshot exists, it
/// will block until that snapshot is dropped -- if that snapshot
/// is owned by the current thread, this could trigger deadlock.
pub fn synthetic_write(&mut self, durability: Durability) {
self.with_incremented_revision(|_next_revision| Some(durability));
}
/// The unique identifier attached to this `SalsaRuntime`. Each
/// snapshotted runtime has a distinct identifier.
#[inline]
pub fn id(&self) -> RuntimeId {
self.id
}
/// Returns the database-key for the query that this thread is
/// actively executing (if any).
pub fn active_query(&self) -> Option<DatabaseKeyIndex> {
self.local_state.active_query()
}
/// Read current value of the revision counter.
#[inline]
pub(crate) fn current_revision(&self) -> Revision {
self.shared_state.revisions[0].load()
}
/// The revision in which values with durability `d` may have last
/// changed. For D0, this is just the current revision. But for
/// higher levels of durability, this value may lag behind the
/// current revision. If we encounter a value of durability Di,
/// then, we can check this function to get a "bound" on when the
/// value may have changed, which allows us to skip walking its
/// dependencies.
#[inline]
pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision {
self.shared_state.revisions[d.index()].load()
}
/// Read current value of the revision counter.
#[inline]
pub(crate) fn pending_revision(&self) -> Revision {
self.shared_state.pending_revision.load()
}
#[cold]
pub(crate) fn unwind_cancelled(&self) {
self.report_untracked_read();
Cancelled::PendingWrite.throw();
}
/// Acquires the **global query write lock** (ensuring that no queries are
/// executing) and then increments the current revision counter; invokes
/// `op` with the global query write lock still held.
///
/// While we wait to acquire the global query write lock, this method will
/// also increment `pending_revision_increments`, thus signalling to queries
/// that their results are "cancelled" and they should abort as expeditiously
/// as possible.
///
/// The `op` closure should actually perform the writes needed. It is given
/// the new revision as an argument, and its return value indicates whether
/// any pre-existing value was modified:
///
/// - returning `None` means that no pre-existing value was modified (this
/// could occur e.g. when setting some key on an input that was never set
/// before)
/// - returning `Some(d)` indicates that a pre-existing value was modified
/// and it had the durability `d`. This will update the records for when
/// values with each durability were modified.
///
/// Note that, given our writer model, we can assume that only one thread is
/// attempting to increment the global revision at a time.
pub(crate) fn with_incremented_revision<F>(&mut self, op: F)
where
F: FnOnce(Revision) -> Option<Durability>,
{
log::debug!("increment_revision()");
if !self.permits_increment() {
panic!("increment_revision invoked during a query computation");
}
// Set the `pending_revision` field so that people
// know current revision is cancelled.
let current_revision = self.shared_state.pending_revision.fetch_then_increment();
// To modify the revision, we need the lock.
let shared_state = self.shared_state.clone();
let _lock = shared_state.query_lock.write();
let old_revision = self.shared_state.revisions[0].fetch_then_increment();
assert_eq!(current_revision, old_revision);
let new_revision = current_revision.next();
debug!("increment_revision: incremented to {:?}", new_revision);
if let Some(d) = op(new_revision) {
for rev in &self.shared_state.revisions[1..=d.index()] {
rev.store(new_revision);
}
}
}
pub(crate) fn permits_increment(&self) -> bool {
self.revision_guard.is_none() && !self.local_state.query_in_progress()
}
#[inline]
pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> {
self.local_state.push_query(database_key_index)
}
/// Reports that the currently active query read the result from
/// another query.
///
/// Also checks whether the "cycle participant" flag is set on
/// the current stack frame -- if so, panics with `CycleParticipant`
/// value, which should be caught by the code executing the query.
///
/// # Parameters
///
/// - `database_key`: the query whose result was read
/// - `changed_revision`: the last revision in which the result of that
/// query had changed
pub(crate) fn report_query_read_and_unwind_if_cycle_resulted(
&self,
input: DatabaseKeyIndex,
durability: Durability,
changed_at: Revision,
) |
/// Reports that the query depends on some state unknown to salsa.
///
/// Queries which report untracked reads will be re-executed in the next
/// revision.
pub fn report_untracked_read(&self) {
self.local_state
.report_untracked_read(self.current_revision());
}
/// Acts as though the current query had read an input with the given durability; this will force the current query's durability to be at most `durability`.
///
/// This is mostly useful to control the durability level for [on-demand inputs](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html).
pub fn report_synthetic_read(&self, durability: Durability) {
let changed_at = self.last_changed_revision(durability);
self.local_state
.report_synthetic_read(durability, changed_at);
}
/// Handles a cycle in the dependency graph that was detected when the
/// current thread tried to block on `database_key_index` which is being
/// executed by `to_id`. If this function returns, then `to_id` no longer
/// depends on the current thread, and so we should continue executing
/// as normal. Otherwise, the function will throw a `Cycle` which is expected
/// to be caught by some frame on our stack. This occurs either if there is
/// a frame on our stack with cycle recovery (possibly the top one!) or if there
/// is no cycle recovery at all.
fn unblock_cycle_and_maybe_throw(
&self,
db: &dyn Database,
dg: &mut DependencyGraph,
database_key_index: DatabaseKeyIndex,
to_id: RuntimeId,
) {
debug!(
"unblock_cycle_and_maybe_throw(database_key={:?})",
database_key_index
);
let mut from_stack = self.local_state.take_query_stack();
let from_id = self.id();
// Make a "dummy stack frame". As we iterate through the cycle, we will collect the
// inputs from each participant. Then, if we are participating in cycle recovery, we
// will propagate those results to all participants.
let mut cycle_query = ActiveQuery::new(database_key_index);
// Identify the cycle participants:
let cycle = {
let mut v = vec![];
dg.for_each_cycle_participant(
from_id,
&mut from_stack,
database_key_index,
to_id,
|aqs| {
aqs.iter_mut().for_each(|aq| {
cycle_query.add_from(aq);
v.push(aq.database_key_index);
});
},
);
// We want to give the participants in a deterministic order
// (at least for this execution, not necessarily across executions),
// no matter where it started on the stack. Find the minimum
// key and rotate it to the front.
let min = v.iter().min().unwrap();
let index = v.iter().position(|p| p == min).unwrap();
v.rotate_left(index);
// No need to store extra memory.
v.shrink_to_fit();
Cycle::new(Arc::new(v))
};
debug!(
"cycle {:?}, cycle_query {:#?}",
cycle.debug(db),
cycle_query,
);
// We can remove the cycle participants from the list of dependencies;
// they are a strongly connected component (SCC) and we only care about
// dependencies to things outside the SCC that control whether it will
// form again.
cycle_query.remove_cycle_participants(&cycle);
// Mark each cycle participant that has recovery set, along with
// any frames that come after them on the same thread. Those frames
// are going to be unwound so that fallback can occur.
dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| {
aqs.iter_mut()
.skip_while(
|aq| match db.cycle_recovery_strategy(aq.database_key_index) {
CycleRecoveryStrategy::Panic => true,
CycleRecoveryStrategy::Fallback => false,
},
)
.for_each(|aq| {
debug!("marking {:?} for fallback", aq.database_key_index.debug(db));
aq.take_inputs_from(&cycle_query);
assert!(aq.cycle.is_none());
aq.cycle = Some(cycle.clone());
});
});
// Unblock every thread that has cycle recovery with a `WaitResult::Cycle`.
// They will throw the cycle, which will be caught by the frame that has
// cycle recovery so that it can execute that recovery.
let (me_recovered, others_recovered) =
dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id);
self.local_state.restore_query_stack(from_stack);
if me_recovered {
// If the current thread has recovery, we want to throw
// so that it can begin.
cycle.throw()
} else if others_recovered {
// If other threads have recovery but we didn't: return and we will block on them.
} else {
// if nobody has recover, then we panic
panic_any(cycle);
}
}
/// Block until `other_id` completes executing `database_key`;
/// panic or unwind in the case of a cycle.
///
/// `query_mutex_guard` is the guard for the current query's state;
/// it will be dropped after we have successfully registered the
/// dependency.
///
/// # Propagating panics
///
/// If the thread `other_id` panics, then our thread is considered
/// cancelled, so this function will panic with a `Cancelled` value.
///
/// # Cycle handling
///
/// If the thread `other_id` already depends on the current thread,
/// and hence there is a cycle in the query graph, then this function
/// will unwind instead of returning normally. The method of unwinding
/// depends on the [`Self::mutual_cycle_recovery_strategy`]
/// of the cycle participants:
///
/// * [`CycleRecoveryStrategy::Panic`]: panic with the [`Cycle`] as the value.
/// * [`CycleRecoveryStrategy::Fallback`]: initiate unwinding with [`CycleParticipant::unwind`].
pub(crate) fn block_on_or_unwind<QueryMutexGuard>(
&self,
db: &dyn Database,
database_key: DatabaseKeyIndex,
other_id: RuntimeId,
query_mutex_guard: QueryMutexGuard,
) {
let mut dg = self.shared_state.dependency_graph.lock();
if dg.depends_on(other_id, self.id()) {
self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id);
// If the above fn returns, then (via cycle recovery) it has unblocked the
// cycle, so we can continue.
assert!(!dg.depends_on(other_id, self.id()));
}
db.salsa_event(Event {
runtime_id: self.id(),
kind: EventKind::WillBlockOn {
other_runtime_id: other_id,
database_key,
},
});
let stack = self.local_state.take_query_stack();
let (stack, result) = DependencyGraph::block_on(
dg,
self.id(),
database_key,
other_id,
stack,
query_mutex_guard,
);
self.local_state.restore_query_stack(stack);
match result {
WaitResult::Completed => (),
// If the other thread panicked, then we consider this thread
// cancelled. The assumption is that the panic will be detected
// by the other thread and responded to appropriately.
WaitResult::Panicked => Cancelled::PropagatedPanic.throw(),
WaitResult::Cycle(c) => c.throw(),
}
}
/// Invoked when this runtime completed computing `database_key` with
/// the given result `wait_result` (`wait_result` should be `None` if
/// computing `database_key` panicked and could not complete).
/// This function unblocks any dependent queries and allows them
/// to continue executing.
pub(crate) fn unblock_queries_blocked_on(
&self,
database_key: DatabaseKeyIndex,
wait_result: WaitResult,
) {
self.shared_state
.dependency_graph
.lock()
.unblock_runtimes_blocked_on(database_key, wait_result);
}
}
/// State that will be common to all threads (when we support multiple threads)
struct SharedState {
/// Stores the next id to use for a snapshotted runtime (starts at 1).
next_id: AtomicUsize,
/// Whenever derived queries are executing, they acquire this lock
/// in read mode. Mutating inputs (and thus creating a new
/// revision) requires a write lock (thus guaranteeing that no
/// derived queries are in progress). Note that this is not needed
/// to prevent **race conditions** -- the revision counter itself
/// is stored in an `AtomicUsize` so it can be cheaply read
/// without acquiring the lock. Rather, the `query_lock` is used
/// to ensure a higher-level consistency property.
query_lock: RwLock<()>,
/// This is typically equal to `revision` -- set to `revision+1`
/// when a new revision is pending (which implies that the current
/// revision is cancelled).
pending_revision: AtomicRevision,
/// Stores the "last change" revision for values of each duration.
/// This vector is always of length at least 1 (for Durability 0)
/// but its total length depends on the number of durations. The
/// element at index 0 is special as it represents the "current
/// revision". In general, we have the invariant that revisions
/// in here are *declining* -- that is, `revisions[i] >=
/// revisions[i + 1]`, for all `i`. This is because when you
/// modify a value with durability D, that implies that values
/// with durability less than D may have changed too.
revisions: Vec<AtomicRevision>,
/// The dependency graph tracks which runtimes are blocked on one
/// another, waiting for queries to terminate.
dependency_graph: Mutex<DependencyGraph>,
}
impl SharedState {
fn with_durabilities(durabilities: usize) -> Self {
SharedState {
next_id: AtomicUsize::new(1),
query_lock: Default::default(),
revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(),
pending_revision: AtomicRevision::start(),
dependency_graph: Default::default(),
}
}
}
impl std::panic::RefUnwindSafe for SharedState {}
impl Default for SharedState {
fn default() -> Self {
Self::with_durabilities(Durability::LEN)
}
}
impl std::fmt::Debug for SharedState {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let query_lock = if self.query_lock.try_write().is_some() {
"<unlocked>"
} else if self.query_lock.try_read().is_some() {
"<rlocked>"
} else {
"<wlocked>"
};
fmt.debug_struct("SharedState")
.field("query_lock", &query_lock)
.field("revisions", &self.revisions)
.field("pending_revision", &self.pending_revision)
.finish()
}
}
#[derive(Debug)]
struct ActiveQuery {
/// What query is executing
database_key_index: DatabaseKeyIndex,
/// Minimum durability of inputs observed so far.
durability: Durability,
/// Maximum revision of all inputs observed. If we observe an
/// untracked read, this will be set to the most recent revision.
changed_at: Revision,
/// Set of subqueries that were accessed thus far, or `None` if
/// there was an untracked the read.
dependencies: Option<FxIndexSet<DatabaseKeyIndex>>,
/// Stores the entire cycle, if one is found and this query is part of it.
cycle: Option<Cycle>,
}
impl ActiveQuery {
fn new(database_key_index: DatabaseKeyIndex) -> Self {
ActiveQuery {
database_key_index,
durability: Durability::MAX,
changed_at: Revision::start(),
dependencies: Some(FxIndexSet::default()),
cycle: None,
}
}
fn add_read(&mut self, input: DatabaseKeyIndex, durability: Durability, revision: Revision) {
if let Some(set) = &mut self.dependencies {
set.insert(input);
}
self.durability = self.durability.min(durability);
self.changed_at = self.changed_at.max(revision);
}
fn add_untracked_read(&mut self, changed_at: Revision) {
self.dependencies = None;
self.durability = Durability::LOW;
self.changed_at = changed_at;
}
fn add_synthetic_read(&mut self, durability: Durability, revision: Revision) {
self.dependencies = None;
self.durability = self.durability.min(durability);
self.changed_at = self.changed_at.max(revision);
}
pub(crate) fn revisions(&self) -> QueryRevisions {
let inputs = match &self.dependencies {
None => QueryInputs::Untracked,
Some(dependencies) => {
if dependencies.is_empty() {
QueryInputs::NoInputs
} else {
QueryInputs::Tracked {
inputs: dependencies.iter().copied().collect(),
}
}
}
};
QueryRevisions {
changed_at: self.changed_at,
inputs,
durability: self.durability,
}
}
/// Adds any dependencies from `other` into `self`.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
fn add_from(&mut self, other: &ActiveQuery) {
self.changed_at = self.changed_at.max(other.changed_at);
self.durability = self.durability.min(other.durability);
if let Some(other_dependencies) = &other.dependencies {
if let Some(my_dependencies) = &mut self.dependencies {
my_dependencies.extend(other_dependencies.iter().copied());
}
} else {
self.dependencies = None;
}
}
/// Removes the participants in `cycle` from my dependencies.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
fn remove_cycle_participants(&mut self, cycle: &Cycle) {
if let Some(my_dependencies) = &mut self.dependencies {
for p in cycle.participant_keys() {
my_dependencies.remove(&p);
}
}
}
/// Copy the changed-at, durability, and dependencies from `cycle_query`.
/// Used during cycle recovery, see [`Runtime::create_cycle_error`].
pub(crate) fn take_inputs_from(&mut self, cycle_query: &ActiveQuery) {
self.changed_at = cycle_query.changed_at;
self.durability = cycle_query.durability;
self.dependencies = cycle_query.dependencies.clone();
}
}
/// A unique identifier for a particular runtime. Each time you create
/// a snapshot, a fresh `RuntimeId` is generated. Once a snapshot is
/// complete, its `RuntimeId` may potentially be re-used.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct RuntimeId {
counter: usize,
}
#[derive(Clone, Debug)]
pub(crate) struct StampedValue<V> {
pub(crate) value: V,
pub(crate) durability: Durability,
pub(crate) changed_at: Revision,
}
/// RAII guard holding a shared read-lock on the global query lock;
/// the lock is released when the guard is dropped (see `impl Drop`).
struct RevisionGuard {
    shared_state: Arc<SharedState>,
}
impl RevisionGuard {
    /// Acquires a (recursive) shared read-lock on the global query lock
    /// and keeps it held until the returned guard is dropped.
    fn new(shared_state: &Arc<SharedState>) -> Self {
        // Subtle: we use a "recursive" lock here so that it is not an
        // error to acquire a read-lock when one is already held (this
        // happens when a query uses `snapshot` to spawn off parallel
        // workers, for example).
        //
        // This has the side-effect that we are responsible to ensure
        // that people contending for the write lock do not starve,
        // but this is what we achieve via the cancellation mechanism.
        //
        // (In particular, since we only ever have one "mutating
        // handle" to the database, the only contention for the global
        // query lock occurs when there are "futures" evaluating
        // queries in parallel, and those futures hold a read-lock
        // already, so the starvation problem is more about them bring
        // themselves to a close, versus preventing other people from
        // *starting* work).
        unsafe {
            shared_state.query_lock.raw().lock_shared_recursive();
        }
        Self {
            shared_state: shared_state.clone(),
        }
    }
}
impl Drop for RevisionGuard {
    fn drop(&mut self) {
        // Release our read-lock without using RAII. As documented in
        // `Snapshot::new` above, this requires the unsafe keyword.
        // This pairs with the `lock_shared_recursive` call performed in
        // `RevisionGuard::new`.
        unsafe {
            self.shared_state.query_lock.raw().unlock_shared();
        }
    }
}
| {
self.local_state
.report_query_read_and_unwind_if_cycle_resulted(input, durability, changed_at);
} | identifier_body |
pg_cre_table.py | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 Michael Purcaro, Henry Pratt, Jill Moore, Zhiping Weng
import psycopg2
import json
import itertools
from io import StringIO
import io
from operator import itemgetter
import sys
import os
from natsort import natsorted
from collections import namedtuple
import psycopg2.extras
from coord import Coord
from pg_common import PGcommon
from config import Config
sys.path.append(os.path.join(os.path.dirname(__file__), "../../common"))
from cre_utils import isaccession, isclose, checkChrom
sys.path.append(os.path.join(os.path.dirname(__file__), "../../utils"))
from utils import eprint
class PGcreTable(object):
infoFields = {"accession": "cre.accession",
"isproximal": "cre.isproximal",
"k4me3max": "cre.h3k4me3_max",
"k27acmax": "cre.h3k27ac_max",
"ctcfmax": "cre.ctcf_max",
"concordant": "cre.concordant"}
@staticmethod
def _getInfo():
pairs = []
for k, v in PGcreTable.infoFields.items():
pairs.append("'%s', %s" % (k, v))
return "json_build_object(" + ','.join(pairs) + ") as info"
def __init__(self, pw, assembly, ctmap, ctsTable):
self.pw = pw
self.assembly = assembly
self.ctmap = ctmap
self.ctsTable = ctsTable
self.tableName = self.assembly + "_cre_all"
self.ctSpecifc = {}
self.fields = [
"maxZ",
"cre.chrom", "cre.start",
"cre.stop - cre.start AS len",
"cre.gene_all_id", "cre.gene_pc_id",
"0::int as in_cart",
"cre.pct"]
self.whereClauses = []
def _getCtSpecific(self, useAccs):
pairs = []
if not useAccs:
for k, v in self.ctSpecifc.items():
pairs.append("'%s', %s" % (k, v))
return "json_build_object(" + ','.join(pairs) + ") as ctSpecifc"
def _sct(self, ct):
if ct in self.ctsTable:
self.fields.append("cre.creGroupsSpecific[%s] AS sct" % # TODO rename to sct
self.ctsTable[ct])
else:
self.fields.append("0::int AS sct")
def _buildWhereStatement(self, j, chrom, start, stop):
useAccs = self._accessions(j)
if useAccs and False:
self._notCtSpecific(j)
else:
ct = j.get("cellType", None)
self._sct(ct)
if ct:
self._ctSpecific(ct, j)
else:
self._notCtSpecific(j)
self._where(chrom, start, stop)
fields = ', '.join([PGcreTable._getInfo(), self._getCtSpecific(False)] + self.fields)
ret = ""
if len(self.whereClauses) > 0:
ret = "WHERE " + " and ".join(self.whereClauses)
return fields, ret
def geneTable(self, j, chrom, start, stop):
# print(self.assembly + '_gene_details')
rows = self.pw.fetchall("select_gene_table", """
SELECT * from {tableName}
WHERE transcript_id IN (
SELECT transcript_id from {tableName}
WHERE feature='transcript'
AND seqname='{seqname}'
AND (int4range({startpos}, {endpos}) &&
int4range(startpos, endpos) ))
""".format(tableName=self.assembly + '_gene_details',
seqname=chrom, startpos=start, endpos=stop))
response = []
transcript_id = ''
transcript_id_value = ''
for row in rows:
response.append({
'transcript_id': row[9],
'seqid': row[1].rstrip(),
'type': row[4],
'start': row[2],
'end': row[3],
'strand': row[6].rstrip(),
'exon_number': row[5],
'parent': row[7],
})
result = []
response = sorted(response, key=itemgetter('transcript_id'))
for (key, value) in itertools.groupby(response,
key=itemgetter('transcript_id')):
v = []
start = ''
end = ''
strand = ''
for i in value:
gtype = i.get('type')
if gtype == 'transcript':
start = i.get('start')
end = i.get('end')
strand = i.get('strand')
seqid = i.get('seqid')
if gtype == 'CDS' or gtype == 'exon':
v.append(i)
if len(v) > 0:
result.append({
'transcript_id': key,
'seqid': seqid,
'start': start,
'end': end,
'strand': strand,
'values': v,
})
return result
def | (self, j, chrom, start, stop):
"""
tfclause = "peakintersections.accession = cre.accession"
if "tfs" in j:
tfclause += " and peakintersections.tf ?| array(" + ",".join(["'%s'" % tf for tf in j["tfs"]]) + ")"
"""
fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
rows = self.pw.fetchall("cre_table", """
SELECT JSON_AGG(r) from(
SELECT {fields}, {vtn}.vistaids
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
LEFT JOIN {vtn} ON {vtn}.accession = cre.accession
{whereClause}
ORDER BY maxz DESC
LIMIT 1000) r
""".format(fields=fields, tn=self.tableName,
vtn = self.assembly + "_vista",
ttn = self.assembly + "_ccres_toptier",
whereClause=whereClause))
r = rows[0][0]
if not r:
r = []
total = len(rows)
if total >= 1000: # reached query limit
total = self._creTableEstimate(curs, whereClause)
return {"cres": r, "total": total}
def _accessions(self, j):
accs = j.get("accessions", [])
if not accs or 0 == len(accs):
return False
if accs and len(accs) > 0:
if type(accs[0]) is dict:
accs = [x["value"] for x in accs if x["checked"]]
accs = [x for x in accs if isaccession(x)]
if accs:
accs = ["'%s'" % x.upper() for x in accs]
accsQuery = "cre.accession IN (%s)" % ','.join(accs)
self.whereClauses.append("(%s)" % accsQuery)
return True
return False
def _where(self, chrom, start, stop):
if chrom and start and stop:
self.whereClauses += ["cre.chrom = '%s'" % chrom,
"int4range(cre.start, cre.stop) && int4range(%s, %s)" % (int(start), int(stop))]
def _creTableEstimate(self, curs, whereClause):
# estimate count
# from https://wiki.postgresql.org/wiki/Count_estimate
# qoute escape from
# http://stackoverflow.com/a/12320729
r = self.pw.fetchone("_creTableEstimate", """
SELECT count(0)
FROM {tn} AS cre
{wc}
""".format(tn=self.tableName, wc=whereClause))
return r[0]
def _notCtSpecific(self, j):
# use max zscores
allmap = {"dnase": "dnase_max",
"promoter": "h3k4me3_max",
"enhancer": "h3k27ac_max",
"ctcf": "ctcf_max"}
for x in ["dnase", "promoter", "enhancer", "ctcf"]:
if "rank_%s_start" % x in j and "rank_%s_end" % x in j:
_range = [j["rank_%s_start" % x],
j["rank_%s_end" % x]]
self.whereClauses.append("(%s)" % " and ".join(
["cre.%s >= %f" % (allmap[x], _range[0]),
"cre.%s <= %f" % (allmap[x], _range[1])]))
self.fields.append("cre.%s AS %s_zscore" % (allmap[x], x))
    def _ctSpecific(self, ct, j):
        # Select per-cell-type z-score columns for cell type `ct` and add
        # any z-score range filters present in the request `j`.
        self.ctSpecifc["ct"] = "'" + ct + "'"
        for name, exp in [("dnase", "dnase"),
                          ("promoter", "h3k4me3"),
                          ("enhancer", "h3k27ac"),
                          ("ctcf", "ctcf")]:
            # Assay not available for this cell type: emit empty columns.
            if ct not in self.ctmap[name]:
                self.fields.append("'' AS %s_zscore" % (name))
                self.ctSpecifc[name + "_zscore"] = "null"
                continue
            # Index into the per-assay zscores array for this cell type.
            cti = self.ctmap[name][ct]
            self.fields.append("cre.%s_zscores[%d] AS %s_zscore" % (exp, cti, name))
            self.ctSpecifc[name + "_zscore"] = "cre.%s_zscores[%d]" % (exp, cti)
            if "rank_%s_start" % name in j and "rank_%s_end" % name in j:
                _range = [j["rank_%s_start" % name],
                          j["rank_%s_end" % name]]
                minDefault = -10.0  # must match slider default
                maxDefault = 10.0  # must match slider default
                # Slider at both extremes means "no filtering on this assay".
                if isclose(_range[0], minDefault) and isclose(_range[1], maxDefault):
                    continue  # not actually filtering on zscore, yet...
                # Only emit bounds the user actually moved off the defaults.
                if not isclose(_range[0], minDefault) and not isclose(_range[1], maxDefault):
                    self.whereClauses.append("(%s)" % " and ".join(
                        ["cre.%s_zscores[%d] >= %f" % (exp, cti, _range[0]),
                         "cre.%s_zscores[%d] <= %f" % (exp, cti, _range[1])]))
                elif not isclose(_range[0], minDefault):
                    self.whereClauses.append("(%s)" %
                                             "cre.%s_zscores[%d] >= %f" % (exp, cti, _range[0]))
                elif not isclose(_range[1], maxDefault):
                    self.whereClauses.append("(%s)" %
                                             "cre.%s_zscores[%d] <= %f" % (exp, cti, _range[1]))
def creTableDownloadBed(self, j, fnp):
chrom = checkChrom(self.assembly, j)
start = j.get("coord_start", 0)
stop = j.get("coord_end", 0)
fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
fields = ', '.join(["cre.chrom", "cre.start",
"cre.stop",
"cre.accession", "dnase_max"])
q = """
COPY (
SELECT {fields}
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
{whereClause}
) to STDOUT
with DELIMITER E'\t'
""".format(fields=fields,
tn=self.tableName,
ttn = self.assembly + "_ccres_toptier",
whereClause=whereClause)
self.pw.copy_expert("_cre_table_bed", q, fnp)
def creTableDownloadJson(self, j, fnp):
chrom = checkChrom(self.assembly, j)
start = j.get("coord_start", None)
stop = j.get("coord_end", None)
fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
q = """
copy (
SELECT JSON_AGG(r) from (
SELECT *
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
{whereClause}
) r
) to STDOUT
with DELIMITER E'\t'
""".format(tn=self.tableName, ttn = self.assembly + "_ccres_toptier",
whereClause=whereClause)
sf = io.StringIO()
self.pw.copy_expert_file_handle("_cre_table_json", q, sf)
sf.seek(0)
with open(fnp, 'w') as f:
for line in sf.readlines():
f.write(line.replace("\\n", ""))
| creTable | identifier_name |
pg_cre_table.py | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 Michael Purcaro, Henry Pratt, Jill Moore, Zhiping Weng
import psycopg2
import json
import itertools
from io import StringIO
import io
from operator import itemgetter
import sys
import os
from natsort import natsorted
from collections import namedtuple
import psycopg2.extras
from coord import Coord
from pg_common import PGcommon
from config import Config
sys.path.append(os.path.join(os.path.dirname(__file__), "../../common"))
from cre_utils import isaccession, isclose, checkChrom
sys.path.append(os.path.join(os.path.dirname(__file__), "../../utils"))
from utils import eprint
class PGcreTable(object):
infoFields = {"accession": "cre.accession",
"isproximal": "cre.isproximal",
"k4me3max": "cre.h3k4me3_max",
"k27acmax": "cre.h3k27ac_max",
"ctcfmax": "cre.ctcf_max",
"concordant": "cre.concordant"}
@staticmethod
def _getInfo():
pairs = []
for k, v in PGcreTable.infoFields.items():
pairs.append("'%s', %s" % (k, v))
return "json_build_object(" + ','.join(pairs) + ") as info"
def __init__(self, pw, assembly, ctmap, ctsTable):
self.pw = pw
self.assembly = assembly
self.ctmap = ctmap
self.ctsTable = ctsTable
self.tableName = self.assembly + "_cre_all"
self.ctSpecifc = {}
self.fields = [
"maxZ",
"cre.chrom", "cre.start",
"cre.stop - cre.start AS len",
"cre.gene_all_id", "cre.gene_pc_id",
"0::int as in_cart",
"cre.pct"]
self.whereClauses = []
def _getCtSpecific(self, useAccs):
pairs = []
if not useAccs:
for k, v in self.ctSpecifc.items():
pairs.append("'%s', %s" % (k, v))
return "json_build_object(" + ','.join(pairs) + ") as ctSpecifc"
def _sct(self, ct):
if ct in self.ctsTable:
self.fields.append("cre.creGroupsSpecific[%s] AS sct" % # TODO rename to sct
self.ctsTable[ct])
else:
self.fields.append("0::int AS sct")
    def _buildWhereStatement(self, j, chrom, start, stop):
        # Assemble the SELECT field list and the WHERE clause for a ccRE
        # query described by request dict `j`. Mutates self.fields,
        # self.whereClauses and self.ctSpecifc as side effects, so it
        # should be called at most once per instance.
        useAccs = self._accessions(j)
        # NOTE(review): `and False` makes this branch unreachable, so even
        # accession-filtered queries fall through to the cell-type logic
        # below -- presumably a deliberate disable; confirm before removing.
        if useAccs and False:
            self._notCtSpecific(j)
        else:
            ct = j.get("cellType", None)
            self._sct(ct)
            if ct:
                self._ctSpecific(ct, j)
            else:
                self._notCtSpecific(j)
        self._where(chrom, start, stop)
        # Final field list: info JSON + ct-specific JSON + accumulated columns.
        fields = ', '.join([PGcreTable._getInfo(), self._getCtSpecific(False)] + self.fields)
        ret = ""
        if len(self.whereClauses) > 0:
            ret = "WHERE " + " and ".join(self.whereClauses)
        return fields, ret
def geneTable(self, j, chrom, start, stop):
# print(self.assembly + '_gene_details')
rows = self.pw.fetchall("select_gene_table", """
SELECT * from {tableName}
WHERE transcript_id IN (
SELECT transcript_id from {tableName}
WHERE feature='transcript'
AND seqname='{seqname}'
AND (int4range({startpos}, {endpos}) &&
int4range(startpos, endpos) ))
""".format(tableName=self.assembly + '_gene_details',
seqname=chrom, startpos=start, endpos=stop))
response = []
transcript_id = ''
transcript_id_value = ''
for row in rows:
response.append({
'transcript_id': row[9],
'seqid': row[1].rstrip(),
'type': row[4],
'start': row[2],
'end': row[3],
'strand': row[6].rstrip(),
'exon_number': row[5],
'parent': row[7],
})
result = []
response = sorted(response, key=itemgetter('transcript_id'))
for (key, value) in itertools.groupby(response,
key=itemgetter('transcript_id')):
v = []
start = ''
end = ''
strand = ''
for i in value:
gtype = i.get('type')
if gtype == 'transcript':
start = i.get('start')
end = i.get('end')
strand = i.get('strand')
seqid = i.get('seqid')
if gtype == 'CDS' or gtype == 'exon':
v.append(i)
if len(v) > 0:
result.append({
'transcript_id': key,
'seqid': seqid,
'start': start,
'end': end,
'strand': strand,
'values': v,
})
return result
def creTable(self, j, chrom, start, stop):
"""
tfclause = "peakintersections.accession = cre.accession"
if "tfs" in j:
tfclause += " and peakintersections.tf ?| array(" + ",".join(["'%s'" % tf for tf in j["tfs"]]) + ")"
"""
fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
rows = self.pw.fetchall("cre_table", """
SELECT JSON_AGG(r) from(
SELECT {fields}, {vtn}.vistaids
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
LEFT JOIN {vtn} ON {vtn}.accession = cre.accession
{whereClause}
ORDER BY maxz DESC
LIMIT 1000) r
""".format(fields=fields, tn=self.tableName,
vtn = self.assembly + "_vista",
ttn = self.assembly + "_ccres_toptier",
whereClause=whereClause))
r = rows[0][0]
if not r:
r = []
total = len(rows)
if total >= 1000: # reached query limit
total = self._creTableEstimate(curs, whereClause)
return {"cres": r, "total": total}
def _accessions(self, j):
accs = j.get("accessions", [])
if not accs or 0 == len(accs):
return False
if accs and len(accs) > 0:
if type(accs[0]) is dict:
accs = [x["value"] for x in accs if x["checked"]]
accs = [x for x in accs if isaccession(x)]
if accs:
accs = ["'%s'" % x.upper() for x in accs]
accsQuery = "cre.accession IN (%s)" % ','.join(accs)
self.whereClauses.append("(%s)" % accsQuery)
return True
return False
def _where(self, chrom, start, stop):
if chrom and start and stop:
self.whereClauses += ["cre.chrom = '%s'" % chrom,
"int4range(cre.start, cre.stop) && int4range(%s, %s)" % (int(start), int(stop))]
    def _creTableEstimate(self, curs, whereClause):
        # Count rows of the cre table matching `whereClause`.
        # NOTE(review): despite the name and the links below, this runs a
        # full COUNT(0), not a planner estimate; `curs` is unused and kept
        # only for the call signature.
        # estimate count
        # from https://wiki.postgresql.org/wiki/Count_estimate
        # quote escape from
        # http://stackoverflow.com/a/12320729
        r = self.pw.fetchone("_creTableEstimate", """
SELECT count(0)
FROM {tn} AS cre
{wc}
""".format(tn=self.tableName, wc=whereClause))
        return r[0]
def _notCtSpecific(self, j):
# use max zscores
allmap = {"dnase": "dnase_max",
"promoter": "h3k4me3_max",
"enhancer": "h3k27ac_max",
"ctcf": "ctcf_max"}
for x in ["dnase", "promoter", "enhancer", "ctcf"]:
if "rank_%s_start" % x in j and "rank_%s_end" % x in j:
_range = [j["rank_%s_start" % x],
j["rank_%s_end" % x]]
self.whereClauses.append("(%s)" % " and ".join(
["cre.%s >= %f" % (allmap[x], _range[0]),
"cre.%s <= %f" % (allmap[x], _range[1])]))
self.fields.append("cre.%s AS %s_zscore" % (allmap[x], x))
def _ctSpecific(self, ct, j):
self.ctSpecifc["ct"] = "'" + ct + "'"
for name, exp in [("dnase", "dnase"),
("promoter", "h3k4me3"),
("enhancer", "h3k27ac"),
("ctcf", "ctcf")]:
if ct not in self.ctmap[name]:
self.fields.append("'' AS %s_zscore" % (name))
self.ctSpecifc[name + "_zscore"] = "null"
continue
cti = self.ctmap[name][ct]
self.fields.append("cre.%s_zscores[%d] AS %s_zscore" % (exp, cti, name))
self.ctSpecifc[name + "_zscore"] = "cre.%s_zscores[%d]" % (exp, cti)
if "rank_%s_start" % name in j and "rank_%s_end" % name in j:
_range = [j["rank_%s_start" % name],
j["rank_%s_end" % name]]
minDefault = -10.0 # must match slider default
maxDefault = 10.0 # must match slider default
if isclose(_range[0], minDefault) and isclose(_range[1], maxDefault):
continue # not actually filtering on zscore, yet...
if not isclose(_range[0], minDefault) and not isclose(_range[1], maxDefault):
|
elif not isclose(_range[0], minDefault):
self.whereClauses.append("(%s)" %
"cre.%s_zscores[%d] >= %f" % (exp, cti, _range[0]))
elif not isclose(_range[1], maxDefault):
self.whereClauses.append("(%s)" %
"cre.%s_zscores[%d] <= %f" % (exp, cti, _range[1]))
    def creTableDownloadBed(self, j, fnp):
        # Stream matching ccREs to file `fnp` as tab-separated rows
        # (chrom, start, stop, accession, dnase_max) via COPY ... TO STDOUT.
        chrom = checkChrom(self.assembly, j)
        # NOTE(review): defaults of 0 disable the coordinate filter inside
        # _where() because 0 is falsy there -- confirm intended.
        start = j.get("coord_start", 0)
        stop = j.get("coord_end", 0)
        fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
        # The field list returned above is deliberately discarded; only the
        # whereClause side effect is used for this export.
        fields = ', '.join(["cre.chrom", "cre.start",
                            "cre.stop",
                            "cre.accession", "dnase_max"])
        q = """
COPY (
SELECT {fields}
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
{whereClause}
) to STDOUT
with DELIMITER E'\t'
""".format(fields=fields,
           tn=self.tableName,
           ttn = self.assembly + "_ccres_toptier",
           whereClause=whereClause)
        self.pw.copy_expert("_cre_table_bed", q, fnp)
    def creTableDownloadJson(self, j, fnp):
        # Export matching ccREs to file `fnp` as a JSON array built with
        # JSON_AGG, streamed through an in-memory buffer.
        chrom = checkChrom(self.assembly, j)
        start = j.get("coord_start", None)
        stop = j.get("coord_end", None)
        fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
        q = """
copy (
SELECT JSON_AGG(r) from (
SELECT *
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
{whereClause}
) r
) to STDOUT
with DELIMITER E'\t'
""".format(tn=self.tableName, ttn = self.assembly + "_ccres_toptier",
           whereClause=whereClause)
        sf = io.StringIO()
        self.pw.copy_expert_file_handle("_cre_table_json", q, sf)
        sf.seek(0)
        with open(fnp, 'w') as f:
            for line in sf.readlines():
                # COPY escapes newlines inside the JSON payload as the two
                # characters backslash+n; strip those escape sequences.
                f.write(line.replace("\\n", ""))
| self.whereClauses.append("(%s)" % " and ".join(
["cre.%s_zscores[%d] >= %f" % (exp, cti, _range[0]),
"cre.%s_zscores[%d] <= %f" % (exp, cti, _range[1])])) | conditional_block |
pg_cre_table.py | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 Michael Purcaro, Henry Pratt, Jill Moore, Zhiping Weng
import psycopg2
import json
import itertools
from io import StringIO
import io
from operator import itemgetter
import sys
import os
from natsort import natsorted
from collections import namedtuple
import psycopg2.extras
from coord import Coord
from pg_common import PGcommon
from config import Config
sys.path.append(os.path.join(os.path.dirname(__file__), "../../common"))
from cre_utils import isaccession, isclose, checkChrom
sys.path.append(os.path.join(os.path.dirname(__file__), "../../utils"))
from utils import eprint
class PGcreTable(object):
infoFields = {"accession": "cre.accession",
"isproximal": "cre.isproximal",
"k4me3max": "cre.h3k4me3_max",
"k27acmax": "cre.h3k27ac_max",
"ctcfmax": "cre.ctcf_max",
"concordant": "cre.concordant"}
@staticmethod
def _getInfo():
pairs = []
for k, v in PGcreTable.infoFields.items():
pairs.append("'%s', %s" % (k, v))
return "json_build_object(" + ','.join(pairs) + ") as info"
def __init__(self, pw, assembly, ctmap, ctsTable):
self.pw = pw
self.assembly = assembly
self.ctmap = ctmap
self.ctsTable = ctsTable
self.tableName = self.assembly + "_cre_all"
self.ctSpecifc = {}
self.fields = [
"maxZ",
"cre.chrom", "cre.start",
"cre.stop - cre.start AS len",
"cre.gene_all_id", "cre.gene_pc_id",
"0::int as in_cart",
"cre.pct"]
self.whereClauses = []
def _getCtSpecific(self, useAccs):
|
def _sct(self, ct):
if ct in self.ctsTable:
self.fields.append("cre.creGroupsSpecific[%s] AS sct" % # TODO rename to sct
self.ctsTable[ct])
else:
self.fields.append("0::int AS sct")
def _buildWhereStatement(self, j, chrom, start, stop):
useAccs = self._accessions(j)
if useAccs and False:
self._notCtSpecific(j)
else:
ct = j.get("cellType", None)
self._sct(ct)
if ct:
self._ctSpecific(ct, j)
else:
self._notCtSpecific(j)
self._where(chrom, start, stop)
fields = ', '.join([PGcreTable._getInfo(), self._getCtSpecific(False)] + self.fields)
ret = ""
if len(self.whereClauses) > 0:
ret = "WHERE " + " and ".join(self.whereClauses)
return fields, ret
def geneTable(self, j, chrom, start, stop):
# print(self.assembly + '_gene_details')
rows = self.pw.fetchall("select_gene_table", """
SELECT * from {tableName}
WHERE transcript_id IN (
SELECT transcript_id from {tableName}
WHERE feature='transcript'
AND seqname='{seqname}'
AND (int4range({startpos}, {endpos}) &&
int4range(startpos, endpos) ))
""".format(tableName=self.assembly + '_gene_details',
seqname=chrom, startpos=start, endpos=stop))
response = []
transcript_id = ''
transcript_id_value = ''
for row in rows:
response.append({
'transcript_id': row[9],
'seqid': row[1].rstrip(),
'type': row[4],
'start': row[2],
'end': row[3],
'strand': row[6].rstrip(),
'exon_number': row[5],
'parent': row[7],
})
result = []
response = sorted(response, key=itemgetter('transcript_id'))
for (key, value) in itertools.groupby(response,
key=itemgetter('transcript_id')):
v = []
start = ''
end = ''
strand = ''
for i in value:
gtype = i.get('type')
if gtype == 'transcript':
start = i.get('start')
end = i.get('end')
strand = i.get('strand')
seqid = i.get('seqid')
if gtype == 'CDS' or gtype == 'exon':
v.append(i)
if len(v) > 0:
result.append({
'transcript_id': key,
'seqid': seqid,
'start': start,
'end': end,
'strand': strand,
'values': v,
})
return result
def creTable(self, j, chrom, start, stop):
"""
tfclause = "peakintersections.accession = cre.accession"
if "tfs" in j:
tfclause += " and peakintersections.tf ?| array(" + ",".join(["'%s'" % tf for tf in j["tfs"]]) + ")"
"""
fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
rows = self.pw.fetchall("cre_table", """
SELECT JSON_AGG(r) from(
SELECT {fields}, {vtn}.vistaids
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
LEFT JOIN {vtn} ON {vtn}.accession = cre.accession
{whereClause}
ORDER BY maxz DESC
LIMIT 1000) r
""".format(fields=fields, tn=self.tableName,
vtn = self.assembly + "_vista",
ttn = self.assembly + "_ccres_toptier",
whereClause=whereClause))
r = rows[0][0]
if not r:
r = []
total = len(rows)
if total >= 1000: # reached query limit
total = self._creTableEstimate(curs, whereClause)
return {"cres": r, "total": total}
def _accessions(self, j):
accs = j.get("accessions", [])
if not accs or 0 == len(accs):
return False
if accs and len(accs) > 0:
if type(accs[0]) is dict:
accs = [x["value"] for x in accs if x["checked"]]
accs = [x for x in accs if isaccession(x)]
if accs:
accs = ["'%s'" % x.upper() for x in accs]
accsQuery = "cre.accession IN (%s)" % ','.join(accs)
self.whereClauses.append("(%s)" % accsQuery)
return True
return False
def _where(self, chrom, start, stop):
if chrom and start and stop:
self.whereClauses += ["cre.chrom = '%s'" % chrom,
"int4range(cre.start, cre.stop) && int4range(%s, %s)" % (int(start), int(stop))]
def _creTableEstimate(self, curs, whereClause):
# estimate count
# from https://wiki.postgresql.org/wiki/Count_estimate
# qoute escape from
# http://stackoverflow.com/a/12320729
r = self.pw.fetchone("_creTableEstimate", """
SELECT count(0)
FROM {tn} AS cre
{wc}
""".format(tn=self.tableName, wc=whereClause))
return r[0]
def _notCtSpecific(self, j):
# use max zscores
allmap = {"dnase": "dnase_max",
"promoter": "h3k4me3_max",
"enhancer": "h3k27ac_max",
"ctcf": "ctcf_max"}
for x in ["dnase", "promoter", "enhancer", "ctcf"]:
if "rank_%s_start" % x in j and "rank_%s_end" % x in j:
_range = [j["rank_%s_start" % x],
j["rank_%s_end" % x]]
self.whereClauses.append("(%s)" % " and ".join(
["cre.%s >= %f" % (allmap[x], _range[0]),
"cre.%s <= %f" % (allmap[x], _range[1])]))
self.fields.append("cre.%s AS %s_zscore" % (allmap[x], x))
def _ctSpecific(self, ct, j):
self.ctSpecifc["ct"] = "'" + ct + "'"
for name, exp in [("dnase", "dnase"),
("promoter", "h3k4me3"),
("enhancer", "h3k27ac"),
("ctcf", "ctcf")]:
if ct not in self.ctmap[name]:
self.fields.append("'' AS %s_zscore" % (name))
self.ctSpecifc[name + "_zscore"] = "null"
continue
cti = self.ctmap[name][ct]
self.fields.append("cre.%s_zscores[%d] AS %s_zscore" % (exp, cti, name))
self.ctSpecifc[name + "_zscore"] = "cre.%s_zscores[%d]" % (exp, cti)
if "rank_%s_start" % name in j and "rank_%s_end" % name in j:
_range = [j["rank_%s_start" % name],
j["rank_%s_end" % name]]
minDefault = -10.0 # must match slider default
maxDefault = 10.0 # must match slider default
if isclose(_range[0], minDefault) and isclose(_range[1], maxDefault):
continue # not actually filtering on zscore, yet...
if not isclose(_range[0], minDefault) and not isclose(_range[1], maxDefault):
self.whereClauses.append("(%s)" % " and ".join(
["cre.%s_zscores[%d] >= %f" % (exp, cti, _range[0]),
"cre.%s_zscores[%d] <= %f" % (exp, cti, _range[1])]))
elif not isclose(_range[0], minDefault):
self.whereClauses.append("(%s)" %
"cre.%s_zscores[%d] >= %f" % (exp, cti, _range[0]))
elif not isclose(_range[1], maxDefault):
self.whereClauses.append("(%s)" %
"cre.%s_zscores[%d] <= %f" % (exp, cti, _range[1]))
def creTableDownloadBed(self, j, fnp):
chrom = checkChrom(self.assembly, j)
start = j.get("coord_start", 0)
stop = j.get("coord_end", 0)
fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
fields = ', '.join(["cre.chrom", "cre.start",
"cre.stop",
"cre.accession", "dnase_max"])
q = """
COPY (
SELECT {fields}
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
{whereClause}
) to STDOUT
with DELIMITER E'\t'
""".format(fields=fields,
tn=self.tableName,
ttn = self.assembly + "_ccres_toptier",
whereClause=whereClause)
self.pw.copy_expert("_cre_table_bed", q, fnp)
def creTableDownloadJson(self, j, fnp):
chrom = checkChrom(self.assembly, j)
start = j.get("coord_start", None)
stop = j.get("coord_end", None)
fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
q = """
copy (
SELECT JSON_AGG(r) from (
SELECT *
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
{whereClause}
) r
) to STDOUT
with DELIMITER E'\t'
""".format(tn=self.tableName, ttn = self.assembly + "_ccres_toptier",
whereClause=whereClause)
sf = io.StringIO()
self.pw.copy_expert_file_handle("_cre_table_json", q, sf)
sf.seek(0)
with open(fnp, 'w') as f:
for line in sf.readlines():
f.write(line.replace("\\n", ""))
| pairs = []
if not useAccs:
for k, v in self.ctSpecifc.items():
pairs.append("'%s', %s" % (k, v))
return "json_build_object(" + ','.join(pairs) + ") as ctSpecifc" | identifier_body |
pg_cre_table.py | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright (c) 2016-2020 Michael Purcaro, Henry Pratt, Jill Moore, Zhiping Weng
import psycopg2
import json
import itertools
from io import StringIO
import io
from operator import itemgetter
import sys
import os
from natsort import natsorted
from collections import namedtuple
import psycopg2.extras
from coord import Coord
from pg_common import PGcommon
from config import Config
sys.path.append(os.path.join(os.path.dirname(__file__), "../../common"))
from cre_utils import isaccession, isclose, checkChrom
sys.path.append(os.path.join(os.path.dirname(__file__), "../../utils"))
from utils import eprint
class PGcreTable(object):
infoFields = {"accession": "cre.accession",
"isproximal": "cre.isproximal",
"k4me3max": "cre.h3k4me3_max",
"k27acmax": "cre.h3k27ac_max",
"ctcfmax": "cre.ctcf_max",
"concordant": "cre.concordant"}
@staticmethod
def _getInfo():
pairs = []
for k, v in PGcreTable.infoFields.items():
pairs.append("'%s', %s" % (k, v))
return "json_build_object(" + ','.join(pairs) + ") as info"
def __init__(self, pw, assembly, ctmap, ctsTable):
self.pw = pw
self.assembly = assembly
self.ctmap = ctmap
self.ctsTable = ctsTable
self.tableName = self.assembly + "_cre_all"
self.ctSpecifc = {}
self.fields = [
"maxZ",
"cre.chrom", "cre.start",
"cre.stop - cre.start AS len",
"cre.gene_all_id", "cre.gene_pc_id",
"0::int as in_cart",
"cre.pct"]
self.whereClauses = []
def _getCtSpecific(self, useAccs):
pairs = []
if not useAccs:
for k, v in self.ctSpecifc.items():
pairs.append("'%s', %s" % (k, v))
return "json_build_object(" + ','.join(pairs) + ") as ctSpecifc"
def _sct(self, ct):
if ct in self.ctsTable:
self.fields.append("cre.creGroupsSpecific[%s] AS sct" % # TODO rename to sct
self.ctsTable[ct])
else:
self.fields.append("0::int AS sct")
def _buildWhereStatement(self, j, chrom, start, stop):
useAccs = self._accessions(j)
if useAccs and False:
self._notCtSpecific(j)
else:
ct = j.get("cellType", None)
self._sct(ct)
if ct:
self._ctSpecific(ct, j)
else:
self._notCtSpecific(j)
self._where(chrom, start, stop)
fields = ', '.join([PGcreTable._getInfo(), self._getCtSpecific(False)] + self.fields)
ret = ""
if len(self.whereClauses) > 0:
ret = "WHERE " + " and ".join(self.whereClauses)
return fields, ret
def geneTable(self, j, chrom, start, stop):
# print(self.assembly + '_gene_details')
rows = self.pw.fetchall("select_gene_table", """
SELECT * from {tableName}
WHERE transcript_id IN (
SELECT transcript_id from {tableName}
WHERE feature='transcript'
AND seqname='{seqname}'
AND (int4range({startpos}, {endpos}) &&
int4range(startpos, endpos) ))
""".format(tableName=self.assembly + '_gene_details',
seqname=chrom, startpos=start, endpos=stop))
response = []
transcript_id = ''
transcript_id_value = ''
for row in rows:
response.append({
'transcript_id': row[9],
'seqid': row[1].rstrip(),
'type': row[4],
'start': row[2],
'end': row[3], | result = []
response = sorted(response, key=itemgetter('transcript_id'))
for (key, value) in itertools.groupby(response,
key=itemgetter('transcript_id')):
v = []
start = ''
end = ''
strand = ''
for i in value:
gtype = i.get('type')
if gtype == 'transcript':
start = i.get('start')
end = i.get('end')
strand = i.get('strand')
seqid = i.get('seqid')
if gtype == 'CDS' or gtype == 'exon':
v.append(i)
if len(v) > 0:
result.append({
'transcript_id': key,
'seqid': seqid,
'start': start,
'end': end,
'strand': strand,
'values': v,
})
return result
def creTable(self, j, chrom, start, stop):
"""
tfclause = "peakintersections.accession = cre.accession"
if "tfs" in j:
tfclause += " and peakintersections.tf ?| array(" + ",".join(["'%s'" % tf for tf in j["tfs"]]) + ")"
"""
fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
rows = self.pw.fetchall("cre_table", """
SELECT JSON_AGG(r) from(
SELECT {fields}, {vtn}.vistaids
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
LEFT JOIN {vtn} ON {vtn}.accession = cre.accession
{whereClause}
ORDER BY maxz DESC
LIMIT 1000) r
""".format(fields=fields, tn=self.tableName,
vtn = self.assembly + "_vista",
ttn = self.assembly + "_ccres_toptier",
whereClause=whereClause))
r = rows[0][0]
if not r:
r = []
total = len(rows)
if total >= 1000: # reached query limit
total = self._creTableEstimate(curs, whereClause)
return {"cres": r, "total": total}
def _accessions(self, j):
accs = j.get("accessions", [])
if not accs or 0 == len(accs):
return False
if accs and len(accs) > 0:
if type(accs[0]) is dict:
accs = [x["value"] for x in accs if x["checked"]]
accs = [x for x in accs if isaccession(x)]
if accs:
accs = ["'%s'" % x.upper() for x in accs]
accsQuery = "cre.accession IN (%s)" % ','.join(accs)
self.whereClauses.append("(%s)" % accsQuery)
return True
return False
def _where(self, chrom, start, stop):
if chrom and start and stop:
self.whereClauses += ["cre.chrom = '%s'" % chrom,
"int4range(cre.start, cre.stop) && int4range(%s, %s)" % (int(start), int(stop))]
def _creTableEstimate(self, curs, whereClause):
# estimate count
# from https://wiki.postgresql.org/wiki/Count_estimate
# qoute escape from
# http://stackoverflow.com/a/12320729
r = self.pw.fetchone("_creTableEstimate", """
SELECT count(0)
FROM {tn} AS cre
{wc}
""".format(tn=self.tableName, wc=whereClause))
return r[0]
def _notCtSpecific(self, j):
# use max zscores
allmap = {"dnase": "dnase_max",
"promoter": "h3k4me3_max",
"enhancer": "h3k27ac_max",
"ctcf": "ctcf_max"}
for x in ["dnase", "promoter", "enhancer", "ctcf"]:
if "rank_%s_start" % x in j and "rank_%s_end" % x in j:
_range = [j["rank_%s_start" % x],
j["rank_%s_end" % x]]
self.whereClauses.append("(%s)" % " and ".join(
["cre.%s >= %f" % (allmap[x], _range[0]),
"cre.%s <= %f" % (allmap[x], _range[1])]))
self.fields.append("cre.%s AS %s_zscore" % (allmap[x], x))
def _ctSpecific(self, ct, j):
self.ctSpecifc["ct"] = "'" + ct + "'"
for name, exp in [("dnase", "dnase"),
("promoter", "h3k4me3"),
("enhancer", "h3k27ac"),
("ctcf", "ctcf")]:
if ct not in self.ctmap[name]:
self.fields.append("'' AS %s_zscore" % (name))
self.ctSpecifc[name + "_zscore"] = "null"
continue
cti = self.ctmap[name][ct]
self.fields.append("cre.%s_zscores[%d] AS %s_zscore" % (exp, cti, name))
self.ctSpecifc[name + "_zscore"] = "cre.%s_zscores[%d]" % (exp, cti)
if "rank_%s_start" % name in j and "rank_%s_end" % name in j:
_range = [j["rank_%s_start" % name],
j["rank_%s_end" % name]]
minDefault = -10.0 # must match slider default
maxDefault = 10.0 # must match slider default
if isclose(_range[0], minDefault) and isclose(_range[1], maxDefault):
continue # not actually filtering on zscore, yet...
if not isclose(_range[0], minDefault) and not isclose(_range[1], maxDefault):
self.whereClauses.append("(%s)" % " and ".join(
["cre.%s_zscores[%d] >= %f" % (exp, cti, _range[0]),
"cre.%s_zscores[%d] <= %f" % (exp, cti, _range[1])]))
elif not isclose(_range[0], minDefault):
self.whereClauses.append("(%s)" %
"cre.%s_zscores[%d] >= %f" % (exp, cti, _range[0]))
elif not isclose(_range[1], maxDefault):
self.whereClauses.append("(%s)" %
"cre.%s_zscores[%d] <= %f" % (exp, cti, _range[1]))
def creTableDownloadBed(self, j, fnp):
chrom = checkChrom(self.assembly, j)
start = j.get("coord_start", 0)
stop = j.get("coord_end", 0)
fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
fields = ', '.join(["cre.chrom", "cre.start",
"cre.stop",
"cre.accession", "dnase_max"])
q = """
COPY (
SELECT {fields}
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
{whereClause}
) to STDOUT
with DELIMITER E'\t'
""".format(fields=fields,
tn=self.tableName,
ttn = self.assembly + "_ccres_toptier",
whereClause=whereClause)
self.pw.copy_expert("_cre_table_bed", q, fnp)
def creTableDownloadJson(self, j, fnp):
chrom = checkChrom(self.assembly, j)
start = j.get("coord_start", None)
stop = j.get("coord_end", None)
fields, whereClause = self._buildWhereStatement(j, chrom, start, stop)
q = """
copy (
SELECT JSON_AGG(r) from (
SELECT *
FROM {tn} AS cre
INNER JOIN {ttn} ON {ttn}.accession = cre.accession
{whereClause}
) r
) to STDOUT
with DELIMITER E'\t'
""".format(tn=self.tableName, ttn = self.assembly + "_ccres_toptier",
whereClause=whereClause)
sf = io.StringIO()
self.pw.copy_expert_file_handle("_cre_table_json", q, sf)
sf.seek(0)
with open(fnp, 'w') as f:
for line in sf.readlines():
f.write(line.replace("\\n", "")) | 'strand': row[6].rstrip(),
'exon_number': row[5],
'parent': row[7],
}) | random_line_split |
main.go | package main
import (
"encoding/binary"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/dgryski/go-metro"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"path"
"regexp"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
type Stats struct {
Tags map[string]uint64
Offset uint64
File string
}
type PostingsList struct {
descriptor *os.File
offset uint64
}
func (this *PostingsList) newTermQuery() *Term {
postings := make([]byte, this.offset)
n, err := this.descriptor.ReadAt(postings, 0)
if n != len(postings) && err != nil {
postings = []byte{}
}
return &Term{
cursor: -1,
postings: postings,
QueryBase: QueryBase{NOT_READY},
}
}
type StoreItem struct {
path string
root string
descriptor *os.File
index map[string]*PostingsList
offset uint64
sync.RWMutex
}
var nonAlphaNumeric = regexp.MustCompile("[^a-zA-Z0-9]+")
func sanitize(s string) string {
return nonAlphaNumeric.ReplaceAllLiteralString(s, "")
}
func openAtEnd(filePath string) (*os.File, uint64) {
f, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, 0600)
if err != nil {
panic(err)
}
offset, err := f.Seek(0, 2)
if err != nil {
panic(err)
}
log.Printf("openning: %s with offset: %d", filePath, offset)
return f, uint64(offset)
}
func NewStorage(root string) *StoreItem {
os.MkdirAll(root, 0700)
filePath := path.Join(root, "append.raw")
f, offset := openAtEnd(filePath)
si := &StoreItem{
offset: uint64(offset),
path: filePath,
index: map[string]*PostingsList{},
descriptor: f,
root: root,
}
files, err := ioutil.ReadDir(root)
if err != nil {
panic(err)
}
for _, dirFile := range files {
if strings.HasSuffix(dirFile.Name(), ".postings") {
dot := strings.IndexRune(dirFile.Name(), '.')
idxName := dirFile.Name()[:dot]
si.CreatePostingsList(idxName)
}
}
return si
}
func (this *StoreItem) | (name string) *PostingsList {
this.RLock()
name = sanitize(name)
if p, ok := this.index[name]; ok {
this.RUnlock()
return p
}
this.RUnlock()
this.Lock()
defer this.Unlock()
if p, ok := this.index[name]; ok {
return p
}
f, offset := openAtEnd(path.Join(this.root, fmt.Sprintf("%s.postings", name)))
p := &PostingsList{
descriptor: f,
offset: offset,
}
this.index[name] = p
return p
}
func (this *StoreItem) stats() *Stats {
out := &Stats{
Tags: make(map[string]uint64),
Offset: this.offset,
File: this.path,
}
this.RLock()
defer this.RUnlock()
for name, index := range this.index {
out.Tags[name] = index.offset / 8
}
return out
}
func (this *StoreItem) scan(cb func(uint64, []byte) bool) {
SCAN:
for offset := uint64(0); offset < this.offset; {
// this is lockless, which means we could read a header,
// but the data might be incomplete
dataLen, _, allocSize, err := readHeader(this.descriptor, offset)
if err != nil {
break SCAN
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
break SCAN
}
if !cb(offset, output) {
break SCAN
}
offset += uint64(allocSize) + uint64(headerLen)
}
}
func (this *StoreItem) ExecuteQuery(query Query, cb func(uint64, []byte) bool) {
for query.Next() != NO_MORE {
offset := uint64(query.GetDocId())
dataLen, _, _, err := readHeader(this.descriptor, offset)
if err != nil {
break
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
break
}
if !cb(offset, output) {
break
}
}
}
const headerLen = 4 + 8 + 4 + 4
func readHeader(file *os.File, offset uint64) (uint32, uint64, uint32, error) {
headerBytes := make([]byte, headerLen)
_, err := file.ReadAt(headerBytes, int64(offset))
if err != nil {
return 0, 0, 0, err
}
dataLen := binary.LittleEndian.Uint32(headerBytes[0:])
nextBlock := binary.LittleEndian.Uint64(headerBytes[4:])
allocSize := binary.LittleEndian.Uint32(headerBytes[12:])
checksum := binary.LittleEndian.Uint32(headerBytes[16:])
computedChecksum := crc(headerBytes[0:16])
if checksum != computedChecksum {
return 0, 0, 0, errors.New(fmt.Sprintf("wrong checksum got: %d, expected: %d", computedChecksum, checksum))
}
return dataLen, nextBlock, allocSize, nil
}
func (this *StoreItem) writeHeader(currentOffset uint64, dataLen uint32, nextBlockOffset uint64, allocSize uint32) {
header := make([]byte, headerLen)
binary.LittleEndian.PutUint32(header[0:], uint32(dataLen))
binary.LittleEndian.PutUint64(header[4:], uint64(0))
binary.LittleEndian.PutUint32(header[12:], allocSize)
checksum := crc(header[0:16])
binary.LittleEndian.PutUint32(header[16:], checksum)
_, err := this.descriptor.WriteAt(header, int64(currentOffset))
if err != nil {
panic(err)
}
}
func (this *StoreItem) appendPostings(name string, value uint64) {
p := this.CreatePostingsList(name)
data := make([]byte, 8)
binary.LittleEndian.PutUint64(data, value)
// add it to the end
offset := atomic.AddUint64(&p.offset, uint64(8)) - 8
p.descriptor.WriteAt(data, int64(offset))
}
func (this *StoreItem) read(offset uint64) (uint32, []byte, error) {
// lockless read
dataLen, _, _, err := readHeader(this.descriptor, offset)
if err != nil {
return 0, nil, err
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
return 0, nil, err
}
return dataLen, output, nil
}
func (this *StoreItem) append(allocSize uint32, data io.Reader) (uint64, error) {
dataRaw, err := ioutil.ReadAll(data)
if err != nil {
return 0, err
}
if len(dataRaw) > int(allocSize) {
allocSize = uint32(len(dataRaw))
}
offset := atomic.AddUint64(&this.offset, uint64(allocSize+headerLen))
currentOffset := offset - uint64(allocSize+headerLen)
_, err = this.descriptor.WriteAt(dataRaw, int64(currentOffset+headerLen))
if err != nil {
panic(err)
}
this.writeHeader(currentOffset, uint32(len(dataRaw)), 0, allocSize)
return currentOffset, nil
}
func (this *StoreItem) modify(offset uint64, pos int32, data io.Reader) error {
dataRaw, err := ioutil.ReadAll(data)
if err != nil {
return err
}
oldDataLen, _, allocSize, err := readHeader(this.descriptor, offset)
if err != nil {
return err
}
if pos < 0 {
pos = int32(oldDataLen)
}
end := uint32(pos) + uint32(len(dataRaw))
if end > allocSize {
return errors.New("pos+len > allocSize")
}
_, err = this.descriptor.WriteAt(dataRaw, int64(offset+uint64(headerLen)+uint64(pos)))
if err != nil {
panic(err)
}
if end > oldDataLen {
// need to recompute the header
this.writeHeader(offset, end, 0, allocSize)
}
return nil
}
func crc(b []byte) uint32 {
return uint32(metro.Hash64(b, 0) >> uint64(32))
}
type MultiStore struct {
stores map[string]*StoreItem
root string
sync.RWMutex
}
func (this *MultiStore) find(storageIdentifier string) *StoreItem {
if storageIdentifier == "" {
storageIdentifier = "default"
}
this.RLock()
storage, ok := this.stores[storageIdentifier]
this.RUnlock()
if !ok {
this.Lock()
defer this.Unlock()
storage, ok = this.stores[storageIdentifier]
if !ok {
storage = NewStorage(path.Join(this.root, storageIdentifier))
this.stores[storageIdentifier] = storage
}
}
return storage
}
func (this *MultiStore) close(storageIdentifier string) {
this.Lock()
defer this.Unlock()
if storageIdentifier == "" {
storageIdentifier = "default"
}
storage, ok := this.stores[storageIdentifier]
if ok {
storage.descriptor.Close()
log.Printf("closing: %s", storage.path)
}
delete(this.stores, storageIdentifier)
}
func (this *MultiStore) modify(storageIdentifier string, offset uint64, pos int32, data io.Reader) error {
return this.find(storageIdentifier).modify(offset, pos, data)
}
func (this *MultiStore) stats(storageIdentifier string) *Stats {
return this.find(storageIdentifier).stats()
}
func (this *MultiStore) append(storageIdentifier string, allocSize uint32, data io.Reader) (uint64, error) {
return this.find(storageIdentifier).append(allocSize, data)
}
func (this *MultiStore) read(storageIdentifier string, offset uint64) (uint32, []byte, error) {
return this.find(storageIdentifier).read(offset)
}
func (this *MultiStore) scan(storageIdentifier string, cb func(uint64, []byte) bool) {
this.find(storageIdentifier).scan(cb)
}
func (this *MultiStore) ExecuteQuery(storageIdentifier string, query Query, cb func(uint64, []byte) bool) {
this.find(storageIdentifier).ExecuteQuery(query, cb)
}
func makeTimestamp() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
}
func Log(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
t0 := makeTimestamp()
handler.ServeHTTP(w, r)
log.Printf("%s %s %s took: %d", r.RemoteAddr, r.Method, r.URL, makeTimestamp()-t0)
})
}
func getTags(tags string) []string {
if tags == "" {
return []string{}
}
splitted := strings.Split(tags, ",")
out := []string{}
for _, s := range splitted {
if s != "" {
out = append(out, s)
}
}
return out
}
const namespaceKey = "namespace"
const posKey = "pos"
const allocSizeKey = "allocSize"
const offsetKey = "offset"
const tagsKey = "tags"
func main() {
var pbind = flag.String("bind", ":8000", "address to bind to")
var proot = flag.String("root", "/tmp/rochefort", "root directory")
var pquiet = flag.Bool("quiet", false, "dont print any log messages")
flag.Parse()
multiStore := &MultiStore{
stores: make(map[string]*StoreItem),
root: *proot,
}
os.MkdirAll(*proot, 0700)
namespaces, err := ioutil.ReadDir(*proot)
if err != nil {
panic(err)
}
// open all files in the namespace
NAMESPACE:
for _, namespace := range namespaces {
if namespace.IsDir() {
files, err := ioutil.ReadDir(path.Join(*proot, namespace.Name()))
if err == nil {
for _, file := range files {
if strings.HasSuffix(file.Name(), ".raw") {
multiStore.find(namespace.Name())
continue NAMESPACE
}
}
}
}
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-sigs
log.Printf("\nReceived an interrupt, stopping services...\n")
multiStore.Lock() // dont unlock it
for _, storage := range multiStore.stores {
storage.Lock() // dont unlock it
storage.descriptor.Close()
log.Printf("closing: %s", storage.path)
}
os.Exit(0)
}()
http.HandleFunc("/close", func(w http.ResponseWriter, r *http.Request) {
multiStore.close(r.URL.Query().Get(namespaceKey))
w.Header().Set("Content-Type", "application/json")
w.Write([]byte("{\"success\":true}"))
})
http.HandleFunc("/modify", func(w http.ResponseWriter, r *http.Request) {
offset, err := strconv.ParseUint(r.URL.Query().Get(offsetKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
pos, err := strconv.ParseInt(r.URL.Query().Get(posKey), 10, 32)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
err := multiStore.modify(r.URL.Query().Get(namespaceKey), offset, int32(pos), r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte("{\"success\":true}"))
}
}
}
})
http.HandleFunc("/append", func(w http.ResponseWriter, r *http.Request) {
allocSize := uint64(0)
if r.URL.Query().Get(allocSizeKey) != "" {
allocSizeInput, err := strconv.ParseUint(r.URL.Query().Get(allocSizeKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
} else {
allocSize = allocSizeInput
}
}
store := multiStore.find(r.URL.Query().Get(namespaceKey))
offset, err := store.append(uint32(allocSize), r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
tags := getTags(r.URL.Query().Get(tagsKey))
for _, t := range tags {
store.appendPostings(t, offset)
}
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(fmt.Sprintf("{\"offset\":%d}", offset)))
}
})
http.HandleFunc("/get", func(w http.ResponseWriter, r *http.Request) {
offset, err := strconv.ParseUint(r.URL.Query().Get(offsetKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
_, data, err := multiStore.read(r.URL.Query().Get(namespaceKey), offset)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/octet-stream")
w.Write(data)
}
}
})
http.HandleFunc("/scan", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
header := make([]byte, 12)
cb := func(offset uint64, data []byte) bool {
binary.LittleEndian.PutUint32(header[0:], uint32(len(data)))
binary.LittleEndian.PutUint64(header[4:], offset)
_, err := w.Write(header)
if err != nil {
return false
}
_, err = w.Write(data)
if err != nil {
return false
}
return true
}
multiStore.scan(r.URL.Query().Get(namespaceKey), cb)
})
http.HandleFunc("/query", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
defer r.Body.Close()
body, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
var decoded map[string]interface{}
err = json.Unmarshal(body, &decoded)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
stored := multiStore.find(r.URL.Query().Get(namespaceKey))
query, err := fromJSON(stored, decoded)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
header := make([]byte, 12)
cb := func(offset uint64, data []byte) bool {
binary.LittleEndian.PutUint32(header[0:], uint32(len(data)))
binary.LittleEndian.PutUint64(header[4:], offset)
_, err := w.Write(header)
if err != nil {
return false
}
_, err = w.Write(data)
if err != nil {
return false
}
return true
}
stored.ExecuteQuery(query, cb)
})
http.HandleFunc("/stat", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
stats := multiStore.stats(r.URL.Query().Get(namespaceKey))
b, err := json.Marshal(stats)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write(b)
}
})
http.HandleFunc("/getMulti", func(w http.ResponseWriter, r *http.Request) {
dataLenRaw := make([]byte, 4)
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("read: %s", err.Error())))
return
}
namespace := r.URL.Query().Get(namespaceKey)
if len(b)%8 != 0 {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("incomplete read: %d is not multiple of 8", len(b))))
return
}
w.Header().Set("Content-Type", "application/octet-stream")
for i := 0; i < len(b); i += 8 {
offset := binary.LittleEndian.Uint64(b[i:])
_, data, err := multiStore.read(namespace, offset)
// XXX: we ignore the error on purpose
// as the storage is not fsyncing, it could very well lose some updates
// also the data is barely checksummed, so might very well be corrupted
if err == nil {
binary.LittleEndian.PutUint32(dataLenRaw, uint32(len(data)))
_, err = w.Write(dataLenRaw)
if err != nil {
return
}
_, err = w.Write(data)
if err != nil {
return
}
}
}
})
if !*pquiet {
log.Printf("starting http server on %s", *pbind)
err := http.ListenAndServe(*pbind, Log(http.DefaultServeMux))
if err != nil {
log.Fatal(err)
}
} else {
err := http.ListenAndServe(*pbind, http.DefaultServeMux)
if err != nil {
log.Fatal(err)
}
}
}
| CreatePostingsList | identifier_name |
main.go | package main
import (
"encoding/binary"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/dgryski/go-metro"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"path"
"regexp"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
type Stats struct {
Tags map[string]uint64
Offset uint64
File string
}
type PostingsList struct {
descriptor *os.File
offset uint64
}
func (this *PostingsList) newTermQuery() *Term {
postings := make([]byte, this.offset)
n, err := this.descriptor.ReadAt(postings, 0)
if n != len(postings) && err != nil {
postings = []byte{}
}
return &Term{
cursor: -1,
postings: postings,
QueryBase: QueryBase{NOT_READY},
}
}
type StoreItem struct {
path string
root string
descriptor *os.File
index map[string]*PostingsList
offset uint64
sync.RWMutex
}
var nonAlphaNumeric = regexp.MustCompile("[^a-zA-Z0-9]+")
func sanitize(s string) string {
return nonAlphaNumeric.ReplaceAllLiteralString(s, "")
}
func openAtEnd(filePath string) (*os.File, uint64) {
f, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, 0600)
if err != nil {
panic(err)
}
offset, err := f.Seek(0, 2)
if err != nil {
panic(err)
}
log.Printf("openning: %s with offset: %d", filePath, offset)
return f, uint64(offset)
}
func NewStorage(root string) *StoreItem {
os.MkdirAll(root, 0700)
filePath := path.Join(root, "append.raw")
f, offset := openAtEnd(filePath)
si := &StoreItem{
offset: uint64(offset),
path: filePath,
index: map[string]*PostingsList{},
descriptor: f,
root: root,
}
files, err := ioutil.ReadDir(root)
if err != nil {
panic(err)
}
for _, dirFile := range files {
if strings.HasSuffix(dirFile.Name(), ".postings") {
dot := strings.IndexRune(dirFile.Name(), '.')
idxName := dirFile.Name()[:dot]
si.CreatePostingsList(idxName)
}
}
return si
}
func (this *StoreItem) CreatePostingsList(name string) *PostingsList {
this.RLock()
name = sanitize(name)
if p, ok := this.index[name]; ok {
this.RUnlock()
return p
}
this.RUnlock()
this.Lock()
defer this.Unlock()
if p, ok := this.index[name]; ok {
return p
}
f, offset := openAtEnd(path.Join(this.root, fmt.Sprintf("%s.postings", name)))
p := &PostingsList{
descriptor: f,
offset: offset,
}
this.index[name] = p
return p
}
func (this *StoreItem) stats() *Stats {
out := &Stats{
Tags: make(map[string]uint64),
Offset: this.offset,
File: this.path,
}
this.RLock()
defer this.RUnlock()
for name, index := range this.index {
out.Tags[name] = index.offset / 8
}
return out
}
func (this *StoreItem) scan(cb func(uint64, []byte) bool) {
SCAN:
for offset := uint64(0); offset < this.offset; {
// this is lockless, which means we could read a header,
// but the data might be incomplete
dataLen, _, allocSize, err := readHeader(this.descriptor, offset)
if err != nil {
break SCAN
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
break SCAN
}
if !cb(offset, output) {
break SCAN
}
offset += uint64(allocSize) + uint64(headerLen)
}
}
func (this *StoreItem) ExecuteQuery(query Query, cb func(uint64, []byte) bool) {
for query.Next() != NO_MORE {
offset := uint64(query.GetDocId())
dataLen, _, _, err := readHeader(this.descriptor, offset)
if err != nil {
break
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
break
}
if !cb(offset, output) {
break
}
}
}
const headerLen = 4 + 8 + 4 + 4
func readHeader(file *os.File, offset uint64) (uint32, uint64, uint32, error) {
headerBytes := make([]byte, headerLen)
_, err := file.ReadAt(headerBytes, int64(offset))
if err != nil {
return 0, 0, 0, err
}
dataLen := binary.LittleEndian.Uint32(headerBytes[0:])
nextBlock := binary.LittleEndian.Uint64(headerBytes[4:])
allocSize := binary.LittleEndian.Uint32(headerBytes[12:])
checksum := binary.LittleEndian.Uint32(headerBytes[16:])
computedChecksum := crc(headerBytes[0:16])
if checksum != computedChecksum {
return 0, 0, 0, errors.New(fmt.Sprintf("wrong checksum got: %d, expected: %d", computedChecksum, checksum))
}
return dataLen, nextBlock, allocSize, nil
}
func (this *StoreItem) writeHeader(currentOffset uint64, dataLen uint32, nextBlockOffset uint64, allocSize uint32) {
header := make([]byte, headerLen)
binary.LittleEndian.PutUint32(header[0:], uint32(dataLen))
binary.LittleEndian.PutUint64(header[4:], uint64(0))
binary.LittleEndian.PutUint32(header[12:], allocSize)
checksum := crc(header[0:16])
binary.LittleEndian.PutUint32(header[16:], checksum)
_, err := this.descriptor.WriteAt(header, int64(currentOffset))
if err != nil {
panic(err)
}
}
func (this *StoreItem) appendPostings(name string, value uint64) {
p := this.CreatePostingsList(name)
data := make([]byte, 8)
binary.LittleEndian.PutUint64(data, value)
// add it to the end
offset := atomic.AddUint64(&p.offset, uint64(8)) - 8
p.descriptor.WriteAt(data, int64(offset))
}
func (this *StoreItem) read(offset uint64) (uint32, []byte, error) {
// lockless read
dataLen, _, _, err := readHeader(this.descriptor, offset)
if err != nil {
return 0, nil, err
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
return 0, nil, err
}
return dataLen, output, nil
}
func (this *StoreItem) append(allocSize uint32, data io.Reader) (uint64, error) {
dataRaw, err := ioutil.ReadAll(data)
if err != nil {
return 0, err
}
if len(dataRaw) > int(allocSize) {
allocSize = uint32(len(dataRaw))
}
offset := atomic.AddUint64(&this.offset, uint64(allocSize+headerLen))
currentOffset := offset - uint64(allocSize+headerLen)
_, err = this.descriptor.WriteAt(dataRaw, int64(currentOffset+headerLen))
if err != nil {
panic(err)
}
this.writeHeader(currentOffset, uint32(len(dataRaw)), 0, allocSize)
return currentOffset, nil
}
func (this *StoreItem) modify(offset uint64, pos int32, data io.Reader) error {
dataRaw, err := ioutil.ReadAll(data)
if err != nil {
return err
}
oldDataLen, _, allocSize, err := readHeader(this.descriptor, offset)
if err != nil {
return err
}
if pos < 0 {
pos = int32(oldDataLen)
}
end := uint32(pos) + uint32(len(dataRaw))
if end > allocSize {
return errors.New("pos+len > allocSize")
}
_, err = this.descriptor.WriteAt(dataRaw, int64(offset+uint64(headerLen)+uint64(pos)))
if err != nil {
panic(err)
}
if end > oldDataLen {
// need to recompute the header
this.writeHeader(offset, end, 0, allocSize)
}
return nil
}
func crc(b []byte) uint32 {
return uint32(metro.Hash64(b, 0) >> uint64(32))
}
type MultiStore struct {
stores map[string]*StoreItem
root string
sync.RWMutex
}
func (this *MultiStore) find(storageIdentifier string) *StoreItem {
if storageIdentifier == "" {
storageIdentifier = "default"
}
this.RLock()
storage, ok := this.stores[storageIdentifier]
this.RUnlock()
if !ok {
this.Lock()
defer this.Unlock()
storage, ok = this.stores[storageIdentifier]
if !ok {
storage = NewStorage(path.Join(this.root, storageIdentifier))
this.stores[storageIdentifier] = storage
}
}
return storage
}
func (this *MultiStore) close(storageIdentifier string) {
this.Lock()
defer this.Unlock()
if storageIdentifier == "" {
storageIdentifier = "default"
}
storage, ok := this.stores[storageIdentifier]
if ok {
storage.descriptor.Close()
log.Printf("closing: %s", storage.path)
}
delete(this.stores, storageIdentifier)
}
func (this *MultiStore) modify(storageIdentifier string, offset uint64, pos int32, data io.Reader) error {
return this.find(storageIdentifier).modify(offset, pos, data)
}
func (this *MultiStore) stats(storageIdentifier string) *Stats {
return this.find(storageIdentifier).stats()
}
func (this *MultiStore) append(storageIdentifier string, allocSize uint32, data io.Reader) (uint64, error) {
return this.find(storageIdentifier).append(allocSize, data)
}
func (this *MultiStore) read(storageIdentifier string, offset uint64) (uint32, []byte, error) {
return this.find(storageIdentifier).read(offset)
}
func (this *MultiStore) scan(storageIdentifier string, cb func(uint64, []byte) bool) {
this.find(storageIdentifier).scan(cb)
}
func (this *MultiStore) ExecuteQuery(storageIdentifier string, query Query, cb func(uint64, []byte) bool) {
this.find(storageIdentifier).ExecuteQuery(query, cb)
}
func makeTimestamp() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
}
func Log(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
t0 := makeTimestamp()
handler.ServeHTTP(w, r)
log.Printf("%s %s %s took: %d", r.RemoteAddr, r.Method, r.URL, makeTimestamp()-t0)
})
}
func getTags(tags string) []string {
if tags == "" {
return []string{}
}
splitted := strings.Split(tags, ",")
out := []string{}
for _, s := range splitted {
if s != "" {
out = append(out, s)
}
}
return out
}
const namespaceKey = "namespace"
const posKey = "pos"
const allocSizeKey = "allocSize"
const offsetKey = "offset"
const tagsKey = "tags"
func main() | {
var pbind = flag.String("bind", ":8000", "address to bind to")
var proot = flag.String("root", "/tmp/rochefort", "root directory")
var pquiet = flag.Bool("quiet", false, "dont print any log messages")
flag.Parse()
multiStore := &MultiStore{
stores: make(map[string]*StoreItem),
root: *proot,
}
os.MkdirAll(*proot, 0700)
namespaces, err := ioutil.ReadDir(*proot)
if err != nil {
panic(err)
}
// open all files in the namespace
NAMESPACE:
for _, namespace := range namespaces {
if namespace.IsDir() {
files, err := ioutil.ReadDir(path.Join(*proot, namespace.Name()))
if err == nil {
for _, file := range files {
if strings.HasSuffix(file.Name(), ".raw") {
multiStore.find(namespace.Name())
continue NAMESPACE
}
}
}
}
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-sigs
log.Printf("\nReceived an interrupt, stopping services...\n")
multiStore.Lock() // dont unlock it
for _, storage := range multiStore.stores {
storage.Lock() // dont unlock it
storage.descriptor.Close()
log.Printf("closing: %s", storage.path)
}
os.Exit(0)
}()
http.HandleFunc("/close", func(w http.ResponseWriter, r *http.Request) {
multiStore.close(r.URL.Query().Get(namespaceKey))
w.Header().Set("Content-Type", "application/json")
w.Write([]byte("{\"success\":true}"))
})
http.HandleFunc("/modify", func(w http.ResponseWriter, r *http.Request) {
offset, err := strconv.ParseUint(r.URL.Query().Get(offsetKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
pos, err := strconv.ParseInt(r.URL.Query().Get(posKey), 10, 32)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
err := multiStore.modify(r.URL.Query().Get(namespaceKey), offset, int32(pos), r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte("{\"success\":true}"))
}
}
}
})
http.HandleFunc("/append", func(w http.ResponseWriter, r *http.Request) {
allocSize := uint64(0)
if r.URL.Query().Get(allocSizeKey) != "" {
allocSizeInput, err := strconv.ParseUint(r.URL.Query().Get(allocSizeKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
} else {
allocSize = allocSizeInput
}
}
store := multiStore.find(r.URL.Query().Get(namespaceKey))
offset, err := store.append(uint32(allocSize), r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
tags := getTags(r.URL.Query().Get(tagsKey))
for _, t := range tags {
store.appendPostings(t, offset)
}
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(fmt.Sprintf("{\"offset\":%d}", offset)))
}
})
http.HandleFunc("/get", func(w http.ResponseWriter, r *http.Request) {
offset, err := strconv.ParseUint(r.URL.Query().Get(offsetKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
_, data, err := multiStore.read(r.URL.Query().Get(namespaceKey), offset)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/octet-stream")
w.Write(data)
}
}
})
http.HandleFunc("/scan", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
header := make([]byte, 12)
cb := func(offset uint64, data []byte) bool {
binary.LittleEndian.PutUint32(header[0:], uint32(len(data)))
binary.LittleEndian.PutUint64(header[4:], offset)
_, err := w.Write(header)
if err != nil {
return false
}
_, err = w.Write(data)
if err != nil {
return false
}
return true
}
multiStore.scan(r.URL.Query().Get(namespaceKey), cb)
})
http.HandleFunc("/query", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
defer r.Body.Close()
body, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
var decoded map[string]interface{}
err = json.Unmarshal(body, &decoded)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
stored := multiStore.find(r.URL.Query().Get(namespaceKey))
query, err := fromJSON(stored, decoded)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
header := make([]byte, 12)
cb := func(offset uint64, data []byte) bool {
binary.LittleEndian.PutUint32(header[0:], uint32(len(data)))
binary.LittleEndian.PutUint64(header[4:], offset)
_, err := w.Write(header)
if err != nil {
return false
}
_, err = w.Write(data)
if err != nil {
return false
}
return true
}
stored.ExecuteQuery(query, cb)
})
http.HandleFunc("/stat", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
stats := multiStore.stats(r.URL.Query().Get(namespaceKey))
b, err := json.Marshal(stats)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write(b)
}
})
http.HandleFunc("/getMulti", func(w http.ResponseWriter, r *http.Request) {
dataLenRaw := make([]byte, 4)
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("read: %s", err.Error())))
return
}
namespace := r.URL.Query().Get(namespaceKey)
if len(b)%8 != 0 {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("incomplete read: %d is not multiple of 8", len(b))))
return
}
w.Header().Set("Content-Type", "application/octet-stream")
for i := 0; i < len(b); i += 8 {
offset := binary.LittleEndian.Uint64(b[i:])
_, data, err := multiStore.read(namespace, offset)
// XXX: we ignore the error on purpose
// as the storage is not fsyncing, it could very well lose some updates
// also the data is barely checksummed, so might very well be corrupted
if err == nil {
binary.LittleEndian.PutUint32(dataLenRaw, uint32(len(data)))
_, err = w.Write(dataLenRaw)
if err != nil {
return
}
_, err = w.Write(data)
if err != nil {
return
}
}
}
})
if !*pquiet {
log.Printf("starting http server on %s", *pbind)
err := http.ListenAndServe(*pbind, Log(http.DefaultServeMux))
if err != nil {
log.Fatal(err)
}
} else {
err := http.ListenAndServe(*pbind, http.DefaultServeMux)
if err != nil {
log.Fatal(err)
}
}
} | identifier_body | |
main.go | package main
import (
"encoding/binary"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/dgryski/go-metro"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"path"
"regexp"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
type Stats struct {
Tags map[string]uint64
Offset uint64
File string
}
type PostingsList struct {
descriptor *os.File
offset uint64
}
func (this *PostingsList) newTermQuery() *Term {
postings := make([]byte, this.offset)
n, err := this.descriptor.ReadAt(postings, 0)
if n != len(postings) && err != nil {
postings = []byte{}
}
return &Term{
cursor: -1,
postings: postings,
QueryBase: QueryBase{NOT_READY},
}
}
type StoreItem struct {
path string
root string
descriptor *os.File
index map[string]*PostingsList
offset uint64
sync.RWMutex
}
var nonAlphaNumeric = regexp.MustCompile("[^a-zA-Z0-9]+")
func sanitize(s string) string {
return nonAlphaNumeric.ReplaceAllLiteralString(s, "")
}
func openAtEnd(filePath string) (*os.File, uint64) {
f, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, 0600)
if err != nil {
panic(err)
}
offset, err := f.Seek(0, 2)
if err != nil {
panic(err)
}
log.Printf("openning: %s with offset: %d", filePath, offset)
return f, uint64(offset)
}
func NewStorage(root string) *StoreItem {
os.MkdirAll(root, 0700)
filePath := path.Join(root, "append.raw")
f, offset := openAtEnd(filePath)
si := &StoreItem{
offset: uint64(offset),
path: filePath,
index: map[string]*PostingsList{},
descriptor: f,
root: root,
}
files, err := ioutil.ReadDir(root)
if err != nil {
panic(err)
}
for _, dirFile := range files {
if strings.HasSuffix(dirFile.Name(), ".postings") {
dot := strings.IndexRune(dirFile.Name(), '.')
idxName := dirFile.Name()[:dot]
si.CreatePostingsList(idxName)
}
}
return si
}
func (this *StoreItem) CreatePostingsList(name string) *PostingsList {
this.RLock()
name = sanitize(name)
if p, ok := this.index[name]; ok {
this.RUnlock()
return p
}
this.RUnlock()
this.Lock()
defer this.Unlock()
if p, ok := this.index[name]; ok {
return p
}
f, offset := openAtEnd(path.Join(this.root, fmt.Sprintf("%s.postings", name)))
p := &PostingsList{
descriptor: f,
offset: offset,
}
this.index[name] = p
return p
}
func (this *StoreItem) stats() *Stats {
out := &Stats{
Tags: make(map[string]uint64),
Offset: this.offset,
File: this.path,
}
this.RLock()
defer this.RUnlock()
for name, index := range this.index {
out.Tags[name] = index.offset / 8
}
return out
}
func (this *StoreItem) scan(cb func(uint64, []byte) bool) {
SCAN:
for offset := uint64(0); offset < this.offset; {
// this is lockless, which means we could read a header,
// but the data might be incomplete
dataLen, _, allocSize, err := readHeader(this.descriptor, offset)
if err != nil {
break SCAN
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
break SCAN
}
if !cb(offset, output) {
break SCAN
}
offset += uint64(allocSize) + uint64(headerLen)
}
}
func (this *StoreItem) ExecuteQuery(query Query, cb func(uint64, []byte) bool) {
for query.Next() != NO_MORE {
offset := uint64(query.GetDocId())
dataLen, _, _, err := readHeader(this.descriptor, offset)
if err != nil {
break
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
break
}
if !cb(offset, output) {
break
}
}
}
const headerLen = 4 + 8 + 4 + 4
func readHeader(file *os.File, offset uint64) (uint32, uint64, uint32, error) {
headerBytes := make([]byte, headerLen)
_, err := file.ReadAt(headerBytes, int64(offset))
if err != nil {
return 0, 0, 0, err
}
dataLen := binary.LittleEndian.Uint32(headerBytes[0:])
nextBlock := binary.LittleEndian.Uint64(headerBytes[4:])
allocSize := binary.LittleEndian.Uint32(headerBytes[12:])
checksum := binary.LittleEndian.Uint32(headerBytes[16:])
computedChecksum := crc(headerBytes[0:16])
if checksum != computedChecksum {
return 0, 0, 0, errors.New(fmt.Sprintf("wrong checksum got: %d, expected: %d", computedChecksum, checksum))
}
return dataLen, nextBlock, allocSize, nil
}
func (this *StoreItem) writeHeader(currentOffset uint64, dataLen uint32, nextBlockOffset uint64, allocSize uint32) {
header := make([]byte, headerLen)
binary.LittleEndian.PutUint32(header[0:], uint32(dataLen))
binary.LittleEndian.PutUint64(header[4:], uint64(0))
binary.LittleEndian.PutUint32(header[12:], allocSize)
checksum := crc(header[0:16])
binary.LittleEndian.PutUint32(header[16:], checksum)
_, err := this.descriptor.WriteAt(header, int64(currentOffset))
if err != nil {
panic(err)
}
}
func (this *StoreItem) appendPostings(name string, value uint64) {
p := this.CreatePostingsList(name)
data := make([]byte, 8)
binary.LittleEndian.PutUint64(data, value)
// add it to the end
offset := atomic.AddUint64(&p.offset, uint64(8)) - 8
p.descriptor.WriteAt(data, int64(offset))
}
func (this *StoreItem) read(offset uint64) (uint32, []byte, error) {
// lockless read
dataLen, _, _, err := readHeader(this.descriptor, offset)
if err != nil {
return 0, nil, err
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
return 0, nil, err
}
return dataLen, output, nil
}
func (this *StoreItem) append(allocSize uint32, data io.Reader) (uint64, error) {
dataRaw, err := ioutil.ReadAll(data)
if err != nil {
return 0, err
}
if len(dataRaw) > int(allocSize) {
allocSize = uint32(len(dataRaw))
}
offset := atomic.AddUint64(&this.offset, uint64(allocSize+headerLen))
currentOffset := offset - uint64(allocSize+headerLen)
_, err = this.descriptor.WriteAt(dataRaw, int64(currentOffset+headerLen))
if err != nil {
panic(err)
}
this.writeHeader(currentOffset, uint32(len(dataRaw)), 0, allocSize)
return currentOffset, nil
}
func (this *StoreItem) modify(offset uint64, pos int32, data io.Reader) error {
dataRaw, err := ioutil.ReadAll(data)
if err != nil {
return err
}
oldDataLen, _, allocSize, err := readHeader(this.descriptor, offset)
if err != nil {
return err
}
if pos < 0 {
pos = int32(oldDataLen)
}
end := uint32(pos) + uint32(len(dataRaw))
if end > allocSize {
return errors.New("pos+len > allocSize")
}
_, err = this.descriptor.WriteAt(dataRaw, int64(offset+uint64(headerLen)+uint64(pos)))
if err != nil {
panic(err)
}
if end > oldDataLen {
// need to recompute the header
this.writeHeader(offset, end, 0, allocSize)
}
return nil
}
func crc(b []byte) uint32 {
return uint32(metro.Hash64(b, 0) >> uint64(32))
}
type MultiStore struct {
stores map[string]*StoreItem
root string
sync.RWMutex
}
func (this *MultiStore) find(storageIdentifier string) *StoreItem {
if storageIdentifier == "" {
storageIdentifier = "default"
}
this.RLock()
storage, ok := this.stores[storageIdentifier]
this.RUnlock()
if !ok {
this.Lock()
defer this.Unlock()
storage, ok = this.stores[storageIdentifier]
if !ok {
storage = NewStorage(path.Join(this.root, storageIdentifier))
this.stores[storageIdentifier] = storage
}
}
return storage
}
func (this *MultiStore) close(storageIdentifier string) {
this.Lock()
defer this.Unlock()
if storageIdentifier == "" {
storageIdentifier = "default"
}
storage, ok := this.stores[storageIdentifier]
if ok |
delete(this.stores, storageIdentifier)
}
func (this *MultiStore) modify(storageIdentifier string, offset uint64, pos int32, data io.Reader) error {
return this.find(storageIdentifier).modify(offset, pos, data)
}
func (this *MultiStore) stats(storageIdentifier string) *Stats {
return this.find(storageIdentifier).stats()
}
func (this *MultiStore) append(storageIdentifier string, allocSize uint32, data io.Reader) (uint64, error) {
return this.find(storageIdentifier).append(allocSize, data)
}
func (this *MultiStore) read(storageIdentifier string, offset uint64) (uint32, []byte, error) {
return this.find(storageIdentifier).read(offset)
}
func (this *MultiStore) scan(storageIdentifier string, cb func(uint64, []byte) bool) {
this.find(storageIdentifier).scan(cb)
}
func (this *MultiStore) ExecuteQuery(storageIdentifier string, query Query, cb func(uint64, []byte) bool) {
this.find(storageIdentifier).ExecuteQuery(query, cb)
}
func makeTimestamp() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
}
func Log(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
t0 := makeTimestamp()
handler.ServeHTTP(w, r)
log.Printf("%s %s %s took: %d", r.RemoteAddr, r.Method, r.URL, makeTimestamp()-t0)
})
}
func getTags(tags string) []string {
if tags == "" {
return []string{}
}
splitted := strings.Split(tags, ",")
out := []string{}
for _, s := range splitted {
if s != "" {
out = append(out, s)
}
}
return out
}
const namespaceKey = "namespace"
const posKey = "pos"
const allocSizeKey = "allocSize"
const offsetKey = "offset"
const tagsKey = "tags"
func main() {
var pbind = flag.String("bind", ":8000", "address to bind to")
var proot = flag.String("root", "/tmp/rochefort", "root directory")
var pquiet = flag.Bool("quiet", false, "dont print any log messages")
flag.Parse()
multiStore := &MultiStore{
stores: make(map[string]*StoreItem),
root: *proot,
}
os.MkdirAll(*proot, 0700)
namespaces, err := ioutil.ReadDir(*proot)
if err != nil {
panic(err)
}
// open all files in the namespace
NAMESPACE:
for _, namespace := range namespaces {
if namespace.IsDir() {
files, err := ioutil.ReadDir(path.Join(*proot, namespace.Name()))
if err == nil {
for _, file := range files {
if strings.HasSuffix(file.Name(), ".raw") {
multiStore.find(namespace.Name())
continue NAMESPACE
}
}
}
}
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-sigs
log.Printf("\nReceived an interrupt, stopping services...\n")
multiStore.Lock() // dont unlock it
for _, storage := range multiStore.stores {
storage.Lock() // dont unlock it
storage.descriptor.Close()
log.Printf("closing: %s", storage.path)
}
os.Exit(0)
}()
http.HandleFunc("/close", func(w http.ResponseWriter, r *http.Request) {
multiStore.close(r.URL.Query().Get(namespaceKey))
w.Header().Set("Content-Type", "application/json")
w.Write([]byte("{\"success\":true}"))
})
http.HandleFunc("/modify", func(w http.ResponseWriter, r *http.Request) {
offset, err := strconv.ParseUint(r.URL.Query().Get(offsetKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
pos, err := strconv.ParseInt(r.URL.Query().Get(posKey), 10, 32)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
err := multiStore.modify(r.URL.Query().Get(namespaceKey), offset, int32(pos), r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte("{\"success\":true}"))
}
}
}
})
http.HandleFunc("/append", func(w http.ResponseWriter, r *http.Request) {
allocSize := uint64(0)
if r.URL.Query().Get(allocSizeKey) != "" {
allocSizeInput, err := strconv.ParseUint(r.URL.Query().Get(allocSizeKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
} else {
allocSize = allocSizeInput
}
}
store := multiStore.find(r.URL.Query().Get(namespaceKey))
offset, err := store.append(uint32(allocSize), r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
tags := getTags(r.URL.Query().Get(tagsKey))
for _, t := range tags {
store.appendPostings(t, offset)
}
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(fmt.Sprintf("{\"offset\":%d}", offset)))
}
})
http.HandleFunc("/get", func(w http.ResponseWriter, r *http.Request) {
offset, err := strconv.ParseUint(r.URL.Query().Get(offsetKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
_, data, err := multiStore.read(r.URL.Query().Get(namespaceKey), offset)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/octet-stream")
w.Write(data)
}
}
})
http.HandleFunc("/scan", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
header := make([]byte, 12)
cb := func(offset uint64, data []byte) bool {
binary.LittleEndian.PutUint32(header[0:], uint32(len(data)))
binary.LittleEndian.PutUint64(header[4:], offset)
_, err := w.Write(header)
if err != nil {
return false
}
_, err = w.Write(data)
if err != nil {
return false
}
return true
}
multiStore.scan(r.URL.Query().Get(namespaceKey), cb)
})
http.HandleFunc("/query", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
defer r.Body.Close()
body, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
var decoded map[string]interface{}
err = json.Unmarshal(body, &decoded)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
stored := multiStore.find(r.URL.Query().Get(namespaceKey))
query, err := fromJSON(stored, decoded)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
header := make([]byte, 12)
cb := func(offset uint64, data []byte) bool {
binary.LittleEndian.PutUint32(header[0:], uint32(len(data)))
binary.LittleEndian.PutUint64(header[4:], offset)
_, err := w.Write(header)
if err != nil {
return false
}
_, err = w.Write(data)
if err != nil {
return false
}
return true
}
stored.ExecuteQuery(query, cb)
})
http.HandleFunc("/stat", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
stats := multiStore.stats(r.URL.Query().Get(namespaceKey))
b, err := json.Marshal(stats)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write(b)
}
})
http.HandleFunc("/getMulti", func(w http.ResponseWriter, r *http.Request) {
dataLenRaw := make([]byte, 4)
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("read: %s", err.Error())))
return
}
namespace := r.URL.Query().Get(namespaceKey)
if len(b)%8 != 0 {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("incomplete read: %d is not multiple of 8", len(b))))
return
}
w.Header().Set("Content-Type", "application/octet-stream")
for i := 0; i < len(b); i += 8 {
offset := binary.LittleEndian.Uint64(b[i:])
_, data, err := multiStore.read(namespace, offset)
// XXX: we ignore the error on purpose
// as the storage is not fsyncing, it could very well lose some updates
// also the data is barely checksummed, so might very well be corrupted
if err == nil {
binary.LittleEndian.PutUint32(dataLenRaw, uint32(len(data)))
_, err = w.Write(dataLenRaw)
if err != nil {
return
}
_, err = w.Write(data)
if err != nil {
return
}
}
}
})
if !*pquiet {
log.Printf("starting http server on %s", *pbind)
err := http.ListenAndServe(*pbind, Log(http.DefaultServeMux))
if err != nil {
log.Fatal(err)
}
} else {
err := http.ListenAndServe(*pbind, http.DefaultServeMux)
if err != nil {
log.Fatal(err)
}
}
}
| {
storage.descriptor.Close()
log.Printf("closing: %s", storage.path)
} | conditional_block |
main.go | package main
import (
"encoding/binary"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/dgryski/go-metro"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"path"
"regexp"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
)
type Stats struct {
Tags map[string]uint64
Offset uint64
File string
}
type PostingsList struct {
descriptor *os.File
offset uint64
}
func (this *PostingsList) newTermQuery() *Term {
postings := make([]byte, this.offset)
n, err := this.descriptor.ReadAt(postings, 0)
if n != len(postings) && err != nil {
postings = []byte{}
}
return &Term{
cursor: -1,
postings: postings,
QueryBase: QueryBase{NOT_READY},
}
}
type StoreItem struct {
path string
root string
descriptor *os.File
index map[string]*PostingsList
offset uint64
sync.RWMutex
}
var nonAlphaNumeric = regexp.MustCompile("[^a-zA-Z0-9]+")
func sanitize(s string) string {
return nonAlphaNumeric.ReplaceAllLiteralString(s, "")
}
func openAtEnd(filePath string) (*os.File, uint64) {
f, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, 0600)
if err != nil {
panic(err)
}
offset, err := f.Seek(0, 2)
if err != nil {
panic(err)
}
log.Printf("openning: %s with offset: %d", filePath, offset) | }
func NewStorage(root string) *StoreItem {
os.MkdirAll(root, 0700)
filePath := path.Join(root, "append.raw")
f, offset := openAtEnd(filePath)
si := &StoreItem{
offset: uint64(offset),
path: filePath,
index: map[string]*PostingsList{},
descriptor: f,
root: root,
}
files, err := ioutil.ReadDir(root)
if err != nil {
panic(err)
}
for _, dirFile := range files {
if strings.HasSuffix(dirFile.Name(), ".postings") {
dot := strings.IndexRune(dirFile.Name(), '.')
idxName := dirFile.Name()[:dot]
si.CreatePostingsList(idxName)
}
}
return si
}
func (this *StoreItem) CreatePostingsList(name string) *PostingsList {
this.RLock()
name = sanitize(name)
if p, ok := this.index[name]; ok {
this.RUnlock()
return p
}
this.RUnlock()
this.Lock()
defer this.Unlock()
if p, ok := this.index[name]; ok {
return p
}
f, offset := openAtEnd(path.Join(this.root, fmt.Sprintf("%s.postings", name)))
p := &PostingsList{
descriptor: f,
offset: offset,
}
this.index[name] = p
return p
}
func (this *StoreItem) stats() *Stats {
out := &Stats{
Tags: make(map[string]uint64),
Offset: this.offset,
File: this.path,
}
this.RLock()
defer this.RUnlock()
for name, index := range this.index {
out.Tags[name] = index.offset / 8
}
return out
}
func (this *StoreItem) scan(cb func(uint64, []byte) bool) {
SCAN:
for offset := uint64(0); offset < this.offset; {
// this is lockless, which means we could read a header,
// but the data might be incomplete
dataLen, _, allocSize, err := readHeader(this.descriptor, offset)
if err != nil {
break SCAN
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
break SCAN
}
if !cb(offset, output) {
break SCAN
}
offset += uint64(allocSize) + uint64(headerLen)
}
}
func (this *StoreItem) ExecuteQuery(query Query, cb func(uint64, []byte) bool) {
for query.Next() != NO_MORE {
offset := uint64(query.GetDocId())
dataLen, _, _, err := readHeader(this.descriptor, offset)
if err != nil {
break
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
break
}
if !cb(offset, output) {
break
}
}
}
const headerLen = 4 + 8 + 4 + 4
func readHeader(file *os.File, offset uint64) (uint32, uint64, uint32, error) {
headerBytes := make([]byte, headerLen)
_, err := file.ReadAt(headerBytes, int64(offset))
if err != nil {
return 0, 0, 0, err
}
dataLen := binary.LittleEndian.Uint32(headerBytes[0:])
nextBlock := binary.LittleEndian.Uint64(headerBytes[4:])
allocSize := binary.LittleEndian.Uint32(headerBytes[12:])
checksum := binary.LittleEndian.Uint32(headerBytes[16:])
computedChecksum := crc(headerBytes[0:16])
if checksum != computedChecksum {
return 0, 0, 0, errors.New(fmt.Sprintf("wrong checksum got: %d, expected: %d", computedChecksum, checksum))
}
return dataLen, nextBlock, allocSize, nil
}
func (this *StoreItem) writeHeader(currentOffset uint64, dataLen uint32, nextBlockOffset uint64, allocSize uint32) {
header := make([]byte, headerLen)
binary.LittleEndian.PutUint32(header[0:], uint32(dataLen))
binary.LittleEndian.PutUint64(header[4:], uint64(0))
binary.LittleEndian.PutUint32(header[12:], allocSize)
checksum := crc(header[0:16])
binary.LittleEndian.PutUint32(header[16:], checksum)
_, err := this.descriptor.WriteAt(header, int64(currentOffset))
if err != nil {
panic(err)
}
}
func (this *StoreItem) appendPostings(name string, value uint64) {
p := this.CreatePostingsList(name)
data := make([]byte, 8)
binary.LittleEndian.PutUint64(data, value)
// add it to the end
offset := atomic.AddUint64(&p.offset, uint64(8)) - 8
p.descriptor.WriteAt(data, int64(offset))
}
func (this *StoreItem) read(offset uint64) (uint32, []byte, error) {
// lockless read
dataLen, _, _, err := readHeader(this.descriptor, offset)
if err != nil {
return 0, nil, err
}
output := make([]byte, dataLen)
_, err = this.descriptor.ReadAt(output, int64(offset)+int64(headerLen))
if err != nil {
return 0, nil, err
}
return dataLen, output, nil
}
func (this *StoreItem) append(allocSize uint32, data io.Reader) (uint64, error) {
dataRaw, err := ioutil.ReadAll(data)
if err != nil {
return 0, err
}
if len(dataRaw) > int(allocSize) {
allocSize = uint32(len(dataRaw))
}
offset := atomic.AddUint64(&this.offset, uint64(allocSize+headerLen))
currentOffset := offset - uint64(allocSize+headerLen)
_, err = this.descriptor.WriteAt(dataRaw, int64(currentOffset+headerLen))
if err != nil {
panic(err)
}
this.writeHeader(currentOffset, uint32(len(dataRaw)), 0, allocSize)
return currentOffset, nil
}
func (this *StoreItem) modify(offset uint64, pos int32, data io.Reader) error {
dataRaw, err := ioutil.ReadAll(data)
if err != nil {
return err
}
oldDataLen, _, allocSize, err := readHeader(this.descriptor, offset)
if err != nil {
return err
}
if pos < 0 {
pos = int32(oldDataLen)
}
end := uint32(pos) + uint32(len(dataRaw))
if end > allocSize {
return errors.New("pos+len > allocSize")
}
_, err = this.descriptor.WriteAt(dataRaw, int64(offset+uint64(headerLen)+uint64(pos)))
if err != nil {
panic(err)
}
if end > oldDataLen {
// need to recompute the header
this.writeHeader(offset, end, 0, allocSize)
}
return nil
}
func crc(b []byte) uint32 {
return uint32(metro.Hash64(b, 0) >> uint64(32))
}
type MultiStore struct {
stores map[string]*StoreItem
root string
sync.RWMutex
}
func (this *MultiStore) find(storageIdentifier string) *StoreItem {
if storageIdentifier == "" {
storageIdentifier = "default"
}
this.RLock()
storage, ok := this.stores[storageIdentifier]
this.RUnlock()
if !ok {
this.Lock()
defer this.Unlock()
storage, ok = this.stores[storageIdentifier]
if !ok {
storage = NewStorage(path.Join(this.root, storageIdentifier))
this.stores[storageIdentifier] = storage
}
}
return storage
}
func (this *MultiStore) close(storageIdentifier string) {
this.Lock()
defer this.Unlock()
if storageIdentifier == "" {
storageIdentifier = "default"
}
storage, ok := this.stores[storageIdentifier]
if ok {
storage.descriptor.Close()
log.Printf("closing: %s", storage.path)
}
delete(this.stores, storageIdentifier)
}
func (this *MultiStore) modify(storageIdentifier string, offset uint64, pos int32, data io.Reader) error {
return this.find(storageIdentifier).modify(offset, pos, data)
}
func (this *MultiStore) stats(storageIdentifier string) *Stats {
return this.find(storageIdentifier).stats()
}
func (this *MultiStore) append(storageIdentifier string, allocSize uint32, data io.Reader) (uint64, error) {
return this.find(storageIdentifier).append(allocSize, data)
}
func (this *MultiStore) read(storageIdentifier string, offset uint64) (uint32, []byte, error) {
return this.find(storageIdentifier).read(offset)
}
func (this *MultiStore) scan(storageIdentifier string, cb func(uint64, []byte) bool) {
this.find(storageIdentifier).scan(cb)
}
func (this *MultiStore) ExecuteQuery(storageIdentifier string, query Query, cb func(uint64, []byte) bool) {
this.find(storageIdentifier).ExecuteQuery(query, cb)
}
func makeTimestamp() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
}
func Log(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
t0 := makeTimestamp()
handler.ServeHTTP(w, r)
log.Printf("%s %s %s took: %d", r.RemoteAddr, r.Method, r.URL, makeTimestamp()-t0)
})
}
func getTags(tags string) []string {
if tags == "" {
return []string{}
}
splitted := strings.Split(tags, ",")
out := []string{}
for _, s := range splitted {
if s != "" {
out = append(out, s)
}
}
return out
}
const namespaceKey = "namespace"
const posKey = "pos"
const allocSizeKey = "allocSize"
const offsetKey = "offset"
const tagsKey = "tags"
func main() {
var pbind = flag.String("bind", ":8000", "address to bind to")
var proot = flag.String("root", "/tmp/rochefort", "root directory")
var pquiet = flag.Bool("quiet", false, "dont print any log messages")
flag.Parse()
multiStore := &MultiStore{
stores: make(map[string]*StoreItem),
root: *proot,
}
os.MkdirAll(*proot, 0700)
namespaces, err := ioutil.ReadDir(*proot)
if err != nil {
panic(err)
}
// open all files in the namespace
NAMESPACE:
for _, namespace := range namespaces {
if namespace.IsDir() {
files, err := ioutil.ReadDir(path.Join(*proot, namespace.Name()))
if err == nil {
for _, file := range files {
if strings.HasSuffix(file.Name(), ".raw") {
multiStore.find(namespace.Name())
continue NAMESPACE
}
}
}
}
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-sigs
log.Printf("\nReceived an interrupt, stopping services...\n")
multiStore.Lock() // dont unlock it
for _, storage := range multiStore.stores {
storage.Lock() // dont unlock it
storage.descriptor.Close()
log.Printf("closing: %s", storage.path)
}
os.Exit(0)
}()
http.HandleFunc("/close", func(w http.ResponseWriter, r *http.Request) {
multiStore.close(r.URL.Query().Get(namespaceKey))
w.Header().Set("Content-Type", "application/json")
w.Write([]byte("{\"success\":true}"))
})
http.HandleFunc("/modify", func(w http.ResponseWriter, r *http.Request) {
offset, err := strconv.ParseUint(r.URL.Query().Get(offsetKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
pos, err := strconv.ParseInt(r.URL.Query().Get(posKey), 10, 32)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
err := multiStore.modify(r.URL.Query().Get(namespaceKey), offset, int32(pos), r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte("{\"success\":true}"))
}
}
}
})
http.HandleFunc("/append", func(w http.ResponseWriter, r *http.Request) {
allocSize := uint64(0)
if r.URL.Query().Get(allocSizeKey) != "" {
allocSizeInput, err := strconv.ParseUint(r.URL.Query().Get(allocSizeKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
} else {
allocSize = allocSizeInput
}
}
store := multiStore.find(r.URL.Query().Get(namespaceKey))
offset, err := store.append(uint32(allocSize), r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
tags := getTags(r.URL.Query().Get(tagsKey))
for _, t := range tags {
store.appendPostings(t, offset)
}
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(fmt.Sprintf("{\"offset\":%d}", offset)))
}
})
http.HandleFunc("/get", func(w http.ResponseWriter, r *http.Request) {
offset, err := strconv.ParseUint(r.URL.Query().Get(offsetKey), 10, 64)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
_, data, err := multiStore.read(r.URL.Query().Get(namespaceKey), offset)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/octet-stream")
w.Write(data)
}
}
})
http.HandleFunc("/scan", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
header := make([]byte, 12)
cb := func(offset uint64, data []byte) bool {
binary.LittleEndian.PutUint32(header[0:], uint32(len(data)))
binary.LittleEndian.PutUint64(header[4:], offset)
_, err := w.Write(header)
if err != nil {
return false
}
_, err = w.Write(data)
if err != nil {
return false
}
return true
}
multiStore.scan(r.URL.Query().Get(namespaceKey), cb)
})
http.HandleFunc("/query", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
defer r.Body.Close()
body, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
var decoded map[string]interface{}
err = json.Unmarshal(body, &decoded)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
stored := multiStore.find(r.URL.Query().Get(namespaceKey))
query, err := fromJSON(stored, decoded)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}
header := make([]byte, 12)
cb := func(offset uint64, data []byte) bool {
binary.LittleEndian.PutUint32(header[0:], uint32(len(data)))
binary.LittleEndian.PutUint64(header[4:], offset)
_, err := w.Write(header)
if err != nil {
return false
}
_, err = w.Write(data)
if err != nil {
return false
}
return true
}
stored.ExecuteQuery(query, cb)
})
http.HandleFunc("/stat", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/octet-stream")
stats := multiStore.stats(r.URL.Query().Get(namespaceKey))
b, err := json.Marshal(stats)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
} else {
w.Header().Set("Content-Type", "application/json")
w.Write(b)
}
})
http.HandleFunc("/getMulti", func(w http.ResponseWriter, r *http.Request) {
dataLenRaw := make([]byte, 4)
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("read: %s", err.Error())))
return
}
namespace := r.URL.Query().Get(namespaceKey)
if len(b)%8 != 0 {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("incomplete read: %d is not multiple of 8", len(b))))
return
}
w.Header().Set("Content-Type", "application/octet-stream")
for i := 0; i < len(b); i += 8 {
offset := binary.LittleEndian.Uint64(b[i:])
_, data, err := multiStore.read(namespace, offset)
// XXX: we ignore the error on purpose
// as the storage is not fsyncing, it could very well lose some updates
// also the data is barely checksummed, so might very well be corrupted
if err == nil {
binary.LittleEndian.PutUint32(dataLenRaw, uint32(len(data)))
_, err = w.Write(dataLenRaw)
if err != nil {
return
}
_, err = w.Write(data)
if err != nil {
return
}
}
}
})
if !*pquiet {
log.Printf("starting http server on %s", *pbind)
err := http.ListenAndServe(*pbind, Log(http.DefaultServeMux))
if err != nil {
log.Fatal(err)
}
} else {
err := http.ListenAndServe(*pbind, http.DefaultServeMux)
if err != nil {
log.Fatal(err)
}
}
} | return f, uint64(offset) | random_line_split |
ProjectWatcher.ts | // Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license.
// See LICENSE in the project root for license information.
import * as fs from 'fs';
import * as os from 'os';
import * as readline from 'readline';
import { once } from 'events';
import { getRepoRoot } from '@rushstack/package-deps-hash';
import { Colors, Path, ITerminal, FileSystemStats, FileSystem } from '@rushstack/node-core-library';
import { Git } from './Git';
import { ProjectChangeAnalyzer } from './ProjectChangeAnalyzer';
import { RushConfiguration } from '../api/RushConfiguration';
import { RushConfigurationProject } from '../api/RushConfigurationProject';
export interface IProjectWatcherOptions {
debounceMs?: number;
rushConfiguration: RushConfiguration;
projectsToWatch: ReadonlySet<RushConfigurationProject>;
terminal: ITerminal;
initialState?: ProjectChangeAnalyzer | undefined;
}
export interface IProjectChangeResult {
/**
* The set of projects that have changed since the last iteration
*/
changedProjects: ReadonlySet<RushConfigurationProject>;
/**
* Contains the git hashes for all tracked files in the repo
*/
state: ProjectChangeAnalyzer;
}
interface IPathWatchOptions {
recurse: boolean;
}
/**
* This class is for incrementally watching a set of projects in the repository for changes.
*
* We are manually using fs.watch() instead of `chokidar` because all we want from the file system watcher is a boolean
* signal indicating that "at least 1 file in a watched project changed". We then defer to ProjectChangeAnalyzer (which
* is responsible for change detection in all incremental builds) to determine what actually chanaged.
*
* Calling `waitForChange()` will return a promise that resolves when the package-deps of one or
* more projects differ from the value the previous time it was invoked. The first time will always resolve with the full selection.
*/
export class ProjectWatcher {
private readonly _debounceMs: number;
private readonly _repoRoot: string;
private readonly _rushConfiguration: RushConfiguration;
private readonly _projectsToWatch: ReadonlySet<RushConfigurationProject>;
private readonly _terminal: ITerminal;
private _initialState: ProjectChangeAnalyzer | undefined;
private _previousState: ProjectChangeAnalyzer | undefined;
private _hasRenderedStatus: boolean;
public constructor(options: IProjectWatcherOptions) {
const { debounceMs = 1000, rushConfiguration, projectsToWatch, terminal, initialState } = options;
this._debounceMs = debounceMs;
this._rushConfiguration = rushConfiguration;
this._projectsToWatch = projectsToWatch;
this._terminal = terminal;
const gitPath: string = new Git(rushConfiguration).getGitPathOrThrow();
this._repoRoot = Path.convertToSlashes(getRepoRoot(rushConfiguration.rushJsonFolder, gitPath));
this._initialState = initialState;
this._previousState = initialState;
this._hasRenderedStatus = false;
}
/**
* Waits for a change to the package-deps of one or more of the selected projects, since the previous invocation.
* Will return immediately the first time it is invoked, since no state has been recorded.
* If no change is currently present, watches the source tree of all selected projects for file changes.
*/
public async waitForChange(onWatchingFiles?: () => void): Promise<IProjectChangeResult> {
const initialChangeResult: IProjectChangeResult = await this._computeChanged();
// Ensure that the new state is recorded so that we don't loop infinitely
this._commitChanges(initialChangeResult.state);
if (initialChangeResult.changedProjects.size) {
return initialChangeResult;
}
const previousState: ProjectChangeAnalyzer = initialChangeResult.state;
const repoRoot: string = Path.convertToSlashes(this._rushConfiguration.rushJsonFolder);
// Map of path to whether config for the path
const pathsToWatch: Map<string, IPathWatchOptions> = new Map();
// Node 12 supports the "recursive" parameter to fs.watch only on win32 and OSX
// https://nodejs.org/docs/latest-v12.x/api/fs.html#fs_caveats
const useNativeRecursiveWatch: boolean = os.platform() === 'win32' || os.platform() === 'darwin';
if (useNativeRecursiveWatch) {
// Watch the root non-recursively
pathsToWatch.set(repoRoot, { recurse: false });
// Watch the rush config folder non-recursively
pathsToWatch.set(Path.convertToSlashes(this._rushConfiguration.commonRushConfigFolder), {
recurse: false
});
for (const project of this._projectsToWatch) {
// Use recursive watch in individual project folders
pathsToWatch.set(Path.convertToSlashes(project.projectFolder), { recurse: true });
}
} else {
for (const project of this._projectsToWatch) {
const projectState: Map<string, string> = (await previousState._tryGetProjectDependenciesAsync(
project,
this._terminal
))!;
const prefixLength: number = project.projectFolder.length - repoRoot.length - 1;
// Watch files in the root of the project, or
for (const pathToWatch of ProjectWatcher._enumeratePathsToWatch(projectState.keys(), prefixLength)) {
pathsToWatch.set(`${this._repoRoot}/${pathToWatch}`, { recurse: true });
}
}
}
const watchers: Map<string, fs.FSWatcher> = new Map();
const watchedResult: IProjectChangeResult = await new Promise(
(resolve: (result: IProjectChangeResult) => void, reject: (err: Error) => void) => {
let timeout: NodeJS.Timeout | undefined;
let terminated: boolean = false;
const terminal: ITerminal = this._terminal;
const debounceMs: number = this._debounceMs;
this._hasRenderedStatus = false;
const resolveIfChanged = async (): Promise<void> => {
timeout = undefined;
if (terminated) {
return;
}
try {
this._setStatus(`Evaluating changes to tracked files...`);
const result: IProjectChangeResult = await this._computeChanged();
this._setStatus(`Finished analyzing.`);
// Need an async tick to allow for more file system events to be handled
process.nextTick(() => {
if (timeout) {
// If another file has changed, wait for another pass.
this._setStatus(`More file changes detected, aborting.`);
return;
}
this._commitChanges(result.state);
if (result.changedProjects.size) {
terminated = true;
terminal.writeLine();
resolve(result);
} else {
this._setStatus(`No changes detected to tracked files.`);
}
});
} catch (err) {
// eslint-disable-next-line require-atomic-updates
terminated = true;
terminal.writeLine();
reject(err as NodeJS.ErrnoException);
}
};
for (const [pathToWatch, { recurse }] of pathsToWatch) {
addWatcher(pathToWatch, recurse);
}
if (onWatchingFiles) {
onWatchingFiles();
}
function onError(err: Error): void {
if (terminated) {
return;
}
terminated = true;
terminal.writeLine();
reject(err);
}
function addWatcher(watchedPath: string, recursive: boolean): void {
if (watchers.has(watchedPath)) {
return;
}
const listener: (event: string, fileName: string) => void = changeListener(watchedPath, recursive);
const watcher: fs.FSWatcher = fs.watch(
watchedPath,
{
encoding: 'utf-8',
recursive: recursive && useNativeRecursiveWatch
},
listener
);
watchers.set(watchedPath, watcher);
watcher.on('error', (err) => {
watchers.delete(watchedPath);
onError(err);
});
}
function innerListener(root: string, recursive: boolean, event: string, fileName: string): void {
try {
if (terminated) {
return;
}
if (fileName === '.git' || fileName === 'node_modules') {
return;
}
// Handling for added directories
if (recursive && !useNativeRecursiveWatch) {
const decodedName: string = fileName && fileName.toString();
const normalizedName: string = decodedName && Path.convertToSlashes(decodedName);
const fullName: string = normalizedName && `${root}/${normalizedName}`;
if (fullName && !watchers.has(fullName)) {
try {
const stat: FileSystemStats = FileSystem.getStatistics(fullName);
if (stat.isDirectory()) {
addWatcher(fullName, true);
}
} catch (err) {
const code: string | undefined = (err as NodeJS.ErrnoException).code;
if (code !== 'ENOENT' && code !== 'ENOTDIR') {
throw err;
}
}
}
}
// Use a timeout to debounce changes, e.g. bulk copying files into the directory while the watcher is running.
if (timeout) {
clearTimeout(timeout);
}
timeout = setTimeout(resolveIfChanged, debounceMs);
} catch (err) {
terminated = true;
terminal.writeLine();
reject(err as NodeJS.ErrnoException);
}
}
function changeListener(root: string, recursive: boolean): (event: string, fileName: string) => void {
return innerListener.bind(0, root, recursive);
}
}
);
const closePromises: Promise<void>[] = [];
for (const [watchedPath, watcher] of watchers) {
closePromises.push(
once(watcher, 'close').then(() => {
watchers.delete(watchedPath);
})
);
watcher.close();
}
await Promise.all(closePromises);
return watchedResult;
}
private _setStatus(status: string): void {
if (this._hasRenderedStatus) {
readline.clearLine(process.stdout, 0);
readline.cursorTo(process.stdout, 0);
} else {
this._hasRenderedStatus = true;
}
this._terminal.write(Colors.bold(Colors.cyan(`Watch Status: ${status}`)));
}
/**
* Determines which, if any, projects (within the selection) have new hashes for files that are not in .gitignore
*/
private async _computeChanged(): Promise<IProjectChangeResult> {
const state: ProjectChangeAnalyzer = new ProjectChangeAnalyzer(this._rushConfiguration);
const previousState: ProjectChangeAnalyzer | undefined = this._previousState;
if (!previousState) {
return {
changedProjects: this._projectsToWatch,
state
};
}
const changedProjects: Set<RushConfigurationProject> = new Set();
for (const project of this._projectsToWatch) {
const [previous, current] = await Promise.all([
previousState._tryGetProjectDependenciesAsync(project, this._terminal),
state._tryGetProjectDependenciesAsync(project, this._terminal)
]);
if (ProjectWatcher._haveProjectDepsChanged(previous, current)) {
// May need to detect if the nature of the change will break the process, e.g. changes to package.json
changedProjects.add(project);
}
}
return {
changedProjects,
state
};
}
private _commitChanges(state: ProjectChangeAnalyzer): void {
this._previousState = state;
if (!this._initialState) {
this._initialState = state;
}
}
/**
* Tests for inequality of the passed Maps. Order invariant.
*
* @returns `true` if the maps are different, `false` otherwise
*/
private static _haveProjectDepsChanged(
prev: Map<string, string> | undefined,
next: Map<string, string> | undefined
): boolean {
if (!prev && !next) {
return false;
}
if (!prev || !next) {
return true;
}
if (prev.size !== next.size) {
return true;
}
for (const [key, value] of prev) {
if (next.get(key) !== value) |
}
return false;
}
private static *_enumeratePathsToWatch(paths: Iterable<string>, prefixLength: number): Iterable<string> {
for (const path of paths) {
const rootSlashIndex: number = path.indexOf('/', prefixLength);
if (rootSlashIndex < 0) {
yield path;
return;
}
yield path.slice(0, rootSlashIndex);
let slashIndex: number = path.indexOf('/', rootSlashIndex + 1);
while (slashIndex >= 0) {
yield path.slice(0, slashIndex);
slashIndex = path.indexOf('/', slashIndex + 1);
}
}
}
}
| {
return true;
} | conditional_block |
ProjectWatcher.ts | // Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license.
// See LICENSE in the project root for license information.
import * as fs from 'fs';
import * as os from 'os';
import * as readline from 'readline';
import { once } from 'events';
import { getRepoRoot } from '@rushstack/package-deps-hash';
import { Colors, Path, ITerminal, FileSystemStats, FileSystem } from '@rushstack/node-core-library';
import { Git } from './Git';
import { ProjectChangeAnalyzer } from './ProjectChangeAnalyzer';
import { RushConfiguration } from '../api/RushConfiguration';
import { RushConfigurationProject } from '../api/RushConfigurationProject';
export interface IProjectWatcherOptions {
debounceMs?: number;
rushConfiguration: RushConfiguration;
projectsToWatch: ReadonlySet<RushConfigurationProject>;
terminal: ITerminal;
initialState?: ProjectChangeAnalyzer | undefined;
}
export interface IProjectChangeResult {
/**
* The set of projects that have changed since the last iteration
*/
changedProjects: ReadonlySet<RushConfigurationProject>;
/**
* Contains the git hashes for all tracked files in the repo
*/
state: ProjectChangeAnalyzer;
}
interface IPathWatchOptions {
recurse: boolean;
}
/**
* This class is for incrementally watching a set of projects in the repository for changes.
*
* We are manually using fs.watch() instead of `chokidar` because all we want from the file system watcher is a boolean
* signal indicating that "at least 1 file in a watched project changed". We then defer to ProjectChangeAnalyzer (which
* is responsible for change detection in all incremental builds) to determine what actually chanaged.
*
* Calling `waitForChange()` will return a promise that resolves when the package-deps of one or
* more projects differ from the value the previous time it was invoked. The first time will always resolve with the full selection.
*/
export class ProjectWatcher {
private readonly _debounceMs: number;
private readonly _repoRoot: string;
private readonly _rushConfiguration: RushConfiguration;
private readonly _projectsToWatch: ReadonlySet<RushConfigurationProject>;
private readonly _terminal: ITerminal;
private _initialState: ProjectChangeAnalyzer | undefined;
private _previousState: ProjectChangeAnalyzer | undefined;
private _hasRenderedStatus: boolean;
public constructor(options: IProjectWatcherOptions) {
const { debounceMs = 1000, rushConfiguration, projectsToWatch, terminal, initialState } = options;
this._debounceMs = debounceMs;
this._rushConfiguration = rushConfiguration;
this._projectsToWatch = projectsToWatch;
this._terminal = terminal;
const gitPath: string = new Git(rushConfiguration).getGitPathOrThrow();
this._repoRoot = Path.convertToSlashes(getRepoRoot(rushConfiguration.rushJsonFolder, gitPath));
this._initialState = initialState;
this._previousState = initialState;
this._hasRenderedStatus = false;
}
/**
* Waits for a change to the package-deps of one or more of the selected projects, since the previous invocation.
* Will return immediately the first time it is invoked, since no state has been recorded.
* If no change is currently present, watches the source tree of all selected projects for file changes.
*/
public async waitForChange(onWatchingFiles?: () => void): Promise<IProjectChangeResult> |
private _setStatus(status: string): void {
if (this._hasRenderedStatus) {
readline.clearLine(process.stdout, 0);
readline.cursorTo(process.stdout, 0);
} else {
this._hasRenderedStatus = true;
}
this._terminal.write(Colors.bold(Colors.cyan(`Watch Status: ${status}`)));
}
/**
* Determines which, if any, projects (within the selection) have new hashes for files that are not in .gitignore
*/
private async _computeChanged(): Promise<IProjectChangeResult> {
const state: ProjectChangeAnalyzer = new ProjectChangeAnalyzer(this._rushConfiguration);
const previousState: ProjectChangeAnalyzer | undefined = this._previousState;
if (!previousState) {
return {
changedProjects: this._projectsToWatch,
state
};
}
const changedProjects: Set<RushConfigurationProject> = new Set();
for (const project of this._projectsToWatch) {
const [previous, current] = await Promise.all([
previousState._tryGetProjectDependenciesAsync(project, this._terminal),
state._tryGetProjectDependenciesAsync(project, this._terminal)
]);
if (ProjectWatcher._haveProjectDepsChanged(previous, current)) {
// May need to detect if the nature of the change will break the process, e.g. changes to package.json
changedProjects.add(project);
}
}
return {
changedProjects,
state
};
}
private _commitChanges(state: ProjectChangeAnalyzer): void {
this._previousState = state;
if (!this._initialState) {
this._initialState = state;
}
}
/**
* Tests for inequality of the passed Maps. Order invariant.
*
* @returns `true` if the maps are different, `false` otherwise
*/
private static _haveProjectDepsChanged(
prev: Map<string, string> | undefined,
next: Map<string, string> | undefined
): boolean {
if (!prev && !next) {
return false;
}
if (!prev || !next) {
return true;
}
if (prev.size !== next.size) {
return true;
}
for (const [key, value] of prev) {
if (next.get(key) !== value) {
return true;
}
}
return false;
}
private static *_enumeratePathsToWatch(paths: Iterable<string>, prefixLength: number): Iterable<string> {
for (const path of paths) {
const rootSlashIndex: number = path.indexOf('/', prefixLength);
if (rootSlashIndex < 0) {
yield path;
return;
}
yield path.slice(0, rootSlashIndex);
let slashIndex: number = path.indexOf('/', rootSlashIndex + 1);
while (slashIndex >= 0) {
yield path.slice(0, slashIndex);
slashIndex = path.indexOf('/', slashIndex + 1);
}
}
}
}
| {
const initialChangeResult: IProjectChangeResult = await this._computeChanged();
// Ensure that the new state is recorded so that we don't loop infinitely
this._commitChanges(initialChangeResult.state);
if (initialChangeResult.changedProjects.size) {
return initialChangeResult;
}
const previousState: ProjectChangeAnalyzer = initialChangeResult.state;
const repoRoot: string = Path.convertToSlashes(this._rushConfiguration.rushJsonFolder);
// Map of path to whether config for the path
const pathsToWatch: Map<string, IPathWatchOptions> = new Map();
// Node 12 supports the "recursive" parameter to fs.watch only on win32 and OSX
// https://nodejs.org/docs/latest-v12.x/api/fs.html#fs_caveats
const useNativeRecursiveWatch: boolean = os.platform() === 'win32' || os.platform() === 'darwin';
if (useNativeRecursiveWatch) {
// Watch the root non-recursively
pathsToWatch.set(repoRoot, { recurse: false });
// Watch the rush config folder non-recursively
pathsToWatch.set(Path.convertToSlashes(this._rushConfiguration.commonRushConfigFolder), {
recurse: false
});
for (const project of this._projectsToWatch) {
// Use recursive watch in individual project folders
pathsToWatch.set(Path.convertToSlashes(project.projectFolder), { recurse: true });
}
} else {
for (const project of this._projectsToWatch) {
const projectState: Map<string, string> = (await previousState._tryGetProjectDependenciesAsync(
project,
this._terminal
))!;
const prefixLength: number = project.projectFolder.length - repoRoot.length - 1;
// Watch files in the root of the project, or
for (const pathToWatch of ProjectWatcher._enumeratePathsToWatch(projectState.keys(), prefixLength)) {
pathsToWatch.set(`${this._repoRoot}/${pathToWatch}`, { recurse: true });
}
}
}
const watchers: Map<string, fs.FSWatcher> = new Map();
const watchedResult: IProjectChangeResult = await new Promise(
(resolve: (result: IProjectChangeResult) => void, reject: (err: Error) => void) => {
let timeout: NodeJS.Timeout | undefined;
let terminated: boolean = false;
const terminal: ITerminal = this._terminal;
const debounceMs: number = this._debounceMs;
this._hasRenderedStatus = false;
const resolveIfChanged = async (): Promise<void> => {
timeout = undefined;
if (terminated) {
return;
}
try {
this._setStatus(`Evaluating changes to tracked files...`);
const result: IProjectChangeResult = await this._computeChanged();
this._setStatus(`Finished analyzing.`);
// Need an async tick to allow for more file system events to be handled
process.nextTick(() => {
if (timeout) {
// If another file has changed, wait for another pass.
this._setStatus(`More file changes detected, aborting.`);
return;
}
this._commitChanges(result.state);
if (result.changedProjects.size) {
terminated = true;
terminal.writeLine();
resolve(result);
} else {
this._setStatus(`No changes detected to tracked files.`);
}
});
} catch (err) {
// eslint-disable-next-line require-atomic-updates
terminated = true;
terminal.writeLine();
reject(err as NodeJS.ErrnoException);
}
};
for (const [pathToWatch, { recurse }] of pathsToWatch) {
addWatcher(pathToWatch, recurse);
}
if (onWatchingFiles) {
onWatchingFiles();
}
function onError(err: Error): void {
if (terminated) {
return;
}
terminated = true;
terminal.writeLine();
reject(err);
}
function addWatcher(watchedPath: string, recursive: boolean): void {
if (watchers.has(watchedPath)) {
return;
}
const listener: (event: string, fileName: string) => void = changeListener(watchedPath, recursive);
const watcher: fs.FSWatcher = fs.watch(
watchedPath,
{
encoding: 'utf-8',
recursive: recursive && useNativeRecursiveWatch
},
listener
);
watchers.set(watchedPath, watcher);
watcher.on('error', (err) => {
watchers.delete(watchedPath);
onError(err);
});
}
function innerListener(root: string, recursive: boolean, event: string, fileName: string): void {
try {
if (terminated) {
return;
}
if (fileName === '.git' || fileName === 'node_modules') {
return;
}
// Handling for added directories
if (recursive && !useNativeRecursiveWatch) {
const decodedName: string = fileName && fileName.toString();
const normalizedName: string = decodedName && Path.convertToSlashes(decodedName);
const fullName: string = normalizedName && `${root}/${normalizedName}`;
if (fullName && !watchers.has(fullName)) {
try {
const stat: FileSystemStats = FileSystem.getStatistics(fullName);
if (stat.isDirectory()) {
addWatcher(fullName, true);
}
} catch (err) {
const code: string | undefined = (err as NodeJS.ErrnoException).code;
if (code !== 'ENOENT' && code !== 'ENOTDIR') {
throw err;
}
}
}
}
// Use a timeout to debounce changes, e.g. bulk copying files into the directory while the watcher is running.
if (timeout) {
clearTimeout(timeout);
}
timeout = setTimeout(resolveIfChanged, debounceMs);
} catch (err) {
terminated = true;
terminal.writeLine();
reject(err as NodeJS.ErrnoException);
}
}
function changeListener(root: string, recursive: boolean): (event: string, fileName: string) => void {
return innerListener.bind(0, root, recursive);
}
}
);
const closePromises: Promise<void>[] = [];
for (const [watchedPath, watcher] of watchers) {
closePromises.push(
once(watcher, 'close').then(() => {
watchers.delete(watchedPath);
})
);
watcher.close();
}
await Promise.all(closePromises);
return watchedResult;
} | identifier_body |
ProjectWatcher.ts | // Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license.
// See LICENSE in the project root for license information.
import * as fs from 'fs';
import * as os from 'os';
import * as readline from 'readline';
import { once } from 'events';
import { getRepoRoot } from '@rushstack/package-deps-hash';
import { Colors, Path, ITerminal, FileSystemStats, FileSystem } from '@rushstack/node-core-library';
import { Git } from './Git';
import { ProjectChangeAnalyzer } from './ProjectChangeAnalyzer';
import { RushConfiguration } from '../api/RushConfiguration';
import { RushConfigurationProject } from '../api/RushConfigurationProject';
export interface IProjectWatcherOptions {
debounceMs?: number;
rushConfiguration: RushConfiguration;
projectsToWatch: ReadonlySet<RushConfigurationProject>;
terminal: ITerminal;
initialState?: ProjectChangeAnalyzer | undefined;
}
export interface IProjectChangeResult {
/**
* The set of projects that have changed since the last iteration
*/
changedProjects: ReadonlySet<RushConfigurationProject>;
/**
* Contains the git hashes for all tracked files in the repo
*/
state: ProjectChangeAnalyzer;
}
interface IPathWatchOptions {
recurse: boolean;
}
/**
* This class is for incrementally watching a set of projects in the repository for changes.
*
* We are manually using fs.watch() instead of `chokidar` because all we want from the file system watcher is a boolean
* signal indicating that "at least 1 file in a watched project changed". We then defer to ProjectChangeAnalyzer (which
* is responsible for change detection in all incremental builds) to determine what actually chanaged.
*
* Calling `waitForChange()` will return a promise that resolves when the package-deps of one or
* more projects differ from the value the previous time it was invoked. The first time will always resolve with the full selection.
*/
export class ProjectWatcher {
private readonly _debounceMs: number;
private readonly _repoRoot: string;
private readonly _rushConfiguration: RushConfiguration;
private readonly _projectsToWatch: ReadonlySet<RushConfigurationProject>;
private readonly _terminal: ITerminal;
private _initialState: ProjectChangeAnalyzer | undefined;
private _previousState: ProjectChangeAnalyzer | undefined;
private _hasRenderedStatus: boolean;
public constructor(options: IProjectWatcherOptions) {
const { debounceMs = 1000, rushConfiguration, projectsToWatch, terminal, initialState } = options;
this._debounceMs = debounceMs;
this._rushConfiguration = rushConfiguration;
this._projectsToWatch = projectsToWatch;
this._terminal = terminal;
const gitPath: string = new Git(rushConfiguration).getGitPathOrThrow();
this._repoRoot = Path.convertToSlashes(getRepoRoot(rushConfiguration.rushJsonFolder, gitPath));
this._initialState = initialState;
this._previousState = initialState;
this._hasRenderedStatus = false;
}
/**
* Waits for a change to the package-deps of one or more of the selected projects, since the previous invocation.
* Will return immediately the first time it is invoked, since no state has been recorded.
* If no change is currently present, watches the source tree of all selected projects for file changes.
*/
public async waitForChange(onWatchingFiles?: () => void): Promise<IProjectChangeResult> {
const initialChangeResult: IProjectChangeResult = await this._computeChanged();
// Ensure that the new state is recorded so that we don't loop infinitely
this._commitChanges(initialChangeResult.state);
if (initialChangeResult.changedProjects.size) {
return initialChangeResult;
}
const previousState: ProjectChangeAnalyzer = initialChangeResult.state;
const repoRoot: string = Path.convertToSlashes(this._rushConfiguration.rushJsonFolder);
// Map of path to whether config for the path
const pathsToWatch: Map<string, IPathWatchOptions> = new Map();
// Node 12 supports the "recursive" parameter to fs.watch only on win32 and OSX
// https://nodejs.org/docs/latest-v12.x/api/fs.html#fs_caveats
const useNativeRecursiveWatch: boolean = os.platform() === 'win32' || os.platform() === 'darwin';
if (useNativeRecursiveWatch) {
// Watch the root non-recursively
pathsToWatch.set(repoRoot, { recurse: false });
// Watch the rush config folder non-recursively
pathsToWatch.set(Path.convertToSlashes(this._rushConfiguration.commonRushConfigFolder), {
recurse: false
});
for (const project of this._projectsToWatch) {
// Use recursive watch in individual project folders
pathsToWatch.set(Path.convertToSlashes(project.projectFolder), { recurse: true });
}
} else {
for (const project of this._projectsToWatch) {
const projectState: Map<string, string> = (await previousState._tryGetProjectDependenciesAsync(
project,
this._terminal
))!;
const prefixLength: number = project.projectFolder.length - repoRoot.length - 1;
// Watch files in the root of the project, or
for (const pathToWatch of ProjectWatcher._enumeratePathsToWatch(projectState.keys(), prefixLength)) {
pathsToWatch.set(`${this._repoRoot}/${pathToWatch}`, { recurse: true });
}
}
}
const watchers: Map<string, fs.FSWatcher> = new Map();
const watchedResult: IProjectChangeResult = await new Promise(
(resolve: (result: IProjectChangeResult) => void, reject: (err: Error) => void) => {
let timeout: NodeJS.Timeout | undefined;
let terminated: boolean = false;
const terminal: ITerminal = this._terminal;
const debounceMs: number = this._debounceMs;
this._hasRenderedStatus = false;
const resolveIfChanged = async (): Promise<void> => {
timeout = undefined;
if (terminated) {
return;
}
try {
this._setStatus(`Evaluating changes to tracked files...`);
const result: IProjectChangeResult = await this._computeChanged();
this._setStatus(`Finished analyzing.`);
// Need an async tick to allow for more file system events to be handled
process.nextTick(() => {
if (timeout) {
// If another file has changed, wait for another pass.
this._setStatus(`More file changes detected, aborting.`);
return;
}
this._commitChanges(result.state);
if (result.changedProjects.size) {
terminated = true;
terminal.writeLine();
resolve(result);
} else {
this._setStatus(`No changes detected to tracked files.`);
}
});
} catch (err) {
// eslint-disable-next-line require-atomic-updates
terminated = true;
terminal.writeLine();
reject(err as NodeJS.ErrnoException);
}
};
for (const [pathToWatch, { recurse }] of pathsToWatch) {
addWatcher(pathToWatch, recurse);
}
if (onWatchingFiles) {
onWatchingFiles();
}
function onError(err: Error): void {
if (terminated) {
return;
}
terminated = true;
terminal.writeLine();
reject(err);
}
function addWatcher(watchedPath: string, recursive: boolean): void {
if (watchers.has(watchedPath)) {
return;
}
const listener: (event: string, fileName: string) => void = changeListener(watchedPath, recursive);
const watcher: fs.FSWatcher = fs.watch(
watchedPath,
{
encoding: 'utf-8',
recursive: recursive && useNativeRecursiveWatch
},
listener
);
watchers.set(watchedPath, watcher);
watcher.on('error', (err) => {
watchers.delete(watchedPath);
onError(err);
});
}
function innerListener(root: string, recursive: boolean, event: string, fileName: string): void {
try {
if (terminated) {
return;
}
if (fileName === '.git' || fileName === 'node_modules') {
return;
}
// Handling for added directories
if (recursive && !useNativeRecursiveWatch) {
const decodedName: string = fileName && fileName.toString();
const normalizedName: string = decodedName && Path.convertToSlashes(decodedName);
const fullName: string = normalizedName && `${root}/${normalizedName}`;
if (fullName && !watchers.has(fullName)) {
try {
const stat: FileSystemStats = FileSystem.getStatistics(fullName);
if (stat.isDirectory()) {
addWatcher(fullName, true);
}
} catch (err) {
const code: string | undefined = (err as NodeJS.ErrnoException).code;
if (code !== 'ENOENT' && code !== 'ENOTDIR') {
throw err;
}
}
}
}
// Use a timeout to debounce changes, e.g. bulk copying files into the directory while the watcher is running.
if (timeout) {
clearTimeout(timeout);
}
timeout = setTimeout(resolveIfChanged, debounceMs);
} catch (err) {
terminated = true;
terminal.writeLine();
reject(err as NodeJS.ErrnoException);
}
}
function changeListener(root: string, recursive: boolean): (event: string, fileName: string) => void {
return innerListener.bind(0, root, recursive);
}
}
);
const closePromises: Promise<void>[] = [];
for (const [watchedPath, watcher] of watchers) {
closePromises.push(
once(watcher, 'close').then(() => {
watchers.delete(watchedPath);
})
);
watcher.close();
}
await Promise.all(closePromises);
return watchedResult;
}
private | (status: string): void {
if (this._hasRenderedStatus) {
readline.clearLine(process.stdout, 0);
readline.cursorTo(process.stdout, 0);
} else {
this._hasRenderedStatus = true;
}
this._terminal.write(Colors.bold(Colors.cyan(`Watch Status: ${status}`)));
}
/**
* Determines which, if any, projects (within the selection) have new hashes for files that are not in .gitignore
*/
private async _computeChanged(): Promise<IProjectChangeResult> {
const state: ProjectChangeAnalyzer = new ProjectChangeAnalyzer(this._rushConfiguration);
const previousState: ProjectChangeAnalyzer | undefined = this._previousState;
if (!previousState) {
return {
changedProjects: this._projectsToWatch,
state
};
}
const changedProjects: Set<RushConfigurationProject> = new Set();
for (const project of this._projectsToWatch) {
const [previous, current] = await Promise.all([
previousState._tryGetProjectDependenciesAsync(project, this._terminal),
state._tryGetProjectDependenciesAsync(project, this._terminal)
]);
if (ProjectWatcher._haveProjectDepsChanged(previous, current)) {
// May need to detect if the nature of the change will break the process, e.g. changes to package.json
changedProjects.add(project);
}
}
return {
changedProjects,
state
};
}
private _commitChanges(state: ProjectChangeAnalyzer): void {
this._previousState = state;
if (!this._initialState) {
this._initialState = state;
}
}
/**
* Tests for inequality of the passed Maps. Order invariant.
*
* @returns `true` if the maps are different, `false` otherwise
*/
private static _haveProjectDepsChanged(
prev: Map<string, string> | undefined,
next: Map<string, string> | undefined
): boolean {
if (!prev && !next) {
return false;
}
if (!prev || !next) {
return true;
}
if (prev.size !== next.size) {
return true;
}
for (const [key, value] of prev) {
if (next.get(key) !== value) {
return true;
}
}
return false;
}
private static *_enumeratePathsToWatch(paths: Iterable<string>, prefixLength: number): Iterable<string> {
for (const path of paths) {
const rootSlashIndex: number = path.indexOf('/', prefixLength);
if (rootSlashIndex < 0) {
yield path;
return;
}
yield path.slice(0, rootSlashIndex);
let slashIndex: number = path.indexOf('/', rootSlashIndex + 1);
while (slashIndex >= 0) {
yield path.slice(0, slashIndex);
slashIndex = path.indexOf('/', slashIndex + 1);
}
}
}
}
| _setStatus | identifier_name |
ProjectWatcher.ts | // Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license.
// See LICENSE in the project root for license information.
import * as fs from 'fs';
import * as os from 'os';
import * as readline from 'readline';
import { once } from 'events';
import { getRepoRoot } from '@rushstack/package-deps-hash';
import { Colors, Path, ITerminal, FileSystemStats, FileSystem } from '@rushstack/node-core-library';
import { Git } from './Git';
import { ProjectChangeAnalyzer } from './ProjectChangeAnalyzer';
import { RushConfiguration } from '../api/RushConfiguration';
import { RushConfigurationProject } from '../api/RushConfigurationProject';
export interface IProjectWatcherOptions {
debounceMs?: number;
rushConfiguration: RushConfiguration;
projectsToWatch: ReadonlySet<RushConfigurationProject>;
terminal: ITerminal;
initialState?: ProjectChangeAnalyzer | undefined;
}
export interface IProjectChangeResult {
/**
* The set of projects that have changed since the last iteration
*/
changedProjects: ReadonlySet<RushConfigurationProject>;
/**
* Contains the git hashes for all tracked files in the repo
*/
state: ProjectChangeAnalyzer;
}
interface IPathWatchOptions {
recurse: boolean;
}
/**
* This class is for incrementally watching a set of projects in the repository for changes.
*
* We are manually using fs.watch() instead of `chokidar` because all we want from the file system watcher is a boolean
* signal indicating that "at least 1 file in a watched project changed". We then defer to ProjectChangeAnalyzer (which
* is responsible for change detection in all incremental builds) to determine what actually chanaged.
*
* Calling `waitForChange()` will return a promise that resolves when the package-deps of one or
* more projects differ from the value the previous time it was invoked. The first time will always resolve with the full selection.
*/
export class ProjectWatcher {
private readonly _debounceMs: number;
private readonly _repoRoot: string;
private readonly _rushConfiguration: RushConfiguration;
private readonly _projectsToWatch: ReadonlySet<RushConfigurationProject>;
private readonly _terminal: ITerminal;
private _initialState: ProjectChangeAnalyzer | undefined;
private _previousState: ProjectChangeAnalyzer | undefined;
private _hasRenderedStatus: boolean;
public constructor(options: IProjectWatcherOptions) {
const { debounceMs = 1000, rushConfiguration, projectsToWatch, terminal, initialState } = options;
this._debounceMs = debounceMs;
this._rushConfiguration = rushConfiguration;
this._projectsToWatch = projectsToWatch;
this._terminal = terminal;
const gitPath: string = new Git(rushConfiguration).getGitPathOrThrow();
this._repoRoot = Path.convertToSlashes(getRepoRoot(rushConfiguration.rushJsonFolder, gitPath));
this._initialState = initialState;
this._previousState = initialState;
this._hasRenderedStatus = false;
}
/**
* Waits for a change to the package-deps of one or more of the selected projects, since the previous invocation.
* Will return immediately the first time it is invoked, since no state has been recorded.
* If no change is currently present, watches the source tree of all selected projects for file changes.
*/
public async waitForChange(onWatchingFiles?: () => void): Promise<IProjectChangeResult> {
const initialChangeResult: IProjectChangeResult = await this._computeChanged();
// Ensure that the new state is recorded so that we don't loop infinitely
this._commitChanges(initialChangeResult.state);
if (initialChangeResult.changedProjects.size) {
return initialChangeResult;
}
const previousState: ProjectChangeAnalyzer = initialChangeResult.state;
const repoRoot: string = Path.convertToSlashes(this._rushConfiguration.rushJsonFolder);
// Map of path to whether config for the path
const pathsToWatch: Map<string, IPathWatchOptions> = new Map();
// Node 12 supports the "recursive" parameter to fs.watch only on win32 and OSX
// https://nodejs.org/docs/latest-v12.x/api/fs.html#fs_caveats
const useNativeRecursiveWatch: boolean = os.platform() === 'win32' || os.platform() === 'darwin';
if (useNativeRecursiveWatch) {
// Watch the root non-recursively
pathsToWatch.set(repoRoot, { recurse: false });
// Watch the rush config folder non-recursively
pathsToWatch.set(Path.convertToSlashes(this._rushConfiguration.commonRushConfigFolder), {
recurse: false
});
for (const project of this._projectsToWatch) {
// Use recursive watch in individual project folders
pathsToWatch.set(Path.convertToSlashes(project.projectFolder), { recurse: true });
}
} else {
for (const project of this._projectsToWatch) {
const projectState: Map<string, string> = (await previousState._tryGetProjectDependenciesAsync(
project,
this._terminal
))!;
const prefixLength: number = project.projectFolder.length - repoRoot.length - 1;
// Watch files in the root of the project, or
for (const pathToWatch of ProjectWatcher._enumeratePathsToWatch(projectState.keys(), prefixLength)) {
pathsToWatch.set(`${this._repoRoot}/${pathToWatch}`, { recurse: true });
}
}
}
const watchers: Map<string, fs.FSWatcher> = new Map();
const watchedResult: IProjectChangeResult = await new Promise(
(resolve: (result: IProjectChangeResult) => void, reject: (err: Error) => void) => {
let timeout: NodeJS.Timeout | undefined;
let terminated: boolean = false;
const terminal: ITerminal = this._terminal;
const debounceMs: number = this._debounceMs;
this._hasRenderedStatus = false;
const resolveIfChanged = async (): Promise<void> => {
timeout = undefined;
if (terminated) {
return;
}
try {
this._setStatus(`Evaluating changes to tracked files...`);
const result: IProjectChangeResult = await this._computeChanged();
this._setStatus(`Finished analyzing.`);
// Need an async tick to allow for more file system events to be handled
process.nextTick(() => {
if (timeout) {
// If another file has changed, wait for another pass.
this._setStatus(`More file changes detected, aborting.`);
return;
}
this._commitChanges(result.state);
if (result.changedProjects.size) {
terminated = true;
terminal.writeLine();
resolve(result);
} else {
this._setStatus(`No changes detected to tracked files.`);
}
});
} catch (err) {
// eslint-disable-next-line require-atomic-updates
terminated = true;
terminal.writeLine();
reject(err as NodeJS.ErrnoException);
}
};
for (const [pathToWatch, { recurse }] of pathsToWatch) {
addWatcher(pathToWatch, recurse);
}
if (onWatchingFiles) {
onWatchingFiles();
}
function onError(err: Error): void {
if (terminated) {
return;
}
terminated = true;
terminal.writeLine();
reject(err);
}
function addWatcher(watchedPath: string, recursive: boolean): void {
if (watchers.has(watchedPath)) {
return;
}
const listener: (event: string, fileName: string) => void = changeListener(watchedPath, recursive);
const watcher: fs.FSWatcher = fs.watch(
watchedPath,
{
encoding: 'utf-8',
recursive: recursive && useNativeRecursiveWatch
},
listener
);
watchers.set(watchedPath, watcher);
watcher.on('error', (err) => {
watchers.delete(watchedPath);
onError(err);
});
}
function innerListener(root: string, recursive: boolean, event: string, fileName: string): void {
try {
if (terminated) {
return;
}
if (fileName === '.git' || fileName === 'node_modules') {
return;
}
// Handling for added directories
if (recursive && !useNativeRecursiveWatch) {
const decodedName: string = fileName && fileName.toString();
const normalizedName: string = decodedName && Path.convertToSlashes(decodedName);
const fullName: string = normalizedName && `${root}/${normalizedName}`;
if (fullName && !watchers.has(fullName)) {
try {
const stat: FileSystemStats = FileSystem.getStatistics(fullName);
if (stat.isDirectory()) {
addWatcher(fullName, true);
}
} catch (err) {
const code: string | undefined = (err as NodeJS.ErrnoException).code;
if (code !== 'ENOENT' && code !== 'ENOTDIR') {
throw err;
}
}
}
}
// Use a timeout to debounce changes, e.g. bulk copying files into the directory while the watcher is running.
if (timeout) {
clearTimeout(timeout);
}
timeout = setTimeout(resolveIfChanged, debounceMs);
} catch (err) {
terminated = true;
terminal.writeLine();
reject(err as NodeJS.ErrnoException);
}
}
function changeListener(root: string, recursive: boolean): (event: string, fileName: string) => void {
return innerListener.bind(0, root, recursive);
}
}
);
const closePromises: Promise<void>[] = [];
for (const [watchedPath, watcher] of watchers) {
closePromises.push(
once(watcher, 'close').then(() => {
watchers.delete(watchedPath);
})
);
watcher.close();
}
await Promise.all(closePromises);
return watchedResult;
}
private _setStatus(status: string): void {
if (this._hasRenderedStatus) {
readline.clearLine(process.stdout, 0);
readline.cursorTo(process.stdout, 0);
} else {
this._hasRenderedStatus = true;
}
this._terminal.write(Colors.bold(Colors.cyan(`Watch Status: ${status}`)));
}
/**
* Determines which, if any, projects (within the selection) have new hashes for files that are not in .gitignore
*/
private async _computeChanged(): Promise<IProjectChangeResult> {
const state: ProjectChangeAnalyzer = new ProjectChangeAnalyzer(this._rushConfiguration);
const previousState: ProjectChangeAnalyzer | undefined = this._previousState;
if (!previousState) {
return {
changedProjects: this._projectsToWatch,
state
};
}
const changedProjects: Set<RushConfigurationProject> = new Set();
for (const project of this._projectsToWatch) {
const [previous, current] = await Promise.all([
previousState._tryGetProjectDependenciesAsync(project, this._terminal),
state._tryGetProjectDependenciesAsync(project, this._terminal)
]);
if (ProjectWatcher._haveProjectDepsChanged(previous, current)) {
// May need to detect if the nature of the change will break the process, e.g. changes to package.json
changedProjects.add(project);
}
}
return {
changedProjects,
state
};
}
private _commitChanges(state: ProjectChangeAnalyzer): void {
this._previousState = state;
if (!this._initialState) {
this._initialState = state;
}
}
/**
* Tests for inequality of the passed Maps. Order invariant.
*
* @returns `true` if the maps are different, `false` otherwise
*/
private static _haveProjectDepsChanged(
prev: Map<string, string> | undefined,
next: Map<string, string> | undefined
): boolean {
if (!prev && !next) {
return false;
}
if (!prev || !next) {
return true;
}
if (prev.size !== next.size) {
return true;
}
for (const [key, value] of prev) {
if (next.get(key) !== value) {
return true;
}
}
return false;
}
private static *_enumeratePathsToWatch(paths: Iterable<string>, prefixLength: number): Iterable<string> {
for (const path of paths) {
const rootSlashIndex: number = path.indexOf('/', prefixLength);
if (rootSlashIndex < 0) { | }
yield path.slice(0, rootSlashIndex);
let slashIndex: number = path.indexOf('/', rootSlashIndex + 1);
while (slashIndex >= 0) {
yield path.slice(0, slashIndex);
slashIndex = path.indexOf('/', slashIndex + 1);
}
}
}
} | yield path;
return; | random_line_split |
ti_eclipsify.py | #!/usr/bin/env python | """
* tidevtools 'ti_eclipsify' - Prepare a Titanium mobile 1.8.0+ project folder
* for importing into Eclipse.
*
* Copyright (c) 2010-2012 by Bill Dawson
* Licensed under the terms of the Apache Public License
* Please see the LICENSE included with this distribution for details.
* http://github.com/billdawson/tidevtools
*
* Just run this script at the top of a project folder.
* See ti_eclipsify.md for more details.
"""
import sys, os, shutil
# Contents for Eclipse/ADT required project files.
project_properties="""target=android-17
apk-configurations=
android.library.reference.1=../android/titanium
android.library.reference.2=../android/modules/accelerometer
android.library.reference.3=../android/modules/analytics
android.library.reference.4=../android/modules/android
android.library.reference.5=../android/modules/app
android.library.reference.6=../android/runtime/common
android.library.reference.7=../android/runtime/v8
android.library.reference.8=../android/modules/calendar
android.library.reference.9=../android/modules/contacts
android.library.reference.10=../android/modules/database
android.library.reference.11=../android/modules/geolocation
android.library.reference.12=../android/modules/filesystem
android.library.reference.13=../android/modules/gesture
android.library.reference.14=../android/modules/locale
android.library.reference.15=../android/modules/map
android.library.reference.16=../android/modules/media
android.library.reference.17=../android/modules/network
android.library.reference.18=../android/modules/platform
android.library.reference.19=../android/modules/ui
android.library.reference.20=../android/modules/utils
android.library.reference.21=../android/modules/xml
"""
dot_classpath_part1="""<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" path="src"/>
<classpathentry kind="src" path="gen"/>
<classpathentry kind="con" path="com.android.ide.eclipse.adt.ANDROID_FRAMEWORK"/>
<classpathentry exported="true" kind="con" path="com.android.ide.eclipse.adt.LIBRARIES"/>
<classpathentry kind="lib" path="/titanium/lib/commons-logging-1.1.1.jar"/>
<classpathentry kind="lib" path="/titanium/lib/ti-commons-codec-1.3.jar"/>
<classpathentry kind="lib" path="/titanium-dist/lib/kroll-apt.jar"/>
<classpathentry kind="lib" path="/titanium-xml/lib/jaxen-1.1.1.jar"/>
<classpathentry kind="lib" path="/titanium/lib/android-support-v4.jar"/>
<classpathentry kind="lib" path="/titanium/lib/thirdparty.jar"/>
"""
dot_classpath_part2="""
<classpathentry kind="output" path="bin/classes"/>
</classpath>
"""
dot_project="""<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>[PROJECT_NAME]</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.ApkBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>com.android.ide.eclipse.adt.AndroidNature</nature>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>
"""
this_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(this_path)
try:
import ticommon
except:
print >> sys.stderr, "[ERROR] Couldn't load ticommon from %s. It should be sitting side-by-side with this script. Message: &%s." % (this_path, err)
sys.exit(1)
############## DEFAULTS ########################
# Put a file named tidevtools_settings.py in the
# same folder as this file, then you can override this
TIMOBILE_SRC = ''
#################################################
if os.path.exists(os.path.join(this_path, 'tidevtools_settings.py')):
execfile(os.path.join(this_path, 'tidevtools_settings.py'))
if not os.path.exists(TIMOBILE_SRC):
print >> sys.stderr, "[ERROR] Could not locate the Titanium Mobile SDK sources. Please create a 'tidevtools_settings.py' in the same folder as this script file and add a string variable named TIMOBILE_SRC which is set to the path where the Titanium Mobile SDK sources are located."
sys.exit(1)
sys.path.append(os.path.join(TIMOBILE_SRC, "support", "android"))
from tilogger import *
log = TiLogger(None, level=TiLogger.INFO)
if not os.path.exists('tiapp.xml'):
log.error("I don't see any tiapp.xml file here. \nLooks like \n%s \nis not a Titanium project folder. Exiting..." % os.getcwd())
sys.exit(1)
resources_folder = os.path.join('.', 'Resources')
if not os.path.exists(resources_folder):
log.error("Couldn't find a Resources folder here.")
sys.exit(1)
resources_android_folder = os.path.join(resources_folder, 'android')
android_folder = os.path.join('.', 'build', 'android')
assets_folder = os.path.join(android_folder, 'assets')
bin_assets_folder = os.path.join(android_folder, "bin", "assets")
libs_folder = os.path.join(android_folder, "libs")
required_folders = (android_folder,
os.path.join(assets_folder),
os.path.join(android_folder, "res"),
os.path.join(android_folder, "gen"))
for required in required_folders:
if not os.path.exists(required):
log.error("You need to build your project one time with Titanium Studio before 'eclipsifying' it.")
sys.exit(1)
# For V8, copy required native libraries to libs/
if not os.path.exists(libs_folder):
os.makedirs(libs_folder)
""" Apparently not required anymore
src_libs_dir = os.path.join(TIMOBILE_SRC, "dist", "android", "libs")
if os.path.exists(src_libs_dir):
for root, dirs, files in os.walk(src_libs_dir):
for filename in files:
full_path = os.path.join(root, filename)
rel_path = os.path.relpath(full_path, src_libs_dir)
dest_file = os.path.join(os.path.abspath(libs_folder), rel_path)
if not os.path.exists(dest_file):
if not os.path.exists(os.path.dirname(dest_file)):
os.makedirs(os.path.dirname(dest_file))
shutil.copyfile(full_path, dest_file)
"""
app_info = ticommon.get_app_info('.')
appid = app_info["id"]
project_name = app_info["name"]
gen_folder = os.path.join(android_folder, 'gen', ticommon.appid_to_path(appid))
if not os.path.exists(gen_folder):
os.makedirs(gen_folder)
src_folder = os.path.abspath(os.path.join(android_folder, 'src', ticommon.appid_to_path(appid)))
r_file = os.path.join(src_folder, 'R.java')
if os.path.exists(r_file):
shutil.copyfile(r_file, os.path.join(gen_folder, 'R.java'))
os.remove(r_file)
# put everything that's in gen/, except R.java, into src/. Eclipse (or the ADT plugin, whatever)
# likes to cleanout the gen/ folder when building, which is really annoying when suddenly all of
# our generated classes disappear.
gen_files = [x for x in os.listdir(gen_folder) if x != 'R.java' and x.endswith('.java')]
if gen_files:
if not os.path.exists(src_folder):
os.makedirs(src_folder)
for one_gen_file in gen_files:
shutil.copyfile(os.path.join(gen_folder, one_gen_file), os.path.join(src_folder, one_gen_file))
os.remove(os.path.join(gen_folder, one_gen_file))
# Get rid of calls to TiVerify in the Application.java
application_java = [f for f in gen_files if f.endswith("Application.java")]
if application_java:
application_java = os.path.abspath(os.path.join(src_folder, application_java[0]))
lines = open(application_java, 'r').readlines()
lines = [l for l in lines if "TiVerify" not in l and "verify.verify" not in l]
open(application_java, "w").write("".join(lines))
# To avoid the Android 2373 warning, set special property in AppInfo.java
appinfo_java = [f for f in gen_files if f.endswith("AppInfo.java")]
if appinfo_java:
appinfo_java = os.path.abspath(os.path.join(src_folder, appinfo_java[0]))
lines = open(appinfo_java, 'r').readlines()
lines_out = []
for l in lines:
if l.endswith("app.getAppProperties();\n"):
lines_out.append(l)
lines_out.append('\t\t\t\t\tproperties.setBool("ti.android.bug2373.disableDetection", true);\n')
lines_out.append('\t\t\t\t\tappProperties.setBool("ti.android.bug2373.disableDetection", true);\n')
else:
lines_out.append(l)
with open(appinfo_java, 'w') as f:
f.write("".join(lines_out))
# Remove all code for starting up the Javascript debugger.
if application_java:
lines = open(application_java, 'r').readlines()
lines = [l for l in lines if "debug" not in l.lower()]
open(application_java, "w").write("".join(lines))
# if bin/assets/app.json is there, copy it to assets/app.json
if os.path.exists(os.path.join(bin_assets_folder, "app.json")):
shutil.copyfile(os.path.join(bin_assets_folder, "app.json"), os.path.join(assets_folder, "app.json"))
# if bin/assets/index.json is there, copy it to assets/index.json
if os.path.exists(os.path.join(bin_assets_folder, "index.json")):
shutil.copyfile(os.path.join(bin_assets_folder, "index.json"), os.path.join(assets_folder, "index.json"))
if ticommon.is_windows():
log.info("Copying Resources and tiapp.xml to assets folder because you're running Windows and therefore we're not going to make symlinks")
shutil.copytree(resources_folder, os.path.join(assets_folder, 'Resources'))
shutil.copy(os.path.join('.', 'tiapp.xml'), assets_folder)
else:
resources_dest = os.path.abspath(os.path.join(assets_folder, 'Resources'))
tiapp_dest = os.path.abspath(os.path.join(assets_folder, 'tiapp.xml'))
if not os.path.exists(resources_dest):
os.symlink(os.path.abspath(resources_folder), resources_dest)
if not os.path.exists(tiapp_dest):
os.symlink(os.path.abspath(os.path.join('.', 'tiapp.xml')), tiapp_dest)
if os.path.exists(resources_android_folder):
res_android_files = os.listdir(resources_android_folder)
if res_android_files:
for one_res_android_file in res_android_files:
one_res_android_file_dest = os.path.join(resources_dest, one_res_android_file)
log.info(one_res_android_file_dest)
if not os.path.exists(one_res_android_file_dest):
one_res_android_file_src = os.path.abspath(os.path.join(resources_android_folder, one_res_android_file))
#log.info("sym: " + one_res_android_file_dest + ' -> ' + one_res_android_file_src)
os.symlink(one_res_android_file_src, one_res_android_file_dest)
# put debuggable=true in Android manifest so you can do device debugging.
import codecs, re
f = codecs.open(os.path.join(android_folder, 'AndroidManifest.xml'), 'r', 'utf-8')
xml = f.read()
f.close()
xml = re.sub(r'android\:debuggable="false"', 'android:debuggable="true"', xml)
f = codecs.open(os.path.join(android_folder, 'AndroidManifest.xml'), 'w', 'utf-8')
f.write(xml)
# Get the modules used in the application
import xml.etree.ElementTree as ET
tree = ET.parse(os.path.join('.', 'tiapp.xml'))
root = tree.getroot()
modules = root.findall(".//modules/module[@platform='android']")
dot_classpath_entries = []
for module in modules:
module_name = module.text
module_version = module.get('version')
module_path = os.path.join('.', 'modules', 'android', module_name, module_version)
module_jar_name = module_name.rsplit('.', 1)[1] + '.jar'
module_jar_path = os.path.abspath(os.path.join(module_path, module_jar_name))
#log.info(module_name + ': ' + module_version + ' | ' + module_jar_name)
#log.info(module_jar_path)
dot_classpath_entries.append(' <classpathentry exported="true" kind="lib" path="' + module_jar_path + '"/>')
module_lib_path = os.path.join(module_path, 'lib')
if os.path.exists(module_lib_path):
module_lib_jars = os.listdir(module_lib_path)
for module_lib_jar in module_lib_jars:
module_lib_jar_path = os.path.abspath(os.path.join(module_lib_path, module_lib_jar))
dot_classpath_entries.append(' <classpathentry exported="true" kind="lib" path="' + module_lib_jar_path + '"/>')
module_libs_dir = os.path.join(module_path, "libs")
if os.path.exists(module_libs_dir):
for root, dirs, files in os.walk(module_libs_dir):
for filename in files:
full_path = os.path.join(root, filename)
rel_path = os.path.relpath(full_path, module_libs_dir)
dest_file = os.path.join(os.path.abspath(libs_folder), rel_path)
if not os.path.exists(dest_file):
if not os.path.exists(os.path.dirname(dest_file)):
os.makedirs(os.path.dirname(dest_file))
shutil.copyfile(full_path, dest_file)
# Write the required Eclipse/ADT .project, .classpath and project.properties files.
f = codecs.open(os.path.join(android_folder, ".classpath"), "w")
dot_classpath = ''.join([dot_classpath_part1, '\n'.join(dot_classpath_entries), dot_classpath_part2])
f.write(dot_classpath)
f.close()
f = codecs.open(os.path.join(android_folder, ".project"), "w")
f.write(dot_project.replace("[PROJECT_NAME]", project_name))
f.close()
f = codecs.open(os.path.join(android_folder, "project.properties"), "w")
f.write(project_properties)
f.close()
# Fixup Android library project paths in project.properties
props_file = os.path.join(android_folder, "project.properties")
f = codecs.open(props_file, 'r', 'utf-8')
lines = f.readlines()
newlines = []
f.close()
for line in lines:
if not line.startswith('android.library.reference'):
newlines.append(line)
continue
# Special case: the titanium module is only one folder
# down from "android" (other modules are two folders down)
titanium_module = "android%stitanium" % os.sep
if line.strip().endswith(titanium_module):
rel_path = titanium_module
else:
rel_path = os.sep.join(line.strip().split(os.sep)[-3:])
if not rel_path.startswith("android"):
newlines.append(line)
continue
full_path = os.path.join(TIMOBILE_SRC, rel_path)
if not os.path.exists(full_path):
newlines.append(line)
continue
newlines.append("%s=%s\n" % (line.split("=")[0], os.path.relpath(full_path, android_folder)))
f = codecs.open(props_file, 'w', 'utf-8')
f.write("".join(newlines))
f.close() | random_line_split | |
ti_eclipsify.py | #!/usr/bin/env python
"""
* tidevtools 'ti_eclipsify' - Prepare a Titanium mobile 1.8.0+ project folder
* for importing into Eclipse.
*
* Copyright (c) 2010-2012 by Bill Dawson
* Licensed under the terms of the Apache Public License
* Please see the LICENSE included with this distribution for details.
* http://github.com/billdawson/tidevtools
*
* Just run this script at the top of a project folder.
* See ti_eclipsify.md for more details.
"""
import sys, os, shutil
# Contents for Eclipse/ADT required project files.
project_properties="""target=android-17
apk-configurations=
android.library.reference.1=../android/titanium
android.library.reference.2=../android/modules/accelerometer
android.library.reference.3=../android/modules/analytics
android.library.reference.4=../android/modules/android
android.library.reference.5=../android/modules/app
android.library.reference.6=../android/runtime/common
android.library.reference.7=../android/runtime/v8
android.library.reference.8=../android/modules/calendar
android.library.reference.9=../android/modules/contacts
android.library.reference.10=../android/modules/database
android.library.reference.11=../android/modules/geolocation
android.library.reference.12=../android/modules/filesystem
android.library.reference.13=../android/modules/gesture
android.library.reference.14=../android/modules/locale
android.library.reference.15=../android/modules/map
android.library.reference.16=../android/modules/media
android.library.reference.17=../android/modules/network
android.library.reference.18=../android/modules/platform
android.library.reference.19=../android/modules/ui
android.library.reference.20=../android/modules/utils
android.library.reference.21=../android/modules/xml
"""
dot_classpath_part1="""<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" path="src"/>
<classpathentry kind="src" path="gen"/>
<classpathentry kind="con" path="com.android.ide.eclipse.adt.ANDROID_FRAMEWORK"/>
<classpathentry exported="true" kind="con" path="com.android.ide.eclipse.adt.LIBRARIES"/>
<classpathentry kind="lib" path="/titanium/lib/commons-logging-1.1.1.jar"/>
<classpathentry kind="lib" path="/titanium/lib/ti-commons-codec-1.3.jar"/>
<classpathentry kind="lib" path="/titanium-dist/lib/kroll-apt.jar"/>
<classpathentry kind="lib" path="/titanium-xml/lib/jaxen-1.1.1.jar"/>
<classpathentry kind="lib" path="/titanium/lib/android-support-v4.jar"/>
<classpathentry kind="lib" path="/titanium/lib/thirdparty.jar"/>
"""
dot_classpath_part2="""
<classpathentry kind="output" path="bin/classes"/>
</classpath>
"""
dot_project="""<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>[PROJECT_NAME]</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>com.android.ide.eclipse.adt.ApkBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>com.android.ide.eclipse.adt.AndroidNature</nature>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>
"""
this_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(this_path)
try:
import ticommon
except:
print >> sys.stderr, "[ERROR] Couldn't load ticommon from %s. It should be sitting side-by-side with this script. Message: &%s." % (this_path, err)
sys.exit(1)
############## DEFAULTS ########################
# Put a file named tidevtools_settings.py in the
# same folder as this file, then you can override this
TIMOBILE_SRC = ''
#################################################
if os.path.exists(os.path.join(this_path, 'tidevtools_settings.py')):
execfile(os.path.join(this_path, 'tidevtools_settings.py'))
if not os.path.exists(TIMOBILE_SRC):
print >> sys.stderr, "[ERROR] Could not locate the Titanium Mobile SDK sources. Please create a 'tidevtools_settings.py' in the same folder as this script file and add a string variable named TIMOBILE_SRC which is set to the path where the Titanium Mobile SDK sources are located."
sys.exit(1)
sys.path.append(os.path.join(TIMOBILE_SRC, "support", "android"))
from tilogger import *
log = TiLogger(None, level=TiLogger.INFO)
if not os.path.exists('tiapp.xml'):
log.error("I don't see any tiapp.xml file here. \nLooks like \n%s \nis not a Titanium project folder. Exiting..." % os.getcwd())
sys.exit(1)
resources_folder = os.path.join('.', 'Resources')
if not os.path.exists(resources_folder):
log.error("Couldn't find a Resources folder here.")
sys.exit(1)
resources_android_folder = os.path.join(resources_folder, 'android')
android_folder = os.path.join('.', 'build', 'android')
assets_folder = os.path.join(android_folder, 'assets')
bin_assets_folder = os.path.join(android_folder, "bin", "assets")
libs_folder = os.path.join(android_folder, "libs")
required_folders = (android_folder,
os.path.join(assets_folder),
os.path.join(android_folder, "res"),
os.path.join(android_folder, "gen"))
for required in required_folders:
if not os.path.exists(required):
log.error("You need to build your project one time with Titanium Studio before 'eclipsifying' it.")
sys.exit(1)
# For V8, copy required native libraries to libs/
if not os.path.exists(libs_folder):
os.makedirs(libs_folder)
""" Apparently not required anymore
src_libs_dir = os.path.join(TIMOBILE_SRC, "dist", "android", "libs")
if os.path.exists(src_libs_dir):
for root, dirs, files in os.walk(src_libs_dir):
for filename in files:
full_path = os.path.join(root, filename)
rel_path = os.path.relpath(full_path, src_libs_dir)
dest_file = os.path.join(os.path.abspath(libs_folder), rel_path)
if not os.path.exists(dest_file):
if not os.path.exists(os.path.dirname(dest_file)):
os.makedirs(os.path.dirname(dest_file))
shutil.copyfile(full_path, dest_file)
"""
app_info = ticommon.get_app_info('.')
appid = app_info["id"]
project_name = app_info["name"]
gen_folder = os.path.join(android_folder, 'gen', ticommon.appid_to_path(appid))
if not os.path.exists(gen_folder):
os.makedirs(gen_folder)
src_folder = os.path.abspath(os.path.join(android_folder, 'src', ticommon.appid_to_path(appid)))
r_file = os.path.join(src_folder, 'R.java')
if os.path.exists(r_file):
shutil.copyfile(r_file, os.path.join(gen_folder, 'R.java'))
os.remove(r_file)
# put everything that's in gen/, except R.java, into src/. Eclipse (or the ADT plugin, whatever)
# likes to cleanout the gen/ folder when building, which is really annoying when suddenly all of
# our generated classes disappear.
gen_files = [x for x in os.listdir(gen_folder) if x != 'R.java' and x.endswith('.java')]
if gen_files:
if not os.path.exists(src_folder):
os.makedirs(src_folder)
for one_gen_file in gen_files:
shutil.copyfile(os.path.join(gen_folder, one_gen_file), os.path.join(src_folder, one_gen_file))
os.remove(os.path.join(gen_folder, one_gen_file))
# Get rid of calls to TiVerify in the Application.java
application_java = [f for f in gen_files if f.endswith("Application.java")]
if application_java:
application_java = os.path.abspath(os.path.join(src_folder, application_java[0]))
lines = open(application_java, 'r').readlines()
lines = [l for l in lines if "TiVerify" not in l and "verify.verify" not in l]
open(application_java, "w").write("".join(lines))
# To avoid the Android 2373 warning, set special property in AppInfo.java
appinfo_java = [f for f in gen_files if f.endswith("AppInfo.java")]
if appinfo_java:
appinfo_java = os.path.abspath(os.path.join(src_folder, appinfo_java[0]))
lines = open(appinfo_java, 'r').readlines()
lines_out = []
for l in lines:
if l.endswith("app.getAppProperties();\n"):
lines_out.append(l)
lines_out.append('\t\t\t\t\tproperties.setBool("ti.android.bug2373.disableDetection", true);\n')
lines_out.append('\t\t\t\t\tappProperties.setBool("ti.android.bug2373.disableDetection", true);\n')
else:
lines_out.append(l)
with open(appinfo_java, 'w') as f:
f.write("".join(lines_out))
# Remove all code for starting up the Javascript debugger.
if application_java:
lines = open(application_java, 'r').readlines()
lines = [l for l in lines if "debug" not in l.lower()]
open(application_java, "w").write("".join(lines))
# if bin/assets/app.json is there, copy it to assets/app.json
if os.path.exists(os.path.join(bin_assets_folder, "app.json")):
shutil.copyfile(os.path.join(bin_assets_folder, "app.json"), os.path.join(assets_folder, "app.json"))
# if bin/assets/index.json is there, copy it to assets/index.json
if os.path.exists(os.path.join(bin_assets_folder, "index.json")):
shutil.copyfile(os.path.join(bin_assets_folder, "index.json"), os.path.join(assets_folder, "index.json"))
if ticommon.is_windows():
log.info("Copying Resources and tiapp.xml to assets folder because you're running Windows and therefore we're not going to make symlinks")
shutil.copytree(resources_folder, os.path.join(assets_folder, 'Resources'))
shutil.copy(os.path.join('.', 'tiapp.xml'), assets_folder)
else:
resources_dest = os.path.abspath(os.path.join(assets_folder, 'Resources'))
tiapp_dest = os.path.abspath(os.path.join(assets_folder, 'tiapp.xml'))
if not os.path.exists(resources_dest):
os.symlink(os.path.abspath(resources_folder), resources_dest)
if not os.path.exists(tiapp_dest):
os.symlink(os.path.abspath(os.path.join('.', 'tiapp.xml')), tiapp_dest)
if os.path.exists(resources_android_folder):
res_android_files = os.listdir(resources_android_folder)
if res_android_files:
for one_res_android_file in res_android_files:
one_res_android_file_dest = os.path.join(resources_dest, one_res_android_file)
log.info(one_res_android_file_dest)
if not os.path.exists(one_res_android_file_dest):
one_res_android_file_src = os.path.abspath(os.path.join(resources_android_folder, one_res_android_file))
#log.info("sym: " + one_res_android_file_dest + ' -> ' + one_res_android_file_src)
os.symlink(one_res_android_file_src, one_res_android_file_dest)
# put debuggable=true in Android manifest so you can do device debugging.
import codecs, re
f = codecs.open(os.path.join(android_folder, 'AndroidManifest.xml'), 'r', 'utf-8')
xml = f.read()
f.close()
xml = re.sub(r'android\:debuggable="false"', 'android:debuggable="true"', xml)
f = codecs.open(os.path.join(android_folder, 'AndroidManifest.xml'), 'w', 'utf-8')
f.write(xml)
# Get the modules used in the application
import xml.etree.ElementTree as ET
tree = ET.parse(os.path.join('.', 'tiapp.xml'))
root = tree.getroot()
modules = root.findall(".//modules/module[@platform='android']")
dot_classpath_entries = []
for module in modules:
module_name = module.text
module_version = module.get('version')
module_path = os.path.join('.', 'modules', 'android', module_name, module_version)
module_jar_name = module_name.rsplit('.', 1)[1] + '.jar'
module_jar_path = os.path.abspath(os.path.join(module_path, module_jar_name))
#log.info(module_name + ': ' + module_version + ' | ' + module_jar_name)
#log.info(module_jar_path)
dot_classpath_entries.append(' <classpathentry exported="true" kind="lib" path="' + module_jar_path + '"/>')
module_lib_path = os.path.join(module_path, 'lib')
if os.path.exists(module_lib_path):
module_lib_jars = os.listdir(module_lib_path)
for module_lib_jar in module_lib_jars:
module_lib_jar_path = os.path.abspath(os.path.join(module_lib_path, module_lib_jar))
dot_classpath_entries.append(' <classpathentry exported="true" kind="lib" path="' + module_lib_jar_path + '"/>')
module_libs_dir = os.path.join(module_path, "libs")
if os.path.exists(module_libs_dir):
for root, dirs, files in os.walk(module_libs_dir):
for filename in files:
full_path = os.path.join(root, filename)
rel_path = os.path.relpath(full_path, module_libs_dir)
dest_file = os.path.join(os.path.abspath(libs_folder), rel_path)
if not os.path.exists(dest_file):
if not os.path.exists(os.path.dirname(dest_file)):
os.makedirs(os.path.dirname(dest_file))
shutil.copyfile(full_path, dest_file)
# Write the required Eclipse/ADT .project, .classpath and project.properties files.
f = codecs.open(os.path.join(android_folder, ".classpath"), "w")
dot_classpath = ''.join([dot_classpath_part1, '\n'.join(dot_classpath_entries), dot_classpath_part2])
f.write(dot_classpath)
f.close()
f = codecs.open(os.path.join(android_folder, ".project"), "w")
f.write(dot_project.replace("[PROJECT_NAME]", project_name))
f.close()
f = codecs.open(os.path.join(android_folder, "project.properties"), "w")
f.write(project_properties)
f.close()
# Fixup Android library project paths in project.properties
props_file = os.path.join(android_folder, "project.properties")
f = codecs.open(props_file, 'r', 'utf-8')
lines = f.readlines()
newlines = []
f.close()
for line in lines:
if not line.startswith('android.library.reference'):
newlines.append(line)
continue
# Special case: the titanium module is only one folder
# down from "android" (other modules are two folders down)
titanium_module = "android%stitanium" % os.sep
if line.strip().endswith(titanium_module):
rel_path = titanium_module
else:
rel_path = os.sep.join(line.strip().split(os.sep)[-3:])
if not rel_path.startswith("android"):
|
full_path = os.path.join(TIMOBILE_SRC, rel_path)
if not os.path.exists(full_path):
newlines.append(line)
continue
newlines.append("%s=%s\n" % (line.split("=")[0], os.path.relpath(full_path, android_folder)))
f = codecs.open(props_file, 'w', 'utf-8')
f.write("".join(newlines))
f.close()
| newlines.append(line)
continue | conditional_block |
arp_gmp_intf_entry.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: arp_gmp_intf_entry.proto
package cisco_ios_xr_ipv4_arp_oper_arp_gmp_vrfs_vrf_interface_configured_ips_interface_configured_ip
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type ArpGmpIntfEntry_KEYS struct {
VrfName string `protobuf:"bytes,1,opt,name=vrf_name,json=vrfName,proto3" json:"vrf_name,omitempty"`
InterfaceName string `protobuf:"bytes,2,opt,name=interface_name,json=interfaceName,proto3" json:"interface_name,omitempty"`
Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpIntfEntry_KEYS) Reset() { *m = ArpGmpIntfEntry_KEYS{} }
func (m *ArpGmpIntfEntry_KEYS) String() string { return proto.CompactTextString(m) }
func (*ArpGmpIntfEntry_KEYS) ProtoMessage() {}
func (*ArpGmpIntfEntry_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{0}
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Unmarshal(m, b)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Marshal(b, m, deterministic)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpIntfEntry_KEYS.Merge(m, src)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Size() int {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Size(m)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpIntfEntry_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpIntfEntry_KEYS proto.InternalMessageInfo
func (m *ArpGmpIntfEntry_KEYS) GetVrfName() string {
if m != nil {
return m.VrfName
}
return ""
}
func (m *ArpGmpIntfEntry_KEYS) GetInterfaceName() string {
if m != nil {
return m.InterfaceName
}
return ""
}
func (m *ArpGmpIntfEntry_KEYS) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
type ArpGmpConfigEntry struct {
IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
HardwareAddress string `protobuf:"bytes,2,opt,name=hardware_address,json=hardwareAddress,proto3" json:"hardware_address,omitempty"`
EncapsulationType string `protobuf:"bytes,3,opt,name=encapsulation_type,json=encapsulationType,proto3" json:"encapsulation_type,omitempty"`
EntryType string `protobuf:"bytes,4,opt,name=entry_type,json=entryType,proto3" json:"entry_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpConfigEntry) Reset() { *m = ArpGmpConfigEntry{} }
func (m *ArpGmpConfigEntry) String() string { return proto.CompactTextString(m) }
func (*ArpGmpConfigEntry) ProtoMessage() {}
func (*ArpGmpConfigEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{1}
}
func (m *ArpGmpConfigEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpConfigEntry.Unmarshal(m, b)
}
func (m *ArpGmpConfigEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpConfigEntry.Marshal(b, m, deterministic)
}
func (m *ArpGmpConfigEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpConfigEntry.Merge(m, src)
}
func (m *ArpGmpConfigEntry) XXX_Size() int {
return xxx_messageInfo_ArpGmpConfigEntry.Size(m)
}
func (m *ArpGmpConfigEntry) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpConfigEntry.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpConfigEntry proto.InternalMessageInfo
func (m *ArpGmpConfigEntry) GetIpAddress() string {
if m != nil {
return m.IpAddress
}
return ""
}
func (m *ArpGmpConfigEntry) GetHardwareAddress() string {
if m != nil {
return m.HardwareAddress
}
return ""
}
func (m *ArpGmpConfigEntry) GetEncapsulationType() string {
if m != nil {
return m.EncapsulationType
}
return ""
}
func (m *ArpGmpConfigEntry) GetEntryType() string {
if m != nil {
return m.EntryType
}
return ""
}
type ArpGmpIntfEntry struct {
InterfaceNameXr string `protobuf:"bytes,50,opt,name=interface_name_xr,json=interfaceNameXr,proto3" json:"interface_name_xr,omitempty"`
ReferenceCount uint32 `protobuf:"varint,51,opt,name=reference_count,json=referenceCount,proto3" json:"reference_count,omitempty"`
AssociatedConfigurationEntry *ArpGmpConfigEntry `protobuf:"bytes,52,opt,name=associated_configuration_entry,json=associatedConfigurationEntry,proto3" json:"associated_configuration_entry,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpIntfEntry) Reset() { *m = ArpGmpIntfEntry{} }
func (m *ArpGmpIntfEntry) String() string { return proto.CompactTextString(m) }
func (*ArpGmpIntfEntry) ProtoMessage() {}
func (*ArpGmpIntfEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{2}
}
func (m *ArpGmpIntfEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpIntfEntry.Unmarshal(m, b)
}
func (m *ArpGmpIntfEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpIntfEntry.Marshal(b, m, deterministic)
}
func (m *ArpGmpIntfEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpIntfEntry.Merge(m, src)
}
func (m *ArpGmpIntfEntry) XXX_Size() int {
return xxx_messageInfo_ArpGmpIntfEntry.Size(m)
}
func (m *ArpGmpIntfEntry) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpIntfEntry.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpIntfEntry proto.InternalMessageInfo
func (m *ArpGmpIntfEntry) GetInterfaceNameXr() string {
if m != nil {
return m.InterfaceNameXr
}
return ""
}
func (m *ArpGmpIntfEntry) GetReferenceCount() uint32 {
if m != nil {
return m.ReferenceCount
}
return 0
}
func (m *ArpGmpIntfEntry) GetAssociatedConfigurationEntry() *ArpGmpConfigEntry {
if m != nil {
return m.AssociatedConfigurationEntry
}
return nil
}
func | () {
proto.RegisterType((*ArpGmpIntfEntry_KEYS)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_intf_entry_KEYS")
proto.RegisterType((*ArpGmpConfigEntry)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_config_entry")
proto.RegisterType((*ArpGmpIntfEntry)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_intf_entry")
}
func init() { proto.RegisterFile("arp_gmp_intf_entry.proto", fileDescriptor_ec3e2d5120a99909) }
var fileDescriptor_ec3e2d5120a99909 = []byte{
// 361 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xc1, 0x4a, 0x3b, 0x31,
0x10, 0xc6, 0xd9, 0xfe, 0xff, 0x58, 0x1b, 0x69, 0x6b, 0x83, 0xe0, 0x0a, 0x2a, 0xa5, 0x20, 0x56,
0xc1, 0x3d, 0xb4, 0x7d, 0x01, 0x29, 0x3d, 0x09, 0x1e, 0xaa, 0x07, 0x05, 0x21, 0xc4, 0xdd, 0xd9,
0x1a, 0xb0, 0x49, 0x98, 0x64, 0xd7, 0xf6, 0x7d, 0x3c, 0x7b, 0xf6, 0xf1, 0x64, 0x93, 0xee, 0xd6,
0x52, 0x3d, 0x7a, 0x59, 0xd8, 0x6f, 0xbe, 0xe4, 0xf7, 0x65, 0x66, 0x48, 0xc8, 0x51, 0xb3, 0xd9,
0x5c, 0x33, 0x21, 0x6d, 0xca, 0x40, 0x5a, 0x5c, 0x46, 0x1a, 0x95, 0x55, 0xf4, 0x29, 0x16, 0x26,
0x56, 0x4c, 0x28, 0xc3, 0x16, 0xc8, 0x84, 0xce, 0x47, 0xac, 0xf0, 0x2a, 0x0d, 0x18, 0xad, 0x0e,
0x45, 0x39, 0xa6, 0xa6, 0xf8, 0x44, 0x42, 0x5a, 0xc0, 0x94, 0xc7, 0xc0, 0x62, 0x25, 0x53, 0x31,
0xcb, 0x10, 0x12, 0x26, 0xb4, 0xf9, 0xad, 0xd0, 0xcb, 0xc8, 0xe1, 0x36, 0x99, 0xdd, 0x4c, 0x1e,
0xef, 0xe8, 0x11, 0xd9, 0xcd, 0x31, 0x65, 0x92, 0xcf, 0x21, 0x0c, 0xba, 0x41, 0xbf, 0x31, 0xad,
0xe7, 0x98, 0xde, 0xf2, 0x39, 0xd0, 0x33, 0xd2, 0x5a, 0x5f, 0xe8, 0x0c, 0x35, 0x67, 0x68, 0x56,
0xaa, 0xb3, 0x85, 0xa4, 0xce, 0x93, 0x04, 0xc1, 0x98, 0xf0, 0x9f, 0xbf, 0x60, 0xf5, 0xdb, 0xfb,
0x08, 0xc8, 0x41, 0xc9, 0xf5, 0x81, 0x3c, 0x99, 0x9e, 0x10, 0x22, 0x34, 0x2b, 0x4f, 0x79, 0x6c,
0x43, 0xe8, 0x6b, 0x2f, 0xd0, 0x0b, 0xb2, 0xff, 0xc2, 0x31, 0x79, 0xe3, 0x08, 0x95, 0xc9, 0xa3,
0xdb, 0xa5, 0x5e, 0x5a, 0xaf, 0x08, 0x05, 0x19, 0x73, 0x6d, 0xb2, 0x57, 0x6e, 0x85, 0x92, 0xcc,
0x2e, 0x35, 0xac, 0x72, 0x74, 0x36, 0x2a, 0xf7, 0x4b, 0x0d, 0x05, 0xd8, 0xbf, 0xdd, 0xd9, 0xfe,
0x7b, 0xb0, 0x53, 0x8a, 0x72, 0xef, 0xbd, 0x46, 0xe8, 0x76, 0xa3, 0xe8, 0x25, 0xe9, 0x6c, 0x36,
0x82, 0x2d, 0x30, 0x1c, 0xf8, 0x40, 0x1b, 0xbd, 0x78, 0x40, 0x7a, 0x4e, 0xda, 0x08, 0x29, 0x20,
0x48, 0x37, 0x85, 0x4c, 0xda, 0x70, 0xd8, 0x0d, 0xfa, 0xcd, 0x69, 0xab, 0x92, 0xc7, 0x85, 0x4a,
0x3f, 0x03, 0x72, 0xca, 0x8d, 0x51, 0xb1, 0xe0, 0x16, 0x92, 0x6a, 0x60, 0xfe, 0x15, 0x8e, 0x1b,
0x8e, 0xba, 0x41, 0x7f, 0x6f, 0x80, 0xd1, 0x5f, 0xee, 0x46, 0xf4, 0xd3, 0x80, 0xa6, 0xc7, 0xeb,
0x64, 0xe3, 0xef, 0xc1, 0x26, 0x45, 0xf5, 0x79, 0xc7, 0xed, 0xec, 0xf0, 0x2b, 0x00, 0x00, 0xff,
0xff, 0x3b, 0x10, 0x86, 0xee, 0xcf, 0x02, 0x00, 0x00,
}
| init | identifier_name |
arp_gmp_intf_entry.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: arp_gmp_intf_entry.proto
package cisco_ios_xr_ipv4_arp_oper_arp_gmp_vrfs_vrf_interface_configured_ips_interface_configured_ip
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type ArpGmpIntfEntry_KEYS struct {
VrfName string `protobuf:"bytes,1,opt,name=vrf_name,json=vrfName,proto3" json:"vrf_name,omitempty"`
InterfaceName string `protobuf:"bytes,2,opt,name=interface_name,json=interfaceName,proto3" json:"interface_name,omitempty"`
Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpIntfEntry_KEYS) Reset() { *m = ArpGmpIntfEntry_KEYS{} }
func (m *ArpGmpIntfEntry_KEYS) String() string { return proto.CompactTextString(m) }
func (*ArpGmpIntfEntry_KEYS) ProtoMessage() {}
func (*ArpGmpIntfEntry_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{0}
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Unmarshal(m, b)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Marshal(b, m, deterministic)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpIntfEntry_KEYS.Merge(m, src)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Size() int {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Size(m)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpIntfEntry_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpIntfEntry_KEYS proto.InternalMessageInfo
func (m *ArpGmpIntfEntry_KEYS) GetVrfName() string {
if m != nil {
return m.VrfName
}
return ""
}
func (m *ArpGmpIntfEntry_KEYS) GetInterfaceName() string {
if m != nil {
return m.InterfaceName
}
return ""
}
func (m *ArpGmpIntfEntry_KEYS) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
type ArpGmpConfigEntry struct {
IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
HardwareAddress string `protobuf:"bytes,2,opt,name=hardware_address,json=hardwareAddress,proto3" json:"hardware_address,omitempty"`
EncapsulationType string `protobuf:"bytes,3,opt,name=encapsulation_type,json=encapsulationType,proto3" json:"encapsulation_type,omitempty"`
EntryType string `protobuf:"bytes,4,opt,name=entry_type,json=entryType,proto3" json:"entry_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpConfigEntry) Reset() { *m = ArpGmpConfigEntry{} }
func (m *ArpGmpConfigEntry) String() string { return proto.CompactTextString(m) }
func (*ArpGmpConfigEntry) ProtoMessage() {}
func (*ArpGmpConfigEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{1}
} | func (m *ArpGmpConfigEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpConfigEntry.Marshal(b, m, deterministic)
}
func (m *ArpGmpConfigEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpConfigEntry.Merge(m, src)
}
func (m *ArpGmpConfigEntry) XXX_Size() int {
return xxx_messageInfo_ArpGmpConfigEntry.Size(m)
}
func (m *ArpGmpConfigEntry) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpConfigEntry.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpConfigEntry proto.InternalMessageInfo
func (m *ArpGmpConfigEntry) GetIpAddress() string {
if m != nil {
return m.IpAddress
}
return ""
}
func (m *ArpGmpConfigEntry) GetHardwareAddress() string {
if m != nil {
return m.HardwareAddress
}
return ""
}
func (m *ArpGmpConfigEntry) GetEncapsulationType() string {
if m != nil {
return m.EncapsulationType
}
return ""
}
func (m *ArpGmpConfigEntry) GetEntryType() string {
if m != nil {
return m.EntryType
}
return ""
}
type ArpGmpIntfEntry struct {
InterfaceNameXr string `protobuf:"bytes,50,opt,name=interface_name_xr,json=interfaceNameXr,proto3" json:"interface_name_xr,omitempty"`
ReferenceCount uint32 `protobuf:"varint,51,opt,name=reference_count,json=referenceCount,proto3" json:"reference_count,omitempty"`
AssociatedConfigurationEntry *ArpGmpConfigEntry `protobuf:"bytes,52,opt,name=associated_configuration_entry,json=associatedConfigurationEntry,proto3" json:"associated_configuration_entry,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpIntfEntry) Reset() { *m = ArpGmpIntfEntry{} }
func (m *ArpGmpIntfEntry) String() string { return proto.CompactTextString(m) }
func (*ArpGmpIntfEntry) ProtoMessage() {}
func (*ArpGmpIntfEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{2}
}
func (m *ArpGmpIntfEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpIntfEntry.Unmarshal(m, b)
}
func (m *ArpGmpIntfEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpIntfEntry.Marshal(b, m, deterministic)
}
func (m *ArpGmpIntfEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpIntfEntry.Merge(m, src)
}
func (m *ArpGmpIntfEntry) XXX_Size() int {
return xxx_messageInfo_ArpGmpIntfEntry.Size(m)
}
func (m *ArpGmpIntfEntry) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpIntfEntry.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpIntfEntry proto.InternalMessageInfo
func (m *ArpGmpIntfEntry) GetInterfaceNameXr() string {
if m != nil {
return m.InterfaceNameXr
}
return ""
}
func (m *ArpGmpIntfEntry) GetReferenceCount() uint32 {
if m != nil {
return m.ReferenceCount
}
return 0
}
func (m *ArpGmpIntfEntry) GetAssociatedConfigurationEntry() *ArpGmpConfigEntry {
if m != nil {
return m.AssociatedConfigurationEntry
}
return nil
}
func init() {
proto.RegisterType((*ArpGmpIntfEntry_KEYS)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_intf_entry_KEYS")
proto.RegisterType((*ArpGmpConfigEntry)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_config_entry")
proto.RegisterType((*ArpGmpIntfEntry)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_intf_entry")
}
func init() { proto.RegisterFile("arp_gmp_intf_entry.proto", fileDescriptor_ec3e2d5120a99909) }
var fileDescriptor_ec3e2d5120a99909 = []byte{
// 361 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xc1, 0x4a, 0x3b, 0x31,
0x10, 0xc6, 0xd9, 0xfe, 0xff, 0x58, 0x1b, 0x69, 0x6b, 0x83, 0xe0, 0x0a, 0x2a, 0xa5, 0x20, 0x56,
0xc1, 0x3d, 0xb4, 0x7d, 0x01, 0x29, 0x3d, 0x09, 0x1e, 0xaa, 0x07, 0x05, 0x21, 0xc4, 0xdd, 0xd9,
0x1a, 0xb0, 0x49, 0x98, 0x64, 0xd7, 0xf6, 0x7d, 0x3c, 0x7b, 0xf6, 0xf1, 0x64, 0x93, 0xee, 0xd6,
0x52, 0x3d, 0x7a, 0x59, 0xd8, 0x6f, 0xbe, 0xe4, 0xf7, 0x65, 0x66, 0x48, 0xc8, 0x51, 0xb3, 0xd9,
0x5c, 0x33, 0x21, 0x6d, 0xca, 0x40, 0x5a, 0x5c, 0x46, 0x1a, 0x95, 0x55, 0xf4, 0x29, 0x16, 0x26,
0x56, 0x4c, 0x28, 0xc3, 0x16, 0xc8, 0x84, 0xce, 0x47, 0xac, 0xf0, 0x2a, 0x0d, 0x18, 0xad, 0x0e,
0x45, 0x39, 0xa6, 0xa6, 0xf8, 0x44, 0x42, 0x5a, 0xc0, 0x94, 0xc7, 0xc0, 0x62, 0x25, 0x53, 0x31,
0xcb, 0x10, 0x12, 0x26, 0xb4, 0xf9, 0xad, 0xd0, 0xcb, 0xc8, 0xe1, 0x36, 0x99, 0xdd, 0x4c, 0x1e,
0xef, 0xe8, 0x11, 0xd9, 0xcd, 0x31, 0x65, 0x92, 0xcf, 0x21, 0x0c, 0xba, 0x41, 0xbf, 0x31, 0xad,
0xe7, 0x98, 0xde, 0xf2, 0x39, 0xd0, 0x33, 0xd2, 0x5a, 0x5f, 0xe8, 0x0c, 0x35, 0x67, 0x68, 0x56,
0xaa, 0xb3, 0x85, 0xa4, 0xce, 0x93, 0x04, 0xc1, 0x98, 0xf0, 0x9f, 0xbf, 0x60, 0xf5, 0xdb, 0xfb,
0x08, 0xc8, 0x41, 0xc9, 0xf5, 0x81, 0x3c, 0x99, 0x9e, 0x10, 0x22, 0x34, 0x2b, 0x4f, 0x79, 0x6c,
0x43, 0xe8, 0x6b, 0x2f, 0xd0, 0x0b, 0xb2, 0xff, 0xc2, 0x31, 0x79, 0xe3, 0x08, 0x95, 0xc9, 0xa3,
0xdb, 0xa5, 0x5e, 0x5a, 0xaf, 0x08, 0x05, 0x19, 0x73, 0x6d, 0xb2, 0x57, 0x6e, 0x85, 0x92, 0xcc,
0x2e, 0x35, 0xac, 0x72, 0x74, 0x36, 0x2a, 0xf7, 0x4b, 0x0d, 0x05, 0xd8, 0xbf, 0xdd, 0xd9, 0xfe,
0x7b, 0xb0, 0x53, 0x8a, 0x72, 0xef, 0xbd, 0x46, 0xe8, 0x76, 0xa3, 0xe8, 0x25, 0xe9, 0x6c, 0x36,
0x82, 0x2d, 0x30, 0x1c, 0xf8, 0x40, 0x1b, 0xbd, 0x78, 0x40, 0x7a, 0x4e, 0xda, 0x08, 0x29, 0x20,
0x48, 0x37, 0x85, 0x4c, 0xda, 0x70, 0xd8, 0x0d, 0xfa, 0xcd, 0x69, 0xab, 0x92, 0xc7, 0x85, 0x4a,
0x3f, 0x03, 0x72, 0xca, 0x8d, 0x51, 0xb1, 0xe0, 0x16, 0x92, 0x6a, 0x60, 0xfe, 0x15, 0x8e, 0x1b,
0x8e, 0xba, 0x41, 0x7f, 0x6f, 0x80, 0xd1, 0x5f, 0xee, 0x46, 0xf4, 0xd3, 0x80, 0xa6, 0xc7, 0xeb,
0x64, 0xe3, 0xef, 0xc1, 0x26, 0x45, 0xf5, 0x79, 0xc7, 0xed, 0xec, 0xf0, 0x2b, 0x00, 0x00, 0xff,
0xff, 0x3b, 0x10, 0x86, 0xee, 0xcf, 0x02, 0x00, 0x00,
} |
func (m *ArpGmpConfigEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpConfigEntry.Unmarshal(m, b)
} | random_line_split |
arp_gmp_intf_entry.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: arp_gmp_intf_entry.proto
package cisco_ios_xr_ipv4_arp_oper_arp_gmp_vrfs_vrf_interface_configured_ips_interface_configured_ip
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type ArpGmpIntfEntry_KEYS struct {
VrfName string `protobuf:"bytes,1,opt,name=vrf_name,json=vrfName,proto3" json:"vrf_name,omitempty"`
InterfaceName string `protobuf:"bytes,2,opt,name=interface_name,json=interfaceName,proto3" json:"interface_name,omitempty"`
Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpIntfEntry_KEYS) Reset() { *m = ArpGmpIntfEntry_KEYS{} }
func (m *ArpGmpIntfEntry_KEYS) String() string { return proto.CompactTextString(m) }
func (*ArpGmpIntfEntry_KEYS) ProtoMessage() {}
func (*ArpGmpIntfEntry_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{0}
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Unmarshal(m, b)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Marshal(b, m, deterministic)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpIntfEntry_KEYS.Merge(m, src)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Size() int {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Size(m)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpIntfEntry_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpIntfEntry_KEYS proto.InternalMessageInfo
func (m *ArpGmpIntfEntry_KEYS) GetVrfName() string {
if m != nil {
return m.VrfName
}
return ""
}
func (m *ArpGmpIntfEntry_KEYS) GetInterfaceName() string {
if m != nil {
return m.InterfaceName
}
return ""
}
func (m *ArpGmpIntfEntry_KEYS) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
type ArpGmpConfigEntry struct {
IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
HardwareAddress string `protobuf:"bytes,2,opt,name=hardware_address,json=hardwareAddress,proto3" json:"hardware_address,omitempty"`
EncapsulationType string `protobuf:"bytes,3,opt,name=encapsulation_type,json=encapsulationType,proto3" json:"encapsulation_type,omitempty"`
EntryType string `protobuf:"bytes,4,opt,name=entry_type,json=entryType,proto3" json:"entry_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpConfigEntry) Reset() { *m = ArpGmpConfigEntry{} }
func (m *ArpGmpConfigEntry) String() string { return proto.CompactTextString(m) }
func (*ArpGmpConfigEntry) ProtoMessage() {}
func (*ArpGmpConfigEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{1}
}
func (m *ArpGmpConfigEntry) XXX_Unmarshal(b []byte) error |
func (m *ArpGmpConfigEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpConfigEntry.Marshal(b, m, deterministic)
}
func (m *ArpGmpConfigEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpConfigEntry.Merge(m, src)
}
func (m *ArpGmpConfigEntry) XXX_Size() int {
return xxx_messageInfo_ArpGmpConfigEntry.Size(m)
}
func (m *ArpGmpConfigEntry) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpConfigEntry.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpConfigEntry proto.InternalMessageInfo
func (m *ArpGmpConfigEntry) GetIpAddress() string {
if m != nil {
return m.IpAddress
}
return ""
}
func (m *ArpGmpConfigEntry) GetHardwareAddress() string {
if m != nil {
return m.HardwareAddress
}
return ""
}
func (m *ArpGmpConfigEntry) GetEncapsulationType() string {
if m != nil {
return m.EncapsulationType
}
return ""
}
func (m *ArpGmpConfigEntry) GetEntryType() string {
if m != nil {
return m.EntryType
}
return ""
}
type ArpGmpIntfEntry struct {
InterfaceNameXr string `protobuf:"bytes,50,opt,name=interface_name_xr,json=interfaceNameXr,proto3" json:"interface_name_xr,omitempty"`
ReferenceCount uint32 `protobuf:"varint,51,opt,name=reference_count,json=referenceCount,proto3" json:"reference_count,omitempty"`
AssociatedConfigurationEntry *ArpGmpConfigEntry `protobuf:"bytes,52,opt,name=associated_configuration_entry,json=associatedConfigurationEntry,proto3" json:"associated_configuration_entry,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpIntfEntry) Reset() { *m = ArpGmpIntfEntry{} }
func (m *ArpGmpIntfEntry) String() string { return proto.CompactTextString(m) }
func (*ArpGmpIntfEntry) ProtoMessage() {}
func (*ArpGmpIntfEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{2}
}
func (m *ArpGmpIntfEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpIntfEntry.Unmarshal(m, b)
}
func (m *ArpGmpIntfEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpIntfEntry.Marshal(b, m, deterministic)
}
func (m *ArpGmpIntfEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpIntfEntry.Merge(m, src)
}
func (m *ArpGmpIntfEntry) XXX_Size() int {
return xxx_messageInfo_ArpGmpIntfEntry.Size(m)
}
func (m *ArpGmpIntfEntry) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpIntfEntry.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpIntfEntry proto.InternalMessageInfo
func (m *ArpGmpIntfEntry) GetInterfaceNameXr() string {
if m != nil {
return m.InterfaceNameXr
}
return ""
}
func (m *ArpGmpIntfEntry) GetReferenceCount() uint32 {
if m != nil {
return m.ReferenceCount
}
return 0
}
func (m *ArpGmpIntfEntry) GetAssociatedConfigurationEntry() *ArpGmpConfigEntry {
if m != nil {
return m.AssociatedConfigurationEntry
}
return nil
}
func init() {
proto.RegisterType((*ArpGmpIntfEntry_KEYS)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_intf_entry_KEYS")
proto.RegisterType((*ArpGmpConfigEntry)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_config_entry")
proto.RegisterType((*ArpGmpIntfEntry)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_intf_entry")
}
func init() { proto.RegisterFile("arp_gmp_intf_entry.proto", fileDescriptor_ec3e2d5120a99909) }
var fileDescriptor_ec3e2d5120a99909 = []byte{
// 361 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xc1, 0x4a, 0x3b, 0x31,
0x10, 0xc6, 0xd9, 0xfe, 0xff, 0x58, 0x1b, 0x69, 0x6b, 0x83, 0xe0, 0x0a, 0x2a, 0xa5, 0x20, 0x56,
0xc1, 0x3d, 0xb4, 0x7d, 0x01, 0x29, 0x3d, 0x09, 0x1e, 0xaa, 0x07, 0x05, 0x21, 0xc4, 0xdd, 0xd9,
0x1a, 0xb0, 0x49, 0x98, 0x64, 0xd7, 0xf6, 0x7d, 0x3c, 0x7b, 0xf6, 0xf1, 0x64, 0x93, 0xee, 0xd6,
0x52, 0x3d, 0x7a, 0x59, 0xd8, 0x6f, 0xbe, 0xe4, 0xf7, 0x65, 0x66, 0x48, 0xc8, 0x51, 0xb3, 0xd9,
0x5c, 0x33, 0x21, 0x6d, 0xca, 0x40, 0x5a, 0x5c, 0x46, 0x1a, 0x95, 0x55, 0xf4, 0x29, 0x16, 0x26,
0x56, 0x4c, 0x28, 0xc3, 0x16, 0xc8, 0x84, 0xce, 0x47, 0xac, 0xf0, 0x2a, 0x0d, 0x18, 0xad, 0x0e,
0x45, 0x39, 0xa6, 0xa6, 0xf8, 0x44, 0x42, 0x5a, 0xc0, 0x94, 0xc7, 0xc0, 0x62, 0x25, 0x53, 0x31,
0xcb, 0x10, 0x12, 0x26, 0xb4, 0xf9, 0xad, 0xd0, 0xcb, 0xc8, 0xe1, 0x36, 0x99, 0xdd, 0x4c, 0x1e,
0xef, 0xe8, 0x11, 0xd9, 0xcd, 0x31, 0x65, 0x92, 0xcf, 0x21, 0x0c, 0xba, 0x41, 0xbf, 0x31, 0xad,
0xe7, 0x98, 0xde, 0xf2, 0x39, 0xd0, 0x33, 0xd2, 0x5a, 0x5f, 0xe8, 0x0c, 0x35, 0x67, 0x68, 0x56,
0xaa, 0xb3, 0x85, 0xa4, 0xce, 0x93, 0x04, 0xc1, 0x98, 0xf0, 0x9f, 0xbf, 0x60, 0xf5, 0xdb, 0xfb,
0x08, 0xc8, 0x41, 0xc9, 0xf5, 0x81, 0x3c, 0x99, 0x9e, 0x10, 0x22, 0x34, 0x2b, 0x4f, 0x79, 0x6c,
0x43, 0xe8, 0x6b, 0x2f, 0xd0, 0x0b, 0xb2, 0xff, 0xc2, 0x31, 0x79, 0xe3, 0x08, 0x95, 0xc9, 0xa3,
0xdb, 0xa5, 0x5e, 0x5a, 0xaf, 0x08, 0x05, 0x19, 0x73, 0x6d, 0xb2, 0x57, 0x6e, 0x85, 0x92, 0xcc,
0x2e, 0x35, 0xac, 0x72, 0x74, 0x36, 0x2a, 0xf7, 0x4b, 0x0d, 0x05, 0xd8, 0xbf, 0xdd, 0xd9, 0xfe,
0x7b, 0xb0, 0x53, 0x8a, 0x72, 0xef, 0xbd, 0x46, 0xe8, 0x76, 0xa3, 0xe8, 0x25, 0xe9, 0x6c, 0x36,
0x82, 0x2d, 0x30, 0x1c, 0xf8, 0x40, 0x1b, 0xbd, 0x78, 0x40, 0x7a, 0x4e, 0xda, 0x08, 0x29, 0x20,
0x48, 0x37, 0x85, 0x4c, 0xda, 0x70, 0xd8, 0x0d, 0xfa, 0xcd, 0x69, 0xab, 0x92, 0xc7, 0x85, 0x4a,
0x3f, 0x03, 0x72, 0xca, 0x8d, 0x51, 0xb1, 0xe0, 0x16, 0x92, 0x6a, 0x60, 0xfe, 0x15, 0x8e, 0x1b,
0x8e, 0xba, 0x41, 0x7f, 0x6f, 0x80, 0xd1, 0x5f, 0xee, 0x46, 0xf4, 0xd3, 0x80, 0xa6, 0xc7, 0xeb,
0x64, 0xe3, 0xef, 0xc1, 0x26, 0x45, 0xf5, 0x79, 0xc7, 0xed, 0xec, 0xf0, 0x2b, 0x00, 0x00, 0xff,
0xff, 0x3b, 0x10, 0x86, 0xee, 0xcf, 0x02, 0x00, 0x00,
}
| {
return xxx_messageInfo_ArpGmpConfigEntry.Unmarshal(m, b)
} | identifier_body |
arp_gmp_intf_entry.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: arp_gmp_intf_entry.proto
package cisco_ios_xr_ipv4_arp_oper_arp_gmp_vrfs_vrf_interface_configured_ips_interface_configured_ip
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type ArpGmpIntfEntry_KEYS struct {
VrfName string `protobuf:"bytes,1,opt,name=vrf_name,json=vrfName,proto3" json:"vrf_name,omitempty"`
InterfaceName string `protobuf:"bytes,2,opt,name=interface_name,json=interfaceName,proto3" json:"interface_name,omitempty"`
Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpIntfEntry_KEYS) Reset() { *m = ArpGmpIntfEntry_KEYS{} }
func (m *ArpGmpIntfEntry_KEYS) String() string { return proto.CompactTextString(m) }
func (*ArpGmpIntfEntry_KEYS) ProtoMessage() {}
func (*ArpGmpIntfEntry_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{0}
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Unmarshal(m, b)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Marshal(b, m, deterministic)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpIntfEntry_KEYS.Merge(m, src)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_Size() int {
return xxx_messageInfo_ArpGmpIntfEntry_KEYS.Size(m)
}
func (m *ArpGmpIntfEntry_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpIntfEntry_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpIntfEntry_KEYS proto.InternalMessageInfo
func (m *ArpGmpIntfEntry_KEYS) GetVrfName() string {
if m != nil {
return m.VrfName
}
return ""
}
func (m *ArpGmpIntfEntry_KEYS) GetInterfaceName() string {
if m != nil {
return m.InterfaceName
}
return ""
}
func (m *ArpGmpIntfEntry_KEYS) GetAddress() string {
if m != nil |
return ""
}
type ArpGmpConfigEntry struct {
IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
HardwareAddress string `protobuf:"bytes,2,opt,name=hardware_address,json=hardwareAddress,proto3" json:"hardware_address,omitempty"`
EncapsulationType string `protobuf:"bytes,3,opt,name=encapsulation_type,json=encapsulationType,proto3" json:"encapsulation_type,omitempty"`
EntryType string `protobuf:"bytes,4,opt,name=entry_type,json=entryType,proto3" json:"entry_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpConfigEntry) Reset() { *m = ArpGmpConfigEntry{} }
func (m *ArpGmpConfigEntry) String() string { return proto.CompactTextString(m) }
func (*ArpGmpConfigEntry) ProtoMessage() {}
func (*ArpGmpConfigEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{1}
}
func (m *ArpGmpConfigEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpConfigEntry.Unmarshal(m, b)
}
func (m *ArpGmpConfigEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpConfigEntry.Marshal(b, m, deterministic)
}
func (m *ArpGmpConfigEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpConfigEntry.Merge(m, src)
}
func (m *ArpGmpConfigEntry) XXX_Size() int {
return xxx_messageInfo_ArpGmpConfigEntry.Size(m)
}
func (m *ArpGmpConfigEntry) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpConfigEntry.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpConfigEntry proto.InternalMessageInfo
func (m *ArpGmpConfigEntry) GetIpAddress() string {
if m != nil {
return m.IpAddress
}
return ""
}
func (m *ArpGmpConfigEntry) GetHardwareAddress() string {
if m != nil {
return m.HardwareAddress
}
return ""
}
func (m *ArpGmpConfigEntry) GetEncapsulationType() string {
if m != nil {
return m.EncapsulationType
}
return ""
}
func (m *ArpGmpConfigEntry) GetEntryType() string {
if m != nil {
return m.EntryType
}
return ""
}
type ArpGmpIntfEntry struct {
InterfaceNameXr string `protobuf:"bytes,50,opt,name=interface_name_xr,json=interfaceNameXr,proto3" json:"interface_name_xr,omitempty"`
ReferenceCount uint32 `protobuf:"varint,51,opt,name=reference_count,json=referenceCount,proto3" json:"reference_count,omitempty"`
AssociatedConfigurationEntry *ArpGmpConfigEntry `protobuf:"bytes,52,opt,name=associated_configuration_entry,json=associatedConfigurationEntry,proto3" json:"associated_configuration_entry,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArpGmpIntfEntry) Reset() { *m = ArpGmpIntfEntry{} }
func (m *ArpGmpIntfEntry) String() string { return proto.CompactTextString(m) }
func (*ArpGmpIntfEntry) ProtoMessage() {}
func (*ArpGmpIntfEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_ec3e2d5120a99909, []int{2}
}
func (m *ArpGmpIntfEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArpGmpIntfEntry.Unmarshal(m, b)
}
func (m *ArpGmpIntfEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArpGmpIntfEntry.Marshal(b, m, deterministic)
}
func (m *ArpGmpIntfEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArpGmpIntfEntry.Merge(m, src)
}
func (m *ArpGmpIntfEntry) XXX_Size() int {
return xxx_messageInfo_ArpGmpIntfEntry.Size(m)
}
func (m *ArpGmpIntfEntry) XXX_DiscardUnknown() {
xxx_messageInfo_ArpGmpIntfEntry.DiscardUnknown(m)
}
var xxx_messageInfo_ArpGmpIntfEntry proto.InternalMessageInfo
func (m *ArpGmpIntfEntry) GetInterfaceNameXr() string {
if m != nil {
return m.InterfaceNameXr
}
return ""
}
func (m *ArpGmpIntfEntry) GetReferenceCount() uint32 {
if m != nil {
return m.ReferenceCount
}
return 0
}
func (m *ArpGmpIntfEntry) GetAssociatedConfigurationEntry() *ArpGmpConfigEntry {
if m != nil {
return m.AssociatedConfigurationEntry
}
return nil
}
func init() {
proto.RegisterType((*ArpGmpIntfEntry_KEYS)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_intf_entry_KEYS")
proto.RegisterType((*ArpGmpConfigEntry)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_config_entry")
proto.RegisterType((*ArpGmpIntfEntry)(nil), "cisco_ios_xr_ipv4_arp_oper.arp_gmp.vrfs.vrf.interface_configured_ips.interface_configured_ip.arp_gmp_intf_entry")
}
func init() { proto.RegisterFile("arp_gmp_intf_entry.proto", fileDescriptor_ec3e2d5120a99909) }
var fileDescriptor_ec3e2d5120a99909 = []byte{
// 361 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xc1, 0x4a, 0x3b, 0x31,
0x10, 0xc6, 0xd9, 0xfe, 0xff, 0x58, 0x1b, 0x69, 0x6b, 0x83, 0xe0, 0x0a, 0x2a, 0xa5, 0x20, 0x56,
0xc1, 0x3d, 0xb4, 0x7d, 0x01, 0x29, 0x3d, 0x09, 0x1e, 0xaa, 0x07, 0x05, 0x21, 0xc4, 0xdd, 0xd9,
0x1a, 0xb0, 0x49, 0x98, 0x64, 0xd7, 0xf6, 0x7d, 0x3c, 0x7b, 0xf6, 0xf1, 0x64, 0x93, 0xee, 0xd6,
0x52, 0x3d, 0x7a, 0x59, 0xd8, 0x6f, 0xbe, 0xe4, 0xf7, 0x65, 0x66, 0x48, 0xc8, 0x51, 0xb3, 0xd9,
0x5c, 0x33, 0x21, 0x6d, 0xca, 0x40, 0x5a, 0x5c, 0x46, 0x1a, 0x95, 0x55, 0xf4, 0x29, 0x16, 0x26,
0x56, 0x4c, 0x28, 0xc3, 0x16, 0xc8, 0x84, 0xce, 0x47, 0xac, 0xf0, 0x2a, 0x0d, 0x18, 0xad, 0x0e,
0x45, 0x39, 0xa6, 0xa6, 0xf8, 0x44, 0x42, 0x5a, 0xc0, 0x94, 0xc7, 0xc0, 0x62, 0x25, 0x53, 0x31,
0xcb, 0x10, 0x12, 0x26, 0xb4, 0xf9, 0xad, 0xd0, 0xcb, 0xc8, 0xe1, 0x36, 0x99, 0xdd, 0x4c, 0x1e,
0xef, 0xe8, 0x11, 0xd9, 0xcd, 0x31, 0x65, 0x92, 0xcf, 0x21, 0x0c, 0xba, 0x41, 0xbf, 0x31, 0xad,
0xe7, 0x98, 0xde, 0xf2, 0x39, 0xd0, 0x33, 0xd2, 0x5a, 0x5f, 0xe8, 0x0c, 0x35, 0x67, 0x68, 0x56,
0xaa, 0xb3, 0x85, 0xa4, 0xce, 0x93, 0x04, 0xc1, 0x98, 0xf0, 0x9f, 0xbf, 0x60, 0xf5, 0xdb, 0xfb,
0x08, 0xc8, 0x41, 0xc9, 0xf5, 0x81, 0x3c, 0x99, 0x9e, 0x10, 0x22, 0x34, 0x2b, 0x4f, 0x79, 0x6c,
0x43, 0xe8, 0x6b, 0x2f, 0xd0, 0x0b, 0xb2, 0xff, 0xc2, 0x31, 0x79, 0xe3, 0x08, 0x95, 0xc9, 0xa3,
0xdb, 0xa5, 0x5e, 0x5a, 0xaf, 0x08, 0x05, 0x19, 0x73, 0x6d, 0xb2, 0x57, 0x6e, 0x85, 0x92, 0xcc,
0x2e, 0x35, 0xac, 0x72, 0x74, 0x36, 0x2a, 0xf7, 0x4b, 0x0d, 0x05, 0xd8, 0xbf, 0xdd, 0xd9, 0xfe,
0x7b, 0xb0, 0x53, 0x8a, 0x72, 0xef, 0xbd, 0x46, 0xe8, 0x76, 0xa3, 0xe8, 0x25, 0xe9, 0x6c, 0x36,
0x82, 0x2d, 0x30, 0x1c, 0xf8, 0x40, 0x1b, 0xbd, 0x78, 0x40, 0x7a, 0x4e, 0xda, 0x08, 0x29, 0x20,
0x48, 0x37, 0x85, 0x4c, 0xda, 0x70, 0xd8, 0x0d, 0xfa, 0xcd, 0x69, 0xab, 0x92, 0xc7, 0x85, 0x4a,
0x3f, 0x03, 0x72, 0xca, 0x8d, 0x51, 0xb1, 0xe0, 0x16, 0x92, 0x6a, 0x60, 0xfe, 0x15, 0x8e, 0x1b,
0x8e, 0xba, 0x41, 0x7f, 0x6f, 0x80, 0xd1, 0x5f, 0xee, 0x46, 0xf4, 0xd3, 0x80, 0xa6, 0xc7, 0xeb,
0x64, 0xe3, 0xef, 0xc1, 0x26, 0x45, 0xf5, 0x79, 0xc7, 0xed, 0xec, 0xf0, 0x2b, 0x00, 0x00, 0xff,
0xff, 0x3b, 0x10, 0x86, 0xee, 0xcf, 0x02, 0x00, 0x00,
}
| {
return m.Address
} | conditional_block |
manage_cpu_test.go | /*
* Copyright (c) 2021 THL A29 Limited, a Tencent company.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package manager
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"testing"
"github.com/tencent/caelus/pkg/caelus/types"
"github.com/tencent/caelus/pkg/caelus/util/cgroup"
"github.com/shirou/gopsutil/cpu"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
)
type cpuBtTestData struct {
describe string
cpuStatic bool
cpusetRecovered bool
limitCores int64
expectPercent string
}
// TestQosCpuBT_Manage tests cpu bt qos manager
func TestQosCpuBT_Manage(t *testing.T) {
if !cgroup.CPUOfflineSupported() {
t.Skipf("cpu qos bt test skipped for not supported")
}
total, err := getTotalCpus()
if err != nil {
t.Skipf("cpu qos bt test skipped for get total cpus err: %v", err)
}
// recover origin value
procOffline := "/proc/offline/"
var originValues = make(map[string]string)
cpus, err := ioutil.ReadDir(procOffline)
if err != nil {
t.Skipf("read offline path(%s) failed: %v", procOffline, err)
}
for _, f := range cpus {
p := path.Join(procOffline, f.Name())
value, err := ioutil.ReadFile(p)
if err != nil {
t.Skipf("read offline file(%s) err: %v", p, err)
return
}
originValues[p] = string(value)
}
defer func() {
for p, v := range originValues {
ioutil.WriteFile(p, []byte(v), 0664)
}
}()
minCpuBtPercent = 50
testCases := []cpuBtTestData{
{
describe: "normal test",
cpuStatic: false,
cpusetRecovered: true,
limitCores: total,
expectPercent: "100",
},
{
describe: "min limit test",
cpuStatic: false,
cpusetRecovered: true,
limitCores: 1,
expectPercent: fmt.Sprintf("%d", minCpuBtPercent),
},
{
describe: "static test",
cpuStatic: true,
cpusetRecovered: true,
limitCores: total,
expectPercent: "100",
},
{
describe: "cpuset recovered",
cpuStatic: false,
cpusetRecovered: false,
limitCores: total,
expectPercent: "100",
},
}
offlineCg := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline
// offlineCpusetCg should be under the offlineCg path, we not creates it as this just for test
offlineCpusetCg := "/sys/fs/cgroup/cpuset/offlinetest"
offlineCpusetCgInRoot := "/offlinetest"
for _, tc := range testCases {
btQos := &qosCpuBT{
kubeletStatic: tc.cpuStatic,
}
cpusetRecovered = tc.cpusetRecovered
func() {
existed, err := mkdirCgPath(offlineCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCg, err)
}
if !existed {
defer os.RemoveAll(offlineCg)
}
existed, err = mkdirCgPath(offlineCpusetCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCpusetCg, err)
}
if !existed {
defer os.RemoveAll(offlineCpusetCg)
}
btQos.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limitCores*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlineCpusetCgInRoot,
},
})
valueBytes, err := ioutil.ReadFile(path.Join(offlineCg, "cpu.offline"))
if err != nil {
t.Fatalf("read offline cgroup %s err: %v", offlineCg, err)
}
if strings.Trim(string(valueBytes), "\n") != "1" {
t.Fatalf("cpu qos bt test case(%s) failed, offline(%s) not enabled", tc.describe, offlineCg)
}
for p := range originValues {
vByts, err := ioutil.ReadFile(p)
if err != nil {
t.Fatalf("read cpu offline file(%s) err: %v", p, err)
}
if strings.Trim(string(vByts), "\n") != tc.expectPercent {
t.Fatalf("cpu qos bt test case(%s) unexpect result, expect %s, got %s",
tc.describe, tc.expectPercent, string(vByts))
}
}
cpusetStr, err := readCpuSetCgroup(offlineCpusetCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", offlineCpusetCg, err)
}
if (!tc.cpuStatic && tc.cpusetRecovered) && len(cpusetStr) != 0 {
t.Fatalf("cpu qos bt test case(%s) failed, static is false, should not set cpusets: %s",
tc.describe, cpusetStr)
}
if (tc.cpuStatic || !tc.cpusetRecovered) && len(cpusetStr) == 0 {
t.Fatalf("cpu qos bt test case(%s) failed, static is true, should set cpusets, got null",
tc.describe)
}
}()
}
}
type cpuSetTestData struct {
describe string
reserved sets.Int
limit int64
onlineIsolate bool
expect struct {
offline string
online string
}
}
// TestQosCpuSet_Manage tests cpuset qos manager
func TestQosCpuSet_Manage(t *testing.T) {
total, err := getTotalCpus()
if err != nil {
t.Skipf("cpu qos cpuset skipped for get total cpu err: %v", err)
}
lastCoreStr := fmt.Sprintf("%d", total-1)
lastSecCoreStr := fmt.Sprintf("%d", total-2)
leftCoreStr := fmt.Sprintf("0-%d", total-2)
if total == 2 {
leftCoreStr = "0"
}
testCases := []cpuSetTestData{
{
describe: "no reserved",
reserved: sets.NewInt(),
limit: 1,
onlineIsolate: false,
expect: struct {
offline string
online string
}{offline: lastCoreStr, online: ""},
},
{
describe: "has reserved",
reserved: sets.NewInt([]int{int(total) - 1}...),
limit: 1,
onlineIsolate: false,
expect: struct {
offline string
online string
}{offline: lastSecCoreStr, online: ""},
},
{
describe: "online isolate enable",
reserved: sets.NewInt(),
limit: 1,
onlineIsolate: true,
expect: struct {
offline string
online string
}{offline: lastCoreStr, online: leftCoreStr},
},
}
cpusetCg := "/sys/fs/cgroup/cpuset"
offlineCgInRoot := "/offlinetest"
onlineCgInRoot := "/onlinetest"
offlineCg := path.Join(cpusetCg, offlineCgInRoot)
onlineCg := path.Join(cpusetCg, onlineCgInRoot)
for _, tc := range testCases {
qosCpuset := &qosCpuSet{
onlineIsolate: tc.onlineIsolate,
reserved: tc.reserved,
lastOfflineCgs: newCgroupPaths(),
lastOnlineCgs: newCgroupPaths(),
}
func() {
existed, err := mkdirCgPath(offlineCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCg, err)
}
if !existed {
defer os.RemoveAll(offlineCg)
}
existed, err = mkdirCgPath(onlineCg)
if err != nil {
t.Fatalf("mkdir online cgroup %s err: %v", onlineCg, err)
}
if !existed {
defer os.RemoveAll(onlineCg)
}
qosCpuset.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limit*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlineCgInRoot,
},
OnlineCgroups: []string{
onlineCgInRoot,
},
})
offlineCpusets, err := readCpuSetCgroup(offlineCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", offlineCg, err)
}
if offlineCpusets != tc.expect.offline {
t.Fatalf("cpu qos cpuset test case(%s) failed, expect offline %s, got %s",
tc.describe, tc.expect.offline, offlineCpusets)
}
onCpusets, err := readCpuSetCgroup(onlineCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", onlineCg, err)
}
if onCpusets != tc.expect.online {
t.Fatalf("cpu qos cpuset test case(%s) failed, expect online %s, got %s",
tc.describe, tc.expect.online, onCpusets)
}
}()
}
}
type cpuQuotaTestData struct {
describe string
limit int64
weight *uint64
expect struct {
limit string
weight string
}
}
// TestQosCpuQuota_Manage test cpu quota qos manager
func TestQosCpuQuota_Manage(t *testing.T) {
testCases := []cpuQuotaTestData{
{
describe: "quota test",
limit: 2,
weight: nil,
expect: struct {
limit string
weight string
}{limit: "2000000", weight: "1024"},
},
{
describe: "weight test",
limit: 2,
weight: uint64Pointer(2),
expect: struct {
limit string
weight string
}{limit: "2000000", weight: "2"},
},
}
quotaCgPath := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline
offlinePath := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline + "/test"
offlinePathInRoot := types.CgroupOffline + "/test"
for _, tc := range testCases {
qosQuota := &qosCpuQuota{
shareWeight: tc.weight,
kubeletStatic: false,
lastOfflineCgs: newCgroupPaths(),
}
func() {
existed, err := mkdirCgPath(quotaCgPath)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", quotaCgPath, err)
}
if !existed {
defer os.RemoveAll(quotaCgPath)
}
existed, err = mkdirCgPath(offlinePath)
if err != nil {
t.Fatalf("mkdir online cgroup %s err: %v", offlinePath, err)
}
if !existed {
defer os.RemoveAll(offlinePath)
}
qosQuota.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limit*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlinePathInRoot,
},
})
quotaBytes, err := ioutil.ReadFile(path.Join(quotaCgPath, "cpu.cfs_quota_us"))
if err != nil {
t.Fatalf("read cpu quota for %s err: %v", quotaCgPath, err)
}
if strings.Trim(string(quotaBytes), "\n") != tc.expect.limit {
t.Fatalf("cpu qos quota test case(%s) failed, expect quota: %s, got: %s",
tc.describe, tc.expect.limit, strings.Trim(string(quotaBytes), "\n"))
}
shareBytes, err := ioutil.ReadFile(path.Join(offlinePath, "cpu.shares"))
if err != nil {
t.Fatalf("read cpu share for %s err: %v", offlinePath, err)
}
if strings.Trim(string(shareBytes), "\n") != tc.expect.weight {
t.Fatalf("cpu qos quota test case(%s) failed, expect share: %s, got: %s",
tc.describe, tc.expect.weight, strings.Trim(string(shareBytes), "\n"))
}
}()
}
}
func getTotalCpus() (int64, error) {
cpuInfo, err := cpu.Info()
if err != nil {
return 0, err
}
return int64(len(cpuInfo)), nil
}
func readCpuSetCgroup(cgPath string) (value string, err error) {
data, err := ioutil.ReadFile(filepath.Join(cgPath, "cpuset.cpus"))
if err != nil {
return "", err
}
return strings.Replace(string(data), "\n", "", -1), nil
}
func mkdirCgPath(cgPath string) (existed bool, err error) {
existed = true
_, err = os.Stat(cgPath)
if err != nil {
if os.IsNotExist(err) {
existed = false
err = os.MkdirAll(cgPath, 0755)
}
}
return existed, err
}
func | (value uint64) *uint64 {
var p *uint64
p = new(uint64)
*p = value
return p
}
| uint64Pointer | identifier_name |
manage_cpu_test.go | /*
* Copyright (c) 2021 THL A29 Limited, a Tencent company.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package manager
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"testing"
"github.com/tencent/caelus/pkg/caelus/types"
"github.com/tencent/caelus/pkg/caelus/util/cgroup"
"github.com/shirou/gopsutil/cpu"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
)
type cpuBtTestData struct {
describe string
cpuStatic bool
cpusetRecovered bool
limitCores int64
expectPercent string
}
// TestQosCpuBT_Manage tests cpu bt qos manager
func TestQosCpuBT_Manage(t *testing.T) {
if !cgroup.CPUOfflineSupported() {
t.Skipf("cpu qos bt test skipped for not supported")
}
total, err := getTotalCpus()
if err != nil {
t.Skipf("cpu qos bt test skipped for get total cpus err: %v", err)
}
// recover origin value
procOffline := "/proc/offline/"
var originValues = make(map[string]string)
cpus, err := ioutil.ReadDir(procOffline)
if err != nil {
t.Skipf("read offline path(%s) failed: %v", procOffline, err)
}
for _, f := range cpus {
p := path.Join(procOffline, f.Name())
value, err := ioutil.ReadFile(p)
if err != nil {
t.Skipf("read offline file(%s) err: %v", p, err)
return
}
originValues[p] = string(value)
}
defer func() {
for p, v := range originValues {
ioutil.WriteFile(p, []byte(v), 0664)
}
}()
minCpuBtPercent = 50
testCases := []cpuBtTestData{
{
describe: "normal test",
cpuStatic: false,
cpusetRecovered: true,
limitCores: total,
expectPercent: "100",
},
{
describe: "min limit test",
cpuStatic: false,
cpusetRecovered: true,
limitCores: 1,
expectPercent: fmt.Sprintf("%d", minCpuBtPercent),
},
{
describe: "static test",
cpuStatic: true,
cpusetRecovered: true,
limitCores: total,
expectPercent: "100",
},
{
describe: "cpuset recovered",
cpuStatic: false,
cpusetRecovered: false,
limitCores: total,
expectPercent: "100",
},
}
offlineCg := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline
// offlineCpusetCg should be under the offlineCg path, we not creates it as this just for test
offlineCpusetCg := "/sys/fs/cgroup/cpuset/offlinetest"
offlineCpusetCgInRoot := "/offlinetest"
for _, tc := range testCases {
btQos := &qosCpuBT{
kubeletStatic: tc.cpuStatic,
}
cpusetRecovered = tc.cpusetRecovered
func() {
existed, err := mkdirCgPath(offlineCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCg, err)
}
if !existed {
defer os.RemoveAll(offlineCg)
}
existed, err = mkdirCgPath(offlineCpusetCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCpusetCg, err)
}
if !existed {
defer os.RemoveAll(offlineCpusetCg)
}
btQos.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limitCores*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlineCpusetCgInRoot,
},
})
valueBytes, err := ioutil.ReadFile(path.Join(offlineCg, "cpu.offline"))
if err != nil {
t.Fatalf("read offline cgroup %s err: %v", offlineCg, err)
}
if strings.Trim(string(valueBytes), "\n") != "1" {
t.Fatalf("cpu qos bt test case(%s) failed, offline(%s) not enabled", tc.describe, offlineCg)
}
for p := range originValues {
vByts, err := ioutil.ReadFile(p)
if err != nil {
t.Fatalf("read cpu offline file(%s) err: %v", p, err)
}
if strings.Trim(string(vByts), "\n") != tc.expectPercent {
t.Fatalf("cpu qos bt test case(%s) unexpect result, expect %s, got %s",
tc.describe, tc.expectPercent, string(vByts))
}
}
cpusetStr, err := readCpuSetCgroup(offlineCpusetCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", offlineCpusetCg, err)
}
if (!tc.cpuStatic && tc.cpusetRecovered) && len(cpusetStr) != 0 {
t.Fatalf("cpu qos bt test case(%s) failed, static is false, should not set cpusets: %s",
tc.describe, cpusetStr)
}
if (tc.cpuStatic || !tc.cpusetRecovered) && len(cpusetStr) == 0 {
t.Fatalf("cpu qos bt test case(%s) failed, static is true, should set cpusets, got null",
tc.describe)
}
}()
}
}
type cpuSetTestData struct {
describe string
reserved sets.Int
limit int64
onlineIsolate bool
expect struct {
offline string
online string
}
}
// TestQosCpuSet_Manage tests cpuset qos manager
func TestQosCpuSet_Manage(t *testing.T) {
total, err := getTotalCpus()
if err != nil {
t.Skipf("cpu qos cpuset skipped for get total cpu err: %v", err)
}
lastCoreStr := fmt.Sprintf("%d", total-1)
lastSecCoreStr := fmt.Sprintf("%d", total-2)
leftCoreStr := fmt.Sprintf("0-%d", total-2)
if total == 2 {
leftCoreStr = "0"
}
testCases := []cpuSetTestData{
{
describe: "no reserved",
reserved: sets.NewInt(),
limit: 1,
onlineIsolate: false,
expect: struct {
offline string
online string
}{offline: lastCoreStr, online: ""},
},
{
describe: "has reserved",
reserved: sets.NewInt([]int{int(total) - 1}...),
limit: 1,
onlineIsolate: false,
expect: struct {
offline string
online string
}{offline: lastSecCoreStr, online: ""},
},
{
describe: "online isolate enable",
reserved: sets.NewInt(),
limit: 1,
onlineIsolate: true,
expect: struct {
offline string
online string
}{offline: lastCoreStr, online: leftCoreStr},
},
}
cpusetCg := "/sys/fs/cgroup/cpuset"
offlineCgInRoot := "/offlinetest"
onlineCgInRoot := "/onlinetest"
offlineCg := path.Join(cpusetCg, offlineCgInRoot)
onlineCg := path.Join(cpusetCg, onlineCgInRoot)
for _, tc := range testCases {
qosCpuset := &qosCpuSet{
onlineIsolate: tc.onlineIsolate,
reserved: tc.reserved,
lastOfflineCgs: newCgroupPaths(),
lastOnlineCgs: newCgroupPaths(),
}
func() {
existed, err := mkdirCgPath(offlineCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCg, err)
}
if !existed {
defer os.RemoveAll(offlineCg)
}
existed, err = mkdirCgPath(onlineCg)
if err != nil {
t.Fatalf("mkdir online cgroup %s err: %v", onlineCg, err)
}
if !existed {
defer os.RemoveAll(onlineCg)
}
qosCpuset.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limit*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlineCgInRoot,
},
OnlineCgroups: []string{
onlineCgInRoot,
},
})
offlineCpusets, err := readCpuSetCgroup(offlineCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", offlineCg, err)
}
if offlineCpusets != tc.expect.offline {
t.Fatalf("cpu qos cpuset test case(%s) failed, expect offline %s, got %s",
tc.describe, tc.expect.offline, offlineCpusets)
}
onCpusets, err := readCpuSetCgroup(onlineCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", onlineCg, err)
}
if onCpusets != tc.expect.online |
}()
}
}
type cpuQuotaTestData struct {
describe string
limit int64
weight *uint64
expect struct {
limit string
weight string
}
}
// TestQosCpuQuota_Manage test cpu quota qos manager
func TestQosCpuQuota_Manage(t *testing.T) {
testCases := []cpuQuotaTestData{
{
describe: "quota test",
limit: 2,
weight: nil,
expect: struct {
limit string
weight string
}{limit: "2000000", weight: "1024"},
},
{
describe: "weight test",
limit: 2,
weight: uint64Pointer(2),
expect: struct {
limit string
weight string
}{limit: "2000000", weight: "2"},
},
}
quotaCgPath := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline
offlinePath := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline + "/test"
offlinePathInRoot := types.CgroupOffline + "/test"
for _, tc := range testCases {
qosQuota := &qosCpuQuota{
shareWeight: tc.weight,
kubeletStatic: false,
lastOfflineCgs: newCgroupPaths(),
}
func() {
existed, err := mkdirCgPath(quotaCgPath)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", quotaCgPath, err)
}
if !existed {
defer os.RemoveAll(quotaCgPath)
}
existed, err = mkdirCgPath(offlinePath)
if err != nil {
t.Fatalf("mkdir online cgroup %s err: %v", offlinePath, err)
}
if !existed {
defer os.RemoveAll(offlinePath)
}
qosQuota.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limit*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlinePathInRoot,
},
})
quotaBytes, err := ioutil.ReadFile(path.Join(quotaCgPath, "cpu.cfs_quota_us"))
if err != nil {
t.Fatalf("read cpu quota for %s err: %v", quotaCgPath, err)
}
if strings.Trim(string(quotaBytes), "\n") != tc.expect.limit {
t.Fatalf("cpu qos quota test case(%s) failed, expect quota: %s, got: %s",
tc.describe, tc.expect.limit, strings.Trim(string(quotaBytes), "\n"))
}
shareBytes, err := ioutil.ReadFile(path.Join(offlinePath, "cpu.shares"))
if err != nil {
t.Fatalf("read cpu share for %s err: %v", offlinePath, err)
}
if strings.Trim(string(shareBytes), "\n") != tc.expect.weight {
t.Fatalf("cpu qos quota test case(%s) failed, expect share: %s, got: %s",
tc.describe, tc.expect.weight, strings.Trim(string(shareBytes), "\n"))
}
}()
}
}
func getTotalCpus() (int64, error) {
cpuInfo, err := cpu.Info()
if err != nil {
return 0, err
}
return int64(len(cpuInfo)), nil
}
func readCpuSetCgroup(cgPath string) (value string, err error) {
data, err := ioutil.ReadFile(filepath.Join(cgPath, "cpuset.cpus"))
if err != nil {
return "", err
}
return strings.Replace(string(data), "\n", "", -1), nil
}
func mkdirCgPath(cgPath string) (existed bool, err error) {
existed = true
_, err = os.Stat(cgPath)
if err != nil {
if os.IsNotExist(err) {
existed = false
err = os.MkdirAll(cgPath, 0755)
}
}
return existed, err
}
func uint64Pointer(value uint64) *uint64 {
var p *uint64
p = new(uint64)
*p = value
return p
}
| {
t.Fatalf("cpu qos cpuset test case(%s) failed, expect online %s, got %s",
tc.describe, tc.expect.online, onCpusets)
} | conditional_block |
manage_cpu_test.go | /*
* Copyright (c) 2021 THL A29 Limited, a Tencent company.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package manager
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"testing"
"github.com/tencent/caelus/pkg/caelus/types"
"github.com/tencent/caelus/pkg/caelus/util/cgroup"
"github.com/shirou/gopsutil/cpu"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
)
type cpuBtTestData struct {
describe string
cpuStatic bool
cpusetRecovered bool
limitCores int64
expectPercent string
}
// TestQosCpuBT_Manage tests cpu bt qos manager
func TestQosCpuBT_Manage(t *testing.T) {
if !cgroup.CPUOfflineSupported() {
t.Skipf("cpu qos bt test skipped for not supported")
}
total, err := getTotalCpus()
if err != nil {
t.Skipf("cpu qos bt test skipped for get total cpus err: %v", err)
}
// recover origin value
procOffline := "/proc/offline/"
var originValues = make(map[string]string)
cpus, err := ioutil.ReadDir(procOffline)
if err != nil {
t.Skipf("read offline path(%s) failed: %v", procOffline, err)
}
for _, f := range cpus {
p := path.Join(procOffline, f.Name())
value, err := ioutil.ReadFile(p)
if err != nil {
t.Skipf("read offline file(%s) err: %v", p, err)
return
}
originValues[p] = string(value)
}
defer func() {
for p, v := range originValues {
ioutil.WriteFile(p, []byte(v), 0664)
}
}()
minCpuBtPercent = 50
testCases := []cpuBtTestData{
{
describe: "normal test",
cpuStatic: false,
cpusetRecovered: true,
limitCores: total,
expectPercent: "100",
},
{
describe: "min limit test",
cpuStatic: false,
cpusetRecovered: true,
limitCores: 1,
expectPercent: fmt.Sprintf("%d", minCpuBtPercent),
},
{
describe: "static test",
cpuStatic: true,
cpusetRecovered: true,
limitCores: total,
expectPercent: "100",
},
{
describe: "cpuset recovered",
cpuStatic: false,
cpusetRecovered: false,
limitCores: total,
expectPercent: "100",
},
}
offlineCg := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline
// offlineCpusetCg should be under the offlineCg path, we not creates it as this just for test
offlineCpusetCg := "/sys/fs/cgroup/cpuset/offlinetest"
offlineCpusetCgInRoot := "/offlinetest"
for _, tc := range testCases {
btQos := &qosCpuBT{
kubeletStatic: tc.cpuStatic,
}
cpusetRecovered = tc.cpusetRecovered
func() {
existed, err := mkdirCgPath(offlineCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCg, err)
}
if !existed {
defer os.RemoveAll(offlineCg)
}
existed, err = mkdirCgPath(offlineCpusetCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCpusetCg, err)
}
if !existed {
defer os.RemoveAll(offlineCpusetCg)
}
btQos.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limitCores*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlineCpusetCgInRoot,
},
})
valueBytes, err := ioutil.ReadFile(path.Join(offlineCg, "cpu.offline"))
if err != nil {
t.Fatalf("read offline cgroup %s err: %v", offlineCg, err)
}
if strings.Trim(string(valueBytes), "\n") != "1" {
t.Fatalf("cpu qos bt test case(%s) failed, offline(%s) not enabled", tc.describe, offlineCg)
}
for p := range originValues {
vByts, err := ioutil.ReadFile(p)
if err != nil {
t.Fatalf("read cpu offline file(%s) err: %v", p, err)
}
if strings.Trim(string(vByts), "\n") != tc.expectPercent {
t.Fatalf("cpu qos bt test case(%s) unexpect result, expect %s, got %s",
tc.describe, tc.expectPercent, string(vByts))
}
}
cpusetStr, err := readCpuSetCgroup(offlineCpusetCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", offlineCpusetCg, err)
}
if (!tc.cpuStatic && tc.cpusetRecovered) && len(cpusetStr) != 0 {
t.Fatalf("cpu qos bt test case(%s) failed, static is false, should not set cpusets: %s",
tc.describe, cpusetStr)
}
if (tc.cpuStatic || !tc.cpusetRecovered) && len(cpusetStr) == 0 {
t.Fatalf("cpu qos bt test case(%s) failed, static is true, should set cpusets, got null",
tc.describe)
}
}()
}
}
type cpuSetTestData struct {
describe string
reserved sets.Int
limit int64
onlineIsolate bool
expect struct {
offline string
online string
}
}
// TestQosCpuSet_Manage tests cpuset qos manager
func TestQosCpuSet_Manage(t *testing.T) {
total, err := getTotalCpus()
if err != nil {
t.Skipf("cpu qos cpuset skipped for get total cpu err: %v", err)
}
lastCoreStr := fmt.Sprintf("%d", total-1)
lastSecCoreStr := fmt.Sprintf("%d", total-2)
leftCoreStr := fmt.Sprintf("0-%d", total-2)
if total == 2 {
leftCoreStr = "0"
}
testCases := []cpuSetTestData{
{
describe: "no reserved",
reserved: sets.NewInt(),
limit: 1,
onlineIsolate: false,
expect: struct {
offline string
online string
}{offline: lastCoreStr, online: ""},
},
{
describe: "has reserved",
reserved: sets.NewInt([]int{int(total) - 1}...),
limit: 1,
onlineIsolate: false,
expect: struct {
offline string
online string
}{offline: lastSecCoreStr, online: ""},
},
{
describe: "online isolate enable",
reserved: sets.NewInt(),
limit: 1,
onlineIsolate: true,
expect: struct {
offline string
online string
}{offline: lastCoreStr, online: leftCoreStr},
},
}
cpusetCg := "/sys/fs/cgroup/cpuset"
offlineCgInRoot := "/offlinetest"
onlineCgInRoot := "/onlinetest"
offlineCg := path.Join(cpusetCg, offlineCgInRoot)
onlineCg := path.Join(cpusetCg, onlineCgInRoot)
for _, tc := range testCases {
qosCpuset := &qosCpuSet{
onlineIsolate: tc.onlineIsolate,
reserved: tc.reserved,
lastOfflineCgs: newCgroupPaths(),
lastOnlineCgs: newCgroupPaths(),
}
func() {
existed, err := mkdirCgPath(offlineCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCg, err)
}
if !existed {
defer os.RemoveAll(offlineCg)
}
existed, err = mkdirCgPath(onlineCg)
if err != nil {
t.Fatalf("mkdir online cgroup %s err: %v", onlineCg, err)
}
if !existed {
defer os.RemoveAll(onlineCg)
}
qosCpuset.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limit*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlineCgInRoot,
},
OnlineCgroups: []string{
onlineCgInRoot,
},
})
offlineCpusets, err := readCpuSetCgroup(offlineCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", offlineCg, err)
}
if offlineCpusets != tc.expect.offline {
t.Fatalf("cpu qos cpuset test case(%s) failed, expect offline %s, got %s",
tc.describe, tc.expect.offline, offlineCpusets)
}
onCpusets, err := readCpuSetCgroup(onlineCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", onlineCg, err)
}
if onCpusets != tc.expect.online {
t.Fatalf("cpu qos cpuset test case(%s) failed, expect online %s, got %s",
tc.describe, tc.expect.online, onCpusets)
}
}()
}
}
type cpuQuotaTestData struct {
describe string
limit int64
weight *uint64
expect struct {
limit string
weight string
}
}
// TestQosCpuQuota_Manage test cpu quota qos manager
func TestQosCpuQuota_Manage(t *testing.T) {
testCases := []cpuQuotaTestData{
{
describe: "quota test",
limit: 2,
weight: nil,
expect: struct {
limit string
weight string
}{limit: "2000000", weight: "1024"},
},
{
describe: "weight test",
limit: 2,
weight: uint64Pointer(2),
expect: struct {
limit string
weight string
}{limit: "2000000", weight: "2"},
},
}
quotaCgPath := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline
offlinePath := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline + "/test"
offlinePathInRoot := types.CgroupOffline + "/test"
for _, tc := range testCases {
qosQuota := &qosCpuQuota{
shareWeight: tc.weight,
kubeletStatic: false,
lastOfflineCgs: newCgroupPaths(),
}
func() {
existed, err := mkdirCgPath(quotaCgPath)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", quotaCgPath, err)
}
if !existed {
defer os.RemoveAll(quotaCgPath)
}
existed, err = mkdirCgPath(offlinePath)
if err != nil {
t.Fatalf("mkdir online cgroup %s err: %v", offlinePath, err)
}
if !existed {
defer os.RemoveAll(offlinePath)
}
qosQuota.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limit*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlinePathInRoot,
},
})
quotaBytes, err := ioutil.ReadFile(path.Join(quotaCgPath, "cpu.cfs_quota_us"))
if err != nil {
t.Fatalf("read cpu quota for %s err: %v", quotaCgPath, err)
}
if strings.Trim(string(quotaBytes), "\n") != tc.expect.limit {
t.Fatalf("cpu qos quota test case(%s) failed, expect quota: %s, got: %s",
tc.describe, tc.expect.limit, strings.Trim(string(quotaBytes), "\n"))
}
shareBytes, err := ioutil.ReadFile(path.Join(offlinePath, "cpu.shares"))
if err != nil {
t.Fatalf("read cpu share for %s err: %v", offlinePath, err)
}
if strings.Trim(string(shareBytes), "\n") != tc.expect.weight {
t.Fatalf("cpu qos quota test case(%s) failed, expect share: %s, got: %s",
tc.describe, tc.expect.weight, strings.Trim(string(shareBytes), "\n"))
}
}()
}
}
func getTotalCpus() (int64, error) {
cpuInfo, err := cpu.Info()
if err != nil {
return 0, err
}
return int64(len(cpuInfo)), nil
}
func readCpuSetCgroup(cgPath string) (value string, err error) {
data, err := ioutil.ReadFile(filepath.Join(cgPath, "cpuset.cpus"))
if err != nil {
return "", err
}
return strings.Replace(string(data), "\n", "", -1), nil
}
func mkdirCgPath(cgPath string) (existed bool, err error) {
existed = true
_, err = os.Stat(cgPath)
if err != nil {
if os.IsNotExist(err) {
existed = false
err = os.MkdirAll(cgPath, 0755)
}
}
return existed, err
}
|
return p
} | func uint64Pointer(value uint64) *uint64 {
var p *uint64
p = new(uint64)
*p = value | random_line_split |
manage_cpu_test.go | /*
* Copyright (c) 2021 THL A29 Limited, a Tencent company.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package manager
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"testing"
"github.com/tencent/caelus/pkg/caelus/types"
"github.com/tencent/caelus/pkg/caelus/util/cgroup"
"github.com/shirou/gopsutil/cpu"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
)
type cpuBtTestData struct {
describe string
cpuStatic bool
cpusetRecovered bool
limitCores int64
expectPercent string
}
// TestQosCpuBT_Manage tests cpu bt qos manager
func TestQosCpuBT_Manage(t *testing.T) {
if !cgroup.CPUOfflineSupported() {
t.Skipf("cpu qos bt test skipped for not supported")
}
total, err := getTotalCpus()
if err != nil {
t.Skipf("cpu qos bt test skipped for get total cpus err: %v", err)
}
// recover origin value
procOffline := "/proc/offline/"
var originValues = make(map[string]string)
cpus, err := ioutil.ReadDir(procOffline)
if err != nil {
t.Skipf("read offline path(%s) failed: %v", procOffline, err)
}
for _, f := range cpus {
p := path.Join(procOffline, f.Name())
value, err := ioutil.ReadFile(p)
if err != nil {
t.Skipf("read offline file(%s) err: %v", p, err)
return
}
originValues[p] = string(value)
}
defer func() {
for p, v := range originValues {
ioutil.WriteFile(p, []byte(v), 0664)
}
}()
minCpuBtPercent = 50
testCases := []cpuBtTestData{
{
describe: "normal test",
cpuStatic: false,
cpusetRecovered: true,
limitCores: total,
expectPercent: "100",
},
{
describe: "min limit test",
cpuStatic: false,
cpusetRecovered: true,
limitCores: 1,
expectPercent: fmt.Sprintf("%d", minCpuBtPercent),
},
{
describe: "static test",
cpuStatic: true,
cpusetRecovered: true,
limitCores: total,
expectPercent: "100",
},
{
describe: "cpuset recovered",
cpuStatic: false,
cpusetRecovered: false,
limitCores: total,
expectPercent: "100",
},
}
offlineCg := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline
// offlineCpusetCg should be under the offlineCg path, we not creates it as this just for test
offlineCpusetCg := "/sys/fs/cgroup/cpuset/offlinetest"
offlineCpusetCgInRoot := "/offlinetest"
for _, tc := range testCases {
btQos := &qosCpuBT{
kubeletStatic: tc.cpuStatic,
}
cpusetRecovered = tc.cpusetRecovered
func() {
existed, err := mkdirCgPath(offlineCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCg, err)
}
if !existed {
defer os.RemoveAll(offlineCg)
}
existed, err = mkdirCgPath(offlineCpusetCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCpusetCg, err)
}
if !existed {
defer os.RemoveAll(offlineCpusetCg)
}
btQos.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limitCores*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlineCpusetCgInRoot,
},
})
valueBytes, err := ioutil.ReadFile(path.Join(offlineCg, "cpu.offline"))
if err != nil {
t.Fatalf("read offline cgroup %s err: %v", offlineCg, err)
}
if strings.Trim(string(valueBytes), "\n") != "1" {
t.Fatalf("cpu qos bt test case(%s) failed, offline(%s) not enabled", tc.describe, offlineCg)
}
for p := range originValues {
vByts, err := ioutil.ReadFile(p)
if err != nil {
t.Fatalf("read cpu offline file(%s) err: %v", p, err)
}
if strings.Trim(string(vByts), "\n") != tc.expectPercent {
t.Fatalf("cpu qos bt test case(%s) unexpect result, expect %s, got %s",
tc.describe, tc.expectPercent, string(vByts))
}
}
cpusetStr, err := readCpuSetCgroup(offlineCpusetCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", offlineCpusetCg, err)
}
if (!tc.cpuStatic && tc.cpusetRecovered) && len(cpusetStr) != 0 {
t.Fatalf("cpu qos bt test case(%s) failed, static is false, should not set cpusets: %s",
tc.describe, cpusetStr)
}
if (tc.cpuStatic || !tc.cpusetRecovered) && len(cpusetStr) == 0 {
t.Fatalf("cpu qos bt test case(%s) failed, static is true, should set cpusets, got null",
tc.describe)
}
}()
}
}
type cpuSetTestData struct {
describe string
reserved sets.Int
limit int64
onlineIsolate bool
expect struct {
offline string
online string
}
}
// TestQosCpuSet_Manage tests cpuset qos manager
func TestQosCpuSet_Manage(t *testing.T) {
total, err := getTotalCpus()
if err != nil {
t.Skipf("cpu qos cpuset skipped for get total cpu err: %v", err)
}
lastCoreStr := fmt.Sprintf("%d", total-1)
lastSecCoreStr := fmt.Sprintf("%d", total-2)
leftCoreStr := fmt.Sprintf("0-%d", total-2)
if total == 2 {
leftCoreStr = "0"
}
testCases := []cpuSetTestData{
{
describe: "no reserved",
reserved: sets.NewInt(),
limit: 1,
onlineIsolate: false,
expect: struct {
offline string
online string
}{offline: lastCoreStr, online: ""},
},
{
describe: "has reserved",
reserved: sets.NewInt([]int{int(total) - 1}...),
limit: 1,
onlineIsolate: false,
expect: struct {
offline string
online string
}{offline: lastSecCoreStr, online: ""},
},
{
describe: "online isolate enable",
reserved: sets.NewInt(),
limit: 1,
onlineIsolate: true,
expect: struct {
offline string
online string
}{offline: lastCoreStr, online: leftCoreStr},
},
}
cpusetCg := "/sys/fs/cgroup/cpuset"
offlineCgInRoot := "/offlinetest"
onlineCgInRoot := "/onlinetest"
offlineCg := path.Join(cpusetCg, offlineCgInRoot)
onlineCg := path.Join(cpusetCg, onlineCgInRoot)
for _, tc := range testCases {
qosCpuset := &qosCpuSet{
onlineIsolate: tc.onlineIsolate,
reserved: tc.reserved,
lastOfflineCgs: newCgroupPaths(),
lastOnlineCgs: newCgroupPaths(),
}
func() {
existed, err := mkdirCgPath(offlineCg)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", offlineCg, err)
}
if !existed {
defer os.RemoveAll(offlineCg)
}
existed, err = mkdirCgPath(onlineCg)
if err != nil {
t.Fatalf("mkdir online cgroup %s err: %v", onlineCg, err)
}
if !existed {
defer os.RemoveAll(onlineCg)
}
qosCpuset.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limit*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlineCgInRoot,
},
OnlineCgroups: []string{
onlineCgInRoot,
},
})
offlineCpusets, err := readCpuSetCgroup(offlineCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", offlineCg, err)
}
if offlineCpusets != tc.expect.offline {
t.Fatalf("cpu qos cpuset test case(%s) failed, expect offline %s, got %s",
tc.describe, tc.expect.offline, offlineCpusets)
}
onCpusets, err := readCpuSetCgroup(onlineCg)
if err != nil {
t.Fatalf("read cpuset cgroup %s err: %v", onlineCg, err)
}
if onCpusets != tc.expect.online {
t.Fatalf("cpu qos cpuset test case(%s) failed, expect online %s, got %s",
tc.describe, tc.expect.online, onCpusets)
}
}()
}
}
type cpuQuotaTestData struct {
describe string
limit int64
weight *uint64
expect struct {
limit string
weight string
}
}
// TestQosCpuQuota_Manage test cpu quota qos manager
func TestQosCpuQuota_Manage(t *testing.T) {
testCases := []cpuQuotaTestData{
{
describe: "quota test",
limit: 2,
weight: nil,
expect: struct {
limit string
weight string
}{limit: "2000000", weight: "1024"},
},
{
describe: "weight test",
limit: 2,
weight: uint64Pointer(2),
expect: struct {
limit string
weight string
}{limit: "2000000", weight: "2"},
},
}
quotaCgPath := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline
offlinePath := "/sys/fs/cgroup/cpu,cpuacct" + types.CgroupOffline + "/test"
offlinePathInRoot := types.CgroupOffline + "/test"
for _, tc := range testCases {
qosQuota := &qosCpuQuota{
shareWeight: tc.weight,
kubeletStatic: false,
lastOfflineCgs: newCgroupPaths(),
}
func() {
existed, err := mkdirCgPath(quotaCgPath)
if err != nil {
t.Fatalf("mkdir offline cgroup %s err: %v", quotaCgPath, err)
}
if !existed {
defer os.RemoveAll(quotaCgPath)
}
existed, err = mkdirCgPath(offlinePath)
if err != nil {
t.Fatalf("mkdir online cgroup %s err: %v", offlinePath, err)
}
if !existed {
defer os.RemoveAll(offlinePath)
}
qosQuota.Manage(&CgroupResourceConfig{
Resources: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tc.limit*1000, resource.DecimalSI),
},
OfflineCgroups: []string{
offlinePathInRoot,
},
})
quotaBytes, err := ioutil.ReadFile(path.Join(quotaCgPath, "cpu.cfs_quota_us"))
if err != nil {
t.Fatalf("read cpu quota for %s err: %v", quotaCgPath, err)
}
if strings.Trim(string(quotaBytes), "\n") != tc.expect.limit {
t.Fatalf("cpu qos quota test case(%s) failed, expect quota: %s, got: %s",
tc.describe, tc.expect.limit, strings.Trim(string(quotaBytes), "\n"))
}
shareBytes, err := ioutil.ReadFile(path.Join(offlinePath, "cpu.shares"))
if err != nil {
t.Fatalf("read cpu share for %s err: %v", offlinePath, err)
}
if strings.Trim(string(shareBytes), "\n") != tc.expect.weight {
t.Fatalf("cpu qos quota test case(%s) failed, expect share: %s, got: %s",
tc.describe, tc.expect.weight, strings.Trim(string(shareBytes), "\n"))
}
}()
}
}
func getTotalCpus() (int64, error) |
func readCpuSetCgroup(cgPath string) (value string, err error) {
data, err := ioutil.ReadFile(filepath.Join(cgPath, "cpuset.cpus"))
if err != nil {
return "", err
}
return strings.Replace(string(data), "\n", "", -1), nil
}
func mkdirCgPath(cgPath string) (existed bool, err error) {
existed = true
_, err = os.Stat(cgPath)
if err != nil {
if os.IsNotExist(err) {
existed = false
err = os.MkdirAll(cgPath, 0755)
}
}
return existed, err
}
func uint64Pointer(value uint64) *uint64 {
var p *uint64
p = new(uint64)
*p = value
return p
}
| {
cpuInfo, err := cpu.Info()
if err != nil {
return 0, err
}
return int64(len(cpuInfo)), nil
} | identifier_body |
scanner.rs | use crate::file::{FileContent, FileSet};
use crate::metadata::Metadata;
use std::cell::RefCell;
use std::cmp;
use std::collections::btree_map::Entry as BTreeEntry;
use std::collections::hash_map::Entry as HashEntry;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::ffi::OsString;
use std::fmt::Debug;
use std::fs;
use std::io;
use std::os::unix::fs::MetadataExt;
use std::path::Path;
use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum RunMode {
/// Merges paths in memory, but not on disk. Gives realistic UI output.
DryRun,
/// Like dry run, but completely skips deduping, with no UI for dupes.
DryRunNoMerging,
Hardlink,
}
#[derive(Debug)]
pub struct Settings {
/// Ignore files smaller than a filesystem block.
/// Deduping of such files is unlikely to save space.
pub ignore_small: bool,
pub run_mode: RunMode,
// If 1, go to flush. If > 1, abort immediately.
pub break_on: Option<&'static AtomicU32>,
}
impl Settings {
pub fn breaks(&self) -> u32 {
if let Some(break_on) = self.break_on {
break_on.load(Ordering::SeqCst)
} else {
0
}
}
}
#[derive(Debug, Default, Copy, Clone)]
#[cfg_attr(feature = "json", derive(serde_derive::Serialize))]
pub struct Stats {
pub added: usize,
pub skipped: usize,
pub dupes: usize,
pub bytes_deduplicated: usize,
pub hardlinks: usize,
pub bytes_saved_by_hardlinks: usize,
}
pub trait ScanListener: Debug {
fn file_scanned(&mut self, path: &Path, stats: &Stats);
fn scan_over(&self, scanner: &Scanner, stats: &Stats, scan_duration: Duration);
fn hardlinked(&mut self, src: &Path, dst: &Path);
fn duplicate_found(&mut self, src: &Path, dst: &Path);
}
#[derive(Debug)]
struct SilentListener;
impl ScanListener for SilentListener {
fn file_scanned(&mut self, _: &Path, _: &Stats) {}
fn scan_over(&self, _: &Scanner, _: &Stats, _: Duration) {}
fn hardlinked(&mut self, _: &Path, _: &Path) {}
fn duplicate_found(&mut self, _: &Path, _: &Path) {}
}
type RcFileSet = Rc<RefCell<FileSet>>;
#[derive(Debug)]
pub struct Scanner {
/// All hardlinks of the same inode have to be treated as the same file
by_inode: HashMap<(u64, u64), RcFileSet>,
/// See Hasher for explanation
by_content: BTreeMap<FileContent, Vec<RcFileSet>>,
/// Directories left to scan. Sorted by inode number.
/// I'm assuming scanning in this order is faster, since inode is related to file's age,
/// which is related to its physical position on disk, which makes the scan more sequential.
to_scan: BinaryHeap<(u64, Box<Path>)>,
scan_listener: Box<dyn ScanListener>,
stats: Stats,
exclude: HashSet<OsString>,
pub settings: Settings,
deferred_count: usize,
next_deferred_count: usize,
}
impl Scanner {
pub fn new() -> Self {
Scanner {
settings: Settings {
ignore_small: true,
run_mode: RunMode::Hardlink,
break_on: None,
},
by_inode: HashMap::new(),
by_content: BTreeMap::new(),
to_scan: BinaryHeap::new(),
scan_listener: Box::new(SilentListener),
stats: Stats::default(),
exclude: HashSet::new(),
deferred_count: 0,
next_deferred_count: 4096,
}
}
pub fn exclude(&mut self, exclude: Vec<String>) {
self.exclude = exclude.into_iter().map(From::from).collect();
}
/// Set the scan listener. Caution: This overrides previously set listeners!
/// Use a multiplexing listener if multiple listeners are required.
pub fn set_listener(&mut self, listener: Box<dyn ScanListener>) {
self.scan_listener = listener;
}
/// Scan any file or directory for dupes.
/// Dedupe is done within the path as well as against all previously added paths.
pub fn scan(&mut self, path: impl AsRef<Path>) -> io::Result<()> {
self.enqueue(path)?;
self.flush()?;
Ok(())
}
pub fn enqueue(&mut self, path: impl AsRef<Path>) -> io::Result<()> {
let path = fs::canonicalize(path)?.into_boxed_path();
let metadata = fs::symlink_metadata(&path)?;
self.add(path, &metadata)?;
Ok(())
}
/// Drains the queue of directories to scan
pub fn flush(&mut self) -> io::Result<()> {
let start_time = Instant::now();
while let Some((_, path)) = self.to_scan.pop() {
if let Err(err) = self.scan_dir(&path) {
eprintln!("Error scanning {}: {}", path.display(), err);
self.stats.skipped += 1;
}
if self.settings.breaks() > 0 {
eprintln!("Stopping scan");
break;
}
}
self.flush_deferred();
let scan_duration = Instant::now().duration_since(start_time);
self.scan_listener.scan_over(self, &self.stats, scan_duration);
Ok(())
}
fn scan_dir(&mut self, path: &Path) -> io::Result<()> {
// Errors are ignored here, since it's super common to find permission denied and unreadable symlinks,
// and it'd be annoying if that aborted the whole operation.
// FIXME: store the errors somehow to report them in a controlled manner
for entry in fs::read_dir(path)?.filter_map(|p| p.ok()) {
if self.settings.breaks() > 0 {
break;
}
let path = entry.path();
if let Some(file_name) = path.file_name() {
if self.exclude.contains(file_name) {
self.stats.skipped += 1;
continue;
}
}
if let Err(err) = self.add(path.into_boxed_path(), &entry.metadata()?) {
eprintln!("{}: {}", entry.path().display(), err);
}
}
Ok(())
}
fn add(&mut self, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> {
self.scan_listener.file_scanned(&path, &self.stats);
let ty = metadata.file_type();
if ty.is_dir() {
// Inode is truncated to group scanning of roughly close inodes together,
// But still preserve some directory traversal order.
// Negation to scan from the highest (assuming latest) first.
let order_key = !(metadata.ino() >> 8);
self.to_scan.push((order_key, path));
return Ok(());
} else if ty.is_symlink() || !ty.is_file() {
// Support for traversing symlinks would require preventing loops
// Deduping /dev/ would be funny
self.stats.skipped += 1;
return Ok(());
}
// APFS reports 4*MB* block size
let small_size = cmp::min(16 * 1024, metadata.blksize());
if metadata.size() == 0 || (self.settings.ignore_small && metadata.size() < small_size) {
self.stats.skipped += 1;
return Ok(());
} | self.stats.added += 1;
if let Some(fileset) = self.new_fileset(&path, metadata) {
self.dedupe_by_content(fileset, path, metadata)?;
} else {
self.stats.hardlinks += 1;
self.stats.bytes_saved_by_hardlinks += metadata.size() as usize;
}
Ok(())
}
/// Creates a new fileset if it's a new file.
/// Returns None if it's a hardlink of a file already seen.
/// Creates a new fileset if it's a new file.
/// Returns None if it's a hardlink of a file already seen.
///
/// Files are identified by their (device, inode) pair; a repeat sighting
/// just appends the new path to the existing set.
fn new_fileset(&mut self, path: &Path, metadata: &fs::Metadata) -> Option<RcFileSet> {
    let owned_path: Box<Path> = path.into();
    let device_inode = (metadata.dev(), metadata.ino());
    match self.by_inode.entry(device_inode) {
        HashEntry::Vacant(slot) => {
            let set = Rc::new(RefCell::new(FileSet::new(owned_path, metadata.nlink())));
            slot.insert(Rc::clone(&set)); // clone just bumps a refcount here
            Some(set)
        },
        HashEntry::Occupied(mut slot) => {
            // This case may require a deferred deduping later,
            // if the new link belongs to an old fileset that has already been deduped.
            slot.get_mut().borrow_mut().push(owned_path);
            None
        },
    }
}
/// Here's where all the magic happens
/// Here's where all the magic happens
///
/// Indexes the file by its content key. The first file with a given content
/// starts a new group; every later arrival is a duplicate and is either
/// deduped immediately (when no member of the group has extra hardlinks) or
/// deferred until all hardlinks are known. The deferred backlog is flushed
/// whenever it crosses a doubling threshold, to bound its growth.
fn dedupe_by_content(&mut self, fileset: RcFileSet, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> {
    let mut deferred = false;
    match self.by_content.entry(FileContent::new(path, Metadata::new(metadata))) {
        BTreeEntry::Vacant(e) => {
            // Seems unique so far
            e.insert(vec![fileset]);
        },
        BTreeEntry::Occupied(mut e) => {
            // Found a dupe!
            self.stats.dupes += 1;
            self.stats.bytes_deduplicated += metadata.size() as usize;
            let filesets = e.get_mut();
            filesets.push(fileset);
            // Deduping can either be done immediately or later. Immediate is more cache-friendly and interactive,
            // but for files that already have hardlinks it can cause unnecessary re-linking. So if there are
            // hardlinks in the set, wait until the end to dedupe when all hardlinks are known.
            if filesets.iter().all(|set| set.borrow().links() == 1) {
                Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener)?;
            } else {
                deferred = true;
            }
        },
    }
    // Periodically flush deferred files to avoid building a huge queue
    // (the growing limit is a compromise between responsiveness
    // and potential to hit a pathological case of hardlinking with wrong hardlink groups)
    if deferred {
        self.deferred_count += 1;
        if self.deferred_count >= self.next_deferred_count {
            // Doubling makes flushes progressively rarer as the scan grows.
            self.next_deferred_count *= 2;
            self.deferred_count = 0;
            self.flush_deferred();
        }
    }
    Ok(())
}
/// Runs dedupe over every content group accumulated so far.
///
/// Per-group errors are printed and skipped; the whole pass is abandoned
/// early once the user has requested more than one interrupt.
fn flush_deferred(&mut self) {
    for group in self.by_content.values_mut() {
        let user_aborted = self.settings.breaks() > 1;
        if user_aborted {
            eprintln!("Aborting");
            break;
        }
        match Self::dedupe(group, self.settings.run_mode, &mut *self.scan_listener) {
            Ok(()) => {},
            Err(err) => eprintln!("{}", err),
        }
    }
}
/// Merges every path in `filesets` into a single hardlink group.
///
/// No-op under `RunMode::DryRunNoMerging`. Otherwise all paths are linked
/// to the member that already has the most hardlinks, since it's less work
/// to merge a small group into a large one. Under `RunMode::DryRun` the
/// merge is only reported to `scan_listener`, not performed on disk.
///
/// Returns the first I/O error hit while linking/renaming; the path that
/// failed is deliberately dropped from the merged set (see comment below).
fn dedupe(filesets: &mut [RcFileSet], run_mode: RunMode, scan_listener: &mut dyn ScanListener) -> io::Result<()> {
    if run_mode == RunMode::DryRunNoMerging {
        return Ok(());
    }
    // Find file with the largest number of hardlinks, since it's less work to merge a small group into a large group
    let mut largest_idx = 0;
    let mut largest_links = 0;
    let mut nonempty_filesets = 0;
    for (idx, fileset) in filesets.iter().enumerate() {
        let fileset = fileset.borrow();
        if !fileset.paths.is_empty() {
            // Only actual paths we can merge matter here
            nonempty_filesets += 1;
        }
        let links = fileset.links();
        if links > largest_links {
            largest_idx = idx;
            largest_links = links;
        }
    }
    if nonempty_filesets == 0 {
        return Ok(()); // Already merged
    }
    // The set is still going to be in use! So everything has to be updated to make sense for the next call
    // NOTE: the RefMut on the target set stays alive for the whole merge;
    // re-borrowing it is avoided below by skipping `largest_idx` in the loop.
    let merged_paths = &mut { filesets[largest_idx].borrow_mut() }.paths;
    let source_path = merged_paths[0].clone();
    for (i, set) in filesets.iter().enumerate() {
        // We don't want to merge the set with itself
        if i == largest_idx {
            continue;
        }
        let paths = &mut set.borrow_mut().paths;
        // dest_path will be "lost" on error, but that's fine, since we don't want to dedupe it if it causes errors
        for dest_path in paths.drain(..) {
            assert_ne!(&source_path, &dest_path);
            debug_assert_ne!(fs::symlink_metadata(&source_path)?.ino(), fs::symlink_metadata(&dest_path)?.ino());
            if run_mode == RunMode::DryRun {
                // Report only; still record the path as merged so bookkeeping matches a real run.
                scan_listener.duplicate_found(&dest_path, &source_path);
                merged_paths.push(dest_path);
                continue;
            }
            // Unique temp name in dest's directory, so the rename below stays on the same filesystem.
            let temp_path = dest_path.with_file_name(".tmp-dupe-e1iIQcBFn5pC4MUSm-xkcd-221");
            debug_assert!(!temp_path.exists());
            debug_assert!(source_path.exists());
            debug_assert!(dest_path.exists());
            // In posix link guarantees not to overwrite, and mv guarantes to move atomically
            // so this two-step replacement is pretty robust
            if let Err(err) = fs::hard_link(&source_path, &temp_path) {
                eprintln!("unable to hardlink {} {} due to {}", source_path.display(), temp_path.display(), err);
                let _ = fs::remove_file(temp_path);
                return Err(err);
            }
            if let Err(err) = fs::rename(&temp_path, &dest_path) {
                eprintln!("unable to rename {} {} due to {}", temp_path.display(), dest_path.display(), err);
                let _ = fs::remove_file(temp_path);
                return Err(err);
            }
            debug_assert!(!temp_path.exists());
            debug_assert!(source_path.exists());
            debug_assert!(dest_path.exists());
            scan_listener.hardlinked(&dest_path, &source_path);
            merged_paths.push(dest_path);
        }
    }
    Ok(())
}
/// Returns a snapshot of all content groups as plain cloned `FileSet`s,
/// detached from the scanner's internal `Rc<RefCell<_>>` bookkeeping.
pub fn dupes(&self) -> Vec<Vec<FileSet>> {
    let mut groups = Vec::new();
    for filesets in self.by_content.values() {
        let mut group = Vec::new();
        for set in filesets.iter() {
            let snapshot = set.borrow();
            group.push((*snapshot).clone());
        }
        groups.push(group);
    }
    groups
}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.