file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
scanner.rs | use crate::file::{FileContent, FileSet};
use crate::metadata::Metadata;
use std::cell::RefCell;
use std::cmp;
use std::collections::btree_map::Entry as BTreeEntry;
use std::collections::hash_map::Entry as HashEntry;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::ffi::OsString;
use std::fmt::Debug;
use std::fs;
use std::io;
use std::os::unix::fs::MetadataExt;
use std::path::Path;
use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum RunMode {
/// Merges paths in memory, but not on disk. Gives realistic UI output.
DryRun,
/// Like dry run, but completely skips deduping, with no UI for dupes.
DryRunNoMerging,
Hardlink,
}
#[derive(Debug)]
pub struct Settings {
/// Ignore files smaller than a filesystem block.
/// Deduping of such files is unlikely to save space.
pub ignore_small: bool,
pub run_mode: RunMode,
// If 1, go to flush. If > 1, abort immediately.
pub break_on: Option<&'static AtomicU32>,
}
impl Settings {
    /// Current value of the break flag, or 0 when no flag is installed.
    /// 1 means "stop scanning, but still flush"; >1 means "abort immediately".
    pub fn breaks(&self) -> u32 {
        self.break_on.map_or(0, |flag| flag.load(Ordering::SeqCst))
    }
}
#[derive(Debug, Default, Copy, Clone)]
#[cfg_attr(feature = "json", derive(serde_derive::Serialize))]
pub struct Stats {
pub added: usize,
pub skipped: usize,
pub dupes: usize,
pub bytes_deduplicated: usize,
pub hardlinks: usize,
pub bytes_saved_by_hardlinks: usize,
}
/// Observer hooks for scan progress. Install one via `Scanner::set_listener`.
pub trait ScanListener: Debug {
/// Called for every path visited, including paths that are later skipped.
fn file_scanned(&mut self, path: &Path, stats: &Stats);
/// Called once per `flush`, after the scan queue has been drained.
fn scan_over(&self, scanner: &Scanner, stats: &Stats, scan_duration: Duration);
/// Called after `dst` has been replaced with a hardlink to `src`.
fn hardlinked(&mut self, src: &Path, dst: &Path);
/// Called in dry-run mode instead of `hardlinked`.
fn duplicate_found(&mut self, src: &Path, dst: &Path);
}
/// Default no-op listener used until `Scanner::set_listener` installs a real one.
#[derive(Debug)]
struct SilentListener;
impl ScanListener for SilentListener {
fn file_scanned(&mut self, _: &Path, _: &Stats) {}
fn scan_over(&self, _: &Scanner, _: &Stats, _: Duration) {}
fn hardlinked(&mut self, _: &Path, _: &Path) {}
fn duplicate_found(&mut self, _: &Path, _: &Path) {}
}
type RcFileSet = Rc<RefCell<FileSet>>;
#[derive(Debug)]
pub struct Scanner {
/// All hardlinks of the same inode have to be treated as the same file
by_inode: HashMap<(u64, u64), RcFileSet>,
/// See Hasher for explanation
by_content: BTreeMap<FileContent, Vec<RcFileSet>>,
/// Directories left to scan. Sorted by inode number.
/// I'm assuming scanning in this order is faster, since inode is related to file's age,
/// which is related to its physical position on disk, which makes the scan more sequential.
to_scan: BinaryHeap<(u64, Box<Path>)>,
scan_listener: Box<dyn ScanListener>,
stats: Stats,
exclude: HashSet<OsString>,
pub settings: Settings,
deferred_count: usize,
next_deferred_count: usize,
}
impl Scanner {
pub fn new() -> Self {
Scanner {
settings: Settings {
ignore_small: true,
run_mode: RunMode::Hardlink,
break_on: None,
},
by_inode: HashMap::new(),
by_content: BTreeMap::new(),
to_scan: BinaryHeap::new(),
scan_listener: Box::new(SilentListener),
stats: Stats::default(),
exclude: HashSet::new(),
deferred_count: 0,
next_deferred_count: 4096,
}
}
pub fn exclude(&mut self, exclude: Vec<String>) {
self.exclude = exclude.into_iter().map(From::from).collect();
}
/// Set the scan listener. Caution: This overrides previously set listeners!
/// Use a multiplexing listener if multiple listeners are required.
pub fn set_listener(&mut self, listener: Box<dyn ScanListener>) {
// The previous listener (initially the default SilentListener) is dropped here.
self.scan_listener = listener;
}
/// Scan any file or directory for dupes.
/// Dedupe is done within the path as well as against all previously added paths.
pub fn scan(&mut self, path: impl AsRef<Path>) -> io::Result<()> {
    self.enqueue(path)?;
    self.flush()
}
/// Adds a path to the scan queue without draining it.
/// The path is canonicalized first, so symlinked roots are resolved up front.
pub fn enqueue(&mut self, path: impl AsRef<Path>) -> io::Result<()> {
    let canonical = fs::canonicalize(path)?.into_boxed_path();
    let metadata = fs::symlink_metadata(&canonical)?;
    self.add(canonical, &metadata)
}
/// Drains the queue of directories to scan, then dedupes any deferred
/// groups and notifies the listener that the scan is over.
pub fn flush(&mut self) -> io::Result<()> {
    let start_time = Instant::now();
    while let Some((_, dir)) = self.to_scan.pop() {
        if let Err(err) = self.scan_dir(&dir) {
            eprintln!("Error scanning {}: {}", dir.display(), err);
            self.stats.skipped += 1;
        }
        // Any non-zero break flag stops draining the queue.
        if self.settings.breaks() > 0 {
            eprintln!("Stopping scan");
            break;
        }
    }
    self.flush_deferred();
    self.scan_listener.scan_over(self, &self.stats, start_time.elapsed());
    Ok(())
}
/// Scans one directory, feeding every readable entry to `add`.
///
/// Errors on individual entries are reported and skipped, since it's super
/// common to find permission denied and unreadable symlinks,
/// and it'd be annoying if that aborted the whole operation.
/// FIXME: store the errors somehow to report them in a controlled manner
fn scan_dir(&mut self, path: &Path) -> io::Result<()> {
    for entry in fs::read_dir(path)?.filter_map(|p| p.ok()) {
        if self.settings.breaks() > 0 {
            break;
        }
        let path = entry.path();
        if let Some(file_name) = path.file_name() {
            if self.exclude.contains(file_name) {
                self.stats.skipped += 1;
                continue;
            }
        }
        // Previously `entry.metadata()?` aborted the rest of this directory on
        // one unreadable entry, contradicting the ignore-errors policy above.
        // Treat it like any other unreadable entry instead.
        let metadata = match entry.metadata() {
            Ok(metadata) => metadata,
            Err(err) => {
                eprintln!("{}: {}", path.display(), err);
                self.stats.skipped += 1;
                continue;
            },
        };
        if let Err(err) = self.add(path.into_boxed_path(), &metadata) {
            eprintln!("{}: {}", entry.path().display(), err);
        }
    }
    Ok(())
}
/// Classifies one path: directories are queued for later scanning, symlinks,
/// non-files, and (optionally) small files are skipped, and regular files
/// are registered by (device, inode) and then deduped by content.
fn add(&mut self, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> {
// Listener sees every visited path, even ones skipped below.
self.scan_listener.file_scanned(&path, &self.stats);
let ty = metadata.file_type();
if ty.is_dir() {
// Inode is truncated to group scanning of roughly close inodes together,
// But still preserve some directory traversal order.
// Negation to scan from the highest (assuming latest) first.
let order_key = !(metadata.ino() >> 8);
self.to_scan.push((order_key, path));
return Ok(());
} else if ty.is_symlink() || !ty.is_file() {
// Support for traversing symlinks would require preventing loops
// Deduping /dev/ would be funny
self.stats.skipped += 1;
return Ok(());
}
// Small-file threshold is capped at 16 KiB because some filesystems report
// huge block sizes (APFS reports 4*MB* block size).
let small_size = cmp::min(16 * 1024, metadata.blksize());
if metadata.size() == 0 || (self.settings.ignore_small && metadata.size() < small_size) {
self.stats.skipped += 1;
return Ok(());
}
self.stats.added += 1;
// None means this inode was already seen, i.e. an existing hardlink.
if let Some(fileset) = self.new_fileset(&path, metadata) {
self.dedupe_by_content(fileset, path, metadata)?;
} else {
self.stats.hardlinks += 1;
self.stats.bytes_saved_by_hardlinks += metadata.size() as usize;
}
Ok(())
}
/// Creates a new fileset if it's a new file.
/// Returns None if it's a hardlink of a file already seen.
fn new_fileset(&mut self, path: &Path, metadata: &fs::Metadata) -> Option<RcFileSet> {
let path: Box<Path> = path.into();
let device_inode = (metadata.dev(), metadata.ino());
match self.by_inode.entry(device_inode) {
HashEntry::Vacant(e) => | ,
HashEntry::Occupied(mut e) => {
// This case may require a deferred deduping later,
// if the new link belongs to an old fileset that has already been deduped.
let mut t = e.get_mut().borrow_mut();
t.push(path);
None
},
}
}
/// Here's where all the magic happens.
/// Groups the file under its `FileContent` key and dedupes the group,
/// either immediately or deferred until all hardlinks are known.
fn dedupe_by_content(&mut self, fileset: RcFileSet, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> {
let mut deferred = false;
match self.by_content.entry(FileContent::new(path, Metadata::new(metadata))) {
BTreeEntry::Vacant(e) => {
// Seems unique so far
e.insert(vec![fileset]);
},
BTreeEntry::Occupied(mut e) => {
// Found a dupe!
self.stats.dupes += 1;
self.stats.bytes_deduplicated += metadata.size() as usize;
let filesets = e.get_mut();
filesets.push(fileset);
// Deduping can either be done immediately or later. Immediate is more cache-friendly and interactive,
// but for files that already have hardlinks it can cause unnecessary re-linking. So if there are
// hardlinks in the set, wait until the end to dedupe when all hardlinks are known.
if filesets.iter().all(|set| set.borrow().links() == 1) {
Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener)?;
} else {
deferred = true;
}
},
}
// Periodically flush deferred files to avoid building a huge queue
// (the growing limit is a compromise between responsiveness
// and potential to hit a pathological case of hardlinking with wrong hardlink groups)
if deferred {
self.deferred_count += 1;
if self.deferred_count >= self.next_deferred_count {
// Threshold doubles each time, so full flushes become progressively rarer.
self.next_deferred_count *= 2;
self.deferred_count = 0;
self.flush_deferred();
}
}
Ok(())
}
/// Runs dedupe over every content group, including groups whose deduping
/// was deferred. Called at the end of `flush` and periodically during scanning.
fn flush_deferred(&mut self) {
for filesets in self.by_content.values_mut() {
// break_on values > 1 mean abort immediately (see Settings::break_on).
if self.settings.breaks() > 1 {
eprintln!("Aborting");
break;
}
// Errors here are reported but don't stop the remaining groups.
if let Err(err) = Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener) {
eprintln!("{}", err);
}
}
}
/// Merges every fileset in the group into the one with the most hardlinks,
/// replacing each duplicate path with a hardlink to the chosen source.
/// `filesets` is updated in place so the group remains valid for later calls.
fn dedupe(filesets: &mut [RcFileSet], run_mode: RunMode, scan_listener: &mut dyn ScanListener) -> io::Result<()> {
if run_mode == RunMode::DryRunNoMerging {
return Ok(());
}
// Find file with the largest number of hardlinks, since it's less work to merge a small group into a large group
let mut largest_idx = 0;
let mut largest_links = 0;
let mut nonempty_filesets = 0;
for (idx, fileset) in filesets.iter().enumerate() {
let fileset = fileset.borrow();
if !fileset.paths.is_empty() {
// Only actual paths we can merge matter here
nonempty_filesets += 1;
}
let links = fileset.links();
if links > largest_links {
largest_idx = idx;
largest_links = links;
}
}
if nonempty_filesets == 0 {
return Ok(()); // Already merged
}
// The set is still going to be in use! So everything has to be updated to make sense for the next call
// NOTE: this holds a RefMut on the largest set for the rest of the function,
// which is why the loop below skips largest_idx before borrowing.
let merged_paths = &mut { filesets[largest_idx].borrow_mut() }.paths;
let source_path = merged_paths[0].clone();
for (i, set) in filesets.iter().enumerate() {
// We don't want to merge the set with itself
if i == largest_idx {
continue;
}
let paths = &mut set.borrow_mut().paths;
// dest_path will be "lost" on error, but that's fine, since we don't want to dedupe it if it causes errors
for dest_path in paths.drain(..) {
assert_ne!(&source_path, &dest_path);
debug_assert_ne!(fs::symlink_metadata(&source_path)?.ino(), fs::symlink_metadata(&dest_path)?.ino());
if run_mode == RunMode::DryRun {
// Dry run only records the merge in memory; nothing touches the disk.
scan_listener.duplicate_found(&dest_path, &source_path);
merged_paths.push(dest_path);
continue;
}
let temp_path = dest_path.with_file_name(".tmp-dupe-e1iIQcBFn5pC4MUSm-xkcd-221");
debug_assert!(!temp_path.exists());
debug_assert!(source_path.exists());
debug_assert!(dest_path.exists());
// In POSIX, link() guarantees not to overwrite, and mv guarantees to move atomically,
// so this two-step replacement is pretty robust
if let Err(err) = fs::hard_link(&source_path, &temp_path) {
eprintln!("unable to hardlink {} {} due to {}", source_path.display(), temp_path.display(), err);
let _ = fs::remove_file(temp_path);
return Err(err);
}
if let Err(err) = fs::rename(&temp_path, &dest_path) {
eprintln!("unable to rename {} {} due to {}", temp_path.display(), dest_path.display(), err);
let _ = fs::remove_file(temp_path);
return Err(err);
}
debug_assert!(!temp_path.exists());
debug_assert!(source_path.exists());
debug_assert!(dest_path.exists());
scan_listener.hardlinked(&dest_path, &source_path);
merged_paths.push(dest_path);
}
}
Ok(())
}
/// Returns a snapshot of all file sets grouped by identical content.
pub fn dupes(&self) -> Vec<Vec<FileSet>> {
    self.by_content
        .values()
        .map(|group| group.iter().map(|set| set.borrow().clone()).collect())
        .collect()
}
}
| {
let fileset = Rc::new(RefCell::new(FileSet::new(path, metadata.nlink())));
e.insert(Rc::clone(&fileset)); // clone just bumps a refcount here
Some(fileset)
} | conditional_block |
scanner.rs | use crate::file::{FileContent, FileSet};
use crate::metadata::Metadata;
use std::cell::RefCell;
use std::cmp;
use std::collections::btree_map::Entry as BTreeEntry;
use std::collections::hash_map::Entry as HashEntry;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::ffi::OsString;
use std::fmt::Debug;
use std::fs;
use std::io;
use std::os::unix::fs::MetadataExt;
use std::path::Path;
use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum RunMode {
/// Merges paths in memory, but not on disk. Gives realistic UI output.
DryRun,
/// Like dry run, but completely skips deduping, with no UI for dupes.
DryRunNoMerging,
Hardlink,
}
#[derive(Debug)]
pub struct Settings {
/// Ignore files smaller than a filesystem block.
/// Deduping of such files is unlikely to save space.
pub ignore_small: bool,
pub run_mode: RunMode,
// If 1, go to flush. If > 1, abort immediately.
pub break_on: Option<&'static AtomicU32>,
}
impl Settings {
pub fn breaks(&self) -> u32 {
if let Some(break_on) = self.break_on {
break_on.load(Ordering::SeqCst)
} else {
0
}
}
}
#[derive(Debug, Default, Copy, Clone)]
#[cfg_attr(feature = "json", derive(serde_derive::Serialize))]
pub struct Stats {
pub added: usize,
pub skipped: usize,
pub dupes: usize,
pub bytes_deduplicated: usize,
pub hardlinks: usize,
pub bytes_saved_by_hardlinks: usize,
}
pub trait ScanListener: Debug {
fn file_scanned(&mut self, path: &Path, stats: &Stats);
fn scan_over(&self, scanner: &Scanner, stats: &Stats, scan_duration: Duration);
fn hardlinked(&mut self, src: &Path, dst: &Path);
fn duplicate_found(&mut self, src: &Path, dst: &Path);
}
#[derive(Debug)]
struct SilentListener;
impl ScanListener for SilentListener {
fn file_scanned(&mut self, _: &Path, _: &Stats) {}
fn scan_over(&self, _: &Scanner, _: &Stats, _: Duration) {}
fn hardlinked(&mut self, _: &Path, _: &Path) {}
fn duplicate_found(&mut self, _: &Path, _: &Path) {}
}
type RcFileSet = Rc<RefCell<FileSet>>;
#[derive(Debug)]
pub struct Scanner {
/// All hardlinks of the same inode have to be treated as the same file
by_inode: HashMap<(u64, u64), RcFileSet>,
/// See Hasher for explanation
by_content: BTreeMap<FileContent, Vec<RcFileSet>>,
/// Directories left to scan. Sorted by inode number.
/// I'm assuming scanning in this order is faster, since inode is related to file's age,
/// which is related to its physical position on disk, which makes the scan more sequential.
to_scan: BinaryHeap<(u64, Box<Path>)>,
scan_listener: Box<dyn ScanListener>,
stats: Stats,
exclude: HashSet<OsString>,
pub settings: Settings,
deferred_count: usize,
next_deferred_count: usize,
}
impl Scanner {
pub fn new() -> Self {
Scanner {
settings: Settings {
ignore_small: true,
run_mode: RunMode::Hardlink,
break_on: None,
},
by_inode: HashMap::new(),
by_content: BTreeMap::new(),
to_scan: BinaryHeap::new(),
scan_listener: Box::new(SilentListener),
stats: Stats::default(),
exclude: HashSet::new(),
deferred_count: 0,
next_deferred_count: 4096,
}
}
pub fn exclude(&mut self, exclude: Vec<String>) {
self.exclude = exclude.into_iter().map(From::from).collect();
}
/// Set the scan listener. Caution: This overrides previously set listeners!
/// Use a multiplexing listener if multiple listeners are required.
pub fn set_listener(&mut self, listener: Box<dyn ScanListener>) {
self.scan_listener = listener;
}
/// Scan any file or directory for dupes.
/// Dedupe is done within the path as well as against all previously added paths.
pub fn scan(&mut self, path: impl AsRef<Path>) -> io::Result<()> {
self.enqueue(path)?;
self.flush()?;
Ok(())
}
pub fn enqueue(&mut self, path: impl AsRef<Path>) -> io::Result<()> {
let path = fs::canonicalize(path)?.into_boxed_path();
let metadata = fs::symlink_metadata(&path)?;
self.add(path, &metadata)?;
Ok(())
}
/// Drains the queue of directories to scan
pub fn flush(&mut self) -> io::Result<()> {
let start_time = Instant::now();
while let Some((_, path)) = self.to_scan.pop() {
if let Err(err) = self.scan_dir(&path) {
eprintln!("Error scanning {}: {}", path.display(), err);
self.stats.skipped += 1;
}
if self.settings.breaks() > 0 {
eprintln!("Stopping scan");
break;
}
}
self.flush_deferred();
let scan_duration = Instant::now().duration_since(start_time);
self.scan_listener.scan_over(self, &self.stats, scan_duration);
Ok(())
}
fn scan_dir(&mut self, path: &Path) -> io::Result<()> |
fn add(&mut self, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> {
self.scan_listener.file_scanned(&path, &self.stats);
let ty = metadata.file_type();
if ty.is_dir() {
// Inode is truncated to group scanning of roughly close inodes together,
// But still preserve some directory traversal order.
// Negation to scan from the highest (assuming latest) first.
let order_key = !(metadata.ino() >> 8);
self.to_scan.push((order_key, path));
return Ok(());
} else if ty.is_symlink() || !ty.is_file() {
// Support for traversing symlinks would require preventing loops
// Deduping /dev/ would be funny
self.stats.skipped += 1;
return Ok(());
}
// APFS reports 4*MB* block size
let small_size = cmp::min(16 * 1024, metadata.blksize());
if metadata.size() == 0 || (self.settings.ignore_small && metadata.size() < small_size) {
self.stats.skipped += 1;
return Ok(());
}
self.stats.added += 1;
if let Some(fileset) = self.new_fileset(&path, metadata) {
self.dedupe_by_content(fileset, path, metadata)?;
} else {
self.stats.hardlinks += 1;
self.stats.bytes_saved_by_hardlinks += metadata.size() as usize;
}
Ok(())
}
/// Creates a new fileset if it's a new file.
/// Returns None if it's a hardlink of a file already seen.
fn new_fileset(&mut self, path: &Path, metadata: &fs::Metadata) -> Option<RcFileSet> {
let path: Box<Path> = path.into();
let device_inode = (metadata.dev(), metadata.ino());
match self.by_inode.entry(device_inode) {
HashEntry::Vacant(e) => {
let fileset = Rc::new(RefCell::new(FileSet::new(path, metadata.nlink())));
e.insert(Rc::clone(&fileset)); // clone just bumps a refcount here
Some(fileset)
},
HashEntry::Occupied(mut e) => {
// This case may require a deferred deduping later,
// if the new link belongs to an old fileset that has already been deduped.
let mut t = e.get_mut().borrow_mut();
t.push(path);
None
},
}
}
/// Here's where all the magic happens
fn dedupe_by_content(&mut self, fileset: RcFileSet, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> {
let mut deferred = false;
match self.by_content.entry(FileContent::new(path, Metadata::new(metadata))) {
BTreeEntry::Vacant(e) => {
// Seems unique so far
e.insert(vec![fileset]);
},
BTreeEntry::Occupied(mut e) => {
// Found a dupe!
self.stats.dupes += 1;
self.stats.bytes_deduplicated += metadata.size() as usize;
let filesets = e.get_mut();
filesets.push(fileset);
// Deduping can either be done immediately or later. Immediate is more cache-friendly and interactive,
// but for files that already have hardlinks it can cause unnecessary re-linking. So if there are
// hardlinks in the set, wait until the end to dedupe when all hardlinks are known.
if filesets.iter().all(|set| set.borrow().links() == 1) {
Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener)?;
} else {
deferred = true;
}
},
}
// Periodically flush deferred files to avoid building a huge queue
// (the growing limit is a compromise between responsiveness
// and potential to hit a pathological case of hardlinking with wrong hardlink groups)
if deferred {
self.deferred_count += 1;
if self.deferred_count >= self.next_deferred_count {
self.next_deferred_count *= 2;
self.deferred_count = 0;
self.flush_deferred();
}
}
Ok(())
}
fn flush_deferred(&mut self) {
for filesets in self.by_content.values_mut() {
if self.settings.breaks() > 1 {
eprintln!("Aborting");
break;
}
if let Err(err) = Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener) {
eprintln!("{}", err);
}
}
}
fn dedupe(filesets: &mut [RcFileSet], run_mode: RunMode, scan_listener: &mut dyn ScanListener) -> io::Result<()> {
if run_mode == RunMode::DryRunNoMerging {
return Ok(());
}
// Find file with the largest number of hardlinks, since it's less work to merge a small group into a large group
let mut largest_idx = 0;
let mut largest_links = 0;
let mut nonempty_filesets = 0;
for (idx, fileset) in filesets.iter().enumerate() {
let fileset = fileset.borrow();
if !fileset.paths.is_empty() {
// Only actual paths we can merge matter here
nonempty_filesets += 1;
}
let links = fileset.links();
if links > largest_links {
largest_idx = idx;
largest_links = links;
}
}
if nonempty_filesets == 0 {
return Ok(()); // Already merged
}
// The set is still going to be in use! So everything has to be updated to make sense for the next call
let merged_paths = &mut { filesets[largest_idx].borrow_mut() }.paths;
let source_path = merged_paths[0].clone();
for (i, set) in filesets.iter().enumerate() {
// We don't want to merge the set with itself
if i == largest_idx {
continue;
}
let paths = &mut set.borrow_mut().paths;
// dest_path will be "lost" on error, but that's fine, since we don't want to dedupe it if it causes errors
for dest_path in paths.drain(..) {
assert_ne!(&source_path, &dest_path);
debug_assert_ne!(fs::symlink_metadata(&source_path)?.ino(), fs::symlink_metadata(&dest_path)?.ino());
if run_mode == RunMode::DryRun {
scan_listener.duplicate_found(&dest_path, &source_path);
merged_paths.push(dest_path);
continue;
}
let temp_path = dest_path.with_file_name(".tmp-dupe-e1iIQcBFn5pC4MUSm-xkcd-221");
debug_assert!(!temp_path.exists());
debug_assert!(source_path.exists());
debug_assert!(dest_path.exists());
// In posix link guarantees not to overwrite, and mv guarantes to move atomically
// so this two-step replacement is pretty robust
if let Err(err) = fs::hard_link(&source_path, &temp_path) {
eprintln!("unable to hardlink {} {} due to {}", source_path.display(), temp_path.display(), err);
let _ = fs::remove_file(temp_path);
return Err(err);
}
if let Err(err) = fs::rename(&temp_path, &dest_path) {
eprintln!("unable to rename {} {} due to {}", temp_path.display(), dest_path.display(), err);
let _ = fs::remove_file(temp_path);
return Err(err);
}
debug_assert!(!temp_path.exists());
debug_assert!(source_path.exists());
debug_assert!(dest_path.exists());
scan_listener.hardlinked(&dest_path, &source_path);
merged_paths.push(dest_path);
}
}
Ok(())
}
pub fn dupes(&self) -> Vec<Vec<FileSet>> {
self.by_content.values().map(|filesets| {
filesets.iter().map(|d|{
let tmp = d.borrow();
(*tmp).clone()
}).collect()
}).collect()
}
}
| {
// Errors are ignored here, since it's super common to find permission denied and unreadable symlinks,
// and it'd be annoying if that aborted the whole operation.
// FIXME: store the errors somehow to report them in a controlled manner
for entry in fs::read_dir(path)?.filter_map(|p| p.ok()) {
if self.settings.breaks() > 0 {
break;
}
let path = entry.path();
if let Some(file_name) = path.file_name() {
if self.exclude.contains(file_name) {
self.stats.skipped += 1;
continue;
}
}
if let Err(err) = self.add(path.into_boxed_path(), &entry.metadata()?) {
eprintln!("{}: {}", entry.path().display(), err);
}
}
Ok(())
} | identifier_body |
scanner.rs | use crate::file::{FileContent, FileSet};
use crate::metadata::Metadata;
use std::cell::RefCell;
use std::cmp;
use std::collections::btree_map::Entry as BTreeEntry;
use std::collections::hash_map::Entry as HashEntry;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::ffi::OsString;
use std::fmt::Debug;
use std::fs;
use std::io;
use std::os::unix::fs::MetadataExt;
use std::path::Path;
use std::rc::Rc;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum RunMode {
/// Merges paths in memory, but not on disk. Gives realistic UI output.
DryRun,
/// Like dry run, but completely skips deduping, with no UI for dupes.
DryRunNoMerging,
Hardlink,
}
#[derive(Debug)]
pub struct Settings {
/// Ignore files smaller than a filesystem block.
/// Deduping of such files is unlikely to save space.
pub ignore_small: bool,
pub run_mode: RunMode,
// If 1, go to flush. If > 1, abort immediately.
pub break_on: Option<&'static AtomicU32>,
}
impl Settings {
pub fn breaks(&self) -> u32 {
if let Some(break_on) = self.break_on {
break_on.load(Ordering::SeqCst)
} else {
0
}
}
}
#[derive(Debug, Default, Copy, Clone)]
#[cfg_attr(feature = "json", derive(serde_derive::Serialize))]
pub struct Stats {
pub added: usize,
pub skipped: usize,
pub dupes: usize,
pub bytes_deduplicated: usize,
pub hardlinks: usize,
pub bytes_saved_by_hardlinks: usize,
}
pub trait ScanListener: Debug {
fn file_scanned(&mut self, path: &Path, stats: &Stats);
fn scan_over(&self, scanner: &Scanner, stats: &Stats, scan_duration: Duration);
fn hardlinked(&mut self, src: &Path, dst: &Path);
fn duplicate_found(&mut self, src: &Path, dst: &Path);
}
#[derive(Debug)]
struct SilentListener;
impl ScanListener for SilentListener {
fn file_scanned(&mut self, _: &Path, _: &Stats) {}
fn scan_over(&self, _: &Scanner, _: &Stats, _: Duration) {}
fn hardlinked(&mut self, _: &Path, _: &Path) {}
fn duplicate_found(&mut self, _: &Path, _: &Path) {}
}
type RcFileSet = Rc<RefCell<FileSet>>;
#[derive(Debug)]
pub struct Scanner {
/// All hardlinks of the same inode have to be treated as the same file
by_inode: HashMap<(u64, u64), RcFileSet>,
/// See Hasher for explanation
by_content: BTreeMap<FileContent, Vec<RcFileSet>>,
/// Directories left to scan. Sorted by inode number.
/// I'm assuming scanning in this order is faster, since inode is related to file's age,
/// which is related to its physical position on disk, which makes the scan more sequential.
to_scan: BinaryHeap<(u64, Box<Path>)>,
scan_listener: Box<dyn ScanListener>,
stats: Stats,
exclude: HashSet<OsString>,
pub settings: Settings,
deferred_count: usize,
next_deferred_count: usize,
}
impl Scanner {
pub fn new() -> Self {
Scanner {
settings: Settings {
ignore_small: true,
run_mode: RunMode::Hardlink,
break_on: None,
},
by_inode: HashMap::new(),
by_content: BTreeMap::new(),
to_scan: BinaryHeap::new(),
scan_listener: Box::new(SilentListener),
stats: Stats::default(),
exclude: HashSet::new(),
deferred_count: 0,
next_deferred_count: 4096,
}
}
pub fn exclude(&mut self, exclude: Vec<String>) {
self.exclude = exclude.into_iter().map(From::from).collect();
}
/// Set the scan listener. Caution: This overrides previously set listeners!
/// Use a multiplexing listener if multiple listeners are required.
pub fn set_listener(&mut self, listener: Box<dyn ScanListener>) {
self.scan_listener = listener;
}
/// Scan any file or directory for dupes.
/// Dedupe is done within the path as well as against all previously added paths.
pub fn scan(&mut self, path: impl AsRef<Path>) -> io::Result<()> {
self.enqueue(path)?;
self.flush()?;
Ok(())
}
pub fn enqueue(&mut self, path: impl AsRef<Path>) -> io::Result<()> {
let path = fs::canonicalize(path)?.into_boxed_path();
let metadata = fs::symlink_metadata(&path)?;
self.add(path, &metadata)?;
Ok(())
}
/// Drains the queue of directories to scan
pub fn flush(&mut self) -> io::Result<()> {
let start_time = Instant::now();
while let Some((_, path)) = self.to_scan.pop() {
if let Err(err) = self.scan_dir(&path) {
eprintln!("Error scanning {}: {}", path.display(), err);
self.stats.skipped += 1;
}
if self.settings.breaks() > 0 {
eprintln!("Stopping scan");
break;
}
}
self.flush_deferred();
let scan_duration = Instant::now().duration_since(start_time);
self.scan_listener.scan_over(self, &self.stats, scan_duration);
Ok(())
}
fn scan_dir(&mut self, path: &Path) -> io::Result<()> {
// Errors are ignored here, since it's super common to find permission denied and unreadable symlinks,
// and it'd be annoying if that aborted the whole operation.
// FIXME: store the errors somehow to report them in a controlled manner
for entry in fs::read_dir(path)?.filter_map(|p| p.ok()) {
if self.settings.breaks() > 0 {
break;
}
let path = entry.path();
if let Some(file_name) = path.file_name() {
if self.exclude.contains(file_name) {
self.stats.skipped += 1;
continue;
}
}
if let Err(err) = self.add(path.into_boxed_path(), &entry.metadata()?) {
eprintln!("{}: {}", entry.path().display(), err);
}
}
Ok(())
}
fn add(&mut self, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> {
self.scan_listener.file_scanned(&path, &self.stats);
let ty = metadata.file_type();
if ty.is_dir() {
// Inode is truncated to group scanning of roughly close inodes together,
// But still preserve some directory traversal order.
// Negation to scan from the highest (assuming latest) first.
let order_key = !(metadata.ino() >> 8);
self.to_scan.push((order_key, path));
return Ok(());
} else if ty.is_symlink() || !ty.is_file() {
// Support for traversing symlinks would require preventing loops
// Deduping /dev/ would be funny
self.stats.skipped += 1;
return Ok(());
}
// APFS reports 4*MB* block size
let small_size = cmp::min(16 * 1024, metadata.blksize());
if metadata.size() == 0 || (self.settings.ignore_small && metadata.size() < small_size) {
self.stats.skipped += 1;
return Ok(());
}
self.stats.added += 1;
if let Some(fileset) = self.new_fileset(&path, metadata) {
self.dedupe_by_content(fileset, path, metadata)?;
} else {
self.stats.hardlinks += 1;
self.stats.bytes_saved_by_hardlinks += metadata.size() as usize;
}
Ok(())
}
/// Creates a new fileset if it's a new file.
/// Returns None if it's a hardlink of a file already seen.
fn new_fileset(&mut self, path: &Path, metadata: &fs::Metadata) -> Option<RcFileSet> {
let path: Box<Path> = path.into();
let device_inode = (metadata.dev(), metadata.ino());
match self.by_inode.entry(device_inode) {
HashEntry::Vacant(e) => {
let fileset = Rc::new(RefCell::new(FileSet::new(path, metadata.nlink())));
e.insert(Rc::clone(&fileset)); // clone just bumps a refcount here
Some(fileset)
},
HashEntry::Occupied(mut e) => {
// This case may require a deferred deduping later,
// if the new link belongs to an old fileset that has already been deduped.
let mut t = e.get_mut().borrow_mut();
t.push(path);
None
},
}
}
/// Here's where all the magic happens
fn | (&mut self, fileset: RcFileSet, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> {
let mut deferred = false;
match self.by_content.entry(FileContent::new(path, Metadata::new(metadata))) {
BTreeEntry::Vacant(e) => {
// Seems unique so far
e.insert(vec![fileset]);
},
BTreeEntry::Occupied(mut e) => {
// Found a dupe!
self.stats.dupes += 1;
self.stats.bytes_deduplicated += metadata.size() as usize;
let filesets = e.get_mut();
filesets.push(fileset);
// Deduping can either be done immediately or later. Immediate is more cache-friendly and interactive,
// but for files that already have hardlinks it can cause unnecessary re-linking. So if there are
// hardlinks in the set, wait until the end to dedupe when all hardlinks are known.
if filesets.iter().all(|set| set.borrow().links() == 1) {
Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener)?;
} else {
deferred = true;
}
},
}
// Periodically flush deferred files to avoid building a huge queue
// (the growing limit is a compromise between responsiveness
// and potential to hit a pathological case of hardlinking with wrong hardlink groups)
if deferred {
self.deferred_count += 1;
if self.deferred_count >= self.next_deferred_count {
self.next_deferred_count *= 2;
self.deferred_count = 0;
self.flush_deferred();
}
}
Ok(())
}
fn flush_deferred(&mut self) {
for filesets in self.by_content.values_mut() {
if self.settings.breaks() > 1 {
eprintln!("Aborting");
break;
}
if let Err(err) = Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener) {
eprintln!("{}", err);
}
}
}
fn dedupe(filesets: &mut [RcFileSet], run_mode: RunMode, scan_listener: &mut dyn ScanListener) -> io::Result<()> {
if run_mode == RunMode::DryRunNoMerging {
return Ok(());
}
// Find file with the largest number of hardlinks, since it's less work to merge a small group into a large group
let mut largest_idx = 0;
let mut largest_links = 0;
let mut nonempty_filesets = 0;
for (idx, fileset) in filesets.iter().enumerate() {
let fileset = fileset.borrow();
if !fileset.paths.is_empty() {
// Only actual paths we can merge matter here
nonempty_filesets += 1;
}
let links = fileset.links();
if links > largest_links {
largest_idx = idx;
largest_links = links;
}
}
if nonempty_filesets == 0 {
return Ok(()); // Already merged
}
// The set is still going to be in use! So everything has to be updated to make sense for the next call
let merged_paths = &mut { filesets[largest_idx].borrow_mut() }.paths;
let source_path = merged_paths[0].clone();
for (i, set) in filesets.iter().enumerate() {
// We don't want to merge the set with itself
if i == largest_idx {
continue;
}
let paths = &mut set.borrow_mut().paths;
// dest_path will be "lost" on error, but that's fine, since we don't want to dedupe it if it causes errors
for dest_path in paths.drain(..) {
assert_ne!(&source_path, &dest_path);
debug_assert_ne!(fs::symlink_metadata(&source_path)?.ino(), fs::symlink_metadata(&dest_path)?.ino());
if run_mode == RunMode::DryRun {
scan_listener.duplicate_found(&dest_path, &source_path);
merged_paths.push(dest_path);
continue;
}
let temp_path = dest_path.with_file_name(".tmp-dupe-e1iIQcBFn5pC4MUSm-xkcd-221");
debug_assert!(!temp_path.exists());
debug_assert!(source_path.exists());
debug_assert!(dest_path.exists());
// In posix link guarantees not to overwrite, and mv guarantes to move atomically
// so this two-step replacement is pretty robust
if let Err(err) = fs::hard_link(&source_path, &temp_path) {
eprintln!("unable to hardlink {} {} due to {}", source_path.display(), temp_path.display(), err);
let _ = fs::remove_file(temp_path);
return Err(err);
}
if let Err(err) = fs::rename(&temp_path, &dest_path) {
eprintln!("unable to rename {} {} due to {}", temp_path.display(), dest_path.display(), err);
let _ = fs::remove_file(temp_path);
return Err(err);
}
debug_assert!(!temp_path.exists());
debug_assert!(source_path.exists());
debug_assert!(dest_path.exists());
scan_listener.hardlinked(&dest_path, &source_path);
merged_paths.push(dest_path);
}
}
Ok(())
}
pub fn dupes(&self) -> Vec<Vec<FileSet>> {
self.by_content.values().map(|filesets| {
filesets.iter().map(|d|{
let tmp = d.borrow();
(*tmp).clone()
}).collect()
}).collect()
}
}
| dedupe_by_content | identifier_name |
index.js | /* *
* This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK (v2).
* Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
* session persistence, api calls, and more.
* */
const Alexa = require('ask-sdk-core');
const persistenceAdapter = require('ask-sdk-s3-persistence-adapter');
const LaunchRequestHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'LaunchRequest';
},
handle(handlerInput) {
const speakOutput = 'Hello! Welcome to Caketime. What is your birthday?';
const repromptText = 'I was born Nov. 6th, 2014. When were you born?';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(repromptText)
.getResponse();
}
};
const HasBirthdayLaunchRequestHandler = {
async getCurrentDate(handlerInput) {
// Get the time zone
const deviceId = Alexa.getDeviceId(handlerInput.requestEnvelope)
const serviceClientFactory = handlerInput.serviceClientFactory;
let userTimeZone;
try {
const upsServiceClient = serviceClientFactory.getUpsServiceClient();
userTimeZone = await upsServiceClient.getSystemTimeZone(deviceId);
} catch (error) {
if (error.name !== 'ServiceError') {
return handlerInput.responseBuilder.speak("There was a problem connecting to the service.").getResponse();
}
console.log('error', error.message);
}
// Get the current date with the time
const currentDateTime = new Date(new Date().toLocaleString("en-US", {timeZone: userTimeZone}));
// Remove the time from the date because it affects our difference calculation
return new Date(currentDateTime.getFullYear(), currentDateTime.getMonth(), currentDateTime.getDate());
},
getNextBirthdayDate(day, month, currentDate) {
const currentYear = currentDate.getFullYear();
let nextBirthday = Date.parse(`${month} ${day}, ${currentYear}`);
// Adjust the nextBirthday by one year if the current date is after their birthday
if (currentDate.getTime() > nextBirthday) {
nextBirthday = Date.parse(`${month} ${day}, ${currentYear + 1}`);
}
// Create date object from milliseconds
return new Date(nextBirthday);
},
canHandle(handlerInput) {
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = attributesManager.getSessionAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'LaunchRequest' && year && month && day;
},
async handle(handlerInput) {
// Get persisted birthday date
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = attributesManager.getSessionAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
// Get the current date
const currentDate = await this.getCurrentDate(handlerInput);
// Get the next birthday date
const nextBirthdayDate = this.getNextBirthdayDate(day, month, currentDate);
// Set the default speakOutput to Happy xth Birthday!
// Don't worry about when to use st, th, rd--Alexa will automatically correct the ordinal for you.
const oneDay = 24*60*60*1000;
let speakOutput = `Happy ${currentDate.getFullYear() - year}th birthday!`;
if (currentDate.getTime() !== nextBirthdayDate.getTime()) {
const diffDays = Math.round(Math.abs((currentDate.getTime() - nextBirthdayDate.getTime()) / oneDay));
speakOutput = `Welcome back. It looks like there are ${diffDays} days until your ${nextBirthdayDate.getFullYear() - year}th birthday.`
}
return handlerInput.responseBuilder
.speak(speakOutput)
.getResponse();
}
};
const CaptureBirthdayIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'CaptureBirthdayIntent';
},
async handle(handlerInput) {
const year = handlerInput.requestEnvelope.request.intent.slots.year.value;
const month = handlerInput.requestEnvelope.request.intent.slots.month.value;
const day = handlerInput.requestEnvelope.request.intent.slots.day.value;
// Persist the slots data between sessions
const attributesManager = handlerInput.attributesManager;
const birthdayAttributes = {
"year" : year,
"month" : month,
"day" : day
};
attributesManager.setPersistentAttributes(birthdayAttributes);
await attributesManager.savePersistentAttributes();
const speakOutput = `Thanks, I'll remember that you were born ${month} ${day} ${year}.`;
return handlerInput.responseBuilder
.speak(speakOutput)
//.reprompt('add a reprompt if you want to keep the session open for the user to respond')
.getResponse();
}
};
const HelpIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.HelpIntent';
},
handle(handlerInput) {
const speakOutput = 'You can say hello to me! How can I help?';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
const CancelAndStopIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& (Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.CancelIntent'
|| Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.StopIntent');
},
| (handlerInput) {
const speakOutput = 'Goodbye!';
return handlerInput.responseBuilder
.speak(speakOutput)
.getResponse();
}
};
/* *
* FallbackIntent triggers when a customer says something that doesn’t map to any intents in your skill
* It must also be defined in the language model (if the locale supports it)
* This handler can be safely added but will be ingnored in locales that do not support it yet
* */
const FallbackIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.FallbackIntent';
},
handle(handlerInput) {
const speakOutput = 'Sorry, I don\'t know about that. Please try again.';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
/* *
* SessionEndedRequest notifies that a session was ended. This handler will be triggered when a currently open
* session is closed for one of the following reasons: 1) The user says "exit" or "quit". 2) The user does not
* respond or says something that does not match an intent defined in your voice model. 3) An error occurs
* */
const SessionEndedRequestHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'SessionEndedRequest';
},
handle(handlerInput) {
console.log(`~~~~ Session ended: ${JSON.stringify(handlerInput.requestEnvelope)}`);
// Any cleanup logic goes here.
return handlerInput.responseBuilder.getResponse(); // notice we send an empty response
}
};
/* *
* The intent reflector is used for interaction model testing and debugging.
* It will simply repeat the intent the user said. You can create custom handlers for your intents
* by defining them above, then also adding them to the request handler chain below
* */
const IntentReflectorHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest';
},
handle(handlerInput) {
const intentName = Alexa.getIntentName(handlerInput.requestEnvelope);
const speakOutput = `You just triggered ${intentName}`;
return handlerInput.responseBuilder
.speak(speakOutput)
//.reprompt('add a reprompt if you want to keep the session open for the user to respond')
.getResponse();
}
};
/**
* Generic error handling to capture any syntax or routing errors. If you receive an error
* stating the request handler chain is not found, you have not implemented a handler for
* the intent being invoked or included it in the skill builder below
* */
const ErrorHandler = {
canHandle() {
return true;
},
handle(handlerInput, error) {
const speakOutput = 'Sorry, I had trouble doing what you asked. Please try again.';
console.log(`~~~~ Error handled: ${JSON.stringify(error)}`);
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
const LoadBirthdayInterceptor = {
async process(handlerInput) {
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = await attributesManager.getPersistentAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
if (year && month && day) {
attributesManager.setSessionAttributes(sessionAttributes);
}
}
};
/**
* This handler acts as the entry point for your skill, routing all request and response
* payloads to the handlers above. Make sure any new handlers or interceptors you've
* defined are included below. The order matters - they're processed top to bottom
* */
exports.handler = Alexa.SkillBuilders.custom()
.withApiClient(
new Alexa.DefaultApiClient()
)
.withPersistenceAdapter(
new persistenceAdapter.S3PersistenceAdapter({bucketName:process.env.S3_PERSISTENCE_BUCKET})
)
.addRequestHandlers(
HasBirthdayLaunchRequestHandler,
LaunchRequestHandler,
CaptureBirthdayIntentHandler,
HelpIntentHandler,
CancelAndStopIntentHandler,
FallbackIntentHandler,
SessionEndedRequestHandler,
IntentReflectorHandler)
.addRequestInterceptors(
LoadBirthdayInterceptor
)
.addErrorHandlers(
ErrorHandler)
.withCustomUserAgent('sample/cake-time/v1.2')
.lambda(); | handle | identifier_name |
index.js | /* *
* This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK (v2).
* Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
* session persistence, api calls, and more.
* */
const Alexa = require('ask-sdk-core');
const persistenceAdapter = require('ask-sdk-s3-persistence-adapter');
const LaunchRequestHandler = {
canHandle(handlerInput) | ,
handle(handlerInput) {
const speakOutput = 'Hello! Welcome to Caketime. What is your birthday?';
const repromptText = 'I was born Nov. 6th, 2014. When were you born?';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(repromptText)
.getResponse();
}
};
const HasBirthdayLaunchRequestHandler = {
async getCurrentDate(handlerInput) {
// Get the time zone
const deviceId = Alexa.getDeviceId(handlerInput.requestEnvelope)
const serviceClientFactory = handlerInput.serviceClientFactory;
let userTimeZone;
try {
const upsServiceClient = serviceClientFactory.getUpsServiceClient();
userTimeZone = await upsServiceClient.getSystemTimeZone(deviceId);
} catch (error) {
if (error.name !== 'ServiceError') {
return handlerInput.responseBuilder.speak("There was a problem connecting to the service.").getResponse();
}
console.log('error', error.message);
}
// Get the current date with the time
const currentDateTime = new Date(new Date().toLocaleString("en-US", {timeZone: userTimeZone}));
// Remove the time from the date because it affects our difference calculation
return new Date(currentDateTime.getFullYear(), currentDateTime.getMonth(), currentDateTime.getDate());
},
getNextBirthdayDate(day, month, currentDate) {
const currentYear = currentDate.getFullYear();
let nextBirthday = Date.parse(`${month} ${day}, ${currentYear}`);
// Adjust the nextBirthday by one year if the current date is after their birthday
if (currentDate.getTime() > nextBirthday) {
nextBirthday = Date.parse(`${month} ${day}, ${currentYear + 1}`);
}
// Create date object from milliseconds
return new Date(nextBirthday);
},
canHandle(handlerInput) {
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = attributesManager.getSessionAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'LaunchRequest' && year && month && day;
},
async handle(handlerInput) {
// Get persisted birthday date
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = attributesManager.getSessionAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
// Get the current date
const currentDate = await this.getCurrentDate(handlerInput);
// Get the next birthday date
const nextBirthdayDate = this.getNextBirthdayDate(day, month, currentDate);
// Set the default speakOutput to Happy xth Birthday!
// Don't worry about when to use st, th, rd--Alexa will automatically correct the ordinal for you.
const oneDay = 24*60*60*1000;
let speakOutput = `Happy ${currentDate.getFullYear() - year}th birthday!`;
if (currentDate.getTime() !== nextBirthdayDate.getTime()) {
const diffDays = Math.round(Math.abs((currentDate.getTime() - nextBirthdayDate.getTime()) / oneDay));
speakOutput = `Welcome back. It looks like there are ${diffDays} days until your ${nextBirthdayDate.getFullYear() - year}th birthday.`
}
return handlerInput.responseBuilder
.speak(speakOutput)
.getResponse();
}
};
const CaptureBirthdayIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'CaptureBirthdayIntent';
},
async handle(handlerInput) {
const year = handlerInput.requestEnvelope.request.intent.slots.year.value;
const month = handlerInput.requestEnvelope.request.intent.slots.month.value;
const day = handlerInput.requestEnvelope.request.intent.slots.day.value;
// Persist the slots data between sessions
const attributesManager = handlerInput.attributesManager;
const birthdayAttributes = {
"year" : year,
"month" : month,
"day" : day
};
attributesManager.setPersistentAttributes(birthdayAttributes);
await attributesManager.savePersistentAttributes();
const speakOutput = `Thanks, I'll remember that you were born ${month} ${day} ${year}.`;
return handlerInput.responseBuilder
.speak(speakOutput)
//.reprompt('add a reprompt if you want to keep the session open for the user to respond')
.getResponse();
}
};
const HelpIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.HelpIntent';
},
handle(handlerInput) {
const speakOutput = 'You can say hello to me! How can I help?';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
const CancelAndStopIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& (Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.CancelIntent'
|| Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.StopIntent');
},
handle(handlerInput) {
const speakOutput = 'Goodbye!';
return handlerInput.responseBuilder
.speak(speakOutput)
.getResponse();
}
};
/* *
* FallbackIntent triggers when a customer says something that doesn’t map to any intents in your skill
* It must also be defined in the language model (if the locale supports it)
* This handler can be safely added but will be ingnored in locales that do not support it yet
* */
const FallbackIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.FallbackIntent';
},
handle(handlerInput) {
const speakOutput = 'Sorry, I don\'t know about that. Please try again.';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
/* *
* SessionEndedRequest notifies that a session was ended. This handler will be triggered when a currently open
* session is closed for one of the following reasons: 1) The user says "exit" or "quit". 2) The user does not
* respond or says something that does not match an intent defined in your voice model. 3) An error occurs
* */
const SessionEndedRequestHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'SessionEndedRequest';
},
handle(handlerInput) {
console.log(`~~~~ Session ended: ${JSON.stringify(handlerInput.requestEnvelope)}`);
// Any cleanup logic goes here.
return handlerInput.responseBuilder.getResponse(); // notice we send an empty response
}
};
/* *
* The intent reflector is used for interaction model testing and debugging.
* It will simply repeat the intent the user said. You can create custom handlers for your intents
* by defining them above, then also adding them to the request handler chain below
* */
const IntentReflectorHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest';
},
handle(handlerInput) {
const intentName = Alexa.getIntentName(handlerInput.requestEnvelope);
const speakOutput = `You just triggered ${intentName}`;
return handlerInput.responseBuilder
.speak(speakOutput)
//.reprompt('add a reprompt if you want to keep the session open for the user to respond')
.getResponse();
}
};
/**
* Generic error handling to capture any syntax or routing errors. If you receive an error
* stating the request handler chain is not found, you have not implemented a handler for
* the intent being invoked or included it in the skill builder below
* */
const ErrorHandler = {
canHandle() {
return true;
},
handle(handlerInput, error) {
const speakOutput = 'Sorry, I had trouble doing what you asked. Please try again.';
console.log(`~~~~ Error handled: ${JSON.stringify(error)}`);
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
const LoadBirthdayInterceptor = {
async process(handlerInput) {
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = await attributesManager.getPersistentAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
if (year && month && day) {
attributesManager.setSessionAttributes(sessionAttributes);
}
}
};
/**
* This handler acts as the entry point for your skill, routing all request and response
* payloads to the handlers above. Make sure any new handlers or interceptors you've
* defined are included below. The order matters - they're processed top to bottom
* */
exports.handler = Alexa.SkillBuilders.custom()
.withApiClient(
new Alexa.DefaultApiClient()
)
.withPersistenceAdapter(
new persistenceAdapter.S3PersistenceAdapter({bucketName:process.env.S3_PERSISTENCE_BUCKET})
)
.addRequestHandlers(
HasBirthdayLaunchRequestHandler,
LaunchRequestHandler,
CaptureBirthdayIntentHandler,
HelpIntentHandler,
CancelAndStopIntentHandler,
FallbackIntentHandler,
SessionEndedRequestHandler,
IntentReflectorHandler)
.addRequestInterceptors(
LoadBirthdayInterceptor
)
.addErrorHandlers(
ErrorHandler)
.withCustomUserAgent('sample/cake-time/v1.2')
.lambda(); | {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'LaunchRequest';
} | identifier_body |
index.js | /* *
* This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK (v2).
* Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
* session persistence, api calls, and more.
* */
const Alexa = require('ask-sdk-core');
const persistenceAdapter = require('ask-sdk-s3-persistence-adapter');
const LaunchRequestHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'LaunchRequest';
},
handle(handlerInput) {
const speakOutput = 'Hello! Welcome to Caketime. What is your birthday?';
const repromptText = 'I was born Nov. 6th, 2014. When were you born?';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(repromptText)
.getResponse();
}
};
const HasBirthdayLaunchRequestHandler = {
async getCurrentDate(handlerInput) {
// Get the time zone
const deviceId = Alexa.getDeviceId(handlerInput.requestEnvelope)
const serviceClientFactory = handlerInput.serviceClientFactory;
let userTimeZone;
try {
const upsServiceClient = serviceClientFactory.getUpsServiceClient();
userTimeZone = await upsServiceClient.getSystemTimeZone(deviceId);
} catch (error) {
if (error.name !== 'ServiceError') {
return handlerInput.responseBuilder.speak("There was a problem connecting to the service.").getResponse();
}
console.log('error', error.message);
}
// Get the current date with the time
const currentDateTime = new Date(new Date().toLocaleString("en-US", {timeZone: userTimeZone}));
// Remove the time from the date because it affects our difference calculation
return new Date(currentDateTime.getFullYear(), currentDateTime.getMonth(), currentDateTime.getDate());
},
getNextBirthdayDate(day, month, currentDate) {
const currentYear = currentDate.getFullYear();
let nextBirthday = Date.parse(`${month} ${day}, ${currentYear}`);
// Adjust the nextBirthday by one year if the current date is after their birthday
if (currentDate.getTime() > nextBirthday) {
nextBirthday = Date.parse(`${month} ${day}, ${currentYear + 1}`);
}
// Create date object from milliseconds
return new Date(nextBirthday);
},
canHandle(handlerInput) {
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = attributesManager.getSessionAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'LaunchRequest' && year && month && day;
},
async handle(handlerInput) {
// Get persisted birthday date
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = attributesManager.getSessionAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
// Get the current date
const currentDate = await this.getCurrentDate(handlerInput);
// Get the next birthday date
const nextBirthdayDate = this.getNextBirthdayDate(day, month, currentDate);
// Set the default speakOutput to Happy xth Birthday!
// Don't worry about when to use st, th, rd--Alexa will automatically correct the ordinal for you.
const oneDay = 24*60*60*1000;
let speakOutput = `Happy ${currentDate.getFullYear() - year}th birthday!`;
if (currentDate.getTime() !== nextBirthdayDate.getTime()) {
const diffDays = Math.round(Math.abs((currentDate.getTime() - nextBirthdayDate.getTime()) / oneDay));
speakOutput = `Welcome back. It looks like there are ${diffDays} days until your ${nextBirthdayDate.getFullYear() - year}th birthday.`
}
return handlerInput.responseBuilder
.speak(speakOutput)
.getResponse();
}
};
const CaptureBirthdayIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'CaptureBirthdayIntent';
},
async handle(handlerInput) {
const year = handlerInput.requestEnvelope.request.intent.slots.year.value;
const month = handlerInput.requestEnvelope.request.intent.slots.month.value;
const day = handlerInput.requestEnvelope.request.intent.slots.day.value;
// Persist the slots data between sessions
const attributesManager = handlerInput.attributesManager;
const birthdayAttributes = {
"year" : year,
"month" : month,
"day" : day
};
attributesManager.setPersistentAttributes(birthdayAttributes);
await attributesManager.savePersistentAttributes();
const speakOutput = `Thanks, I'll remember that you were born ${month} ${day} ${year}.`;
return handlerInput.responseBuilder
.speak(speakOutput)
//.reprompt('add a reprompt if you want to keep the session open for the user to respond')
.getResponse();
}
};
const HelpIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.HelpIntent';
},
handle(handlerInput) {
const speakOutput = 'You can say hello to me! How can I help?';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
const CancelAndStopIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& (Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.CancelIntent'
|| Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.StopIntent');
},
handle(handlerInput) {
const speakOutput = 'Goodbye!';
return handlerInput.responseBuilder
.speak(speakOutput)
.getResponse();
}
};
/* *
* FallbackIntent triggers when a customer says something that doesn’t map to any intents in your skill
* It must also be defined in the language model (if the locale supports it)
* This handler can be safely added but will be ingnored in locales that do not support it yet
* */
const FallbackIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.FallbackIntent';
},
handle(handlerInput) {
const speakOutput = 'Sorry, I don\'t know about that. Please try again.';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
/* *
* SessionEndedRequest notifies that a session was ended. This handler will be triggered when a currently open
* session is closed for one of the following reasons: 1) The user says "exit" or "quit". 2) The user does not
* respond or says something that does not match an intent defined in your voice model. 3) An error occurs
* */
const SessionEndedRequestHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'SessionEndedRequest';
},
handle(handlerInput) {
console.log(`~~~~ Session ended: ${JSON.stringify(handlerInput.requestEnvelope)}`);
// Any cleanup logic goes here.
return handlerInput.responseBuilder.getResponse(); // notice we send an empty response
}
};
/* *
* The intent reflector is used for interaction model testing and debugging.
* It will simply repeat the intent the user said. You can create custom handlers for your intents
* by defining them above, then also adding them to the request handler chain below
* */
const IntentReflectorHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest';
},
handle(handlerInput) { | .speak(speakOutput)
//.reprompt('add a reprompt if you want to keep the session open for the user to respond')
.getResponse();
}
};
/**
* Generic error handling to capture any syntax or routing errors. If you receive an error
* stating the request handler chain is not found, you have not implemented a handler for
* the intent being invoked or included it in the skill builder below
* */
const ErrorHandler = {
canHandle() {
return true;
},
handle(handlerInput, error) {
const speakOutput = 'Sorry, I had trouble doing what you asked. Please try again.';
console.log(`~~~~ Error handled: ${JSON.stringify(error)}`);
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
const LoadBirthdayInterceptor = {
async process(handlerInput) {
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = await attributesManager.getPersistentAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
if (year && month && day) {
attributesManager.setSessionAttributes(sessionAttributes);
}
}
};
/**
* This handler acts as the entry point for your skill, routing all request and response
* payloads to the handlers above. Make sure any new handlers or interceptors you've
* defined are included below. The order matters - they're processed top to bottom
* */
exports.handler = Alexa.SkillBuilders.custom()
.withApiClient(
new Alexa.DefaultApiClient()
)
.withPersistenceAdapter(
new persistenceAdapter.S3PersistenceAdapter({bucketName:process.env.S3_PERSISTENCE_BUCKET})
)
.addRequestHandlers(
HasBirthdayLaunchRequestHandler,
LaunchRequestHandler,
CaptureBirthdayIntentHandler,
HelpIntentHandler,
CancelAndStopIntentHandler,
FallbackIntentHandler,
SessionEndedRequestHandler,
IntentReflectorHandler)
.addRequestInterceptors(
LoadBirthdayInterceptor
)
.addErrorHandlers(
ErrorHandler)
.withCustomUserAgent('sample/cake-time/v1.2')
.lambda(); | const intentName = Alexa.getIntentName(handlerInput.requestEnvelope);
const speakOutput = `You just triggered ${intentName}`;
return handlerInput.responseBuilder | random_line_split |
index.js | /* *
* This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK (v2).
* Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
* session persistence, api calls, and more.
* */
const Alexa = require('ask-sdk-core');
const persistenceAdapter = require('ask-sdk-s3-persistence-adapter');
const LaunchRequestHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'LaunchRequest';
},
handle(handlerInput) {
const speakOutput = 'Hello! Welcome to Caketime. What is your birthday?';
const repromptText = 'I was born Nov. 6th, 2014. When were you born?';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(repromptText)
.getResponse();
}
};
const HasBirthdayLaunchRequestHandler = {
async getCurrentDate(handlerInput) {
// Get the time zone
const deviceId = Alexa.getDeviceId(handlerInput.requestEnvelope)
const serviceClientFactory = handlerInput.serviceClientFactory;
let userTimeZone;
try {
const upsServiceClient = serviceClientFactory.getUpsServiceClient();
userTimeZone = await upsServiceClient.getSystemTimeZone(deviceId);
} catch (error) {
if (error.name !== 'ServiceError') {
return handlerInput.responseBuilder.speak("There was a problem connecting to the service.").getResponse();
}
console.log('error', error.message);
}
// Get the current date with the time
const currentDateTime = new Date(new Date().toLocaleString("en-US", {timeZone: userTimeZone}));
// Remove the time from the date because it affects our difference calculation
return new Date(currentDateTime.getFullYear(), currentDateTime.getMonth(), currentDateTime.getDate());
},
getNextBirthdayDate(day, month, currentDate) {
const currentYear = currentDate.getFullYear();
let nextBirthday = Date.parse(`${month} ${day}, ${currentYear}`);
// Adjust the nextBirthday by one year if the current date is after their birthday
if (currentDate.getTime() > nextBirthday) {
nextBirthday = Date.parse(`${month} ${day}, ${currentYear + 1}`);
}
// Create date object from milliseconds
return new Date(nextBirthday);
},
canHandle(handlerInput) {
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = attributesManager.getSessionAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'LaunchRequest' && year && month && day;
},
async handle(handlerInput) {
// Get persisted birthday date
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = attributesManager.getSessionAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
// Get the current date
const currentDate = await this.getCurrentDate(handlerInput);
// Get the next birthday date
const nextBirthdayDate = this.getNextBirthdayDate(day, month, currentDate);
// Set the default speakOutput to Happy xth Birthday!
// Don't worry about when to use st, th, rd--Alexa will automatically correct the ordinal for you.
const oneDay = 24*60*60*1000;
let speakOutput = `Happy ${currentDate.getFullYear() - year}th birthday!`;
if (currentDate.getTime() !== nextBirthdayDate.getTime()) |
return handlerInput.responseBuilder
.speak(speakOutput)
.getResponse();
}
};
const CaptureBirthdayIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'CaptureBirthdayIntent';
},
async handle(handlerInput) {
const year = handlerInput.requestEnvelope.request.intent.slots.year.value;
const month = handlerInput.requestEnvelope.request.intent.slots.month.value;
const day = handlerInput.requestEnvelope.request.intent.slots.day.value;
// Persist the slots data between sessions
const attributesManager = handlerInput.attributesManager;
const birthdayAttributes = {
"year" : year,
"month" : month,
"day" : day
};
attributesManager.setPersistentAttributes(birthdayAttributes);
await attributesManager.savePersistentAttributes();
const speakOutput = `Thanks, I'll remember that you were born ${month} ${day} ${year}.`;
return handlerInput.responseBuilder
.speak(speakOutput)
//.reprompt('add a reprompt if you want to keep the session open for the user to respond')
.getResponse();
}
};
const HelpIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.HelpIntent';
},
handle(handlerInput) {
const speakOutput = 'You can say hello to me! How can I help?';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
const CancelAndStopIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& (Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.CancelIntent'
|| Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.StopIntent');
},
handle(handlerInput) {
const speakOutput = 'Goodbye!';
return handlerInput.responseBuilder
.speak(speakOutput)
.getResponse();
}
};
/* *
* FallbackIntent triggers when a customer says something that doesn’t map to any intents in your skill
* It must also be defined in the language model (if the locale supports it)
* This handler can be safely added but will be ingnored in locales that do not support it yet
* */
const FallbackIntentHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest'
&& Alexa.getIntentName(handlerInput.requestEnvelope) === 'AMAZON.FallbackIntent';
},
handle(handlerInput) {
const speakOutput = 'Sorry, I don\'t know about that. Please try again.';
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
/* *
* SessionEndedRequest notifies that a session was ended. This handler will be triggered when a currently open
* session is closed for one of the following reasons: 1) The user says "exit" or "quit". 2) The user does not
* respond or says something that does not match an intent defined in your voice model. 3) An error occurs
* */
const SessionEndedRequestHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'SessionEndedRequest';
},
handle(handlerInput) {
console.log(`~~~~ Session ended: ${JSON.stringify(handlerInput.requestEnvelope)}`);
// Any cleanup logic goes here.
return handlerInput.responseBuilder.getResponse(); // notice we send an empty response
}
};
/* *
* The intent reflector is used for interaction model testing and debugging.
* It will simply repeat the intent the user said. You can create custom handlers for your intents
* by defining them above, then also adding them to the request handler chain below
* */
const IntentReflectorHandler = {
canHandle(handlerInput) {
return Alexa.getRequestType(handlerInput.requestEnvelope) === 'IntentRequest';
},
handle(handlerInput) {
const intentName = Alexa.getIntentName(handlerInput.requestEnvelope);
const speakOutput = `You just triggered ${intentName}`;
return handlerInput.responseBuilder
.speak(speakOutput)
//.reprompt('add a reprompt if you want to keep the session open for the user to respond')
.getResponse();
}
};
/**
* Generic error handling to capture any syntax or routing errors. If you receive an error
* stating the request handler chain is not found, you have not implemented a handler for
* the intent being invoked or included it in the skill builder below
* */
const ErrorHandler = {
canHandle() {
return true;
},
handle(handlerInput, error) {
const speakOutput = 'Sorry, I had trouble doing what you asked. Please try again.';
console.log(`~~~~ Error handled: ${JSON.stringify(error)}`);
return handlerInput.responseBuilder
.speak(speakOutput)
.reprompt(speakOutput)
.getResponse();
}
};
const LoadBirthdayInterceptor = {
async process(handlerInput) {
const attributesManager = handlerInput.attributesManager;
const sessionAttributes = await attributesManager.getPersistentAttributes() || {};
const year = sessionAttributes.hasOwnProperty('year') ? sessionAttributes.year : 0;
const month = sessionAttributes.hasOwnProperty('month') ? sessionAttributes.month : 0;
const day = sessionAttributes.hasOwnProperty('day') ? sessionAttributes.day : 0;
if (year && month && day) {
attributesManager.setSessionAttributes(sessionAttributes);
}
}
};
/**
* This handler acts as the entry point for your skill, routing all request and response
* payloads to the handlers above. Make sure any new handlers or interceptors you've
* defined are included below. The order matters - they're processed top to bottom
* */
exports.handler = Alexa.SkillBuilders.custom()
.withApiClient(
new Alexa.DefaultApiClient()
)
.withPersistenceAdapter(
new persistenceAdapter.S3PersistenceAdapter({bucketName:process.env.S3_PERSISTENCE_BUCKET})
)
.addRequestHandlers(
HasBirthdayLaunchRequestHandler,
LaunchRequestHandler,
CaptureBirthdayIntentHandler,
HelpIntentHandler,
CancelAndStopIntentHandler,
FallbackIntentHandler,
SessionEndedRequestHandler,
IntentReflectorHandler)
.addRequestInterceptors(
LoadBirthdayInterceptor
)
.addErrorHandlers(
ErrorHandler)
.withCustomUserAgent('sample/cake-time/v1.2')
.lambda(); | {
const diffDays = Math.round(Math.abs((currentDate.getTime() - nextBirthdayDate.getTime()) / oneDay));
speakOutput = `Welcome back. It looks like there are ${diffDays} days until your ${nextBirthdayDate.getFullYear() - year}th birthday.`
} | conditional_block |
Trainer.py | import csv
import time
import unicodedata
import tensorflow as tf
from tensorflow import keras
import ErrorClassifier
from TokenHelper import tokenize, tokenize_pure_words, find_all_delta_from_tokens
from NeuralNetworkHelper import PATH_REPLACE_CHECKPOINT, PATH_ARRANGE_CHECKPOINT, FILE_NAME, TESTING_RANGE
from NeuralNetworkHelper import tags_to_id
from NNModels import create_nn_model
# Only can learn when the learned_words.txt file is empty
ENABLE_LEARN_WORDS = False
# True when we want to train the respective neural network
ENABLE_TRAIN_REPLACE_NN = True
ENABLE_TRAIN_ARRANGE_NN = False
# True when we want to rebuild the saved data file for the neural network, using data from the "original" training file
ENABLE_PROCESS_REPLACE_DATA = True
ENABLE_PROCESS_ARRANGE_DATA = False
# True when we want to reload the saved weights of the neural network
ENABLE_LOAD_REPLACE_WEIGHTS = True
ENABLE_LOAD_ARRANGE_WEIGHTS = True
# True when we want to skip IO with the original file (only will use IO with the neural network training data)
ONLY_TRAIN_NN = False
# Path of the neural network training data
PATH_REPLACE_DATA = FILE_NAME + '.replace.txt'
PATH_ARRANGE_DATA = FILE_NAME + '.arrange.txt'
def main():
# .spacy.txt is a pre-processed file containing a tokenized
if not ONLY_TRAIN_NN:
with open(FILE_NAME + '.txt', encoding='utf-8') as file, open(FILE_NAME + '.spacy.txt') as tags_file:
progress = 0
start_time = time.time()
words_processed = 0
for line in file:
line_tag = tags_file.readline().strip()
progress += 1
if TESTING_RANGE[0] < progress <= TESTING_RANGE[1]:
continue
line = line.strip()
line = unicodedata.normalize('NFKD', line)
p1, p2 = line.split('\t')
t1, t2 = line_tag.split('\t')
error_type = ErrorClassifier.classify_error_labeled(p1, p2)
train(p1, p2, error_type, t1, t2)
# Display progression in number of samples processed, use random to avoid too many (slow)
# interactions w/ console
words_processed += len(p1.split()) + len(p2.split())
if progress % 100 == 0:
print('\rProgress: [{}] Word Processed: [{}] Words per second: [{}] Lines per second: [{}]'
.format(progress, words_processed,
words_processed / (time.time() - start_time), (progress / (time.time() - start_time)))
, end='')
if ENABLE_LEARN_WORDS:
save_learned_words()
else:
assert len(learned_words) == 0
save_word_frequencies()
print()
print(test1, test2)
if ENABLE_TRAIN_REPLACE_NN:
train_replace_nn()
if ENABLE_TRAIN_ARRANGE_NN:
train_arrange_nn()
def train_replace_nn():
# create the dataset
max_start = 0
max_end = 0
samples = 0
if ENABLE_PROCESS_REPLACE_DATA:
# saves the data to a file
assert len(train_delta1) == len(train_delta2) == len(train_start) == len(train_end)
max_start = len(max(train_start, key=len))
max_end = len(max(train_end, key=len))
samples = len(train_delta1)
with open(PATH_REPLACE_DATA, 'w') as file_replace:
file_replace.write('{} {} {}\n'.format(max_start, max_end, samples))
for i in range(samples):
file_replace.write(' '.join(map(str, train_start[i])) + '\t')
file_replace.write(' '.join(map(str, train_end[i])) + '\t')
file_replace.write(str(train_delta1[i][0]) + '\t')
file_replace.write(str(train_delta2[i][0]) + '\n')
quit()
def replace_nn_generator():
with open(PATH_REPLACE_DATA) as file_replace:
file_replace.readline()
for replace_line in file_replace:
start, end, delta1, delta2 = replace_line.rstrip().split('\t')
start = list(map(int, start.split()))
end = list(map(int, end.split()))
delta1 = [int(delta1)]
delta2 = [int(delta2)]
[start] = keras.preprocessing.sequence.pad_sequences([start], maxlen=max_start)
[end] = keras.preprocessing.sequence.pad_sequences([end], maxlen=max_end)
yield {'start': start, 'end': end, 'delta': delta1}, 1.
yield {'start': start, 'end': end, 'delta': delta2}, 0.
with open(PATH_REPLACE_DATA) as file_replace:
max_start, max_end, samples = list(map(int, file_replace.readline().strip().split()))
dataset = tf.data.Dataset.from_generator(replace_nn_generator,
({'start': tf.int32, 'end': tf.int32, 'delta': tf.int32}, tf.float32),
({'start': tf.TensorShape([None, ]), 'end': tf.TensorShape([None, ]),
'delta': tf.TensorShape([1, ])},
tf.TensorShape([])))
dataset, validation_dataset = prepare_dataset(dataset, samples, batch_size=1024 * 4)
# Create the model
model = create_nn_model('replace')
model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
print('-------------')
cp_callback = tf.keras.callbacks.ModelCheckpoint(PATH_REPLACE_CHECKPOINT, save_weights_only=True,
save_best_only=False, verbose=1)
if ENABLE_LOAD_REPLACE_WEIGHTS:
model.load_weights(PATH_REPLACE_CHECKPOINT)
model.fit(dataset, steps_per_epoch=50, epochs=200, verbose=2, validation_data=validation_dataset,
validation_steps=1, callbacks=[cp_callback])
def | ():
# create the dataset
samples = 0
max_length = 0
if ENABLE_PROCESS_ARRANGE_DATA:
# saves the data to a file
assert len(train_arrange_x) == len(train_arrange_y)
max_length = len(max(train_arrange_x, key=len))
samples = len(train_arrange_x)
with open(PATH_ARRANGE_DATA, 'w') as file_arrange:
file_arrange.write('{} {}\n'.format(max_length, samples))
for i in range(samples):
file_arrange.write(' '.join(map(str, train_arrange_x[i])) + '\t')
file_arrange.write(str(train_arrange_y[i]) + '\n')
def arrange_nn_generator():
with open(PATH_ARRANGE_DATA) as file_arrange:
file_arrange.readline()
for arrange_line in file_arrange:
x, y = arrange_line.rstrip().split('\t')
x = list(map(int, x.split()))
y = float(y)
[x] = keras.preprocessing.sequence.pad_sequences([x], maxlen=max_length)
yield x, y
with open(PATH_ARRANGE_DATA) as file_arrange:
max_length, samples = list(map(int, file_arrange.readline().strip().split()))
dataset = tf.data.Dataset.from_generator(arrange_nn_generator,
(tf.int32, tf.float32),
(tf.TensorShape([None, ]), tf.TensorShape([])))
dataset, validation_dataset = prepare_dataset(dataset, samples)
model = create_nn_model('arrange')
model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
print('-------------')
cp_callback = tf.keras.callbacks.ModelCheckpoint(PATH_ARRANGE_CHECKPOINT, save_weights_only=True,
save_best_only=False, verbose=1)
if ENABLE_LOAD_ARRANGE_WEIGHTS:
model.load_weights(PATH_ARRANGE_CHECKPOINT)
model.fit(dataset, steps_per_epoch=50, epochs=200, verbose=2, validation_data=validation_dataset,
validation_steps=1, callbacks=[cp_callback])
def prepare_dataset(dataset, samples, batch_size=1024, seed=123, validation_proportion=0.1):
dataset = dataset.shuffle(1000, seed=seed)
validation_dataset = dataset.take(int(samples * validation_proportion)) # 10% used for validation
validation_dataset = validation_dataset.batch(1000)
validation_dataset = validation_dataset.repeat()
dataset = dataset.skip(int(samples * validation_proportion))
dataset = dataset.batch(batch_size)
dataset = dataset.shuffle(10000)
dataset = dataset.repeat()
return dataset, validation_dataset
def learn_words(part):
words = tokenize_pure_words(part)
for word in words:
if not ErrorClassifier.check_word_list(word):
learned_words.add(word)
def save_learned_words():
with open('learned_words.txt', 'w') as fout:
for word in learned_words:
fout.write(word + '\n')
def learn_word_frequencies(part):
words = tokenize_pure_words(part)
for word in words:
if word in word_freqs:
word_freqs[word] += 1
else:
word_freqs[word] = 1
def save_word_frequencies():
with open('learned_frequencies.csv', 'w', newline='') as fout:
csv_writer = csv.writer(fout)
for word, freq in word_freqs.items():
# don't bother with the little words
if freq > 1:
csv_writer.writerow([word, freq])
test1 = 0
test2 = 0
def prepare_replace_tags(part1, part2, tags1, tags2):
global test1, test2
tokens1, tokens2 = tokenize(part1, part2)
tags1 = tags1.split()
tags2 = tags2.split()
assert len(tokens1) == len(tags1)
assert len(tokens2) == len(tags2)
tag_map = {}
for i in range(len(tokens1)):
tag_map[tokens1[i]] = tags1[i]
for i in range(len(tokens2)):
tag_map[tokens2[i]] = tags2[i]
delta1, delta2, start, end = find_all_delta_from_tokens(tokens1, tokens2)
ids_d1 = list(map(lambda token: tags_to_id[tag_map[token]], delta1))
ids_d2 = list(map(lambda token: tags_to_id[tag_map[token]], delta2))
ids_st = list(map(lambda token: tags_to_id[tag_map[token]], start)) # start ids
ids_en = list(map(lambda token: tags_to_id[tag_map[token]], end)) # end ids
if ids_d1[0] == ids_d2[0]:
test1 += 1
# TODO resolve case in which both have same placeholder, use vector similarities, or none
# TODO count it
else:
test2 += 1
train_start.append(ids_st)
train_end.append(ids_en)
train_delta1.append(ids_d1)
train_delta2.append(ids_d2)
def prepare_arrange_tags(tags1, tags2):
tags1 = tags1.split()
tags2 = tags2.split()
ids1 = list(map(lambda tag: tags_to_id[tag], tags1))
ids2 = list(map(lambda tag: tags_to_id[tag], tags2))
# TODO count identical ARRANGE tags signature
train_arrange_x.append(ids1)
train_arrange_y.append(1.)
train_arrange_x.append(ids2)
train_arrange_y.append(0.)
def train(p1, p2, error_type, t1, t2):
# note: learn words is done in the error classifier (classification requires knowing the words)
learn_word_frequencies(p1) # only train frequencies on first part, second part is corrupted text
if ENABLE_LEARN_WORDS:
learn_words(p1)
if error_type == 'REPLACE' and ENABLE_TRAIN_REPLACE_NN and ENABLE_PROCESS_REPLACE_DATA:
prepare_replace_tags(p1, p2, t1, t2)
if ENABLE_TRAIN_ARRANGE_NN and ENABLE_PROCESS_ARRANGE_DATA and error_type == 'ARRANGE':
prepare_arrange_tags(t1, t2)
if __name__ == '__main__':
learned_words = set()
word_freqs = {}
# For the REPLACE neural network
train_start = []
train_end = []
train_delta1 = []
train_delta2 = []
# For the ARRANGE neural network
train_arrange_x = []
train_arrange_y = []
main()
| train_arrange_nn | identifier_name |
Trainer.py | import csv
import time
import unicodedata
import tensorflow as tf
from tensorflow import keras
import ErrorClassifier
from TokenHelper import tokenize, tokenize_pure_words, find_all_delta_from_tokens
from NeuralNetworkHelper import PATH_REPLACE_CHECKPOINT, PATH_ARRANGE_CHECKPOINT, FILE_NAME, TESTING_RANGE
from NeuralNetworkHelper import tags_to_id
from NNModels import create_nn_model
# Only can learn when the learned_words.txt file is empty
ENABLE_LEARN_WORDS = False
# True when we want to train the respective neural network
ENABLE_TRAIN_REPLACE_NN = True
ENABLE_TRAIN_ARRANGE_NN = False
# True when we want to rebuild the saved data file for the neural network, using data from the "original" training file
ENABLE_PROCESS_REPLACE_DATA = True
ENABLE_PROCESS_ARRANGE_DATA = False
# True when we want to reload the saved weights of the neural network
ENABLE_LOAD_REPLACE_WEIGHTS = True
ENABLE_LOAD_ARRANGE_WEIGHTS = True
# True when we want to skip IO with the original file (only will use IO with the neural network training data)
ONLY_TRAIN_NN = False
# Path of the neural network training data
PATH_REPLACE_DATA = FILE_NAME + '.replace.txt'
PATH_ARRANGE_DATA = FILE_NAME + '.arrange.txt'
def main():
# .spacy.txt is a pre-processed file containing a tokenized
if not ONLY_TRAIN_NN:
with open(FILE_NAME + '.txt', encoding='utf-8') as file, open(FILE_NAME + '.spacy.txt') as tags_file:
progress = 0
start_time = time.time()
words_processed = 0
for line in file:
line_tag = tags_file.readline().strip()
progress += 1
if TESTING_RANGE[0] < progress <= TESTING_RANGE[1]:
continue
line = line.strip()
line = unicodedata.normalize('NFKD', line)
p1, p2 = line.split('\t')
t1, t2 = line_tag.split('\t')
error_type = ErrorClassifier.classify_error_labeled(p1, p2)
train(p1, p2, error_type, t1, t2)
# Display progression in number of samples processed, use random to avoid too many (slow)
# interactions w/ console
words_processed += len(p1.split()) + len(p2.split())
if progress % 100 == 0:
print('\rProgress: [{}] Word Processed: [{}] Words per second: [{}] Lines per second: [{}]'
.format(progress, words_processed,
words_processed / (time.time() - start_time), (progress / (time.time() - start_time)))
, end='')
if ENABLE_LEARN_WORDS:
save_learned_words()
else:
assert len(learned_words) == 0
save_word_frequencies()
print()
print(test1, test2)
if ENABLE_TRAIN_REPLACE_NN:
train_replace_nn()
if ENABLE_TRAIN_ARRANGE_NN:
train_arrange_nn()
def train_replace_nn():
# create the dataset
max_start = 0
max_end = 0
samples = 0
if ENABLE_PROCESS_REPLACE_DATA:
# saves the data to a file
assert len(train_delta1) == len(train_delta2) == len(train_start) == len(train_end)
max_start = len(max(train_start, key=len))
max_end = len(max(train_end, key=len))
samples = len(train_delta1)
with open(PATH_REPLACE_DATA, 'w') as file_replace:
file_replace.write('{} {} {}\n'.format(max_start, max_end, samples))
for i in range(samples):
file_replace.write(' '.join(map(str, train_start[i])) + '\t')
file_replace.write(' '.join(map(str, train_end[i])) + '\t')
file_replace.write(str(train_delta1[i][0]) + '\t')
file_replace.write(str(train_delta2[i][0]) + '\n')
quit()
def replace_nn_generator():
|
with open(PATH_REPLACE_DATA) as file_replace:
max_start, max_end, samples = list(map(int, file_replace.readline().strip().split()))
dataset = tf.data.Dataset.from_generator(replace_nn_generator,
({'start': tf.int32, 'end': tf.int32, 'delta': tf.int32}, tf.float32),
({'start': tf.TensorShape([None, ]), 'end': tf.TensorShape([None, ]),
'delta': tf.TensorShape([1, ])},
tf.TensorShape([])))
dataset, validation_dataset = prepare_dataset(dataset, samples, batch_size=1024 * 4)
# Create the model
model = create_nn_model('replace')
model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
print('-------------')
cp_callback = tf.keras.callbacks.ModelCheckpoint(PATH_REPLACE_CHECKPOINT, save_weights_only=True,
save_best_only=False, verbose=1)
if ENABLE_LOAD_REPLACE_WEIGHTS:
model.load_weights(PATH_REPLACE_CHECKPOINT)
model.fit(dataset, steps_per_epoch=50, epochs=200, verbose=2, validation_data=validation_dataset,
validation_steps=1, callbacks=[cp_callback])
def train_arrange_nn():
# create the dataset
samples = 0
max_length = 0
if ENABLE_PROCESS_ARRANGE_DATA:
# saves the data to a file
assert len(train_arrange_x) == len(train_arrange_y)
max_length = len(max(train_arrange_x, key=len))
samples = len(train_arrange_x)
with open(PATH_ARRANGE_DATA, 'w') as file_arrange:
file_arrange.write('{} {}\n'.format(max_length, samples))
for i in range(samples):
file_arrange.write(' '.join(map(str, train_arrange_x[i])) + '\t')
file_arrange.write(str(train_arrange_y[i]) + '\n')
def arrange_nn_generator():
with open(PATH_ARRANGE_DATA) as file_arrange:
file_arrange.readline()
for arrange_line in file_arrange:
x, y = arrange_line.rstrip().split('\t')
x = list(map(int, x.split()))
y = float(y)
[x] = keras.preprocessing.sequence.pad_sequences([x], maxlen=max_length)
yield x, y
with open(PATH_ARRANGE_DATA) as file_arrange:
max_length, samples = list(map(int, file_arrange.readline().strip().split()))
dataset = tf.data.Dataset.from_generator(arrange_nn_generator,
(tf.int32, tf.float32),
(tf.TensorShape([None, ]), tf.TensorShape([])))
dataset, validation_dataset = prepare_dataset(dataset, samples)
model = create_nn_model('arrange')
model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
print('-------------')
cp_callback = tf.keras.callbacks.ModelCheckpoint(PATH_ARRANGE_CHECKPOINT, save_weights_only=True,
save_best_only=False, verbose=1)
if ENABLE_LOAD_ARRANGE_WEIGHTS:
model.load_weights(PATH_ARRANGE_CHECKPOINT)
model.fit(dataset, steps_per_epoch=50, epochs=200, verbose=2, validation_data=validation_dataset,
validation_steps=1, callbacks=[cp_callback])
def prepare_dataset(dataset, samples, batch_size=1024, seed=123, validation_proportion=0.1):
dataset = dataset.shuffle(1000, seed=seed)
validation_dataset = dataset.take(int(samples * validation_proportion)) # 10% used for validation
validation_dataset = validation_dataset.batch(1000)
validation_dataset = validation_dataset.repeat()
dataset = dataset.skip(int(samples * validation_proportion))
dataset = dataset.batch(batch_size)
dataset = dataset.shuffle(10000)
dataset = dataset.repeat()
return dataset, validation_dataset
def learn_words(part):
words = tokenize_pure_words(part)
for word in words:
if not ErrorClassifier.check_word_list(word):
learned_words.add(word)
def save_learned_words():
with open('learned_words.txt', 'w') as fout:
for word in learned_words:
fout.write(word + '\n')
def learn_word_frequencies(part):
words = tokenize_pure_words(part)
for word in words:
if word in word_freqs:
word_freqs[word] += 1
else:
word_freqs[word] = 1
def save_word_frequencies():
with open('learned_frequencies.csv', 'w', newline='') as fout:
csv_writer = csv.writer(fout)
for word, freq in word_freqs.items():
# don't bother with the little words
if freq > 1:
csv_writer.writerow([word, freq])
test1 = 0
test2 = 0
def prepare_replace_tags(part1, part2, tags1, tags2):
global test1, test2
tokens1, tokens2 = tokenize(part1, part2)
tags1 = tags1.split()
tags2 = tags2.split()
assert len(tokens1) == len(tags1)
assert len(tokens2) == len(tags2)
tag_map = {}
for i in range(len(tokens1)):
tag_map[tokens1[i]] = tags1[i]
for i in range(len(tokens2)):
tag_map[tokens2[i]] = tags2[i]
delta1, delta2, start, end = find_all_delta_from_tokens(tokens1, tokens2)
ids_d1 = list(map(lambda token: tags_to_id[tag_map[token]], delta1))
ids_d2 = list(map(lambda token: tags_to_id[tag_map[token]], delta2))
ids_st = list(map(lambda token: tags_to_id[tag_map[token]], start)) # start ids
ids_en = list(map(lambda token: tags_to_id[tag_map[token]], end)) # end ids
if ids_d1[0] == ids_d2[0]:
test1 += 1
# TODO resolve case in which both have same placeholder, use vector similarities, or none
# TODO count it
else:
test2 += 1
train_start.append(ids_st)
train_end.append(ids_en)
train_delta1.append(ids_d1)
train_delta2.append(ids_d2)
def prepare_arrange_tags(tags1, tags2):
tags1 = tags1.split()
tags2 = tags2.split()
ids1 = list(map(lambda tag: tags_to_id[tag], tags1))
ids2 = list(map(lambda tag: tags_to_id[tag], tags2))
# TODO count identical ARRANGE tags signature
train_arrange_x.append(ids1)
train_arrange_y.append(1.)
train_arrange_x.append(ids2)
train_arrange_y.append(0.)
def train(p1, p2, error_type, t1, t2):
# note: learn words is done in the error classifier (classification requires knowing the words)
learn_word_frequencies(p1) # only train frequencies on first part, second part is corrupted text
if ENABLE_LEARN_WORDS:
learn_words(p1)
if error_type == 'REPLACE' and ENABLE_TRAIN_REPLACE_NN and ENABLE_PROCESS_REPLACE_DATA:
prepare_replace_tags(p1, p2, t1, t2)
if ENABLE_TRAIN_ARRANGE_NN and ENABLE_PROCESS_ARRANGE_DATA and error_type == 'ARRANGE':
prepare_arrange_tags(t1, t2)
if __name__ == '__main__':
learned_words = set()
word_freqs = {}
# For the REPLACE neural network
train_start = []
train_end = []
train_delta1 = []
train_delta2 = []
# For the ARRANGE neural network
train_arrange_x = []
train_arrange_y = []
main()
| with open(PATH_REPLACE_DATA) as file_replace:
file_replace.readline()
for replace_line in file_replace:
start, end, delta1, delta2 = replace_line.rstrip().split('\t')
start = list(map(int, start.split()))
end = list(map(int, end.split()))
delta1 = [int(delta1)]
delta2 = [int(delta2)]
[start] = keras.preprocessing.sequence.pad_sequences([start], maxlen=max_start)
[end] = keras.preprocessing.sequence.pad_sequences([end], maxlen=max_end)
yield {'start': start, 'end': end, 'delta': delta1}, 1.
yield {'start': start, 'end': end, 'delta': delta2}, 0. | identifier_body |
Trainer.py | import csv
import time
import unicodedata
import tensorflow as tf
from tensorflow import keras
import ErrorClassifier
from TokenHelper import tokenize, tokenize_pure_words, find_all_delta_from_tokens
from NeuralNetworkHelper import PATH_REPLACE_CHECKPOINT, PATH_ARRANGE_CHECKPOINT, FILE_NAME, TESTING_RANGE
from NeuralNetworkHelper import tags_to_id
from NNModels import create_nn_model
# Only can learn when the learned_words.txt file is empty
ENABLE_LEARN_WORDS = False
# True when we want to train the respective neural network
ENABLE_TRAIN_REPLACE_NN = True
ENABLE_TRAIN_ARRANGE_NN = False
# True when we want to rebuild the saved data file for the neural network, using data from the "original" training file
ENABLE_PROCESS_REPLACE_DATA = True
ENABLE_PROCESS_ARRANGE_DATA = False
# True when we want to reload the saved weights of the neural network
ENABLE_LOAD_REPLACE_WEIGHTS = True
ENABLE_LOAD_ARRANGE_WEIGHTS = True
# True when we want to skip IO with the original file (only will use IO with the neural network training data)
ONLY_TRAIN_NN = False
# Path of the neural network training data
PATH_REPLACE_DATA = FILE_NAME + '.replace.txt'
PATH_ARRANGE_DATA = FILE_NAME + '.arrange.txt'
def main():
# .spacy.txt is a pre-processed file containing a tokenized
if not ONLY_TRAIN_NN:
with open(FILE_NAME + '.txt', encoding='utf-8') as file, open(FILE_NAME + '.spacy.txt') as tags_file:
progress = 0
start_time = time.time()
words_processed = 0
for line in file:
line_tag = tags_file.readline().strip()
progress += 1
if TESTING_RANGE[0] < progress <= TESTING_RANGE[1]:
|
line = line.strip()
line = unicodedata.normalize('NFKD', line)
p1, p2 = line.split('\t')
t1, t2 = line_tag.split('\t')
error_type = ErrorClassifier.classify_error_labeled(p1, p2)
train(p1, p2, error_type, t1, t2)
# Display progression in number of samples processed, use random to avoid too many (slow)
# interactions w/ console
words_processed += len(p1.split()) + len(p2.split())
if progress % 100 == 0:
print('\rProgress: [{}] Word Processed: [{}] Words per second: [{}] Lines per second: [{}]'
.format(progress, words_processed,
words_processed / (time.time() - start_time), (progress / (time.time() - start_time)))
, end='')
if ENABLE_LEARN_WORDS:
save_learned_words()
else:
assert len(learned_words) == 0
save_word_frequencies()
print()
print(test1, test2)
if ENABLE_TRAIN_REPLACE_NN:
train_replace_nn()
if ENABLE_TRAIN_ARRANGE_NN:
train_arrange_nn()
def train_replace_nn():
# create the dataset
max_start = 0
max_end = 0
samples = 0
if ENABLE_PROCESS_REPLACE_DATA:
# saves the data to a file
assert len(train_delta1) == len(train_delta2) == len(train_start) == len(train_end)
max_start = len(max(train_start, key=len))
max_end = len(max(train_end, key=len))
samples = len(train_delta1)
with open(PATH_REPLACE_DATA, 'w') as file_replace:
file_replace.write('{} {} {}\n'.format(max_start, max_end, samples))
for i in range(samples):
file_replace.write(' '.join(map(str, train_start[i])) + '\t')
file_replace.write(' '.join(map(str, train_end[i])) + '\t')
file_replace.write(str(train_delta1[i][0]) + '\t')
file_replace.write(str(train_delta2[i][0]) + '\n')
quit()
def replace_nn_generator():
with open(PATH_REPLACE_DATA) as file_replace:
file_replace.readline()
for replace_line in file_replace:
start, end, delta1, delta2 = replace_line.rstrip().split('\t')
start = list(map(int, start.split()))
end = list(map(int, end.split()))
delta1 = [int(delta1)]
delta2 = [int(delta2)]
[start] = keras.preprocessing.sequence.pad_sequences([start], maxlen=max_start)
[end] = keras.preprocessing.sequence.pad_sequences([end], maxlen=max_end)
yield {'start': start, 'end': end, 'delta': delta1}, 1.
yield {'start': start, 'end': end, 'delta': delta2}, 0.
with open(PATH_REPLACE_DATA) as file_replace:
max_start, max_end, samples = list(map(int, file_replace.readline().strip().split()))
dataset = tf.data.Dataset.from_generator(replace_nn_generator,
({'start': tf.int32, 'end': tf.int32, 'delta': tf.int32}, tf.float32),
({'start': tf.TensorShape([None, ]), 'end': tf.TensorShape([None, ]),
'delta': tf.TensorShape([1, ])},
tf.TensorShape([])))
dataset, validation_dataset = prepare_dataset(dataset, samples, batch_size=1024 * 4)
# Create the model
model = create_nn_model('replace')
model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
print('-------------')
cp_callback = tf.keras.callbacks.ModelCheckpoint(PATH_REPLACE_CHECKPOINT, save_weights_only=True,
save_best_only=False, verbose=1)
if ENABLE_LOAD_REPLACE_WEIGHTS:
model.load_weights(PATH_REPLACE_CHECKPOINT)
model.fit(dataset, steps_per_epoch=50, epochs=200, verbose=2, validation_data=validation_dataset,
validation_steps=1, callbacks=[cp_callback])
def train_arrange_nn():
# create the dataset
samples = 0
max_length = 0
if ENABLE_PROCESS_ARRANGE_DATA:
# saves the data to a file
assert len(train_arrange_x) == len(train_arrange_y)
max_length = len(max(train_arrange_x, key=len))
samples = len(train_arrange_x)
with open(PATH_ARRANGE_DATA, 'w') as file_arrange:
file_arrange.write('{} {}\n'.format(max_length, samples))
for i in range(samples):
file_arrange.write(' '.join(map(str, train_arrange_x[i])) + '\t')
file_arrange.write(str(train_arrange_y[i]) + '\n')
def arrange_nn_generator():
with open(PATH_ARRANGE_DATA) as file_arrange:
file_arrange.readline()
for arrange_line in file_arrange:
x, y = arrange_line.rstrip().split('\t')
x = list(map(int, x.split()))
y = float(y)
[x] = keras.preprocessing.sequence.pad_sequences([x], maxlen=max_length)
yield x, y
with open(PATH_ARRANGE_DATA) as file_arrange:
max_length, samples = list(map(int, file_arrange.readline().strip().split()))
dataset = tf.data.Dataset.from_generator(arrange_nn_generator,
(tf.int32, tf.float32),
(tf.TensorShape([None, ]), tf.TensorShape([])))
dataset, validation_dataset = prepare_dataset(dataset, samples)
model = create_nn_model('arrange')
model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
print('-------------')
cp_callback = tf.keras.callbacks.ModelCheckpoint(PATH_ARRANGE_CHECKPOINT, save_weights_only=True,
save_best_only=False, verbose=1)
if ENABLE_LOAD_ARRANGE_WEIGHTS:
model.load_weights(PATH_ARRANGE_CHECKPOINT)
model.fit(dataset, steps_per_epoch=50, epochs=200, verbose=2, validation_data=validation_dataset,
validation_steps=1, callbacks=[cp_callback])
def prepare_dataset(dataset, samples, batch_size=1024, seed=123, validation_proportion=0.1):
dataset = dataset.shuffle(1000, seed=seed)
validation_dataset = dataset.take(int(samples * validation_proportion)) # 10% used for validation
validation_dataset = validation_dataset.batch(1000)
validation_dataset = validation_dataset.repeat()
dataset = dataset.skip(int(samples * validation_proportion))
dataset = dataset.batch(batch_size)
dataset = dataset.shuffle(10000)
dataset = dataset.repeat()
return dataset, validation_dataset
def learn_words(part):
words = tokenize_pure_words(part)
for word in words:
if not ErrorClassifier.check_word_list(word):
learned_words.add(word)
def save_learned_words():
with open('learned_words.txt', 'w') as fout:
for word in learned_words:
fout.write(word + '\n')
def learn_word_frequencies(part):
words = tokenize_pure_words(part)
for word in words:
if word in word_freqs:
word_freqs[word] += 1
else:
word_freqs[word] = 1
def save_word_frequencies():
with open('learned_frequencies.csv', 'w', newline='') as fout:
csv_writer = csv.writer(fout)
for word, freq in word_freqs.items():
# don't bother with the little words
if freq > 1:
csv_writer.writerow([word, freq])
test1 = 0
test2 = 0
def prepare_replace_tags(part1, part2, tags1, tags2):
global test1, test2
tokens1, tokens2 = tokenize(part1, part2)
tags1 = tags1.split()
tags2 = tags2.split()
assert len(tokens1) == len(tags1)
assert len(tokens2) == len(tags2)
tag_map = {}
for i in range(len(tokens1)):
tag_map[tokens1[i]] = tags1[i]
for i in range(len(tokens2)):
tag_map[tokens2[i]] = tags2[i]
delta1, delta2, start, end = find_all_delta_from_tokens(tokens1, tokens2)
ids_d1 = list(map(lambda token: tags_to_id[tag_map[token]], delta1))
ids_d2 = list(map(lambda token: tags_to_id[tag_map[token]], delta2))
ids_st = list(map(lambda token: tags_to_id[tag_map[token]], start)) # start ids
ids_en = list(map(lambda token: tags_to_id[tag_map[token]], end)) # end ids
if ids_d1[0] == ids_d2[0]:
test1 += 1
# TODO resolve case in which both have same placeholder, use vector similarities, or none
# TODO count it
else:
test2 += 1
train_start.append(ids_st)
train_end.append(ids_en)
train_delta1.append(ids_d1)
train_delta2.append(ids_d2)
def prepare_arrange_tags(tags1, tags2):
tags1 = tags1.split()
tags2 = tags2.split()
ids1 = list(map(lambda tag: tags_to_id[tag], tags1))
ids2 = list(map(lambda tag: tags_to_id[tag], tags2))
# TODO count identical ARRANGE tags signature
train_arrange_x.append(ids1)
train_arrange_y.append(1.)
train_arrange_x.append(ids2)
train_arrange_y.append(0.)
def train(p1, p2, error_type, t1, t2):
# note: learn words is done in the error classifier (classification requires knowing the words)
learn_word_frequencies(p1) # only train frequencies on first part, second part is corrupted text
if ENABLE_LEARN_WORDS:
learn_words(p1)
if error_type == 'REPLACE' and ENABLE_TRAIN_REPLACE_NN and ENABLE_PROCESS_REPLACE_DATA:
prepare_replace_tags(p1, p2, t1, t2)
if ENABLE_TRAIN_ARRANGE_NN and ENABLE_PROCESS_ARRANGE_DATA and error_type == 'ARRANGE':
prepare_arrange_tags(t1, t2)
if __name__ == '__main__':
learned_words = set()
word_freqs = {}
# For the REPLACE neural network
train_start = []
train_end = []
train_delta1 = []
train_delta2 = []
# For the ARRANGE neural network
train_arrange_x = []
train_arrange_y = []
main()
| continue | conditional_block |
Trainer.py | import csv
import time
import unicodedata
import tensorflow as tf
from tensorflow import keras
import ErrorClassifier
from TokenHelper import tokenize, tokenize_pure_words, find_all_delta_from_tokens
from NeuralNetworkHelper import PATH_REPLACE_CHECKPOINT, PATH_ARRANGE_CHECKPOINT, FILE_NAME, TESTING_RANGE
from NeuralNetworkHelper import tags_to_id
from NNModels import create_nn_model
# Only can learn when the learned_words.txt file is empty
ENABLE_LEARN_WORDS = False
# True when we want to train the respective neural network
ENABLE_TRAIN_REPLACE_NN = True
ENABLE_TRAIN_ARRANGE_NN = False
# True when we want to rebuild the saved data file for the neural network, using data from the "original" training file
ENABLE_PROCESS_REPLACE_DATA = True
ENABLE_PROCESS_ARRANGE_DATA = False
# True when we want to reload the saved weights of the neural network
ENABLE_LOAD_REPLACE_WEIGHTS = True
ENABLE_LOAD_ARRANGE_WEIGHTS = True
# True when we want to skip IO with the original file (only will use IO with the neural network training data)
ONLY_TRAIN_NN = False
# Path of the neural network training data
PATH_REPLACE_DATA = FILE_NAME + '.replace.txt'
PATH_ARRANGE_DATA = FILE_NAME + '.arrange.txt'
def main():
# .spacy.txt is a pre-processed file containing a tokenized
if not ONLY_TRAIN_NN:
with open(FILE_NAME + '.txt', encoding='utf-8') as file, open(FILE_NAME + '.spacy.txt') as tags_file:
progress = 0
start_time = time.time()
words_processed = 0
for line in file:
line_tag = tags_file.readline().strip()
progress += 1
if TESTING_RANGE[0] < progress <= TESTING_RANGE[1]:
continue
line = line.strip()
line = unicodedata.normalize('NFKD', line)
p1, p2 = line.split('\t')
t1, t2 = line_tag.split('\t')
error_type = ErrorClassifier.classify_error_labeled(p1, p2)
train(p1, p2, error_type, t1, t2)
# Display progression in number of samples processed, use random to avoid too many (slow)
# interactions w/ console
words_processed += len(p1.split()) + len(p2.split())
if progress % 100 == 0:
print('\rProgress: [{}] Word Processed: [{}] Words per second: [{}] Lines per second: [{}]'
.format(progress, words_processed,
words_processed / (time.time() - start_time), (progress / (time.time() - start_time)))
, end='')
if ENABLE_LEARN_WORDS:
save_learned_words()
else:
assert len(learned_words) == 0
save_word_frequencies()
print()
print(test1, test2)
if ENABLE_TRAIN_REPLACE_NN:
train_replace_nn()
if ENABLE_TRAIN_ARRANGE_NN:
train_arrange_nn()
def train_replace_nn():
# create the dataset
max_start = 0
max_end = 0
samples = 0
if ENABLE_PROCESS_REPLACE_DATA:
# saves the data to a file
assert len(train_delta1) == len(train_delta2) == len(train_start) == len(train_end)
max_start = len(max(train_start, key=len))
max_end = len(max(train_end, key=len))
samples = len(train_delta1)
with open(PATH_REPLACE_DATA, 'w') as file_replace:
file_replace.write('{} {} {}\n'.format(max_start, max_end, samples))
for i in range(samples):
file_replace.write(' '.join(map(str, train_start[i])) + '\t')
file_replace.write(' '.join(map(str, train_end[i])) + '\t')
file_replace.write(str(train_delta1[i][0]) + '\t')
file_replace.write(str(train_delta2[i][0]) + '\n')
quit()
def replace_nn_generator():
with open(PATH_REPLACE_DATA) as file_replace:
file_replace.readline()
for replace_line in file_replace:
start, end, delta1, delta2 = replace_line.rstrip().split('\t')
start = list(map(int, start.split()))
end = list(map(int, end.split()))
delta1 = [int(delta1)]
delta2 = [int(delta2)]
[start] = keras.preprocessing.sequence.pad_sequences([start], maxlen=max_start)
[end] = keras.preprocessing.sequence.pad_sequences([end], maxlen=max_end)
yield {'start': start, 'end': end, 'delta': delta1}, 1.
yield {'start': start, 'end': end, 'delta': delta2}, 0.
with open(PATH_REPLACE_DATA) as file_replace:
max_start, max_end, samples = list(map(int, file_replace.readline().strip().split()))
dataset = tf.data.Dataset.from_generator(replace_nn_generator,
({'start': tf.int32, 'end': tf.int32, 'delta': tf.int32}, tf.float32),
({'start': tf.TensorShape([None, ]), 'end': tf.TensorShape([None, ]),
'delta': tf.TensorShape([1, ])},
tf.TensorShape([])))
dataset, validation_dataset = prepare_dataset(dataset, samples, batch_size=1024 * 4)
# Create the model
model = create_nn_model('replace')
model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
print('-------------')
cp_callback = tf.keras.callbacks.ModelCheckpoint(PATH_REPLACE_CHECKPOINT, save_weights_only=True,
save_best_only=False, verbose=1)
if ENABLE_LOAD_REPLACE_WEIGHTS:
model.load_weights(PATH_REPLACE_CHECKPOINT)
model.fit(dataset, steps_per_epoch=50, epochs=200, verbose=2, validation_data=validation_dataset,
validation_steps=1, callbacks=[cp_callback])
def train_arrange_nn():
# create the dataset
samples = 0
max_length = 0
if ENABLE_PROCESS_ARRANGE_DATA:
# saves the data to a file
assert len(train_arrange_x) == len(train_arrange_y)
max_length = len(max(train_arrange_x, key=len))
samples = len(train_arrange_x)
with open(PATH_ARRANGE_DATA, 'w') as file_arrange:
file_arrange.write('{} {}\n'.format(max_length, samples))
for i in range(samples):
file_arrange.write(' '.join(map(str, train_arrange_x[i])) + '\t')
file_arrange.write(str(train_arrange_y[i]) + '\n')
def arrange_nn_generator():
with open(PATH_ARRANGE_DATA) as file_arrange:
file_arrange.readline()
for arrange_line in file_arrange:
x, y = arrange_line.rstrip().split('\t')
x = list(map(int, x.split()))
y = float(y)
[x] = keras.preprocessing.sequence.pad_sequences([x], maxlen=max_length)
yield x, y
with open(PATH_ARRANGE_DATA) as file_arrange:
max_length, samples = list(map(int, file_arrange.readline().strip().split()))
dataset = tf.data.Dataset.from_generator(arrange_nn_generator,
(tf.int32, tf.float32),
(tf.TensorShape([None, ]), tf.TensorShape([])))
dataset, validation_dataset = prepare_dataset(dataset, samples)
model = create_nn_model('arrange')
model.compile(optimizer=tf.train.AdamOptimizer(), loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
print('-------------')
cp_callback = tf.keras.callbacks.ModelCheckpoint(PATH_ARRANGE_CHECKPOINT, save_weights_only=True,
save_best_only=False, verbose=1)
if ENABLE_LOAD_ARRANGE_WEIGHTS:
model.load_weights(PATH_ARRANGE_CHECKPOINT)
model.fit(dataset, steps_per_epoch=50, epochs=200, verbose=2, validation_data=validation_dataset,
validation_steps=1, callbacks=[cp_callback])
def prepare_dataset(dataset, samples, batch_size=1024, seed=123, validation_proportion=0.1):
dataset = dataset.shuffle(1000, seed=seed)
validation_dataset = dataset.take(int(samples * validation_proportion)) # 10% used for validation
validation_dataset = validation_dataset.batch(1000)
validation_dataset = validation_dataset.repeat()
dataset = dataset.skip(int(samples * validation_proportion))
dataset = dataset.batch(batch_size)
dataset = dataset.shuffle(10000)
dataset = dataset.repeat()
return dataset, validation_dataset
def learn_words(part):
words = tokenize_pure_words(part)
for word in words:
if not ErrorClassifier.check_word_list(word):
learned_words.add(word)
def save_learned_words():
with open('learned_words.txt', 'w') as fout:
for word in learned_words:
fout.write(word + '\n')
def learn_word_frequencies(part):
words = tokenize_pure_words(part)
for word in words:
if word in word_freqs:
word_freqs[word] += 1
else: |
def save_word_frequencies():
with open('learned_frequencies.csv', 'w', newline='') as fout:
csv_writer = csv.writer(fout)
for word, freq in word_freqs.items():
# don't bother with the little words
if freq > 1:
csv_writer.writerow([word, freq])
test1 = 0
test2 = 0
def prepare_replace_tags(part1, part2, tags1, tags2):
global test1, test2
tokens1, tokens2 = tokenize(part1, part2)
tags1 = tags1.split()
tags2 = tags2.split()
assert len(tokens1) == len(tags1)
assert len(tokens2) == len(tags2)
tag_map = {}
for i in range(len(tokens1)):
tag_map[tokens1[i]] = tags1[i]
for i in range(len(tokens2)):
tag_map[tokens2[i]] = tags2[i]
delta1, delta2, start, end = find_all_delta_from_tokens(tokens1, tokens2)
ids_d1 = list(map(lambda token: tags_to_id[tag_map[token]], delta1))
ids_d2 = list(map(lambda token: tags_to_id[tag_map[token]], delta2))
ids_st = list(map(lambda token: tags_to_id[tag_map[token]], start)) # start ids
ids_en = list(map(lambda token: tags_to_id[tag_map[token]], end)) # end ids
if ids_d1[0] == ids_d2[0]:
test1 += 1
# TODO resolve case in which both have same placeholder, use vector similarities, or none
# TODO count it
else:
test2 += 1
train_start.append(ids_st)
train_end.append(ids_en)
train_delta1.append(ids_d1)
train_delta2.append(ids_d2)
def prepare_arrange_tags(tags1, tags2):
tags1 = tags1.split()
tags2 = tags2.split()
ids1 = list(map(lambda tag: tags_to_id[tag], tags1))
ids2 = list(map(lambda tag: tags_to_id[tag], tags2))
# TODO count identical ARRANGE tags signature
train_arrange_x.append(ids1)
train_arrange_y.append(1.)
train_arrange_x.append(ids2)
train_arrange_y.append(0.)
def train(p1, p2, error_type, t1, t2):
# note: learn words is done in the error classifier (classification requires knowing the words)
learn_word_frequencies(p1) # only train frequencies on first part, second part is corrupted text
if ENABLE_LEARN_WORDS:
learn_words(p1)
if error_type == 'REPLACE' and ENABLE_TRAIN_REPLACE_NN and ENABLE_PROCESS_REPLACE_DATA:
prepare_replace_tags(p1, p2, t1, t2)
if ENABLE_TRAIN_ARRANGE_NN and ENABLE_PROCESS_ARRANGE_DATA and error_type == 'ARRANGE':
prepare_arrange_tags(t1, t2)
if __name__ == '__main__':
learned_words = set()
word_freqs = {}
# For the REPLACE neural network
train_start = []
train_end = []
train_delta1 = []
train_delta2 = []
# For the ARRANGE neural network
train_arrange_x = []
train_arrange_y = []
main() | word_freqs[word] = 1 | random_line_split |
__init__.py | from bs4 import BeautifulSoup
from unicodedata import normalize
import re
import requests
from requests.exceptions import RequestException
from time import sleep
from pathlib import Path
import pandas as pd
def requests_get(*args, **kwargs):
"""
Retries if a RequestException is raised (could be a connection error or
a timeout).
"""
logger = kwargs.pop('logger', None)
s = requests.Session()
s.headers[
'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'
try:
return s.get(*args, **kwargs)
except RequestException as exc:
if logger:
logger.warning('Request failed (%s). Retrying ...', exc)
return s.get(*args, **kwargs)
def create_param_url(search_params: dict):
url_string = ""
for key, value in search_params.items():
url_string = url_string + "&" + key + "=" + value
return url_string
def print_results(results: dict):
print(f"** Annonce {results['idannonce']} **")
for key, value in results.items():
print(f"'{key}': '{value}'")
print("\n\n")
class SelogerBase(object):
"""
Base class for all Seloger wrapper
Parameters
----------
class_filters : dict
Main search options
ex. {'transaction_type':['achat'], 'bien': ['appartement', 'maison'], 'naturebien': ['ancien', 'neuf']}
type_of_search: str
Can be either 'base', for ads of properties on the market, or 'biens-vendus' for the search on the property sold section.
location : dict
Either one of the following:
postcode (ex. {'code_postal': 75015} or {'code_postal': 75})
INSEE code (ex. {'code_INSEE': 75115})
Location name (ex. {'location_name': 'PARIS'})
*argv : str
Search options from binary_filter_options
**kwargs: dict ex.{'delay': 2}
Other search options or tweaking parameters
delay: number of seconds between requests, used to avoid overcharging servers
Returns
-------
"""
def __init__(self, **kwargs):
# Get parameters
self.delay = kwargs.get('delay') or 3
def get_current_parameters(self, search_url=True, *args, **kwargs):
"""
Retrieve search parameters from the html page of a search on Seloger.com
:param search_url: The page url is passed as an input (True) or a parsed page (False).
:param args: A BeautifulSoup parsed page.
:param kwargs:
write_to: a string with the path and name of a file to save the html of the url or the parsed page.
:return: a dictionary with the search parameters as they appear in the json of html page.
"""
if search_url:
try:
print(f"Get pages from base url {self.url}\n", "...")
page0 = requests_get(self.url)
print("Request successful.")
except:
print('ERROR: too many redirects - They might have detected the crawler, try changing ip.')
return
print("Parsing page\n", "...")
page_parsed = BeautifulSoup(page0.content, 'html.parser')
# Check validity of the page
try:
if page_parsed.find('meta').attrs['name'] == 'robots':
print('ERROR: invalid result page - They might have detected the crawler, try changing ip.')
return
except KeyError:
print(f"Valid response from {self.url}")
else:
page_parsed = args[0]
write_to = kwargs.get('write_to')
# Save html to file
if write_to:
my_file = Path(write_to)
if my_file.is_file():
print(f"{write_to} exists already. Do you want to overwrite?\n")
response = input("Y/N > ")
if response == 'Y' or response == 'y':
write_to_sure = write_to
else:
print("Please, give type another file path:\n")
response = input("new path to file > ")
write_to_sure = response
with open(write_to_sure, 'w+') as file:
file.write(page_parsed.text)
# Extact the json from the JavaScript of the page
page_data = page_parsed.find('div', {'class': 'c-wrap-main'})
page_data_str = normalize('NFKD', page_data.prettify())
page_data_str_minified = page_data_str.replace('\n', '').replace('\r', '').replace(' ', '').replace("true",
"True").replace(
"false", "False")
json_str = re.search('({.*});ava.*', page_data_str_minified).group(1)
params = eval(json_str)
return params
def get_pages(self, **kwargs):
"""
:param kwargs:
max_num_pages: maximum number of pages to be processed. If left empty, it is set to its maximum number 100.
:return: a generator of HTML parsed result pages.
"""
max_num_pages = kwargs.get('max_num_pages') or 100
results_per_page = 20
try:
print(f"Get pages from base url {self.url}\n", "...")
page0 = requests_get(self.url)
print("Request successful.")
except:
print('ERROR: too many redirects - They might have detected the crawler, try changing ip.')
return
print("Parsing page\n", "...")
page_parsed = BeautifulSoup(page0.content, 'html.parser')
# Check validity of the page
try:
if page_parsed.find('meta').attrs['name'] == 'robots':
print('ERROR: invalid result page - They might have detected the crawler, try changing ip.')
return
except KeyError:
print(f"Valid response from {self.url}")
num_results = re.search('\s?"nbresults"\s+:\s? "(\d+[^"]*)"', page_parsed.text).group(1)
num_results = int(num_results.replace("\xa0", ""))
num_pages = num_results // results_per_page + 1
if num_pages > max_num_pages:
num_pages = max_num_pages
print(f"The search returned {num_results} results.")
print(f"{results_per_page*num_pages} results in {num_pages} pages will be processed.")
current_page_num = int(re.search('\s?"nbpage"\s+:\s? "(\d+[^"]*)"', page_parsed.text).group(1))
while current_page_num <= num_pages:
if current_page_num == 1:
current_page_parsed = page_parsed
print(f"Page {current_page_num} parsed")
else:
current_page_url = self.url + "&LISTING-LISTpg=" + str(current_page_num)
print(f"Get url {current_page_url}")
sleep(self.delay)
current_page = requests_get(current_page_url)
current_page_parsed = BeautifulSoup(current_page.content, 'html.parser')
print(f"Page {current_page_num} parsed")
current_page_num += 1
yield current_page_parsed
def get_results(self, max_num_pages=None, **kwargs):
"""
:param
kwargs:
pages: a generator created with get_pages(). This parameter overrides the other two.
max_number_pages: int, if empty it is set to its maximum number 100.
print_results: int, print a number per page of results for control.
:return: A generator of dictionaries each corresponding to a property ad
"""
pages = kwargs.get('pages')
for page in pages or self.get_pages(max_num_pages=max_num_pages):
params = self.get_current_parameters(False, page)
properties = params['products']
n = kwargs.get('print_results')
printed_results = 0
for ad in properties:
if n:
while printed_results <= n:
print_results(ad)
printed_results += 1
yield ad
def results_to_dataframe(self, max_num_pages=None, **kwargs):
results = kwargs.get('results')
df = pd.DataFrame()
for ad in results or self.get_results(max_num_pages=max_num_pages):
d = pd.DataFrame.from_dict(ad)
df = df.append(d)
df.drop(['affichagetype', 'idtypepublicationsourcecouplage', 'produitsvisibilite'], axis=1, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
for column in df.columns:
if re.search("^nb", column) or column == 'prix' or column == 'surface':
df[column] = pd.to_numeric(df[column].str.replace(',', '.'))
return df
class SeLogerAchat(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=2" + create_param_url(search_params=search_params)
super(SeLogerAchat, self).__init__()
class SeLogerLocation(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=1" + create_param_url(search_params=search_params)
super(SeLogerLocation, self).__init__()
class SeLogerLocationTemporaire(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=3" + create_param_url(search_params=search_params)
super(SeLogerLocationTemporaire, self).__init__()
class SeLogerLocationViager(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=5" + create_param_url(search_params=search_params)
super(SeLogerLocationViager, self).__init__()
class SeLogerInvestissement(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=6" + create_param_url(search_params=search_params)
super(SeLogerInvestissement, self).__init__()
class SeLogerLocationVacances(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=4" + create_param_url(search_params=search_params)
super(SeLogerLocationVacances, self).__init__()
class SeLogerBiensVendus(SelogerBase):
def __init__(self, search_params):
self.url = "http://biens-vendus.seloger.com/list.htm?" + "idtt=4" + create_param_url(
search_params=search_params)
super(SeLogerBiensVendus, self).__init__()
# Show help for search filter options
def show_search_filters(**kwargs):
def print_type_options(type_options):
print("\n")
for option_title, option_value in type_options.items():
|
def print_binary_and_numeric_options(search_options):
print("\n")
for option_title, option_value in search_options['Filters'].items():
print("\t" + option_title + ":")
print("\t-----")
for option, option_api_value in option_value.items():
print("\t\t\t* " + option + ":",
"\t{'" + option_api_value['url_key'] + "': " + "'" + option_api_value['value'] + "'}")
search_example = amenities_and_ad_filters['example']
print("\n")
print("\t\tIf you want to", search_example['description'], search_example['url_key'])
print("\n")
def print_choice(selection_labels):
print("What filters do you wnt to know about (quit with 'q')?")
print(
"1. Sort options \n2. Property types \n3. Price, size and number of rooms \n4. Kitchen and heating types \n5. Amenities and ad filters")
sel = input(" > ")
if sel == 'q':
return
print_search_filters = selection_labels[sel].get('fun')
search_filters = selection_labels[sel].get('arg')
print_search_filters(search_filters)
print_choice(selection_labels)
sort_by = {
'Sorting options': {
'url_key': 'tri',
'value': {
'By selection': 'initial',
'By price': 'a_px',
'By surface': 'a_surface',
'By location': 'a_ville',
'By date': 'd_dt_crea'
},
'example': {'description': 'sort the ads by creation date:',
'url_key': "{'tri': 'd_dt_crea'}"}
}
}
property_type = {
'Property type': {
'url_key': 'idtypebien',
'value': {
'Apartment': '1',
'House': '2',
'Car park': '3',
'Shop': '6',
'Commercial': '7',
'Office': '8',
'Lofts - Ateliers - Land': '9',
'Various': '10',
'Property': '11',
'Building': '12',
'Castle': '13',
'Hotels Particuliers': '14'
},
'example': {'description': 'look for houses and apartments only:',
'url_key': "{'idtypebien': '1,2'}"}
},
'Building age': {
'url_key': 'naturebien',
'value': {
'old': '1',
'New': '2',
'In construction': '4'
},
'example': {'description': 'look for new construction only:',
'url_key': "{'naturebien': '2'}"}
}
}
kitchen_and_heating_type = {
'Kitchen type': {
'url_key': 'idtypecuisine',
'value': {
'Separated kitchen': '3',
'Open kitchen': '2',
'Kitchenette': '5',
'Fitted kitchen': '9'
},
'example': {'description': 'an open kitchen:',
'url_key': "{'idtypecuisine': '2'}"}
},
'Heating type': {
'url_key': 'idtypechauffage',
'value': {
'individuel': '8192',
'central': '4096',
'electrique': '2048',
'gaz': '512',
'fuel': '1024',
'radiateur': '128',
'sol': '256'
},
'example': {'description': 'centralised underfloor heating:',
'url_key': "{'idtypechauffage': '4096, 256'}"}
}
}
amenities_and_ad_filters = {
'Filters': {
'Ad options': {
'Ad with video': {'url_key': 'video', 'value': '1'},
'Ad with virtual visit': {'url_key': 'vv', 'value': '1'},
'Ad with photos': {'url_key': 'photo', 'value': '15'},
'Exclusive': {'url_key': 'si_mandatexclusif', 'value': '1'},
'Price has changed': {'url_key': 'siBaissePrix', 'value': '1'}
},
'Amentities': {
'Last floor': {'url_key': 'si_dernieretage', 'value': '1'},
'Separated toilets': {'url_key': 'si_toilettes_separees', 'value': '1'},
'Bath tube': {'url_key': 'nb_salles_de_bainsmin', 'value': '1'},
'Bathroom': {'url_key': 'nb_salles_deaumin', 'value': '1'},
'Separate entrance': {'url_key': 'si_entree', 'value': '1'},
'Living room': {'url_key': 'si_sejour', 'value': '1'},
'Dining room': {'url_key': 'si_salle_a_manger', 'value': '1'},
'Terrace': {'url_key': 'si_terrasse', 'value': '1'},
'Balcony': {'url_key': 'nb_balconsmin', 'value': 'Insert number as a string'},
'Car park': {'url_key': 'si_parkings', 'value': '1'},
'Car box': {'url_key': 'si_boxes', 'value': '1'},
'Cellar': {'url_key': 'si_cave', 'value': '1'},
'Fire place': {'url_key': 'si_cheminee', 'value': '1'},
'Wooden floor': {'url_key': 'si_parquet', 'value': '1'},
'Lift': {'url_key': 'si_ascenseur', 'value': '1'},
'Swimming pool': {'url_key': 'si_piscine', 'value': '1'},
'Built-in wardrobe': {'url_key': 'si_placards', 'value': '1'},
'Interphone': {'url_key': 'si_interphone', 'value': '1'},
'Security code': {'url_key': 'si_digicode', 'value': '1'},
'Concierge': {'url_key': 'si_gardien', 'value': '1'},
'Disable access': {'url_key': 'si_handicape', 'value': '1'},
'Alarm': {'url_key': 'si_alarme', 'value': '1'},
'Without vis-a-vis': {'url_key': 'si_visavis', 'value': '1'},
'Nice view': {'url_key': 'si_vue', 'value': '1'},
'South facing': {'url_key': 'si_sud', 'value': '1'},
'Air conditioning': {'url_key': 'si_climatisation', 'value': '1'}
}
},
'example': {'description': 'add a lift, a parking and the air conditioning.',
'url_key': "{'si_ascenseur': '1', 'si_climatisation': '1', 'si_parkings': '1'}"}
}
property_size = {
'Filters': {
'Property size': {
'Minimum price': {'url_key': 'pxmin', 'value': 'Insert number as a string'},
'Maximum price': {'url_key': 'pxmax', 'value': 'Insert number as a string'},
'Minimum surface': {'url_key': 'surfacemin', 'value': 'Insert number as a string'},
'Maximum surface': {'url_key': 'surfacemax', 'value': 'Insert number as a string'},
'Number of rooms': {'url_key': 'nb_pieces', 'value': 'Insert number as a string'},
'Lower floor': {'url_key': 'etagemin', 'value': 'Insert number as a string'},
'Higher floor': {'url_key': 'etagemax', 'value': 'Insert number as a string'},
'Number fo bedrooms': {'url_key': 'nb_chambres', 'value': 'Insert number as a string'},
'Minimum land surface': {'url_key': 'surf_terrainmin', 'value': 'Insert number as a string'},
'Maximum land surface': {'url_key': 'surf_terrainmax', 'value': 'Insert number as a string'}
}
},
'example': {'description': 'look for a minimum surface of 70 sqm, 2 bedrooms for maximum 500 000 euros:',
'url_key': "{'surfacemin': '70', 'nb_chambres': '2', 'pxmax': '500000'}"}
}
selection_labels = {
'1': {'fun': print_type_options, 'arg': sort_by},
'2': {'fun': print_type_options, 'arg': property_type},
'3': {'fun': print_binary_and_numeric_options, 'arg': property_size},
'4': {'fun': print_type_options, 'arg': kitchen_and_heating_type},
'5': {'fun': print_binary_and_numeric_options, 'arg': amenities_and_ad_filters}
}
accepted_selection = ['sort_by', 'property_type',
'property_size', 'kitchen_and_heating_type',
'amenities_and_ad_filters', 'print_all']
selection = kwargs.get('selection')
if selection not in accepted_selection:
print_choice(selection_labels)
elif selection == 'print_all':
for k, v in selection_labels.items():
print_options = v.get('fun')
option = v.get('arg')
print_options(option)
else:
try:
print_type_options(eval(selection))
except:
print_binary_and_numeric_options(eval(selection))
show_search_filters()
| print("\t" + option_title + ":")
print("\t-----")
print("\t\t URL key:", "'" + option_value['url_key'] + "'")
print("\t\t URL key options:")
for option, option_api_value in option_value['value'].items():
print("\t\t\t* " + option + ":", "'" + option_api_value + "'")
search_example = option_value['example']
print("\n")
print("\t\tIf you want to", search_example['description'], search_example['url_key'])
print("\n") | conditional_block |
__init__.py | from bs4 import BeautifulSoup
from unicodedata import normalize
import re
import requests
from requests.exceptions import RequestException
from time import sleep
from pathlib import Path
import pandas as pd
def requests_get(*args, **kwargs):
"""
Retries if a RequestException is raised (could be a connection error or
a timeout).
"""
logger = kwargs.pop('logger', None)
s = requests.Session()
s.headers[
'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'
try:
return s.get(*args, **kwargs)
except RequestException as exc:
if logger:
logger.warning('Request failed (%s). Retrying ...', exc)
return s.get(*args, **kwargs)
def create_param_url(search_params: dict):
url_string = ""
for key, value in search_params.items():
url_string = url_string + "&" + key + "=" + value
return url_string
def print_results(results: dict):
print(f"** Annonce {results['idannonce']} **")
for key, value in results.items():
print(f"'{key}': '{value}'")
print("\n\n")
class SelogerBase(object):
"""
Base class for all Seloger wrapper
Parameters
----------
class_filters : dict
Main search options
ex. {'transaction_type':['achat'], 'bien': ['appartement', 'maison'], 'naturebien': ['ancien', 'neuf']}
type_of_search: str
Can be either 'base', for ads of properties on the market, or 'biens-vendus' for the search on the property sold section.
location : dict
Either one of the following:
postcode (ex. {'code_postal': 75015} or {'code_postal': 75})
INSEE code (ex. {'code_INSEE': 75115})
Location name (ex. {'location_name': 'PARIS'})
*argv : str
Search options from binary_filter_options
**kwargs: dict ex.{'delay': 2}
Other search options or tweaking parameters
delay: number of seconds between requests, used to avoid overcharging servers
Returns
-------
"""
def __init__(self, **kwargs):
# Get parameters
self.delay = kwargs.get('delay') or 3
def get_current_parameters(self, search_url=True, *args, **kwargs):
"""
Retrieve search parameters from the html page of a search on Seloger.com
:param search_url: The page url is passed as an input (True) or a parsed page (False).
:param args: A BeautifulSoup parsed page.
:param kwargs:
write_to: a string with the path and name of a file to save the html of the url or the parsed page.
:return: a dictionary with the search parameters as they appear in the json of html page.
"""
if search_url:
try:
print(f"Get pages from base url {self.url}\n", "...")
page0 = requests_get(self.url)
print("Request successful.")
except:
print('ERROR: too many redirects - They might have detected the crawler, try changing ip.')
return
print("Parsing page\n", "...")
page_parsed = BeautifulSoup(page0.content, 'html.parser')
# Check validity of the page
try:
if page_parsed.find('meta').attrs['name'] == 'robots':
print('ERROR: invalid result page - They might have detected the crawler, try changing ip.')
return
except KeyError:
print(f"Valid response from {self.url}")
else:
page_parsed = args[0]
write_to = kwargs.get('write_to')
# Save html to file
if write_to:
my_file = Path(write_to)
if my_file.is_file():
print(f"{write_to} exists already. Do you want to overwrite?\n")
response = input("Y/N > ")
if response == 'Y' or response == 'y':
write_to_sure = write_to
else:
print("Please, give type another file path:\n")
response = input("new path to file > ")
write_to_sure = response
with open(write_to_sure, 'w+') as file:
file.write(page_parsed.text)
# Extact the json from the JavaScript of the page
page_data = page_parsed.find('div', {'class': 'c-wrap-main'})
page_data_str = normalize('NFKD', page_data.prettify())
page_data_str_minified = page_data_str.replace('\n', '').replace('\r', '').replace(' ', '').replace("true",
"True").replace(
"false", "False")
json_str = re.search('({.*});ava.*', page_data_str_minified).group(1)
params = eval(json_str)
return params
def get_pages(self, **kwargs):
"""
:param kwargs:
max_num_pages: maximum number of pages to be processed. If left empty, it is set to its maximum number 100.
:return: a generator of HTML parsed result pages.
"""
max_num_pages = kwargs.get('max_num_pages') or 100
results_per_page = 20
try:
print(f"Get pages from base url {self.url}\n", "...")
page0 = requests_get(self.url)
print("Request successful.")
except:
print('ERROR: too many redirects - They might have detected the crawler, try changing ip.')
return
print("Parsing page\n", "...")
page_parsed = BeautifulSoup(page0.content, 'html.parser')
# Check validity of the page
try:
if page_parsed.find('meta').attrs['name'] == 'robots':
print('ERROR: invalid result page - They might have detected the crawler, try changing ip.')
return
except KeyError:
print(f"Valid response from {self.url}")
num_results = re.search('\s?"nbresults"\s+:\s? "(\d+[^"]*)"', page_parsed.text).group(1)
num_results = int(num_results.replace("\xa0", ""))
num_pages = num_results // results_per_page + 1
if num_pages > max_num_pages:
num_pages = max_num_pages
print(f"The search returned {num_results} results.")
print(f"{results_per_page*num_pages} results in {num_pages} pages will be processed.")
current_page_num = int(re.search('\s?"nbpage"\s+:\s? "(\d+[^"]*)"', page_parsed.text).group(1))
while current_page_num <= num_pages:
if current_page_num == 1:
current_page_parsed = page_parsed
print(f"Page {current_page_num} parsed")
else:
current_page_url = self.url + "&LISTING-LISTpg=" + str(current_page_num)
print(f"Get url {current_page_url}")
sleep(self.delay)
current_page = requests_get(current_page_url)
current_page_parsed = BeautifulSoup(current_page.content, 'html.parser')
print(f"Page {current_page_num} parsed")
current_page_num += 1
yield current_page_parsed
def get_results(self, max_num_pages=None, **kwargs):
"""
:param
kwargs:
pages: a generator created with get_pages(). This parameter overrides the other two.
max_number_pages: int, if empty it is set to its maximum number 100.
print_results: int, print a number per page of results for control.
:return: A generator of dictionaries each corresponding to a property ad
"""
pages = kwargs.get('pages')
for page in pages or self.get_pages(max_num_pages=max_num_pages):
params = self.get_current_parameters(False, page)
properties = params['products']
n = kwargs.get('print_results')
printed_results = 0
for ad in properties:
if n:
while printed_results <= n:
print_results(ad)
printed_results += 1
yield ad
def results_to_dataframe(self, max_num_pages=None, **kwargs):
results = kwargs.get('results')
df = pd.DataFrame()
for ad in results or self.get_results(max_num_pages=max_num_pages):
d = pd.DataFrame.from_dict(ad)
df = df.append(d)
df.drop(['affichagetype', 'idtypepublicationsourcecouplage', 'produitsvisibilite'], axis=1, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
for column in df.columns:
if re.search("^nb", column) or column == 'prix' or column == 'surface':
df[column] = pd.to_numeric(df[column].str.replace(',', '.'))
return df
class SeLogerAchat(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=2" + create_param_url(search_params=search_params)
super(SeLogerAchat, self).__init__()
class SeLogerLocation(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=1" + create_param_url(search_params=search_params)
super(SeLogerLocation, self).__init__()
class SeLogerLocationTemporaire(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=3" + create_param_url(search_params=search_params)
super(SeLogerLocationTemporaire, self).__init__()
class SeLogerLocationViager(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=5" + create_param_url(search_params=search_params)
super(SeLogerLocationViager, self).__init__()
class SeLogerInvestissement(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=6" + create_param_url(search_params=search_params)
super(SeLogerInvestissement, self).__init__()
class SeLogerLocationVacances(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=4" + create_param_url(search_params=search_params)
super(SeLogerLocationVacances, self).__init__()
class SeLogerBiensVendus(SelogerBase):
def __init__(self, search_params):
self.url = "http://biens-vendus.seloger.com/list.htm?" + "idtt=4" + create_param_url(
search_params=search_params)
super(SeLogerBiensVendus, self).__init__()
# Show help for search filter options
def show_search_filters(**kwargs):
def print_type_options(type_options):
print("\n")
for option_title, option_value in type_options.items():
print("\t" + option_title + ":")
print("\t-----")
print("\t\t URL key:", "'" + option_value['url_key'] + "'")
print("\t\t URL key options:")
for option, option_api_value in option_value['value'].items():
print("\t\t\t* " + option + ":", "'" + option_api_value + "'")
search_example = option_value['example']
print("\n")
print("\t\tIf you want to", search_example['description'], search_example['url_key'])
print("\n")
def print_binary_and_numeric_options(search_options):
print("\n")
for option_title, option_value in search_options['Filters'].items():
print("\t" + option_title + ":")
print("\t-----")
for option, option_api_value in option_value.items():
print("\t\t\t* " + option + ":",
"\t{'" + option_api_value['url_key'] + "': " + "'" + option_api_value['value'] + "'}")
search_example = amenities_and_ad_filters['example']
print("\n")
print("\t\tIf you want to", search_example['description'], search_example['url_key'])
print("\n")
def print_choice(selection_labels):
print("What filters do you wnt to know about (quit with 'q')?")
print(
"1. Sort options \n2. Property types \n3. Price, size and number of rooms \n4. Kitchen and heating types \n5. Amenities and ad filters")
sel = input(" > ")
if sel == 'q':
return
print_search_filters = selection_labels[sel].get('fun')
search_filters = selection_labels[sel].get('arg')
print_search_filters(search_filters)
print_choice(selection_labels)
sort_by = {
'Sorting options': {
'url_key': 'tri',
'value': {
'By selection': 'initial',
'By price': 'a_px',
'By surface': 'a_surface',
'By location': 'a_ville',
'By date': 'd_dt_crea'
},
'example': {'description': 'sort the ads by creation date:',
'url_key': "{'tri': 'd_dt_crea'}"}
}
}
property_type = {
'Property type': {
'url_key': 'idtypebien',
'value': {
'Apartment': '1',
'House': '2',
'Car park': '3',
'Shop': '6',
'Commercial': '7',
'Office': '8',
'Lofts - Ateliers - Land': '9',
'Various': '10',
'Property': '11',
'Building': '12',
'Castle': '13',
'Hotels Particuliers': '14'
},
'example': {'description': 'look for houses and apartments only:',
'url_key': "{'idtypebien': '1,2'}"}
},
'Building age': {
'url_key': 'naturebien',
'value': {
'old': '1',
'New': '2',
'In construction': '4'
},
'example': {'description': 'look for new construction only:',
'url_key': "{'naturebien': '2'}"}
}
}
kitchen_and_heating_type = {
'Kitchen type': {
'url_key': 'idtypecuisine',
'value': {
'Separated kitchen': '3',
'Open kitchen': '2',
'Kitchenette': '5',
'Fitted kitchen': '9'
},
'example': {'description': 'an open kitchen:',
'url_key': "{'idtypecuisine': '2'}"}
},
'Heating type': {
'url_key': 'idtypechauffage',
'value': {
'individuel': '8192',
'central': '4096',
'electrique': '2048',
'gaz': '512',
'fuel': '1024',
'radiateur': '128',
'sol': '256'
},
'example': {'description': 'centralised underfloor heating:',
'url_key': "{'idtypechauffage': '4096, 256'}"}
}
}
amenities_and_ad_filters = {
'Filters': {
'Ad options': {
'Ad with video': {'url_key': 'video', 'value': '1'},
'Ad with virtual visit': {'url_key': 'vv', 'value': '1'},
'Ad with photos': {'url_key': 'photo', 'value': '15'},
'Exclusive': {'url_key': 'si_mandatexclusif', 'value': '1'},
'Price has changed': {'url_key': 'siBaissePrix', 'value': '1'}
},
'Amentities': {
'Last floor': {'url_key': 'si_dernieretage', 'value': '1'},
'Separated toilets': {'url_key': 'si_toilettes_separees', 'value': '1'},
'Bath tube': {'url_key': 'nb_salles_de_bainsmin', 'value': '1'},
'Bathroom': {'url_key': 'nb_salles_deaumin', 'value': '1'},
'Separate entrance': {'url_key': 'si_entree', 'value': '1'},
'Living room': {'url_key': 'si_sejour', 'value': '1'},
'Dining room': {'url_key': 'si_salle_a_manger', 'value': '1'},
'Terrace': {'url_key': 'si_terrasse', 'value': '1'},
'Balcony': {'url_key': 'nb_balconsmin', 'value': 'Insert number as a string'},
'Car park': {'url_key': 'si_parkings', 'value': '1'},
'Car box': {'url_key': 'si_boxes', 'value': '1'},
'Cellar': {'url_key': 'si_cave', 'value': '1'},
'Fire place': {'url_key': 'si_cheminee', 'value': '1'},
'Wooden floor': {'url_key': 'si_parquet', 'value': '1'},
'Lift': {'url_key': 'si_ascenseur', 'value': '1'},
'Swimming pool': {'url_key': 'si_piscine', 'value': '1'},
'Built-in wardrobe': {'url_key': 'si_placards', 'value': '1'},
'Interphone': {'url_key': 'si_interphone', 'value': '1'},
'Security code': {'url_key': 'si_digicode', 'value': '1'},
'Concierge': {'url_key': 'si_gardien', 'value': '1'},
'Disable access': {'url_key': 'si_handicape', 'value': '1'},
'Alarm': {'url_key': 'si_alarme', 'value': '1'},
'Without vis-a-vis': {'url_key': 'si_visavis', 'value': '1'},
'Nice view': {'url_key': 'si_vue', 'value': '1'},
'South facing': {'url_key': 'si_sud', 'value': '1'},
'Air conditioning': {'url_key': 'si_climatisation', 'value': '1'}
}
},
'example': {'description': 'add a lift, a parking and the air conditioning.',
'url_key': "{'si_ascenseur': '1', 'si_climatisation': '1', 'si_parkings': '1'}"}
}
property_size = {
'Filters': {
'Property size': {
'Minimum price': {'url_key': 'pxmin', 'value': 'Insert number as a string'},
'Maximum price': {'url_key': 'pxmax', 'value': 'Insert number as a string'},
'Minimum surface': {'url_key': 'surfacemin', 'value': 'Insert number as a string'},
'Maximum surface': {'url_key': 'surfacemax', 'value': 'Insert number as a string'},
'Number of rooms': {'url_key': 'nb_pieces', 'value': 'Insert number as a string'},
'Lower floor': {'url_key': 'etagemin', 'value': 'Insert number as a string'},
'Higher floor': {'url_key': 'etagemax', 'value': 'Insert number as a string'},
'Number fo bedrooms': {'url_key': 'nb_chambres', 'value': 'Insert number as a string'},
'Minimum land surface': {'url_key': 'surf_terrainmin', 'value': 'Insert number as a string'},
'Maximum land surface': {'url_key': 'surf_terrainmax', 'value': 'Insert number as a string'}
}
},
'example': {'description': 'look for a minimum surface of 70 sqm, 2 bedrooms for maximum 500 000 euros:',
'url_key': "{'surfacemin': '70', 'nb_chambres': '2', 'pxmax': '500000'}"}
}
selection_labels = {
'1': {'fun': print_type_options, 'arg': sort_by},
'2': {'fun': print_type_options, 'arg': property_type},
'3': {'fun': print_binary_and_numeric_options, 'arg': property_size},
'4': {'fun': print_type_options, 'arg': kitchen_and_heating_type},
'5': {'fun': print_binary_and_numeric_options, 'arg': amenities_and_ad_filters}
}
accepted_selection = ['sort_by', 'property_type',
'property_size', 'kitchen_and_heating_type',
'amenities_and_ad_filters', 'print_all']
selection = kwargs.get('selection')
if selection not in accepted_selection:
print_choice(selection_labels)
elif selection == 'print_all':
for k, v in selection_labels.items():
print_options = v.get('fun')
option = v.get('arg')
print_options(option)
else:
try:
print_type_options(eval(selection))
except: |
show_search_filters() | print_binary_and_numeric_options(eval(selection)) | random_line_split |
__init__.py | from bs4 import BeautifulSoup
from unicodedata import normalize
import re
import requests
from requests.exceptions import RequestException
from time import sleep
from pathlib import Path
import pandas as pd
def requests_get(*args, **kwargs):
"""
Retries if a RequestException is raised (could be a connection error or
a timeout).
"""
logger = kwargs.pop('logger', None)
s = requests.Session()
s.headers[
'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'
try:
return s.get(*args, **kwargs)
except RequestException as exc:
if logger:
logger.warning('Request failed (%s). Retrying ...', exc)
return s.get(*args, **kwargs)
def create_param_url(search_params: dict):
url_string = ""
for key, value in search_params.items():
url_string = url_string + "&" + key + "=" + value
return url_string
def print_results(results: dict):
print(f"** Annonce {results['idannonce']} **")
for key, value in results.items():
print(f"'{key}': '{value}'")
print("\n\n")
class SelogerBase(object):
"""
Base class for all Seloger wrapper
Parameters
----------
class_filters : dict
Main search options
ex. {'transaction_type':['achat'], 'bien': ['appartement', 'maison'], 'naturebien': ['ancien', 'neuf']}
type_of_search: str
Can be either 'base', for ads of properties on the market, or 'biens-vendus' for the search on the property sold section.
location : dict
Either one of the following:
postcode (ex. {'code_postal': 75015} or {'code_postal': 75})
INSEE code (ex. {'code_INSEE': 75115})
Location name (ex. {'location_name': 'PARIS'})
*argv : str
Search options from binary_filter_options
**kwargs: dict ex.{'delay': 2}
Other search options or tweaking parameters
delay: number of seconds between requests, used to avoid overcharging servers
Returns
-------
"""
def __init__(self, **kwargs):
# Get parameters
self.delay = kwargs.get('delay') or 3
def get_current_parameters(self, search_url=True, *args, **kwargs):
|
def get_pages(self, **kwargs):
"""
:param kwargs:
max_num_pages: maximum number of pages to be processed. If left empty, it is set to its maximum number 100.
:return: a generator of HTML parsed result pages.
"""
max_num_pages = kwargs.get('max_num_pages') or 100
results_per_page = 20
try:
print(f"Get pages from base url {self.url}\n", "...")
page0 = requests_get(self.url)
print("Request successful.")
except:
print('ERROR: too many redirects - They might have detected the crawler, try changing ip.')
return
print("Parsing page\n", "...")
page_parsed = BeautifulSoup(page0.content, 'html.parser')
# Check validity of the page
try:
if page_parsed.find('meta').attrs['name'] == 'robots':
print('ERROR: invalid result page - They might have detected the crawler, try changing ip.')
return
except KeyError:
print(f"Valid response from {self.url}")
num_results = re.search('\s?"nbresults"\s+:\s? "(\d+[^"]*)"', page_parsed.text).group(1)
num_results = int(num_results.replace("\xa0", ""))
num_pages = num_results // results_per_page + 1
if num_pages > max_num_pages:
num_pages = max_num_pages
print(f"The search returned {num_results} results.")
print(f"{results_per_page*num_pages} results in {num_pages} pages will be processed.")
current_page_num = int(re.search('\s?"nbpage"\s+:\s? "(\d+[^"]*)"', page_parsed.text).group(1))
while current_page_num <= num_pages:
if current_page_num == 1:
current_page_parsed = page_parsed
print(f"Page {current_page_num} parsed")
else:
current_page_url = self.url + "&LISTING-LISTpg=" + str(current_page_num)
print(f"Get url {current_page_url}")
sleep(self.delay)
current_page = requests_get(current_page_url)
current_page_parsed = BeautifulSoup(current_page.content, 'html.parser')
print(f"Page {current_page_num} parsed")
current_page_num += 1
yield current_page_parsed
def get_results(self, max_num_pages=None, **kwargs):
"""
:param
kwargs:
pages: a generator created with get_pages(). This parameter overrides the other two.
max_number_pages: int, if empty it is set to its maximum number 100.
print_results: int, print a number per page of results for control.
:return: A generator of dictionaries each corresponding to a property ad
"""
pages = kwargs.get('pages')
for page in pages or self.get_pages(max_num_pages=max_num_pages):
params = self.get_current_parameters(False, page)
properties = params['products']
n = kwargs.get('print_results')
printed_results = 0
for ad in properties:
if n:
while printed_results <= n:
print_results(ad)
printed_results += 1
yield ad
def results_to_dataframe(self, max_num_pages=None, **kwargs):
results = kwargs.get('results')
df = pd.DataFrame()
for ad in results or self.get_results(max_num_pages=max_num_pages):
d = pd.DataFrame.from_dict(ad)
df = df.append(d)
df.drop(['affichagetype', 'idtypepublicationsourcecouplage', 'produitsvisibilite'], axis=1, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
for column in df.columns:
if re.search("^nb", column) or column == 'prix' or column == 'surface':
df[column] = pd.to_numeric(df[column].str.replace(',', '.'))
return df
class SeLogerAchat(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=2" + create_param_url(search_params=search_params)
super(SeLogerAchat, self).__init__()
class SeLogerLocation(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=1" + create_param_url(search_params=search_params)
super(SeLogerLocation, self).__init__()
class SeLogerLocationTemporaire(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=3" + create_param_url(search_params=search_params)
super(SeLogerLocationTemporaire, self).__init__()
class SeLogerLocationViager(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=5" + create_param_url(search_params=search_params)
super(SeLogerLocationViager, self).__init__()
class SeLogerInvestissement(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=6" + create_param_url(search_params=search_params)
super(SeLogerInvestissement, self).__init__()
class SeLogerLocationVacances(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=4" + create_param_url(search_params=search_params)
super(SeLogerLocationVacances, self).__init__()
class SeLogerBiensVendus(SelogerBase):
def __init__(self, search_params):
self.url = "http://biens-vendus.seloger.com/list.htm?" + "idtt=4" + create_param_url(
search_params=search_params)
super(SeLogerBiensVendus, self).__init__()
# Show help for search filter options
def show_search_filters(**kwargs):
def print_type_options(type_options):
print("\n")
for option_title, option_value in type_options.items():
print("\t" + option_title + ":")
print("\t-----")
print("\t\t URL key:", "'" + option_value['url_key'] + "'")
print("\t\t URL key options:")
for option, option_api_value in option_value['value'].items():
print("\t\t\t* " + option + ":", "'" + option_api_value + "'")
search_example = option_value['example']
print("\n")
print("\t\tIf you want to", search_example['description'], search_example['url_key'])
print("\n")
def print_binary_and_numeric_options(search_options):
print("\n")
for option_title, option_value in search_options['Filters'].items():
print("\t" + option_title + ":")
print("\t-----")
for option, option_api_value in option_value.items():
print("\t\t\t* " + option + ":",
"\t{'" + option_api_value['url_key'] + "': " + "'" + option_api_value['value'] + "'}")
search_example = amenities_and_ad_filters['example']
print("\n")
print("\t\tIf you want to", search_example['description'], search_example['url_key'])
print("\n")
def print_choice(selection_labels):
print("What filters do you wnt to know about (quit with 'q')?")
print(
"1. Sort options \n2. Property types \n3. Price, size and number of rooms \n4. Kitchen and heating types \n5. Amenities and ad filters")
sel = input(" > ")
if sel == 'q':
return
print_search_filters = selection_labels[sel].get('fun')
search_filters = selection_labels[sel].get('arg')
print_search_filters(search_filters)
print_choice(selection_labels)
sort_by = {
'Sorting options': {
'url_key': 'tri',
'value': {
'By selection': 'initial',
'By price': 'a_px',
'By surface': 'a_surface',
'By location': 'a_ville',
'By date': 'd_dt_crea'
},
'example': {'description': 'sort the ads by creation date:',
'url_key': "{'tri': 'd_dt_crea'}"}
}
}
property_type = {
'Property type': {
'url_key': 'idtypebien',
'value': {
'Apartment': '1',
'House': '2',
'Car park': '3',
'Shop': '6',
'Commercial': '7',
'Office': '8',
'Lofts - Ateliers - Land': '9',
'Various': '10',
'Property': '11',
'Building': '12',
'Castle': '13',
'Hotels Particuliers': '14'
},
'example': {'description': 'look for houses and apartments only:',
'url_key': "{'idtypebien': '1,2'}"}
},
'Building age': {
'url_key': 'naturebien',
'value': {
'old': '1',
'New': '2',
'In construction': '4'
},
'example': {'description': 'look for new construction only:',
'url_key': "{'naturebien': '2'}"}
}
}
kitchen_and_heating_type = {
'Kitchen type': {
'url_key': 'idtypecuisine',
'value': {
'Separated kitchen': '3',
'Open kitchen': '2',
'Kitchenette': '5',
'Fitted kitchen': '9'
},
'example': {'description': 'an open kitchen:',
'url_key': "{'idtypecuisine': '2'}"}
},
'Heating type': {
'url_key': 'idtypechauffage',
'value': {
'individuel': '8192',
'central': '4096',
'electrique': '2048',
'gaz': '512',
'fuel': '1024',
'radiateur': '128',
'sol': '256'
},
'example': {'description': 'centralised underfloor heating:',
'url_key': "{'idtypechauffage': '4096, 256'}"}
}
}
amenities_and_ad_filters = {
'Filters': {
'Ad options': {
'Ad with video': {'url_key': 'video', 'value': '1'},
'Ad with virtual visit': {'url_key': 'vv', 'value': '1'},
'Ad with photos': {'url_key': 'photo', 'value': '15'},
'Exclusive': {'url_key': 'si_mandatexclusif', 'value': '1'},
'Price has changed': {'url_key': 'siBaissePrix', 'value': '1'}
},
'Amentities': {
'Last floor': {'url_key': 'si_dernieretage', 'value': '1'},
'Separated toilets': {'url_key': 'si_toilettes_separees', 'value': '1'},
'Bath tube': {'url_key': 'nb_salles_de_bainsmin', 'value': '1'},
'Bathroom': {'url_key': 'nb_salles_deaumin', 'value': '1'},
'Separate entrance': {'url_key': 'si_entree', 'value': '1'},
'Living room': {'url_key': 'si_sejour', 'value': '1'},
'Dining room': {'url_key': 'si_salle_a_manger', 'value': '1'},
'Terrace': {'url_key': 'si_terrasse', 'value': '1'},
'Balcony': {'url_key': 'nb_balconsmin', 'value': 'Insert number as a string'},
'Car park': {'url_key': 'si_parkings', 'value': '1'},
'Car box': {'url_key': 'si_boxes', 'value': '1'},
'Cellar': {'url_key': 'si_cave', 'value': '1'},
'Fire place': {'url_key': 'si_cheminee', 'value': '1'},
'Wooden floor': {'url_key': 'si_parquet', 'value': '1'},
'Lift': {'url_key': 'si_ascenseur', 'value': '1'},
'Swimming pool': {'url_key': 'si_piscine', 'value': '1'},
'Built-in wardrobe': {'url_key': 'si_placards', 'value': '1'},
'Interphone': {'url_key': 'si_interphone', 'value': '1'},
'Security code': {'url_key': 'si_digicode', 'value': '1'},
'Concierge': {'url_key': 'si_gardien', 'value': '1'},
'Disable access': {'url_key': 'si_handicape', 'value': '1'},
'Alarm': {'url_key': 'si_alarme', 'value': '1'},
'Without vis-a-vis': {'url_key': 'si_visavis', 'value': '1'},
'Nice view': {'url_key': 'si_vue', 'value': '1'},
'South facing': {'url_key': 'si_sud', 'value': '1'},
'Air conditioning': {'url_key': 'si_climatisation', 'value': '1'}
}
},
'example': {'description': 'add a lift, a parking and the air conditioning.',
'url_key': "{'si_ascenseur': '1', 'si_climatisation': '1', 'si_parkings': '1'}"}
}
property_size = {
'Filters': {
'Property size': {
'Minimum price': {'url_key': 'pxmin', 'value': 'Insert number as a string'},
'Maximum price': {'url_key': 'pxmax', 'value': 'Insert number as a string'},
'Minimum surface': {'url_key': 'surfacemin', 'value': 'Insert number as a string'},
'Maximum surface': {'url_key': 'surfacemax', 'value': 'Insert number as a string'},
'Number of rooms': {'url_key': 'nb_pieces', 'value': 'Insert number as a string'},
'Lower floor': {'url_key': 'etagemin', 'value': 'Insert number as a string'},
'Higher floor': {'url_key': 'etagemax', 'value': 'Insert number as a string'},
'Number fo bedrooms': {'url_key': 'nb_chambres', 'value': 'Insert number as a string'},
'Minimum land surface': {'url_key': 'surf_terrainmin', 'value': 'Insert number as a string'},
'Maximum land surface': {'url_key': 'surf_terrainmax', 'value': 'Insert number as a string'}
}
},
'example': {'description': 'look for a minimum surface of 70 sqm, 2 bedrooms for maximum 500 000 euros:',
'url_key': "{'surfacemin': '70', 'nb_chambres': '2', 'pxmax': '500000'}"}
}
selection_labels = {
'1': {'fun': print_type_options, 'arg': sort_by},
'2': {'fun': print_type_options, 'arg': property_type},
'3': {'fun': print_binary_and_numeric_options, 'arg': property_size},
'4': {'fun': print_type_options, 'arg': kitchen_and_heating_type},
'5': {'fun': print_binary_and_numeric_options, 'arg': amenities_and_ad_filters}
}
accepted_selection = ['sort_by', 'property_type',
'property_size', 'kitchen_and_heating_type',
'amenities_and_ad_filters', 'print_all']
selection = kwargs.get('selection')
if selection not in accepted_selection:
print_choice(selection_labels)
elif selection == 'print_all':
for k, v in selection_labels.items():
print_options = v.get('fun')
option = v.get('arg')
print_options(option)
else:
try:
print_type_options(eval(selection))
except:
print_binary_and_numeric_options(eval(selection))
show_search_filters()
| """
Retrieve search parameters from the html page of a search on Seloger.com
:param search_url: The page url is passed as an input (True) or a parsed page (False).
:param args: A BeautifulSoup parsed page.
:param kwargs:
write_to: a string with the path and name of a file to save the html of the url or the parsed page.
:return: a dictionary with the search parameters as they appear in the json of html page.
"""
if search_url:
try:
print(f"Get pages from base url {self.url}\n", "...")
page0 = requests_get(self.url)
print("Request successful.")
except:
print('ERROR: too many redirects - They might have detected the crawler, try changing ip.')
return
print("Parsing page\n", "...")
page_parsed = BeautifulSoup(page0.content, 'html.parser')
# Check validity of the page
try:
if page_parsed.find('meta').attrs['name'] == 'robots':
print('ERROR: invalid result page - They might have detected the crawler, try changing ip.')
return
except KeyError:
print(f"Valid response from {self.url}")
else:
page_parsed = args[0]
write_to = kwargs.get('write_to')
# Save html to file
if write_to:
my_file = Path(write_to)
if my_file.is_file():
print(f"{write_to} exists already. Do you want to overwrite?\n")
response = input("Y/N > ")
if response == 'Y' or response == 'y':
write_to_sure = write_to
else:
print("Please, give type another file path:\n")
response = input("new path to file > ")
write_to_sure = response
with open(write_to_sure, 'w+') as file:
file.write(page_parsed.text)
# Extact the json from the JavaScript of the page
page_data = page_parsed.find('div', {'class': 'c-wrap-main'})
page_data_str = normalize('NFKD', page_data.prettify())
page_data_str_minified = page_data_str.replace('\n', '').replace('\r', '').replace(' ', '').replace("true",
"True").replace(
"false", "False")
json_str = re.search('({.*});ava.*', page_data_str_minified).group(1)
params = eval(json_str)
return params | identifier_body |
__init__.py | from bs4 import BeautifulSoup
from unicodedata import normalize
import re
import requests
from requests.exceptions import RequestException
from time import sleep
from pathlib import Path
import pandas as pd
def requests_get(*args, **kwargs):
"""
Retries if a RequestException is raised (could be a connection error or
a timeout).
"""
logger = kwargs.pop('logger', None)
s = requests.Session()
s.headers[
'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'
try:
return s.get(*args, **kwargs)
except RequestException as exc:
if logger:
logger.warning('Request failed (%s). Retrying ...', exc)
return s.get(*args, **kwargs)
def create_param_url(search_params: dict):
url_string = ""
for key, value in search_params.items():
url_string = url_string + "&" + key + "=" + value
return url_string
def print_results(results: dict):
print(f"** Annonce {results['idannonce']} **")
for key, value in results.items():
print(f"'{key}': '{value}'")
print("\n\n")
class | (object):
"""
Base class for all Seloger wrapper
Parameters
----------
class_filters : dict
Main search options
ex. {'transaction_type':['achat'], 'bien': ['appartement', 'maison'], 'naturebien': ['ancien', 'neuf']}
type_of_search: str
Can be either 'base', for ads of properties on the market, or 'biens-vendus' for the search on the property sold section.
location : dict
Either one of the following:
postcode (ex. {'code_postal': 75015} or {'code_postal': 75})
INSEE code (ex. {'code_INSEE': 75115})
Location name (ex. {'location_name': 'PARIS'})
*argv : str
Search options from binary_filter_options
**kwargs: dict ex.{'delay': 2}
Other search options or tweaking parameters
delay: number of seconds between requests, used to avoid overcharging servers
Returns
-------
"""
def __init__(self, **kwargs):
# Get parameters
self.delay = kwargs.get('delay') or 3
def get_current_parameters(self, search_url=True, *args, **kwargs):
"""
Retrieve search parameters from the html page of a search on Seloger.com
:param search_url: The page url is passed as an input (True) or a parsed page (False).
:param args: A BeautifulSoup parsed page.
:param kwargs:
write_to: a string with the path and name of a file to save the html of the url or the parsed page.
:return: a dictionary with the search parameters as they appear in the json of html page.
"""
if search_url:
try:
print(f"Get pages from base url {self.url}\n", "...")
page0 = requests_get(self.url)
print("Request successful.")
except:
print('ERROR: too many redirects - They might have detected the crawler, try changing ip.')
return
print("Parsing page\n", "...")
page_parsed = BeautifulSoup(page0.content, 'html.parser')
# Check validity of the page
try:
if page_parsed.find('meta').attrs['name'] == 'robots':
print('ERROR: invalid result page - They might have detected the crawler, try changing ip.')
return
except KeyError:
print(f"Valid response from {self.url}")
else:
page_parsed = args[0]
write_to = kwargs.get('write_to')
# Save html to file
if write_to:
my_file = Path(write_to)
if my_file.is_file():
print(f"{write_to} exists already. Do you want to overwrite?\n")
response = input("Y/N > ")
if response == 'Y' or response == 'y':
write_to_sure = write_to
else:
print("Please, give type another file path:\n")
response = input("new path to file > ")
write_to_sure = response
with open(write_to_sure, 'w+') as file:
file.write(page_parsed.text)
# Extact the json from the JavaScript of the page
page_data = page_parsed.find('div', {'class': 'c-wrap-main'})
page_data_str = normalize('NFKD', page_data.prettify())
page_data_str_minified = page_data_str.replace('\n', '').replace('\r', '').replace(' ', '').replace("true",
"True").replace(
"false", "False")
json_str = re.search('({.*});ava.*', page_data_str_minified).group(1)
params = eval(json_str)
return params
def get_pages(self, **kwargs):
"""
:param kwargs:
max_num_pages: maximum number of pages to be processed. If left empty, it is set to its maximum number 100.
:return: a generator of HTML parsed result pages.
"""
max_num_pages = kwargs.get('max_num_pages') or 100
results_per_page = 20
try:
print(f"Get pages from base url {self.url}\n", "...")
page0 = requests_get(self.url)
print("Request successful.")
except:
print('ERROR: too many redirects - They might have detected the crawler, try changing ip.')
return
print("Parsing page\n", "...")
page_parsed = BeautifulSoup(page0.content, 'html.parser')
# Check validity of the page
try:
if page_parsed.find('meta').attrs['name'] == 'robots':
print('ERROR: invalid result page - They might have detected the crawler, try changing ip.')
return
except KeyError:
print(f"Valid response from {self.url}")
num_results = re.search('\s?"nbresults"\s+:\s? "(\d+[^"]*)"', page_parsed.text).group(1)
num_results = int(num_results.replace("\xa0", ""))
num_pages = num_results // results_per_page + 1
if num_pages > max_num_pages:
num_pages = max_num_pages
print(f"The search returned {num_results} results.")
print(f"{results_per_page*num_pages} results in {num_pages} pages will be processed.")
current_page_num = int(re.search('\s?"nbpage"\s+:\s? "(\d+[^"]*)"', page_parsed.text).group(1))
while current_page_num <= num_pages:
if current_page_num == 1:
current_page_parsed = page_parsed
print(f"Page {current_page_num} parsed")
else:
current_page_url = self.url + "&LISTING-LISTpg=" + str(current_page_num)
print(f"Get url {current_page_url}")
sleep(self.delay)
current_page = requests_get(current_page_url)
current_page_parsed = BeautifulSoup(current_page.content, 'html.parser')
print(f"Page {current_page_num} parsed")
current_page_num += 1
yield current_page_parsed
def get_results(self, max_num_pages=None, **kwargs):
"""
:param
kwargs:
pages: a generator created with get_pages(). This parameter overrides the other two.
max_number_pages: int, if empty it is set to its maximum number 100.
print_results: int, print a number per page of results for control.
:return: A generator of dictionaries each corresponding to a property ad
"""
pages = kwargs.get('pages')
for page in pages or self.get_pages(max_num_pages=max_num_pages):
params = self.get_current_parameters(False, page)
properties = params['products']
n = kwargs.get('print_results')
printed_results = 0
for ad in properties:
if n:
while printed_results <= n:
print_results(ad)
printed_results += 1
yield ad
def results_to_dataframe(self, max_num_pages=None, **kwargs):
results = kwargs.get('results')
df = pd.DataFrame()
for ad in results or self.get_results(max_num_pages=max_num_pages):
d = pd.DataFrame.from_dict(ad)
df = df.append(d)
df.drop(['affichagetype', 'idtypepublicationsourcecouplage', 'produitsvisibilite'], axis=1, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
for column in df.columns:
if re.search("^nb", column) or column == 'prix' or column == 'surface':
df[column] = pd.to_numeric(df[column].str.replace(',', '.'))
return df
class SeLogerAchat(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=2" + create_param_url(search_params=search_params)
super(SeLogerAchat, self).__init__()
class SeLogerLocation(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=1" + create_param_url(search_params=search_params)
super(SeLogerLocation, self).__init__()
class SeLogerLocationTemporaire(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=3" + create_param_url(search_params=search_params)
super(SeLogerLocationTemporaire, self).__init__()
class SeLogerLocationViager(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=5" + create_param_url(search_params=search_params)
super(SeLogerLocationViager, self).__init__()
class SeLogerInvestissement(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=6" + create_param_url(search_params=search_params)
super(SeLogerInvestissement, self).__init__()
class SeLogerLocationVacances(SelogerBase):
def __init__(self, search_params):
self.url = "http://www.seloger.com/list.htm?" + "idtt=4" + create_param_url(search_params=search_params)
super(SeLogerLocationVacances, self).__init__()
class SeLogerBiensVendus(SelogerBase):
def __init__(self, search_params):
self.url = "http://biens-vendus.seloger.com/list.htm?" + "idtt=4" + create_param_url(
search_params=search_params)
super(SeLogerBiensVendus, self).__init__()
# Show help for search filter options
def show_search_filters(**kwargs):
def print_type_options(type_options):
print("\n")
for option_title, option_value in type_options.items():
print("\t" + option_title + ":")
print("\t-----")
print("\t\t URL key:", "'" + option_value['url_key'] + "'")
print("\t\t URL key options:")
for option, option_api_value in option_value['value'].items():
print("\t\t\t* " + option + ":", "'" + option_api_value + "'")
search_example = option_value['example']
print("\n")
print("\t\tIf you want to", search_example['description'], search_example['url_key'])
print("\n")
def print_binary_and_numeric_options(search_options):
print("\n")
for option_title, option_value in search_options['Filters'].items():
print("\t" + option_title + ":")
print("\t-----")
for option, option_api_value in option_value.items():
print("\t\t\t* " + option + ":",
"\t{'" + option_api_value['url_key'] + "': " + "'" + option_api_value['value'] + "'}")
search_example = amenities_and_ad_filters['example']
print("\n")
print("\t\tIf you want to", search_example['description'], search_example['url_key'])
print("\n")
def print_choice(selection_labels):
print("What filters do you wnt to know about (quit with 'q')?")
print(
"1. Sort options \n2. Property types \n3. Price, size and number of rooms \n4. Kitchen and heating types \n5. Amenities and ad filters")
sel = input(" > ")
if sel == 'q':
return
print_search_filters = selection_labels[sel].get('fun')
search_filters = selection_labels[sel].get('arg')
print_search_filters(search_filters)
print_choice(selection_labels)
sort_by = {
'Sorting options': {
'url_key': 'tri',
'value': {
'By selection': 'initial',
'By price': 'a_px',
'By surface': 'a_surface',
'By location': 'a_ville',
'By date': 'd_dt_crea'
},
'example': {'description': 'sort the ads by creation date:',
'url_key': "{'tri': 'd_dt_crea'}"}
}
}
property_type = {
'Property type': {
'url_key': 'idtypebien',
'value': {
'Apartment': '1',
'House': '2',
'Car park': '3',
'Shop': '6',
'Commercial': '7',
'Office': '8',
'Lofts - Ateliers - Land': '9',
'Various': '10',
'Property': '11',
'Building': '12',
'Castle': '13',
'Hotels Particuliers': '14'
},
'example': {'description': 'look for houses and apartments only:',
'url_key': "{'idtypebien': '1,2'}"}
},
'Building age': {
'url_key': 'naturebien',
'value': {
'old': '1',
'New': '2',
'In construction': '4'
},
'example': {'description': 'look for new construction only:',
'url_key': "{'naturebien': '2'}"}
}
}
kitchen_and_heating_type = {
'Kitchen type': {
'url_key': 'idtypecuisine',
'value': {
'Separated kitchen': '3',
'Open kitchen': '2',
'Kitchenette': '5',
'Fitted kitchen': '9'
},
'example': {'description': 'an open kitchen:',
'url_key': "{'idtypecuisine': '2'}"}
},
'Heating type': {
'url_key': 'idtypechauffage',
'value': {
'individuel': '8192',
'central': '4096',
'electrique': '2048',
'gaz': '512',
'fuel': '1024',
'radiateur': '128',
'sol': '256'
},
'example': {'description': 'centralised underfloor heating:',
'url_key': "{'idtypechauffage': '4096, 256'}"}
}
}
amenities_and_ad_filters = {
'Filters': {
'Ad options': {
'Ad with video': {'url_key': 'video', 'value': '1'},
'Ad with virtual visit': {'url_key': 'vv', 'value': '1'},
'Ad with photos': {'url_key': 'photo', 'value': '15'},
'Exclusive': {'url_key': 'si_mandatexclusif', 'value': '1'},
'Price has changed': {'url_key': 'siBaissePrix', 'value': '1'}
},
'Amentities': {
'Last floor': {'url_key': 'si_dernieretage', 'value': '1'},
'Separated toilets': {'url_key': 'si_toilettes_separees', 'value': '1'},
'Bath tube': {'url_key': 'nb_salles_de_bainsmin', 'value': '1'},
'Bathroom': {'url_key': 'nb_salles_deaumin', 'value': '1'},
'Separate entrance': {'url_key': 'si_entree', 'value': '1'},
'Living room': {'url_key': 'si_sejour', 'value': '1'},
'Dining room': {'url_key': 'si_salle_a_manger', 'value': '1'},
'Terrace': {'url_key': 'si_terrasse', 'value': '1'},
'Balcony': {'url_key': 'nb_balconsmin', 'value': 'Insert number as a string'},
'Car park': {'url_key': 'si_parkings', 'value': '1'},
'Car box': {'url_key': 'si_boxes', 'value': '1'},
'Cellar': {'url_key': 'si_cave', 'value': '1'},
'Fire place': {'url_key': 'si_cheminee', 'value': '1'},
'Wooden floor': {'url_key': 'si_parquet', 'value': '1'},
'Lift': {'url_key': 'si_ascenseur', 'value': '1'},
'Swimming pool': {'url_key': 'si_piscine', 'value': '1'},
'Built-in wardrobe': {'url_key': 'si_placards', 'value': '1'},
'Interphone': {'url_key': 'si_interphone', 'value': '1'},
'Security code': {'url_key': 'si_digicode', 'value': '1'},
'Concierge': {'url_key': 'si_gardien', 'value': '1'},
'Disable access': {'url_key': 'si_handicape', 'value': '1'},
'Alarm': {'url_key': 'si_alarme', 'value': '1'},
'Without vis-a-vis': {'url_key': 'si_visavis', 'value': '1'},
'Nice view': {'url_key': 'si_vue', 'value': '1'},
'South facing': {'url_key': 'si_sud', 'value': '1'},
'Air conditioning': {'url_key': 'si_climatisation', 'value': '1'}
}
},
'example': {'description': 'add a lift, a parking and the air conditioning.',
'url_key': "{'si_ascenseur': '1', 'si_climatisation': '1', 'si_parkings': '1'}"}
}
property_size = {
'Filters': {
'Property size': {
'Minimum price': {'url_key': 'pxmin', 'value': 'Insert number as a string'},
'Maximum price': {'url_key': 'pxmax', 'value': 'Insert number as a string'},
'Minimum surface': {'url_key': 'surfacemin', 'value': 'Insert number as a string'},
'Maximum surface': {'url_key': 'surfacemax', 'value': 'Insert number as a string'},
'Number of rooms': {'url_key': 'nb_pieces', 'value': 'Insert number as a string'},
'Lower floor': {'url_key': 'etagemin', 'value': 'Insert number as a string'},
'Higher floor': {'url_key': 'etagemax', 'value': 'Insert number as a string'},
'Number fo bedrooms': {'url_key': 'nb_chambres', 'value': 'Insert number as a string'},
'Minimum land surface': {'url_key': 'surf_terrainmin', 'value': 'Insert number as a string'},
'Maximum land surface': {'url_key': 'surf_terrainmax', 'value': 'Insert number as a string'}
}
},
'example': {'description': 'look for a minimum surface of 70 sqm, 2 bedrooms for maximum 500 000 euros:',
'url_key': "{'surfacemin': '70', 'nb_chambres': '2', 'pxmax': '500000'}"}
}
selection_labels = {
'1': {'fun': print_type_options, 'arg': sort_by},
'2': {'fun': print_type_options, 'arg': property_type},
'3': {'fun': print_binary_and_numeric_options, 'arg': property_size},
'4': {'fun': print_type_options, 'arg': kitchen_and_heating_type},
'5': {'fun': print_binary_and_numeric_options, 'arg': amenities_and_ad_filters}
}
accepted_selection = ['sort_by', 'property_type',
'property_size', 'kitchen_and_heating_type',
'amenities_and_ad_filters', 'print_all']
selection = kwargs.get('selection')
if selection not in accepted_selection:
print_choice(selection_labels)
elif selection == 'print_all':
for k, v in selection_labels.items():
print_options = v.get('fun')
option = v.get('arg')
print_options(option)
else:
try:
print_type_options(eval(selection))
except:
print_binary_and_numeric_options(eval(selection))
show_search_filters()
| SelogerBase | identifier_name |
PositionAPI.py | # -*- coding: utf-8 -*-
# @Time : 2019/2/22 16:45
# @Author : ZouJunLin
"""爬取持仓共用的API"""
import re
import csv,os,codecs,datetime
import pandas as pd
import xlwt
import threading
import json
import zipfile
def ResultToDatabase(info,result,sql):
info.mysql.ExecmanysNonQuery(sql,result)
def GetDCEPosition(info,TradingDay,ExchangeID):
result=list()
sql="select InstrumentID from SettlementInfo where TradingDay='%s' and Position>20000 and ExchangeID='%s' and IsFuture=1"%(TradingDay.strftime("%Y-%m-%d"),ExchangeID)
insertsql="INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
isexistsql = "select distinct [ExchangeID] from [Position_Top20] where TradingDay='%s'" % TradingDay.strftime(
"%Y-%m-%d")
templist=info.mysql.ExecQueryGetList(sql)
for i in templist:
code = str(re.match(r"\D+", i).group())
url="http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html?"
url=url+"memberDealPosiQuotes.variety=%s&memberDealPosiQuotes.trade_type=0&year=%s&month=%s&day=%s&contract.contract_id=%s&contract.variety_id=%s&contract="
url=url % (code, TradingDay.year, TradingDay.month - 1, TradingDay.day,i,code)
info.Set_QryPosition(ExchangeID,url, code, i,TradingDay.strftime("%Y%m%d"))
temp=GetDCEPositionProductData(info,i,TradingDay.strftime("%Y-%m-%d"))
result=result+temp
eixstlist = info.mysql.ExecQueryGetList(isexistsql)
ResultToDatabase(info,result,insertsql)
def GetDCEPositionProductData(info,InstrumentID,TradingDay):
templists=list()
exclelist=list()
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=8E650BA1F3AFEAB1611F8ADC6C186BD1; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1547791080,1548045741,1548146303,1548147105; sssssss=516c92f7sssssss_516c92f7',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
print info.QryPositionurl
html=info.mysplider.getUrlcontent(info.QryPositionurl,header=header)
listdata=info.mysplider.tableTolistByNum(html,"DCE",1)
"""write to database"""
for i in listdata[1:-1]:
# col=[]
# excelcol=[]
Rank=i[0]
if str(Rank).strip()=="":
if str(i[4]).strip() != "":
Rank=str(i[4]).strip()
if str(i[8]).strip() != "":
Rank = str(i[8]).strip()
if Rank=="":
continue
ExchangeID='DCE'
ParticipantABBR1=i[1]
CJ1=i[2]
CJ1_CHG=i[3]
ParticipantABBR2=i[5]
CJ2=i[6]
CJ2_CHG=i[7]
ParticipantABBR3=i[9]
CJ3=i[10]
CJ3_CHG=i[11]
col=[TradingDay,InstrumentID,ExchangeID,Rank,'0',ParticipantABBR1,CJ1,CJ1_CHG,ParticipantABBR2,CJ2,CJ2_CHG,ParticipantABBR3,CJ3,CJ3_CHG]
# excelcol=[int(Rank),ParticipantABBR1,int(CJ1),int(CJ1_CHG),int(Rank),ParticipantABBR2,int(CJ2),int(CJ2_CHG),int(Rank),ParticipantABBR3,int(CJ3),int(CJ3_CHG)]
# exclelist.append(excelcol)
templists.append(tuple(col))
# columns = [u'名次', u'会员简称', u'成交量(手)', u'增减', u'名次1',u'会员简称1', u'持买单量1', u'增减1',u'名次2', u'会员简称2', u'持卖单量2', u'增减2']
# excelDataToExcel(exclelist,ExchangeID,columns,info.QryPositionTradingDay,info.QryPositionInstrumentID)
return templists
def excelDataToExcel(datalist,ExchangeID,columns,TradingDay,InstrumentID):
df = pd.DataFrame(data=datalist, columns=columns)
# df[[u'成交量(手)', u'增减',u'持买单量1',u'增减1',u'持卖单量2',u'增减2']] = df[[u'成交量(手)',u'增减',u'持买单量1',u'增减1',u'持卖单量2',u'增减2']].apply(pd.to_numeric)
saveDirector = "D:/GitData/Top20Position/" + ExchangeID + "/" + TradingDay + "/"
if not os.path.exists(saveDirector):
os.mkdir(saveDirector)
savafile = saveDirector + TradingDay + "_" + InstrumentID + ".xlsx"
writer = pd.ExcelWriter(savafile, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', startrow=0, startcol=0, index=None)
workbook = writer.book
worksheet = writer.sheets['Sheet1']
if len(columns)==12:
worksheet.set_column('A:L', 14)
elif len(columns)==15:
worksheet.set_column('A:O', 14)
# Add a header format.
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'vcenter',
'border': 1
})
header_format.set_align('center')
header_format.set_align('vcenter')
writer.save()
def GetDCEStagedTurnover(info,TradingDay,ExchangeID):
beginmonth=TradingDay.strftime("%Y%m")
endmonth=TradingDay.strftime("%Y%m")
url = "http://www.dce.com.cn/publicweb/quotesdata/memberDealCh.html?"
url = url + "memberDealQuotes.variety=%s&memberDealQuotes.trade_type=0&memberDealQuotes.begin_month=%s&memberDealQuotes.end_month=%s"
sql="SELECT [InstrumentCode] FROM [PreTrade].[dbo].[ContractCode] where [ExchangeID]='%s'"%ExchangeID
templist = info.mysql.ExecQueryGetList(sql)
templist.append("all")
for i in templist:
Surl = url % (i, beginmonth, endmonth)
print Surl
info.Set_StagePosition(ExchangeID, Surl, i,beginmonth,endmonth)
GetDCEStagePosition(info)
def GetDCEStagePosition(info):
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=1311697EA3395C8127FD0BB9D51B1742; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1550114491,1550801897,1550826990,1550890979; Hm_lpvt_a50228174de2a93aee654389576b60fb=1550892897',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
html=info.mysplider.getUrlcontent(info.StagePositionurl,header=header)
listdata=info.mysplider.tableTolistByNum(html,"DCE",0)
templist = list()
begindate = info.StagePositionBeginTime
enddate = info.StagePositionEndTime
for i in listdata:
col = []
if str(i[0]).strip().find("名次") == -1 and str(i[0]).strip().find("共计") == -1 and str(i[0]).strip().find(
"总计") == -1:
if "null" not in i:
InstrumentID = info.StagePositionCode
Rank = str(i[0]).strip()
Type = '0'
ParticipantID1 = str(i[1]).strip()
ParticipantABBR1 = str(i[2]).strip()
CJ1 = str(i[3]).strip()
CJ1_Percent = str(i[4]).strip()
ParticipantID2 = str(i[6]).strip()
ParticipantABBR2 = str(i[7]).strip()
CJ2 = str(i[8]).strip()
CJ2_Percent = str(i[9]).strip()
col = [int(Rank), ParticipantID1, ParticipantABBR1, int(CJ1), CJ1_Percent, int(Rank),
ParticipantID2, ParticipantABBR2, float(CJ2), CJ2_Percent]
templist.append(tuple(col))
columns = [u'名次', u'会员号', u'会员名称', u'成交量(手)', u'成交量比重', u'名次1', u'会员号1', u'会员名称1', u'成交金额(亿元)', u'成交额比重']
df = pd.DataFrame(data=templist, columns=columns)
saveDirector = "D:/GitData/StagePosition/" + info.StagePositionExchangeID + "/" + begindate + "/"
if not os.path.exists(saveDirector):
os.mkdir(saveDirector)
savafile = saveDirector + begindate + "_" + InstrumentID + ".xlsx"
writer = pd.ExcelWriter(savafile, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', startrow=0, startcol=0, index=None)
workbook | ksheet = writer.sheets['Sheet1']
worksheet.set_column('A:J', 15)
# Add a header format.
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'vcenter',
'border': 1
})
header_format.set_align('center')
header_format.set_align('vcenter')
writer.save()
def GetDCEStagedStatistic(info,TradingDay,ExchangeID):
"""阶段性统计排名"""
beginmonth = TradingDay.strftime("%Y%m")
url = "http://www.dce.com.cn/publicweb/quotesdata/varietyMonthYearStatCh.html?"
url = url + "varietyMonthYearStatQuotes.trade_type=0&varietyMonthYearStatQuotes.begin_month=%s"
url = url%beginmonth
GetDCEStatistic(info,url,ExchangeID,beginmonth)
def GetDCEStatistic(info,url,ExchangeID,beginmonth):
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=1311697EA3395C8127FD0BB9D51B1742; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1550114491,1550801897,1550826990,1550890979; Hm_lpvt_a50228174de2a93aee654389576b60fb=1550892897',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
html = info.mysplider.getUrlcontent(url, header=header)
listdata = info.mysplider.tableTolistByNum(html, "DCE", 0)
print url
"""write to xls"""
ext = '.csv'
parent = "D:/GitData/StagePosition/"
if not os.path.exists(parent + ExchangeID):
os.mkdir(parent + ExchangeID)
if not os.path.exists(parent + ExchangeID + "/" + beginmonth):
os.mkdir(parent + ExchangeID + "/" + beginmonth)
filename = beginmonth+ext
filename = parent + ExchangeID + "/"+beginmonth+"/" + filename
col=[]
col0=listdata[0]
col1=listdata[1]
col=[col0[0],col1[0]+col0[1],col1[1],col1[2],col1[3]+col0[1],col1[4],col1[5]+col0[2],col1[6],col1[7],col1[8]+col0[2],col1[9],col1[10],col1[11],col1[12]]
listdata=listdata[1:]
listdata[0]=col
ListDataToExcel(listdata, filename)
def ListDataToExcel(listdata,filename):
"""a public method that list data write ext extension file"""
# file_backup=f = codecs.open(parent+info.QryPositionExchangeID+"/"+filename,'wb','utf-8')
csvfile = file(filename.decode("utf-8"), 'wb')
csvfile.write(codecs.BOM_UTF8)
writer=csv.writer(csvfile)
writer.writerows(listdata)
csvfile.close()
df_new = pd.read_csv(filename, encoding='utf-8')
writer = pd.ExcelWriter(filename.replace(".csv",".xlsx"))
df_new.to_excel(writer, index=False)
writer.save()
os.remove(filename)
def GetSHFEPosition(info,TradingDay,ExchangeID):
templists=[]
url="http://www.shfe.com.cn/data/dailydata/kx/pm"+TradingDay.strftime("%Y%m%d")+".dat"
print url
header={
}
insertsql = "INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantID1],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantID2],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantID3],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
html=info.mysplider.getUrlcontent(url,header=header)
data=json.loads(html)
data=data['o_cursor']
for i in data:
col=[]
if str(i['INSTRUMENTID']).find("all")==-1:
col = [TradingDay.strftime('%Y-%m-%d'), str(i['INSTRUMENTID']).strip(), ExchangeID, str(i['RANK']).strip(),'0', str(i['PARTICIPANTID1']).strip(),str(i['PARTICIPANTABBR1']).strip(), str(i['CJ1']).strip(), str(i['CJ1_CHG']).strip(),str(i['PARTICIPANTID2']).strip(),str(i['PARTICIPANTABBR2']).strip(), str(i['CJ2']).strip(), str(i['CJ2_CHG']).strip(),str(i['PARTICIPANTID3']).strip(),str(i['PARTICIPANTABBR3']).strip(), str(i['CJ3']).strip(), str(i['CJ3_CHG']).strip()]
if int(col[3])<=20:
templists.append(tuple(col))
ResultToDatabase(info,templists,insertsql)
def GetCZCEPosition(info, startdate, ExchangeID):
top20list=map(lambda x:str(x),range(1,21,1))
codeList=info.GetExchangeProduct(ExchangeID)
insertsql = "INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
header=info.GetExchangeHeader(ExchangeID)
url="http://www.czce.com.cn/cn/DFSStaticFiles/Future/%s/%s/FutureDataHolding.htm"%(startdate.strftime("%Y%m%d")[:4],startdate.strftime("%Y%m%d"))
print url
templists=list()
html=info.mysplider.getUrlcontent(url,header=header)
table =info.mysplider.tableTolist(html,ExchangeID)
for i in table:
if str(i[0]).strip()=="合计" or str(i[0]).strip()=="名次":
continue
temp= re.findall("[A-Za-z0-9]+",str(i[0]).strip())
if len(temp)==4:
TradeInstrument=temp[0]
continue
if len(temp)==1 and temp[0] in top20list:
TradingDay=startdate.strftime("%Y-%m-%d")
InstrumentID=TradeInstrument
if InstrumentID in codeList:
Type='1'
else:
Type='0'
Rank=str(i[0]).strip()
ParticipantABBR1=str(i[1]).strip()
CJ1=str(i[3]).strip()
CJ1_CHG=str(i[4]).strip()
ParticipantABBR2=str(i[5]).strip()
CJ2=str(i[6]).strip()
CJ2_CHG=str(i[7]).strip()
ParticipantABBR3=str(i[8]).strip()
CJ3=str(i[9]).strip()
CJ3_CHG=str(i[10]).strip()
col=[TradingDay, InstrumentID, ExchangeID, Rank,Type, ParticipantABBR1, CJ1, CJ1_CHG, ParticipantABBR2, CJ2, CJ2_CHG,ParticipantABBR3, CJ3, CJ3_CHG]
templists.append(tuple(col))
ResultToDatabase(info, templists, insertsql)
def zipDir(startdir,outFullName):
"""
压缩指定文件夹
:param dirpath: 目标文件夹路径
:param outFullName: 压缩文件保存路径+xxxx.zip
:return: 无
"""
zip = zipfile.ZipFile(outFullName,"w",zipfile.ZIP_DEFLATED)
for path,dirnames,filenames in os.walk(startdir):
# 去掉目标跟路径,只对目标文件夹下边的文件及文件夹进行压缩
fpath = path.replace(startdir,'')
for filename in filenames:
zip.write(os.path.join(path,filename),os.path.join(fpath,filename))
zip.close()
# z = zipfile.ZipFile(outFullName, 'w', zipfile.ZIP_DEFLATED)
#
# for dirpath, dirnames, filenames in os.walk(startdir):
# for filename in filenames:
# z.write(os.path.join(dirpath, filename))
# z.close()
def copy(sourPath,distPath): #sourPath原文件地址,distPath指定地址
fp = open(sourPath,'r')
fp1 = open(distPath,'w')
for i in fp:
fp1.write(i) #向新文件中写入数据
fp.close()
fp1.close() | = writer.book
wor | conditional_block |
PositionAPI.py | # -*- coding: utf-8 -*-
# @Time : 2019/2/22 16:45
# @Author : ZouJunLin
"""爬取持仓共用的API"""
import re
import csv,os,codecs,datetime
import pandas as pd
import xlwt
import threading
import json
import zipfile
def ResultToDatabase(info,result,sql):
info.mysql.ExecmanysNonQuery(sql,result)
def GetDCEPosition(info,TradingDay,ExchangeID):
result=list()
sql="select InstrumentID from SettlementInfo where TradingDay='%s' and Position>20000 and ExchangeID='%s' and IsFuture=1"%(TradingDay.strftime("%Y-%m-%d"),ExchangeID)
insertsql="INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
isexistsql = "select distinct [ExchangeID] from [Position_Top20] where TradingDay='%s'" % TradingDay.strftime(
"%Y-%m-%d")
templist=info.mysql.ExecQueryGetList(sql)
for i in templist:
code = str(re.match(r"\D+", i).group())
url="http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html?"
url=url+"memberDealPosiQuotes.variety=%s&memberDealPosiQuotes.trade_type=0&year=%s&month=%s&day=%s&contract.contract_id=%s&contract.variety_id=%s&contract="
url=url % (code, TradingDay.year, TradingDay.month - 1, TradingDay.day,i,code)
info.Set_QryPosition(ExchangeID,url, code, i,TradingDay.strftime("%Y%m%d"))
temp=GetDCEPositionProductData(info,i,TradingDay.strftime("%Y-%m-%d"))
result=result+temp
eixstlist = info.mysql.ExecQueryGetList(isexistsql)
ResultToDatabase(info,result,insertsql)
def GetDCEPositionProductData(info,InstrumentID,TradingDay):
templists=list()
exclelist=list()
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=8E650BA1F3AFEAB1611F8ADC6C186BD1; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1547791080,1548045741,1548146303,1548147105; sssssss=516c92f7sssssss_516c92f7',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
print info.QryPositionurl
html=info.mysplider.getUrlcontent(info.QryPositionurl,header=header)
listdata=info.mysplider.tableTolistByNum(html,"DCE",1)
"""write to database"""
for i in listdata[1:-1]:
# col=[]
# excelcol=[]
Rank=i[0]
if str(Rank).strip()=="":
if str(i[4]).strip() != "":
Rank=str(i[4]).strip()
if str(i[8]).strip() != "":
Rank = str(i[8]).strip()
if Rank=="":
continue
ExchangeID='DCE'
ParticipantABBR1=i[1]
CJ1=i[2]
CJ1_CHG=i[3]
ParticipantABBR2=i[5]
CJ2=i[6]
CJ2_CHG=i[7]
ParticipantABBR3=i[9]
CJ3=i[10]
CJ3_CHG=i[11]
col=[TradingDay,InstrumentID,ExchangeID,Rank,'0',ParticipantABBR1,CJ1,CJ1_CHG,ParticipantABBR2,CJ2,CJ2_CHG,ParticipantABBR3,CJ3,CJ3_CHG]
# excelcol=[int(Rank),ParticipantABBR1,int(CJ1),int(CJ1_CHG),int(Rank),ParticipantABBR2,int(CJ2),int(CJ2_CHG),int(Rank),ParticipantABBR3,int(CJ3),int(CJ3_CHG)]
# exclelist.append(excelcol)
templists.append(tuple(col))
# columns = [u'名次', u'会员简称', u'成交量(手)', u'增减', u'名次1',u'会员简称1', u'持买单量1', u'增减1',u'名次2', u'会员简称2', u'持卖单量2', u'增减2']
# excelDataToExcel(exclelist,ExchangeID,columns,info.QryPositionTradingDay,info.QryPositionInstrumentID)
return templists
def excelDataToExcel(datalist,ExchangeID,columns,TradingDay,InstrumentID):
df = pd.DataFrame(data=datalist, columns=columns)
# df[[u'成交量(手)', u'增减',u'持买单量1',u'增减1',u'持卖单量2',u'增减2']] = df[[u'成交量(手)',u'增减',u'持买单量1',u'增减1',u'持卖单量2',u'增减2']].apply(pd.to_numeric)
saveDirector = "D:/GitData/Top20Position/" + ExchangeID + "/" + TradingDay + "/"
if not os.path.exists(saveDirector):
os.mkdir(saveDirector)
savafile = saveDirector + TradingDay + "_" + InstrumentID + ".xlsx"
writer = pd.ExcelWriter(savafile, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', startrow=0, startcol=0, index=None)
workbook = writer.book
worksheet = writer.sheets['Sheet1']
if len(columns)==12:
worksheet.set_column('A:L', 14)
elif len(columns)==15:
worksheet.set_column('A:O', 14)
# Add a header format.
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'vcenter',
'border': 1
})
header_format.set_align('center')
header_format.set_align('vcenter')
writer.save()
def GetDCEStagedTurnover(info,TradingDay,ExchangeID):
beginmonth=TradingDay.strftime("%Y%m")
endmonth=TradingDay.strftime("%Y%m")
url = "http://www.dce.com.cn/publicweb/quotesdata/memberDealCh.html?"
url = url + "memberDealQuotes.variety=%s&memberDealQuotes.trade_type=0&memberDealQuotes.begin_month=%s&memberDealQuotes.end_month=%s"
sql="SELECT [InstrumentCode] FROM [PreTrade].[dbo].[ContractCode] where [ExchangeID]='%s'"%ExchangeID
templist = info.mysql.ExecQueryGetList(sql)
templist.append("all")
for i in templist:
Surl = url % (i, beginmonth, endmonth)
print Surl
info.Set_StagePosition(ExchangeID, Surl, i,beginmonth,endmonth)
GetDCEStagePosition(info)
def GetDCEStagePosition(info):
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=1311697EA3395C8127FD0BB9D51B1742; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1550114491,1550801897,1550826990,1550890979; Hm_lpvt_a50228174de2a93aee654389576b60fb=1550892897',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
html=info.mysplider.getUrlcontent(info.StagePositionurl,header=header)
listdata=info.mysplider.tableTolistByNum(html,"DCE",0)
templist = list()
begindate = info.StagePositionBeginTime
enddate = info.StagePositionEndTime
for i in listdata:
col = []
if str(i[0]).strip().find("名次") == -1 and str(i[0]).strip().find("共计") == -1 and str(i[0]).strip().find(
"总计") == -1:
if "null" not in i:
InstrumentID = info.StagePositionCode
Rank = str(i[0]).strip()
Type = '0'
ParticipantID1 = str(i[1]).strip()
ParticipantABBR1 = str(i[2]).strip()
CJ1 = str(i[3]).strip()
CJ1_Percent = str(i[4]).strip()
ParticipantID2 = str(i[6]).strip()
ParticipantABBR2 = str(i[7]).strip()
CJ2 = str(i[8]).strip()
CJ2_Percent = str(i[9]).strip()
col = [int(Rank), ParticipantID1, ParticipantABBR1, int(CJ1), CJ1_Percent, int(Rank),
ParticipantID2, ParticipantABBR2, float(CJ2), CJ2_Percent]
templist.append(tuple(col))
columns = [u'名次', u'会员号', u'会员名称', u'成交量(手)', u'成交量比重', u'名次1', u'会员号1', u'会员名称1', u'成交金额(亿元)', u'成交额比重']
df = pd.DataFrame(data=templist, columns=columns)
saveDirector = "D:/GitData/StagePosition/" + info.StagePositionExchangeID + "/" + begindate + "/"
if not os.path.exists(saveDirector):
os.mkdir(saveDirector)
savafile = saveDirector + begindate + "_" + InstrumentID + ".xlsx"
writer = pd.ExcelWriter(savafile, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', startrow=0, startcol=0, index=None)
workbook = writer.book
worksheet = writer.sheets['Sheet1']
worksheet.set_column('A:J', 15)
# Add a header format.
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'vcenter',
'border': 1
})
header_format.set_align('center')
header_format.set_align('vcenter')
writer.save()
def GetDCEStagedStatistic(info,TradingDay,ExchangeID):
"""阶段性统计排名"""
beginmonth = TradingDay.strftime("%Y%m")
url = "http://www.dce.com.cn/publicweb/quotesdata/varietyMonthYearStatCh.html?"
url = url + "varietyMonthYearStatQuotes.trade_type=0&varietyMonthYearStatQuotes.begin_month=%s"
url = url%beginmonth
GetDCEStatistic(info,url,ExchangeID,beginmonth)
def GetDCEStatistic(info,url,ExchangeID,beginmonth):
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=1311697EA3395C8127FD0BB9D51B1742; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1550114491,1550801897,1550826990,1550890979; Hm_lpvt_a50228174de2a93aee654389576b60fb=1550892897',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
html = info.mysplider.getUrlcontent(url, header=header)
listdata = info.mysplider.tableTolistByNum(html, "DCE", 0)
print url
"""write to xls"""
ext = '.csv'
parent = "D:/GitData/StagePosition/"
if not os.path.exists(parent + ExchangeID):
os.mkdir(parent + ExchangeID)
if not os.path.exists(parent + ExchangeID + "/" + beginmonth):
os.mkdir(parent + ExchangeID + "/" + beginmonth)
filename = beginmonth+ext
filename = parent + ExchangeID + "/"+beginmonth+"/" + filename
col=[]
col0=listdata[0]
col1=listdata[1]
col=[col0[0],col1[0]+col0[1],col1[1],col1[2],col1[3]+col0[1],col1[4],col1[5]+col0[2],col1[6],col1[7],col1[8]+col0[2],col1[9],col1[10],col1[11],col1[12]]
listdata=listdata[1:]
listdata[0]=col
ListDataToExcel(listdata, filename)
def ListDataToExcel(listdata,filename):
"""a public method that list data write ext extension file"""
# file_backup=f = codecs.open(parent+info.QryPositionExchangeID+"/"+filename,'wb','utf-8')
csvfile = file(filename.decode("utf-8"), 'wb')
csvfile.write(codecs.BOM_UTF8)
writer=csv.writer(csvfile)
writer.writerows(listdata)
csvfile.close()
df_new = pd.read_csv(filename, encoding='utf-8')
writer = pd.ExcelWriter(filename.replace(".csv",".xlsx"))
df_new.to_excel(writer, index=False)
writer.save()
os.remove(filename)
def GetSHFEPosition(info,TradingDay,ExchangeID):
templists=[]
url="http://www.shfe.com.cn/data/dailydata/kx/pm"+TradingDay.strftime("%Y%m%d")+".dat"
print url
header={
}
insertsql = "INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantID1],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantID2],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantID3],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
html=info.mysplider.getUrlcontent(url,header=header)
data=json.loads(html)
data=data['o_cursor']
for i in data:
col=[]
if str(i['INSTRUMENTID']).find("all")==-1:
col = [TradingDay.strftime('%Y-%m-%d'), str(i['INSTRUMENTID']).strip(), ExchangeID, str(i['RANK']).strip(),'0', str(i['PARTICIPANTID1']).strip(),str(i['PARTICIPANTABBR1']).strip(), str(i['CJ1']).strip(), str(i['CJ1_CHG']).strip(),str(i['PARTICIPANTID2']).strip(),str(i['PARTICIPANTABBR2']).strip(), str(i['CJ2']).strip(), str(i['CJ2_CHG']).strip(),str(i['PARTICIPANTID3']).strip(),str(i['PARTICIPANTABBR3']).strip(), str(i['CJ3']).strip(), str(i['CJ3_CHG']).strip()]
if int(col[3])<=20:
templists.append(tuple(col))
ResultToDatabase(info,templists,insertsql)
def GetCZCEPosition(info, startdate, ExchangeID):
top20list=map(lambda x:str(x),range(1,21,1))
codeList=info.GetExchangeProduct(ExchangeID)
insertsql = "INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
header=info.GetExchangeHeader(ExchangeID)
url="http://www.czce.com.cn/cn/DFSStaticFiles/Future/%s/%s/FutureDataHolding.htm"%(startdate.strftime("%Y%m%d")[:4],startdate.strftime("%Y%m%d"))
print url
templists=list()
html=info.mysplider.getUrlcontent(url,header=header)
table =info.mysplider.tableTolist(html,ExchangeID)
for i in table: | if str(i[0]).strip()=="合计" or str(i[0]).strip()=="名次":
continue
temp= re.findall("[A-Za-z0-9]+",str(i[0]).strip())
if len(temp)==4:
TradeInstrument=temp[0]
continue
if len(temp)==1 and temp[0] in top20list:
TradingDay=startdate.strftime("%Y-%m-%d")
InstrumentID=TradeInstrument
if InstrumentID in codeList:
Type='1'
else:
Type='0'
Rank=str(i[0]).strip()
ParticipantABBR1=str(i[1]).strip()
CJ1=str(i[3]).strip()
CJ1_CHG=str(i[4]).strip()
ParticipantABBR2=str(i[5]).strip()
CJ2=str(i[6]).strip()
CJ2_CHG=str(i[7]).strip()
ParticipantABBR3=str(i[8]).strip()
CJ3=str(i[9]).strip()
CJ3_CHG=str(i[10]).strip()
col=[TradingDay, InstrumentID, ExchangeID, Rank,Type, ParticipantABBR1, CJ1, CJ1_CHG, ParticipantABBR2, CJ2, CJ2_CHG,ParticipantABBR3, CJ3, CJ3_CHG]
templists.append(tuple(col))
ResultToDatabase(info, templists, insertsql)
def zipDir(startdir,outFullName):
"""
压缩指定文件夹
:param dirpath: 目标文件夹路径
:param outFullName: 压缩文件保存路径+xxxx.zip
:return: 无
"""
zip = zipfile.ZipFile(outFullName,"w",zipfile.ZIP_DEFLATED)
for path,dirnames,filenames in os.walk(startdir):
# 去掉目标跟路径,只对目标文件夹下边的文件及文件夹进行压缩
fpath = path.replace(startdir,'')
for filename in filenames:
zip.write(os.path.join(path,filename),os.path.join(fpath,filename))
zip.close()
# z = zipfile.ZipFile(outFullName, 'w', zipfile.ZIP_DEFLATED)
#
# for dirpath, dirnames, filenames in os.walk(startdir):
# for filename in filenames:
# z.write(os.path.join(dirpath, filename))
# z.close()
def copy(sourPath,distPath): #sourPath原文件地址,distPath指定地址
fp = open(sourPath,'r')
fp1 = open(distPath,'w')
for i in fp:
fp1.write(i) #向新文件中写入数据
fp.close()
fp1.close() | random_line_split | |
PositionAPI.py | # -*- coding: utf-8 -*-
# @Time : 2019/2/22 16:45
# @Author : ZouJunLin
"""爬取持仓共用的API"""
import re
import csv,os,codecs,datetime
import pandas as pd
import xlwt
import threading
import json
import zipfile
def ResultToDatabase(info,result,sql):
info.mysql.ExecmanysNonQuery(sql,result)
def GetDCEPosition(info,TradingDay,ExchangeID):
result=list()
sql="select InstrumentID from SettlementInfo where TradingDay='%s' and Position>20000 and ExchangeID='%s' and IsFuture=1"%(TradingDay.strftime("%Y-%m-%d"),ExchangeID)
insertsql="INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
isexistsql = "select distinct [ExchangeID] from [Position_Top20] where TradingDay='%s'" % TradingDay.strftime(
"%Y-%m-%d")
templist=info.mysql.ExecQueryGetList(sql)
for i in templist:
code = str(re.match(r"\D+", i).group())
url="http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html?"
url=url+"memberDealPosiQuotes.variety=%s&memberDealPosiQuotes.trade_type=0&year=%s&month=%s&day=%s&contract.contract_id=%s&contract.variety_id=%s&contract="
url=url % (code, TradingDay.year, TradingDay.month - 1, TradingDay.day,i,code)
info.Set_QryPosition(ExchangeID,url, code, i,TradingDay.strftime("%Y%m%d"))
temp=GetDCEPositionProductData(info,i,TradingDay.strftime("%Y-%m-%d"))
result=result+temp
eixstlist = info.mysql.ExecQueryGetList(isexistsql)
ResultToDatabase(info,result,insertsql)
def GetDCEPositionProductData(info,InstrumentID,TradingDay):
templists=list()
exclelist=list()
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=8E650BA1F3AFEAB1611F8ADC6C186BD1; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1547791080,1548045741,1548146303,1548147105; sssssss=516c92f7sssssss_516c92f7',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
print info.QryPositionurl
html=info.mysplider.getUrlcontent(info.QryPositionurl,header=header)
listdata=info.mysplider.tableTolistByNum(html,"DCE",1)
"""write to database"""
for i in listdata[1:-1]:
# col=[]
# excelcol=[]
Rank=i[0]
if str(Rank).strip()=="":
if str(i[4]).strip() != "":
Rank=str(i[4]).strip()
if str(i[8]).strip() != "":
Rank = str(i[8]).strip()
if Rank=="":
continue
ExchangeID='DCE'
ParticipantABBR1=i[1]
CJ1=i[2]
CJ1_CHG=i[3]
ParticipantABBR2=i[5]
CJ2=i[6]
CJ2_CHG=i[7]
ParticipantABBR3=i[9]
CJ3=i[10]
CJ3_CHG=i[11]
col=[TradingDay,InstrumentID,ExchangeID,Rank,'0',ParticipantABBR1,CJ1,CJ1_CHG,ParticipantABBR2,CJ2,CJ2_CHG,ParticipantABBR3,CJ3,CJ3_CHG]
# excelcol=[int(Rank),ParticipantABBR1,int(CJ1),int(CJ1_CHG),int(Rank),ParticipantABBR2,int(CJ2),int(CJ2_CHG),int(Rank),ParticipantABBR3,int(CJ3),int(CJ3_CHG)]
# exclelist.append(excelcol)
templists.append(tuple(col))
# columns = [u'名次', u'会员简称', u'成交量(手)', u'增减', u'名次1',u'会员简称1', u'持买单量1', u'增减1',u'名次2', u'会员简称2', u'持卖单量2', u'增减2']
# excelDataToExcel(exclelist,ExchangeID,columns,info.QryPositionTradingDay,info.QryPositionInstrumentID)
return templists
def excelDataToExcel(datalist,ExchangeID,columns,TradingDay,InstrumentID):
df = pd.DataFrame(data=datalist, columns=columns)
# df[[u'成交量(手)', u'增减',u'持买单量1',u'增减1',u'持卖单量2',u'增减2']] = df[[u'成交量(手)',u'增减',u'持买单量1',u'增减1',u'持卖单量2',u'增减2']].apply(pd.to_numeric)
saveDirector = "D:/GitData/Top20Position/" + ExchangeID + "/" + TradingDay + "/"
if not os.path.exists(saveDirector):
os.mkdir(saveDirector)
savafile = saveDirector + TradingDay + "_" + InstrumentID + ".xlsx"
writer = pd.ExcelWriter(savafile, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', startrow=0, startcol=0, index=None)
workbook = writer.book
worksheet = writer.sheets['Sheet1']
if len(columns)==12:
worksheet.set_column('A:L', 14)
elif len(columns)==15:
worksheet.set_column('A:O', 14)
# Add a header format.
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'vcenter',
'border': 1
})
header_format.set_align('center')
header_format.set_align('vcenter')
writer.save()
def GetDCEStagedTurnover(info,TradingDay,ExchangeID):
beginmonth=TradingDay.strftime("%Y%m")
endmonth=TradingDay.strftime("%Y%m")
url = "http://www.dce.com.cn/publicweb/quotesdata/memberDealCh.html?"
url = url + "memberDealQuotes.variety=%s&memberDealQuotes.trade_type=0&memberDealQuotes.begin_month=%s&memberDealQuotes.end_month=%s"
sql="SELECT [InstrumentCode] FROM [PreTrade].[dbo].[ContractCode] where [ExchangeID]='%s'"%ExchangeID
templist = info.mysql.ExecQueryGetList(sql)
templist.append("all")
for i in templist:
Surl = url % (i, beginmonth, endmonth)
print Surl
info.Set_StagePosition(ExchangeID, Surl, i,beginmonth,endmonth)
GetDCEStagePosition(info)
def GetDCEStagePosition(info):
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=1311697EA3395C8127FD0BB9D51B1742; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1550114491,1550801897,1550826990,1550890979; Hm_lpvt_a50228174de2a93aee654389576b60fb=1550892897',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
html=info.mysplider.getUrlcontent(info.StagePositionurl,header=header)
listdata=info.mysplider.tableTolistByNum(html,"DCE",0)
templist = list()
begindate = info.StagePositionBeginTime
enddate = info.StagePositionEndTime
for i in listdata:
col = []
if str(i[0]).strip().find("名次") == -1 and str(i[0]).strip().find("共计") == -1 and str(i[0]).strip().find(
"总计") == -1:
if "null" not in i:
InstrumentID = info.StagePositionCode
Rank = str(i[0]).strip()
Type = '0'
ParticipantID1 = str(i[1]).strip()
ParticipantABBR1 = str(i[2]).strip()
CJ1 = str(i[3]).strip()
CJ1_Percent = str(i[4]).strip()
ParticipantID2 = str(i[6]).strip()
ParticipantABBR2 = str(i[7]).strip()
CJ2 = str(i[8]).strip()
CJ2_Percent = str(i[9]).strip()
col = [int(Rank), ParticipantID1, ParticipantABBR1, int(CJ1), CJ1_Percent, int(Rank),
ParticipantID2, ParticipantABBR2, float(CJ2), CJ2_Percent]
templist.append(tuple(col))
columns = [u'名次', u'会员号', u'会员名称', u'成交量(手)', u'成交量比重', u'名次1', u'会员号1', u'会员名称1', u'成交金额(亿元)', u'成交额比重']
df = pd.DataFrame(data=templist, columns=columns)
saveDirector = "D:/GitData/StagePosition/" + info.StagePositionExchangeID + "/" + begindate + "/"
if not os.path.exists(saveDirector):
os.mkdir(saveDirector)
savafile = saveDirector + begindate + "_" + InstrumentID + ".xlsx"
writer = pd.ExcelWriter(savafile, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', startrow=0, startcol=0, index=None)
workbook = writer.book
worksheet = writer.sheets['Sheet1']
worksheet.set_column('A:J', 15)
# Add a header format.
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'vcenter',
'border': 1
})
header_format.set_align('center')
header_format.set_align('vcenter')
writer.save()
def GetDCEStagedStatistic(info,TradingDay,ExchangeID):
"""阶段性统计排名"""
beginmonth = TradingDay.strftime("%Y%m")
url = "http://www.dce.com.cn/publicweb/quotesdata/varietyMonthYearStatCh.html?"
url = url + "varietyMonthYearStatQuotes.trade_type=0&varietyMonthYearStatQuotes.begin_month=%s"
url = url%beginmonth
GetDCEStatistic(info,url,ExchangeID,beginmonth)
def GetDCEStatistic(info,url,ExchangeID,beginmonth):
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Ag | 5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=1311697EA3395C8127FD0BB9D51B1742; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1550114491,1550801897,1550826990,1550890979; Hm_lpvt_a50228174de2a93aee654389576b60fb=1550892897',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
html = info.mysplider.getUrlcontent(url, header=header)
listdata = info.mysplider.tableTolistByNum(html, "DCE", 0)
print url
"""write to xls"""
ext = '.csv'
parent = "D:/GitData/StagePosition/"
if not os.path.exists(parent + ExchangeID):
os.mkdir(parent + ExchangeID)
if not os.path.exists(parent + ExchangeID + "/" + beginmonth):
os.mkdir(parent + ExchangeID + "/" + beginmonth)
filename = beginmonth+ext
filename = parent + ExchangeID + "/"+beginmonth+"/" + filename
col=[]
col0=listdata[0]
col1=listdata[1]
col=[col0[0],col1[0]+col0[1],col1[1],col1[2],col1[3]+col0[1],col1[4],col1[5]+col0[2],col1[6],col1[7],col1[8]+col0[2],col1[9],col1[10],col1[11],col1[12]]
listdata=listdata[1:]
listdata[0]=col
ListDataToExcel(listdata, filename)
def ListDataToExcel(listdata,filename):
"""a public method that list data write ext extension file"""
# file_backup=f = codecs.open(parent+info.QryPositionExchangeID+"/"+filename,'wb','utf-8')
csvfile = file(filename.decode("utf-8"), 'wb')
csvfile.write(codecs.BOM_UTF8)
writer=csv.writer(csvfile)
writer.writerows(listdata)
csvfile.close()
df_new = pd.read_csv(filename, encoding='utf-8')
writer = pd.ExcelWriter(filename.replace(".csv",".xlsx"))
df_new.to_excel(writer, index=False)
writer.save()
os.remove(filename)
def GetSHFEPosition(info,TradingDay,ExchangeID):
templists=[]
url="http://www.shfe.com.cn/data/dailydata/kx/pm"+TradingDay.strftime("%Y%m%d")+".dat"
print url
header={
}
insertsql = "INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantID1],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantID2],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantID3],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
html=info.mysplider.getUrlcontent(url,header=header)
data=json.loads(html)
data=data['o_cursor']
for i in data:
col=[]
if str(i['INSTRUMENTID']).find("all")==-1:
col = [TradingDay.strftime('%Y-%m-%d'), str(i['INSTRUMENTID']).strip(), ExchangeID, str(i['RANK']).strip(),'0', str(i['PARTICIPANTID1']).strip(),str(i['PARTICIPANTABBR1']).strip(), str(i['CJ1']).strip(), str(i['CJ1_CHG']).strip(),str(i['PARTICIPANTID2']).strip(),str(i['PARTICIPANTABBR2']).strip(), str(i['CJ2']).strip(), str(i['CJ2_CHG']).strip(),str(i['PARTICIPANTID3']).strip(),str(i['PARTICIPANTABBR3']).strip(), str(i['CJ3']).strip(), str(i['CJ3_CHG']).strip()]
if int(col[3])<=20:
templists.append(tuple(col))
ResultToDatabase(info,templists,insertsql)
def GetCZCEPosition(info, startdate, ExchangeID):
top20list=map(lambda x:str(x),range(1,21,1))
codeList=info.GetExchangeProduct(ExchangeID)
insertsql = "INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
header=info.GetExchangeHeader(ExchangeID)
url="http://www.czce.com.cn/cn/DFSStaticFiles/Future/%s/%s/FutureDataHolding.htm"%(startdate.strftime("%Y%m%d")[:4],startdate.strftime("%Y%m%d"))
print url
templists=list()
html=info.mysplider.getUrlcontent(url,header=header)
table =info.mysplider.tableTolist(html,ExchangeID)
for i in table:
if str(i[0]).strip()=="合计" or str(i[0]).strip()=="名次":
continue
temp= re.findall("[A-Za-z0-9]+",str(i[0]).strip())
if len(temp)==4:
TradeInstrument=temp[0]
continue
if len(temp)==1 and temp[0] in top20list:
TradingDay=startdate.strftime("%Y-%m-%d")
InstrumentID=TradeInstrument
if InstrumentID in codeList:
Type='1'
else:
Type='0'
Rank=str(i[0]).strip()
ParticipantABBR1=str(i[1]).strip()
CJ1=str(i[3]).strip()
CJ1_CHG=str(i[4]).strip()
ParticipantABBR2=str(i[5]).strip()
CJ2=str(i[6]).strip()
CJ2_CHG=str(i[7]).strip()
ParticipantABBR3=str(i[8]).strip()
CJ3=str(i[9]).strip()
CJ3_CHG=str(i[10]).strip()
col=[TradingDay, InstrumentID, ExchangeID, Rank,Type, ParticipantABBR1, CJ1, CJ1_CHG, ParticipantABBR2, CJ2, CJ2_CHG,ParticipantABBR3, CJ3, CJ3_CHG]
templists.append(tuple(col))
ResultToDatabase(info, templists, insertsql)
def zipDir(startdir,outFullName):
"""
压缩指定文件夹
:param dirpath: 目标文件夹路径
:param outFullName: 压缩文件保存路径+xxxx.zip
:return: 无
"""
zip = zipfile.ZipFile(outFullName,"w",zipfile.ZIP_DEFLATED)
for path,dirnames,filenames in os.walk(startdir):
# 去掉目标跟路径,只对目标文件夹下边的文件及文件夹进行压缩
fpath = path.replace(startdir,'')
for filename in filenames:
zip.write(os.path.join(path,filename),os.path.join(fpath,filename))
zip.close()
# z = zipfile.ZipFile(outFullName, 'w', zipfile.ZIP_DEFLATED)
#
# for dirpath, dirnames, filenames in os.walk(startdir):
# for filename in filenames:
# z.write(os.path.join(dirpath, filename))
# z.close()
def copy(sourPath,distPath): #sourPath原文件地址,distPath指定地址
fp = open(sourPath,'r')
fp1 = open(distPath,'w')
for i in fp:
fp1.write(i) #向新文件中写入数据
fp.close()
fp1.close() | ent': 'Mozilla/ | identifier_name |
PositionAPI.py | # -*- coding: utf-8 -*-
# @Time : 2019/2/22 16:45
# @Author : ZouJunLin
"""爬取持仓共用的API"""
import re
import csv,os,codecs,datetime
import pandas as pd
import xlwt
import threading
import json
import zipfile
def ResultToDatabase(info,result,sql):
info.mysql.ExecmanysNonQuery(sql,result)
def GetDCEPosition(info,TradingDay,ExchangeID):
result=list()
sql="select InstrumentID from SettlementInfo where TradingDay='%s' and Position>20000 and ExchangeID='%s' and IsFuture=1"%(TradingDay.strftime("%Y-%m-%d"),ExchangeID)
insertsql="INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
isexistsql = "select distinct [ExchangeID] from [Position_Top20] where TradingDay='%s'" % TradingDay.strftime(
"%Y-%m-%d")
templist=info.mysql.ExecQueryGetList(sql)
for i in templist:
code = str(re.match(r"\D+", i).group())
url="http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html?"
url=url+"memberDealPosiQuotes.variety=%s&memberDealPosiQuotes.trade_type=0&year=%s&month=%s&day=%s&contract.contract_id=%s&contract.variety_id=%s&contract="
url=url % (code, TradingDay.year, TradingDay.month - 1, TradingDay.day,i,code)
info.Set_QryPosition(ExchangeID,url, code, i,TradingDay.strftime("%Y%m%d"))
temp=GetDCEPositionProductData(info,i,TradingDay.strftime("%Y-%m-%d"))
result=result+temp
eixstlist = info.mysql.ExecQueryGetList(isexistsql)
ResultToDatabase(info,result,insertsql)
def GetDCEPositionProductData(info,InstrumentID,TradingDay):
templists=list()
exclelist=list()
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=8E650BA1F3AFEAB1611F8ADC6C186BD1; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1547791080,1548045741,1548146303,1548147105; sssssss=516c92f7sssssss_516c92f7',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
print info.QryPositionurl
html=info.mysplider.getUrlcontent(info.QryPositionurl,header=header)
listdata=info.mysplider.tableTolistByNum(html,"DCE",1)
"""write to database"""
for i in listdata[1:-1]:
# col=[]
# excelcol=[]
Rank=i[0]
if str(Rank).strip()=="":
if str(i[4]).strip() != "":
Rank=str(i[4]).strip()
if str(i[8]).strip() != "":
Rank = str(i[8]).strip()
if Rank=="":
continue
ExchangeID='DCE'
ParticipantABBR1=i[1]
CJ1=i[2]
CJ1_CHG=i[3]
ParticipantABBR2=i[5]
CJ2=i[6]
CJ2_CHG=i[7]
ParticipantABBR3=i[9]
CJ3=i[10]
CJ3_CHG=i[11]
col=[TradingDay,InstrumentID,ExchangeID,Rank,'0',ParticipantABBR1,CJ1,CJ1_CHG,ParticipantABBR2,CJ2,CJ2_CHG,ParticipantABBR3,CJ3,CJ3_CHG]
# excelcol=[int(Rank),ParticipantABBR1,int(CJ1),int(CJ1_CHG),int(Rank),ParticipantABBR2,int(CJ2),int(CJ2_CHG),int(Rank),ParticipantABBR3,int(CJ3),int(CJ3_CHG)]
# exclelist.append(excelcol)
templists.append(tuple(col))
# columns = [u'名次', u'会员简称', u'成交量(手)', u'增减', u'名次1',u'会员简称1', u'持买单量1', u'增减1',u'名次2', u'会员简称2', u'持卖单量2', u'增减2']
# excelDataToExcel(exclelist,ExchangeID,columns,info.QryPositionTradingDay,info.QryPositionInstrumentID)
return templists
def excelDataToExcel(datalist,ExchangeID,columns,TradingDay,InstrumentID):
df = pd.DataFrame(data=datalist, columns=columns)
# df[[u'成交量(手)', u'增减',u'持买单量1',u'增减1',u'持卖单量2',u'增减2']] = df[[u'成交量(手)',u'增减',u'持买单量1',u'增减1',u'持卖单量2',u'增减2']].apply(pd.to_numeric)
saveDirector = "D:/GitData/Top20Position/" + ExchangeID + "/" + TradingDay + "/"
if not os.path.exists(saveDirector):
os.mkdir(saveDirector)
savafile = saveDirector + TradingDay + "_" + InstrumentID + ".xlsx"
writer = pd.ExcelWriter(savafile, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', startrow=0, startcol=0, index=None)
workbook = writer.book
worksheet = writer.sheets['Sheet1']
if len(columns)==12:
worksheet.set_column('A:L', 14)
elif len(columns)==15:
worksheet.set_column('A:O', 14)
# Add a header format.
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'vcenter',
'border': 1
})
header_format.set_align('center')
header_format.set_align('vcenter')
writer.save()
def GetDCEStagedTurnover(info,TradingDay,ExchangeID):
beginmonth=TradingDay.strftime("%Y%m")
endmonth=TradingDay.strftime("%Y%m")
url = "http://www.dce.com.cn/publicweb/quotesdata/memberDealCh.html?"
url = url + "memberDealQuotes.variety=%s&memberDealQuotes.trade_type=0&memberDealQuotes.begin_month=%s&memberDealQuotes.end_month=%s"
sql="SELECT [InstrumentCode] FROM [PreTrade].[dbo].[ContractCode] where [ExchangeID]='%s'"%ExchangeID
templist = info.mysql.ExecQueryGetList(sql)
templist.append("all")
for i in templist:
Surl = url % (i, beginmonth, endmonth)
print Surl
info.Set_StagePosition(ExchangeID, Surl, i,beginmonth,endmonth)
GetDCEStagePosition(info)
def GetDCEStagePosition(info):
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=1311697EA3395C8127FD0BB9D51B1742; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1550114491,1550801897,1550826990,1550890979; Hm_lpvt_a50228174de2a93aee654389576b60fb=1550892897',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
html=info.mysplider.getUrlcontent(info.StagePositionurl,header=header)
listdata=info.mysplider.tableTolistByNum(html,"DCE",0)
templist = list()
begindate = info.StagePositionBeginTime
enddate = info.StagePositionEndTime
for i in listdata:
col = []
if str(i[0]).strip().find("名次") == -1 and str(i[0]).strip().find("共计") == -1 and str(i[0]).strip().find(
"总计") == -1:
if "null" not in i:
InstrumentID = info.StagePositionCode
Rank = str(i[0]).strip()
Type = '0'
ParticipantID1 = str(i[1]).strip()
ParticipantABBR1 = str(i[2]).strip()
CJ1 = str(i[3]).strip()
CJ1_Percent = str(i[4]).strip()
ParticipantID2 = str(i[6]).strip()
ParticipantABBR2 = str(i[7]).strip()
CJ2 = str(i[8]).strip()
CJ2_Percent = str(i[9]).strip()
col = [int(Rank), ParticipantID1, ParticipantABBR1, int(CJ1), CJ1_Percent, int(Rank),
ParticipantID2, ParticipantABBR2, float(CJ2), CJ2_Percent]
templist.append(tuple(col))
columns = [u'名次', u'会员号', u'会员名称', u'成交量(手)', u'成交量比重', u'名次1', u'会员号1', u'会员名称1', u'成交金额(亿元)', u'成交额比重']
df = pd.DataFrame(data=templist, columns=columns)
saveDirector = "D:/GitData/StagePosition/" + info.StagePositionExchangeID + "/" + begindate + "/"
if not os.path.exists(saveDirector):
os.mkdir(saveDirector)
savafile = saveDirector + begindate + "_" + InstrumentID + ".xlsx"
writer = pd.ExcelWriter(savafile, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', startrow=0, startcol=0, index=None)
workbook = writer.book
worksheet = writer.sheets['Sheet1']
worksheet.set_column('A:J', 15)
# Add a header format.
header_format = workbook.add_format({
'bold': True,
'text_wrap': True,
'valign': 'vcenter',
'border': 1
})
header_format.set_align('center')
header_format.set_align('vcenter')
writer.save()
def GetDCEStagedStatistic(info,TradingDay,ExchangeID):
"""阶段性统计排名"""
beginmonth = TradingDay.strftime("%Y%m")
url = "http://www.dce.com.cn/publicweb/quotesdata/varietyMonthYearStatCh.html?"
url = url + "varietyMonthYearStatQuotes.trade_type=0&varietyMonthYearStatQuotes.begin_month=%s"
url = url%beginmonth
GetDCEStatistic(info,url,ExchangeID,beginmonth)
def GetDCEStatistic(info,url,ExchangeID,beginmonth):
header = {
'Connection': 'keep-alive',
'Cache-Control': 'no-cache',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cookie': 'JSESSIONID=1311697EA3395C8127FD0BB9D51B1742; WMONID=j1TJsMZrARA; Hm_lvt_a50228174de2a93aee654389576b60fb=1550114491,1550801897,1550826990,1550890979; Hm_lpvt_a50228174de2a93aee654389576b60fb=1550892897',
'Host': 'www.dce.com.cn',
'Pragma': 'no-cache',
'Upgrade-Insecure-Requests': '1'
}
html = info.mysplider.getUrlcontent(url, header=header)
listdata = info.mysplider.tableTolistByNum(html, "DCE", 0)
print url
"""write to xls"""
ext = '.csv'
parent = "D:/GitData/StagePosition/"
if not os.path.exists(parent + ExchangeID):
os.mkdir(parent + ExchangeID)
if not os.path.exists(parent + ExchangeID + "/" + beginmonth):
os.mkdir(parent + ExchangeID + "/" + beginmonth)
filename = beginmonth+ext
filename = parent + ExchangeID + "/"+beginmonth+"/" + filename
col=[]
col0=listdata[0]
col1=listdata[1]
col=[col0[0],col1[0]+col0[1],col1[1],col1[2],col1[3]+col0[1],col1[4],col1[5]+col0[2],col1[6],col1[7],col1[8]+col0[2],col1[9],col1[10],col1[11],col1[12]]
listdata=listdata[1:]
listdata[0]=col
ListDataToExcel(listdata, filename)
def ListDataToExcel(listdata,filename):
"""a public method that list data write ext extension file"""
# file_backup=f = codecs.open(parent+info.QryPositionExchangeID+"/"+filename,'wb','utf-8')
csvfile = file(filename.decode("utf-8"), 'wb')
csvfile.write(codecs.BOM_UTF8)
writer=csv.writer(csvfile)
writer.writerows(listdata)
csvfile.close()
df_new = pd.read_csv(filename, encoding='utf-8')
writer = pd.ExcelWriter(filename.replace(".csv",".xlsx"))
df_new.to_excel(writer, index=False)
writer.save()
os.remove(filename)
def GetSHFEPosition(info,TradingDay,ExchangeID):
templists=[]
url="http://www.shfe.com.cn/data/dailydata/kx/pm"+TradingDay.strftime("%Y%m%d")+".dat"
print url
header={
}
insertsql = "INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantID1],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantID2],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantID3],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
html=info.mysplider.getUrlcontent(url,header=header)
data=json.loads(html)
data=data['o_cursor']
for i in data:
col=[]
if str(i['INSTRUMENTID']).find("all")==-1:
col = [TradingDay.strftime('%Y-%m-%d'), str(i['INSTRUMENTID']).strip(), ExchangeID, str(i['RANK']).strip(),'0', str(i['PARTICIPANTID1']).strip(),str(i['PARTICIPANTABBR1']).strip(), str(i['CJ1']).strip(), str(i['CJ1_CHG']).strip(),str(i['PARTICIPANTID2']).strip(),str(i['PARTICIPANTABBR2']).strip(), str(i['CJ2']).strip(), str(i['CJ2_CHG']).strip(),str(i['PARTICIPANTID3']).strip(),str(i['PARTICIPANTABBR3']).strip(), str(i['CJ3']).strip(), str(i['CJ3_CHG']).strip()]
if int(col[3])<=20:
templists.append(tuple(col))
ResultToDatabase(info,templists,insertsql)
def GetCZCEPosition(info, startdate, ExchangeID):
top20list=map(lambda x:str(x),range(1,21,1))
codeList=info.GetExchangeProduct(ExchangeID)
insertsql = "INSERT INTO [dbo].[Position_Top20] ([TradingDay],[InstrumentID],[ExchangeID],[Rank],[Type],[ParticipantABBR1],[CJ1],[CJ1_CHG],[ParticipantABBR2]" \
",[CJ2],[CJ2_CHG],[ParticipantABBR3],[CJ3],[CJ3_CHG]) values('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')"
header=info.GetExchangeHeader(ExchangeID)
url="http://www.czce.com.cn/cn/DFSStaticFiles/Future/%s/%s/FutureDataHolding.htm"%(startdate.strftime("%Y%m%d")[:4],startdate.strftime("%Y%m%d"))
print url
templists=list()
html=info.mysplider.getUrlcontent(url,header=header)
table =info.mysplider.tableTolist(html,ExchangeID)
for i in table:
if str(i[0]).strip()=="合计" or str(i[0]).strip()=="名次":
continue
temp= re.findall("[A-Za-z0-9]+",str(i[0]).strip())
if len(temp)==4:
TradeInstrument=temp[0]
continue
if len(temp)==1 and temp[0] in top20list:
TradingDay=startdate.strftime("%Y-%m-%d")
InstrumentID=TradeInstrument
if InstrumentID in codeList:
Type='1'
else:
Type='0'
Rank=str(i[0]).strip()
ParticipantABBR1=str(i[1]).strip()
CJ1=str(i[3]).strip()
CJ1_CHG=str(i[4]).strip()
ParticipantABBR2=str(i[5]).strip()
CJ2=str(i[6]).strip()
CJ2_CHG=str(i[7]).strip()
ParticipantABBR3=str(i[8]).strip()
CJ3=str(i[9]).strip()
CJ3_CHG=str(i[10]).strip()
col=[TradingDay, InstrumentID, ExchangeID, Rank,Type, ParticipantABBR1, CJ1, CJ1_CHG, ParticipantABBR2, CJ2, CJ2_CHG,ParticipantABBR3, CJ3, CJ3_CHG]
templists.append(tuple(col))
ResultToDatabase(info, templists, insertsql)
def zipDir(startdir,outFullName):
"""
压缩指定文件夹
:param dirpath: 目标文件夹路径
:param outFullName: 压缩文件保存路径+xxxx.zip
:return: 无
"""
zip = zipfile.ZipFile(outFullName,"w",zipfile.ZIP_DEFLATED)
for path,dirnames,filenames in os.walk(startdir):
# 去掉目标跟路径,只对目标文件夹下边的文件及文件夹进行压缩
fpath = path.replace(startdir,'')
for filename in filenames:
zip.write(os.path.join(path,filename),os.path.join(fpath,filename))
zip.close()
# z = zipfile.ZipFile(outFullName, 'w', zipfile.ZIP_DEFLATED)
#
# for dirpath, dirnames, filenames in os.walk(startdir):
# for filename in filenames:
# z.write(os.path.join(dirpath, filename))
# z.close()
def copy(sourPath,distPath): #sourPath原文件地址,distPath指定地址
fp = open(sourPath,'r')
fp1 = open(distPath,'w')
for i in fp:
fp1.write(i) #向新文件中写入数据
fp.close()
fp1.close() | identifier_body | ||
ch8.go | package main
import (
"bufio"
"flag"
"fmt"
"log"
"net"
"time"
)
func main() {
listener, err := net.Listen("tcp", "localhost: 8000")
if err != nil {
log.Fatal(err)
}
go broadcaster()
for {
conn, err := listener.Accept()
if err != nil {
log.Print(err)
continue
}
go handleConn(conn)
}
}
type client chan<- string // an outgoing message channel
var (
entering = make(chan client)
leaving = make(chan client)
messages = make(chan string) // all incoming client messages
)
func broadcaster() {
clients := make(map[client]bool) // all connected clients
for {
select {
case msg := <-messages:
// Broadcase incoming message to all
// clients' outgoing message channels.
for cli := range clients {
cli <- msg
}
case cli := <-entering:
clients[cli] = true
case cli := <-leaving:
delete(clients, cli)
close(cli)
}
}
}
func handleConn(conn net.Conn) {
ch := make(chan string)
go clientWriter(conn, ch)
who := conn.RemoteAddr().String()
ch <- "You are " + who
messages <- who + " has arrived"
entering <- ch
input := bufio.NewScanner(conn)
for input.Scan() {
messages <- who + ": " + input.Text()
}
// NOTE: ignoreing potentian errors from input.Err()
leaving <- ch
messages <- who + " has left"
conn.Close()
}
func clientWriter(conn net.Connj, ch <-chan string) {
for msg := range ch {
fmt.Fprintln(conn, msg) // NOTE: ignoring network errors
}
}
// var done = make(chan struct{})
// func cancelled() bool {
// select {
// case <-done:
// return truen
// default:
// return false
// }
// }
// // Cancel traversal when inpu is detected.
// go func() {
// os.Stdin.Read(make([]byte, 1))
// close(done)
// }()
// import (
// "os"
// "path/filepath"
// "sync"
// )
// func main() {
// // ...determine roots...
// // Traverse each root of the file tree in parallel.
// fileSizes := make(chan int64)
// var n sync.WaitGroup
// for _, root := range roots {
// n.Add(1)
// go walkDir(root, &n, fileSizes)
// }
// go func() {
// n.Wait()
// close(fileSizes)
// }()
// // ...select loop...
// }
// func walkDir(dir string, n *sync.WaitGroup, fileSizes chan<- int64) {
// defer n.Done()
// for _, entry := range dirents(dir) {
// if entry.IsDir() {
// n.Add(1)
// subdir := filepath.Join(dir, entry.Name())
// go walkDir(subdir, n, fileSizes)
// } else {
// fileSizes <- entry.Size()
// }
// }
// }
// var sema = make(chan struct{}, 20)
// func dirents(dir string) []os.FileInfo {
// sema <- struct{}{} // acquire token
// defer func() { <-sema }() // release token
// }
// import (
// "flag"
// "time"
// )
var verbos = flag.Bool("v", false, "show verbos progress messages")
func main() {
// ...start background goroutine...
// Print the results periodically.
var tick <-chan time.Time
if *verbos {
tick = time.Tick(500 * time.Millisecond)
}
var nfiles, nbytes int64
loop:
for {
select {
case <-done:
// Drain fileSizes to allow existing goroutines to finish.
for range fileSizes {
// Do nothing
}
return
case size, ok := <-fileSizes:
if !ok {
break loop // fileSizes was closed
}
nfiles++
nbytes += size
case <-tick:
printDiskUsage(nfiles, nbytes)
}
}
printDiskUsage(nfiles, nbytes) // final totals
}
// import (
// "flag"
// "fmt"
// "io/ioutil"
// "os"
// "path/filepath"
// )
// func main() {
// // Determine the intial directories
// flag.Parse()
// roots := flag.Args()
// if len(roots) == 0 {
// roots = []string{"."}
// }
// // Traverse the file tree
// fileSizes := make(chan int64)
// go func() {
// for _, root := range roots {
// walkDir(root, fileSizes)
// }
// close(fileSizes)
// }()
// // Print the results.
// var nfiles, nbytes int64
// for size := range fileSizes {
// nfiles++
// nbytes += size
// }
// printDiskUsage(nfiles, nbytes)
// }
// func printDiskUsage(nfiles, nbytes int64) {
// fmt.Printf("%d files %.1f GB\n", nfiles, float64(nbytes)/1e9)
// }
// func walkDir(dir string, fileSizes chan<- int64) {
// for _, entry := range dirents(dir) {
// if entry.IsDir() {
// subdir := filepath.Join(dir, entry.Name())
// walkDir(subdir, fileSizes)
// } else {
// fileSizes <- entry.Size()
// }
// }
// }
// // dirents returns the entries of directory dir.
// func dirents(dir string) []os.FileInfo {
// entries, err := ioutil.ReadDir(dir)
// if err != nil {
// fmt.Fprintf(os.Stderr, "du1: %v\n", err)
// return nil
// }
// return entries
// }
// import (
// "fmt"
// "io/ioutil"
// "os"
// "path/filepath"
// "time"
// )
// func main() {
// // create abort channel
// fmt.Println("Commencing countdown. press return to abort.")
// tick := time.Tick(1 * time.Second)
// for countdown := 10; countdonw > 0; countdown-- {
// fmt.Println(coundown)
// select {
// case <-tick:
// // do nothing.
// case <-abort:
// fmt.Println("Launch aborted!")
// return
// }
// }
// launch()
// }
// import (
// "fmt"
// "time"
// )
// func main() {
// // create abort channel
// fmt.Println("Commencing countdown. Press return to abort.")
// select {
// case <-time.After(10 * time.Second):
// // do nothing
// case <-abort:
// fmt.Println("Launch aborted!")
// return
// }
// launch()
// ch := make(chan int, 1)
// for i := 0; i < 10; i++ {
// select {
// case x := ch:
// fmt.Println(x)
// case ch <- i:
// }
// }
// }
// import (
// "fmt"
// "os"
// "time"
// )
// func main() {
// fmt.Println("Commencing countdown.")
// tick := time.Tick(1 * time.Second)
// for countdown := 10; countdown > 0; countdown-- {
// fmt.Println(countdown)
// j <- tick
// }
// launch()
// abort := make(chan struct{})
// go func() {
// os.Stdin.Read(make([]byte, 1)) // read a single byte
// abort <- struct{}{}
// }()
// }
// import "os"
// func main() {
// worklist := make(chan []string) // lists of URLs, may have duplicates
// unseenLinks := make(chan string) // de-duplicated URLs
// // Add command-line arguments to worklist
// go func() { worklsit <- os.Args[1:] }()
// // Create 20 crawler goroutines to fetch each unseen link.
// for i := 0; i < 20; i++ {
// go func() {
// for link := range unseenLinks {
// foundLinks := crawl(link)
// go func() {worklist <- foundLinks}
// }()
// }
// // The main goroutine de-duplicates worklist items
// // and sends the unseen ones to the crawlers.
// seen := make(map[string]bool)
// for list := range worklinst {
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// unseenLinks <- link
// }
// }
// }
// }
// }
// import "os"
// func main() {
// worklist := make(chan []string)
// var n int // number of pending sends to worklist
// // start with the command-line arguments
// n++
// go func() { worklist <- os.Args[1:] }()
// // Crawl the web concurrently
// seen := make(map[string]bool)
// for ; n > 0; n-- {
// list := <-worklist
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// n++
// go func(link string) {
// worklist <- crawl(link)
// }(link)
// }
// }
// } | // "log"
// "gopl.io/ch5/links"
// )
// // tokens a counting semaphore used to enforce a limit of 20 concurrent requests
// var tokens = make(chan struct{}, 20)
// func crawl(url string) []string {
// fmt.Println(url)
// tokens <- struct{}{}
// list, err := links.Extract(url)
// <-tokens // release the token
// if err != nil {
// log.Print(err)
// }
// return list
// }
// func crawl(url string) []string {
// fmt.Println(url)
// list, err := links.Extrac(url)
// if err != nil {
// log.Print(err)
// }
// return list
// }
// func main() {
// worklist := make(chan []string)
// go func() { worklist <- os.Args[1:] }()
// seen := make(map[string]bool)
// for list := range worklist {
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// go func(link string) {
// worklist <- crawl(link)
// }(link)
// }
// }
// }
// }
// import (
// "sync"
// "log"
// "gopl.io/ch8/thumbnail"
// )
// func makeThumbnails(filenames []string) {
// for _, f := range filnames {
// if _, err := thumbnail.ImageFile(f); err != nil {
// log.Println(err)
// }
// }
// }
// func makeThumbnails2(filenames []string) {
// for _, f := range filenames {
// go thumbnail.ImageFile(f)
// }
// }
// fun makeThumbnails3(filenames []string) {
// ch := make(chan struct {})
// for _, f := range filenames {
// go func(f string) {
// thumbnail.ImageFile(f)
// ch <- struct{}{}
// }(f)
// }
// for range filenames{
// <-ch
// }
// }
// func makeThumbnails4(filenames []string) error {
// errors := make(chan error)
// for _, f := range filenames {
// go func(f string) {
// _, err := thumbnail.ImageFile(f)
// errors <- err
// }
// }
// return nil
// }
// func makeThumbnails5(filenames []string) (thumbfiles []string, err error) {
// type item struct {
// thumbfile string
// err error
// }
// ch := make(chan item, len(filenames))
// for _, f := range filenames {
// go func(f string) {
// var it item
// it.thumbfile, it.err = thumbnail.ImageFile(f)
// ch <- it
// }(f)
// }
// for range filenames {
// it := <-ch
// if it.err != nil {
// return nil, it.err
// }
// thumbfiles = append(thumbfiles, it.thumbfile)
// }
// return thumbfiles, nil
// }
// func makeThumbnails6(filenames <-chan string) int64 {
// size := make(chan int64)
// var wg sync.WaitGroup // number of working goroutines
// for f := range filenames {
// wg.Add(1)
// // worker
// go func(f string) {
// defer wg.Done()
// thumb, err := thumbnail.ImageFile(f)
// if err != nil {
// log.Println(err)
// return
// }
// info, _ := os.Stat(thumb) // OK to ignore error
// sizes <- info.Size()
// }(f)
// }
// // closer
// go func() {
// wg.Wait()
// close(sizes)
// }()
// var total int64
// for size := range sizes {
// total += size
// }
// return total
// }
// func mirroredQuery() string {
// responses := make(chan string, 3)
// go func() { responses <- request("aisa.gopl.io") }()
// go func() { responses <- request("europe.gopl.io") }()
// go func() { responses <- request("americas.gopl.io") }()
// return <-responses
// }
// func request(hostname string) (response string) {}
// import (
// "fmt"
// )
// func counter(out chan<- int) {
// for x := 0; x < 100; x++ {
// out <- x
// }
// close(out)
// }
// func squarer(out chan<- int, in <-chan int) {
// for v := range in {
// out <- v * v
// }
// close(out)
// }
// func printer(in <-chan int) {
// for v := range in {
// fmt.Println(v)
// }
// }
// // counter将输出out到naturals中,squarer从naturals中in输入
// // 从squares中out输出
// func main() {
// squares := make(chan int)
// go counter(naturals)
// go squarer(squares, naturals)
// printer(squares)
// }
// import (
// "fmt"
// )
// func main() {
// naturals := make(chan int)
// squares := make(chan int)
// // Counter
// go func() {
// for x := 0; x < 100; x++ {
// naturals <- x
// }
// close(naturals)
// }()
// // Squarer
// go func() {
// for x := range naturals {
// squares <- x * x
// }
// close(squares)
// }()
// // Printer(in main goroutine)
// for x := range squares {
// fmt.Println(x)
// }
// }
// import (
// "fmt"
// )
// func main() {
// naturals := make(chan int)
// squares := make(chan int)
// // Counter
// go func() {
// for x := 0; ; x++ {
// naturals <- x
// }
// }()
// go func() {
// for {
// x, ok := <-naturals
// if !ok {
// break
// }
// squares <- x * x
// }
// close(squares)
// }()
// for {
// fmt.Println(<-squares)
// }
// }
// import (
// "io"
// "log"
// "net"
// "os"
// )
// func main() {
// conn, err := net.Dial("tcp", "localhost:8000")
// if err != nil {
// log.Fatal(err)
// }
// done := make(chan struct{})
// go func() {
// io.Copy(os.Stdout, conn)
// log.Println("done")
// done <- struct{}{}
// }()
// mustCopy(conn, os.Stdin)
// conn.Close()
// <-done
// }
// import (
// "bufio"
// "fmt"
// "io"
// "log"
// "net"
// "strings"
// "time"
// )
// func main() {
// listener, err := net.Listen("tcp", "localhost:8000")
// if err != nil {
// log.Fatal(err)
// }
// ch := make(chan int)
// for {
// conn, err := listener.Accept()
// if err != nil {
// log.Print(err) // connection aborted
// continue
// }
// handleConn(conn) // handle one connection at a time
// }
// }
// func handleConn(c net.Conn) {
// defer c.Close()
// for {
// _, err := io.WriteString(c, time.Now().Format("15:04:05\n"))
// if err != nil {
// return // client disconnected
// }
// time.Sleep(1 * time.Second)
// }
// }
// func echo(c net.Conn, shout string, delay time.Duration) {
// fmt.Fprintln(c, "\t", strings.ToUpper(shout))
// time.Sleep(delay)
// fmt.Fprintln(c, "\t", shout)
// time.Sleep(delay)
// fmt.Fprintln(c, "\t", strings.ToLower(shout))
// }
// func handleConn(c net.Conn) {
// input := bufio.NewScanner(c)
// for input.Scan() {
// echo(c, input.Text(), 1*time.Second)
// }
// c.Close()
// } | // }
// import (
// "fmt" | random_line_split |
ch8.go | package main
import (
"bufio"
"flag"
"fmt"
"log"
"net"
"time"
)
func main() {
listener, err := net.Listen("tcp", "localhost: 8000")
if err != nil {
log.Fatal(err)
}
go broadcaster()
for {
conn, err := listener.Accept()
if err != nil {
log.Print(err)
continue
}
go handleConn(conn)
}
}
type client chan<- string // an outgoing message channel
var (
entering = make(chan client)
leaving = make(chan client)
messages = make(chan string) // all incoming client messages
)
func broadcaster() |
func handleConn(conn net.Conn) {
ch := make(chan string)
go clientWriter(conn, ch)
who := conn.RemoteAddr().String()
ch <- "You are " + who
messages <- who + " has arrived"
entering <- ch
input := bufio.NewScanner(conn)
for input.Scan() {
messages <- who + ": " + input.Text()
}
// NOTE: ignoreing potentian errors from input.Err()
leaving <- ch
messages <- who + " has left"
conn.Close()
}
func clientWriter(conn net.Connj, ch <-chan string) {
for msg := range ch {
fmt.Fprintln(conn, msg) // NOTE: ignoring network errors
}
}
// var done = make(chan struct{})
// func cancelled() bool {
// select {
// case <-done:
// return truen
// default:
// return false
// }
// }
// // Cancel traversal when inpu is detected.
// go func() {
// os.Stdin.Read(make([]byte, 1))
// close(done)
// }()
// import (
// "os"
// "path/filepath"
// "sync"
// )
// func main() {
// // ...determine roots...
// // Traverse each root of the file tree in parallel.
// fileSizes := make(chan int64)
// var n sync.WaitGroup
// for _, root := range roots {
// n.Add(1)
// go walkDir(root, &n, fileSizes)
// }
// go func() {
// n.Wait()
// close(fileSizes)
// }()
// // ...select loop...
// }
// func walkDir(dir string, n *sync.WaitGroup, fileSizes chan<- int64) {
// defer n.Done()
// for _, entry := range dirents(dir) {
// if entry.IsDir() {
// n.Add(1)
// subdir := filepath.Join(dir, entry.Name())
// go walkDir(subdir, n, fileSizes)
// } else {
// fileSizes <- entry.Size()
// }
// }
// }
// var sema = make(chan struct{}, 20)
// func dirents(dir string) []os.FileInfo {
// sema <- struct{}{} // acquire token
// defer func() { <-sema }() // release token
// }
// import (
// "flag"
// "time"
// )
var verbos = flag.Bool("v", false, "show verbos progress messages")
func main() {
// ...start background goroutine...
// Print the results periodically.
var tick <-chan time.Time
if *verbos {
tick = time.Tick(500 * time.Millisecond)
}
var nfiles, nbytes int64
loop:
for {
select {
case <-done:
// Drain fileSizes to allow existing goroutines to finish.
for range fileSizes {
// Do nothing
}
return
case size, ok := <-fileSizes:
if !ok {
break loop // fileSizes was closed
}
nfiles++
nbytes += size
case <-tick:
printDiskUsage(nfiles, nbytes)
}
}
printDiskUsage(nfiles, nbytes) // final totals
}
// import (
// "flag"
// "fmt"
// "io/ioutil"
// "os"
// "path/filepath"
// )
// func main() {
// // Determine the intial directories
// flag.Parse()
// roots := flag.Args()
// if len(roots) == 0 {
// roots = []string{"."}
// }
// // Traverse the file tree
// fileSizes := make(chan int64)
// go func() {
// for _, root := range roots {
// walkDir(root, fileSizes)
// }
// close(fileSizes)
// }()
// // Print the results.
// var nfiles, nbytes int64
// for size := range fileSizes {
// nfiles++
// nbytes += size
// }
// printDiskUsage(nfiles, nbytes)
// }
// func printDiskUsage(nfiles, nbytes int64) {
// fmt.Printf("%d files %.1f GB\n", nfiles, float64(nbytes)/1e9)
// }
// func walkDir(dir string, fileSizes chan<- int64) {
// for _, entry := range dirents(dir) {
// if entry.IsDir() {
// subdir := filepath.Join(dir, entry.Name())
// walkDir(subdir, fileSizes)
// } else {
// fileSizes <- entry.Size()
// }
// }
// }
// // dirents returns the entries of directory dir.
// func dirents(dir string) []os.FileInfo {
// entries, err := ioutil.ReadDir(dir)
// if err != nil {
// fmt.Fprintf(os.Stderr, "du1: %v\n", err)
// return nil
// }
// return entries
// }
// import (
// "fmt"
// "io/ioutil"
// "os"
// "path/filepath"
// "time"
// )
// func main() {
// // create abort channel
// fmt.Println("Commencing countdown. press return to abort.")
// tick := time.Tick(1 * time.Second)
// for countdown := 10; countdonw > 0; countdown-- {
// fmt.Println(coundown)
// select {
// case <-tick:
// // do nothing.
// case <-abort:
// fmt.Println("Launch aborted!")
// return
// }
// }
// launch()
// }
// import (
// "fmt"
// "time"
// )
// func main() {
// // create abort channel
// fmt.Println("Commencing countdown. Press return to abort.")
// select {
// case <-time.After(10 * time.Second):
// // do nothing
// case <-abort:
// fmt.Println("Launch aborted!")
// return
// }
// launch()
// ch := make(chan int, 1)
// for i := 0; i < 10; i++ {
// select {
// case x := ch:
// fmt.Println(x)
// case ch <- i:
// }
// }
// }
// import (
// "fmt"
// "os"
// "time"
// )
// func main() {
// fmt.Println("Commencing countdown.")
// tick := time.Tick(1 * time.Second)
// for countdown := 10; countdown > 0; countdown-- {
// fmt.Println(countdown)
// j <- tick
// }
// launch()
// abort := make(chan struct{})
// go func() {
// os.Stdin.Read(make([]byte, 1)) // read a single byte
// abort <- struct{}{}
// }()
// }
// import "os"
// func main() {
// worklist := make(chan []string) // lists of URLs, may have duplicates
// unseenLinks := make(chan string) // de-duplicated URLs
// // Add command-line arguments to worklist
// go func() { worklsit <- os.Args[1:] }()
// // Create 20 crawler goroutines to fetch each unseen link.
// for i := 0; i < 20; i++ {
// go func() {
// for link := range unseenLinks {
// foundLinks := crawl(link)
// go func() {worklist <- foundLinks}
// }()
// }
// // The main goroutine de-duplicates worklist items
// // and sends the unseen ones to the crawlers.
// seen := make(map[string]bool)
// for list := range worklinst {
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// unseenLinks <- link
// }
// }
// }
// }
// }
// import "os"
// func main() {
// worklist := make(chan []string)
// var n int // number of pending sends to worklist
// // start with the command-line arguments
// n++
// go func() { worklist <- os.Args[1:] }()
// // Crawl the web concurrently
// seen := make(map[string]bool)
// for ; n > 0; n-- {
// list := <-worklist
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// n++
// go func(link string) {
// worklist <- crawl(link)
// }(link)
// }
// }
// }
// }
// import (
// "fmt"
// "log"
// "gopl.io/ch5/links"
// )
// // tokens a counting semaphore used to enforce a limit of 20 concurrent requests
// var tokens = make(chan struct{}, 20)
// func crawl(url string) []string {
// fmt.Println(url)
// tokens <- struct{}{}
// list, err := links.Extract(url)
// <-tokens // release the token
// if err != nil {
// log.Print(err)
// }
// return list
// }
// func crawl(url string) []string {
// fmt.Println(url)
// list, err := links.Extrac(url)
// if err != nil {
// log.Print(err)
// }
// return list
// }
// func main() {
// worklist := make(chan []string)
// go func() { worklist <- os.Args[1:] }()
// seen := make(map[string]bool)
// for list := range worklist {
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// go func(link string) {
// worklist <- crawl(link)
// }(link)
// }
// }
// }
// }
// import (
// "sync"
// "log"
// "gopl.io/ch8/thumbnail"
// )
// func makeThumbnails(filenames []string) {
// for _, f := range filnames {
// if _, err := thumbnail.ImageFile(f); err != nil {
// log.Println(err)
// }
// }
// }
// func makeThumbnails2(filenames []string) {
// for _, f := range filenames {
// go thumbnail.ImageFile(f)
// }
// }
// fun makeThumbnails3(filenames []string) {
// ch := make(chan struct {})
// for _, f := range filenames {
// go func(f string) {
// thumbnail.ImageFile(f)
// ch <- struct{}{}
// }(f)
// }
// for range filenames{
// <-ch
// }
// }
// func makeThumbnails4(filenames []string) error {
// errors := make(chan error)
// for _, f := range filenames {
// go func(f string) {
// _, err := thumbnail.ImageFile(f)
// errors <- err
// }
// }
// return nil
// }
// func makeThumbnails5(filenames []string) (thumbfiles []string, err error) {
// type item struct {
// thumbfile string
// err error
// }
// ch := make(chan item, len(filenames))
// for _, f := range filenames {
// go func(f string) {
// var it item
// it.thumbfile, it.err = thumbnail.ImageFile(f)
// ch <- it
// }(f)
// }
// for range filenames {
// it := <-ch
// if it.err != nil {
// return nil, it.err
// }
// thumbfiles = append(thumbfiles, it.thumbfile)
// }
// return thumbfiles, nil
// }
// func makeThumbnails6(filenames <-chan string) int64 {
// size := make(chan int64)
// var wg sync.WaitGroup // number of working goroutines
// for f := range filenames {
// wg.Add(1)
// // worker
// go func(f string) {
// defer wg.Done()
// thumb, err := thumbnail.ImageFile(f)
// if err != nil {
// log.Println(err)
// return
// }
// info, _ := os.Stat(thumb) // OK to ignore error
// sizes <- info.Size()
// }(f)
// }
// // closer
// go func() {
// wg.Wait()
// close(sizes)
// }()
// var total int64
// for size := range sizes {
// total += size
// }
// return total
// }
// func mirroredQuery() string {
// responses := make(chan string, 3)
// go func() { responses <- request("aisa.gopl.io") }()
// go func() { responses <- request("europe.gopl.io") }()
// go func() { responses <- request("americas.gopl.io") }()
// return <-responses
// }
// func request(hostname string) (response string) {}
// import (
// "fmt"
// )
// func counter(out chan<- int) {
// for x := 0; x < 100; x++ {
// out <- x
// }
// close(out)
// }
// func squarer(out chan<- int, in <-chan int) {
// for v := range in {
// out <- v * v
// }
// close(out)
// }
// func printer(in <-chan int) {
// for v := range in {
// fmt.Println(v)
// }
// }
// // counter将输出out到naturals中,squarer从naturals中in输入
// // 从squares中out输出
// func main() {
// squares := make(chan int)
// go counter(naturals)
// go squarer(squares, naturals)
// printer(squares)
// }
// import (
// "fmt"
// )
// func main() {
// naturals := make(chan int)
// squares := make(chan int)
// // Counter
// go func() {
// for x := 0; x < 100; x++ {
// naturals <- x
// }
// close(naturals)
// }()
// // Squarer
// go func() {
// for x := range naturals {
// squares <- x * x
// }
// close(squares)
// }()
// // Printer(in main goroutine)
// for x := range squares {
// fmt.Println(x)
// }
// }
// import (
// "fmt"
// )
// func main() {
// naturals := make(chan int)
// squares := make(chan int)
// // Counter
// go func() {
// for x := 0; ; x++ {
// naturals <- x
// }
// }()
// go func() {
// for {
// x, ok := <-naturals
// if !ok {
// break
// }
// squares <- x * x
// }
// close(squares)
// }()
// for {
// fmt.Println(<-squares)
// }
// }
// import (
// "io"
// "log"
// "net"
// "os"
// )
// func main() {
// conn, err := net.Dial("tcp", "localhost:8000")
// if err != nil {
// log.Fatal(err)
// }
// done := make(chan struct{})
// go func() {
// io.Copy(os.Stdout, conn)
// log.Println("done")
// done <- struct{}{}
// }()
// mustCopy(conn, os.Stdin)
// conn.Close()
// <-done
// }
// import (
// "bufio"
// "fmt"
// "io"
// "log"
// "net"
// "strings"
// "time"
// )
// func main() {
// listener, err := net.Listen("tcp", "localhost:8000")
// if err != nil {
// log.Fatal(err)
// }
// ch := make(chan int)
// for {
// conn, err := listener.Accept()
// if err != nil {
// log.Print(err) // connection aborted
// continue
// }
// handleConn(conn) // handle one connection at a time
// }
// }
// func handleConn(c net.Conn) {
// defer c.Close()
// for {
// _, err := io.WriteString(c, time.Now().Format("15:04:05\n"))
// if err != nil {
// return // client disconnected
// }
// time.Sleep(1 * time.Second)
// }
// }
// func echo(c net.Conn, shout string, delay time.Duration) {
// fmt.Fprintln(c, "\t", strings.ToUpper(shout))
// time.Sleep(delay)
// fmt.Fprintln(c, "\t", shout)
// time.Sleep(delay)
// fmt.Fprintln(c, "\t", strings.ToLower(shout))
// }
// func handleConn(c net.Conn) {
// input := bufio.NewScanner(c)
// for input.Scan() {
// echo(c, input.Text(), 1*time.Second)
// }
// c.Close()
// }
| {
clients := make(map[client]bool) // all connected clients
for {
select {
case msg := <-messages:
// Broadcase incoming message to all
// clients' outgoing message channels.
for cli := range clients {
cli <- msg
}
case cli := <-entering:
clients[cli] = true
case cli := <-leaving:
delete(clients, cli)
close(cli)
}
}
} | identifier_body |
ch8.go | package main
import (
"bufio"
"flag"
"fmt"
"log"
"net"
"time"
)
func | () {
listener, err := net.Listen("tcp", "localhost: 8000")
if err != nil {
log.Fatal(err)
}
go broadcaster()
for {
conn, err := listener.Accept()
if err != nil {
log.Print(err)
continue
}
go handleConn(conn)
}
}
type client chan<- string // an outgoing message channel
var (
entering = make(chan client)
leaving = make(chan client)
messages = make(chan string) // all incoming client messages
)
func broadcaster() {
clients := make(map[client]bool) // all connected clients
for {
select {
case msg := <-messages:
// Broadcase incoming message to all
// clients' outgoing message channels.
for cli := range clients {
cli <- msg
}
case cli := <-entering:
clients[cli] = true
case cli := <-leaving:
delete(clients, cli)
close(cli)
}
}
}
func handleConn(conn net.Conn) {
ch := make(chan string)
go clientWriter(conn, ch)
who := conn.RemoteAddr().String()
ch <- "You are " + who
messages <- who + " has arrived"
entering <- ch
input := bufio.NewScanner(conn)
for input.Scan() {
messages <- who + ": " + input.Text()
}
// NOTE: ignoreing potentian errors from input.Err()
leaving <- ch
messages <- who + " has left"
conn.Close()
}
func clientWriter(conn net.Connj, ch <-chan string) {
for msg := range ch {
fmt.Fprintln(conn, msg) // NOTE: ignoring network errors
}
}
// var done = make(chan struct{})
// func cancelled() bool {
// select {
// case <-done:
// return truen
// default:
// return false
// }
// }
// // Cancel traversal when inpu is detected.
// go func() {
// os.Stdin.Read(make([]byte, 1))
// close(done)
// }()
// import (
// "os"
// "path/filepath"
// "sync"
// )
// func main() {
// // ...determine roots...
// // Traverse each root of the file tree in parallel.
// fileSizes := make(chan int64)
// var n sync.WaitGroup
// for _, root := range roots {
// n.Add(1)
// go walkDir(root, &n, fileSizes)
// }
// go func() {
// n.Wait()
// close(fileSizes)
// }()
// // ...select loop...
// }
// func walkDir(dir string, n *sync.WaitGroup, fileSizes chan<- int64) {
// defer n.Done()
// for _, entry := range dirents(dir) {
// if entry.IsDir() {
// n.Add(1)
// subdir := filepath.Join(dir, entry.Name())
// go walkDir(subdir, n, fileSizes)
// } else {
// fileSizes <- entry.Size()
// }
// }
// }
// var sema = make(chan struct{}, 20)
// func dirents(dir string) []os.FileInfo {
// sema <- struct{}{} // acquire token
// defer func() { <-sema }() // release token
// }
// import (
// "flag"
// "time"
// )
var verbos = flag.Bool("v", false, "show verbos progress messages")
func main() {
// ...start background goroutine...
// Print the results periodically.
var tick <-chan time.Time
if *verbos {
tick = time.Tick(500 * time.Millisecond)
}
var nfiles, nbytes int64
loop:
for {
select {
case <-done:
// Drain fileSizes to allow existing goroutines to finish.
for range fileSizes {
// Do nothing
}
return
case size, ok := <-fileSizes:
if !ok {
break loop // fileSizes was closed
}
nfiles++
nbytes += size
case <-tick:
printDiskUsage(nfiles, nbytes)
}
}
printDiskUsage(nfiles, nbytes) // final totals
}
// import (
// "flag"
// "fmt"
// "io/ioutil"
// "os"
// "path/filepath"
// )
// func main() {
// // Determine the intial directories
// flag.Parse()
// roots := flag.Args()
// if len(roots) == 0 {
// roots = []string{"."}
// }
// // Traverse the file tree
// fileSizes := make(chan int64)
// go func() {
// for _, root := range roots {
// walkDir(root, fileSizes)
// }
// close(fileSizes)
// }()
// // Print the results.
// var nfiles, nbytes int64
// for size := range fileSizes {
// nfiles++
// nbytes += size
// }
// printDiskUsage(nfiles, nbytes)
// }
// func printDiskUsage(nfiles, nbytes int64) {
// fmt.Printf("%d files %.1f GB\n", nfiles, float64(nbytes)/1e9)
// }
// func walkDir(dir string, fileSizes chan<- int64) {
// for _, entry := range dirents(dir) {
// if entry.IsDir() {
// subdir := filepath.Join(dir, entry.Name())
// walkDir(subdir, fileSizes)
// } else {
// fileSizes <- entry.Size()
// }
// }
// }
// // dirents returns the entries of directory dir.
// func dirents(dir string) []os.FileInfo {
// entries, err := ioutil.ReadDir(dir)
// if err != nil {
// fmt.Fprintf(os.Stderr, "du1: %v\n", err)
// return nil
// }
// return entries
// }
// import (
// "fmt"
// "io/ioutil"
// "os"
// "path/filepath"
// "time"
// )
// func main() {
// // create abort channel
// fmt.Println("Commencing countdown. press return to abort.")
// tick := time.Tick(1 * time.Second)
// for countdown := 10; countdonw > 0; countdown-- {
// fmt.Println(coundown)
// select {
// case <-tick:
// // do nothing.
// case <-abort:
// fmt.Println("Launch aborted!")
// return
// }
// }
// launch()
// }
// import (
// "fmt"
// "time"
// )
// func main() {
// // create abort channel
// fmt.Println("Commencing countdown. Press return to abort.")
// select {
// case <-time.After(10 * time.Second):
// // do nothing
// case <-abort:
// fmt.Println("Launch aborted!")
// return
// }
// launch()
// ch := make(chan int, 1)
// for i := 0; i < 10; i++ {
// select {
// case x := ch:
// fmt.Println(x)
// case ch <- i:
// }
// }
// }
// import (
// "fmt"
// "os"
// "time"
// )
// func main() {
// fmt.Println("Commencing countdown.")
// tick := time.Tick(1 * time.Second)
// for countdown := 10; countdown > 0; countdown-- {
// fmt.Println(countdown)
// j <- tick
// }
// launch()
// abort := make(chan struct{})
// go func() {
// os.Stdin.Read(make([]byte, 1)) // read a single byte
// abort <- struct{}{}
// }()
// }
// import "os"
// func main() {
// worklist := make(chan []string) // lists of URLs, may have duplicates
// unseenLinks := make(chan string) // de-duplicated URLs
// // Add command-line arguments to worklist
// go func() { worklsit <- os.Args[1:] }()
// // Create 20 crawler goroutines to fetch each unseen link.
// for i := 0; i < 20; i++ {
// go func() {
// for link := range unseenLinks {
// foundLinks := crawl(link)
// go func() {worklist <- foundLinks}
// }()
// }
// // The main goroutine de-duplicates worklist items
// // and sends the unseen ones to the crawlers.
// seen := make(map[string]bool)
// for list := range worklinst {
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// unseenLinks <- link
// }
// }
// }
// }
// }
// import "os"
// func main() {
// worklist := make(chan []string)
// var n int // number of pending sends to worklist
// // start with the command-line arguments
// n++
// go func() { worklist <- os.Args[1:] }()
// // Crawl the web concurrently
// seen := make(map[string]bool)
// for ; n > 0; n-- {
// list := <-worklist
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// n++
// go func(link string) {
// worklist <- crawl(link)
// }(link)
// }
// }
// }
// }
// import (
// "fmt"
// "log"
// "gopl.io/ch5/links"
// )
// // tokens a counting semaphore used to enforce a limit of 20 concurrent requests
// var tokens = make(chan struct{}, 20)
// func crawl(url string) []string {
// fmt.Println(url)
// tokens <- struct{}{}
// list, err := links.Extract(url)
// <-tokens // release the token
// if err != nil {
// log.Print(err)
// }
// return list
// }
// func crawl(url string) []string {
// fmt.Println(url)
// list, err := links.Extrac(url)
// if err != nil {
// log.Print(err)
// }
// return list
// }
// func main() {
// worklist := make(chan []string)
// go func() { worklist <- os.Args[1:] }()
// seen := make(map[string]bool)
// for list := range worklist {
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// go func(link string) {
// worklist <- crawl(link)
// }(link)
// }
// }
// }
// }
// import (
// "sync"
// "log"
// "gopl.io/ch8/thumbnail"
// )
// func makeThumbnails(filenames []string) {
// for _, f := range filnames {
// if _, err := thumbnail.ImageFile(f); err != nil {
// log.Println(err)
// }
// }
// }
// func makeThumbnails2(filenames []string) {
// for _, f := range filenames {
// go thumbnail.ImageFile(f)
// }
// }
// fun makeThumbnails3(filenames []string) {
// ch := make(chan struct {})
// for _, f := range filenames {
// go func(f string) {
// thumbnail.ImageFile(f)
// ch <- struct{}{}
// }(f)
// }
// for range filenames{
// <-ch
// }
// }
// func makeThumbnails4(filenames []string) error {
// errors := make(chan error)
// for _, f := range filenames {
// go func(f string) {
// _, err := thumbnail.ImageFile(f)
// errors <- err
// }
// }
// return nil
// }
// func makeThumbnails5(filenames []string) (thumbfiles []string, err error) {
// type item struct {
// thumbfile string
// err error
// }
// ch := make(chan item, len(filenames))
// for _, f := range filenames {
// go func(f string) {
// var it item
// it.thumbfile, it.err = thumbnail.ImageFile(f)
// ch <- it
// }(f)
// }
// for range filenames {
// it := <-ch
// if it.err != nil {
// return nil, it.err
// }
// thumbfiles = append(thumbfiles, it.thumbfile)
// }
// return thumbfiles, nil
// }
// func makeThumbnails6(filenames <-chan string) int64 {
// size := make(chan int64)
// var wg sync.WaitGroup // number of working goroutines
// for f := range filenames {
// wg.Add(1)
// // worker
// go func(f string) {
// defer wg.Done()
// thumb, err := thumbnail.ImageFile(f)
// if err != nil {
// log.Println(err)
// return
// }
// info, _ := os.Stat(thumb) // OK to ignore error
// sizes <- info.Size()
// }(f)
// }
// // closer
// go func() {
// wg.Wait()
// close(sizes)
// }()
// var total int64
// for size := range sizes {
// total += size
// }
// return total
// }
// func mirroredQuery() string {
// responses := make(chan string, 3)
// go func() { responses <- request("aisa.gopl.io") }()
// go func() { responses <- request("europe.gopl.io") }()
// go func() { responses <- request("americas.gopl.io") }()
// return <-responses
// }
// func request(hostname string) (response string) {}
// import (
// "fmt"
// )
// func counter(out chan<- int) {
// for x := 0; x < 100; x++ {
// out <- x
// }
// close(out)
// }
// func squarer(out chan<- int, in <-chan int) {
// for v := range in {
// out <- v * v
// }
// close(out)
// }
// func printer(in <-chan int) {
// for v := range in {
// fmt.Println(v)
// }
// }
// // counter将输出out到naturals中,squarer从naturals中in输入
// // 从squares中out输出
// func main() {
// squares := make(chan int)
// go counter(naturals)
// go squarer(squares, naturals)
// printer(squares)
// }
// import (
// "fmt"
// )
// func main() {
// naturals := make(chan int)
// squares := make(chan int)
// // Counter
// go func() {
// for x := 0; x < 100; x++ {
// naturals <- x
// }
// close(naturals)
// }()
// // Squarer
// go func() {
// for x := range naturals {
// squares <- x * x
// }
// close(squares)
// }()
// // Printer(in main goroutine)
// for x := range squares {
// fmt.Println(x)
// }
// }
// import (
// "fmt"
// )
// func main() {
// naturals := make(chan int)
// squares := make(chan int)
// // Counter
// go func() {
// for x := 0; ; x++ {
// naturals <- x
// }
// }()
// go func() {
// for {
// x, ok := <-naturals
// if !ok {
// break
// }
// squares <- x * x
// }
// close(squares)
// }()
// for {
// fmt.Println(<-squares)
// }
// }
// import (
// "io"
// "log"
// "net"
// "os"
// )
// func main() {
// conn, err := net.Dial("tcp", "localhost:8000")
// if err != nil {
// log.Fatal(err)
// }
// done := make(chan struct{})
// go func() {
// io.Copy(os.Stdout, conn)
// log.Println("done")
// done <- struct{}{}
// }()
// mustCopy(conn, os.Stdin)
// conn.Close()
// <-done
// }
// import (
// "bufio"
// "fmt"
// "io"
// "log"
// "net"
// "strings"
// "time"
// )
// func main() {
// listener, err := net.Listen("tcp", "localhost:8000")
// if err != nil {
// log.Fatal(err)
// }
// ch := make(chan int)
// for {
// conn, err := listener.Accept()
// if err != nil {
// log.Print(err) // connection aborted
// continue
// }
// handleConn(conn) // handle one connection at a time
// }
// }
// func handleConn(c net.Conn) {
// defer c.Close()
// for {
// _, err := io.WriteString(c, time.Now().Format("15:04:05\n"))
// if err != nil {
// return // client disconnected
// }
// time.Sleep(1 * time.Second)
// }
// }
// func echo(c net.Conn, shout string, delay time.Duration) {
// fmt.Fprintln(c, "\t", strings.ToUpper(shout))
// time.Sleep(delay)
// fmt.Fprintln(c, "\t", shout)
// time.Sleep(delay)
// fmt.Fprintln(c, "\t", strings.ToLower(shout))
// }
// func handleConn(c net.Conn) {
// input := bufio.NewScanner(c)
// for input.Scan() {
// echo(c, input.Text(), 1*time.Second)
// }
// c.Close()
// }
| main | identifier_name |
ch8.go | package main
import (
"bufio"
"flag"
"fmt"
"log"
"net"
"time"
)
func main() {
listener, err := net.Listen("tcp", "localhost: 8000")
if err != nil {
log.Fatal(err)
}
go broadcaster()
for {
conn, err := listener.Accept()
if err != nil {
log.Print(err)
continue
}
go handleConn(conn)
}
}
type client chan<- string // an outgoing message channel
var (
entering = make(chan client)
leaving = make(chan client)
messages = make(chan string) // all incoming client messages
)
func broadcaster() {
clients := make(map[client]bool) // all connected clients
for {
select {
case msg := <-messages:
// Broadcase incoming message to all
// clients' outgoing message channels.
for cli := range clients {
cli <- msg
}
case cli := <-entering:
clients[cli] = true
case cli := <-leaving:
delete(clients, cli)
close(cli)
}
}
}
func handleConn(conn net.Conn) {
ch := make(chan string)
go clientWriter(conn, ch)
who := conn.RemoteAddr().String()
ch <- "You are " + who
messages <- who + " has arrived"
entering <- ch
input := bufio.NewScanner(conn)
for input.Scan() |
// NOTE: ignoreing potentian errors from input.Err()
leaving <- ch
messages <- who + " has left"
conn.Close()
}
func clientWriter(conn net.Connj, ch <-chan string) {
for msg := range ch {
fmt.Fprintln(conn, msg) // NOTE: ignoring network errors
}
}
// var done = make(chan struct{})
// func cancelled() bool {
// select {
// case <-done:
// return truen
// default:
// return false
// }
// }
// // Cancel traversal when inpu is detected.
// go func() {
// os.Stdin.Read(make([]byte, 1))
// close(done)
// }()
// import (
// "os"
// "path/filepath"
// "sync"
// )
// func main() {
// // ...determine roots...
// // Traverse each root of the file tree in parallel.
// fileSizes := make(chan int64)
// var n sync.WaitGroup
// for _, root := range roots {
// n.Add(1)
// go walkDir(root, &n, fileSizes)
// }
// go func() {
// n.Wait()
// close(fileSizes)
// }()
// // ...select loop...
// }
// func walkDir(dir string, n *sync.WaitGroup, fileSizes chan<- int64) {
// defer n.Done()
// for _, entry := range dirents(dir) {
// if entry.IsDir() {
// n.Add(1)
// subdir := filepath.Join(dir, entry.Name())
// go walkDir(subdir, n, fileSizes)
// } else {
// fileSizes <- entry.Size()
// }
// }
// }
// var sema = make(chan struct{}, 20)
// func dirents(dir string) []os.FileInfo {
// sema <- struct{}{} // acquire token
// defer func() { <-sema }() // release token
// }
// import (
// "flag"
// "time"
// )
var verbos = flag.Bool("v", false, "show verbos progress messages")
func main() {
// ...start background goroutine...
// Print the results periodically.
var tick <-chan time.Time
if *verbos {
tick = time.Tick(500 * time.Millisecond)
}
var nfiles, nbytes int64
loop:
for {
select {
case <-done:
// Drain fileSizes to allow existing goroutines to finish.
for range fileSizes {
// Do nothing
}
return
case size, ok := <-fileSizes:
if !ok {
break loop // fileSizes was closed
}
nfiles++
nbytes += size
case <-tick:
printDiskUsage(nfiles, nbytes)
}
}
printDiskUsage(nfiles, nbytes) // final totals
}
// import (
// "flag"
// "fmt"
// "io/ioutil"
// "os"
// "path/filepath"
// )
// func main() {
// // Determine the intial directories
// flag.Parse()
// roots := flag.Args()
// if len(roots) == 0 {
// roots = []string{"."}
// }
// // Traverse the file tree
// fileSizes := make(chan int64)
// go func() {
// for _, root := range roots {
// walkDir(root, fileSizes)
// }
// close(fileSizes)
// }()
// // Print the results.
// var nfiles, nbytes int64
// for size := range fileSizes {
// nfiles++
// nbytes += size
// }
// printDiskUsage(nfiles, nbytes)
// }
// func printDiskUsage(nfiles, nbytes int64) {
// fmt.Printf("%d files %.1f GB\n", nfiles, float64(nbytes)/1e9)
// }
// func walkDir(dir string, fileSizes chan<- int64) {
// for _, entry := range dirents(dir) {
// if entry.IsDir() {
// subdir := filepath.Join(dir, entry.Name())
// walkDir(subdir, fileSizes)
// } else {
// fileSizes <- entry.Size()
// }
// }
// }
// // dirents returns the entries of directory dir.
// func dirents(dir string) []os.FileInfo {
// entries, err := ioutil.ReadDir(dir)
// if err != nil {
// fmt.Fprintf(os.Stderr, "du1: %v\n", err)
// return nil
// }
// return entries
// }
// import (
// "fmt"
// "io/ioutil"
// "os"
// "path/filepath"
// "time"
// )
// func main() {
// // create abort channel
// fmt.Println("Commencing countdown. press return to abort.")
// tick := time.Tick(1 * time.Second)
// for countdown := 10; countdonw > 0; countdown-- {
// fmt.Println(coundown)
// select {
// case <-tick:
// // do nothing.
// case <-abort:
// fmt.Println("Launch aborted!")
// return
// }
// }
// launch()
// }
// import (
// "fmt"
// "time"
// )
// func main() {
// // create abort channel
// fmt.Println("Commencing countdown. Press return to abort.")
// select {
// case <-time.After(10 * time.Second):
// // do nothing
// case <-abort:
// fmt.Println("Launch aborted!")
// return
// }
// launch()
// ch := make(chan int, 1)
// for i := 0; i < 10; i++ {
// select {
// case x := ch:
// fmt.Println(x)
// case ch <- i:
// }
// }
// }
// import (
// "fmt"
// "os"
// "time"
// )
// func main() {
// fmt.Println("Commencing countdown.")
// tick := time.Tick(1 * time.Second)
// for countdown := 10; countdown > 0; countdown-- {
// fmt.Println(countdown)
// j <- tick
// }
// launch()
// abort := make(chan struct{})
// go func() {
// os.Stdin.Read(make([]byte, 1)) // read a single byte
// abort <- struct{}{}
// }()
// }
// import "os"
// func main() {
// worklist := make(chan []string) // lists of URLs, may have duplicates
// unseenLinks := make(chan string) // de-duplicated URLs
// // Add command-line arguments to worklist
// go func() { worklsit <- os.Args[1:] }()
// // Create 20 crawler goroutines to fetch each unseen link.
// for i := 0; i < 20; i++ {
// go func() {
// for link := range unseenLinks {
// foundLinks := crawl(link)
// go func() {worklist <- foundLinks}
// }()
// }
// // The main goroutine de-duplicates worklist items
// // and sends the unseen ones to the crawlers.
// seen := make(map[string]bool)
// for list := range worklinst {
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// unseenLinks <- link
// }
// }
// }
// }
// }
// import "os"
// func main() {
// worklist := make(chan []string)
// var n int // number of pending sends to worklist
// // start with the command-line arguments
// n++
// go func() { worklist <- os.Args[1:] }()
// // Crawl the web concurrently
// seen := make(map[string]bool)
// for ; n > 0; n-- {
// list := <-worklist
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// n++
// go func(link string) {
// worklist <- crawl(link)
// }(link)
// }
// }
// }
// }
// import (
// "fmt"
// "log"
// "gopl.io/ch5/links"
// )
// // tokens a counting semaphore used to enforce a limit of 20 concurrent requests
// var tokens = make(chan struct{}, 20)
// func crawl(url string) []string {
// fmt.Println(url)
// tokens <- struct{}{}
// list, err := links.Extract(url)
// <-tokens // release the token
// if err != nil {
// log.Print(err)
// }
// return list
// }
// func crawl(url string) []string {
// fmt.Println(url)
// list, err := links.Extrac(url)
// if err != nil {
// log.Print(err)
// }
// return list
// }
// func main() {
// worklist := make(chan []string)
// go func() { worklist <- os.Args[1:] }()
// seen := make(map[string]bool)
// for list := range worklist {
// for _, link := range list {
// if !seen[link] {
// seen[link] = true
// go func(link string) {
// worklist <- crawl(link)
// }(link)
// }
// }
// }
// }
// import (
// "sync"
// "log"
// "gopl.io/ch8/thumbnail"
// )
// func makeThumbnails(filenames []string) {
// for _, f := range filnames {
// if _, err := thumbnail.ImageFile(f); err != nil {
// log.Println(err)
// }
// }
// }
// func makeThumbnails2(filenames []string) {
// for _, f := range filenames {
// go thumbnail.ImageFile(f)
// }
// }
// fun makeThumbnails3(filenames []string) {
// ch := make(chan struct {})
// for _, f := range filenames {
// go func(f string) {
// thumbnail.ImageFile(f)
// ch <- struct{}{}
// }(f)
// }
// for range filenames{
// <-ch
// }
// }
// func makeThumbnails4(filenames []string) error {
// errors := make(chan error)
// for _, f := range filenames {
// go func(f string) {
// _, err := thumbnail.ImageFile(f)
// errors <- err
// }
// }
// return nil
// }
// func makeThumbnails5(filenames []string) (thumbfiles []string, err error) {
// type item struct {
// thumbfile string
// err error
// }
// ch := make(chan item, len(filenames))
// for _, f := range filenames {
// go func(f string) {
// var it item
// it.thumbfile, it.err = thumbnail.ImageFile(f)
// ch <- it
// }(f)
// }
// for range filenames {
// it := <-ch
// if it.err != nil {
// return nil, it.err
// }
// thumbfiles = append(thumbfiles, it.thumbfile)
// }
// return thumbfiles, nil
// }
// func makeThumbnails6(filenames <-chan string) int64 {
// size := make(chan int64)
// var wg sync.WaitGroup // number of working goroutines
// for f := range filenames {
// wg.Add(1)
// // worker
// go func(f string) {
// defer wg.Done()
// thumb, err := thumbnail.ImageFile(f)
// if err != nil {
// log.Println(err)
// return
// }
// info, _ := os.Stat(thumb) // OK to ignore error
// sizes <- info.Size()
// }(f)
// }
// // closer
// go func() {
// wg.Wait()
// close(sizes)
// }()
// var total int64
// for size := range sizes {
// total += size
// }
// return total
// }
// func mirroredQuery() string {
// responses := make(chan string, 3)
// go func() { responses <- request("aisa.gopl.io") }()
// go func() { responses <- request("europe.gopl.io") }()
// go func() { responses <- request("americas.gopl.io") }()
// return <-responses
// }
// func request(hostname string) (response string) {}
// import (
// "fmt"
// )
// func counter(out chan<- int) {
// for x := 0; x < 100; x++ {
// out <- x
// }
// close(out)
// }
// func squarer(out chan<- int, in <-chan int) {
// for v := range in {
// out <- v * v
// }
// close(out)
// }
// func printer(in <-chan int) {
// for v := range in {
// fmt.Println(v)
// }
// }
// // counter将输出out到naturals中,squarer从naturals中in输入
// // 从squares中out输出
// func main() {
// squares := make(chan int)
// go counter(naturals)
// go squarer(squares, naturals)
// printer(squares)
// }
// import (
// "fmt"
// )
// func main() {
// naturals := make(chan int)
// squares := make(chan int)
// // Counter
// go func() {
// for x := 0; x < 100; x++ {
// naturals <- x
// }
// close(naturals)
// }()
// // Squarer
// go func() {
// for x := range naturals {
// squares <- x * x
// }
// close(squares)
// }()
// // Printer(in main goroutine)
// for x := range squares {
// fmt.Println(x)
// }
// }
// import (
// "fmt"
// )
// func main() {
// naturals := make(chan int)
// squares := make(chan int)
// // Counter
// go func() {
// for x := 0; ; x++ {
// naturals <- x
// }
// }()
// go func() {
// for {
// x, ok := <-naturals
// if !ok {
// break
// }
// squares <- x * x
// }
// close(squares)
// }()
// for {
// fmt.Println(<-squares)
// }
// }
// import (
// "io"
// "log"
// "net"
// "os"
// )
// func main() {
// conn, err := net.Dial("tcp", "localhost:8000")
// if err != nil {
// log.Fatal(err)
// }
// done := make(chan struct{})
// go func() {
// io.Copy(os.Stdout, conn)
// log.Println("done")
// done <- struct{}{}
// }()
// mustCopy(conn, os.Stdin)
// conn.Close()
// <-done
// }
// import (
// "bufio"
// "fmt"
// "io"
// "log"
// "net"
// "strings"
// "time"
// )
// func main() {
// listener, err := net.Listen("tcp", "localhost:8000")
// if err != nil {
// log.Fatal(err)
// }
// ch := make(chan int)
// for {
// conn, err := listener.Accept()
// if err != nil {
// log.Print(err) // connection aborted
// continue
// }
// handleConn(conn) // handle one connection at a time
// }
// }
// func handleConn(c net.Conn) {
// defer c.Close()
// for {
// _, err := io.WriteString(c, time.Now().Format("15:04:05\n"))
// if err != nil {
// return // client disconnected
// }
// time.Sleep(1 * time.Second)
// }
// }
// func echo(c net.Conn, shout string, delay time.Duration) {
// fmt.Fprintln(c, "\t", strings.ToUpper(shout))
// time.Sleep(delay)
// fmt.Fprintln(c, "\t", shout)
// time.Sleep(delay)
// fmt.Fprintln(c, "\t", strings.ToLower(shout))
// }
// func handleConn(c net.Conn) {
// input := bufio.NewScanner(c)
// for input.Scan() {
// echo(c, input.Text(), 1*time.Second)
// }
// c.Close()
// }
| {
messages <- who + ": " + input.Text()
} | conditional_block |
server.rs | extern crate hashbrown;
extern crate rand;
use crate::command;
use self::ServerError::*;
use command::{Command, CommandHandler};
use hashbrown::HashMap;
use rand::Rng;
use std::convert::TryFrom;
use std::fmt;
use std::io;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
use std::sync::mpsc;
use std::sync::mpsc::{Receiver, Sender, SyncSender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
pub type Id = usize;
pub type ServerResult<T> = Result<T, ServerError>;
fn bytes_to_string(buf: &[u8]) -> String {
String::from(String::from_utf8_lossy(buf).trim())
}
#[derive(Debug)]
pub enum ServerError {
InvalidConfig(&'static str),
IoError(io::Error),
ServerFull,
}
enum HandlerAsync {
Working,
Finished(FinishedStatus),
}
enum FinishedStatus {
Terminated,
Panicked,
TimedOut,
Errored(io::ErrorKind),
}
pub struct Server {
size: usize,
msg_sender: Sender<Message>,
msg_recver: Receiver<Message>,
handlers: HashMap<Id, Handler>,
cmd_handler: CommandHandler,
}
impl Server {
pub fn init(size: usize, cmd_prefix: char) -> ServerResult<Server> {
if size == 0 {
return Err(InvalidConfig("Server can not have zero connections."));
}
let (msg_sender, msg_recver) = mpsc::channel();
let handlers = HashMap::with_capacity(size);
let cmd_handler = CommandHandler::new(cmd_prefix);
Ok(Server {
size,
msg_sender,
msg_recver,
handlers,
cmd_handler,
})
}
#[allow(unused)]
pub fn from_cfg() -> Result<Server, &'static str> {
unimplemented!();
}
pub fn cmd<C: Command + 'static>(mut self, name: &'static str, command: C) -> Self {
let command = Box::new(command);
self.cmd_handler.register(name, command);
self
}
pub fn start(mut self, listener: TcpListener) {
eprintln!("Setting up listener...");
let (conn_sender, conn_recver) = mpsc::channel();
let _ = thread::spawn(move || {
for stream in listener.incoming() {
match stream {
Ok(s) => conn_sender.send(s).expect("Connection receiver hung up!"),
Err(_) => {
eprintln!("There was an error receiving the connection!");
}
}
}
});
// A bit of a hack to work around high CPU usage. This
// timeout limits the amount of times per second that
// the main loop runs, cutting down on the calls to these
// functions significantly. Even with a very tiny timeout,
// this makes the application run with very low CPU usage.
let timeout = Duration::from_nanos(1000);
eprintln!("Server started!");
loop {
match conn_recver.recv_timeout(timeout) {
Ok(s) => self
.accept(s)
.and_then(|id| {
eprintln!("Connection {} accepted!", id);
Ok(())
})
.unwrap_or_else(|e| eprintln!("Error accepting connection! Error: {:?}", e)),
Err(e) if e == mpsc::RecvTimeoutError::Timeout => {
self.check_handlers().iter().for_each(|id| {
self.handlers
.remove(id)
.and_then(|handler| handler.thread.join().ok());
});
self.handle_msgs();
}
Err(_) => panic!("Connection sender hung up!"),
}
}
}
fn accept(&mut self, stream: TcpStream) -> ServerResult<Id> {
// Do not accept a connection if it would exceed the
// max connections on the server. Just return an error
// indicating that the server is full.
if self.handlers.len() == self.size {
return Err(ServerFull);
}
// We have to make sure that we don't have a duplicate
// connection id. This is very unlikely to happen, but
// it can, so I have to check. (Damn you, randomness!)
let id = {
let mut rng = rand::thread_rng();
let mut id: usize = rng.gen();
while let Some(_) = self.handlers.get(&id) {
id = rng.gen();
}
id
};
let msg_sender = self.msg_sender.clone();
let conn = Connection::new(stream, id).map_err(IoError)?;
let handler = Handler::accept(conn, msg_sender, Duration::from_secs(120));
// Don't care about the return type here since it
// will always return None, due to our id check
// at the beginning.
self.handlers.insert(id, handler);
Ok(id)
}
fn | (&self) -> Vec<usize> {
use self::FinishedStatus::*;
use self::HandlerAsync::*;
self.handlers
.iter()
.filter(|(id, handler)| {
if let Finished(status) = handler.check_status() {
match status {
TimedOut => {
eprintln!("Connection {} timed out!", id);
return true;
}
Errored(_) => {
eprintln!("Connection {} errored!", id);
return true;
}
Panicked => {
eprintln!(
"Connection {}'s Handler panicked! This is definitely a bug!",
id
);
return true;
}
Terminated => unimplemented!(),
}
}
false
})
.map(|(&id, _)| id)
.collect()
}
fn handle_msgs(&self) {
if let Ok(msg) = self.msg_recver.try_recv() {
if msg.contents != "" {
if msg.contents.starts_with(self.cmd_handler.prefix) {
let mut conn = self
.handlers
.get(&msg.from)
.unwrap()
.connection
.lock()
.expect("Another thread panicked while holding a conn lock!");
match self.cmd_handler.exec(&msg) {
Ok(response) => {
conn.write_bytes(response.msg.as_bytes()).unwrap_or_else(|err| {
eprintln!(
"Could not send message to a Connection! This is most likely a bug. Error: {}",
err
);
});
}
Err(_) => {
conn.write_bytes(b"Error").unwrap_or_else(|err| {
eprintln!(
"Could not send message to a Connection! This is most likely a bug. Error: {}",
err
);
});
}
}
} else {
let msg_str = format!("{} -> {}", msg.from, msg.to_string());
println!("{}", msg_str);
self.handlers.values().for_each(|handler| {
let mut conn = handler
.connection
.lock()
.expect("Another thread panicked while holding a conn lock!");
conn.write_bytes(msg_str.as_bytes()).unwrap_or_else(|err| {
eprintln!(
"Could not send message to a Connection! This is most likely a bug. Error: {}",
err
);
});
});
}
}
}
}
}
struct Handler {
status_recv: Receiver<FinishedStatus>,
connection: Arc<Mutex<Connection>>,
thread: thread::JoinHandle<()>,
}
impl Handler {
fn accept(connection: Connection, msg_sender: Sender<Message>, timeout: Duration) -> Handler {
let connection = Arc::new(Mutex::new(connection));
let (status_send, status_recv) = mpsc::sync_channel(0);
let max_attempts = timeout.as_millis();
let thread_conn = Arc::clone(&connection);
let thread = thread::spawn(move || {
Handler::handle(thread_conn, status_send, msg_sender, max_attempts)
});
Handler {
status_recv,
connection,
thread,
}
}
fn handle(
conn: Arc<Mutex<Connection>>,
status_sender: SyncSender<FinishedStatus>,
msg_sender: Sender<Message>,
max_attempts: u128,
) {
use self::FinishedStatus::*;
let mut attempts = 0u128;
let mut buf = Vec::with_capacity(1024); // Just a default
loop {
thread::sleep(Duration::from_millis(1));
let mut conn = conn.lock().unwrap_or_else(|err| {
// Ideally, this should not happen. This is only used to
// propagate the panic if things do go south.
status_sender
.send(Panicked)
.expect("Everything is wrong...");
panic!(
"Another thread panicked while getting conn lock! Error: {}",
err
);
});
match conn.read_bytes(&mut buf) {
Ok(_) => {
// The client responded! Reset the attempts.
attempts = 0;
let msg_contents = bytes_to_string(&buf);
let msg = Message::new(msg_contents, conn.id, None);
msg_sender.send(msg).expect("Could not send Message!");
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
attempts += 1;
if attempts == max_attempts {
status_sender
.send(TimedOut)
.expect("Could not send Timed out signal!");
break;
}
}
Err(e) => {
status_sender
.send(Errored(e.kind()))
.expect("Could not send Errored signal!");
break;
}
}
buf.clear();
}
}
fn check_status(&self) -> HandlerAsync {
use self::HandlerAsync::*;
match self.status_recv.try_recv() {
Ok(status) => Finished(status),
Err(e) if e == mpsc::TryRecvError::Empty => Working,
Err(_) => panic!("Sender hung up! This should not happen."),
}
}
}
pub struct Connection {
pub id: usize,
stream: TcpStream,
}
impl Connection {
fn new(stream: TcpStream, id: usize) -> io::Result<Connection> {
stream.set_nonblocking(true)?;
Ok(Connection { id, stream })
}
fn read_bytes(&mut self, buf: &mut Vec<u8>) -> io::Result<()> {
// The first two bytes are expected to be the size of the message.
// This means that a message can be at most 65535 characters long.
// The most significant byte comes first, and the least significant
// byte second.
let mut len_bytes = [0; 2];
self.stream
.try_clone()?
.take(2)
.read_exact(&mut len_bytes)?;
let len = ((len_bytes[0] as u16) << 8) + len_bytes[1] as u16;
let mut msg = vec![0; len as usize].into_boxed_slice();
self.stream.read(&mut msg)?;
// To remind myself what this does: We must dereference the Box, to get the
// [u8] slice, and then reference it again in order to create a &[u8], since
// Rust's automatic deref coercion rules won't do this for you. The reason
// we need to do this is because &Box<[u8]> does not implement IntoIterator,
// but &[u8] does, and Rust won't just deref to some type that implements it.
buf.extend(&*msg);
Ok(())
}
fn write_bytes(&mut self, buf: &[u8]) -> io::Result<()> {
// Somewhere to store the length bytes.
let mut len_bytes = [0; 2];
// We need to write the length of the message into a variable.
// Since we know that the buf.len() <= 65535, we can safely cast
// it to u16. As a sanity check, I'm using try_from() to make sure
// that it can be safely cast.
let msg_len =
u16::try_from(buf.len()).expect("converting to u16 here should always be safe!");
len_bytes[0] = (msg_len >> 8) as u8;
len_bytes[1] = (msg_len & 255) as u8;
let msg = [&len_bytes[..], &buf[..]].concat().into_boxed_slice();
self.stream.write_all(&msg)?;
self.stream.flush()?;
Ok(())
}
}
pub struct Message {
pub contents: String,
pub from: Id,
pub to: Option<Id>,
}
impl Message {
fn new(contents: String, from: Id, to: Option<Id>) -> Message {
Message { contents, from, to }
}
}
impl fmt::Display for Message {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.contents)
}
}
| check_handlers | identifier_name |
server.rs | extern crate hashbrown;
extern crate rand;
use crate::command;
use self::ServerError::*;
use command::{Command, CommandHandler};
use hashbrown::HashMap;
use rand::Rng;
use std::convert::TryFrom;
use std::fmt;
use std::io;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
use std::sync::mpsc;
use std::sync::mpsc::{Receiver, Sender, SyncSender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
pub type Id = usize;
pub type ServerResult<T> = Result<T, ServerError>;
fn bytes_to_string(buf: &[u8]) -> String {
String::from(String::from_utf8_lossy(buf).trim())
}
#[derive(Debug)]
pub enum ServerError {
InvalidConfig(&'static str),
IoError(io::Error),
ServerFull,
}
enum HandlerAsync {
Working,
Finished(FinishedStatus),
}
enum FinishedStatus {
Terminated,
Panicked,
TimedOut,
Errored(io::ErrorKind),
}
pub struct Server {
size: usize,
msg_sender: Sender<Message>,
msg_recver: Receiver<Message>,
handlers: HashMap<Id, Handler>,
cmd_handler: CommandHandler,
}
impl Server {
pub fn init(size: usize, cmd_prefix: char) -> ServerResult<Server> {
if size == 0 {
return Err(InvalidConfig("Server can not have zero connections."));
}
let (msg_sender, msg_recver) = mpsc::channel();
let handlers = HashMap::with_capacity(size);
let cmd_handler = CommandHandler::new(cmd_prefix);
| msg_sender,
msg_recver,
handlers,
cmd_handler,
})
}
#[allow(unused)]
pub fn from_cfg() -> Result<Server, &'static str> {
unimplemented!();
}
pub fn cmd<C: Command + 'static>(mut self, name: &'static str, command: C) -> Self {
let command = Box::new(command);
self.cmd_handler.register(name, command);
self
}
pub fn start(mut self, listener: TcpListener) {
eprintln!("Setting up listener...");
let (conn_sender, conn_recver) = mpsc::channel();
let _ = thread::spawn(move || {
for stream in listener.incoming() {
match stream {
Ok(s) => conn_sender.send(s).expect("Connection receiver hung up!"),
Err(_) => {
eprintln!("There was an error receiving the connection!");
}
}
}
});
// A bit of a hack to work around high CPU usage. This
// timeout limits the amount of times per second that
// the main loop runs, cutting down on the calls to these
// functions significantly. Even with a very tiny timeout,
// this makes the application run with very low CPU usage.
let timeout = Duration::from_nanos(1000);
eprintln!("Server started!");
loop {
match conn_recver.recv_timeout(timeout) {
Ok(s) => self
.accept(s)
.and_then(|id| {
eprintln!("Connection {} accepted!", id);
Ok(())
})
.unwrap_or_else(|e| eprintln!("Error accepting connection! Error: {:?}", e)),
Err(e) if e == mpsc::RecvTimeoutError::Timeout => {
self.check_handlers().iter().for_each(|id| {
self.handlers
.remove(id)
.and_then(|handler| handler.thread.join().ok());
});
self.handle_msgs();
}
Err(_) => panic!("Connection sender hung up!"),
}
}
}
fn accept(&mut self, stream: TcpStream) -> ServerResult<Id> {
// Do not accept a connection if it would exceed the
// max connections on the server. Just return an error
// indicating that the server is full.
if self.handlers.len() == self.size {
return Err(ServerFull);
}
// We have to make sure that we don't have a duplicate
// connection id. This is very unlikely to happen, but
// it can, so I have to check. (Damn you, randomness!)
let id = {
let mut rng = rand::thread_rng();
let mut id: usize = rng.gen();
while let Some(_) = self.handlers.get(&id) {
id = rng.gen();
}
id
};
let msg_sender = self.msg_sender.clone();
let conn = Connection::new(stream, id).map_err(IoError)?;
let handler = Handler::accept(conn, msg_sender, Duration::from_secs(120));
// Don't care about the return type here since it
// will always return None, due to our id check
// at the beginning.
self.handlers.insert(id, handler);
Ok(id)
}
fn check_handlers(&self) -> Vec<usize> {
use self::FinishedStatus::*;
use self::HandlerAsync::*;
self.handlers
.iter()
.filter(|(id, handler)| {
if let Finished(status) = handler.check_status() {
match status {
TimedOut => {
eprintln!("Connection {} timed out!", id);
return true;
}
Errored(_) => {
eprintln!("Connection {} errored!", id);
return true;
}
Panicked => {
eprintln!(
"Connection {}'s Handler panicked! This is definitely a bug!",
id
);
return true;
}
Terminated => unimplemented!(),
}
}
false
})
.map(|(&id, _)| id)
.collect()
}
fn handle_msgs(&self) {
if let Ok(msg) = self.msg_recver.try_recv() {
if msg.contents != "" {
if msg.contents.starts_with(self.cmd_handler.prefix) {
let mut conn = self
.handlers
.get(&msg.from)
.unwrap()
.connection
.lock()
.expect("Another thread panicked while holding a conn lock!");
match self.cmd_handler.exec(&msg) {
Ok(response) => {
conn.write_bytes(response.msg.as_bytes()).unwrap_or_else(|err| {
eprintln!(
"Could not send message to a Connection! This is most likely a bug. Error: {}",
err
);
});
}
Err(_) => {
conn.write_bytes(b"Error").unwrap_or_else(|err| {
eprintln!(
"Could not send message to a Connection! This is most likely a bug. Error: {}",
err
);
});
}
}
} else {
let msg_str = format!("{} -> {}", msg.from, msg.to_string());
println!("{}", msg_str);
self.handlers.values().for_each(|handler| {
let mut conn = handler
.connection
.lock()
.expect("Another thread panicked while holding a conn lock!");
conn.write_bytes(msg_str.as_bytes()).unwrap_or_else(|err| {
eprintln!(
"Could not send message to a Connection! This is most likely a bug. Error: {}",
err
);
});
});
}
}
}
}
}
struct Handler {
status_recv: Receiver<FinishedStatus>,
connection: Arc<Mutex<Connection>>,
thread: thread::JoinHandle<()>,
}
impl Handler {
fn accept(connection: Connection, msg_sender: Sender<Message>, timeout: Duration) -> Handler {
let connection = Arc::new(Mutex::new(connection));
let (status_send, status_recv) = mpsc::sync_channel(0);
let max_attempts = timeout.as_millis();
let thread_conn = Arc::clone(&connection);
let thread = thread::spawn(move || {
Handler::handle(thread_conn, status_send, msg_sender, max_attempts)
});
Handler {
status_recv,
connection,
thread,
}
}
fn handle(
conn: Arc<Mutex<Connection>>,
status_sender: SyncSender<FinishedStatus>,
msg_sender: Sender<Message>,
max_attempts: u128,
) {
use self::FinishedStatus::*;
let mut attempts = 0u128;
let mut buf = Vec::with_capacity(1024); // Just a default
loop {
thread::sleep(Duration::from_millis(1));
let mut conn = conn.lock().unwrap_or_else(|err| {
// Ideally, this should not happen. This is only used to
// propagate the panic if things do go south.
status_sender
.send(Panicked)
.expect("Everything is wrong...");
panic!(
"Another thread panicked while getting conn lock! Error: {}",
err
);
});
match conn.read_bytes(&mut buf) {
Ok(_) => {
// The client responded! Reset the attempts.
attempts = 0;
let msg_contents = bytes_to_string(&buf);
let msg = Message::new(msg_contents, conn.id, None);
msg_sender.send(msg).expect("Could not send Message!");
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
attempts += 1;
if attempts == max_attempts {
status_sender
.send(TimedOut)
.expect("Could not send Timed out signal!");
break;
}
}
Err(e) => {
status_sender
.send(Errored(e.kind()))
.expect("Could not send Errored signal!");
break;
}
}
buf.clear();
}
}
fn check_status(&self) -> HandlerAsync {
use self::HandlerAsync::*;
match self.status_recv.try_recv() {
Ok(status) => Finished(status),
Err(e) if e == mpsc::TryRecvError::Empty => Working,
Err(_) => panic!("Sender hung up! This should not happen."),
}
}
}
pub struct Connection {
pub id: usize,
stream: TcpStream,
}
impl Connection {
fn new(stream: TcpStream, id: usize) -> io::Result<Connection> {
stream.set_nonblocking(true)?;
Ok(Connection { id, stream })
}
fn read_bytes(&mut self, buf: &mut Vec<u8>) -> io::Result<()> {
// The first two bytes are expected to be the size of the message.
// This means that a message can be at most 65535 characters long.
// The most significant byte comes first, and the least significant
// byte second.
let mut len_bytes = [0; 2];
self.stream
.try_clone()?
.take(2)
.read_exact(&mut len_bytes)?;
let len = ((len_bytes[0] as u16) << 8) + len_bytes[1] as u16;
let mut msg = vec![0; len as usize].into_boxed_slice();
self.stream.read(&mut msg)?;
// To remind myself what this does: We must dereference the Box, to get the
// [u8] slice, and then reference it again in order to create a &[u8], since
// Rust's automatic deref coercion rules won't do this for you. The reason
// we need to do this is because &Box<[u8]> does not implement IntoIterator,
// but &[u8] does, and Rust won't just deref to some type that implements it.
buf.extend(&*msg);
Ok(())
}
fn write_bytes(&mut self, buf: &[u8]) -> io::Result<()> {
// Somewhere to store the length bytes.
let mut len_bytes = [0; 2];
// We need to write the length of the message into a variable.
// Since we know that the buf.len() <= 65535, we can safely cast
// it to u16. As a sanity check, I'm using try_from() to make sure
// that it can be safely cast.
let msg_len =
u16::try_from(buf.len()).expect("converting to u16 here should always be safe!");
len_bytes[0] = (msg_len >> 8) as u8;
len_bytes[1] = (msg_len & 255) as u8;
let msg = [&len_bytes[..], &buf[..]].concat().into_boxed_slice();
self.stream.write_all(&msg)?;
self.stream.flush()?;
Ok(())
}
}
pub struct Message {
pub contents: String,
pub from: Id,
pub to: Option<Id>,
}
impl Message {
fn new(contents: String, from: Id, to: Option<Id>) -> Message {
Message { contents, from, to }
}
}
impl fmt::Display for Message {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.contents)
}
} | Ok(Server {
size, | random_line_split |
main.rs | #![allow(unused_imports)]
mod texture;
mod model;
mod camera; // means -> mod camera { // contents of camera.rs }
mod light;
use winit::{
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
dpi::PhysicalSize,
};
use futures::executor::block_on;
use wgpu::util::DeviceExt;
use texture::Texture;
use model::{Vertex, DrawModel, DrawLight};
use light::Light;
use cgmath::prelude::*;
//=============================================================================
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct Uniforms {
// We can't use cgmath with bytemuck directly so we'll have
// to convert the Matrix4 into a 4x4 f32 array
// TODO: Build converter (from/into) form cgmath to bytemuck
view_position: [f32; 4],
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
fn new() -> Self {
use cgmath::SquareMatrix;
return Self {
view_position: [0.0; 4],
view_proj: cgmath::Matrix4::identity().into(),
};
}
fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) {
self.view_position = camera.position.to_homogeneous().into();
self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into();
}
}
//=============================================================================
const NUM_INSTANCES_PER_ROW: u32 = 10;
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>, // TODO: Review quaternions
}
impl Instance {
fn to_raw(&self) -> InstanceRaw {
return InstanceRaw {
model: (cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)).into(),
};
}
}
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct InstanceRaw {
model: [[f32; 4]; 4],
}
impl InstanceRaw {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
let attrib = &[
wgpu::VertexAttributeDescriptor {
offset: 0,
// While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll
// be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later
shader_location: 5,
format: wgpu::VertexFormat::Float4,
},
// A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot
// for each vec4. We don't have to do this in code though.
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
shader_location: 6,
format: wgpu::VertexFormat::Float4,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
shader_location: 7,
format: wgpu::VertexFormat::Float4,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
shader_location: 8,
format: wgpu::VertexFormat::Float4,
},
];
return wgpu::VertexBufferDescriptor {
stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
// We need to switch from using a step mode of Vertex to Instance
// This means that our shaders will only change to use the next
// instance when the shader starts processing a new instance
step_mode: wgpu::InputStepMode::Instance,
attributes: attrib,
};
}
}
//=============================================================================
fn | (
device: &wgpu::Device,
layout: &wgpu::PipelineLayout,
color_format: wgpu::TextureFormat,
depth_format: Option<wgpu::TextureFormat>,
vertex_descs: &[wgpu::VertexBufferDescriptor],
vs_src: wgpu::ShaderModuleSource,
fs_src: wgpu::ShaderModuleSource,
) -> wgpu::RenderPipeline {
// Create ShaderModules
let vs_module = device.create_shader_module(vs_src);
let fs_module = device.create_shader_module(fs_src);
// Create Render Pipeline
let render_pipeline_desc = wgpu::RenderPipelineDescriptor {
label: Some("render_pipeline"),
layout: Some(&layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor{
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
color_states: &[wgpu::ColorStateDescriptor{ // Define how colors are stored and processed
format: color_format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
depth_stencil_state: depth_format.map(|format| wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled: true,
// When to discard a new pixel. Drawn front to back. Depth should be less (closer
// to camera) to discard the previous pixel on the texture
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilStateDescriptor::default(),
}),
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: vertex_descs,
},
sample_count: 1,
sample_mask: !0, // Use all samples
alpha_to_coverage_enabled: false,
};
let render_pipeline = device.create_render_pipeline(&render_pipeline_desc);
return render_pipeline;
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
swap_chain_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
obj_model: model::Model,
camera: camera::Camera,
camera_controller: camera::CameraController,
projection: camera::Projection,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
instances: Vec<Instance>,
light: Light,
light_buffer: wgpu::Buffer,
light_bind_group: wgpu::BindGroup,
light_render_pipeline: wgpu::RenderPipeline,
#[allow(dead_code)]
instance_buffer: wgpu::Buffer,
depth_texture: texture::Texture,
size: PhysicalSize<u32>, // INFO: PhysicalSize takes into account device's scale factor
mouse_pressed: bool,
}
impl State {
async fn new(window: &Window) -> Self {
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
// Create Adapter
let adapter_options = &wgpu::RequestAdapterOptions {
// Default gets LowP on battery and HighP when on mains
power_preference: wgpu::PowerPreference::HighPerformance,
compatible_surface: Some(&surface),
};
// The adapter identifies both an instance of a physical hardware accelerator (CPU, GPU),
// and an instance of a browser's implementation of WebGPU on top of the accelerator
let adapter = instance.request_adapter(adapter_options).await.unwrap();
// Create Device and Queue
let desc = &wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
};
let (device, queue) = adapter.request_device(desc, None).await.unwrap();
// Create SwapChain
let size = window.inner_size(); // INFO: Has into account the scale factor
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb, // TODO: Should be swap_chain_get_current_texture_view but not available atm
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
// Describe a set of resources and how are they accessed by a Shader
let texture_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor {
label: Some("texture_bind_group_layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT, // Bitwise comparison
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false, },
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 3,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false, },
count: None,
},
],
};
let texture_bind_group_layout = device.create_bind_group_layout(&texture_bind_group_layout_desc);
let depth_texture = texture::Texture::create_depth_texture(&device, &swap_chain_desc, "depth_texture");
let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0));
let projection = camera::Projection::new(swap_chain_desc.width, swap_chain_desc.height, cgmath::Deg(45.0), 0.1, 100.0);
let camera_controller = camera::CameraController::new(4.0, 1.0);
// Create Uniform Buffers
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera, &projection);
let uniforms_array = &[uniforms];
let uniform_buffer_desc = wgpu::util::BufferInitDescriptor {
label: Some("uniform_buffer"),
contents: bytemuck::cast_slice(uniforms_array),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
};
let uniform_buffer = device.create_buffer_init(&uniform_buffer_desc);
// Create Uniform Bind Group
let uniform_bind_group_layout_entry = wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, },
count: None,
};
let uniform_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor{
label: Some("uniform_bind_group_layout"),
entries: &[uniform_bind_group_layout_entry]
};
let uniform_bind_group_layout = device.create_bind_group_layout(&uniform_bind_group_layout_desc);
let uniform_bind_group_desc = wgpu::BindGroupDescriptor {
label: Some("uniform_bind_group"),
layout: &uniform_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
}],
};
let uniform_bind_group = device.create_bind_group(&uniform_bind_group_desc);
// Load Model
let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res");
let obj_model = model::Model::load(
&device,
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
).unwrap();
// Create Instances
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let position = cgmath::Vector3 { x, y: 0.0, z };
let rotation = if position.is_zero() {
cgmath::Quaternion::from_axis_angle(
cgmath::Vector3::unit_z(),
cgmath::Deg(0.0),
)
} else {
cgmath::Quaternion::from_axis_angle(
position.clone().normalize(),
cgmath::Deg(45.0),
)
};
Instance { position, rotation }
})
})
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<InstanceRaw>>();
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor{
label: Some("instance_buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::VERTEX,
});
// Create Light
// TODO: Change wgpu tutorial from .into() to [f32; 3]
let light = Light {
position: [2.0, 2.0, 2.0],
_padding: 0,
color: [1.0, 1.0, 1.0],
};
let lights_array = &[light];
let light_buffer_init_desc = wgpu::util::BufferInitDescriptor {
label: Some("light_buffer_init"),
contents: bytemuck::cast_slice(lights_array),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
};
let light_buffer = device.create_buffer_init(&light_buffer_init_desc);
let light_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor {
label: Some("light_bind_group_layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, },
count: None,
}],
};
let light_bind_group_layout = device.create_bind_group_layout(&light_bind_group_layout_desc);
let light_bind_group_desc = wgpu::BindGroupDescriptor {
label: Some("light_bind_group"),
layout: &light_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(light_buffer.slice(..)),
}],
};
let light_bind_group = device.create_bind_group(&light_bind_group_desc);
// Create Pipeline Layout
let pipeline_layout_desc = wgpu::PipelineLayoutDescriptor {
label: Some("pipeline_layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
&light_bind_group_layout,
],
push_constant_ranges: &[],
};
let render_pipeline_layout = device.create_pipeline_layout(&pipeline_layout_desc);
let render_pipeline = create_render_pipeline(
&device,
&render_pipeline_layout,
swap_chain_desc.format,
Some(texture::Texture::DEPTH_FORMAT),
&[model::ModelVertex::desc(), InstanceRaw::desc()],
wgpu::include_spirv!("../shaders/shader.vert.spv"),
wgpu::include_spirv!("../shaders/shader.frag.spv"),
);
let light_render_pipeline = {
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("light_render_pipeline_layout"),
bind_group_layouts: &[
&uniform_bind_group_layout,
&light_bind_group_layout,
],
push_constant_ranges: &[],
});
let vs_src = wgpu::include_spirv!("../shaders/light.vert.spv");
let fs_src = wgpu::include_spirv!("../shaders/light.frag.spv");
create_render_pipeline(
&device,
&layout,
swap_chain_desc.format,
Some(texture::Texture::DEPTH_FORMAT),
&[model::ModelVertex::desc()],
vs_src,
fs_src,
)
};
return State {
surface,
device,
queue,
swap_chain_desc,
swap_chain,
render_pipeline,
obj_model,
camera,
camera_controller,
projection,
uniforms,
uniform_buffer,
uniform_bind_group,
instances,
light,
light_buffer,
light_bind_group,
light_render_pipeline,
instance_buffer,
depth_texture,
size,
mouse_pressed: false,
};
}
fn resize(&mut self, new_size: PhysicalSize<u32>) {
self.size = new_size;
self.swap_chain_desc.width = new_size.width;
self.swap_chain_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.swap_chain_desc, "depth_texture");
self.projection.resize(new_size.width, new_size.height);
}
// Returns a bool to indicate whether an event has been fully processed. If `true` the main
// loop won't process the event any further
fn input(&mut self, event: &DeviceEvent) -> bool {
match event {
DeviceEvent::MouseWheel { delta, .. } => {
self.camera_controller.process_scroll(delta);
true
}
DeviceEvent::Button {
button: 1, // Left Mouse Button
state,
} => {
self.mouse_pressed = *state == ElementState::Pressed;
true
}
DeviceEvent::MouseMotion { delta } => {
if self.mouse_pressed {
self.camera_controller.process_mouse(delta.0, delta.1);
}
true
}
DeviceEvent::Motion { .. } => {
false
}
_ => false,
}
}
fn update(&mut self, dt: std::time::Duration) {
self.camera_controller.update_camera(&mut self.camera, dt);
self.uniforms.update_view_proj(&self.camera, &self.projection);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
// Update light's position
let old_position: cgmath::Vector3<f32> = self.light.position.into();
let new_position = cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(60.0 * dt.as_secs_f32())) * old_position;
self.light.position = new_position.into();
self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
}
fn render(&mut self) -> Result<(), wgpu::SwapChainError> {
// Get next frame
let frame = self.swap_chain.get_current_frame()?.output;
// Create command encoder
let command_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("command_encoder"),
};
let mut encoder = self.device.create_command_encoder(&command_encoder_desc);
{
// Create Render Pass
let clear_color = wgpu::Color { r: 0.1, g: 0.1, b: 0.1, a: 1.0, };
let render_pass_desc = wgpu::RenderPassDescriptor {
// Color Attachments
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view, // Current frame texture view
resolve_target: None, // Only used if multisampling is enabled
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(clear_color),
store: true,
},
}],
// Depth Stencil Attachments
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture.view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0), // Clear before use
store: true, // Render Pass will write here: true
}),
stencil_ops: None,
}),
};
let mut render_pass = encoder.begin_render_pass(&render_pass_desc);
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
render_pass.set_pipeline(&self.light_render_pipeline);
// TODO: Refactor draw_light_model to light_model
render_pass.draw_light_model(
&self.obj_model,
&self.uniform_bind_group,
&self.light_bind_group,
);
render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(
&self.obj_model,
0..self.instances.len() as u32,
&self.uniform_bind_group,
&self.light_bind_group,
);
}
self.queue.submit(std::iter::once(encoder.finish()));
return Ok(());
}
}
fn handle_keyboard_input(state: &mut State, input: KeyboardInput, control_flow: &mut ControlFlow) {
match input {
KeyboardInput { virtual_keycode: key, state: element_state, .. } => {
match (key, element_state) {
(Some(VirtualKeyCode::Escape), ElementState::Pressed) => { *control_flow = ControlFlow::Exit; }
(Some(_), _) => { state.camera_controller.process_keyboard(key.unwrap(), element_state); }
_ => {}
}
}
}
}
fn handle_window_events(state: &mut State, event: WindowEvent, control_flow: &mut ControlFlow) {
match event {
WindowEvent::KeyboardInput { input, ../*device_id, is_synthetic*/ } => {
handle_keyboard_input(state, input, control_flow);
},
WindowEvent::Resized(physical_size) => {
state.resize(physical_size)
},
WindowEvent::ScaleFactorChanged {new_inner_size, ../*scale_factor*/ } => {
state.resize(*new_inner_size)
},
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
}
}
fn handle_redraw_requested(state: &mut State, control_flow: &mut ControlFlow, dt: std::time::Duration) {
state.update(dt);
match state.render() {
Err(wgpu::SwapChainError::Lost) => state.resize(state.size),
Err(wgpu::SwapChainError::OutOfMemory) => *control_flow = ControlFlow::Exit,
Err(e) => eprintln!("{:?}", e),
Ok(_) => {}
}
}
fn main() {
env_logger::init(); // INFO: error!, warn!, info!, debug! and trace!
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_title("WGPU Renderer")
.with_resizable(false)
.build(&event_loop)
.unwrap();
// INFO: This is just for debugging purposes
window.set_outer_position(winit::dpi::PhysicalPosition::new(2561.0, 1.0));
let mut state = block_on(State::new(&window));
let mut last_render_time = std::time::Instant::now();
// INFO: move -> moves any variables you reference which are outside the scope of the closure into the closure's object.
event_loop.run(move |event, _event_loop_window_target, control_flow| {
match event {
Event::DeviceEvent { ref event, ..} => { state.input(event); }
Event::WindowEvent { event, window_id } => {
if window_id == window.id() {
handle_window_events(&mut state, event, control_flow);
}
}
Event::RedrawRequested(_window_id) => {
let now = std::time::Instant::now();
let dt = now - last_render_time;
last_render_time = now;
handle_redraw_requested(&mut state, control_flow, dt);
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing
// is about to begin.
window.request_redraw();
}
_ => {}
};
});
}
| create_render_pipeline | identifier_name |
main.rs | #![allow(unused_imports)]
mod texture;
mod model;
mod camera; // means -> mod camera { // contents of camera.rs }
mod light;
use winit::{
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
dpi::PhysicalSize,
};
use futures::executor::block_on;
use wgpu::util::DeviceExt;
use texture::Texture;
use model::{Vertex, DrawModel, DrawLight};
use light::Light;
use cgmath::prelude::*;
//=============================================================================
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct Uniforms {
// We can't use cgmath with bytemuck directly so we'll have
// to convert the Matrix4 into a 4x4 f32 array
// TODO: Build converter (from/into) form cgmath to bytemuck
view_position: [f32; 4],
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
fn new() -> Self {
use cgmath::SquareMatrix;
return Self {
view_position: [0.0; 4],
view_proj: cgmath::Matrix4::identity().into(),
};
}
fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) {
self.view_position = camera.position.to_homogeneous().into();
self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into();
}
}
//=============================================================================
const NUM_INSTANCES_PER_ROW: u32 = 10;
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>, // TODO: Review quaternions
}
impl Instance {
fn to_raw(&self) -> InstanceRaw {
return InstanceRaw {
model: (cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)).into(),
};
}
}
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct InstanceRaw {
model: [[f32; 4]; 4],
}
impl InstanceRaw {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
let attrib = &[
wgpu::VertexAttributeDescriptor {
offset: 0,
// While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll
// be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later
shader_location: 5,
format: wgpu::VertexFormat::Float4,
},
// A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot
// for each vec4. We don't have to do this in code though.
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
shader_location: 6,
format: wgpu::VertexFormat::Float4,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
shader_location: 7,
format: wgpu::VertexFormat::Float4,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
shader_location: 8,
format: wgpu::VertexFormat::Float4,
},
];
return wgpu::VertexBufferDescriptor {
stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
// We need to switch from using a step mode of Vertex to Instance
// This means that our shaders will only change to use the next
// instance when the shader starts processing a new instance
step_mode: wgpu::InputStepMode::Instance,
attributes: attrib,
};
}
}
//=============================================================================
fn create_render_pipeline(
device: &wgpu::Device,
layout: &wgpu::PipelineLayout,
color_format: wgpu::TextureFormat,
depth_format: Option<wgpu::TextureFormat>,
vertex_descs: &[wgpu::VertexBufferDescriptor],
vs_src: wgpu::ShaderModuleSource,
fs_src: wgpu::ShaderModuleSource,
) -> wgpu::RenderPipeline {
// Create ShaderModules
let vs_module = device.create_shader_module(vs_src);
let fs_module = device.create_shader_module(fs_src);
// Create Render Pipeline
let render_pipeline_desc = wgpu::RenderPipelineDescriptor {
label: Some("render_pipeline"),
layout: Some(&layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor{
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
color_states: &[wgpu::ColorStateDescriptor{ // Define how colors are stored and processed
format: color_format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
depth_stencil_state: depth_format.map(|format| wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled: true,
// When to discard a new pixel. Drawn front to back. Depth should be less (closer
// to camera) to discard the previous pixel on the texture
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilStateDescriptor::default(),
}),
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: vertex_descs,
},
sample_count: 1,
sample_mask: !0, // Use all samples
alpha_to_coverage_enabled: false,
};
let render_pipeline = device.create_render_pipeline(&render_pipeline_desc);
return render_pipeline;
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
swap_chain_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
obj_model: model::Model,
camera: camera::Camera,
camera_controller: camera::CameraController,
projection: camera::Projection,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
instances: Vec<Instance>,
light: Light,
light_buffer: wgpu::Buffer,
light_bind_group: wgpu::BindGroup,
light_render_pipeline: wgpu::RenderPipeline,
#[allow(dead_code)]
instance_buffer: wgpu::Buffer,
depth_texture: texture::Texture,
size: PhysicalSize<u32>, // INFO: PhysicalSize takes into account device's scale factor
mouse_pressed: bool,
}
impl State {
async fn new(window: &Window) -> Self {
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
// Create Adapter
let adapter_options = &wgpu::RequestAdapterOptions {
// Default gets LowP on battery and HighP when on mains
power_preference: wgpu::PowerPreference::HighPerformance,
compatible_surface: Some(&surface),
};
// The adapter identifies both an instance of a physical hardware accelerator (CPU, GPU),
// and an instance of a browser's implementation of WebGPU on top of the accelerator
let adapter = instance.request_adapter(adapter_options).await.unwrap();
// Create Device and Queue
let desc = &wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
};
let (device, queue) = adapter.request_device(desc, None).await.unwrap();
// Create SwapChain
let size = window.inner_size(); // INFO: Has into account the scale factor
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb, // TODO: Should be swap_chain_get_current_texture_view but not available atm
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
// Describe a set of resources and how are they accessed by a Shader
let texture_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor {
label: Some("texture_bind_group_layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT, // Bitwise comparison
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false, },
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 3,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false, },
count: None,
},
],
};
let texture_bind_group_layout = device.create_bind_group_layout(&texture_bind_group_layout_desc);
let depth_texture = texture::Texture::create_depth_texture(&device, &swap_chain_desc, "depth_texture");
let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0));
let projection = camera::Projection::new(swap_chain_desc.width, swap_chain_desc.height, cgmath::Deg(45.0), 0.1, 100.0);
let camera_controller = camera::CameraController::new(4.0, 1.0);
// Create Uniform Buffers
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera, &projection);
let uniforms_array = &[uniforms];
let uniform_buffer_desc = wgpu::util::BufferInitDescriptor {
label: Some("uniform_buffer"),
contents: bytemuck::cast_slice(uniforms_array),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
};
let uniform_buffer = device.create_buffer_init(&uniform_buffer_desc);
// Create Uniform Bind Group
let uniform_bind_group_layout_entry = wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, },
count: None,
};
let uniform_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor{
label: Some("uniform_bind_group_layout"),
entries: &[uniform_bind_group_layout_entry]
};
let uniform_bind_group_layout = device.create_bind_group_layout(&uniform_bind_group_layout_desc);
let uniform_bind_group_desc = wgpu::BindGroupDescriptor {
label: Some("uniform_bind_group"),
layout: &uniform_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
}],
};
let uniform_bind_group = device.create_bind_group(&uniform_bind_group_desc);
// Load Model
let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res");
let obj_model = model::Model::load(
&device,
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
).unwrap();
// Create Instances
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let position = cgmath::Vector3 { x, y: 0.0, z };
let rotation = if position.is_zero() {
cgmath::Quaternion::from_axis_angle(
cgmath::Vector3::unit_z(),
cgmath::Deg(0.0),
)
} else {
cgmath::Quaternion::from_axis_angle(
position.clone().normalize(),
cgmath::Deg(45.0),
)
};
Instance { position, rotation }
})
})
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<InstanceRaw>>();
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor{
label: Some("instance_buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::VERTEX,
});
// Create Light
// TODO: Change wgpu tutorial from .into() to [f32; 3]
let light = Light {
position: [2.0, 2.0, 2.0],
_padding: 0,
color: [1.0, 1.0, 1.0],
};
let lights_array = &[light];
let light_buffer_init_desc = wgpu::util::BufferInitDescriptor {
label: Some("light_buffer_init"),
contents: bytemuck::cast_slice(lights_array),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
};
let light_buffer = device.create_buffer_init(&light_buffer_init_desc);
let light_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor {
label: Some("light_bind_group_layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, },
count: None,
}],
};
let light_bind_group_layout = device.create_bind_group_layout(&light_bind_group_layout_desc);
let light_bind_group_desc = wgpu::BindGroupDescriptor {
label: Some("light_bind_group"),
layout: &light_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(light_buffer.slice(..)),
}],
};
let light_bind_group = device.create_bind_group(&light_bind_group_desc);
// Create Pipeline Layout
let pipeline_layout_desc = wgpu::PipelineLayoutDescriptor {
label: Some("pipeline_layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
&light_bind_group_layout,
],
push_constant_ranges: &[],
};
let render_pipeline_layout = device.create_pipeline_layout(&pipeline_layout_desc);
let render_pipeline = create_render_pipeline(
&device,
&render_pipeline_layout,
swap_chain_desc.format,
Some(texture::Texture::DEPTH_FORMAT),
&[model::ModelVertex::desc(), InstanceRaw::desc()],
wgpu::include_spirv!("../shaders/shader.vert.spv"),
wgpu::include_spirv!("../shaders/shader.frag.spv"),
);
let light_render_pipeline = {
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("light_render_pipeline_layout"),
bind_group_layouts: &[
&uniform_bind_group_layout,
&light_bind_group_layout,
],
push_constant_ranges: &[],
});
let vs_src = wgpu::include_spirv!("../shaders/light.vert.spv");
let fs_src = wgpu::include_spirv!("../shaders/light.frag.spv");
create_render_pipeline(
&device,
&layout,
swap_chain_desc.format,
Some(texture::Texture::DEPTH_FORMAT),
&[model::ModelVertex::desc()],
vs_src,
fs_src,
)
};
return State {
surface,
device,
queue,
swap_chain_desc,
swap_chain,
render_pipeline,
obj_model,
camera,
camera_controller,
projection,
uniforms,
uniform_buffer,
uniform_bind_group,
instances,
light,
light_buffer,
light_bind_group,
light_render_pipeline,
instance_buffer,
depth_texture,
size,
mouse_pressed: false,
};
}
fn resize(&mut self, new_size: PhysicalSize<u32>) {
self.size = new_size;
self.swap_chain_desc.width = new_size.width;
self.swap_chain_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.swap_chain_desc, "depth_texture");
self.projection.resize(new_size.width, new_size.height);
}
// Returns a bool to indicate whether an event has been fully processed. If `true` the main
// loop won't process the event any further
fn input(&mut self, event: &DeviceEvent) -> bool {
match event {
DeviceEvent::MouseWheel { delta, .. } => {
self.camera_controller.process_scroll(delta);
true
}
DeviceEvent::Button {
button: 1, // Left Mouse Button
state,
} => {
self.mouse_pressed = *state == ElementState::Pressed;
true
}
DeviceEvent::MouseMotion { delta } => {
if self.mouse_pressed {
self.camera_controller.process_mouse(delta.0, delta.1);
}
true
}
DeviceEvent::Motion { .. } => {
false
}
_ => false,
}
}
fn update(&mut self, dt: std::time::Duration) {
self.camera_controller.update_camera(&mut self.camera, dt);
self.uniforms.update_view_proj(&self.camera, &self.projection);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
// Update light's position
let old_position: cgmath::Vector3<f32> = self.light.position.into();
let new_position = cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(60.0 * dt.as_secs_f32())) * old_position;
self.light.position = new_position.into();
self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
}
fn render(&mut self) -> Result<(), wgpu::SwapChainError> {
// Get next frame
let frame = self.swap_chain.get_current_frame()?.output;
// Create command encoder
let command_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("command_encoder"),
};
let mut encoder = self.device.create_command_encoder(&command_encoder_desc);
{
// Create Render Pass
let clear_color = wgpu::Color { r: 0.1, g: 0.1, b: 0.1, a: 1.0, };
let render_pass_desc = wgpu::RenderPassDescriptor {
// Color Attachments
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view, // Current frame texture view
resolve_target: None, // Only used if multisampling is enabled
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(clear_color),
store: true,
},
}],
// Depth Stencil Attachments
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture.view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0), // Clear before use
store: true, // Render Pass will write here: true
}),
stencil_ops: None,
}),
};
let mut render_pass = encoder.begin_render_pass(&render_pass_desc);
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
render_pass.set_pipeline(&self.light_render_pipeline);
// TODO: Refactor draw_light_model to light_model
render_pass.draw_light_model(
&self.obj_model,
&self.uniform_bind_group,
&self.light_bind_group,
);
render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(
&self.obj_model,
0..self.instances.len() as u32,
&self.uniform_bind_group,
&self.light_bind_group,
);
}
self.queue.submit(std::iter::once(encoder.finish()));
return Ok(());
}
}
fn handle_keyboard_input(state: &mut State, input: KeyboardInput, control_flow: &mut ControlFlow) {
match input {
KeyboardInput { virtual_keycode: key, state: element_state, .. } => {
match (key, element_state) {
(Some(VirtualKeyCode::Escape), ElementState::Pressed) => { *control_flow = ControlFlow::Exit; }
(Some(_), _) => { state.camera_controller.process_keyboard(key.unwrap(), element_state); }
_ => {}
}
}
}
}
fn handle_window_events(state: &mut State, event: WindowEvent, control_flow: &mut ControlFlow) {
match event {
WindowEvent::KeyboardInput { input, ../*device_id, is_synthetic*/ } => {
handle_keyboard_input(state, input, control_flow);
},
WindowEvent::Resized(physical_size) => { | WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
}
}
fn handle_redraw_requested(state: &mut State, control_flow: &mut ControlFlow, dt: std::time::Duration) {
state.update(dt);
match state.render() {
Err(wgpu::SwapChainError::Lost) => state.resize(state.size),
Err(wgpu::SwapChainError::OutOfMemory) => *control_flow = ControlFlow::Exit,
Err(e) => eprintln!("{:?}", e),
Ok(_) => {}
}
}
fn main() {
env_logger::init(); // INFO: error!, warn!, info!, debug! and trace!
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_title("WGPU Renderer")
.with_resizable(false)
.build(&event_loop)
.unwrap();
// INFO: This is just for debugging purposes
window.set_outer_position(winit::dpi::PhysicalPosition::new(2561.0, 1.0));
let mut state = block_on(State::new(&window));
let mut last_render_time = std::time::Instant::now();
// INFO: move -> moves any variables you reference which are outside the scope of the closure into the closure's object.
event_loop.run(move |event, _event_loop_window_target, control_flow| {
match event {
Event::DeviceEvent { ref event, ..} => { state.input(event); }
Event::WindowEvent { event, window_id } => {
if window_id == window.id() {
handle_window_events(&mut state, event, control_flow);
}
}
Event::RedrawRequested(_window_id) => {
let now = std::time::Instant::now();
let dt = now - last_render_time;
last_render_time = now;
handle_redraw_requested(&mut state, control_flow, dt);
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing
// is about to begin.
window.request_redraw();
}
_ => {}
};
});
} | state.resize(physical_size)
},
WindowEvent::ScaleFactorChanged {new_inner_size, ../*scale_factor*/ } => {
state.resize(*new_inner_size)
}, | random_line_split |
main.rs | #![allow(unused_imports)]
mod texture;
mod model;
mod camera; // means -> mod camera { // contents of camera.rs }
mod light;
use winit::{
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
dpi::PhysicalSize,
};
use futures::executor::block_on;
use wgpu::util::DeviceExt;
use texture::Texture;
use model::{Vertex, DrawModel, DrawLight};
use light::Light;
use cgmath::prelude::*;
//=============================================================================
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct Uniforms {
// We can't use cgmath with bytemuck directly so we'll have
// to convert the Matrix4 into a 4x4 f32 array
// TODO: Build converter (from/into) form cgmath to bytemuck
view_position: [f32; 4],
view_proj: [[f32; 4]; 4],
}
impl Uniforms {
fn new() -> Self {
use cgmath::SquareMatrix;
return Self {
view_position: [0.0; 4],
view_proj: cgmath::Matrix4::identity().into(),
};
}
fn update_view_proj(&mut self, camera: &camera::Camera, projection: &camera::Projection) {
self.view_position = camera.position.to_homogeneous().into();
self.view_proj = (projection.calc_matrix() * camera.calc_matrix()).into();
}
}
//=============================================================================
const NUM_INSTANCES_PER_ROW: u32 = 10;
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>, // TODO: Review quaternions
}
impl Instance {
fn to_raw(&self) -> InstanceRaw {
return InstanceRaw {
model: (cgmath::Matrix4::from_translation(self.position) * cgmath::Matrix4::from(self.rotation)).into(),
};
}
}
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct InstanceRaw {
model: [[f32; 4]; 4],
}
impl InstanceRaw {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
let attrib = &[
wgpu::VertexAttributeDescriptor {
offset: 0,
// While our vertex shader only uses locations 0, and 1 now, in later tutorials we'll
// be using 2, 3, and 4, for Vertex. We'll start at slot 5 not conflict with them later
shader_location: 5,
format: wgpu::VertexFormat::Float4,
},
// A mat4 takes up 4 vertex slots as it is technically 4 vec4s. We need to define a slot
// for each vec4. We don't have to do this in code though.
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 4]>() as wgpu::BufferAddress,
shader_location: 6,
format: wgpu::VertexFormat::Float4,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress,
shader_location: 7,
format: wgpu::VertexFormat::Float4,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress,
shader_location: 8,
format: wgpu::VertexFormat::Float4,
},
];
return wgpu::VertexBufferDescriptor {
stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress,
// We need to switch from using a step mode of Vertex to Instance
// This means that our shaders will only change to use the next
// instance when the shader starts processing a new instance
step_mode: wgpu::InputStepMode::Instance,
attributes: attrib,
};
}
}
//=============================================================================
fn create_render_pipeline(
device: &wgpu::Device,
layout: &wgpu::PipelineLayout,
color_format: wgpu::TextureFormat,
depth_format: Option<wgpu::TextureFormat>,
vertex_descs: &[wgpu::VertexBufferDescriptor],
vs_src: wgpu::ShaderModuleSource,
fs_src: wgpu::ShaderModuleSource,
) -> wgpu::RenderPipeline {
// Create ShaderModules
let vs_module = device.create_shader_module(vs_src);
let fs_module = device.create_shader_module(fs_src);
// Create Render Pipeline
let render_pipeline_desc = wgpu::RenderPipelineDescriptor {
label: Some("render_pipeline"),
layout: Some(&layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor{
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
clamp_depth: false,
}),
color_states: &[wgpu::ColorStateDescriptor{ // Define how colors are stored and processed
format: color_format,
color_blend: wgpu::BlendDescriptor::REPLACE,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
depth_stencil_state: depth_format.map(|format| wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled: true,
// When to discard a new pixel. Drawn front to back. Depth should be less (closer
// to camera) to discard the previous pixel on the texture
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilStateDescriptor::default(),
}),
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: vertex_descs,
},
sample_count: 1,
sample_mask: !0, // Use all samples
alpha_to_coverage_enabled: false,
};
let render_pipeline = device.create_render_pipeline(&render_pipeline_desc);
return render_pipeline;
}
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
swap_chain_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
render_pipeline: wgpu::RenderPipeline,
obj_model: model::Model,
camera: camera::Camera,
camera_controller: camera::CameraController,
projection: camera::Projection,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
instances: Vec<Instance>,
light: Light,
light_buffer: wgpu::Buffer,
light_bind_group: wgpu::BindGroup,
light_render_pipeline: wgpu::RenderPipeline,
#[allow(dead_code)]
instance_buffer: wgpu::Buffer,
depth_texture: texture::Texture,
size: PhysicalSize<u32>, // INFO: PhysicalSize takes into account device's scale factor
mouse_pressed: bool,
}
impl State {
async fn new(window: &Window) -> Self {
// The instance is a handle to our GPU
// BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
// Create Adapter
let adapter_options = &wgpu::RequestAdapterOptions {
// Default gets LowP on battery and HighP when on mains
power_preference: wgpu::PowerPreference::HighPerformance,
compatible_surface: Some(&surface),
};
// The adapter identifies both an instance of a physical hardware accelerator (CPU, GPU),
// and an instance of a browser's implementation of WebGPU on top of the accelerator
let adapter = instance.request_adapter(adapter_options).await.unwrap();
// Create Device and Queue
let desc = &wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
shader_validation: true,
};
let (device, queue) = adapter.request_device(desc, None).await.unwrap();
// Create SwapChain
let size = window.inner_size(); // INFO: Has into account the scale factor
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb, // TODO: Should be swap_chain_get_current_texture_view but not available atm
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
// Describe a set of resources and how are they accessed by a Shader
let texture_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor {
label: Some("texture_bind_group_layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT, // Bitwise comparison
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false, },
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
component_type: wgpu::TextureComponentType::Uint,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 3,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false, },
count: None,
},
],
};
let texture_bind_group_layout = device.create_bind_group_layout(&texture_bind_group_layout_desc);
let depth_texture = texture::Texture::create_depth_texture(&device, &swap_chain_desc, "depth_texture");
let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0));
let projection = camera::Projection::new(swap_chain_desc.width, swap_chain_desc.height, cgmath::Deg(45.0), 0.1, 100.0);
let camera_controller = camera::CameraController::new(4.0, 1.0);
// Create Uniform Buffers
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera, &projection);
let uniforms_array = &[uniforms];
let uniform_buffer_desc = wgpu::util::BufferInitDescriptor {
label: Some("uniform_buffer"),
contents: bytemuck::cast_slice(uniforms_array),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
};
let uniform_buffer = device.create_buffer_init(&uniform_buffer_desc);
// Create Uniform Bind Group
let uniform_bind_group_layout_entry = wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, },
count: None,
};
let uniform_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor{
label: Some("uniform_bind_group_layout"),
entries: &[uniform_bind_group_layout_entry]
};
let uniform_bind_group_layout = device.create_bind_group_layout(&uniform_bind_group_layout_desc);
let uniform_bind_group_desc = wgpu::BindGroupDescriptor {
label: Some("uniform_bind_group"),
layout: &uniform_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
}],
};
let uniform_bind_group = device.create_bind_group(&uniform_bind_group_desc);
// Load Model
let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res");
let obj_model = model::Model::load(
&device,
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
).unwrap();
// Create Instances
const SPACE_BETWEEN: f32 = 3.0;
let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let position = cgmath::Vector3 { x, y: 0.0, z };
let rotation = if position.is_zero() {
cgmath::Quaternion::from_axis_angle(
cgmath::Vector3::unit_z(),
cgmath::Deg(0.0),
)
} else {
cgmath::Quaternion::from_axis_angle(
position.clone().normalize(),
cgmath::Deg(45.0),
)
};
Instance { position, rotation }
})
})
.collect::<Vec<_>>();
let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<InstanceRaw>>();
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor{
label: Some("instance_buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::VERTEX,
});
// Create Light
// TODO: Change wgpu tutorial from .into() to [f32; 3]
let light = Light {
position: [2.0, 2.0, 2.0],
_padding: 0,
color: [1.0, 1.0, 1.0],
};
let lights_array = &[light];
let light_buffer_init_desc = wgpu::util::BufferInitDescriptor {
label: Some("light_buffer_init"),
contents: bytemuck::cast_slice(lights_array),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
};
let light_buffer = device.create_buffer_init(&light_buffer_init_desc);
let light_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor {
label: Some("light_bind_group_layout"),
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, },
count: None,
}],
};
let light_bind_group_layout = device.create_bind_group_layout(&light_bind_group_layout_desc);
let light_bind_group_desc = wgpu::BindGroupDescriptor {
label: Some("light_bind_group"),
layout: &light_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(light_buffer.slice(..)),
}],
};
let light_bind_group = device.create_bind_group(&light_bind_group_desc);
// Create Pipeline Layout
let pipeline_layout_desc = wgpu::PipelineLayoutDescriptor {
label: Some("pipeline_layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
&light_bind_group_layout,
],
push_constant_ranges: &[],
};
let render_pipeline_layout = device.create_pipeline_layout(&pipeline_layout_desc);
let render_pipeline = create_render_pipeline(
&device,
&render_pipeline_layout,
swap_chain_desc.format,
Some(texture::Texture::DEPTH_FORMAT),
&[model::ModelVertex::desc(), InstanceRaw::desc()],
wgpu::include_spirv!("../shaders/shader.vert.spv"),
wgpu::include_spirv!("../shaders/shader.frag.spv"),
);
let light_render_pipeline = {
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("light_render_pipeline_layout"),
bind_group_layouts: &[
&uniform_bind_group_layout,
&light_bind_group_layout,
],
push_constant_ranges: &[],
});
let vs_src = wgpu::include_spirv!("../shaders/light.vert.spv");
let fs_src = wgpu::include_spirv!("../shaders/light.frag.spv");
create_render_pipeline(
&device,
&layout,
swap_chain_desc.format,
Some(texture::Texture::DEPTH_FORMAT),
&[model::ModelVertex::desc()],
vs_src,
fs_src,
)
};
return State {
surface,
device,
queue,
swap_chain_desc,
swap_chain,
render_pipeline,
obj_model,
camera,
camera_controller,
projection,
uniforms,
uniform_buffer,
uniform_bind_group,
instances,
light,
light_buffer,
light_bind_group,
light_render_pipeline,
instance_buffer,
depth_texture,
size,
mouse_pressed: false,
};
}
fn resize(&mut self, new_size: PhysicalSize<u32>) {
self.size = new_size;
self.swap_chain_desc.width = new_size.width;
self.swap_chain_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_desc);
self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.swap_chain_desc, "depth_texture");
self.projection.resize(new_size.width, new_size.height);
}
// Returns a bool to indicate whether an event has been fully processed. If `true` the main
// loop won't process the event any further
fn input(&mut self, event: &DeviceEvent) -> bool {
match event {
DeviceEvent::MouseWheel { delta, .. } => |
DeviceEvent::Button {
button: 1, // Left Mouse Button
state,
} => {
self.mouse_pressed = *state == ElementState::Pressed;
true
}
DeviceEvent::MouseMotion { delta } => {
if self.mouse_pressed {
self.camera_controller.process_mouse(delta.0, delta.1);
}
true
}
DeviceEvent::Motion { .. } => {
false
}
_ => false,
}
}
fn update(&mut self, dt: std::time::Duration) {
self.camera_controller.update_camera(&mut self.camera, dt);
self.uniforms.update_view_proj(&self.camera, &self.projection);
self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms]));
// Update light's position
let old_position: cgmath::Vector3<f32> = self.light.position.into();
let new_position = cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(60.0 * dt.as_secs_f32())) * old_position;
self.light.position = new_position.into();
self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
}
fn render(&mut self) -> Result<(), wgpu::SwapChainError> {
// Get next frame
let frame = self.swap_chain.get_current_frame()?.output;
// Create command encoder
let command_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("command_encoder"),
};
let mut encoder = self.device.create_command_encoder(&command_encoder_desc);
{
// Create Render Pass
let clear_color = wgpu::Color { r: 0.1, g: 0.1, b: 0.1, a: 1.0, };
let render_pass_desc = wgpu::RenderPassDescriptor {
// Color Attachments
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view, // Current frame texture view
resolve_target: None, // Only used if multisampling is enabled
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(clear_color),
store: true,
},
}],
// Depth Stencil Attachments
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture.view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0), // Clear before use
store: true, // Render Pass will write here: true
}),
stencil_ops: None,
}),
};
let mut render_pass = encoder.begin_render_pass(&render_pass_desc);
render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..));
render_pass.set_pipeline(&self.light_render_pipeline);
// TODO: Refactor draw_light_model to light_model
render_pass.draw_light_model(
&self.obj_model,
&self.uniform_bind_group,
&self.light_bind_group,
);
render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(
&self.obj_model,
0..self.instances.len() as u32,
&self.uniform_bind_group,
&self.light_bind_group,
);
}
self.queue.submit(std::iter::once(encoder.finish()));
return Ok(());
}
}
fn handle_keyboard_input(state: &mut State, input: KeyboardInput, control_flow: &mut ControlFlow) {
match input {
KeyboardInput { virtual_keycode: key, state: element_state, .. } => {
match (key, element_state) {
(Some(VirtualKeyCode::Escape), ElementState::Pressed) => { *control_flow = ControlFlow::Exit; }
(Some(_), _) => { state.camera_controller.process_keyboard(key.unwrap(), element_state); }
_ => {}
}
}
}
}
fn handle_window_events(state: &mut State, event: WindowEvent, control_flow: &mut ControlFlow) {
match event {
WindowEvent::KeyboardInput { input, ../*device_id, is_synthetic*/ } => {
handle_keyboard_input(state, input, control_flow);
},
WindowEvent::Resized(physical_size) => {
state.resize(physical_size)
},
WindowEvent::ScaleFactorChanged {new_inner_size, ../*scale_factor*/ } => {
state.resize(*new_inner_size)
},
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
_ => {}
}
}
fn handle_redraw_requested(state: &mut State, control_flow: &mut ControlFlow, dt: std::time::Duration) {
state.update(dt);
match state.render() {
Err(wgpu::SwapChainError::Lost) => state.resize(state.size),
Err(wgpu::SwapChainError::OutOfMemory) => *control_flow = ControlFlow::Exit,
Err(e) => eprintln!("{:?}", e),
Ok(_) => {}
}
}
fn main() {
env_logger::init(); // INFO: error!, warn!, info!, debug! and trace!
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_title("WGPU Renderer")
.with_resizable(false)
.build(&event_loop)
.unwrap();
// INFO: This is just for debugging purposes
window.set_outer_position(winit::dpi::PhysicalPosition::new(2561.0, 1.0));
let mut state = block_on(State::new(&window));
let mut last_render_time = std::time::Instant::now();
// INFO: move -> moves any variables you reference which are outside the scope of the closure into the closure's object.
event_loop.run(move |event, _event_loop_window_target, control_flow| {
match event {
Event::DeviceEvent { ref event, ..} => { state.input(event); }
Event::WindowEvent { event, window_id } => {
if window_id == window.id() {
handle_window_events(&mut state, event, control_flow);
}
}
Event::RedrawRequested(_window_id) => {
let now = std::time::Instant::now();
let dt = now - last_render_time;
last_render_time = now;
handle_redraw_requested(&mut state, control_flow, dt);
}
Event::MainEventsCleared => {
// Emitted when all of the event loop's input events have been processed and redraw processing
// is about to begin.
window.request_redraw();
}
_ => {}
};
});
}
| {
self.camera_controller.process_scroll(delta);
true
} | conditional_block |
assethook.py | """Assethook: A flask application to listen for webhook calls to send
computer names and asset tags to the Jamf Pro Server.
"""
import logging
from logging.handlers import RotatingFileHandler
import os
import sqlite3
import time
import requests
from flask import (Flask, flash, g, redirect, render_template,
request, session, url_for)
# Next steps
# Implement username/password management like Flask-User
# Auto create the webhook in the jss
# CSV Handling for field mapping
# Don't allow duplicate entries, or ask to overwrite
# Delete all
# Edit row, reuse add, fill form, then update with new values
# Search for records
# Sort table (javascript?)
# Submit all without timestamp?
app = Flask(__name__) # create the application instance :)
app.config.from_object(__name__) # load config from this file
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'assethook.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default',
DEBUG=False,
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
# Logging
handler = RotatingFileHandler('assethook.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
app.logger.addHandler(handler)
# Database methods
def connect_db():
"""Connects to the specific database.
"""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
def init_db():
"""Initialized the db file"""
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb') # if you want to do it from the command line
def initdb_command():
"""Initializes the database."""
init_db()
print('Initialized the database.')
def load_settings():
'''Loads settings from the database, if the table is empty, it will be initalized
'''
db = get_db()
try:
cur = db.execute('select setting_name, setting_value from settings')
settings = cur.fetchall()
except sqlite3.OperationalError:
init_db()
return redirect(url_for('landing'))
if not settings:
init_settings()
cur = db.execute('select setting_name, setting_value from settings')
settings = cur.fetchall()
g.settings = settings
return
def init_settings():
'''Initialize the settings table'''
db = get_db()
v = [('jsshost', ''), ('jss_path', ''), ('jss_port', ''),
('jss_username', ''), ('jss_password', ''), ('set_name', '')]
db.executemany(
"""insert into settings ('setting_name', 'setting_value') values (?,?)""", v)
db.commit()
def write_settings(_request):
'''Write settings to the database'''
db = get_db()
# Add https if not given
if 'http' not in _request.form['jsshost']:
jsshost = 'https://%s' % _request.form['jsshost']
else:
jsshost = _request.form['jsshost']
v = [(jsshost, 'jsshost'),
(_request.form['jss_path'], 'jss_path',),
(_request.form['jss_port'], 'jss_port'),
(_request.form['jss_username'], 'jss_username'),
(_request.form['jss_password'], 'jss_password'),
(_request.form['set_name'], 'set_name')]
db.executemany(
"""update settings set setting_value = ? where setting_name = ?""", v)
db.commit()
# App routes
@app.route("/log")
def logTest():
app.logger.warning('testing warning log')
app.logger.error('testing error log')
app.logger.info('testing info log')
return "Code Handbook !! Log testing."
@app.route('/')
def landing():
'''Check to see if JSS Settings, database, etc are stored in the DB'''
load_settings()
for i in g.settings:
if i[0] == 'jss_username':
if i[1] == '':
flash('Please enter the required information')
session['logged_in'] = True
return redirect(url_for('settings_page'))
return redirect(url_for('get_devices'))
@app.route('/settings', methods=['GET', 'POST'])
def settings_page():
'''Displays settings and allows them to be modified'''
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'GET':
load_settings()
return render_template('settings.html', rows=g.settings)
if request.method == 'POST':
write_settings(request)
flash('Settings saved')
return redirect(url_for('get_devices'))
return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
'''Login page for now'''
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('get_devices'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
"""Logs out current user"""
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('get_devices'))
@app.route('/devices', methods=['GET'])
def get_devices(error=None):
'''Shows all devices in the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
db = get_db()
cur = db.execute(
'select id, asset_tag, serial_number, device_name, dt_sub_to_jss from devices order by id desc')
devices = cur.fetchall()
return render_template('assets.html', rows=devices, error=error)
@app.route('/submit_inventory')
def submit_to_jss(serial_number=None, device_type=None):
'''If this is called from a webhook, the serial_number and type are set when called
If not, it is being called manually from the devices page and type will be
determined by trying the JSS with GET
'''
load_settings()
settings_dict = dict((x, y) for x, y in g.settings)
if serial_number is None:
serial_number = request.args.get('serial_number')
if serial_number is None:
flash('Serial Number not passed')
return redirect(url_for('get_devices'))
db = get_db()
cur = db.execute(
'select asset_tag, device_name from devices where serial_number = \'%s\'' % serial_number)
device_info = cur.fetchone()
if device_info is None:
return 400
if device_type == 'Computer':
device_type_xml = 'computer'
device_type_url = 'computers'
if device_type == 'MobileDevice':
device_type_xml = 'mobile_device'
device_type_url = 'mobiledevices'
if device_type is None:
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \ | if r.status_code == 200:
device_type_xml = 'mobile_device'
device_type_url = 'mobiledevices'
device_type = 'MobileDevice'
if device_type is None:
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/computers/serialnumber/' + serial_number
r = requests.get(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']))
if r.status_code == 200:
device_type_xml = 'computer'
device_type_url = 'computers'
device_type = 'Computer'
if device_type is None:
flash('Could not determine device type')
app.logger.warning('Could not determine device type')
return redirect(url_for('get_devices'))
# Check to see if there is a device name and if the device name setting is True
if settings_dict['set_name'] == 'True' and device_info['device_name'] != '':
body = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" \
"<%s><general><name>%s</name><asset_tag>%s</asset_tag></general>" \
"</%s>" % (device_type_xml,
device_info['device_name'], device_info['asset_tag'], device_type_xml)
else:
body = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" \
"<%s><general><asset_tag>%s</asset_tag></general>" \
"</%s>" % (device_type_xml,
device_info['asset_tag'], device_type_xml)
try:
r = requests.put(settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] +
'/JSSResource/%s/serialnumber/' % device_type_url + serial_number,
auth=(settings_dict['jss_username'], settings_dict['jss_password']), data=body)
if r.status_code == 409:
# A 409 error can indicate that the device record has no name. This happens when the webhook is issued
# and this program submits only an asset tag. The JSS responds that a name is reqiured. Delaying
# and trying again seems to work fine.
time.sleep(10)
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/%s/serialnumber/' % device_type_url + serial_number
r = requests.put(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']),
data=body)
except requests.exceptions.RequestException as e:
app.logger.error('Error submitting to JSS - %s' % e)
error = 'Command failed - Please see the logs for more information...'
return render_template('base.html', error=error)
db = get_db()
cur = db.execute(
'update devices set dt_sub_to_jss = CURRENT_TIMESTAMP where serial_number = ?', [serial_number])
db.commit()
if r.status_code == 201:
flash('{} Updated'.format(type))
return redirect(url_for('get_devices'))
flash('Connection made but device not updated. %s' % r.status_code)
return redirect(url_for('get_devices'))
@app.route('/submit_all', methods=['GET'])
def submit_all_devices(error=None):
'''Submit inventory for all devices in the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
db = get_db()
cur = db.execute(
'select id, asset_tag, serial_number, device_name, dt_sub_to_jss from devices order by id desc')
devices = cur.fetchall()
for device in devices:
print(device['serial_number'])
submit_to_jss(serial_number=device['serial_number'])
return redirect(url_for('get_devices'))
@app.route('/webhook', methods=['POST'])
def mobile_device_enrolled():
''' This is what the webhook will call'''
device = request.get_json()
print(device)
if not device:
return '', 400
if device['webhook']['webhookEvent'].startswith('Computer'):
device_type = 'Computer'
elif device['webhook']['webhookEvent'].startswith('Mobile'):
device_type = 'MobileDevice'
else:
return 'Invalid Webhook format', 403
submit_to_jss(serial_number=device['event'][u'serialNumber'], device_type=device_type)
return '', 200
@app.route('/upload_file', methods=['GET', 'POST'])
def upload_file():
'''Upload a csv file and import into the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'POST':
f = request.files['file']
if f.filename == '':
flash('No selected file')
return redirect(request.url)
db = get_db()
raw_file = f.read()
contents = iter(raw_file.split('\r\n'))
next(contents) # Skips the header
for x in contents:
if x.count(',') > 1:
# Pulls out form feeds, I'm importing from a very old Filemaker DB so this helps clean it up
c = x.replace('\x0b', '')
# Pulls out spaces, If you have unneeded dashes you can add this: .replace('-','')
asset_tag = c.split(',')[1].replace(' ', '')
# Pulls out spaces, shouldn't be any there
serial_number = c.split(',')[0].replace(' ', '')
device_name = c.split(',')[2]
db.execute('insert into devices (asset_tag, serial_number, device_name) values (?, ?, ?)',
[asset_tag, serial_number, device_name])
db.commit()
flash('Imported %s devices from: %s' %
(len(raw_file.split('\r\n')) - 1, request.files['file'].filename))
return redirect(url_for('get_devices'))
return render_template('upload.html')
# Move to post in the future?
@app.route('/delete_device', methods=['POST', 'GET'])
def delete_device():
'''Deletes a device passed as an arg'''
if not session.get('logged_in'):
return redirect(url_for('login'))
device_id = request.args.get('id')
db = get_db()
db.execute('delete from devices where id = %s' % device_id)
db.commit()
flash('{} Deleted from the local database.'.format(
request.args.get('serial_number')))
return redirect(url_for('get_devices'))
# Move to post in the future?
@app.route('/add_device', methods=['POST', 'GET'])
def add_device():
'''Add a single device'''
#device_id = request.args.get('id')
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'POST':
db = get_db()
db.execute('insert into devices (asset_tag, serial_number, device_name) values (?, ?, ?)',
[request.form['asset_tag'], request.form['serial_number'], request.form['device_name']])
db.commit()
flash('Device Added')
return redirect(url_for('get_devices'))
return render_template('add_device.html')
@app.route('/help')
def documentation():
"""Returns page for documentation"""
return render_template('help.html') | '/JSSResource/mobiledevices/serialnumber/' + serial_number
r = requests.get(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password'])) | random_line_split |
assethook.py | """Assethook: A flask application to listen for webhook calls to send
computer names and asset tags to the Jamf Pro Server.
"""
import logging
from logging.handlers import RotatingFileHandler
import os
import sqlite3
import time
import requests
from flask import (Flask, flash, g, redirect, render_template,
request, session, url_for)
# Next steps
# Implement username/password management like Flask-User
# Auto create the webhook in the jss
# CSV Handling for field mapping
# Don't allow duplicate entries, or ask to overwrite
# Delete all
# Edit row, reuse add, fill form, then update with new values
# Search for records
# Sort table (javascript?)
# Submit all without timestamp?
app = Flask(__name__) # create the application instance :)
app.config.from_object(__name__) # load config from this file
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'assethook.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default',
DEBUG=False,
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
# Logging
handler = RotatingFileHandler('assethook.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
app.logger.addHandler(handler)
# Database methods
def connect_db():
"""Connects to the specific database.
"""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
def init_db():
"""Initialized the db file"""
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb') # if you want to do it from the command line
def | ():
"""Initializes the database."""
init_db()
print('Initialized the database.')
def load_settings():
'''Loads settings from the database, if the table is empty, it will be initalized
'''
db = get_db()
try:
cur = db.execute('select setting_name, setting_value from settings')
settings = cur.fetchall()
except sqlite3.OperationalError:
init_db()
return redirect(url_for('landing'))
if not settings:
init_settings()
cur = db.execute('select setting_name, setting_value from settings')
settings = cur.fetchall()
g.settings = settings
return
def init_settings():
'''Initialize the settings table'''
db = get_db()
v = [('jsshost', ''), ('jss_path', ''), ('jss_port', ''),
('jss_username', ''), ('jss_password', ''), ('set_name', '')]
db.executemany(
"""insert into settings ('setting_name', 'setting_value') values (?,?)""", v)
db.commit()
def write_settings(_request):
'''Write settings to the database'''
db = get_db()
# Add https if not given
if 'http' not in _request.form['jsshost']:
jsshost = 'https://%s' % _request.form['jsshost']
else:
jsshost = _request.form['jsshost']
v = [(jsshost, 'jsshost'),
(_request.form['jss_path'], 'jss_path',),
(_request.form['jss_port'], 'jss_port'),
(_request.form['jss_username'], 'jss_username'),
(_request.form['jss_password'], 'jss_password'),
(_request.form['set_name'], 'set_name')]
db.executemany(
"""update settings set setting_value = ? where setting_name = ?""", v)
db.commit()
# App routes
@app.route("/log")
def logTest():
app.logger.warning('testing warning log')
app.logger.error('testing error log')
app.logger.info('testing info log')
return "Code Handbook !! Log testing."
@app.route('/')
def landing():
'''Check to see if JSS Settings, database, etc are stored in the DB'''
load_settings()
for i in g.settings:
if i[0] == 'jss_username':
if i[1] == '':
flash('Please enter the required information')
session['logged_in'] = True
return redirect(url_for('settings_page'))
return redirect(url_for('get_devices'))
@app.route('/settings', methods=['GET', 'POST'])
def settings_page():
'''Displays settings and allows them to be modified'''
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'GET':
load_settings()
return render_template('settings.html', rows=g.settings)
if request.method == 'POST':
write_settings(request)
flash('Settings saved')
return redirect(url_for('get_devices'))
return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
'''Login page for now'''
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('get_devices'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
"""Logs out current user"""
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('get_devices'))
@app.route('/devices', methods=['GET'])
def get_devices(error=None):
'''Shows all devices in the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
db = get_db()
cur = db.execute(
'select id, asset_tag, serial_number, device_name, dt_sub_to_jss from devices order by id desc')
devices = cur.fetchall()
return render_template('assets.html', rows=devices, error=error)
@app.route('/submit_inventory')
def submit_to_jss(serial_number=None, device_type=None):
'''If this is called from a webhook, the serial_number and type are set when called
If not, it is being called manually from the devices page and type will be
determined by trying the JSS with GET
'''
load_settings()
settings_dict = dict((x, y) for x, y in g.settings)
if serial_number is None:
serial_number = request.args.get('serial_number')
if serial_number is None:
flash('Serial Number not passed')
return redirect(url_for('get_devices'))
db = get_db()
cur = db.execute(
'select asset_tag, device_name from devices where serial_number = \'%s\'' % serial_number)
device_info = cur.fetchone()
if device_info is None:
return 400
if device_type == 'Computer':
device_type_xml = 'computer'
device_type_url = 'computers'
if device_type == 'MobileDevice':
device_type_xml = 'mobile_device'
device_type_url = 'mobiledevices'
if device_type is None:
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/mobiledevices/serialnumber/' + serial_number
r = requests.get(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']))
if r.status_code == 200:
device_type_xml = 'mobile_device'
device_type_url = 'mobiledevices'
device_type = 'MobileDevice'
if device_type is None:
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/computers/serialnumber/' + serial_number
r = requests.get(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']))
if r.status_code == 200:
device_type_xml = 'computer'
device_type_url = 'computers'
device_type = 'Computer'
if device_type is None:
flash('Could not determine device type')
app.logger.warning('Could not determine device type')
return redirect(url_for('get_devices'))
# Check to see if there is a device name and if the device name setting is True
if settings_dict['set_name'] == 'True' and device_info['device_name'] != '':
body = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" \
"<%s><general><name>%s</name><asset_tag>%s</asset_tag></general>" \
"</%s>" % (device_type_xml,
device_info['device_name'], device_info['asset_tag'], device_type_xml)
else:
body = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" \
"<%s><general><asset_tag>%s</asset_tag></general>" \
"</%s>" % (device_type_xml,
device_info['asset_tag'], device_type_xml)
try:
r = requests.put(settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] +
'/JSSResource/%s/serialnumber/' % device_type_url + serial_number,
auth=(settings_dict['jss_username'], settings_dict['jss_password']), data=body)
if r.status_code == 409:
# A 409 error can indicate that the device record has no name. This happens when the webhook is issued
# and this program submits only an asset tag. The JSS responds that a name is reqiured. Delaying
# and trying again seems to work fine.
time.sleep(10)
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/%s/serialnumber/' % device_type_url + serial_number
r = requests.put(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']),
data=body)
except requests.exceptions.RequestException as e:
app.logger.error('Error submitting to JSS - %s' % e)
error = 'Command failed - Please see the logs for more information...'
return render_template('base.html', error=error)
db = get_db()
cur = db.execute(
'update devices set dt_sub_to_jss = CURRENT_TIMESTAMP where serial_number = ?', [serial_number])
db.commit()
if r.status_code == 201:
flash('{} Updated'.format(type))
return redirect(url_for('get_devices'))
flash('Connection made but device not updated. %s' % r.status_code)
return redirect(url_for('get_devices'))
@app.route('/submit_all', methods=['GET'])
def submit_all_devices(error=None):
'''Submit inventory for all devices in the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
db = get_db()
cur = db.execute(
'select id, asset_tag, serial_number, device_name, dt_sub_to_jss from devices order by id desc')
devices = cur.fetchall()
for device in devices:
print(device['serial_number'])
submit_to_jss(serial_number=device['serial_number'])
return redirect(url_for('get_devices'))
@app.route('/webhook', methods=['POST'])
def mobile_device_enrolled():
''' This is what the webhook will call'''
device = request.get_json()
print(device)
if not device:
return '', 400
if device['webhook']['webhookEvent'].startswith('Computer'):
device_type = 'Computer'
elif device['webhook']['webhookEvent'].startswith('Mobile'):
device_type = 'MobileDevice'
else:
return 'Invalid Webhook format', 403
submit_to_jss(serial_number=device['event'][u'serialNumber'], device_type=device_type)
return '', 200
@app.route('/upload_file', methods=['GET', 'POST'])
def upload_file():
'''Upload a csv file and import into the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'POST':
f = request.files['file']
if f.filename == '':
flash('No selected file')
return redirect(request.url)
db = get_db()
raw_file = f.read()
contents = iter(raw_file.split('\r\n'))
next(contents) # Skips the header
for x in contents:
if x.count(',') > 1:
# Pulls out form feeds, I'm importing from a very old Filemaker DB so this helps clean it up
c = x.replace('\x0b', '')
# Pulls out spaces, If you have unneeded dashes you can add this: .replace('-','')
asset_tag = c.split(',')[1].replace(' ', '')
# Pulls out spaces, shouldn't be any there
serial_number = c.split(',')[0].replace(' ', '')
device_name = c.split(',')[2]
db.execute('insert into devices (asset_tag, serial_number, device_name) values (?, ?, ?)',
[asset_tag, serial_number, device_name])
db.commit()
flash('Imported %s devices from: %s' %
(len(raw_file.split('\r\n')) - 1, request.files['file'].filename))
return redirect(url_for('get_devices'))
return render_template('upload.html')
# Move to post in the future?
@app.route('/delete_device', methods=['POST', 'GET'])
def delete_device():
'''Deletes a device passed as an arg'''
if not session.get('logged_in'):
return redirect(url_for('login'))
device_id = request.args.get('id')
db = get_db()
db.execute('delete from devices where id = %s' % device_id)
db.commit()
flash('{} Deleted from the local database.'.format(
request.args.get('serial_number')))
return redirect(url_for('get_devices'))
# Move to post in the future?
@app.route('/add_device', methods=['POST', 'GET'])
def add_device():
'''Add a single device'''
#device_id = request.args.get('id')
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'POST':
db = get_db()
db.execute('insert into devices (asset_tag, serial_number, device_name) values (?, ?, ?)',
[request.form['asset_tag'], request.form['serial_number'], request.form['device_name']])
db.commit()
flash('Device Added')
return redirect(url_for('get_devices'))
return render_template('add_device.html')
@app.route('/help')
def documentation():
"""Returns page for documentation"""
return render_template('help.html')
| initdb_command | identifier_name |
assethook.py | """Assethook: A flask application to listen for webhook calls to send
computer names and asset tags to the Jamf Pro Server.
"""
import logging
from logging.handlers import RotatingFileHandler
import os
import sqlite3
import time
import requests
from flask import (Flask, flash, g, redirect, render_template,
request, session, url_for)
# Next steps
# Implement username/password management like Flask-User
# Auto create the webhook in the jss
# CSV Handling for field mapping
# Don't allow duplicate entries, or ask to overwrite
# Delete all
# Edit row, reuse add, fill form, then update with new values
# Search for records
# Sort table (javascript?)
# Submit all without timestamp?
app = Flask(__name__) # create the application instance :)
app.config.from_object(__name__) # load config from this file
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'assethook.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default',
DEBUG=False,
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
# Logging
handler = RotatingFileHandler('assethook.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
app.logger.addHandler(handler)
# Database methods
def connect_db():
"""Connects to the specific database.
"""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
def init_db():
|
@app.cli.command('initdb') # if you want to do it from the command line
def initdb_command():
"""Initializes the database."""
init_db()
print('Initialized the database.')
def load_settings():
'''Loads settings from the database, if the table is empty, it will be initalized
'''
db = get_db()
try:
cur = db.execute('select setting_name, setting_value from settings')
settings = cur.fetchall()
except sqlite3.OperationalError:
init_db()
return redirect(url_for('landing'))
if not settings:
init_settings()
cur = db.execute('select setting_name, setting_value from settings')
settings = cur.fetchall()
g.settings = settings
return
def init_settings():
'''Initialize the settings table'''
db = get_db()
v = [('jsshost', ''), ('jss_path', ''), ('jss_port', ''),
('jss_username', ''), ('jss_password', ''), ('set_name', '')]
db.executemany(
"""insert into settings ('setting_name', 'setting_value') values (?,?)""", v)
db.commit()
def write_settings(_request):
'''Write settings to the database'''
db = get_db()
# Add https if not given
if 'http' not in _request.form['jsshost']:
jsshost = 'https://%s' % _request.form['jsshost']
else:
jsshost = _request.form['jsshost']
v = [(jsshost, 'jsshost'),
(_request.form['jss_path'], 'jss_path',),
(_request.form['jss_port'], 'jss_port'),
(_request.form['jss_username'], 'jss_username'),
(_request.form['jss_password'], 'jss_password'),
(_request.form['set_name'], 'set_name')]
db.executemany(
"""update settings set setting_value = ? where setting_name = ?""", v)
db.commit()
# App routes
@app.route("/log")
def logTest():
app.logger.warning('testing warning log')
app.logger.error('testing error log')
app.logger.info('testing info log')
return "Code Handbook !! Log testing."
@app.route('/')
def landing():
'''Check to see if JSS Settings, database, etc are stored in the DB'''
load_settings()
for i in g.settings:
if i[0] == 'jss_username':
if i[1] == '':
flash('Please enter the required information')
session['logged_in'] = True
return redirect(url_for('settings_page'))
return redirect(url_for('get_devices'))
@app.route('/settings', methods=['GET', 'POST'])
def settings_page():
'''Displays settings and allows them to be modified'''
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'GET':
load_settings()
return render_template('settings.html', rows=g.settings)
if request.method == 'POST':
write_settings(request)
flash('Settings saved')
return redirect(url_for('get_devices'))
return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
'''Login page for now'''
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('get_devices'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
"""Logs out current user"""
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('get_devices'))
@app.route('/devices', methods=['GET'])
def get_devices(error=None):
'''Shows all devices in the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
db = get_db()
cur = db.execute(
'select id, asset_tag, serial_number, device_name, dt_sub_to_jss from devices order by id desc')
devices = cur.fetchall()
return render_template('assets.html', rows=devices, error=error)
@app.route('/submit_inventory')
def submit_to_jss(serial_number=None, device_type=None):
'''If this is called from a webhook, the serial_number and type are set when called
If not, it is being called manually from the devices page and type will be
determined by trying the JSS with GET
'''
load_settings()
settings_dict = dict((x, y) for x, y in g.settings)
if serial_number is None:
serial_number = request.args.get('serial_number')
if serial_number is None:
flash('Serial Number not passed')
return redirect(url_for('get_devices'))
db = get_db()
cur = db.execute(
'select asset_tag, device_name from devices where serial_number = \'%s\'' % serial_number)
device_info = cur.fetchone()
if device_info is None:
return 400
if device_type == 'Computer':
device_type_xml = 'computer'
device_type_url = 'computers'
if device_type == 'MobileDevice':
device_type_xml = 'mobile_device'
device_type_url = 'mobiledevices'
if device_type is None:
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/mobiledevices/serialnumber/' + serial_number
r = requests.get(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']))
if r.status_code == 200:
device_type_xml = 'mobile_device'
device_type_url = 'mobiledevices'
device_type = 'MobileDevice'
if device_type is None:
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/computers/serialnumber/' + serial_number
r = requests.get(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']))
if r.status_code == 200:
device_type_xml = 'computer'
device_type_url = 'computers'
device_type = 'Computer'
if device_type is None:
flash('Could not determine device type')
app.logger.warning('Could not determine device type')
return redirect(url_for('get_devices'))
# Check to see if there is a device name and if the device name setting is True
if settings_dict['set_name'] == 'True' and device_info['device_name'] != '':
body = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" \
"<%s><general><name>%s</name><asset_tag>%s</asset_tag></general>" \
"</%s>" % (device_type_xml,
device_info['device_name'], device_info['asset_tag'], device_type_xml)
else:
body = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" \
"<%s><general><asset_tag>%s</asset_tag></general>" \
"</%s>" % (device_type_xml,
device_info['asset_tag'], device_type_xml)
try:
r = requests.put(settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] +
'/JSSResource/%s/serialnumber/' % device_type_url + serial_number,
auth=(settings_dict['jss_username'], settings_dict['jss_password']), data=body)
if r.status_code == 409:
# A 409 error can indicate that the device record has no name. This happens when the webhook is issued
# and this program submits only an asset tag. The JSS responds that a name is reqiured. Delaying
# and trying again seems to work fine.
time.sleep(10)
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/%s/serialnumber/' % device_type_url + serial_number
r = requests.put(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']),
data=body)
except requests.exceptions.RequestException as e:
app.logger.error('Error submitting to JSS - %s' % e)
error = 'Command failed - Please see the logs for more information...'
return render_template('base.html', error=error)
db = get_db()
cur = db.execute(
'update devices set dt_sub_to_jss = CURRENT_TIMESTAMP where serial_number = ?', [serial_number])
db.commit()
if r.status_code == 201:
flash('{} Updated'.format(type))
return redirect(url_for('get_devices'))
flash('Connection made but device not updated. %s' % r.status_code)
return redirect(url_for('get_devices'))
@app.route('/submit_all', methods=['GET'])
def submit_all_devices(error=None):
'''Submit inventory for all devices in the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
db = get_db()
cur = db.execute(
'select id, asset_tag, serial_number, device_name, dt_sub_to_jss from devices order by id desc')
devices = cur.fetchall()
for device in devices:
print(device['serial_number'])
submit_to_jss(serial_number=device['serial_number'])
return redirect(url_for('get_devices'))
@app.route('/webhook', methods=['POST'])
def mobile_device_enrolled():
''' This is what the webhook will call'''
device = request.get_json()
print(device)
if not device:
return '', 400
if device['webhook']['webhookEvent'].startswith('Computer'):
device_type = 'Computer'
elif device['webhook']['webhookEvent'].startswith('Mobile'):
device_type = 'MobileDevice'
else:
return 'Invalid Webhook format', 403
submit_to_jss(serial_number=device['event'][u'serialNumber'], device_type=device_type)
return '', 200
@app.route('/upload_file', methods=['GET', 'POST'])
def upload_file():
'''Upload a csv file and import into the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'POST':
f = request.files['file']
if f.filename == '':
flash('No selected file')
return redirect(request.url)
db = get_db()
raw_file = f.read()
contents = iter(raw_file.split('\r\n'))
next(contents) # Skips the header
for x in contents:
if x.count(',') > 1:
# Pulls out form feeds, I'm importing from a very old Filemaker DB so this helps clean it up
c = x.replace('\x0b', '')
# Pulls out spaces, If you have unneeded dashes you can add this: .replace('-','')
asset_tag = c.split(',')[1].replace(' ', '')
# Pulls out spaces, shouldn't be any there
serial_number = c.split(',')[0].replace(' ', '')
device_name = c.split(',')[2]
db.execute('insert into devices (asset_tag, serial_number, device_name) values (?, ?, ?)',
[asset_tag, serial_number, device_name])
db.commit()
flash('Imported %s devices from: %s' %
(len(raw_file.split('\r\n')) - 1, request.files['file'].filename))
return redirect(url_for('get_devices'))
return render_template('upload.html')
# Move to post in the future?
@app.route('/delete_device', methods=['POST', 'GET'])
def delete_device():
'''Deletes a device passed as an arg'''
if not session.get('logged_in'):
return redirect(url_for('login'))
device_id = request.args.get('id')
db = get_db()
db.execute('delete from devices where id = %s' % device_id)
db.commit()
flash('{} Deleted from the local database.'.format(
request.args.get('serial_number')))
return redirect(url_for('get_devices'))
# Move to post in the future?
@app.route('/add_device', methods=['POST', 'GET'])
def add_device():
'''Add a single device'''
#device_id = request.args.get('id')
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'POST':
db = get_db()
db.execute('insert into devices (asset_tag, serial_number, device_name) values (?, ?, ?)',
[request.form['asset_tag'], request.form['serial_number'], request.form['device_name']])
db.commit()
flash('Device Added')
return redirect(url_for('get_devices'))
return render_template('add_device.html')
@app.route('/help')
def documentation():
"""Returns page for documentation"""
return render_template('help.html')
| """Initialized the db file"""
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit() | identifier_body |
assethook.py | """Assethook: A flask application to listen for webhook calls to send
computer names and asset tags to the Jamf Pro Server.
"""
import logging
from logging.handlers import RotatingFileHandler
import os
import sqlite3
import time
import requests
from flask import (Flask, flash, g, redirect, render_template,
request, session, url_for)
# Next steps
# Implement username/password management like Flask-User
# Auto create the webhook in the jss
# CSV Handling for field mapping
# Don't allow duplicate entries, or ask to overwrite
# Delete all
# Edit row, reuse add, fill form, then update with new values
# Search for records
# Sort table (javascript?)
# Submit all without timestamp?
app = Flask(__name__) # create the application instance :)
app.config.from_object(__name__) # load config from this file
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'assethook.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default',
DEBUG=False,
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
# Logging
handler = RotatingFileHandler('assethook.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
app.logger.addHandler(handler)
# Database methods
def connect_db():
"""Connects to the specific database.
"""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
def init_db():
"""Initialized the db file"""
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb') # if you want to do it from the command line
def initdb_command():
"""Initializes the database."""
init_db()
print('Initialized the database.')
def load_settings():
'''Loads settings from the database, if the table is empty, it will be initalized
'''
db = get_db()
try:
cur = db.execute('select setting_name, setting_value from settings')
settings = cur.fetchall()
except sqlite3.OperationalError:
init_db()
return redirect(url_for('landing'))
if not settings:
init_settings()
cur = db.execute('select setting_name, setting_value from settings')
settings = cur.fetchall()
g.settings = settings
return
def init_settings():
'''Initialize the settings table'''
db = get_db()
v = [('jsshost', ''), ('jss_path', ''), ('jss_port', ''),
('jss_username', ''), ('jss_password', ''), ('set_name', '')]
db.executemany(
"""insert into settings ('setting_name', 'setting_value') values (?,?)""", v)
db.commit()
def write_settings(_request):
'''Write settings to the database'''
db = get_db()
# Add https if not given
if 'http' not in _request.form['jsshost']:
jsshost = 'https://%s' % _request.form['jsshost']
else:
jsshost = _request.form['jsshost']
v = [(jsshost, 'jsshost'),
(_request.form['jss_path'], 'jss_path',),
(_request.form['jss_port'], 'jss_port'),
(_request.form['jss_username'], 'jss_username'),
(_request.form['jss_password'], 'jss_password'),
(_request.form['set_name'], 'set_name')]
db.executemany(
"""update settings set setting_value = ? where setting_name = ?""", v)
db.commit()
# App routes
@app.route("/log")
def logTest():
app.logger.warning('testing warning log')
app.logger.error('testing error log')
app.logger.info('testing info log')
return "Code Handbook !! Log testing."
@app.route('/')
def landing():
'''Check to see if JSS Settings, database, etc are stored in the DB'''
load_settings()
for i in g.settings:
if i[0] == 'jss_username':
if i[1] == '':
flash('Please enter the required information')
session['logged_in'] = True
return redirect(url_for('settings_page'))
return redirect(url_for('get_devices'))
@app.route('/settings', methods=['GET', 'POST'])
def settings_page():
'''Displays settings and allows them to be modified'''
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'GET':
load_settings()
return render_template('settings.html', rows=g.settings)
if request.method == 'POST':
write_settings(request)
flash('Settings saved')
return redirect(url_for('get_devices'))
return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
'''Login page for now'''
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('get_devices'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
"""Logs out current user"""
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('get_devices'))
@app.route('/devices', methods=['GET'])
def get_devices(error=None):
'''Shows all devices in the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
db = get_db()
cur = db.execute(
'select id, asset_tag, serial_number, device_name, dt_sub_to_jss from devices order by id desc')
devices = cur.fetchall()
return render_template('assets.html', rows=devices, error=error)
@app.route('/submit_inventory')
def submit_to_jss(serial_number=None, device_type=None):
'''If this is called from a webhook, the serial_number and type are set when called
If not, it is being called manually from the devices page and type will be
determined by trying the JSS with GET
'''
load_settings()
settings_dict = dict((x, y) for x, y in g.settings)
if serial_number is None:
serial_number = request.args.get('serial_number')
if serial_number is None:
flash('Serial Number not passed')
return redirect(url_for('get_devices'))
db = get_db()
cur = db.execute(
'select asset_tag, device_name from devices where serial_number = \'%s\'' % serial_number)
device_info = cur.fetchone()
if device_info is None:
return 400
if device_type == 'Computer':
device_type_xml = 'computer'
device_type_url = 'computers'
if device_type == 'MobileDevice':
device_type_xml = 'mobile_device'
device_type_url = 'mobiledevices'
if device_type is None:
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/mobiledevices/serialnumber/' + serial_number
r = requests.get(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']))
if r.status_code == 200:
device_type_xml = 'mobile_device'
device_type_url = 'mobiledevices'
device_type = 'MobileDevice'
if device_type is None:
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/computers/serialnumber/' + serial_number
r = requests.get(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']))
if r.status_code == 200:
device_type_xml = 'computer'
device_type_url = 'computers'
device_type = 'Computer'
if device_type is None:
flash('Could not determine device type')
app.logger.warning('Could not determine device type')
return redirect(url_for('get_devices'))
# Check to see if there is a device name and if the device name setting is True
if settings_dict['set_name'] == 'True' and device_info['device_name'] != '':
body = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" \
"<%s><general><name>%s</name><asset_tag>%s</asset_tag></general>" \
"</%s>" % (device_type_xml,
device_info['device_name'], device_info['asset_tag'], device_type_xml)
else:
body = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" \
"<%s><general><asset_tag>%s</asset_tag></general>" \
"</%s>" % (device_type_xml,
device_info['asset_tag'], device_type_xml)
try:
r = requests.put(settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] +
'/JSSResource/%s/serialnumber/' % device_type_url + serial_number,
auth=(settings_dict['jss_username'], settings_dict['jss_password']), data=body)
if r.status_code == 409:
# A 409 error can indicate that the device record has no name. This happens when the webhook is issued
# and this program submits only an asset tag. The JSS responds that a name is reqiured. Delaying
# and trying again seems to work fine.
time.sleep(10)
url = settings_dict['jsshost'] + ':' + settings_dict['jss_port'] + settings_dict['jss_path'] + \
'/JSSResource/%s/serialnumber/' % device_type_url + serial_number
r = requests.put(url,
auth=(settings_dict['jss_username'],
settings_dict['jss_password']),
data=body)
except requests.exceptions.RequestException as e:
app.logger.error('Error submitting to JSS - %s' % e)
error = 'Command failed - Please see the logs for more information...'
return render_template('base.html', error=error)
db = get_db()
cur = db.execute(
'update devices set dt_sub_to_jss = CURRENT_TIMESTAMP where serial_number = ?', [serial_number])
db.commit()
if r.status_code == 201:
flash('{} Updated'.format(type))
return redirect(url_for('get_devices'))
flash('Connection made but device not updated. %s' % r.status_code)
return redirect(url_for('get_devices'))
@app.route('/submit_all', methods=['GET'])
def submit_all_devices(error=None):
'''Submit inventory for all devices in the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
db = get_db()
cur = db.execute(
'select id, asset_tag, serial_number, device_name, dt_sub_to_jss from devices order by id desc')
devices = cur.fetchall()
for device in devices:
print(device['serial_number'])
submit_to_jss(serial_number=device['serial_number'])
return redirect(url_for('get_devices'))
@app.route('/webhook', methods=['POST'])
def mobile_device_enrolled():
''' This is what the webhook will call'''
device = request.get_json()
print(device)
if not device:
return '', 400
if device['webhook']['webhookEvent'].startswith('Computer'):
device_type = 'Computer'
elif device['webhook']['webhookEvent'].startswith('Mobile'):
|
else:
return 'Invalid Webhook format', 403
submit_to_jss(serial_number=device['event'][u'serialNumber'], device_type=device_type)
return '', 200
@app.route('/upload_file', methods=['GET', 'POST'])
def upload_file():
'''Upload a csv file and import into the database'''
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'POST':
f = request.files['file']
if f.filename == '':
flash('No selected file')
return redirect(request.url)
db = get_db()
raw_file = f.read()
contents = iter(raw_file.split('\r\n'))
next(contents) # Skips the header
for x in contents:
if x.count(',') > 1:
# Pulls out form feeds, I'm importing from a very old Filemaker DB so this helps clean it up
c = x.replace('\x0b', '')
# Pulls out spaces, If you have unneeded dashes you can add this: .replace('-','')
asset_tag = c.split(',')[1].replace(' ', '')
# Pulls out spaces, shouldn't be any there
serial_number = c.split(',')[0].replace(' ', '')
device_name = c.split(',')[2]
db.execute('insert into devices (asset_tag, serial_number, device_name) values (?, ?, ?)',
[asset_tag, serial_number, device_name])
db.commit()
flash('Imported %s devices from: %s' %
(len(raw_file.split('\r\n')) - 1, request.files['file'].filename))
return redirect(url_for('get_devices'))
return render_template('upload.html')
# Move to post in the future?
@app.route('/delete_device', methods=['POST', 'GET'])
def delete_device():
'''Deletes a device passed as an arg'''
if not session.get('logged_in'):
return redirect(url_for('login'))
device_id = request.args.get('id')
db = get_db()
db.execute('delete from devices where id = %s' % device_id)
db.commit()
flash('{} Deleted from the local database.'.format(
request.args.get('serial_number')))
return redirect(url_for('get_devices'))
# Move to post in the future?
@app.route('/add_device', methods=['POST', 'GET'])
def add_device():
'''Add a single device'''
#device_id = request.args.get('id')
if not session.get('logged_in'):
return redirect(url_for('login'))
if request.method == 'POST':
db = get_db()
db.execute('insert into devices (asset_tag, serial_number, device_name) values (?, ?, ?)',
[request.form['asset_tag'], request.form['serial_number'], request.form['device_name']])
db.commit()
flash('Device Added')
return redirect(url_for('get_devices'))
return render_template('add_device.html')
@app.route('/help')
def documentation():
"""Returns page for documentation"""
return render_template('help.html')
| device_type = 'MobileDevice' | conditional_block |
lib.rs | #![recursion_limit = "256"]
extern crate proc_macro;
use proc_macro::TokenStream;
use proc_macro2;
use quote::quote;
use regex::Regex;
use std::collections::HashSet;
use syn::{parse_macro_input, DeriveInput};
#[derive(Debug, PartialEq)]
enum RouteToRegexError {
MissingLeadingForwardSlash,
NonAsciiChars,
InvalidIdentifier(String),
InvalidTrailingSlash,
CharactersAfterWildcard,
}
fn route_to_regex(route: &str) -> Result<(String, String), RouteToRegexError> {
enum ParseState {
Initial,
Static,
VarName(String),
WildcardFound,
};
if !route.is_ascii() {
return Err(RouteToRegexError::NonAsciiChars);
}
let ident_regex = Regex::new(r"^[a-zA-Z][a-zA-Z0-9_]*$").unwrap();
let mut regex = "".to_string();
let mut format_str = "".to_string();
let mut parse_state = ParseState::Initial;
for byte in route.chars() {
match parse_state {
ParseState::Initial => {
if byte != '/' {
return Err(RouteToRegexError::MissingLeadingForwardSlash);
}
regex += "^/";
format_str += "/";
parse_state = ParseState::Static;
}
ParseState::Static => {
if byte == ':' {
format_str.push('{');
parse_state = ParseState::VarName("".to_string());
} else {
regex.push(byte);
format_str.push(byte);
parse_state = ParseState::Static;
}
}
ParseState::VarName(mut name) => {
if byte == '/' {
// Validate 'name' as a Rust identifier
if !ident_regex.is_match(&name) {
return Err(RouteToRegexError::InvalidIdentifier(name));
}
regex += &format!("(?P<{}>[^/]+)/", name);
format_str += &format!("{}}}/", name);
parse_state = ParseState::Static;
} else if byte == '*' {
// Found a wildcard - add the var name to the regex
// Validate 'name' as a Rust identifier
if !ident_regex.is_match(&name) {
return Err(RouteToRegexError::InvalidIdentifier(name));
}
regex += &format!("(?P<{}>.*)", name);
format_str += &format!("{}}}", name);
parse_state = ParseState::WildcardFound;
} else {
name.push(byte);
parse_state = ParseState::VarName(name);
}
}
ParseState::WildcardFound => {
return Err(RouteToRegexError::CharactersAfterWildcard);
}
};
}
if let ParseState::VarName(name) = parse_state {
regex += &format!("(?P<{}>[^/]+)", name);
format_str += &format!("{}}}", name);
}
if regex.ends_with('/') {
return Err(RouteToRegexError::InvalidTrailingSlash);
}
regex += "$";
Ok((regex, format_str))
}
#[test]
fn test_route_to_regex() {
let (regex, _) = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired").unwrap();
assert_eq!(
regex,
r"^/p/(?P<project_id>[^/]+)/exams/(?P<exam_id>[^/]+)/submissions_expired$"
);
}
#[test]
fn test_route_to_regex_no_path_params() {
let (regex, _) = route_to_regex("/p/exams/submissions_expired").unwrap();
assert_eq!(regex, r"^/p/exams/submissions_expired$");
}
#[test]
fn test_route_to_regex_no_leading_slash() {
let regex = route_to_regex("p/exams/submissions_expired");
assert_eq!(regex, Err(RouteToRegexError::MissingLeadingForwardSlash));
}
#[test]
fn test_route_to_regex_non_ascii_chars() {
let regex = route_to_regex("🥖p🥖:project_id🥖exams🥖:exam_id🥖submissions_expired");
assert_eq!(regex, Err(RouteToRegexError::NonAsciiChars));
}
#[test]
fn test_route_to_regex_invalid_ident() {
let regex = | st_route_to_regex_characters_after_wildcard() {
let regex = route_to_regex("/p/:project_id/exams/:exam*ID/submissions_expired");
assert_eq!(
regex,
Err(RouteToRegexError::CharactersAfterWildcard)
);
}
#[test]
fn test_route_to_regex_invalid_ending() {
let regex = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired/");
assert_eq!(regex, Err(RouteToRegexError::InvalidTrailingSlash));
}
fn get_string_attr(name: &str, attrs: &[syn::Attribute]) -> Option<String> {
for attr in attrs {
let attr = attr.parse_meta();
if let Ok(syn::Meta::List(ref list)) = attr {
if list.ident == name {
for thing in &list.nested {
if let syn::NestedMeta::Literal(syn::Lit::Str(str_lit)) = thing {
return Some(str_lit.value());
}
}
}
}
}
None
}
fn has_flag_attr(name: &str, attrs: &[syn::Attribute]) -> bool {
for attr in attrs {
let attr = attr.parse_meta();
if let Ok(syn::Meta::Word(ref ident)) = attr {
if ident == name {
return true;
}
}
}
false
}
fn get_struct_fields(data: &syn::Data) -> Vec<syn::Field> {
match data {
syn::Data::Struct(data_struct) => match data_struct.fields {
syn::Fields::Named(ref named_fields) => named_fields.named.iter().cloned().collect(),
_ => panic!("Struct fields must be named"),
},
_ => panic!("AppRoute derive is only supported for structs"),
}
}
fn field_is_option(field: &syn::Field) -> bool {
match field.ty {
syn::Type::Path(ref type_path) => type_path
.path
.segments
.iter()
.last()
.map(|segment| segment.ident == "Option")
.unwrap_or(false),
_ => false,
}
}
#[proc_macro_derive(AppRoute, attributes(route, query))]
pub fn app_route_derive(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let struct_fields = get_struct_fields(&input.data);
let (route_fields, query_fields): (Vec<_>, Vec<_>) = struct_fields
.into_iter()
.partition(|f| !has_flag_attr("query", &f.attrs));
let name = &input.ident;
let generics = input.generics;
let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
let route_string = get_string_attr("route", &input.attrs);
let url_route = route_string.expect(
"derive(AppRoute) requires a #[route(\"/your/route/here\")] attribute on the struct",
);
let (route_regex_str, format_str) =
route_to_regex(&url_route).expect("Could not convert route attribute to a valid regex");
// Validate route_regex and make sure struct and route have matching fields
let route_regex =
Regex::new(&route_regex_str).expect("route attribute was not compiled into a valid regex");
let regex_capture_names_set: HashSet<String> = route_regex
.capture_names()
.filter_map(|c_opt| c_opt.map(|c| c.to_string()))
.collect();
let field_names_set: HashSet<String> = route_fields
.clone()
.into_iter()
.map(|f| f.ident.unwrap().to_string())
.collect();
if regex_capture_names_set != field_names_set {
let missing_from_route = field_names_set.difference(®ex_capture_names_set);
let missing_from_struct = regex_capture_names_set.difference(&field_names_set);
let error_msg = format!("\nFields in struct missing from route pattern: {:?}\nFields in route missing from struct: {:?}", missing_from_route, missing_from_struct);
panic!(error_msg);
}
let route_field_assignments = route_fields.clone().into_iter().map(|f| {
let f_ident = f.ident.unwrap();
let f_ident_str = f_ident.to_string();
quote! {
#f_ident: captures[#f_ident_str].parse().map_err(|e| {
RouteParseErr::ParamParseErr(std::string::ToString::to_string(&e))
})?
}
});
let query_field_assignments = query_fields.clone().into_iter().map(|f| {
let is_option = field_is_option(&f);
let f_ident = f.ident.unwrap();
if is_option {
quote! {
#f_ident: query_string.and_then(|q| qs::from_str(q).ok())
}
} else {
quote! {
#f_ident: qs::from_str(query_string.ok_or(RouteParseErr::NoQueryString)?).map_err(|e| RouteParseErr::QueryParseErr(e.description().to_string()))?
}
}
});
let route_field_parsers = quote! {
#(
#route_field_assignments
),*
};
let query_field_parsers = quote! {
#(
#query_field_assignments
),*
};
let format_args = route_fields.clone().into_iter().map(|f| {
let f_ident = f.ident.unwrap();
quote! {
#f_ident = self.#f_ident
}
});
let format_args = quote! {
#(
#format_args
),*
};
let query_field_to_string_statements = query_fields.into_iter().map(|f| {
let is_option = field_is_option(&f);
let f_ident = f.ident.unwrap();
if is_option {
quote! {
self.#f_ident.as_ref().and_then(|q| qs::to_string(&q).ok())
}
} else {
quote! {
qs::to_string(&self.#f_ident).ok()
}
}
});
let encoded_query_fields = quote! {
#(
#query_field_to_string_statements
),*
};
let struct_constructor = match (
route_field_parsers.is_empty(),
query_field_parsers.is_empty(),
) {
(true, true) => quote! {
#name {}
},
(true, false) => quote! {
#name {
#query_field_parsers
}
},
(false, true) => quote! {
#name {
#route_field_parsers
}
},
(false, false) => quote! {
#name {
#route_field_parsers,
#query_field_parsers
}
},
};
let app_route_impl = quote! {
impl #impl_generics app_route::AppRoute for #name #ty_generics #where_clause {
fn path_pattern() -> String {
#route_regex_str.to_string()
}
fn query_string(&self) -> Option<String> {
use app_route::serde_qs as qs;
// TODO - Remove duplicates because
// there could be multiple fields with
// a #[query] attribute that have common fields
// TODO - can this be done with an on-stack array?
let encoded_queries: Vec<Option<String>> = vec![#encoded_query_fields];
let filtered: Vec<_> = encoded_queries.into_iter().filter_map(std::convert::identity).collect();
if !filtered.is_empty() {
Some(filtered.join("&"))
} else {
None
}
}
}
impl #impl_generics std::fmt::Display for #name #ty_generics #where_clause {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(query) = self.query_string() {
let path = format!(
#format_str,
#format_args
);
write!(f, "{}?{}", path, query)
} else {
write!(
f,
#format_str,
#format_args
)
}
}
}
impl #impl_generics std::str::FromStr for #name #ty_generics #where_clause {
type Err = app_route::RouteParseErr;
fn from_str(app_path: &str) -> Result<Self, Self::Err> {
use app_route::serde_qs as qs;
use app_route::RouteParseErr;
app_route::lazy_static! {
static ref ROUTE_REGEX: app_route::Regex = app_route::Regex::new(#route_regex_str).expect("Failed to compile regex");
}
let question_pos = app_path.find('?');
let just_path = &app_path[..(question_pos.unwrap_or_else(|| app_path.len()))];
let captures = (*ROUTE_REGEX).captures(just_path).ok_or(RouteParseErr::NoMatches)?;
let query_string = question_pos.map(|question_pos| {
let mut query_string = &app_path[question_pos..];
if query_string.starts_with('?') {
query_string = &query_string[1..];
}
query_string
});
Ok(#struct_constructor)
}
}
};
let impl_wrapper = syn::Ident::new(
&format!("_IMPL_APPROUTE_FOR_{}", name.to_string()),
proc_macro2::Span::call_site(),
);
let out = quote! {
const #impl_wrapper: () = {
extern crate app_route;
#app_route_impl
};
};
out.into()
}
| route_to_regex("/p/:project_id/exams/:_exam_id/submissions_expired");
assert_eq!(
regex,
Err(RouteToRegexError::InvalidIdentifier("_exam_id".to_string()))
);
}
#[test]
fn te | identifier_body |
lib.rs | #![recursion_limit = "256"]
extern crate proc_macro;
use proc_macro::TokenStream;
use proc_macro2;
use quote::quote;
use regex::Regex;
use std::collections::HashSet;
use syn::{parse_macro_input, DeriveInput};
#[derive(Debug, PartialEq)]
enum RouteToRegexError {
MissingLeadingForwardSlash,
NonAsciiChars,
InvalidIdentifier(String),
InvalidTrailingSlash,
CharactersAfterWildcard,
}
fn route_to_regex(route: &str) -> Result<(String, String), RouteToRegexError> {
enum ParseState {
Initial,
Static,
VarName(String),
WildcardFound,
};
if !route.is_ascii() {
return Err(RouteToRegexError::NonAsciiChars);
}
let ident_regex = Regex::new(r"^[a-zA-Z][a-zA-Z0-9_]*$").unwrap();
let mut regex = "".to_string();
let mut format_str = "".to_string();
let mut parse_state = ParseState::Initial;
for byte in route.chars() {
match parse_state {
ParseState::Initial => {
if byte != '/' {
return Err(RouteToRegexError::MissingLeadingForwardSlash);
}
regex += "^/";
format_str += "/";
parse_state = ParseState::Static;
}
ParseState::Static => {
if byte == ':' {
format_str.push('{');
parse_state = ParseState::VarName("".to_string());
} else {
regex.push(byte);
format_str.push(byte);
parse_state = ParseState::Static;
}
}
ParseState::VarName(mut name) => {
if byte == '/' {
// Validate 'name' as a Rust identifier
if !ident_regex.is_match(&name) {
return Err(RouteToRegexError::InvalidIdentifier(name));
}
regex += &format!("(?P<{}>[^/]+)/", name);
format_str += &format!("{}}}/", name);
parse_state = ParseState::Static;
} else if byte == '*' {
// Found a wildcard - add the var name to the regex
// Validate 'name' as a Rust identifier
if !ident_regex.is_match(&name) {
return Err(RouteToRegexError::InvalidIdentifier(name));
}
regex += &format!("(?P<{}>.*)", name);
format_str += &format!("{}}}", name);
parse_state = ParseState::WildcardFound;
} else {
name.push(byte);
parse_state = ParseState::VarName(name);
}
}
ParseState::WildcardFound => {
return Err(RouteToRegexError::CharactersAfterWildcard);
}
};
}
if let ParseState::VarName(name) = parse_state {
regex += &format!("(?P<{}>[^/]+)", name);
format_str += &format!("{}}}", name);
}
if regex.ends_with('/') {
return Err(RouteToRegexError::InvalidTrailingSlash);
}
regex += "$";
Ok((regex, format_str))
}
#[test]
fn test_route_to_regex() {
let (regex, _) = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired").unwrap();
assert_eq!(
regex,
r"^/p/(?P<project_id>[^/]+)/exams/(?P<exam_id>[^/]+)/submissions_expired$"
);
}
#[test]
fn test_route_to_regex_no_path_params() {
let (regex, _) = route_to_regex("/p/exams/submissions_expired").unwrap();
assert_eq!(regex, r"^/p/exams/submissions_expired$");
}
#[test]
fn test_route_to_regex_no_leading_slash() {
let regex = route_to_regex("p/exams/submissions_expired");
assert_eq!(regex, Err(RouteToRegexError::MissingLeadingForwardSlash));
}
#[test]
fn test_route_to_regex_non_ascii_chars() {
let regex = route_to_regex("🥖p🥖:project_id🥖exams🥖:exam_id🥖submissions_expired");
assert_eq!(regex, Err(RouteToRegexError::NonAsciiChars));
}
#[test]
fn test_route_to_regex_invalid_ident() {
let regex = route_to_regex("/p/:project_id/exams/:_exam_id/submissions_expired");
assert_eq!(
regex,
Err(RouteToRegexError::InvalidIdentifier("_exam_id".to_string()))
);
}
#[test]
fn test_route_to_regex_characters_after_wildcard() {
let regex = route_to_regex("/p/:project_id/exams/:exam*ID/submissions_expired");
assert_eq!(
regex,
Err(RouteToRegexError::CharactersAfterWildcard)
);
}
#[test]
fn test_route_to_regex_invalid_ending() {
let regex = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired/");
assert_eq!(regex, Err(RouteToRegexError::InvalidTrailingSlash));
}
fn get_string_attr(name: &str, attrs: &[syn::Attribute]) -> Option<String> {
for attr in attrs {
let attr = attr.parse_meta();
if let Ok(syn::Meta::List(ref list)) = attr {
if list.ident == name {
for thing in &list.nested {
if let syn::NestedMeta::Literal(syn::Lit::Str(str_lit)) = thing {
return Some(str_lit.value());
}
}
}
}
}
None
}
fn has_flag_attr(name: &str, attrs: &[syn::Attribute]) -> bool {
for attr in attrs {
let attr = attr.parse_meta();
if let Ok(syn::Meta::Word(ref ident)) = attr {
if ident == name {
return true;
}
}
}
false
}
fn get_struct_fields(data: &syn::Data) -> Vec<syn::Field> {
match data {
syn::Data::Struct(data_struct) => match data_struct.fields {
syn::Fields::Named(ref named_fields) => named_fields.named.iter().cloned().collect(),
_ => panic!("Struct fields must be named"),
},
_ => panic!("AppRoute derive is only supported for structs"),
}
}
fn field_is_option(field: &syn::Field) -> bool {
match field.ty {
syn::Type::Path(ref type_path) => type_path
.path
.segments
.iter()
.last()
.map(|segment| segment.ident == "Option")
.unwrap_or(false),
_ => false,
}
}
#[proc_macro_derive(AppRoute, attributes(route, query))]
pub fn app_route_derive(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let struct_fields = get_struct_fields(&input.data);
let (route_fields, query_fields): (Vec<_>, Vec<_>) = struct_fields
.into_iter()
.partition(|f| !has_flag_attr("query", &f.attrs));
let name = &input.ident;
let generics = input.generics;
let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
let route_string = get_string_attr("route", &input.attrs);
let url_route = route_string.expect(
"derive(AppRoute) requires a #[route(\"/your/route/here\")] attribute on the struct",
);
let (route_regex_str, format_str) =
route_to_regex(&url_route).expect("Could not convert route attribute to a valid regex");
// Validate route_regex and make sure struct and route have matching fields
let route_regex =
Regex::new(&route_regex_str).expect("route attribute was not compiled into a valid regex");
let regex_capture_names_set: HashSet<String> = route_regex
.capture_names()
.filter_map(|c_opt| c_opt.map(|c| c.to_string()))
.collect();
let field_names_set: HashSet<String> = route_fields
.clone()
.into_iter()
.map(|f| f.ident.unwrap().to_string())
.collect();
if regex_capture_names_set != field_names_set {
let missing_from_route = field_names_set.difference(®ex_capture_names_set);
let missing_from_struct = regex_capture_names_set.difference(&field_names_set);
let error_msg = format!("\nFields in struct missing from route pattern: {:?}\nFields in route missing from struct: {:?}", missing_from_route, missing_from_struct);
panic!(error_msg);
}
let route_field_assignments = route_fields.clone().into_iter().map(|f| {
let f_ident = f.ident.unwrap();
let f_ident_str = f_ident.to_string();
quote! {
#f_ident: captures[#f_ident_str].parse().map_err(|e| {
RouteParseErr::ParamParseErr(std::string::ToString::to_string(&e))
})?
}
});
let query_field_assignments = query_fields.clone().into_iter().map(|f| {
let is_option = field_is_option(&f);
let f_ident = f.ident.unwrap();
if is_option {
quote! {
#f_ident: query_string.and_then(|q| qs::from_str(q).ok())
}
} else {
quote! {
#f_ident: qs::from_str(query_string.ok_or(RouteParseErr::NoQueryString)?).map_err(|e| RouteParseErr::QueryParseErr(e.description().to_string()))?
}
}
});
let route_field_parsers = quote! {
#(
#route_field_assignments
),*
};
let query_field_parsers = quote! {
#(
#query_field_assignments
),*
};
let format_args = route_fields.clone().into_iter().map(|f| {
let f_ident = f.ident.unwrap();
quote! {
#f_ident = self.#f_ident
}
});
let format_args = quote! {
#(
#format_args
),*
};
let query_field_to_string_statements = query_fields.into_iter().map(|f| {
let is_option = field_is_option(&f);
let f_ident = f.ident.unwrap();
if is_option {
quote! {
self.#f_ident.as_ref().and_then(|q| qs::to_string(&q).ok())
}
} else {
quote! {
qs::to_string(&self.#f_ident).ok()
}
}
});
let encoded_query_fields = quote! {
#(
#query_field_to_string_statements
),*
};
let struct_constructor = match (
route_field_parsers.is_empty(),
query_field_parsers.is_empty(),
) {
(true, true) => quote! {
#name {}
},
(true, false) => quote! {
#name {
#query_field_parsers
}
},
(false, true) => quote! {
#name {
#route_field_parsers
}
},
(false, false) => quote! {
#name {
#route_field_parsers,
#query_field_parsers
}
},
};
let app_route_impl = quote! {
impl #impl_generics app_route::AppRoute for #name #ty_generics #where_clause {
fn path_pattern() -> String {
#route_regex_str.to_string()
}
fn query_string(&self) -> Option<String> {
use app_route::serde_qs as qs;
// TODO - Remove duplicates because
// there could be multiple fields with
// a #[query] attribute that have common fields
// TODO - can this be done with an on-stack array?
let encoded_queries: Vec<Option<String>> = vec![#encoded_query_fields];
let filtered: Vec<_> = encoded_queries.into_iter().filter_map(std::convert::identity).collect();
if !filtered.is_empty() {
Some(filtered.join("&"))
} else {
None
}
}
}
impl #impl_generics std::fmt::Display for #name #ty_generics #where_clause {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(query) = self.query_string() {
let path = format!(
#format_str,
#format_args
);
write!(f, "{}?{}", path, query)
} else {
write!(
f,
#format_str,
#format_args
)
}
}
}
impl #impl_generics std::str::FromStr for #name #ty_generics #where_clause {
type Err = app_route::RouteParseErr;
fn from_str(app_path: &str) -> Result<Self, Self::Err> {
use app_route::serde_qs as qs;
use app_route::RouteParseErr;
app_route::lazy_static! {
static ref ROUTE_REGEX: app_route::Regex = app_route::Regex::new(#route_regex_str).expect("Failed to compile regex");
}
let question_pos = app_path.find('?');
let just_path = &app_path[..(question_pos.unwrap_or_else(|| app_path.len()))];
let captures = (*ROUTE_REGEX).captures(just_path).ok_or(RouteParseErr::NoMatches)?;
let query_string = question_pos.map(|question_pos| {
let mut query_string = &app_path[question_pos..]; | }
query_string
});
Ok(#struct_constructor)
}
}
};
let impl_wrapper = syn::Ident::new(
&format!("_IMPL_APPROUTE_FOR_{}", name.to_string()),
proc_macro2::Span::call_site(),
);
let out = quote! {
const #impl_wrapper: () = {
extern crate app_route;
#app_route_impl
};
};
out.into()
} |
if query_string.starts_with('?') {
query_string = &query_string[1..]; | random_line_split |
lib.rs | #![recursion_limit = "256"]
extern crate proc_macro;
use proc_macro::TokenStream;
use proc_macro2;
use quote::quote;
use regex::Regex;
use std::collections::HashSet;
use syn::{parse_macro_input, DeriveInput};
#[derive(Debug, PartialEq)]
enum RouteToRegexError {
MissingLeadingForwardSlash,
NonAsciiChars,
InvalidIdentifier(String),
InvalidTrailingSlash,
CharactersAfterWildcard,
}
fn route_to_regex(route: &str) -> Result<(String, String), RouteToRegexError> {
enum ParseState {
Initial,
Static,
VarName(String),
WildcardFound,
};
if !route.is_ascii() {
return Err(RouteToRegexError::NonAsciiChars);
}
let ident_regex = Regex::new(r"^[a-zA-Z][a-zA-Z0-9_]*$").unwrap();
let mut regex = "".to_string();
let mut format_str = "".to_string();
let mut parse_state = ParseState::Initial;
for byte in route.chars() {
match parse_state {
ParseState::Initial => {
if byte != '/' {
return Err(RouteToRegexError::MissingLeadingForwardSlash);
}
regex += "^/";
format_str += "/";
parse_state = ParseState::Static;
}
ParseState::Static => {
if byte == ':' {
format_str.push('{');
parse_state = ParseState::VarName("".to_string());
} else {
regex.push(byte);
format_str.push(byte);
parse_state = ParseState::Static;
}
}
ParseState::VarName(mut name) => {
if byte == '/' {
// Validate 'name' as a Rust identifier
if !ident_regex.is_match(&name) {
return Err(RouteToRegexError::InvalidIdentifier(name));
}
regex += &format!("(?P<{}>[^/]+)/", name);
format_str += &format!("{}}}/", name);
parse_state = ParseState::Static;
} else if byte == '*' {
// Found a wildcard - add the var name to the regex
// Validate 'name' as a Rust identifier
if !ident_regex.is_match(&name) {
return Err(RouteToRegexError::InvalidIdentifier(name));
}
regex += &format!("(?P<{}>.*)", name);
format_str += &format!("{}}}", name);
parse_state = ParseState::WildcardFound;
} else {
name.push(byte);
parse_state = ParseState::VarName(name);
}
}
ParseState::WildcardFound => {
return Err(RouteToRegexError::CharactersAfterWildcard);
}
};
}
if let ParseState::VarName(name) = parse_state {
regex += &format!("(?P<{}>[^/]+)", name);
format_str += &format!("{}}}", name);
}
if regex.ends_with('/') {
return Err(RouteToRegexError::InvalidTrailingSlash);
}
regex += "$";
Ok((regex, format_str))
}
#[test]
fn test_route_to_regex() {
let (regex, _) = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired").unwrap();
assert_eq!(
regex,
r"^/p/(?P<project_id>[^/]+)/exams/(?P<exam_id>[^/]+)/submissions_expired$"
);
}
#[test]
fn test_route_to_regex_no_path_params() {
let (regex, _) = route_to_regex("/p/exams/submissions_expired").unwrap();
assert_eq!(regex, r"^/p/exams/submissions_expired$");
}
#[test]
fn test_route_to_regex_no_leading_slash() {
let regex = route_to_regex("p/exams/submissions_expired");
assert_eq!(regex, Err(RouteToRegexError::MissingLeadingForwardSlash));
}
#[test]
fn test_route_to_regex_non_ascii_chars() {
let regex = route_to_regex("🥖p🥖:project_id🥖exams🥖:exam_id🥖submissions_expired");
assert_eq!(regex, Err(RouteToRegexError::NonAsciiChars));
}
#[test]
fn test_route_to_regex_invalid_ident() {
let regex = route_to_regex("/p/:project_id/exams/:_exam_id/submissions_expired");
assert_eq!(
regex,
Err(RouteToRegexError::InvalidIdentifier("_exam_id".to_string()))
);
}
#[test]
fn test_route_to_regex_characters_after_wildcard() {
let regex = route_to_regex("/p/:project_id/exams/:exam*ID/submissions_expired");
assert_eq!(
regex,
Err(RouteToRegexError::CharactersAfterWildcard)
);
}
#[test]
fn test_route_to_regex_invalid_ending() {
let regex = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired/");
assert_eq!(regex, Err(RouteToRegexError::InvalidTrailingSlash));
}
fn get_string_attr(name: &str, attrs: &[syn::Attribute]) -> Option<String> {
for attr in attrs {
let attr = attr.parse_meta();
if let Ok(syn::Meta::List(ref list)) = attr {
if list.ident == name {
for thing in &list.nested {
if let syn::NestedMeta::Literal(syn::Lit::Str(str_lit)) = thing {
return Some(str_lit.value());
}
}
}
}
}
None
}
fn has_flag_attr(name: &str, attrs: &[syn::Attribute]) -> bool {
for attr in attrs {
let attr = attr.parse_meta();
if let Ok(syn::Meta::Word(ref ident)) = attr {
if ident == name {
return true;
}
}
}
false
}
fn get_struct_fiel | ta) -> Vec<syn::Field> {
match data {
syn::Data::Struct(data_struct) => match data_struct.fields {
syn::Fields::Named(ref named_fields) => named_fields.named.iter().cloned().collect(),
_ => panic!("Struct fields must be named"),
},
_ => panic!("AppRoute derive is only supported for structs"),
}
}
fn field_is_option(field: &syn::Field) -> bool {
match field.ty {
syn::Type::Path(ref type_path) => type_path
.path
.segments
.iter()
.last()
.map(|segment| segment.ident == "Option")
.unwrap_or(false),
_ => false,
}
}
#[proc_macro_derive(AppRoute, attributes(route, query))]
pub fn app_route_derive(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let struct_fields = get_struct_fields(&input.data);
let (route_fields, query_fields): (Vec<_>, Vec<_>) = struct_fields
.into_iter()
.partition(|f| !has_flag_attr("query", &f.attrs));
let name = &input.ident;
let generics = input.generics;
let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
let route_string = get_string_attr("route", &input.attrs);
let url_route = route_string.expect(
"derive(AppRoute) requires a #[route(\"/your/route/here\")] attribute on the struct",
);
let (route_regex_str, format_str) =
route_to_regex(&url_route).expect("Could not convert route attribute to a valid regex");
// Validate route_regex and make sure struct and route have matching fields
let route_regex =
Regex::new(&route_regex_str).expect("route attribute was not compiled into a valid regex");
let regex_capture_names_set: HashSet<String> = route_regex
.capture_names()
.filter_map(|c_opt| c_opt.map(|c| c.to_string()))
.collect();
let field_names_set: HashSet<String> = route_fields
.clone()
.into_iter()
.map(|f| f.ident.unwrap().to_string())
.collect();
if regex_capture_names_set != field_names_set {
let missing_from_route = field_names_set.difference(®ex_capture_names_set);
let missing_from_struct = regex_capture_names_set.difference(&field_names_set);
let error_msg = format!("\nFields in struct missing from route pattern: {:?}\nFields in route missing from struct: {:?}", missing_from_route, missing_from_struct);
panic!(error_msg);
}
let route_field_assignments = route_fields.clone().into_iter().map(|f| {
let f_ident = f.ident.unwrap();
let f_ident_str = f_ident.to_string();
quote! {
#f_ident: captures[#f_ident_str].parse().map_err(|e| {
RouteParseErr::ParamParseErr(std::string::ToString::to_string(&e))
})?
}
});
let query_field_assignments = query_fields.clone().into_iter().map(|f| {
let is_option = field_is_option(&f);
let f_ident = f.ident.unwrap();
if is_option {
quote! {
#f_ident: query_string.and_then(|q| qs::from_str(q).ok())
}
} else {
quote! {
#f_ident: qs::from_str(query_string.ok_or(RouteParseErr::NoQueryString)?).map_err(|e| RouteParseErr::QueryParseErr(e.description().to_string()))?
}
}
});
let route_field_parsers = quote! {
#(
#route_field_assignments
),*
};
let query_field_parsers = quote! {
#(
#query_field_assignments
),*
};
let format_args = route_fields.clone().into_iter().map(|f| {
let f_ident = f.ident.unwrap();
quote! {
#f_ident = self.#f_ident
}
});
let format_args = quote! {
#(
#format_args
),*
};
let query_field_to_string_statements = query_fields.into_iter().map(|f| {
let is_option = field_is_option(&f);
let f_ident = f.ident.unwrap();
if is_option {
quote! {
self.#f_ident.as_ref().and_then(|q| qs::to_string(&q).ok())
}
} else {
quote! {
qs::to_string(&self.#f_ident).ok()
}
}
});
let encoded_query_fields = quote! {
#(
#query_field_to_string_statements
),*
};
let struct_constructor = match (
route_field_parsers.is_empty(),
query_field_parsers.is_empty(),
) {
(true, true) => quote! {
#name {}
},
(true, false) => quote! {
#name {
#query_field_parsers
}
},
(false, true) => quote! {
#name {
#route_field_parsers
}
},
(false, false) => quote! {
#name {
#route_field_parsers,
#query_field_parsers
}
},
};
let app_route_impl = quote! {
impl #impl_generics app_route::AppRoute for #name #ty_generics #where_clause {
fn path_pattern() -> String {
#route_regex_str.to_string()
}
fn query_string(&self) -> Option<String> {
use app_route::serde_qs as qs;
// TODO - Remove duplicates because
// there could be multiple fields with
// a #[query] attribute that have common fields
// TODO - can this be done with an on-stack array?
let encoded_queries: Vec<Option<String>> = vec![#encoded_query_fields];
let filtered: Vec<_> = encoded_queries.into_iter().filter_map(std::convert::identity).collect();
if !filtered.is_empty() {
Some(filtered.join("&"))
} else {
None
}
}
}
impl #impl_generics std::fmt::Display for #name #ty_generics #where_clause {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(query) = self.query_string() {
let path = format!(
#format_str,
#format_args
);
write!(f, "{}?{}", path, query)
} else {
write!(
f,
#format_str,
#format_args
)
}
}
}
impl #impl_generics std::str::FromStr for #name #ty_generics #where_clause {
type Err = app_route::RouteParseErr;
fn from_str(app_path: &str) -> Result<Self, Self::Err> {
use app_route::serde_qs as qs;
use app_route::RouteParseErr;
app_route::lazy_static! {
static ref ROUTE_REGEX: app_route::Regex = app_route::Regex::new(#route_regex_str).expect("Failed to compile regex");
}
let question_pos = app_path.find('?');
let just_path = &app_path[..(question_pos.unwrap_or_else(|| app_path.len()))];
let captures = (*ROUTE_REGEX).captures(just_path).ok_or(RouteParseErr::NoMatches)?;
let query_string = question_pos.map(|question_pos| {
let mut query_string = &app_path[question_pos..];
if query_string.starts_with('?') {
query_string = &query_string[1..];
}
query_string
});
Ok(#struct_constructor)
}
}
};
let impl_wrapper = syn::Ident::new(
&format!("_IMPL_APPROUTE_FOR_{}", name.to_string()),
proc_macro2::Span::call_site(),
);
let out = quote! {
const #impl_wrapper: () = {
extern crate app_route;
#app_route_impl
};
};
out.into()
}
| ds(data: &syn::Da | identifier_name |
lib.rs | #![recursion_limit = "256"]
extern crate proc_macro;
use proc_macro::TokenStream;
use proc_macro2;
use quote::quote;
use regex::Regex;
use std::collections::HashSet;
use syn::{parse_macro_input, DeriveInput};
#[derive(Debug, PartialEq)]
enum RouteToRegexError {
MissingLeadingForwardSlash,
NonAsciiChars,
InvalidIdentifier(String),
InvalidTrailingSlash,
CharactersAfterWildcard,
}
fn route_to_regex(route: &str) -> Result<(String, String), RouteToRegexError> {
enum ParseState {
Initial,
Static,
VarName(String),
WildcardFound,
};
if !route.is_ascii() {
return Err(RouteToRegexError::NonAsciiChars);
}
let ident_regex = Regex::new(r"^[a-zA-Z][a-zA-Z0-9_]*$").unwrap();
let mut regex = "".to_string();
let mut format_str = "".to_string();
let mut parse_state = ParseState::Initial;
for byte in route.chars() {
match parse_state {
ParseState::Initial => {
if byte != '/' {
return Err(RouteToRegexError::MissingLeadingForwardSlash);
}
regex += "^/";
format_str += "/";
parse_state = ParseState::Static;
}
ParseState::Static => {
if byte == ':' {
format_str.push('{');
parse_state = ParseState::VarName("".to_string());
} else {
regex.push(byte);
format_str.push(byte);
parse_state = ParseState::Static;
}
}
ParseState::VarName(mut name) => {
if byte == '/' {
// Validate 'name' as a Rust identifier
if !ident_regex.is_match(&name) |
regex += &format!("(?P<{}>[^/]+)/", name);
format_str += &format!("{}}}/", name);
parse_state = ParseState::Static;
} else if byte == '*' {
// Found a wildcard - add the var name to the regex
// Validate 'name' as a Rust identifier
if !ident_regex.is_match(&name) {
return Err(RouteToRegexError::InvalidIdentifier(name));
}
regex += &format!("(?P<{}>.*)", name);
format_str += &format!("{}}}", name);
parse_state = ParseState::WildcardFound;
} else {
name.push(byte);
parse_state = ParseState::VarName(name);
}
}
ParseState::WildcardFound => {
return Err(RouteToRegexError::CharactersAfterWildcard);
}
};
}
if let ParseState::VarName(name) = parse_state {
regex += &format!("(?P<{}>[^/]+)", name);
format_str += &format!("{}}}", name);
}
if regex.ends_with('/') {
return Err(RouteToRegexError::InvalidTrailingSlash);
}
regex += "$";
Ok((regex, format_str))
}
#[test]
fn test_route_to_regex() {
let (regex, _) = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired").unwrap();
assert_eq!(
regex,
r"^/p/(?P<project_id>[^/]+)/exams/(?P<exam_id>[^/]+)/submissions_expired$"
);
}
#[test]
fn test_route_to_regex_no_path_params() {
let (regex, _) = route_to_regex("/p/exams/submissions_expired").unwrap();
assert_eq!(regex, r"^/p/exams/submissions_expired$");
}
#[test]
fn test_route_to_regex_no_leading_slash() {
let regex = route_to_regex("p/exams/submissions_expired");
assert_eq!(regex, Err(RouteToRegexError::MissingLeadingForwardSlash));
}
#[test]
fn test_route_to_regex_non_ascii_chars() {
let regex = route_to_regex("🥖p🥖:project_id🥖exams🥖:exam_id🥖submissions_expired");
assert_eq!(regex, Err(RouteToRegexError::NonAsciiChars));
}
#[test]
fn test_route_to_regex_invalid_ident() {
let regex = route_to_regex("/p/:project_id/exams/:_exam_id/submissions_expired");
assert_eq!(
regex,
Err(RouteToRegexError::InvalidIdentifier("_exam_id".to_string()))
);
}
#[test]
fn test_route_to_regex_characters_after_wildcard() {
let regex = route_to_regex("/p/:project_id/exams/:exam*ID/submissions_expired");
assert_eq!(
regex,
Err(RouteToRegexError::CharactersAfterWildcard)
);
}
#[test]
fn test_route_to_regex_invalid_ending() {
let regex = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired/");
assert_eq!(regex, Err(RouteToRegexError::InvalidTrailingSlash));
}
fn get_string_attr(name: &str, attrs: &[syn::Attribute]) -> Option<String> {
for attr in attrs {
let attr = attr.parse_meta();
if let Ok(syn::Meta::List(ref list)) = attr {
if list.ident == name {
for thing in &list.nested {
if let syn::NestedMeta::Literal(syn::Lit::Str(str_lit)) = thing {
return Some(str_lit.value());
}
}
}
}
}
None
}
fn has_flag_attr(name: &str, attrs: &[syn::Attribute]) -> bool {
for attr in attrs {
let attr = attr.parse_meta();
if let Ok(syn::Meta::Word(ref ident)) = attr {
if ident == name {
return true;
}
}
}
false
}
fn get_struct_fields(data: &syn::Data) -> Vec<syn::Field> {
match data {
syn::Data::Struct(data_struct) => match data_struct.fields {
syn::Fields::Named(ref named_fields) => named_fields.named.iter().cloned().collect(),
_ => panic!("Struct fields must be named"),
},
_ => panic!("AppRoute derive is only supported for structs"),
}
}
fn field_is_option(field: &syn::Field) -> bool {
match field.ty {
syn::Type::Path(ref type_path) => type_path
.path
.segments
.iter()
.last()
.map(|segment| segment.ident == "Option")
.unwrap_or(false),
_ => false,
}
}
#[proc_macro_derive(AppRoute, attributes(route, query))]
pub fn app_route_derive(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let struct_fields = get_struct_fields(&input.data);
let (route_fields, query_fields): (Vec<_>, Vec<_>) = struct_fields
.into_iter()
.partition(|f| !has_flag_attr("query", &f.attrs));
let name = &input.ident;
let generics = input.generics;
let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
let route_string = get_string_attr("route", &input.attrs);
let url_route = route_string.expect(
"derive(AppRoute) requires a #[route(\"/your/route/here\")] attribute on the struct",
);
let (route_regex_str, format_str) =
route_to_regex(&url_route).expect("Could not convert route attribute to a valid regex");
// Validate route_regex and make sure struct and route have matching fields
let route_regex =
Regex::new(&route_regex_str).expect("route attribute was not compiled into a valid regex");
let regex_capture_names_set: HashSet<String> = route_regex
.capture_names()
.filter_map(|c_opt| c_opt.map(|c| c.to_string()))
.collect();
let field_names_set: HashSet<String> = route_fields
.clone()
.into_iter()
.map(|f| f.ident.unwrap().to_string())
.collect();
if regex_capture_names_set != field_names_set {
let missing_from_route = field_names_set.difference(®ex_capture_names_set);
let missing_from_struct = regex_capture_names_set.difference(&field_names_set);
let error_msg = format!("\nFields in struct missing from route pattern: {:?}\nFields in route missing from struct: {:?}", missing_from_route, missing_from_struct);
panic!(error_msg);
}
let route_field_assignments = route_fields.clone().into_iter().map(|f| {
let f_ident = f.ident.unwrap();
let f_ident_str = f_ident.to_string();
quote! {
#f_ident: captures[#f_ident_str].parse().map_err(|e| {
RouteParseErr::ParamParseErr(std::string::ToString::to_string(&e))
})?
}
});
let query_field_assignments = query_fields.clone().into_iter().map(|f| {
let is_option = field_is_option(&f);
let f_ident = f.ident.unwrap();
if is_option {
quote! {
#f_ident: query_string.and_then(|q| qs::from_str(q).ok())
}
} else {
quote! {
#f_ident: qs::from_str(query_string.ok_or(RouteParseErr::NoQueryString)?).map_err(|e| RouteParseErr::QueryParseErr(e.description().to_string()))?
}
}
});
let route_field_parsers = quote! {
#(
#route_field_assignments
),*
};
let query_field_parsers = quote! {
#(
#query_field_assignments
),*
};
let format_args = route_fields.clone().into_iter().map(|f| {
let f_ident = f.ident.unwrap();
quote! {
#f_ident = self.#f_ident
}
});
let format_args = quote! {
#(
#format_args
),*
};
let query_field_to_string_statements = query_fields.into_iter().map(|f| {
let is_option = field_is_option(&f);
let f_ident = f.ident.unwrap();
if is_option {
quote! {
self.#f_ident.as_ref().and_then(|q| qs::to_string(&q).ok())
}
} else {
quote! {
qs::to_string(&self.#f_ident).ok()
}
}
});
let encoded_query_fields = quote! {
#(
#query_field_to_string_statements
),*
};
let struct_constructor = match (
route_field_parsers.is_empty(),
query_field_parsers.is_empty(),
) {
(true, true) => quote! {
#name {}
},
(true, false) => quote! {
#name {
#query_field_parsers
}
},
(false, true) => quote! {
#name {
#route_field_parsers
}
},
(false, false) => quote! {
#name {
#route_field_parsers,
#query_field_parsers
}
},
};
let app_route_impl = quote! {
impl #impl_generics app_route::AppRoute for #name #ty_generics #where_clause {
fn path_pattern() -> String {
#route_regex_str.to_string()
}
fn query_string(&self) -> Option<String> {
use app_route::serde_qs as qs;
// TODO - Remove duplicates because
// there could be multiple fields with
// a #[query] attribute that have common fields
// TODO - can this be done with an on-stack array?
let encoded_queries: Vec<Option<String>> = vec![#encoded_query_fields];
let filtered: Vec<_> = encoded_queries.into_iter().filter_map(std::convert::identity).collect();
if !filtered.is_empty() {
Some(filtered.join("&"))
} else {
None
}
}
}
impl #impl_generics std::fmt::Display for #name #ty_generics #where_clause {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if let Some(query) = self.query_string() {
let path = format!(
#format_str,
#format_args
);
write!(f, "{}?{}", path, query)
} else {
write!(
f,
#format_str,
#format_args
)
}
}
}
impl #impl_generics std::str::FromStr for #name #ty_generics #where_clause {
type Err = app_route::RouteParseErr;
fn from_str(app_path: &str) -> Result<Self, Self::Err> {
use app_route::serde_qs as qs;
use app_route::RouteParseErr;
app_route::lazy_static! {
static ref ROUTE_REGEX: app_route::Regex = app_route::Regex::new(#route_regex_str).expect("Failed to compile regex");
}
let question_pos = app_path.find('?');
let just_path = &app_path[..(question_pos.unwrap_or_else(|| app_path.len()))];
let captures = (*ROUTE_REGEX).captures(just_path).ok_or(RouteParseErr::NoMatches)?;
let query_string = question_pos.map(|question_pos| {
let mut query_string = &app_path[question_pos..];
if query_string.starts_with('?') {
query_string = &query_string[1..];
}
query_string
});
Ok(#struct_constructor)
}
}
};
let impl_wrapper = syn::Ident::new(
&format!("_IMPL_APPROUTE_FOR_{}", name.to_string()),
proc_macro2::Span::call_site(),
);
let out = quote! {
const #impl_wrapper: () = {
extern crate app_route;
#app_route_impl
};
};
out.into()
}
| {
return Err(RouteToRegexError::InvalidIdentifier(name));
} | conditional_block |
dockeranchor.js | 'use strict';
const Docker = require('node-docker-api').Docker
const fs = require('fs');
const compiler = require('./models/Compiler')
const execenv = require('./models/ExecEnv')
const tar = require('tar')
const path = require('path')
const { promisify } = require('util')
const unlinkAsync = promisify(fs.unlink)
const renameAsync = promisify(fs.rename)
const crypto = require('crypto')
const dockerProto = process.env.RACOONDOCKERPROTO || 'http'
const dockerHost = process.env.RACOONDOCKERHOST || '127.0.0.1'
const dockerPort = process.env.RACOONDOCKERPORT || 2376
const docker = new Docker({ protocol: dockerProto, host: dockerHost, port: dockerPort })
function promisifyStream(stream) {
return new Promise((resolve, reject) => {
stream.on('data', (d) => console.log(d.toString()))
stream.on('end', resolve)
stream.on('error', reject)
})
}
function promisifyStreamNoSpam(stream) {
return new Promise((resolve, reject) => {
stream.on('data', () => { }); //https://nodejs.org/api/stream.html#stream_event_data
stream.on('end', resolve);
stream.on('error', reject);
})
}
const fileExtension = /\.[^/\\\.]*(?=$)/;
const pathToFile = /^.*[/\\]/;
function as | ime) {
return new Promise((resolve) => {
setTimeout(resolve, time);
})
}
const splitEx = /(["'].*?["']|[^"'\s]+)/g
function splitCommands(str) {
return str.match(splitEx).map(
(el) =>
el.replace(/^["']|["']$/g, '')
);
}
async function pingDocker()
{
try{
const info = await docker.info();
return[true, info];
}catch(err)
{
console.error(err);
return [false, err];
}
}
function gccDetect() {
docker.container.create({
Image: 'gcc',
Cmd: ['/bin/bash'],
name: 'test-gcc',
AttachStdout: false,
AttachStderr: false,
tty: true
})
.then((container) => container.start())
.then((container) => {
_container = container
return container.exec.create({
AttachStdout: true,
AttachStderr: true,
Cmd: ['gcc', '--version']
})
})
.then((exec) => {
return exec.start({ detach: false })
})
.then((stream) => promisifyStream(stream))
.then(() => _container.kill())
.then(() => _container.delete({ force: true }))
.then(() => console.log('Test container deleted'))
.catch((error) => console.log(error))
}
/** Compiles inside a container
*
* @param {string} comp Compiler name.
* @param {string} file Complete path to the input file. Can be referenced by ${this.file}
* @param {string} _outfile Output file path. Optional. If not specified outputs with the same name, but with .tar extension.
* @returns {string} Path to binary. On fail throws pair int, string. If int is greater than zero, problem is bad compiler configuration or server error. If it's 0, problem is with the executed program (normal CE)
*/
async function compile(comp, file, _outfile) {
return new Promise(async (resolve, reject) => {
var logs = ''
let container
try {
const fileBasename = path.basename(file)
const fileDirname = path.dirname(file)
const outfile = _outfile || file.replace(fileExtension, '.tar')
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
console.log(`Let's compile! ${fileBasename}`);
if((await pingDocker())[0] == false){
reject([1, 'Cannot reach docker machine']);
return;
}
const compilerInstance = await compiler.findOne({ name: comp });
if (!compilerInstance){
reject ([1, 'Invalid compiler name']);
return;
}
if (compilerInstance.shadow === true) {
resolve(file);
return;
}
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
container = await docker.container.create({
Image: compilerInstance.image_name,
// compilerInstance.exec_command can be template string, it splits with ' '
// '_' is a non-splitting space
// Example of exec_command
// "gcc -lstdc++ -std=c++17 -O2 -o a.out ${this.file}"
// or
// "gcc -lstdc++ -std=c++17 -O2 -o ${this.file+'.out'} ${this.file}"
// (but outputs are never exctracted from tar so they could have the same name)
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(compilerInstance.exec_command, { file: fileBasename }).split(' ').map(el => el.replace(/_/g, ' ')),
// file name could be required to be first argument or appear more than once in compiler commands of some weird languages :D
AttachStdout: false,
AttachStderr: false,
tty: false
})
await container.fs.put(tarfile, { path: '.' })
.then(stream => promisifyStream(stream))
await unlinkAsync(tarfile)
await container.start()
let _unpromStream = await container.logs({
follow: true,
stdout: true,
stderr: true
})
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
logs = logs.concat(d.toString().substr(8, d.toString().length));
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
});
await container.wait();
const outTarPath = outfile + '.tmptar'
await container.fs.get({ path: compilerInstance.output_name })
.then(stream => {
const file = fs.createWriteStream(outTarPath)
stream.pipe(file)
return promisifyStreamNoSpam(stream)
})
let compiledFile;
await tar.list({file : outTarPath, onentry: entry => {compiledFile = entry.path}});
await tar.extract({ file: outTarPath, C: fileDirname });
await renameAsync(`${fileDirname}/${compiledFile}`, outfile)
await unlinkAsync(outTarPath)
await container.delete({ force: true })
resolve(outfile);
} catch (err) {
if (typeof container !== 'undefined') await container.delete({ force: true })
if (logs.length) reject([0, logs])
else reject([1, err ])
}
})
}
/** Executes a program inside docker container
*
* @param {string} exname Name of Execution Environment
* @param {string} infile Path to input file, expects a file. Can be referenced by ${this.file}
* @param {string} stdinfile Path to the file to be sent to the container, containing input data. You need to pipe its contents 'manually' e.g. by executing command inside container. Can be referenced by ${this.input}
* @returns {string} Array containing output from running command. 0 is stdout, 1 is stderr. On fail throws pair int, string. If int is greater than zero, problem is bad execenv configuration or server error. If it's 0, problem is with the executed program (it page-faults or exceeds time limits)
*/
async function exec(exname, infile, stdinfile) {
return new Promise(async (resolve, reject) => {
const _execInstance = await execenv.findOne({ name: exname });
if (!_execInstance) {
reject([1, 'Invalid ExecEnv']);
return;
}
let _container
try {
const fileBasename = path.basename(infile);
const fileDirname = path.dirname(infile);
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
const infilename = fileBasename;//infile.replace(pathToFile, '')
let stdininfilename = '';
if(stdinfile)stdininfilename = path.basename(stdinfile);
console.log(`Let's execute! ${fileBasename}`);
if((await pingDocker())[0] == false){
reject([1, 'Cannot reach docker machine']);
return;
}
_container = await docker.container.create({
Image: _execInstance.image_name,
// _execInstance.exec_command can be template string, it splits with '_'
// Example of exec_command:
// "bash -c chmod_+x_a.out_;_./a.out"
// (results in ['bash', '-c', 'chmod +x a.out ; ./a.out'])
// or
// "bash -c chmod_+x_${this.file}_;_./${this.file}"
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(_execInstance.exec_command, { file: infilename, input: stdininfilename }).split(' ').map(el => el.replace(/_/g, ' ')),
Memory: _execInstance.memory,
AttachStdout: false,
AttachStderr: false,
AttachStdin: false,
tty: false,
OpenStdin: false,
//interactive: (stdinfile ? true : false),
})
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
if (stdinfile) {
/*console.log("Redirecting input.");
var [_stdinstream,] = await _container.attach({ stream: true, stderr: true });
var _fstrm = fs.createReadStream(stdinfile);
_fstrm.pipe(_stdinstream) //readable->writable
await promisifyStream(_fstrm);*/
await tar.r({
file: tarfile,
cwd: fileDirname
}, [stdininfilename])
}
var _unpromStream = await _container.fs.put(tarfile, { path: '.' })
await promisifyStreamNoSpam(_unpromStream);
await unlinkAsync(tarfile);
await _container.start();
//await _container.wait();
await asyncWait(_execInstance.time);
const inspection = await _container.status();
if (inspection.data.State.Status !== 'exited') {
await _container.kill();
await _container.delete({ force: true });
reject([0, 'Time Limit Exceeded']);
return;
}
//else await _container.stop();
_unpromStream = await _container.logs({
follow: true,
stdout: true,
stderr: true
})
const logs = new Array('','','')
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
switch(d.toString().charCodeAt(0)){
case 1: //stdout+(prawie zawsze)stdin
logs[0] = logs[0].concat(d.toString().substr(8, d.toString().length)); //https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach;
break;
case 2: //stderr
logs[1] = logs[1].concat(d.toString().substr(8, d.toString().length));
break;
default: //stdin (sam)
logs[2] = logs[2].concat(d.toString().substr(8, d.toString().length));
break;
}
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
})
await _container.delete({ force: true });
//resolve(logs.join().replace(/[^\x20-\x7E]/g, '').trim())
resolve(logs);
return;
} catch (err) {
if (typeof _container !== 'undefined') {
_container.delete({ force: true }).catch((e) => console.error(`Failed to clean up after exec error, it is still alive! ${e}`));
}
reject([1, `Failed at execution attempt: ${err}`]);
return;
}
})
}
/** Executes a program inside docker container, but better
*
* @param {string} exname Name of Execution Environment
* @param {string} infile Path to input file, expects a file. Can be referenced by ${this.file}
* @param {string} stdinfile Path to the file to be sent to the container, containing input data. You need to pipe its contents 'manually' e.g. by executing command inside container. Can be referenced by ${this.input}
* @param {Object} morefiles More files. Format {patternNameToBeReplaced: filePath}.
* @param {Object} opts Options. memLimit - memory limit. timeLimit - time limit. env - array of environmental variables to pass.
* @returns {string} Tuple with paths to files containing demultiplexed output. 0 is stdout, 1 is stderr. On fail throws pair int, string. If int is greater than zero, problem is bad execenv configuration or server error. If it's 0, problem is with the executed program (it page-faults or exceeds time limits)
*/
async function execEx(exname, infile, stdinfile, morefiles, optz) {
return new Promise(async (resolve, reject) => {
const _execInstance = await execenv.findOne({ name: exname });
const opts = optz || {};
if (!_execInstance) {
reject([1, 'Invalid ExecEnv']);
return;
}
let _container
try {
const fileBasename = path.basename(infile);
const fileDirname = path.dirname(infile);
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
const infilename = fileBasename;
let stdininfilename = '';
if(stdinfile)stdininfilename = path.basename(stdinfile);
const timeLimit = Math.min(_execInstance.time, isNaN(opts.timeLimit) ? Infinity : opts.timeLimit);
const memLimit = Math.min(_execInstance.memLimit, isNaN(opts.memLimit) ? Infinity: opts.memLimit);
const patternObject = { file: infilename, input: stdininfilename }
for (let key in morefiles) {
patternObject[key] = path.basename(morefiles[key])
}
console.log(`Let's execute! ${fileBasename}`);
_container = await docker.container.create({
Image: _execInstance.image_name,
// _execInstance.exec_command can be template string, it splits with '_'
// Example of exec_command:
// "bash -c chmod_+x_a.out_;_./a.out"
// (results in ['bash', '-c', 'chmod +x a.out ; ./a.out'])
// or
// "bash -c chmod_+x_${this.file}_;_./${this.file}"
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(_execInstance.exec_command, patternObject).split(' ').map(el => el.replace(/_/g, ' ')),
Memory: memLimit,
AttachStdout: false,
AttachStderr: false,
AttachStdin: false,
tty: false,
OpenStdin: false,
//interactive: (stdinfile ? true : false),
})
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
if (stdinfile) {
await tar.r({
file: tarfile,
cwd: fileDirname
}, [stdininfilename])
}
if(morefiles){
for( let key in morefiles){
await tar.r({
file: tarfile,
cwd: path.dirname(morefiles[key])
}, [path.basename(morefiles[key])])
}
}
let _unpromStream = await _container.fs.put(tarfile, { path: '.' })
await promisifyStreamNoSpam(_unpromStream);
await unlinkAsync(tarfile);
await _container.start();
await asyncWait(timeLimit);
await _container.kill()
.catch(_=>{});
const inspection = await _container.status();
const execTime = new Date(inspection.data.State.FinishedAt) - new Date(inspection.data.State.StartedAt);
console.log(`Calculated uptime of ${execTime}`);
if (execTime >= timeLimit) {
await _container.delete({ force: true });
reject([0,'Time Limit Exceeded']);
return;
}
console.log(`Container in state: ${inspection.data.State.Status} and health: ${inspection.data.State.Error}`);
if (inspection.data.State.Error !== '') {
await _container.delete({ force: true });
reject([0,'Runtime Error', inspection.data.State.Error]);
return;
}
if (inspection.data.State.ExitCode !== 0) {
await _container.delete({ force: true });
reject([0,'Runtime Error', inspection.data.State.ExitCode]);
return;
}
_unpromStream = await _container.logs({
follow: true,
stdout: true,
stderr: true
})
var logs = [... new Array(3)].map(()=>{
return `${fileDirname}/${crypto.randomBytes(10).toString('hex')}`;
})
var streams = [... new Array(3)].map((_, num)=>{
return fs.createWriteStream(logs[num], {flags: 'a'});
})
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
switch(d.toString().charCodeAt(0)){
case 1: //stdout
streams[0].write(d.toString().substr(8, d.toString().length)); //https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach;
break;
case 2: //stderr
streams[1].write(d.toString().substr(8, d.toString().length));
break;
default: //stdin (sam)
streams[2].write(d.toString().substr(8, d.toString().length));
break;
}
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
})
streams.forEach((s)=>{
s.end();
})
await _container.delete({ force: true });
resolve(logs);
return;
} catch (err) {
if (typeof _container !== 'undefined') {
_container.delete({ force: true }).catch((e) => console.error(`Failed to clean up after exec error, it is still alive! ${e}`));
}
reject([1, `Failed at execution attempt: ${err}`]);
return;
}
})
}
async function nukeContainers(quit) {
const shouldQuit = quit !== false;
const conts = await docker.container.list({ all: true });
console.log(`NUKING DOCKER!, containers=${conts.length}`);
const promises = conts.map(cont => {
const cname = cont.data.Names[0]
return cont.start()
//.then(() => cont.kill())
.then(() => cont.delete({ force: true }))
.catch(err => console.error(`There is always a catch. Nuking docker failed. Try: docker kill $(docker ps -aq) && docker rm $(docker ps -aq) , on the docker machine instead. ${err}`))
.then(() => console.log(`Nuking ${cname} done`));
})
Promise.all(promises).then(function () {
if (shouldQuit) process.exit(0);
})
}
module.exports = { gccDetect, compile, exec, execEx, nukeContainers, pingDocker } | yncWait(t | identifier_name |
dockeranchor.js | 'use strict';
const Docker = require('node-docker-api').Docker
const fs = require('fs');
const compiler = require('./models/Compiler')
const execenv = require('./models/ExecEnv')
const tar = require('tar')
const path = require('path')
const { promisify } = require('util')
const unlinkAsync = promisify(fs.unlink)
const renameAsync = promisify(fs.rename)
const crypto = require('crypto')
const dockerProto = process.env.RACOONDOCKERPROTO || 'http'
const dockerHost = process.env.RACOONDOCKERHOST || '127.0.0.1'
const dockerPort = process.env.RACOONDOCKERPORT || 2376
const docker = new Docker({ protocol: dockerProto, host: dockerHost, port: dockerPort })
function promisifyStream(stream) {
return new Promise((resolve, reject) => {
stream.on('data', (d) => console.log(d.toString()))
stream.on('end', resolve)
stream.on('error', reject)
})
}
function promisifyStreamNoSpam(stream) {
return new Promise((resolve, reject) => {
stream.on('data', () => { }); //https://nodejs.org/api/stream.html#stream_event_data
stream.on('end', resolve);
stream.on('error', reject);
})
}
const fileExtension = /\.[^/\\\.]*(?=$)/;
const pathToFile = /^.*[/\\]/;
function asyncWait(time) {
return new Promise((resolve) => {
setTimeout(resolve, time);
})
}
const splitEx = /(["'].*?["']|[^"'\s]+)/g
function splitCommands(str) {
return str.match(splitEx).map(
(el) =>
el.replace(/^["']|["']$/g, '')
);
}
async function pingDocker()
{
try{
const info = await docker.info();
return[true, info];
}catch(err)
{
console.error(err);
return [false, err];
}
}
function gccDetect() {
docker.container.create({
Image: 'gcc',
Cmd: ['/bin/bash'],
name: 'test-gcc',
AttachStdout: false,
AttachStderr: false,
tty: true
})
.then((container) => container.start())
.then((container) => {
_container = container
return container.exec.create({
AttachStdout: true,
AttachStderr: true,
Cmd: ['gcc', '--version']
})
})
.then((exec) => {
return exec.start({ detach: false })
})
.then((stream) => promisifyStream(stream))
.then(() => _container.kill())
.then(() => _container.delete({ force: true }))
.then(() => console.log('Test container deleted'))
.catch((error) => console.log(error))
}
/** Compiles inside a container
*
* @param {string} comp Compiler name.
* @param {string} file Complete path to the input file. Can be referenced by ${this.file}
* @param {string} _outfile Output file path. Optional. If not specified outputs with the same name, but with .tar extension.
* @returns {string} Path to binary. On fail throws pair int, string. If int is greater than zero, problem is bad compiler configuration or server error. If it's 0, problem is with the executed program (normal CE)
*/
async function compile(comp, file, _outfile) {
return new Promise(async (resolve, reject) => {
var logs = ''
let container
try {
const fileBasename = path.basename(file)
const fileDirname = path.dirname(file)
const outfile = _outfile || file.replace(fileExtension, '.tar')
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
console.log(`Let's compile! ${fileBasename}`);
if((await pingDocker())[0] == false){
reject([1, 'Cannot reach docker machine']);
return;
}
const compilerInstance = await compiler.findOne({ name: comp });
if (!compilerInstance){
reject ([1, 'Invalid compiler name']);
return;
}
if (compilerInstance.shadow === true) {
resolve(file);
return;
}
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
container = await docker.container.create({
Image: compilerInstance.image_name,
// compilerInstance.exec_command can be template string, it splits with ' '
// '_' is a non-splitting space
// Example of exec_command
// "gcc -lstdc++ -std=c++17 -O2 -o a.out ${this.file}"
// or
// "gcc -lstdc++ -std=c++17 -O2 -o ${this.file+'.out'} ${this.file}"
// (but outputs are never exctracted from tar so they could have the same name)
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(compilerInstance.exec_command, { file: fileBasename }).split(' ').map(el => el.replace(/_/g, ' ')),
// file name could be required to be first argument or appear more than once in compiler commands of some weird languages :D
AttachStdout: false,
AttachStderr: false,
tty: false
})
await container.fs.put(tarfile, { path: '.' })
.then(stream => promisifyStream(stream))
await unlinkAsync(tarfile)
await container.start()
let _unpromStream = await container.logs({
follow: true,
stdout: true,
stderr: true
})
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
logs = logs.concat(d.toString().substr(8, d.toString().length));
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
});
await container.wait();
const outTarPath = outfile + '.tmptar'
await container.fs.get({ path: compilerInstance.output_name })
.then(stream => {
const file = fs.createWriteStream(outTarPath)
stream.pipe(file)
return promisifyStreamNoSpam(stream)
})
let compiledFile;
await tar.list({file : outTarPath, onentry: entry => {compiledFile = entry.path}});
await tar.extract({ file: outTarPath, C: fileDirname });
await renameAsync(`${fileDirname}/${compiledFile}`, outfile)
await unlinkAsync(outTarPath)
await container.delete({ force: true })
resolve(outfile);
} catch (err) {
if (typeof container !== 'undefined') await container.delete({ force: true })
if (logs.length) reject([0, logs])
else reject([1, err ])
}
})
}
/** Executes a program inside docker container
*
* @param {string} exname Name of Execution Environment
* @param {string} infile Path to input file, expects a file. Can be referenced by ${this.file}
* @param {string} stdinfile Path to the file to be sent to the container, containing input data. You need to pipe its contents 'manually' e.g. by executing command inside container. Can be referenced by ${this.input}
* @returns {string} Array containing output from running command. 0 is stdout, 1 is stderr. On fail throws pair int, string. If int is greater than zero, problem is bad execenv configuration or server error. If it's 0, problem is with the executed program (it page-faults or exceeds time limits)
*/
async function exec(exname, infile, stdinfile) {
return new Promise(async (resolve, reject) => {
const _execInstance = await execenv.findOne({ name: exname });
if (!_execInstance) {
reject([1, 'Invalid ExecEnv']);
return;
}
let _container
try { |
const infilename = fileBasename;//infile.replace(pathToFile, '')
let stdininfilename = '';
if(stdinfile)stdininfilename = path.basename(stdinfile);
console.log(`Let's execute! ${fileBasename}`);
if((await pingDocker())[0] == false){
reject([1, 'Cannot reach docker machine']);
return;
}
_container = await docker.container.create({
Image: _execInstance.image_name,
// _execInstance.exec_command can be template string, it splits with '_'
// Example of exec_command:
// "bash -c chmod_+x_a.out_;_./a.out"
// (results in ['bash', '-c', 'chmod +x a.out ; ./a.out'])
// or
// "bash -c chmod_+x_${this.file}_;_./${this.file}"
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(_execInstance.exec_command, { file: infilename, input: stdininfilename }).split(' ').map(el => el.replace(/_/g, ' ')),
Memory: _execInstance.memory,
AttachStdout: false,
AttachStderr: false,
AttachStdin: false,
tty: false,
OpenStdin: false,
//interactive: (stdinfile ? true : false),
})
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
if (stdinfile) {
/*console.log("Redirecting input.");
var [_stdinstream,] = await _container.attach({ stream: true, stderr: true });
var _fstrm = fs.createReadStream(stdinfile);
_fstrm.pipe(_stdinstream) //readable->writable
await promisifyStream(_fstrm);*/
await tar.r({
file: tarfile,
cwd: fileDirname
}, [stdininfilename])
}
var _unpromStream = await _container.fs.put(tarfile, { path: '.' })
await promisifyStreamNoSpam(_unpromStream);
await unlinkAsync(tarfile);
await _container.start();
//await _container.wait();
await asyncWait(_execInstance.time);
const inspection = await _container.status();
if (inspection.data.State.Status !== 'exited') {
await _container.kill();
await _container.delete({ force: true });
reject([0, 'Time Limit Exceeded']);
return;
}
//else await _container.stop();
_unpromStream = await _container.logs({
follow: true,
stdout: true,
stderr: true
})
const logs = new Array('','','')
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
switch(d.toString().charCodeAt(0)){
case 1: //stdout+(prawie zawsze)stdin
logs[0] = logs[0].concat(d.toString().substr(8, d.toString().length)); //https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach;
break;
case 2: //stderr
logs[1] = logs[1].concat(d.toString().substr(8, d.toString().length));
break;
default: //stdin (sam)
logs[2] = logs[2].concat(d.toString().substr(8, d.toString().length));
break;
}
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
})
await _container.delete({ force: true });
//resolve(logs.join().replace(/[^\x20-\x7E]/g, '').trim())
resolve(logs);
return;
} catch (err) {
if (typeof _container !== 'undefined') {
_container.delete({ force: true }).catch((e) => console.error(`Failed to clean up after exec error, it is still alive! ${e}`));
}
reject([1, `Failed at execution attempt: ${err}`]);
return;
}
})
}
/** Executes a program inside docker container, but better
*
* @param {string} exname Name of Execution Environment
* @param {string} infile Path to input file, expects a file. Can be referenced by ${this.file}
* @param {string} stdinfile Path to the file to be sent to the container, containing input data. You need to pipe its contents 'manually' e.g. by executing command inside container. Can be referenced by ${this.input}
* @param {Object} morefiles More files. Format {patternNameToBeReplaced: filePath}.
* @param {Object} opts Options. memLimit - memory limit. timeLimit - time limit. env - array of environmental variables to pass.
* @returns {string} Tuple with paths to files containing demultiplexed output. 0 is stdout, 1 is stderr. On fail throws pair int, string. If int is greater than zero, problem is bad execenv configuration or server error. If it's 0, problem is with the executed program (it page-faults or exceeds time limits)
*/
async function execEx(exname, infile, stdinfile, morefiles, optz) {
return new Promise(async (resolve, reject) => {
const _execInstance = await execenv.findOne({ name: exname });
const opts = optz || {};
if (!_execInstance) {
reject([1, 'Invalid ExecEnv']);
return;
}
let _container
try {
const fileBasename = path.basename(infile);
const fileDirname = path.dirname(infile);
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
const infilename = fileBasename;
let stdininfilename = '';
if(stdinfile)stdininfilename = path.basename(stdinfile);
const timeLimit = Math.min(_execInstance.time, isNaN(opts.timeLimit) ? Infinity : opts.timeLimit);
const memLimit = Math.min(_execInstance.memLimit, isNaN(opts.memLimit) ? Infinity: opts.memLimit);
const patternObject = { file: infilename, input: stdininfilename }
for (let key in morefiles) {
patternObject[key] = path.basename(morefiles[key])
}
console.log(`Let's execute! ${fileBasename}`);
_container = await docker.container.create({
Image: _execInstance.image_name,
// _execInstance.exec_command can be template string, it splits with '_'
// Example of exec_command:
// "bash -c chmod_+x_a.out_;_./a.out"
// (results in ['bash', '-c', 'chmod +x a.out ; ./a.out'])
// or
// "bash -c chmod_+x_${this.file}_;_./${this.file}"
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(_execInstance.exec_command, patternObject).split(' ').map(el => el.replace(/_/g, ' ')),
Memory: memLimit,
AttachStdout: false,
AttachStderr: false,
AttachStdin: false,
tty: false,
OpenStdin: false,
//interactive: (stdinfile ? true : false),
})
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
if (stdinfile) {
await tar.r({
file: tarfile,
cwd: fileDirname
}, [stdininfilename])
}
if(morefiles){
for( let key in morefiles){
await tar.r({
file: tarfile,
cwd: path.dirname(morefiles[key])
}, [path.basename(morefiles[key])])
}
}
let _unpromStream = await _container.fs.put(tarfile, { path: '.' })
await promisifyStreamNoSpam(_unpromStream);
await unlinkAsync(tarfile);
await _container.start();
await asyncWait(timeLimit);
await _container.kill()
.catch(_=>{});
const inspection = await _container.status();
const execTime = new Date(inspection.data.State.FinishedAt) - new Date(inspection.data.State.StartedAt);
console.log(`Calculated uptime of ${execTime}`);
if (execTime >= timeLimit) {
await _container.delete({ force: true });
reject([0,'Time Limit Exceeded']);
return;
}
console.log(`Container in state: ${inspection.data.State.Status} and health: ${inspection.data.State.Error}`);
if (inspection.data.State.Error !== '') {
await _container.delete({ force: true });
reject([0,'Runtime Error', inspection.data.State.Error]);
return;
}
if (inspection.data.State.ExitCode !== 0) {
await _container.delete({ force: true });
reject([0,'Runtime Error', inspection.data.State.ExitCode]);
return;
}
_unpromStream = await _container.logs({
follow: true,
stdout: true,
stderr: true
})
var logs = [... new Array(3)].map(()=>{
return `${fileDirname}/${crypto.randomBytes(10).toString('hex')}`;
})
var streams = [... new Array(3)].map((_, num)=>{
return fs.createWriteStream(logs[num], {flags: 'a'});
})
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
switch(d.toString().charCodeAt(0)){
case 1: //stdout
streams[0].write(d.toString().substr(8, d.toString().length)); //https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach;
break;
case 2: //stderr
streams[1].write(d.toString().substr(8, d.toString().length));
break;
default: //stdin (sam)
streams[2].write(d.toString().substr(8, d.toString().length));
break;
}
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
})
streams.forEach((s)=>{
s.end();
})
await _container.delete({ force: true });
resolve(logs);
return;
} catch (err) {
if (typeof _container !== 'undefined') {
_container.delete({ force: true }).catch((e) => console.error(`Failed to clean up after exec error, it is still alive! ${e}`));
}
reject([1, `Failed at execution attempt: ${err}`]);
return;
}
})
}
async function nukeContainers(quit) {
const shouldQuit = quit !== false;
const conts = await docker.container.list({ all: true });
console.log(`NUKING DOCKER!, containers=${conts.length}`);
const promises = conts.map(cont => {
const cname = cont.data.Names[0]
return cont.start()
//.then(() => cont.kill())
.then(() => cont.delete({ force: true }))
.catch(err => console.error(`There is always a catch. Nuking docker failed. Try: docker kill $(docker ps -aq) && docker rm $(docker ps -aq) , on the docker machine instead. ${err}`))
.then(() => console.log(`Nuking ${cname} done`));
})
Promise.all(promises).then(function () {
if (shouldQuit) process.exit(0);
})
}
module.exports = { gccDetect, compile, exec, execEx, nukeContainers, pingDocker } |
const fileBasename = path.basename(infile);
const fileDirname = path.dirname(infile);
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`; | random_line_split |
dockeranchor.js | 'use strict';
const Docker = require('node-docker-api').Docker
const fs = require('fs');
const compiler = require('./models/Compiler')
const execenv = require('./models/ExecEnv')
const tar = require('tar')
const path = require('path')
const { promisify } = require('util')
const unlinkAsync = promisify(fs.unlink)
const renameAsync = promisify(fs.rename)
const crypto = require('crypto')
const dockerProto = process.env.RACOONDOCKERPROTO || 'http'
const dockerHost = process.env.RACOONDOCKERHOST || '127.0.0.1'
const dockerPort = process.env.RACOONDOCKERPORT || 2376
const docker = new Docker({ protocol: dockerProto, host: dockerHost, port: dockerPort })
function promisifyStream(stream) {
return new Promise((resolve, reject) => {
stream.on('data', (d) => console.log(d.toString()))
stream.on('end', resolve)
stream.on('error', reject)
})
}
function promisifyStreamNoSpam(stream) {
return new Promise((resolve, reject) => {
stream.on('data', () => { }); //https://nodejs.org/api/stream.html#stream_event_data
stream.on('end', resolve);
stream.on('error', reject);
})
}
const fileExtension = /\.[^/\\\.]*(?=$)/;
const pathToFile = /^.*[/\\]/;
function asyncWait(time) {
return new Promise((resolve) => {
setTimeout(resolve, time);
})
}
const splitEx = /(["'].*?["']|[^"'\s]+)/g
function splitCommands(str) {
return str.match(splitEx).map(
(el) =>
el.replace(/^["']|["']$/g, '')
);
}
async function pingDocker()
{
try{
const info = await docker.info();
return[true, info];
}catch(err)
{
console.error(err);
return [false, err];
}
}
function gccDetect() {
docker.container.create({
Image: 'gcc',
Cmd: ['/bin/bash'],
name: 'test-gcc',
AttachStdout: false,
AttachStderr: false,
tty: true
})
.then((container) => container.start())
.then((container) => {
_container = container
return container.exec.create({
AttachStdout: true,
AttachStderr: true,
Cmd: ['gcc', '--version']
})
})
.then((exec) => {
return exec.start({ detach: false })
})
.then((stream) => promisifyStream(stream))
.then(() => _container.kill())
.then(() => _container.delete({ force: true }))
.then(() => console.log('Test container deleted'))
.catch((error) => console.log(error))
}
/** Compiles inside a container
*
* @param {string} comp Compiler name.
* @param {string} file Complete path to the input file. Can be referenced by ${this.file}
* @param {string} _outfile Output file path. Optional. If not specified outputs with the same name, but with .tar extension.
* @returns {string} Path to binary. On fail throws pair int, string. If int is greater than zero, problem is bad compiler configuration or server error. If it's 0, problem is with the executed program (normal CE)
*/
async function compile(comp, file, _outfile) {
return new Promise(async (resolve, reject) => {
var logs = ''
let container
try {
const fileBasename = path.basename(file)
const fileDirname = path.dirname(file)
const outfile = _outfile || file.replace(fileExtension, '.tar')
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
console.log(`Let's compile! ${fileBasename}`);
if((await pingDocker())[0] == false){
reject([1, 'Cannot reach docker machine']);
return;
}
const compilerInstance = await compiler.findOne({ name: comp });
if (!compilerInstance){
reject ([1, 'Invalid compiler name']);
return;
}
if (compilerInstance.shadow === true) {
resolve(file);
return;
}
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
container = await docker.container.create({
Image: compilerInstance.image_name,
// compilerInstance.exec_command can be template string, it splits with ' '
// '_' is a non-splitting space
// Example of exec_command
// "gcc -lstdc++ -std=c++17 -O2 -o a.out ${this.file}"
// or
// "gcc -lstdc++ -std=c++17 -O2 -o ${this.file+'.out'} ${this.file}"
// (but outputs are never exctracted from tar so they could have the same name)
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(compilerInstance.exec_command, { file: fileBasename }).split(' ').map(el => el.replace(/_/g, ' ')),
// file name could be required to be first argument or appear more than once in compiler commands of some weird languages :D
AttachStdout: false,
AttachStderr: false,
tty: false
})
await container.fs.put(tarfile, { path: '.' })
.then(stream => promisifyStream(stream))
await unlinkAsync(tarfile)
await container.start()
let _unpromStream = await container.logs({
follow: true,
stdout: true,
stderr: true
})
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
logs = logs.concat(d.toString().substr(8, d.toString().length));
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
});
await container.wait();
const outTarPath = outfile + '.tmptar'
await container.fs.get({ path: compilerInstance.output_name })
.then(stream => {
const file = fs.createWriteStream(outTarPath)
stream.pipe(file)
return promisifyStreamNoSpam(stream)
})
let compiledFile;
await tar.list({file : outTarPath, onentry: entry => {compiledFile = entry.path}});
await tar.extract({ file: outTarPath, C: fileDirname });
await renameAsync(`${fileDirname}/${compiledFile}`, outfile)
await unlinkAsync(outTarPath)
await container.delete({ force: true })
resolve(outfile);
} catch (err) {
if (typeof container !== 'undefined') await container.delete({ force: true })
if (logs.length) reject([0, logs])
else reject([1, err ])
}
})
}
/** Executes a program inside docker container
*
* @param {string} exname Name of Execution Environment
* @param {string} infile Path to input file, expects a file. Can be referenced by ${this.file}
* @param {string} stdinfile Path to the file to be sent to the container, containing input data. You need to pipe its contents 'manually' e.g. by executing command inside container. Can be referenced by ${this.input}
* @returns {string} Array containing output from running command. 0 is stdout, 1 is stderr. On fail throws pair int, string. If int is greater than zero, problem is bad execenv configuration or server error. If it's 0, problem is with the executed program (it page-faults or exceeds time limits)
*/
async function exec(exname, infile, stdinfile) {
return new Promise(async (resolve, reject) => {
const _execInstance = await execenv.findOne({ name: exname });
if (!_execInstance) {
reject([1, 'Invalid ExecEnv']);
return;
}
let _container
try {
const fileBasename = path.basename(infile);
const fileDirname = path.dirname(infile);
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
const infilename = fileBasename;//infile.replace(pathToFile, '')
let stdininfilename = '';
if(stdinfile)stdininfilename = path.basename(stdinfile);
console.log(`Let's execute! ${fileBasename}`);
if((await pingDocker())[0] == false){
reject([1, 'Cannot reach docker machine']);
return;
}
_container = await docker.container.create({
Image: _execInstance.image_name,
// _execInstance.exec_command can be template string, it splits with '_'
// Example of exec_command:
// "bash -c chmod_+x_a.out_;_./a.out"
// (results in ['bash', '-c', 'chmod +x a.out ; ./a.out'])
// or
// "bash -c chmod_+x_${this.file}_;_./${this.file}"
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(_execInstance.exec_command, { file: infilename, input: stdininfilename }).split(' ').map(el => el.replace(/_/g, ' ')),
Memory: _execInstance.memory,
AttachStdout: false,
AttachStderr: false,
AttachStdin: false,
tty: false,
OpenStdin: false,
//interactive: (stdinfile ? true : false),
})
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
if (stdinfile) {
/*console.log("Redirecting input.");
var [_stdinstream,] = await _container.attach({ stream: true, stderr: true });
var _fstrm = fs.createReadStream(stdinfile);
_fstrm.pipe(_stdinstream) //readable->writable
await promisifyStream(_fstrm);*/
await tar.r({
file: tarfile,
cwd: fileDirname
}, [stdininfilename])
}
var _unpromStream = await _container.fs.put(tarfile, { path: '.' })
await promisifyStreamNoSpam(_unpromStream);
await unlinkAsync(tarfile);
await _container.start();
//await _container.wait();
await asyncWait(_execInstance.time);
const inspection = await _container.status();
if (inspection.data.State.Status !== 'exited') {
await _container.kill();
await _container.delete({ force: true });
reject([0, 'Time Limit Exceeded']);
return;
}
//else await _container.stop();
_unpromStream = await _container.logs({
follow: true,
stdout: true,
stderr: true
})
const logs = new Array('','','')
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
switch(d.toString().charCodeAt(0)){
case 1: //stdout+(prawie zawsze)stdin
logs[0] = logs[0].concat(d.toString().substr(8, d.toString().length)); //https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach;
break;
case 2: //stderr
logs[1] = logs[1].concat(d.toString().substr(8, d.toString().length));
break;
default: //stdin (sam)
logs[2] = logs[2].concat(d.toString().substr(8, d.toString().length));
break;
}
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
})
await _container.delete({ force: true });
//resolve(logs.join().replace(/[^\x20-\x7E]/g, '').trim())
resolve(logs);
return;
} catch (err) {
if (typeof _container !== 'undefined') {
_container.delete({ force: true }).catch((e) => console.error(`Failed to clean up after exec error, it is still alive! ${e}`));
}
reject([1, `Failed at execution attempt: ${err}`]);
return;
}
})
}
/** Executes a program inside docker container, but better
*
* @param {string} exname Name of Execution Environment
* @param {string} infile Path to input file, expects a file. Can be referenced by ${this.file}
* @param {string} stdinfile Path to the file to be sent to the container, containing input data. You need to pipe its contents 'manually' e.g. by executing command inside container. Can be referenced by ${this.input}
* @param {Object} morefiles More files. Format {patternNameToBeReplaced: filePath}.
* @param {Object} opts Options. memLimit - memory limit. timeLimit - time limit. env - array of environmental variables to pass.
* @returns {string} Tuple with paths to files containing demultiplexed output. 0 is stdout, 1 is stderr. On fail throws pair int, string. If int is greater than zero, problem is bad execenv configuration or server error. If it's 0, problem is with the executed program (it page-faults or exceeds time limits)
*/
async function execEx(exname, infile, stdinfile, morefiles, optz) {
|
async function nukeContainers(quit) {
const shouldQuit = quit !== false;
const conts = await docker.container.list({ all: true });
console.log(`NUKING DOCKER!, containers=${conts.length}`);
const promises = conts.map(cont => {
const cname = cont.data.Names[0]
return cont.start()
//.then(() => cont.kill())
.then(() => cont.delete({ force: true }))
.catch(err => console.error(`There is always a catch. Nuking docker failed. Try: docker kill $(docker ps -aq) && docker rm $(docker ps -aq) , on the docker machine instead. ${err}`))
.then(() => console.log(`Nuking ${cname} done`));
})
Promise.all(promises).then(function () {
if (shouldQuit) process.exit(0);
})
}
module.exports = { gccDetect, compile, exec, execEx, nukeContainers, pingDocker } | return new Promise(async (resolve, reject) => {
const _execInstance = await execenv.findOne({ name: exname });
const opts = optz || {};
if (!_execInstance) {
reject([1, 'Invalid ExecEnv']);
return;
}
let _container
try {
const fileBasename = path.basename(infile);
const fileDirname = path.dirname(infile);
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
const infilename = fileBasename;
let stdininfilename = '';
if(stdinfile)stdininfilename = path.basename(stdinfile);
const timeLimit = Math.min(_execInstance.time, isNaN(opts.timeLimit) ? Infinity : opts.timeLimit);
const memLimit = Math.min(_execInstance.memLimit, isNaN(opts.memLimit) ? Infinity: opts.memLimit);
const patternObject = { file: infilename, input: stdininfilename }
for (let key in morefiles) {
patternObject[key] = path.basename(morefiles[key])
}
console.log(`Let's execute! ${fileBasename}`);
_container = await docker.container.create({
Image: _execInstance.image_name,
// _execInstance.exec_command can be template string, it splits with '_'
// Example of exec_command:
// "bash -c chmod_+x_a.out_;_./a.out"
// (results in ['bash', '-c', 'chmod +x a.out ; ./a.out'])
// or
// "bash -c chmod_+x_${this.file}_;_./${this.file}"
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(_execInstance.exec_command, patternObject).split(' ').map(el => el.replace(/_/g, ' ')),
Memory: memLimit,
AttachStdout: false,
AttachStderr: false,
AttachStdin: false,
tty: false,
OpenStdin: false,
//interactive: (stdinfile ? true : false),
})
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
if (stdinfile) {
await tar.r({
file: tarfile,
cwd: fileDirname
}, [stdininfilename])
}
if(morefiles){
for( let key in morefiles){
await tar.r({
file: tarfile,
cwd: path.dirname(morefiles[key])
}, [path.basename(morefiles[key])])
}
}
let _unpromStream = await _container.fs.put(tarfile, { path: '.' })
await promisifyStreamNoSpam(_unpromStream);
await unlinkAsync(tarfile);
await _container.start();
await asyncWait(timeLimit);
await _container.kill()
.catch(_=>{});
const inspection = await _container.status();
const execTime = new Date(inspection.data.State.FinishedAt) - new Date(inspection.data.State.StartedAt);
console.log(`Calculated uptime of ${execTime}`);
if (execTime >= timeLimit) {
await _container.delete({ force: true });
reject([0,'Time Limit Exceeded']);
return;
}
console.log(`Container in state: ${inspection.data.State.Status} and health: ${inspection.data.State.Error}`);
if (inspection.data.State.Error !== '') {
await _container.delete({ force: true });
reject([0,'Runtime Error', inspection.data.State.Error]);
return;
}
if (inspection.data.State.ExitCode !== 0) {
await _container.delete({ force: true });
reject([0,'Runtime Error', inspection.data.State.ExitCode]);
return;
}
_unpromStream = await _container.logs({
follow: true,
stdout: true,
stderr: true
})
var logs = [... new Array(3)].map(()=>{
return `${fileDirname}/${crypto.randomBytes(10).toString('hex')}`;
})
var streams = [... new Array(3)].map((_, num)=>{
return fs.createWriteStream(logs[num], {flags: 'a'});
})
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
switch(d.toString().charCodeAt(0)){
case 1: //stdout
streams[0].write(d.toString().substr(8, d.toString().length)); //https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach;
break;
case 2: //stderr
streams[1].write(d.toString().substr(8, d.toString().length));
break;
default: //stdin (sam)
streams[2].write(d.toString().substr(8, d.toString().length));
break;
}
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
})
streams.forEach((s)=>{
s.end();
})
await _container.delete({ force: true });
resolve(logs);
return;
} catch (err) {
if (typeof _container !== 'undefined') {
_container.delete({ force: true }).catch((e) => console.error(`Failed to clean up after exec error, it is still alive! ${e}`));
}
reject([1, `Failed at execution attempt: ${err}`]);
return;
}
})
}
| identifier_body |
dockeranchor.js | 'use strict';
const Docker = require('node-docker-api').Docker
const fs = require('fs');
const compiler = require('./models/Compiler')
const execenv = require('./models/ExecEnv')
const tar = require('tar')
const path = require('path')
const { promisify } = require('util')
const unlinkAsync = promisify(fs.unlink)
const renameAsync = promisify(fs.rename)
const crypto = require('crypto')
const dockerProto = process.env.RACOONDOCKERPROTO || 'http'
const dockerHost = process.env.RACOONDOCKERHOST || '127.0.0.1'
const dockerPort = process.env.RACOONDOCKERPORT || 2376
const docker = new Docker({ protocol: dockerProto, host: dockerHost, port: dockerPort })
function promisifyStream(stream) {
return new Promise((resolve, reject) => {
stream.on('data', (d) => console.log(d.toString()))
stream.on('end', resolve)
stream.on('error', reject)
})
}
function promisifyStreamNoSpam(stream) {
return new Promise((resolve, reject) => {
stream.on('data', () => { }); //https://nodejs.org/api/stream.html#stream_event_data
stream.on('end', resolve);
stream.on('error', reject);
})
}
const fileExtension = /\.[^/\\\.]*(?=$)/;
const pathToFile = /^.*[/\\]/;
function asyncWait(time) {
return new Promise((resolve) => {
setTimeout(resolve, time);
})
}
const splitEx = /(["'].*?["']|[^"'\s]+)/g
function splitCommands(str) {
return str.match(splitEx).map(
(el) =>
el.replace(/^["']|["']$/g, '')
);
}
async function pingDocker()
{
try{
const info = await docker.info();
return[true, info];
}catch(err)
{
console.error(err);
return [false, err];
}
}
function gccDetect() {
docker.container.create({
Image: 'gcc',
Cmd: ['/bin/bash'],
name: 'test-gcc',
AttachStdout: false,
AttachStderr: false,
tty: true
})
.then((container) => container.start())
.then((container) => {
_container = container
return container.exec.create({
AttachStdout: true,
AttachStderr: true,
Cmd: ['gcc', '--version']
})
})
.then((exec) => {
return exec.start({ detach: false })
})
.then((stream) => promisifyStream(stream))
.then(() => _container.kill())
.then(() => _container.delete({ force: true }))
.then(() => console.log('Test container deleted'))
.catch((error) => console.log(error))
}
/** Compiles inside a container
*
* @param {string} comp Compiler name.
* @param {string} file Complete path to the input file. Can be referenced by ${this.file}
* @param {string} _outfile Output file path. Optional. If not specified outputs with the same name, but with .tar extension.
* @returns {string} Path to binary. On fail throws pair int, string. If int is greater than zero, problem is bad compiler configuration or server error. If it's 0, problem is with the executed program (normal CE)
*/
async function compile(comp, file, _outfile) {
return new Promise(async (resolve, reject) => {
var logs = ''
let container
try {
const fileBasename = path.basename(file)
const fileDirname = path.dirname(file)
const outfile = _outfile || file.replace(fileExtension, '.tar')
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
console.log(`Let's compile! ${fileBasename}`);
if((await pingDocker())[0] == false){
reject([1, 'Cannot reach docker machine']);
return;
}
const compilerInstance = await compiler.findOne({ name: comp });
if (!compilerInstance){
reject ([1, 'Invalid compiler name']);
return;
}
if (compilerInstance.shadow === true) {
resolve(file);
return;
}
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
container = await docker.container.create({
Image: compilerInstance.image_name,
// compilerInstance.exec_command can be template string, it splits with ' '
// '_' is a non-splitting space
// Example of exec_command
// "gcc -lstdc++ -std=c++17 -O2 -o a.out ${this.file}"
// or
// "gcc -lstdc++ -std=c++17 -O2 -o ${this.file+'.out'} ${this.file}"
// (but outputs are never exctracted from tar so they could have the same name)
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(compilerInstance.exec_command, { file: fileBasename }).split(' ').map(el => el.replace(/_/g, ' ')),
// file name could be required to be first argument or appear more than once in compiler commands of some weird languages :D
AttachStdout: false,
AttachStderr: false,
tty: false
})
await container.fs.put(tarfile, { path: '.' })
.then(stream => promisifyStream(stream))
await unlinkAsync(tarfile)
await container.start()
let _unpromStream = await container.logs({
follow: true,
stdout: true,
stderr: true
})
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
logs = logs.concat(d.toString().substr(8, d.toString().length));
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
});
await container.wait();
const outTarPath = outfile + '.tmptar'
await container.fs.get({ path: compilerInstance.output_name })
.then(stream => {
const file = fs.createWriteStream(outTarPath)
stream.pipe(file)
return promisifyStreamNoSpam(stream)
})
let compiledFile;
await tar.list({file : outTarPath, onentry: entry => {compiledFile = entry.path}});
await tar.extract({ file: outTarPath, C: fileDirname });
await renameAsync(`${fileDirname}/${compiledFile}`, outfile)
await unlinkAsync(outTarPath)
await container.delete({ force: true })
resolve(outfile);
} catch (err) {
if (typeof container !== 'undefined') await container.delete({ force: true })
if (logs.length) reject([0, logs])
else reject([1, err ])
}
})
}
/** Executes a program inside docker container
*
* @param {string} exname Name of Execution Environment
* @param {string} infile Path to input file, expects a file. Can be referenced by ${this.file}
* @param {string} stdinfile Path to the file to be sent to the container, containing input data. You need to pipe its contents 'manually' e.g. by executing command inside container. Can be referenced by ${this.input}
* @returns {string} Array containing output from running command. 0 is stdout, 1 is stderr. On fail throws pair int, string. If int is greater than zero, problem is bad execenv configuration or server error. If it's 0, problem is with the executed program (it page-faults or exceeds time limits)
*/
async function exec(exname, infile, stdinfile) {
return new Promise(async (resolve, reject) => {
const _execInstance = await execenv.findOne({ name: exname });
if (!_execInstance) {
reject([1, 'Invalid ExecEnv']);
return;
}
let _container
try {
const fileBasename = path.basename(infile);
const fileDirname = path.dirname(infile);
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
const infilename = fileBasename;//infile.replace(pathToFile, '')
let stdininfilename = '';
if(stdinfile)stdininfilename = path.basename(stdinfile);
console.log(`Let's execute! ${fileBasename}`);
if((await pingDocker())[0] == false){
reject([1, 'Cannot reach docker machine']);
return;
}
_container = await docker.container.create({
Image: _execInstance.image_name,
// _execInstance.exec_command can be template string, it splits with '_'
// Example of exec_command:
// "bash -c chmod_+x_a.out_;_./a.out"
// (results in ['bash', '-c', 'chmod +x a.out ; ./a.out'])
// or
// "bash -c chmod_+x_${this.file}_;_./${this.file}"
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(_execInstance.exec_command, { file: infilename, input: stdininfilename }).split(' ').map(el => el.replace(/_/g, ' ')),
Memory: _execInstance.memory,
AttachStdout: false,
AttachStderr: false,
AttachStdin: false,
tty: false,
OpenStdin: false,
//interactive: (stdinfile ? true : false),
})
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
if (stdinfile) {
| var _unpromStream = await _container.fs.put(tarfile, { path: '.' })
await promisifyStreamNoSpam(_unpromStream);
await unlinkAsync(tarfile);
await _container.start();
//await _container.wait();
await asyncWait(_execInstance.time);
const inspection = await _container.status();
if (inspection.data.State.Status !== 'exited') {
await _container.kill();
await _container.delete({ force: true });
reject([0, 'Time Limit Exceeded']);
return;
}
//else await _container.stop();
_unpromStream = await _container.logs({
follow: true,
stdout: true,
stderr: true
})
const logs = new Array('','','')
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
switch(d.toString().charCodeAt(0)){
case 1: //stdout+(prawie zawsze)stdin
logs[0] = logs[0].concat(d.toString().substr(8, d.toString().length)); //https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach;
break;
case 2: //stderr
logs[1] = logs[1].concat(d.toString().substr(8, d.toString().length));
break;
default: //stdin (sam)
logs[2] = logs[2].concat(d.toString().substr(8, d.toString().length));
break;
}
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
})
await _container.delete({ force: true });
//resolve(logs.join().replace(/[^\x20-\x7E]/g, '').trim())
resolve(logs);
return;
} catch (err) {
if (typeof _container !== 'undefined') {
_container.delete({ force: true }).catch((e) => console.error(`Failed to clean up after exec error, it is still alive! ${e}`));
}
reject([1, `Failed at execution attempt: ${err}`]);
return;
}
})
}
/** Executes a program inside docker container, but better
*
* @param {string} exname Name of Execution Environment
* @param {string} infile Path to input file, expects a file. Can be referenced by ${this.file}
* @param {string} stdinfile Path to the file to be sent to the container, containing input data. You need to pipe its contents 'manually' e.g. by executing command inside container. Can be referenced by ${this.input}
* @param {Object} morefiles More files. Format {patternNameToBeReplaced: filePath}.
* @param {Object} opts Options. memLimit - memory limit. timeLimit - time limit. env - array of environmental variables to pass.
* @returns {string} Tuple with paths to files containing demultiplexed output. 0 is stdout, 1 is stderr. On fail throws pair int, string. If int is greater than zero, problem is bad execenv configuration or server error. If it's 0, problem is with the executed program (it page-faults or exceeds time limits)
*/
async function execEx(exname, infile, stdinfile, morefiles, optz) {
return new Promise(async (resolve, reject) => {
const _execInstance = await execenv.findOne({ name: exname });
const opts = optz || {};
if (!_execInstance) {
reject([1, 'Invalid ExecEnv']);
return;
}
let _container
try {
const fileBasename = path.basename(infile);
const fileDirname = path.dirname(infile);
const tarfile = `${fileDirname}/${crypto.randomBytes(10).toString('hex')}.tar`;
const infilename = fileBasename;
let stdininfilename = '';
if(stdinfile)stdininfilename = path.basename(stdinfile);
const timeLimit = Math.min(_execInstance.time, isNaN(opts.timeLimit) ? Infinity : opts.timeLimit);
const memLimit = Math.min(_execInstance.memLimit, isNaN(opts.memLimit) ? Infinity: opts.memLimit);
const patternObject = { file: infilename, input: stdininfilename }
for (let key in morefiles) {
patternObject[key] = path.basename(morefiles[key])
}
console.log(`Let's execute! ${fileBasename}`);
_container = await docker.container.create({
Image: _execInstance.image_name,
// _execInstance.exec_command can be template string, it splits with '_'
// Example of exec_command:
// "bash -c chmod_+x_a.out_;_./a.out"
// (results in ['bash', '-c', 'chmod +x a.out ; ./a.out'])
// or
// "bash -c chmod_+x_${this.file}_;_./${this.file}"
Cmd: ((template, vars) => {
return new Function('return `' + template + '`;').call(vars)
})(_execInstance.exec_command, patternObject).split(' ').map(el => el.replace(/_/g, ' ')),
Memory: memLimit,
AttachStdout: false,
AttachStderr: false,
AttachStdin: false,
tty: false,
OpenStdin: false,
//interactive: (stdinfile ? true : false),
})
await tar.c({
file: tarfile,
cwd: fileDirname
}, [fileBasename])
if (stdinfile) {
await tar.r({
file: tarfile,
cwd: fileDirname
}, [stdininfilename])
}
if(morefiles){
for( let key in morefiles){
await tar.r({
file: tarfile,
cwd: path.dirname(morefiles[key])
}, [path.basename(morefiles[key])])
}
}
let _unpromStream = await _container.fs.put(tarfile, { path: '.' })
await promisifyStreamNoSpam(_unpromStream);
await unlinkAsync(tarfile);
await _container.start();
await asyncWait(timeLimit);
await _container.kill()
.catch(_=>{});
const inspection = await _container.status();
const execTime = new Date(inspection.data.State.FinishedAt) - new Date(inspection.data.State.StartedAt);
console.log(`Calculated uptime of ${execTime}`);
if (execTime >= timeLimit) {
await _container.delete({ force: true });
reject([0,'Time Limit Exceeded']);
return;
}
console.log(`Container in state: ${inspection.data.State.Status} and health: ${inspection.data.State.Error}`);
if (inspection.data.State.Error !== '') {
await _container.delete({ force: true });
reject([0,'Runtime Error', inspection.data.State.Error]);
return;
}
if (inspection.data.State.ExitCode !== 0) {
await _container.delete({ force: true });
reject([0,'Runtime Error', inspection.data.State.ExitCode]);
return;
}
_unpromStream = await _container.logs({
follow: true,
stdout: true,
stderr: true
})
var logs = [... new Array(3)].map(()=>{
return `${fileDirname}/${crypto.randomBytes(10).toString('hex')}`;
})
var streams = [... new Array(3)].map((_, num)=>{
return fs.createWriteStream(logs[num], {flags: 'a'});
})
await new Promise((resolve, reject) => {
_unpromStream.on('data', (d) => {
switch(d.toString().charCodeAt(0)){
case 1: //stdout
streams[0].write(d.toString().substr(8, d.toString().length)); //https://docs.docker.com/engine/api/v1.40/#operation/ContainerAttach;
break;
case 2: //stderr
streams[1].write(d.toString().substr(8, d.toString().length));
break;
default: //stdin (sam)
streams[2].write(d.toString().substr(8, d.toString().length));
break;
}
})
_unpromStream.on('end', resolve)
_unpromStream.on('error', reject)
})
streams.forEach((s)=>{
s.end();
})
await _container.delete({ force: true });
resolve(logs);
return;
} catch (err) {
if (typeof _container !== 'undefined') {
_container.delete({ force: true }).catch((e) => console.error(`Failed to clean up after exec error, it is still alive! ${e}`));
}
reject([1, `Failed at execution attempt: ${err}`]);
return;
}
})
}
async function nukeContainers(quit) {
const shouldQuit = quit !== false;
const conts = await docker.container.list({ all: true });
console.log(`NUKING DOCKER!, containers=${conts.length}`);
const promises = conts.map(cont => {
const cname = cont.data.Names[0]
return cont.start()
//.then(() => cont.kill())
.then(() => cont.delete({ force: true }))
.catch(err => console.error(`There is always a catch. Nuking docker failed. Try: docker kill $(docker ps -aq) && docker rm $(docker ps -aq) , on the docker machine instead. ${err}`))
.then(() => console.log(`Nuking ${cname} done`));
})
Promise.all(promises).then(function () {
if (shouldQuit) process.exit(0);
})
}
module.exports = { gccDetect, compile, exec, execEx, nukeContainers, pingDocker } | /*console.log("Redirecting input.");
var [_stdinstream,] = await _container.attach({ stream: true, stderr: true });
var _fstrm = fs.createReadStream(stdinfile);
_fstrm.pipe(_stdinstream) //readable->writable
await promisifyStream(_fstrm);*/
await tar.r({
file: tarfile,
cwd: fileDirname
}, [stdininfilename])
}
| conditional_block |
Employees.component.ts |
import {Component, ViewChild} from "@angular/core";
import {FormGroup, FormBuilder} from "@angular/forms";
import {FTable} from "qCommon/app/directives/footable.directive";
import {Router} from "@angular/router";
import {TOAST_TYPE} from "qCommon/app/constants/Qount.constants";
import {ToastService} from "qCommon/app/services/Toast.service";
import {SwitchBoard} from "qCommon/app/services/SwitchBoard";
import {Session} from "qCommon/app/services/Session";
import {CompanyModel} from "../models/Company.model";
import {EmployeeService} from "qCommon/app/services/Employees.service";
import {EmployeesModel} from "../models/Employees.model";
import {EmployeesForm} from "../forms/Employees.form";
import {LoadingService} from "qCommon/app/services/LoadingService";
import {pageTitleService} from "qCommon/app/services/PageTitle";
import {DateFormater} from "qCommon/app/services/DateFormatter.service";
import {ReportService} from "reportsUI/app/services/Reports.service";
declare var jQuery:any;
declare var _:any;
@Component({
selector: 'employees',
templateUrl: '../views/employees.html'
})
export class EmployeesComponent {
tableData:any = {};
tableOptions:any = {};
status:any;
dateFormat:string;
serviceDateformat:string;
employeeId:any;
employees:Array<any>;
editMode:boolean = false;
@ViewChild('createVendor') createVendor;
row:any;
employeesForm: FormGroup;
@ViewChild('fooTableDir') fooTableDir:FTable;
hasEmployeesList:boolean = false;
message:string;
companyId:string;
companies:Array<CompanyModel> = [];
companyName:string;
showFlyout:boolean = false;
confirmSubscription:any;
routeSubscribe:any;
employeesTableColumns: Array<any> = ['FirstName', 'LastName', 'SSN', 'Email', 'Phone'];
pdfTableData: any = {"tableHeader": {"values": []}, "tableRows" : {"rows": []} };
showDownloadIcon:string = "hidden";
constructor(private _fb: FormBuilder, private employeeService: EmployeeService,
private _employeesForm:EmployeesForm, private _router: Router, private _toastService: ToastService,
private switchBoard: SwitchBoard, private loadingService:LoadingService,private titleService:pageTitleService,
private dateFormater:DateFormater, private reportsService: ReportService) {
this.dateFormat = dateFormater.getFormat();
this.serviceDateformat = dateFormater.getServiceDateformat();
this.titleService.setPageTitle("Employees");
this.employeesForm = this._fb.group(_employeesForm.getForm());
this.confirmSubscription = this.switchBoard.onToastConfirm.subscribe(toast => this.deleteEmployee(toast));
this.companyId = Session.getCurrentCompany();
if(this.companyId){
this.loadingService.triggerLoadingEvent(true);
this.employeeService.employees(this.companyId).subscribe(employees => {
this.buildTableData(employees);
}, error => this.handleError(error));
}else {
this._toastService.pop(TOAST_TYPE.error, "Please Add Company First");
}
this.routeSubscribe = switchBoard.onClickPrev.subscribe(title => {
if(this.showFlyout){
this.hideFlyout();
}else {
this.toolsRedirect();
}
});
}
toolsRedirect(){
let link = ['tools'];
this._router.navigate(link);
}
ngOnDestroy(){
this.routeSubscribe.unsubscribe();
this.confirmSubscription.unsubscribe();
}
buildTableData(employees) {
this.employees = employees;
this.hasEmployeesList = false;
this.tableOptions.search = true;
this.tableOptions.pageSize = 9;
this.tableData.rows = [];
this.tableData.columns = [
{"name": "id", "title": "ID","visible":false},
{"name": "first_name", "title": "FirstName"},
{"name": "last_name", "title": "LastName"},
{"name": "ssn", "title": "SSN"},
{"name": "email_id", "title": "Email"},
{"name": "phone_number", "title": "Phone"},
{"name": "actions", "title": "", "type": "html", "filterable": false}
];
let base = this;
this.employees.forEach(function(employees) {
let row:any = {};
for(let key in base.employees[0]) {
row[key] = employees[key];
row['actions'] = "<a class='action' data-action='edit' style='margin:0px 0px 0px 5px;'><i class='icon ion-edit'></i></a><a class='action' data-action='delete' style='margin:0px 0px 0px 5px;'><i class='icon ion-trash-b'></i></a>";
}
base.tableData.rows.push(row);
});
setTimeout(function(){
base.hasEmployeesList = true;
}, 0);
setTimeout(function() {
if(base.hasEmployeesList){
base.showDownloadIcon = "visible";
}
},600);
this.loadingService.triggerLoadingEvent(false);
}
showCreateEmployee() {
this.titleService.setPageTitle("CREATE EMPLOYEE");
let self = this;
this.editMode = false;
this.employeesForm = this._fb.group(this._employeesForm.getForm());
this.newForm1();
this.showFlyout = true;
}
handleAction($event){
let action = $event.action;
delete $event.action;
delete $event.actions;
if(action == 'edit') {
this.showEditEmployee($event);
} else if(action == 'delete'){
this.removeEmployee($event);
}
}
deleteEmployee(toast){
this.loadingService.triggerLoadingEvent(true);
this.employeeService.removeEmployee(this.employeeId, this.companyId)
.subscribe(success => {
//this.loadingService.triggerLoadingEvent(false);
this._toastService.pop(TOAST_TYPE.success, "Employee Deleted Successfully");
this.employeeService.employees(this.companyId)
.subscribe(customers => this.buildTableData(customers), error => this.handleError(error));
}, error => this.handleError(error));
}
removeEmployee(row:any) {
let employee:EmployeesModel = row;
this.employeeId=employee.id;
this._toastService.pop(TOAST_TYPE.confirm, "Are You Sure You Want To Delete?");
}
active1:boolean=true;
newForm1(){
this.active1 = false;
setTimeout(()=> this.active1=true, 0);
}
showEditEmployee(row:any) {
this.loadingService.triggerLoadingEvent(true);
this.titleService.setPageTitle("UPDATE EMPLOYEE");
this.editMode = true;
this.showFlyout = true;
//this.row = row;
this.employeeService.employee(row.id, this.companyId)
.subscribe(employee => {
this.row = employee;
let email_id:any = this.employeesForm.controls['email_id'];
email_id.patchValue(employee.email_id);
let phone_number:any = this.employeesForm.controls['phone_number'];
phone_number.patchValue(employee.phone_number);
var base=this;
this._employeesForm.updateForm(this.employeesForm, row);
this.loadingService.triggerLoadingEvent(false);
}, error => this.handleError(error));
}
submit($event) {
$event && $event.preventDefault();
var data = this._employeesForm.getData(this.employeesForm);
this.companyId = Session.getCurrentCompany();
data.dob = this.dateFormater.formatDate(data.dob,this.dateFormat,this.serviceDateformat);
this.loadingService.triggerLoadingEvent(true);
if(this.editMode) {
data.id=this.row.id;
this.employeeService.updateEmployee(<EmployeesModel>data, this.companyId)
.subscribe(success => {
this.loadingService.triggerLoadingEvent(false);
this.showMessage(true, success);
this.titleService.setPageTitle("Employees");
}, error => this.showMessage(false, error));
this.showFlyout = false;
} else {
this.employeeService.addEmployee(<EmployeesModel>data, this.companyId)
.subscribe(success => {
this.loadingService.triggerLoadingEvent(false);
this.showMessage(true, success);
this.titleService.setPageTitle("Employees");
}, error => this.showMessage(false, error));
this.showFlyout = false;
}
}
showMessage(status, obj) {
this.loadingService.triggerLoadingEvent(false);
if(status) {
this.status = {};
this.status['success'] = true;
this.hasEmployeesList=false;
if(this.editMode) {
this.employeeService.employees(this.companyId)
.subscribe(employees => this.buildTableData(employees), error => this.handleError(error));
this.newForm1();
this._toastService.pop(TOAST_TYPE.success, "Employee Updated Successfully.");
} else {
this.newForm1();
this.employeeService.employees(this.companyId)
.subscribe(employees => this.buildTableData(employees), error => this.handleError(error));
this._toastService.pop(TOAST_TYPE.success, "Employee Created Successfully.");
}
this.newCustomer();
} else {
this.status = {};
this.status['error'] = true;
this._toastService.pop(TOAST_TYPE.error, "Failed To Update The Employee");
this.message = obj;
}
}
setDateOfBirth(date: string){
let empDateControl:any = this.employeesForm.controls['dob'];
empDateControl.patchValue(date);
}
// Reset the form with a new hero AND restore 'pristine' class state
// by toggling 'active' flag which causes the form
// to be removed/re-added in a tick via NgIf
// TODO: Workaround until NgForm has a reset method (#6822)
active = true;
newCustomer() {
this.active = false;
setTimeout(()=> this.active=true, 0);
}
handleError(error) {
this.loadingService.triggerLoadingEvent(false);
this._toastService.pop(TOAST_TYPE.error, "Failed To Perform Operation");
}
hideFlyout() |
getEmployeesTableData(inputData) {
let tempData = _.cloneDeep(inputData);
let newTableData: Array<any> = [];
let tempJsonArray: any;
for( var i in tempData) {
tempJsonArray = {};
tempJsonArray["FirstName"] = tempData[i].first_name;
tempJsonArray["LastName"] = tempData[i].last_name;
tempJsonArray["SSN"] = tempData[i].ssn;
tempJsonArray["Email"] = tempData[i].email_id;
tempJsonArray["Phone"] = tempData[i].phone_number;
newTableData.push(tempJsonArray);
}
return newTableData;
}
buildPdfTabledata(fileType){
this.pdfTableData['documentHeader'] = "Header";
this.pdfTableData['documentFooter'] = "Footer";
this.pdfTableData['fileType'] = fileType;
this.pdfTableData['name'] = "Name";
this.pdfTableData.tableHeader.values = this.employeesTableColumns;
this.pdfTableData.tableRows.rows = this.getEmployeesTableData(this.tableData.rows);
}
exportToExcel() {
this.buildPdfTabledata("excel");
this.reportsService.exportFooTableIntoFile(this.companyId, this.pdfTableData)
.subscribe(data =>{
let blob = new Blob([data._body], {type:"application/vnd.ms-excel"});
let link = document.createElement('a');
link.href = window.URL.createObjectURL(blob);
link['download'] = "Employees.xls";
link.click();
}, error =>{
this._toastService.pop(TOAST_TYPE.error, "Failed To Export Table Into Excel");
});
// jQuery('#example-dropdown').foundation('close');
}
exportToPDF() {
this.buildPdfTabledata("pdf");
this.reportsService.exportFooTableIntoFile(this.companyId, this.pdfTableData)
.subscribe(data =>{
var blob = new Blob([data._body], {type:"application/pdf"});
var link = jQuery('<a></a>');
link[0].href = URL.createObjectURL(blob);
link[0].download = "Employees.pdf";
link[0].click();
}, error =>{
this._toastService.pop(TOAST_TYPE.error, "Failed To Export Table Into PDF");
});
}
}
| {
this.titleService.setPageTitle("Employees");
this.row = {};
this.showFlyout = !this.showFlyout;
} | identifier_body |
Employees.component.ts |
import {Component, ViewChild} from "@angular/core";
import {FormGroup, FormBuilder} from "@angular/forms";
import {FTable} from "qCommon/app/directives/footable.directive";
import {Router} from "@angular/router";
import {TOAST_TYPE} from "qCommon/app/constants/Qount.constants";
import {ToastService} from "qCommon/app/services/Toast.service";
import {SwitchBoard} from "qCommon/app/services/SwitchBoard";
import {Session} from "qCommon/app/services/Session";
import {CompanyModel} from "../models/Company.model";
import {EmployeeService} from "qCommon/app/services/Employees.service";
import {EmployeesModel} from "../models/Employees.model";
import {EmployeesForm} from "../forms/Employees.form";
import {LoadingService} from "qCommon/app/services/LoadingService";
import {pageTitleService} from "qCommon/app/services/PageTitle";
import {DateFormater} from "qCommon/app/services/DateFormatter.service";
import {ReportService} from "reportsUI/app/services/Reports.service";
declare var jQuery:any;
declare var _:any;
@Component({
selector: 'employees',
templateUrl: '../views/employees.html'
})
export class EmployeesComponent {
tableData:any = {};
tableOptions:any = {};
status:any;
dateFormat:string;
serviceDateformat:string;
employeeId:any;
employees:Array<any>;
editMode:boolean = false;
@ViewChild('createVendor') createVendor;
row:any;
employeesForm: FormGroup;
@ViewChild('fooTableDir') fooTableDir:FTable;
hasEmployeesList:boolean = false;
message:string;
companyId:string;
companies:Array<CompanyModel> = [];
companyName:string;
showFlyout:boolean = false;
confirmSubscription:any;
routeSubscribe:any;
employeesTableColumns: Array<any> = ['FirstName', 'LastName', 'SSN', 'Email', 'Phone'];
pdfTableData: any = {"tableHeader": {"values": []}, "tableRows" : {"rows": []} };
showDownloadIcon:string = "hidden";
constructor(private _fb: FormBuilder, private employeeService: EmployeeService,
private _employeesForm:EmployeesForm, private _router: Router, private _toastService: ToastService,
private switchBoard: SwitchBoard, private loadingService:LoadingService,private titleService:pageTitleService,
private dateFormater:DateFormater, private reportsService: ReportService) {
this.dateFormat = dateFormater.getFormat();
this.serviceDateformat = dateFormater.getServiceDateformat();
this.titleService.setPageTitle("Employees");
this.employeesForm = this._fb.group(_employeesForm.getForm());
this.confirmSubscription = this.switchBoard.onToastConfirm.subscribe(toast => this.deleteEmployee(toast));
this.companyId = Session.getCurrentCompany();
if(this.companyId){
this.loadingService.triggerLoadingEvent(true);
this.employeeService.employees(this.companyId).subscribe(employees => {
this.buildTableData(employees);
}, error => this.handleError(error));
}else {
this._toastService.pop(TOAST_TYPE.error, "Please Add Company First");
}
this.routeSubscribe = switchBoard.onClickPrev.subscribe(title => {
if(this.showFlyout){
this.hideFlyout();
}else {
this.toolsRedirect();
}
});
}
toolsRedirect(){
let link = ['tools'];
this._router.navigate(link);
}
ngOnDestroy(){
this.routeSubscribe.unsubscribe();
this.confirmSubscription.unsubscribe();
}
buildTableData(employees) {
this.employees = employees;
this.hasEmployeesList = false;
this.tableOptions.search = true;
this.tableOptions.pageSize = 9;
this.tableData.rows = [];
this.tableData.columns = [
{"name": "id", "title": "ID","visible":false},
{"name": "first_name", "title": "FirstName"},
{"name": "last_name", "title": "LastName"},
{"name": "ssn", "title": "SSN"},
{"name": "email_id", "title": "Email"},
{"name": "phone_number", "title": "Phone"},
{"name": "actions", "title": "", "type": "html", "filterable": false}
];
let base = this;
this.employees.forEach(function(employees) {
let row:any = {};
for(let key in base.employees[0]) {
row[key] = employees[key];
row['actions'] = "<a class='action' data-action='edit' style='margin:0px 0px 0px 5px;'><i class='icon ion-edit'></i></a><a class='action' data-action='delete' style='margin:0px 0px 0px 5px;'><i class='icon ion-trash-b'></i></a>";
}
base.tableData.rows.push(row);
});
setTimeout(function(){
base.hasEmployeesList = true;
}, 0);
setTimeout(function() {
if(base.hasEmployeesList){
base.showDownloadIcon = "visible";
}
},600);
this.loadingService.triggerLoadingEvent(false);
}
showCreateEmployee() {
this.titleService.setPageTitle("CREATE EMPLOYEE");
let self = this;
this.editMode = false;
this.employeesForm = this._fb.group(this._employeesForm.getForm());
this.newForm1();
this.showFlyout = true;
}
handleAction($event){
let action = $event.action;
delete $event.action;
delete $event.actions;
if(action == 'edit') {
this.showEditEmployee($event);
} else if(action == 'delete'){
this.removeEmployee($event);
}
}
deleteEmployee(toast){
this.loadingService.triggerLoadingEvent(true);
this.employeeService.removeEmployee(this.employeeId, this.companyId)
.subscribe(success => {
//this.loadingService.triggerLoadingEvent(false);
this._toastService.pop(TOAST_TYPE.success, "Employee Deleted Successfully");
this.employeeService.employees(this.companyId)
.subscribe(customers => this.buildTableData(customers), error => this.handleError(error));
}, error => this.handleError(error));
}
removeEmployee(row:any) {
let employee:EmployeesModel = row;
this.employeeId=employee.id;
this._toastService.pop(TOAST_TYPE.confirm, "Are You Sure You Want To Delete?");
}
active1:boolean=true;
newForm1(){
this.active1 = false;
setTimeout(()=> this.active1=true, 0);
}
showEditEmployee(row:any) {
this.loadingService.triggerLoadingEvent(true);
this.titleService.setPageTitle("UPDATE EMPLOYEE");
this.editMode = true;
this.showFlyout = true;
//this.row = row;
this.employeeService.employee(row.id, this.companyId)
.subscribe(employee => {
this.row = employee;
let email_id:any = this.employeesForm.controls['email_id'];
email_id.patchValue(employee.email_id);
let phone_number:any = this.employeesForm.controls['phone_number'];
phone_number.patchValue(employee.phone_number);
var base=this;
this._employeesForm.updateForm(this.employeesForm, row);
this.loadingService.triggerLoadingEvent(false);
}, error => this.handleError(error));
}
submit($event) {
$event && $event.preventDefault();
var data = this._employeesForm.getData(this.employeesForm);
this.companyId = Session.getCurrentCompany();
data.dob = this.dateFormater.formatDate(data.dob,this.dateFormat,this.serviceDateformat);
this.loadingService.triggerLoadingEvent(true);
if(this.editMode) {
data.id=this.row.id;
this.employeeService.updateEmployee(<EmployeesModel>data, this.companyId)
.subscribe(success => {
this.loadingService.triggerLoadingEvent(false);
this.showMessage(true, success);
this.titleService.setPageTitle("Employees");
}, error => this.showMessage(false, error));
this.showFlyout = false;
} else {
this.employeeService.addEmployee(<EmployeesModel>data, this.companyId)
.subscribe(success => {
this.loadingService.triggerLoadingEvent(false);
this.showMessage(true, success);
this.titleService.setPageTitle("Employees");
}, error => this.showMessage(false, error));
this.showFlyout = false;
}
}
showMessage(status, obj) {
this.loadingService.triggerLoadingEvent(false);
if(status) {
this.status = {};
this.status['success'] = true;
this.hasEmployeesList=false;
if(this.editMode) {
this.employeeService.employees(this.companyId)
.subscribe(employees => this.buildTableData(employees), error => this.handleError(error));
this.newForm1();
this._toastService.pop(TOAST_TYPE.success, "Employee Updated Successfully.");
} else {
this.newForm1();
this.employeeService.employees(this.companyId)
.subscribe(employees => this.buildTableData(employees), error => this.handleError(error));
this._toastService.pop(TOAST_TYPE.success, "Employee Created Successfully.");
}
this.newCustomer();
} else {
this.status = {};
this.status['error'] = true;
this._toastService.pop(TOAST_TYPE.error, "Failed To Update The Employee");
this.message = obj;
}
}
setDateOfBirth(date: string){
let empDateControl:any = this.employeesForm.controls['dob'];
empDateControl.patchValue(date);
}
// Reset the form with a new hero AND restore 'pristine' class state
// by toggling 'active' flag which causes the form
// to be removed/re-added in a tick via NgIf
// TODO: Workaround until NgForm has a reset method (#6822)
active = true;
| () {
this.active = false;
setTimeout(()=> this.active=true, 0);
}
handleError(error) {
this.loadingService.triggerLoadingEvent(false);
this._toastService.pop(TOAST_TYPE.error, "Failed To Perform Operation");
}
hideFlyout(){
this.titleService.setPageTitle("Employees");
this.row = {};
this.showFlyout = !this.showFlyout;
}
getEmployeesTableData(inputData) {
let tempData = _.cloneDeep(inputData);
let newTableData: Array<any> = [];
let tempJsonArray: any;
for( var i in tempData) {
tempJsonArray = {};
tempJsonArray["FirstName"] = tempData[i].first_name;
tempJsonArray["LastName"] = tempData[i].last_name;
tempJsonArray["SSN"] = tempData[i].ssn;
tempJsonArray["Email"] = tempData[i].email_id;
tempJsonArray["Phone"] = tempData[i].phone_number;
newTableData.push(tempJsonArray);
}
return newTableData;
}
buildPdfTabledata(fileType){
this.pdfTableData['documentHeader'] = "Header";
this.pdfTableData['documentFooter'] = "Footer";
this.pdfTableData['fileType'] = fileType;
this.pdfTableData['name'] = "Name";
this.pdfTableData.tableHeader.values = this.employeesTableColumns;
this.pdfTableData.tableRows.rows = this.getEmployeesTableData(this.tableData.rows);
}
exportToExcel() {
this.buildPdfTabledata("excel");
this.reportsService.exportFooTableIntoFile(this.companyId, this.pdfTableData)
.subscribe(data =>{
let blob = new Blob([data._body], {type:"application/vnd.ms-excel"});
let link = document.createElement('a');
link.href = window.URL.createObjectURL(blob);
link['download'] = "Employees.xls";
link.click();
}, error =>{
this._toastService.pop(TOAST_TYPE.error, "Failed To Export Table Into Excel");
});
// jQuery('#example-dropdown').foundation('close');
}
exportToPDF() {
this.buildPdfTabledata("pdf");
this.reportsService.exportFooTableIntoFile(this.companyId, this.pdfTableData)
.subscribe(data =>{
var blob = new Blob([data._body], {type:"application/pdf"});
var link = jQuery('<a></a>');
link[0].href = URL.createObjectURL(blob);
link[0].download = "Employees.pdf";
link[0].click();
}, error =>{
this._toastService.pop(TOAST_TYPE.error, "Failed To Export Table Into PDF");
});
}
}
| newCustomer | identifier_name |
Employees.component.ts |
import {Component, ViewChild} from "@angular/core";
import {FormGroup, FormBuilder} from "@angular/forms";
import {FTable} from "qCommon/app/directives/footable.directive";
import {Router} from "@angular/router";
import {TOAST_TYPE} from "qCommon/app/constants/Qount.constants";
import {ToastService} from "qCommon/app/services/Toast.service";
import {SwitchBoard} from "qCommon/app/services/SwitchBoard";
import {Session} from "qCommon/app/services/Session";
import {CompanyModel} from "../models/Company.model";
import {EmployeeService} from "qCommon/app/services/Employees.service";
import {EmployeesModel} from "../models/Employees.model";
import {EmployeesForm} from "../forms/Employees.form";
import {LoadingService} from "qCommon/app/services/LoadingService";
import {pageTitleService} from "qCommon/app/services/PageTitle";
import {DateFormater} from "qCommon/app/services/DateFormatter.service";
import {ReportService} from "reportsUI/app/services/Reports.service";
declare var jQuery:any;
declare var _:any;
@Component({
selector: 'employees',
templateUrl: '../views/employees.html'
})
export class EmployeesComponent {
tableData:any = {};
tableOptions:any = {};
status:any;
dateFormat:string;
serviceDateformat:string;
employeeId:any;
employees:Array<any>;
editMode:boolean = false;
@ViewChild('createVendor') createVendor;
row:any;
employeesForm: FormGroup;
@ViewChild('fooTableDir') fooTableDir:FTable;
hasEmployeesList:boolean = false;
message:string;
companyId:string;
companies:Array<CompanyModel> = [];
companyName:string;
showFlyout:boolean = false;
confirmSubscription:any;
routeSubscribe:any;
employeesTableColumns: Array<any> = ['FirstName', 'LastName', 'SSN', 'Email', 'Phone'];
pdfTableData: any = {"tableHeader": {"values": []}, "tableRows" : {"rows": []} };
showDownloadIcon:string = "hidden";
constructor(private _fb: FormBuilder, private employeeService: EmployeeService,
private _employeesForm:EmployeesForm, private _router: Router, private _toastService: ToastService,
private switchBoard: SwitchBoard, private loadingService:LoadingService,private titleService:pageTitleService,
private dateFormater:DateFormater, private reportsService: ReportService) {
this.dateFormat = dateFormater.getFormat();
this.serviceDateformat = dateFormater.getServiceDateformat();
this.titleService.setPageTitle("Employees");
this.employeesForm = this._fb.group(_employeesForm.getForm());
this.confirmSubscription = this.switchBoard.onToastConfirm.subscribe(toast => this.deleteEmployee(toast));
this.companyId = Session.getCurrentCompany();
if(this.companyId){
this.loadingService.triggerLoadingEvent(true);
this.employeeService.employees(this.companyId).subscribe(employees => {
this.buildTableData(employees);
}, error => this.handleError(error));
}else |
this.routeSubscribe = switchBoard.onClickPrev.subscribe(title => {
if(this.showFlyout){
this.hideFlyout();
}else {
this.toolsRedirect();
}
});
}
toolsRedirect(){
let link = ['tools'];
this._router.navigate(link);
}
ngOnDestroy(){
this.routeSubscribe.unsubscribe();
this.confirmSubscription.unsubscribe();
}
buildTableData(employees) {
this.employees = employees;
this.hasEmployeesList = false;
this.tableOptions.search = true;
this.tableOptions.pageSize = 9;
this.tableData.rows = [];
this.tableData.columns = [
{"name": "id", "title": "ID","visible":false},
{"name": "first_name", "title": "FirstName"},
{"name": "last_name", "title": "LastName"},
{"name": "ssn", "title": "SSN"},
{"name": "email_id", "title": "Email"},
{"name": "phone_number", "title": "Phone"},
{"name": "actions", "title": "", "type": "html", "filterable": false}
];
let base = this;
this.employees.forEach(function(employees) {
let row:any = {};
for(let key in base.employees[0]) {
row[key] = employees[key];
row['actions'] = "<a class='action' data-action='edit' style='margin:0px 0px 0px 5px;'><i class='icon ion-edit'></i></a><a class='action' data-action='delete' style='margin:0px 0px 0px 5px;'><i class='icon ion-trash-b'></i></a>";
}
base.tableData.rows.push(row);
});
setTimeout(function(){
base.hasEmployeesList = true;
}, 0);
setTimeout(function() {
if(base.hasEmployeesList){
base.showDownloadIcon = "visible";
}
},600);
this.loadingService.triggerLoadingEvent(false);
}
showCreateEmployee() {
this.titleService.setPageTitle("CREATE EMPLOYEE");
let self = this;
this.editMode = false;
this.employeesForm = this._fb.group(this._employeesForm.getForm());
this.newForm1();
this.showFlyout = true;
}
handleAction($event){
let action = $event.action;
delete $event.action;
delete $event.actions;
if(action == 'edit') {
this.showEditEmployee($event);
} else if(action == 'delete'){
this.removeEmployee($event);
}
}
deleteEmployee(toast){
this.loadingService.triggerLoadingEvent(true);
this.employeeService.removeEmployee(this.employeeId, this.companyId)
.subscribe(success => {
//this.loadingService.triggerLoadingEvent(false);
this._toastService.pop(TOAST_TYPE.success, "Employee Deleted Successfully");
this.employeeService.employees(this.companyId)
.subscribe(customers => this.buildTableData(customers), error => this.handleError(error));
}, error => this.handleError(error));
}
removeEmployee(row:any) {
let employee:EmployeesModel = row;
this.employeeId=employee.id;
this._toastService.pop(TOAST_TYPE.confirm, "Are You Sure You Want To Delete?");
}
active1:boolean=true;
newForm1(){
this.active1 = false;
setTimeout(()=> this.active1=true, 0);
}
showEditEmployee(row:any) {
this.loadingService.triggerLoadingEvent(true);
this.titleService.setPageTitle("UPDATE EMPLOYEE");
this.editMode = true;
this.showFlyout = true;
//this.row = row;
this.employeeService.employee(row.id, this.companyId)
.subscribe(employee => {
this.row = employee;
let email_id:any = this.employeesForm.controls['email_id'];
email_id.patchValue(employee.email_id);
let phone_number:any = this.employeesForm.controls['phone_number'];
phone_number.patchValue(employee.phone_number);
var base=this;
this._employeesForm.updateForm(this.employeesForm, row);
this.loadingService.triggerLoadingEvent(false);
}, error => this.handleError(error));
}
submit($event) {
$event && $event.preventDefault();
var data = this._employeesForm.getData(this.employeesForm);
this.companyId = Session.getCurrentCompany();
data.dob = this.dateFormater.formatDate(data.dob,this.dateFormat,this.serviceDateformat);
this.loadingService.triggerLoadingEvent(true);
if(this.editMode) {
data.id=this.row.id;
this.employeeService.updateEmployee(<EmployeesModel>data, this.companyId)
.subscribe(success => {
this.loadingService.triggerLoadingEvent(false);
this.showMessage(true, success);
this.titleService.setPageTitle("Employees");
}, error => this.showMessage(false, error));
this.showFlyout = false;
} else {
this.employeeService.addEmployee(<EmployeesModel>data, this.companyId)
.subscribe(success => {
this.loadingService.triggerLoadingEvent(false);
this.showMessage(true, success);
this.titleService.setPageTitle("Employees");
}, error => this.showMessage(false, error));
this.showFlyout = false;
}
}
showMessage(status, obj) {
this.loadingService.triggerLoadingEvent(false);
if(status) {
this.status = {};
this.status['success'] = true;
this.hasEmployeesList=false;
if(this.editMode) {
this.employeeService.employees(this.companyId)
.subscribe(employees => this.buildTableData(employees), error => this.handleError(error));
this.newForm1();
this._toastService.pop(TOAST_TYPE.success, "Employee Updated Successfully.");
} else {
this.newForm1();
this.employeeService.employees(this.companyId)
.subscribe(employees => this.buildTableData(employees), error => this.handleError(error));
this._toastService.pop(TOAST_TYPE.success, "Employee Created Successfully.");
}
this.newCustomer();
} else {
this.status = {};
this.status['error'] = true;
this._toastService.pop(TOAST_TYPE.error, "Failed To Update The Employee");
this.message = obj;
}
}
setDateOfBirth(date: string){
let empDateControl:any = this.employeesForm.controls['dob'];
empDateControl.patchValue(date);
}
// Reset the form with a new hero AND restore 'pristine' class state
// by toggling 'active' flag which causes the form
// to be removed/re-added in a tick via NgIf
// TODO: Workaround until NgForm has a reset method (#6822)
active = true;
newCustomer() {
this.active = false;
setTimeout(()=> this.active=true, 0);
}
handleError(error) {
this.loadingService.triggerLoadingEvent(false);
this._toastService.pop(TOAST_TYPE.error, "Failed To Perform Operation");
}
hideFlyout(){
this.titleService.setPageTitle("Employees");
this.row = {};
this.showFlyout = !this.showFlyout;
}
getEmployeesTableData(inputData) {
let tempData = _.cloneDeep(inputData);
let newTableData: Array<any> = [];
let tempJsonArray: any;
for( var i in tempData) {
tempJsonArray = {};
tempJsonArray["FirstName"] = tempData[i].first_name;
tempJsonArray["LastName"] = tempData[i].last_name;
tempJsonArray["SSN"] = tempData[i].ssn;
tempJsonArray["Email"] = tempData[i].email_id;
tempJsonArray["Phone"] = tempData[i].phone_number;
newTableData.push(tempJsonArray);
}
return newTableData;
}
buildPdfTabledata(fileType){
this.pdfTableData['documentHeader'] = "Header";
this.pdfTableData['documentFooter'] = "Footer";
this.pdfTableData['fileType'] = fileType;
this.pdfTableData['name'] = "Name";
this.pdfTableData.tableHeader.values = this.employeesTableColumns;
this.pdfTableData.tableRows.rows = this.getEmployeesTableData(this.tableData.rows);
}
exportToExcel() {
this.buildPdfTabledata("excel");
this.reportsService.exportFooTableIntoFile(this.companyId, this.pdfTableData)
.subscribe(data =>{
let blob = new Blob([data._body], {type:"application/vnd.ms-excel"});
let link = document.createElement('a');
link.href = window.URL.createObjectURL(blob);
link['download'] = "Employees.xls";
link.click();
}, error =>{
this._toastService.pop(TOAST_TYPE.error, "Failed To Export Table Into Excel");
});
// jQuery('#example-dropdown').foundation('close');
}
exportToPDF() {
this.buildPdfTabledata("pdf");
this.reportsService.exportFooTableIntoFile(this.companyId, this.pdfTableData)
.subscribe(data =>{
var blob = new Blob([data._body], {type:"application/pdf"});
var link = jQuery('<a></a>');
link[0].href = URL.createObjectURL(blob);
link[0].download = "Employees.pdf";
link[0].click();
}, error =>{
this._toastService.pop(TOAST_TYPE.error, "Failed To Export Table Into PDF");
});
}
}
| {
this._toastService.pop(TOAST_TYPE.error, "Please Add Company First");
} | conditional_block |
Employees.component.ts | import {Component, ViewChild} from "@angular/core";
import {FormGroup, FormBuilder} from "@angular/forms";
import {FTable} from "qCommon/app/directives/footable.directive";
import {Router} from "@angular/router";
import {TOAST_TYPE} from "qCommon/app/constants/Qount.constants";
import {ToastService} from "qCommon/app/services/Toast.service";
import {SwitchBoard} from "qCommon/app/services/SwitchBoard";
import {Session} from "qCommon/app/services/Session";
import {CompanyModel} from "../models/Company.model";
import {EmployeeService} from "qCommon/app/services/Employees.service";
import {EmployeesModel} from "../models/Employees.model";
import {EmployeesForm} from "../forms/Employees.form";
import {LoadingService} from "qCommon/app/services/LoadingService";
import {pageTitleService} from "qCommon/app/services/PageTitle";
import {DateFormater} from "qCommon/app/services/DateFormatter.service";
import {ReportService} from "reportsUI/app/services/Reports.service";
declare var jQuery:any;
declare var _:any;
@Component({
selector: 'employees',
templateUrl: '../views/employees.html'
})
export class EmployeesComponent {
tableData:any = {};
tableOptions:any = {};
status:any;
dateFormat:string;
serviceDateformat:string;
employeeId:any;
employees:Array<any>;
editMode:boolean = false;
@ViewChild('createVendor') createVendor;
row:any;
employeesForm: FormGroup;
@ViewChild('fooTableDir') fooTableDir:FTable;
hasEmployeesList:boolean = false;
message:string;
companyId:string;
companies:Array<CompanyModel> = [];
companyName:string;
showFlyout:boolean = false;
confirmSubscription:any;
routeSubscribe:any;
employeesTableColumns: Array<any> = ['FirstName', 'LastName', 'SSN', 'Email', 'Phone'];
pdfTableData: any = {"tableHeader": {"values": []}, "tableRows" : {"rows": []} };
showDownloadIcon:string = "hidden";
constructor(private _fb: FormBuilder, private employeeService: EmployeeService,
private _employeesForm:EmployeesForm, private _router: Router, private _toastService: ToastService,
private switchBoard: SwitchBoard, private loadingService:LoadingService,private titleService:pageTitleService,
private dateFormater:DateFormater, private reportsService: ReportService) {
this.dateFormat = dateFormater.getFormat();
this.serviceDateformat = dateFormater.getServiceDateformat();
this.titleService.setPageTitle("Employees");
this.employeesForm = this._fb.group(_employeesForm.getForm());
this.confirmSubscription = this.switchBoard.onToastConfirm.subscribe(toast => this.deleteEmployee(toast));
this.companyId = Session.getCurrentCompany();
if(this.companyId){
this.loadingService.triggerLoadingEvent(true);
this.employeeService.employees(this.companyId).subscribe(employees => {
this.buildTableData(employees);
}, error => this.handleError(error));
}else {
this._toastService.pop(TOAST_TYPE.error, "Please Add Company First");
}
this.routeSubscribe = switchBoard.onClickPrev.subscribe(title => {
if(this.showFlyout){
this.hideFlyout();
}else {
this.toolsRedirect();
} | let link = ['tools'];
this._router.navigate(link);
}
ngOnDestroy(){
this.routeSubscribe.unsubscribe();
this.confirmSubscription.unsubscribe();
}
buildTableData(employees) {
this.employees = employees;
this.hasEmployeesList = false;
this.tableOptions.search = true;
this.tableOptions.pageSize = 9;
this.tableData.rows = [];
this.tableData.columns = [
{"name": "id", "title": "ID","visible":false},
{"name": "first_name", "title": "FirstName"},
{"name": "last_name", "title": "LastName"},
{"name": "ssn", "title": "SSN"},
{"name": "email_id", "title": "Email"},
{"name": "phone_number", "title": "Phone"},
{"name": "actions", "title": "", "type": "html", "filterable": false}
];
let base = this;
this.employees.forEach(function(employees) {
let row:any = {};
for(let key in base.employees[0]) {
row[key] = employees[key];
row['actions'] = "<a class='action' data-action='edit' style='margin:0px 0px 0px 5px;'><i class='icon ion-edit'></i></a><a class='action' data-action='delete' style='margin:0px 0px 0px 5px;'><i class='icon ion-trash-b'></i></a>";
}
base.tableData.rows.push(row);
});
setTimeout(function(){
base.hasEmployeesList = true;
}, 0);
setTimeout(function() {
if(base.hasEmployeesList){
base.showDownloadIcon = "visible";
}
},600);
this.loadingService.triggerLoadingEvent(false);
}
showCreateEmployee() {
this.titleService.setPageTitle("CREATE EMPLOYEE");
let self = this;
this.editMode = false;
this.employeesForm = this._fb.group(this._employeesForm.getForm());
this.newForm1();
this.showFlyout = true;
}
handleAction($event){
let action = $event.action;
delete $event.action;
delete $event.actions;
if(action == 'edit') {
this.showEditEmployee($event);
} else if(action == 'delete'){
this.removeEmployee($event);
}
}
deleteEmployee(toast){
this.loadingService.triggerLoadingEvent(true);
this.employeeService.removeEmployee(this.employeeId, this.companyId)
.subscribe(success => {
//this.loadingService.triggerLoadingEvent(false);
this._toastService.pop(TOAST_TYPE.success, "Employee Deleted Successfully");
this.employeeService.employees(this.companyId)
.subscribe(customers => this.buildTableData(customers), error => this.handleError(error));
}, error => this.handleError(error));
}
removeEmployee(row:any) {
let employee:EmployeesModel = row;
this.employeeId=employee.id;
this._toastService.pop(TOAST_TYPE.confirm, "Are You Sure You Want To Delete?");
}
active1:boolean=true;
newForm1(){
this.active1 = false;
setTimeout(()=> this.active1=true, 0);
}
showEditEmployee(row:any) {
this.loadingService.triggerLoadingEvent(true);
this.titleService.setPageTitle("UPDATE EMPLOYEE");
this.editMode = true;
this.showFlyout = true;
//this.row = row;
this.employeeService.employee(row.id, this.companyId)
.subscribe(employee => {
this.row = employee;
let email_id:any = this.employeesForm.controls['email_id'];
email_id.patchValue(employee.email_id);
let phone_number:any = this.employeesForm.controls['phone_number'];
phone_number.patchValue(employee.phone_number);
var base=this;
this._employeesForm.updateForm(this.employeesForm, row);
this.loadingService.triggerLoadingEvent(false);
}, error => this.handleError(error));
}
submit($event) {
$event && $event.preventDefault();
var data = this._employeesForm.getData(this.employeesForm);
this.companyId = Session.getCurrentCompany();
data.dob = this.dateFormater.formatDate(data.dob,this.dateFormat,this.serviceDateformat);
this.loadingService.triggerLoadingEvent(true);
if(this.editMode) {
data.id=this.row.id;
this.employeeService.updateEmployee(<EmployeesModel>data, this.companyId)
.subscribe(success => {
this.loadingService.triggerLoadingEvent(false);
this.showMessage(true, success);
this.titleService.setPageTitle("Employees");
}, error => this.showMessage(false, error));
this.showFlyout = false;
} else {
this.employeeService.addEmployee(<EmployeesModel>data, this.companyId)
.subscribe(success => {
this.loadingService.triggerLoadingEvent(false);
this.showMessage(true, success);
this.titleService.setPageTitle("Employees");
}, error => this.showMessage(false, error));
this.showFlyout = false;
}
}
showMessage(status, obj) {
this.loadingService.triggerLoadingEvent(false);
if(status) {
this.status = {};
this.status['success'] = true;
this.hasEmployeesList=false;
if(this.editMode) {
this.employeeService.employees(this.companyId)
.subscribe(employees => this.buildTableData(employees), error => this.handleError(error));
this.newForm1();
this._toastService.pop(TOAST_TYPE.success, "Employee Updated Successfully.");
} else {
this.newForm1();
this.employeeService.employees(this.companyId)
.subscribe(employees => this.buildTableData(employees), error => this.handleError(error));
this._toastService.pop(TOAST_TYPE.success, "Employee Created Successfully.");
}
this.newCustomer();
} else {
this.status = {};
this.status['error'] = true;
this._toastService.pop(TOAST_TYPE.error, "Failed To Update The Employee");
this.message = obj;
}
}
setDateOfBirth(date: string){
let empDateControl:any = this.employeesForm.controls['dob'];
empDateControl.patchValue(date);
}
// Reset the form with a new hero AND restore 'pristine' class state
// by toggling 'active' flag which causes the form
// to be removed/re-added in a tick via NgIf
// TODO: Workaround until NgForm has a reset method (#6822)
active = true;
newCustomer() {
this.active = false;
setTimeout(()=> this.active=true, 0);
}
handleError(error) {
this.loadingService.triggerLoadingEvent(false);
this._toastService.pop(TOAST_TYPE.error, "Failed To Perform Operation");
}
hideFlyout(){
this.titleService.setPageTitle("Employees");
this.row = {};
this.showFlyout = !this.showFlyout;
}
getEmployeesTableData(inputData) {
let tempData = _.cloneDeep(inputData);
let newTableData: Array<any> = [];
let tempJsonArray: any;
for( var i in tempData) {
tempJsonArray = {};
tempJsonArray["FirstName"] = tempData[i].first_name;
tempJsonArray["LastName"] = tempData[i].last_name;
tempJsonArray["SSN"] = tempData[i].ssn;
tempJsonArray["Email"] = tempData[i].email_id;
tempJsonArray["Phone"] = tempData[i].phone_number;
newTableData.push(tempJsonArray);
}
return newTableData;
}
buildPdfTabledata(fileType){
this.pdfTableData['documentHeader'] = "Header";
this.pdfTableData['documentFooter'] = "Footer";
this.pdfTableData['fileType'] = fileType;
this.pdfTableData['name'] = "Name";
this.pdfTableData.tableHeader.values = this.employeesTableColumns;
this.pdfTableData.tableRows.rows = this.getEmployeesTableData(this.tableData.rows);
}
exportToExcel() {
this.buildPdfTabledata("excel");
this.reportsService.exportFooTableIntoFile(this.companyId, this.pdfTableData)
.subscribe(data =>{
let blob = new Blob([data._body], {type:"application/vnd.ms-excel"});
let link = document.createElement('a');
link.href = window.URL.createObjectURL(blob);
link['download'] = "Employees.xls";
link.click();
}, error =>{
this._toastService.pop(TOAST_TYPE.error, "Failed To Export Table Into Excel");
});
// jQuery('#example-dropdown').foundation('close');
}
exportToPDF() {
this.buildPdfTabledata("pdf");
this.reportsService.exportFooTableIntoFile(this.companyId, this.pdfTableData)
.subscribe(data =>{
var blob = new Blob([data._body], {type:"application/pdf"});
var link = jQuery('<a></a>');
link[0].href = URL.createObjectURL(blob);
link[0].download = "Employees.pdf";
link[0].click();
}, error =>{
this._toastService.pop(TOAST_TYPE.error, "Failed To Export Table Into PDF");
});
}
} | });
}
toolsRedirect(){ | random_line_split |
gestalt.go | // Copyright 2012-2015 Joubin Houshyar. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Gestalt provides basic property file utility.
//
// Property keys are typed.
//
// The key suffixes `[]` and `[:]` specify []string and map[string]string, respectively, but
// otherwise can be used as prefix or embedded in key or value without reservation.
//
// The `#` char is reserved for comments and can not be used in keys or values.
// The `\` char is reserved for line continuation and can not be used in comments, keys, or values.
// The `:` char is reserved for map k:v tuples and can not be used in map keys, or values.
//
// Syntax supports:
//
// • Embedded white space {' ', '\t'} in keys and values. Leading and trailing whitespace is ignored.
//
// • Typed properties: (Go) string, []string, and map[string]string properties
//
// • Value definitions can span multiple lines
//
// • Single line & trailing comments
//
// Example demonstrating format:
//
// # a comment line
// # note that blank lines are ignored
//
// # ------------------------------------------
// # examples of string properties - single line
//
// the property key = property value # key"the property key", value:"property value"
// the property key=property value # same as above
// a.property@the.key.called!foo = joe@schmoe.com # only embedded hashsign and/or forward-slashes are disallowed
//
// # example of string properties - multi-line
// # layout is significant only for multi-line string properties
//
// this is a multi-line property = value that spans multiple lines. \
// Note that value line continuations \
// include whitespace leading each new line. # e.g. this line appends " include whitespace ..."
//
// # ------------------------------------------
// # examples of []string properties - single line
// # NOTE that the key includes the trailing `[]`
//
// this.is.a.string.array.key[] = alpha , omega # => []string{"alpha", "omega"}
// so.is.this.[] = alpha, omega # only the suffix [] is significant of []string property type
//
// # array values can have embedded white space as well
// # basically, any leading/trailing whitespace around `,` is trimmed
// # for example
//
// another.array[] = hi there , bon voyage # => []string{"hi there", "bon voyage"}
//
// # array values can also be quoted if trailing and/or leading whitespace is required
// # for example
//
// yet.another[] = " lead", or, "trail " # => []string{" lead", "or", "trail "}
//
// # example of []string property - multiline
// # note that layout is insignificant
//
// web.resource.type.extensions[] = js, \
// css , \
// gif \
// , jpeg, \
// png # => []string{"js", "css", "gif", "jpeg", "png"}
//
// # ------------------------------------------
// # examples of map[string]string properties - single line
// # map key must end in `[:]`.
// # value must be of form <map-key>:<map-value>
// # map values must be seperated by `,`
//
// this.is.a.map[:] = a:b, b:c
//
// # key set is {"*", "list", "login"}
// dispatch.table[:] = *:/ , list : /do/list, login: /do/user/login
//
// # same thing spanning multiple lines:
// # note that layout is insignificant
//
// dispatch.tablex[:] = *:/ , \
// list:/do/list, \ # note the `,`
// login:/do/user/login
//
// The associated Properties (type) defines the properties API, but is itself simply a
// a map[string]interface{} and can be used as such (without any type safety).
//
//
//
package gestalt
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"strings"
"unicode/utf8"
)
// ----------------------------------------------------------------------
// Property file Constants
// ----------------------------------------------------------------------
// REVU - too many flavors of whitespace
const (
empty = ""
val_delim = ","
kv_delim = ":"
quote = "\""
pkv_sep = "="
trimset = "\n\r \t"
ws = " \t"
array = "[]"
array_len = len(array)
cmap = "[:]"
cmap_len = len(cmap)
min_entry_len = len("a=b")
continuation = '\\'
comment = '#'
)
// Properties is based on map and can be accessed as such
// but best to use the API
// REVU: should be private.
type Properties map[string]interface{}
// ----------------------------------------------------------------------
// API
// ----------------------------------------------------------------------
// Instantiates a new Properties object initialized from the
// content of the specified file.
func Load(filename string) (p Properties, e error) {
if filename == "" {
e = fmt.Errorf("filename is nil")
return
}
b, err := ioutil.ReadFile(filename)
if err != nil {
e = fmt.Errorf("Error reading gestalt file <%s> : %s", filename, e)
return
}
return loadBuffer(bytes.NewBuffer(b).String())
}
// Support embedded properties (e.g. without files)
func LoadStr(spec string) (p Properties, e error) {
return loadBuffer(spec)
}
// Return a clone of the argument Properties object
func (p Properties) Clone() (clone Properties) {
for k, v := range p {
clone[k] = v
}
return
}
// Copy all entries from specified Properties to the receiver
// Note this will overwrite existing matching values if overwrite is true,
// otherwise if overwrite is false it will only append keys that do not exist
// in receiver
func (p Properties) Copy(from Properties, overwrite bool) {
// TODO - REVU - either silently Debug log or return error on nil 'from'
for k, v := range from {
if p[k] == nil || overwrite {
p[k] = v
}
}
}
// Inherits from the parent key/value pairs if receiver[key] is nil.
// If key is array, receiver's value array will be PRE-pended with parent's.
// If key is map, receiver's value map will be augmented with parent's.
// nil input is silently ignored.
// REVU - issue regarding preserving order in parent array key values
func (p Properties) Inherit(from Properties) {
if from == nil {
return
}
for k, v := range from {
pv := p[k]
if pv == nil {
p[k] = v
} else {
switch {
case isArrayKey(k):
// REVU - somewhat funky semantics here
// attempting to preserve order of array values (in child)
// but parent's order is chomped
temp := make(map[string]string)
for _, av := range pv.([]string) {
temp[av] = av
}
narrv := []string{}
for _, av := range v.([]string) {
if temp[av] == "" {
narrv = append(narrv, av)
}
}
p[k] = append(narrv, pv.([]string)...)
case isMapKey(k):
mapv := v.(map[string]string)
pmapv := pv.(map[string]string)
for mk, mv := range mapv {
if pmapv[mk] == "" {
pmapv[mk] = mv
}
}
}
}
}
}
// Verifies that the receiver has values for the set of keys.
// Returns true, 0 len array if verified, or, if keys arg is nil.
// Returns false, and array of missing values otherwise.
func (p Properties) VerifyMust(keys ...string) (bool, []string) {
missing := []string{}
if keys != nil {
for _, rk := range keys {
if p[rk] == nil {
missing = append(missing, rk)
}
}
}
return len(missing) == 0, missing
}
// REVU - this doesn't make much sense - remove.
//// returns generic (interface{} prop value or default values if nil
//func (p Properties) GetOrDefault(key string, defval interface{}) (v interface{}) {
// if v = p[key]; v == nil {
// v = defval
// }
// return
//}
// returns nil/zero-value if no such key or key type is not array
func (p Properties) GetArray(key string) []string {
if isArrayKey(key) {
if v := p[key]; v == nil {
return nil
}
return p[key].([]string)
}
return nil
}
// returns prop value or default values if nil
func (p Properties) GetArrayOrDefault(key string, defval []string) (v []string) {
if v | urns nil/zero-value if no such key or not a map, or if key type is not map
func (p Properties) GetMap(key string) map[string]string {
if isMapKey(key) {
if v := p[key]; v == nil {
return nil
}
return p[key].(map[string]string)
}
return nil
}
// returns prop value or default values if nil
func (p Properties) GetMapOrDefault(key string, defval map[string]string) (v map[string]string) {
if v = p.GetMap(key); v == nil {
v = defval
}
return
}
// String value property - returns nil/zero-value if no such key or not a map
func (p Properties) GetString(key string) string {
if !(isMapKey(key) || isArrayKey(key)) {
if v := p[key]; v == nil {
return ""
}
return p[key].(string)
}
return ""
}
func (p Properties) GetStringOrDefault(key string, defval string) (v string) {
if v = p.GetString(key); v == "" {
v = defval
}
return
}
func (p Properties) MustGetString(key string) (v string) {
return p.GetString(key)
}
// Returns true if provided key is a valid array property value key,
// suitable for use with GetMap(mapkey)
func isMapKey(key string) bool {
return strings.HasSuffix(key, cmap)
}
// Returns true if provided key is a valid map property value key
// suitable for use with GetArray(arrkey)
func isArrayKey(key string) bool {
return !isMapKey(key) && strings.HasSuffix(key, array)
}
// Returns a pretty print string for Properties.
// See also Properties#Print
func (p Properties) String() string {
srep := "-- properties --\n"
for k, v := range p {
srep += fmt.Sprintf("'%s' => '%s'", k, v)
srep += "\n"
}
srep += "----------------\n"
return srep
}
// Pretty print dumps the Properties content to stdout
func (p Properties) Print() {
fmt.Print(p)
}
// ----------------------------------------------------------------------
// internal ops
// REVU: this simplistic approach to parsing places too many constraints:
// 1 - continuations for maps/arrays are redundant given the ',' element delims
// 2 - can't use ':' or '#' in k/v - these are fairly useful/common glyphs
// 3 - psuedo quoting and not true quoting
// TODO: try lexing this thing ..
// ----------------------------------------------------------------------
func loadBuffer(s string) (p Properties, e error) {
if s == empty {
e = errors.New("s is nil")
return
}
specs := splitCleanPropSpecs(s)
p = make(Properties)
for _, spec := range specs {
k, v, err := parseProperty(spec)
if err != nil {
e = fmt.Errorf("error parsing properties- %s", err)
return
}
if k != empty {
p[k] = v
}
}
return
}
// converts to []string of lines. this is mainly addressing
// comments (both flavors) & continuations (multi-line values)
// beyond a general split on crlf
func splitCleanPropSpecs(s string) (pspecs []string) {
// trim overall buffer
s = strings.Trim(s, trimset)
erase := false
cont := false
reset := false
b := make([]byte, len(s))
off := 0
s = strings.Trim(s, trimset)
for _, c := range s {
if c == rune(continuation) {
erase = true
cont = true
} else if c == comment {
erase = true
} else if c == '\n' {
if cont {
cont = false
reset = true
} else {
erase = false
}
} else if reset {
erase = false
reset = false
}
if !erase {
off += utf8.EncodeRune(b[off:], c)
}
}
s = string(b[0:off])
// split to get distinct specs.
pspecs = strings.Split(s, "\n")
return
}
// attempts to parse a single <key> = <value> property def spec.
// Returns ("", "") if comment or malformed.
// Otherwise (key, value) pair are returned.
// REVU TODO support true quotes to allow use of ':', '\', and '#' in k/v
func parseProperty(spec string) (key string, value interface{}, e error) {
if len(spec) < min_entry_len {
return empty, value, e
}
propTuple := strings.Split(strings.Trim(spec, trimset), pkv_sep)
// Verify well-formedness
if len(propTuple) != 2 || propTuple[1] == empty {
e = errors.New(fmt.Sprintf("property spec '%s' is malformed", spec))
return
}
key = strings.Trim(propTuple[0], ws)
vrep := strings.Trim(propTuple[1], ws)
// do NOT change order of parse - maps first
if isMapKey(key) {
kvmap := make(map[string]string)
kvpairs := strings.Split(vrep, val_delim)
for _, _kv := range kvpairs {
_kv = strings.Trim(_kv, ws)
_kvarr := strings.Split(_kv, kv_delim)
ek := strings.Trim(_kvarr[0], ws)
ev := strings.Trim(_kvarr[1], ws)
kvmap[strings.Trim(ek, quote)] = strings.Trim(ev, quote)
}
value = kvmap
} else if isArrayKey(key) {
arrv := strings.Split(vrep, val_delim)
for i, v := range arrv {
v = strings.Trim(v, ws)
arrv[i] = strings.Trim(v, quote)
}
value = arrv
} else {
value = strings.Trim(propTuple[1], ws)
value = strings.Trim(vrep, quote)
}
return
}
| = p.GetArray(key); v == nil {
v = defval
}
return
}
// ret | identifier_body |
gestalt.go | // Copyright 2012-2015 Joubin Houshyar. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Gestalt provides basic property file utility.
//
// Property keys are typed.
//
// The key suffixes `[]` and `[:]` specify []string and map[string]string, respectively, but
// otherwise can be used as prefix or embedded in key or value without reservation.
//
// The `#` char is reserved for comments and can not be used in keys or values.
// The `\` char is reserved for line continuation and can not be used in comments, keys, or values.
// The `:` char is reserved for map k:v tuples and can not be used in map keys, or values.
//
// Syntax supports:
//
// • Embedded white space {' ', '\t'} in keys and values. Leading and trailing whitespace is ignored.
//
// • Typed properties: (Go) string, []string, and map[string]string properties
//
// • Value definitions can span multiple lines
//
// • Single line & trailing comments
//
// Example demonstrating format:
//
// # a comment line
// # note that blank lines are ignored
//
// # ------------------------------------------
// # examples of string properties - single line
//
// the property key = property value # key"the property key", value:"property value"
// the property key=property value # same as above
// a.property@the.key.called!foo = joe@schmoe.com # only embedded hashsign and/or forward-slashes are disallowed
//
// # example of string properties - multi-line
// # layout is significant only for multi-line string properties
//
// this is a multi-line property = value that spans multiple lines. \
// Note that value line continuations \
// include whitespace leading each new line. # e.g. this line appends " include whitespace ..."
//
// # ------------------------------------------
// # examples of []string properties - single line
// # NOTE that the key includes the trailing `[]`
//
// this.is.a.string.array.key[] = alpha , omega # => []string{"alpha", "omega"}
// so.is.this.[] = alpha, omega # only the suffix [] is significant of []string property type
//
// # array values can have embedded white space as well
// # basically, any leading/trailing whitespace around `,` is trimmed
// # for example
//
// another.array[] = hi there , bon voyage # => []string{"hi there", "bon voyage"}
//
// # array values can also be quoted if trailing and/or leading whitespace is required
// # for example
//
// yet.another[] = " lead", or, "trail " # => []string{" lead", "or", "trail "}
//
// # example of []string property - multiline
// # note that layout is insignificant
//
// web.resource.type.extensions[] = js, \
// css , \
// gif \
// , jpeg, \
// png # => []string{"js", "css", "gif", "jpeg", "png"}
//
// # ------------------------------------------
// # examples of map[string]string properties - single line
// # map key must end in `[:]`.
// # value must be of form <map-key>:<map-value>
// # map values must be seperated by `,`
//
// this.is.a.map[:] = a:b, b:c
//
// # key set is {"*", "list", "login"}
// dispatch.table[:] = *:/ , list : /do/list, login: /do/user/login
//
// # same thing spanning multiple lines:
// # note that layout is insignificant
//
// dispatch.tablex[:] = *:/ , \
// list:/do/list, \ # note the `,`
// login:/do/user/login
//
// The associated Properties (type) defines the properties API, but is itself simply a
// a map[string]interface{} and can be used as such (without any type safety).
//
//
//
package gestalt
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"strings"
"unicode/utf8"
)
// ----------------------------------------------------------------------
// Property file Constants
// ----------------------------------------------------------------------
// REVU - too many flavors of whitespace
const (
empty = ""
val_delim = ","
kv_delim = ":"
quote = "\""
pkv_sep = "="
trimset = "\n\r \t"
ws = " \t"
array = "[]"
array_len = len(array)
cmap = "[:]"
cmap_len = len(cmap)
min_entry_len = len("a=b")
continuation = '\\'
comment = '#'
)
// Properties is based on map and can be accessed as such
// but best to use the API
// REVU: should be private.
type Properties map[string]interface{}
// ----------------------------------------------------------------------
// API
// ----------------------------------------------------------------------
// Instantiates a new Properties object initialized from the
// content of the specified file.
func Load(filename string) (p Properties, e error) {
if filename == "" {
e = fmt.Errorf("filename is nil")
return
}
b, err := ioutil.ReadFile(filename)
if err != nil {
e = fmt.Errorf("Error reading gestalt file <%s> : %s", filename, e)
return
}
return loadBuffer(bytes.NewBuffer(b).String())
}
// Support embedded properties (e.g. without files)
func LoadStr(spec string) (p Properties, e error) { | }
// Return a clone of the argument Properties object
func (p Properties) Clone() (clone Properties) {
for k, v := range p {
clone[k] = v
}
return
}
// Copy all entries from specified Properties to the receiver
// Note this will overwrite existing matching values if overwrite is true,
// otherwise if overwrite is false it will only append keys that do not exist
// in receiver
func (p Properties) Copy(from Properties, overwrite bool) {
// TODO - REVU - either silently Debug log or return error on nil 'from'
for k, v := range from {
if p[k] == nil || overwrite {
p[k] = v
}
}
}
// Inherits from the parent key/value pairs if receiver[key] is nil.
// If key is array, receiver's value array will be PRE-pended with parent's.
// If key is map, receiver's value map will be augmented with parent's.
// nil input is silently ignored.
// REVU - issue regarding preserving order in parent array key values
func (p Properties) Inherit(from Properties) {
if from == nil {
return
}
for k, v := range from {
pv := p[k]
if pv == nil {
p[k] = v
} else {
switch {
case isArrayKey(k):
// REVU - somewhat funky semantics here
// attempting to preserve order of array values (in child)
// but parent's order is chomped
temp := make(map[string]string)
for _, av := range pv.([]string) {
temp[av] = av
}
narrv := []string{}
for _, av := range v.([]string) {
if temp[av] == "" {
narrv = append(narrv, av)
}
}
p[k] = append(narrv, pv.([]string)...)
case isMapKey(k):
mapv := v.(map[string]string)
pmapv := pv.(map[string]string)
for mk, mv := range mapv {
if pmapv[mk] == "" {
pmapv[mk] = mv
}
}
}
}
}
}
// Verifies that the receiver has values for the set of keys.
// Returns true, 0 len array if verified, or, if keys arg is nil.
// Returns false, and array of missing values otherwise.
func (p Properties) VerifyMust(keys ...string) (bool, []string) {
missing := []string{}
if keys != nil {
for _, rk := range keys {
if p[rk] == nil {
missing = append(missing, rk)
}
}
}
return len(missing) == 0, missing
}
// REVU - this doesn't make much sense - remove.
//// returns generic (interface{} prop value or default values if nil
//func (p Properties) GetOrDefault(key string, defval interface{}) (v interface{}) {
// if v = p[key]; v == nil {
// v = defval
// }
// return
//}
// returns nil/zero-value if no such key or key type is not array
func (p Properties) GetArray(key string) []string {
if isArrayKey(key) {
if v := p[key]; v == nil {
return nil
}
return p[key].([]string)
}
return nil
}
// returns prop value or default values if nil
func (p Properties) GetArrayOrDefault(key string, defval []string) (v []string) {
if v = p.GetArray(key); v == nil {
v = defval
}
return
}
// returns nil/zero-value if no such key or not a map, or if key type is not map
func (p Properties) GetMap(key string) map[string]string {
if isMapKey(key) {
if v := p[key]; v == nil {
return nil
}
return p[key].(map[string]string)
}
return nil
}
// returns prop value or default values if nil
func (p Properties) GetMapOrDefault(key string, defval map[string]string) (v map[string]string) {
if v = p.GetMap(key); v == nil {
v = defval
}
return
}
// String value property - returns nil/zero-value if no such key or not a map
func (p Properties) GetString(key string) string {
if !(isMapKey(key) || isArrayKey(key)) {
if v := p[key]; v == nil {
return ""
}
return p[key].(string)
}
return ""
}
func (p Properties) GetStringOrDefault(key string, defval string) (v string) {
if v = p.GetString(key); v == "" {
v = defval
}
return
}
func (p Properties) MustGetString(key string) (v string) {
return p.GetString(key)
}
// Returns true if provided key is a valid array property value key,
// suitable for use with GetMap(mapkey)
func isMapKey(key string) bool {
return strings.HasSuffix(key, cmap)
}
// Returns true if provided key is a valid map property value key
// suitable for use with GetArray(arrkey)
func isArrayKey(key string) bool {
return !isMapKey(key) && strings.HasSuffix(key, array)
}
// Returns a pretty print string for Properties.
// See also Properties#Print
func (p Properties) String() string {
srep := "-- properties --\n"
for k, v := range p {
srep += fmt.Sprintf("'%s' => '%s'", k, v)
srep += "\n"
}
srep += "----------------\n"
return srep
}
// Pretty print dumps the Properties content to stdout
func (p Properties) Print() {
fmt.Print(p)
}
// ----------------------------------------------------------------------
// internal ops
// REVU: this simplistic approach to parsing places too many constraints:
// 1 - continuations for maps/arrays are redundant given the ',' element delims
// 2 - can't use ':' or '#' in k/v - these are fairly useful/common glyphs
// 3 - psuedo quoting and not true quoting
// TODO: try lexing this thing ..
// ----------------------------------------------------------------------
func loadBuffer(s string) (p Properties, e error) {
if s == empty {
e = errors.New("s is nil")
return
}
specs := splitCleanPropSpecs(s)
p = make(Properties)
for _, spec := range specs {
k, v, err := parseProperty(spec)
if err != nil {
e = fmt.Errorf("error parsing properties- %s", err)
return
}
if k != empty {
p[k] = v
}
}
return
}
// converts to []string of lines. this is mainly addressing
// comments (both flavors) & continuations (multi-line values)
// beyond a general split on crlf
func splitCleanPropSpecs(s string) (pspecs []string) {
// trim overall buffer
s = strings.Trim(s, trimset)
erase := false
cont := false
reset := false
b := make([]byte, len(s))
off := 0
s = strings.Trim(s, trimset)
for _, c := range s {
if c == rune(continuation) {
erase = true
cont = true
} else if c == comment {
erase = true
} else if c == '\n' {
if cont {
cont = false
reset = true
} else {
erase = false
}
} else if reset {
erase = false
reset = false
}
if !erase {
off += utf8.EncodeRune(b[off:], c)
}
}
s = string(b[0:off])
// split to get distinct specs.
pspecs = strings.Split(s, "\n")
return
}
// attempts to parse a single <key> = <value> property def spec.
// Returns ("", "") if comment or malformed.
// Otherwise (key, value) pair are returned.
// REVU TODO support true quotes to allow use of ':', '\', and '#' in k/v
func parseProperty(spec string) (key string, value interface{}, e error) {
if len(spec) < min_entry_len {
return empty, value, e
}
propTuple := strings.Split(strings.Trim(spec, trimset), pkv_sep)
// Verify well-formedness
if len(propTuple) != 2 || propTuple[1] == empty {
e = errors.New(fmt.Sprintf("property spec '%s' is malformed", spec))
return
}
key = strings.Trim(propTuple[0], ws)
vrep := strings.Trim(propTuple[1], ws)
// do NOT change order of parse - maps first
if isMapKey(key) {
kvmap := make(map[string]string)
kvpairs := strings.Split(vrep, val_delim)
for _, _kv := range kvpairs {
_kv = strings.Trim(_kv, ws)
_kvarr := strings.Split(_kv, kv_delim)
ek := strings.Trim(_kvarr[0], ws)
ev := strings.Trim(_kvarr[1], ws)
kvmap[strings.Trim(ek, quote)] = strings.Trim(ev, quote)
}
value = kvmap
} else if isArrayKey(key) {
arrv := strings.Split(vrep, val_delim)
for i, v := range arrv {
v = strings.Trim(v, ws)
arrv[i] = strings.Trim(v, quote)
}
value = arrv
} else {
value = strings.Trim(propTuple[1], ws)
value = strings.Trim(vrep, quote)
}
return
} | return loadBuffer(spec) | random_line_split |
gestalt.go | // Copyright 2012-2015 Joubin Houshyar. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Gestalt provides basic property file utility.
//
// Property keys are typed.
//
// The key suffixes `[]` and `[:]` specify []string and map[string]string, respectively, but
// otherwise can be used as prefix or embedded in key or value without reservation.
//
// The `#` char is reserved for comments and can not be used in keys or values.
// The `\` char is reserved for line continuation and can not be used in comments, keys, or values.
// The `:` char is reserved for map k:v tuples and can not be used in map keys, or values.
//
// Syntax supports:
//
// • Embedded white space {' ', '\t'} in keys and values. Leading and trailing whitespace is ignored.
//
// • Typed properties: (Go) string, []string, and map[string]string properties
//
// • Value definitions can span multiple lines
//
// • Single line & trailing comments
//
// Example demonstrating format:
//
// # a comment line
// # note that blank lines are ignored
//
// # ------------------------------------------
// # examples of string properties - single line
//
// the property key = property value # key"the property key", value:"property value"
// the property key=property value # same as above
// a.property@the.key.called!foo = joe@schmoe.com # only embedded hashsign and/or forward-slashes are disallowed
//
// # example of string properties - multi-line
// # layout is significant only for multi-line string properties
//
// this is a multi-line property = value that spans multiple lines. \
// Note that value line continuations \
// include whitespace leading each new line. # e.g. this line appends " include whitespace ..."
//
// # ------------------------------------------
// # examples of []string properties - single line
// # NOTE that the key includes the trailing `[]`
//
// this.is.a.string.array.key[] = alpha , omega # => []string{"alpha", "omega"}
// so.is.this.[] = alpha, omega # only the suffix [] is significant of []string property type
//
// # array values can have embedded white space as well
// # basically, any leading/trailing whitespace around `,` is trimmed
// # for example
//
// another.array[] = hi there , bon voyage # => []string{"hi there", "bon voyage"}
//
// # array values can also be quoted if trailing and/or leading whitespace is required
// # for example
//
// yet.another[] = " lead", or, "trail " # => []string{" lead", "or", "trail "}
//
// # example of []string property - multiline
// # note that layout is insignificant
//
// web.resource.type.extensions[] = js, \
// css , \
// gif \
// , jpeg, \
// png # => []string{"js", "css", "gif", "jpeg", "png"}
//
// # ------------------------------------------
// # examples of map[string]string properties - single line
// # map key must end in `[:]`.
// # value must be of form <map-key>:<map-value>
// # map values must be seperated by `,`
//
// this.is.a.map[:] = a:b, b:c
//
// # key set is {"*", "list", "login"}
// dispatch.table[:] = *:/ , list : /do/list, login: /do/user/login
//
// # same thing spanning multiple lines:
// # note that layout is insignificant
//
// dispatch.tablex[:] = *:/ , \
// list:/do/list, \ # note the `,`
// login:/do/user/login
//
// The associated Properties (type) defines the properties API, but is itself simply a
// a map[string]interface{} and can be used as such (without any type safety).
//
//
//
package gestalt
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"strings"
"unicode/utf8"
)
// ----------------------------------------------------------------------
// Property file Constants
// ----------------------------------------------------------------------
// REVU - too many flavors of whitespace
const (
empty = ""
val_delim = ","
kv_delim = ":"
quote = "\""
pkv_sep = "="
trimset = "\n\r \t"
ws = " \t"
array = "[]"
array_len = len(array)
cmap = "[:]"
cmap_len = len(cmap)
min_entry_len = len("a=b")
continuation = '\\'
comment = '#'
)
// Properties is based on map and can be accessed as such
// but best to use the API
// REVU: should be private.
type Properties map[string]interface{}
// ----------------------------------------------------------------------
// API
// ----------------------------------------------------------------------
// Instantiates a new Properties object initialized from the
// content of the specified file.
func Load(filename string) (p Properties, e error) {
if filename == "" {
e = fmt.Errorf("filename is nil")
return
}
b, err := ioutil.ReadFile(filename)
if err != nil {
e = fmt.Errorf("Error reading gestalt file <%s> : %s", filename, e)
return
}
return loadBuffer(bytes.NewBuffer(b).String())
}
// Support embedded properties (e.g. without files)
func LoadStr(spec string) (p Properties, e error) {
return loadBuffer(spec)
}
// Return a clone of the argument Properties object
func (p Properties) Clone() (clone Properties) {
for k, v := range p {
clone[k] = v
}
return
}
// Copy all entries from specified Properties to the receiver
// Note this will overwrite existing matching values if overwrite is true,
// otherwise if overwrite is false it will only append keys that do not exist
// in receiver
func (p Properties) Copy(from Properties, overwrite bool) {
// TODO - REVU - either silently Debug log or return error on nil 'from'
for k, v := range from {
if p[k] == nil || overwrite {
p[k] = v
}
}
}
// Inherits from the parent key/value pairs if receiver[key] is nil.
// If key is array, receiver's value array will be PRE-pended with parent's.
// If key is map, receiver's value map will be augmented with parent's.
// nil input is silently ignored.
// REVU - issue regarding preserving order in parent array key values
func (p Properties) Inherit(from Properties) {
if from == nil {
return
}
for k, v := range from {
pv := p[k]
if pv == nil {
p[k] = v
} else {
switch {
case isArrayKey(k):
// REVU - somewhat funky semantics here
// attempting to preserve order of array values (in child)
// but parent's order is chomped
temp := make(map[string]string)
for _, av := range pv.([]string) {
temp[av] = av
}
narrv := []string{}
for _, av := range v.([]string) {
if temp[av] == "" {
narrv = append(narrv, av)
}
}
p[k] = append(narrv, pv.([]string)...)
case isMapKey(k):
mapv := v.(map[string]string)
pmapv := pv.(map[string]string)
for mk, mv := range mapv {
if pmapv[mk] == "" {
pmapv[mk] = mv
}
}
}
}
}
}
// Verifies that the receiver has values for the set of keys.
// Returns true, 0 len array if verified, or, if keys arg is nil.
// Returns false, and array of missing values otherwise.
func (p Properties) VerifyMust(keys ...string) (bool, []string) {
missing := []string{}
if keys != nil {
for _, rk := range keys {
if p[rk] == nil {
missing = append(missing, rk)
}
}
}
return len(missing) == 0, missing
}
// REVU - this doesn't make much sense - remove.
//// returns generic (interface{} prop value or default values if nil
//func (p Properties) GetOrDefault(key string, defval interface{}) (v interface{}) {
// if v = p[key]; v == nil {
// v = defval
// }
// return
//}
// returns nil/zero-value if no such key or key type is not array
func (p Properties) GetArray(key string) []string {
if isArrayKey(key) {
if v := p[key]; v == nil {
ret | n p[key].([]string)
}
return nil
}
// returns prop value or default values if nil
func (p Properties) GetArrayOrDefault(key string, defval []string) (v []string) {
if v = p.GetArray(key); v == nil {
v = defval
}
return
}
// returns nil/zero-value if no such key or not a map, or if key type is not map
func (p Properties) GetMap(key string) map[string]string {
if isMapKey(key) {
if v := p[key]; v == nil {
return nil
}
return p[key].(map[string]string)
}
return nil
}
// returns prop value or default values if nil
func (p Properties) GetMapOrDefault(key string, defval map[string]string) (v map[string]string) {
if v = p.GetMap(key); v == nil {
v = defval
}
return
}
// String value property - returns nil/zero-value if no such key or not a map
func (p Properties) GetString(key string) string {
if !(isMapKey(key) || isArrayKey(key)) {
if v := p[key]; v == nil {
return ""
}
return p[key].(string)
}
return ""
}
func (p Properties) GetStringOrDefault(key string, defval string) (v string) {
if v = p.GetString(key); v == "" {
v = defval
}
return
}
func (p Properties) MustGetString(key string) (v string) {
return p.GetString(key)
}
// Returns true if provided key is a valid array property value key,
// suitable for use with GetMap(mapkey)
func isMapKey(key string) bool {
return strings.HasSuffix(key, cmap)
}
// Returns true if provided key is a valid map property value key
// suitable for use with GetArray(arrkey)
func isArrayKey(key string) bool {
return !isMapKey(key) && strings.HasSuffix(key, array)
}
// Returns a pretty print string for Properties.
// See also Properties#Print
func (p Properties) String() string {
srep := "-- properties --\n"
for k, v := range p {
srep += fmt.Sprintf("'%s' => '%s'", k, v)
srep += "\n"
}
srep += "----------------\n"
return srep
}
// Pretty print dumps the Properties content to stdout
func (p Properties) Print() {
fmt.Print(p)
}
// ----------------------------------------------------------------------
// internal ops
// REVU: this simplistic approach to parsing places too many constraints:
// 1 - continuations for maps/arrays are redundant given the ',' element delims
// 2 - can't use ':' or '#' in k/v - these are fairly useful/common glyphs
// 3 - psuedo quoting and not true quoting
// TODO: try lexing this thing ..
// ----------------------------------------------------------------------
func loadBuffer(s string) (p Properties, e error) {
if s == empty {
e = errors.New("s is nil")
return
}
specs := splitCleanPropSpecs(s)
p = make(Properties)
for _, spec := range specs {
k, v, err := parseProperty(spec)
if err != nil {
e = fmt.Errorf("error parsing properties- %s", err)
return
}
if k != empty {
p[k] = v
}
}
return
}
// converts to []string of lines. this is mainly addressing
// comments (both flavors) & continuations (multi-line values)
// beyond a general split on crlf
func splitCleanPropSpecs(s string) (pspecs []string) {
// trim overall buffer
s = strings.Trim(s, trimset)
erase := false
cont := false
reset := false
b := make([]byte, len(s))
off := 0
s = strings.Trim(s, trimset)
for _, c := range s {
if c == rune(continuation) {
erase = true
cont = true
} else if c == comment {
erase = true
} else if c == '\n' {
if cont {
cont = false
reset = true
} else {
erase = false
}
} else if reset {
erase = false
reset = false
}
if !erase {
off += utf8.EncodeRune(b[off:], c)
}
}
s = string(b[0:off])
// split to get distinct specs.
pspecs = strings.Split(s, "\n")
return
}
// attempts to parse a single <key> = <value> property def spec.
// Returns ("", "") if comment or malformed.
// Otherwise (key, value) pair are returned.
// REVU TODO support true quotes to allow use of ':', '\', and '#' in k/v
func parseProperty(spec string) (key string, value interface{}, e error) {
if len(spec) < min_entry_len {
return empty, value, e
}
propTuple := strings.Split(strings.Trim(spec, trimset), pkv_sep)
// Verify well-formedness
if len(propTuple) != 2 || propTuple[1] == empty {
e = errors.New(fmt.Sprintf("property spec '%s' is malformed", spec))
return
}
key = strings.Trim(propTuple[0], ws)
vrep := strings.Trim(propTuple[1], ws)
// do NOT change order of parse - maps first
if isMapKey(key) {
kvmap := make(map[string]string)
kvpairs := strings.Split(vrep, val_delim)
for _, _kv := range kvpairs {
_kv = strings.Trim(_kv, ws)
_kvarr := strings.Split(_kv, kv_delim)
ek := strings.Trim(_kvarr[0], ws)
ev := strings.Trim(_kvarr[1], ws)
kvmap[strings.Trim(ek, quote)] = strings.Trim(ev, quote)
}
value = kvmap
} else if isArrayKey(key) {
arrv := strings.Split(vrep, val_delim)
for i, v := range arrv {
v = strings.Trim(v, ws)
arrv[i] = strings.Trim(v, quote)
}
value = arrv
} else {
value = strings.Trim(propTuple[1], ws)
value = strings.Trim(vrep, quote)
}
return
}
| urn nil
}
retur | conditional_block |
gestalt.go | // Copyright 2012-2015 Joubin Houshyar. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Gestalt provides basic property file utility.
//
// Property keys are typed.
//
// The key suffixes `[]` and `[:]` specify []string and map[string]string, respectively, but
// otherwise can be used as prefix or embedded in key or value without reservation.
//
// The `#` char is reserved for comments and can not be used in keys or values.
// The `\` char is reserved for line continuation and can not be used in comments, keys, or values.
// The `:` char is reserved for map k:v tuples and can not be used in map keys, or values.
//
// Syntax supports:
//
// • Embedded white space {' ', '\t'} in keys and values. Leading and trailing whitespace is ignored.
//
// • Typed properties: (Go) string, []string, and map[string]string properties
//
// • Value definitions can span multiple lines
//
// • Single line & trailing comments
//
// Example demonstrating format:
//
// # a comment line
// # note that blank lines are ignored
//
// # ------------------------------------------
// # examples of string properties - single line
//
// the property key = property value # key"the property key", value:"property value"
// the property key=property value # same as above
// a.property@the.key.called!foo = joe@schmoe.com # only embedded hashsign and/or forward-slashes are disallowed
//
// # example of string properties - multi-line
// # layout is significant only for multi-line string properties
//
// this is a multi-line property = value that spans multiple lines. \
// Note that value line continuations \
// include whitespace leading each new line. # e.g. this line appends " include whitespace ..."
//
// # ------------------------------------------
// # examples of []string properties - single line
// # NOTE that the key includes the trailing `[]`
//
// this.is.a.string.array.key[] = alpha , omega # => []string{"alpha", "omega"}
// so.is.this.[] = alpha, omega # only the suffix [] is significant of []string property type
//
// # array values can have embedded white space as well
// # basically, any leading/trailing whitespace around `,` is trimmed
// # for example
//
// another.array[] = hi there , bon voyage # => []string{"hi there", "bon voyage"}
//
// # array values can also be quoted if trailing and/or leading whitespace is required
// # for example
//
// yet.another[] = " lead", or, "trail " # => []string{" lead", "or", "trail "}
//
// # example of []string property - multiline
// # note that layout is insignificant
//
// web.resource.type.extensions[] = js, \
// css , \
// gif \
// , jpeg, \
// png # => []string{"js", "css", "gif", "jpeg", "png"}
//
// # ------------------------------------------
// # examples of map[string]string properties - single line
// # map key must end in `[:]`.
// # value must be of form <map-key>:<map-value>
// # map values must be seperated by `,`
//
// this.is.a.map[:] = a:b, b:c
//
// # key set is {"*", "list", "login"}
// dispatch.table[:] = *:/ , list : /do/list, login: /do/user/login
//
// # same thing spanning multiple lines:
// # note that layout is insignificant
//
// dispatch.tablex[:] = *:/ , \
// list:/do/list, \ # note the `,`
// login:/do/user/login
//
// The associated Properties (type) defines the properties API, but is itself simply a
// a map[string]interface{} and can be used as such (without any type safety).
//
//
//
package gestalt
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"strings"
"unicode/utf8"
)
// ----------------------------------------------------------------------
// Property file Constants
// ----------------------------------------------------------------------
// REVU - too many flavors of whitespace
const (
empty = ""
val_delim = ","
kv_delim = ":"
quote = "\""
pkv_sep = "="
trimset = "\n\r \t"
ws = " \t"
array = "[]"
array_len = len(array)
cmap = "[:]"
cmap_len = len(cmap)
min_entry_len = len("a=b")
continuation = '\\'
comment = '#'
)
// Properties is based on map and can be accessed as such
// but best to use the API
// REVU: should be private.
type Properties map[string]interface{}
// ----------------------------------------------------------------------
// API
// ----------------------------------------------------------------------
// Instantiates a new Properties object initialized from the
// content of the specified file.
func Load(filename string) (p Properties, e error) {
if filename == "" {
e = fmt.Errorf("filename is nil")
return
}
b, err := ioutil.ReadFile(filename)
if err != nil {
e = fmt.Errorf("Error reading gestalt file <%s> : %s", filename, e)
return
}
return loadBuffer(bytes.NewBuffer(b).String())
}
// Support embedded properties (e.g. without files)
func LoadStr(spec string) (p Properties, e error) {
return loadBuffer(spec)
}
// Return a clone of the argument Properties object
func (p Properties) Clone() (clone Properties) {
for k, v := range p {
clone[k] = v
}
return
}
// Copy all entries from specified Properties to the receiver
// Note this will overwrite existing matching values if overwrite is true,
// otherwise if overwrite is false it will only append keys that do not exist
// in receiver
func (p Properties) Copy(from Properties, overwrite bool) {
// TODO - REVU - either silently Debug log or return error on nil 'from'
for k, v := range from {
if p[k] == nil || overwrite {
p[k] = v
}
}
}
// Inherits from the parent key/value pairs if receiver[key] is nil.
// If key is array, receiver's value array will be PRE-pended with parent's.
// If key is map, receiver's value map will be augmented with parent's.
// nil input is silently ignored.
// REVU - issue regarding preserving order in parent array key values
func (p Properties) Inherit(from Properties) {
if from == nil {
return
}
for k, v := range from {
pv := p[k]
if pv == nil {
p[k] = v
} else {
switch {
case isArrayKey(k):
// REVU - somewhat funky semantics here
// attempting to preserve order of array values (in child)
// but parent's order is chomped
temp := make(map[string]string)
for _, av := range pv.([]string) {
temp[av] = av
}
narrv := []string{}
for _, av := range v.([]string) {
if temp[av] == "" {
narrv = append(narrv, av)
}
}
p[k] = append(narrv, pv.([]string)...)
case isMapKey(k):
mapv := v.(map[string]string)
pmapv := pv.(map[string]string)
for mk, mv := range mapv {
if pmapv[mk] == "" {
pmapv[mk] = mv
}
}
}
}
}
}
// Verifies that the receiver has values for the set of keys.
// Returns true, 0 len array if verified, or, if keys arg is nil.
// Returns false, and array of missing values otherwise.
func (p Properties) VerifyMust(keys ...string) (bool, []string) {
missing := []string{}
if keys != nil {
for _, rk := range keys {
if p[rk] == nil {
missing = append(missing, rk)
}
}
}
return len(missing) == 0, missing
}
// REVU - this doesn't make much sense - remove.
//// returns generic (interface{} prop value or default values if nil
//func (p Properties) GetOrDefault(key string, defval interface{}) (v interface{}) {
// if v = p[key]; v == nil {
// v = defval
// }
// return
//}
// returns nil/zero-value if no such key or key type is not array
func (p Properties) GetArray(key string) []string {
if isArrayKey(key) {
if v := p[key]; v == nil {
return nil
}
return p[key].([]string)
}
return nil
}
// returns prop value or default values if nil
func (p Properties) GetArrayOrDefault(key string, defval []string) (v []string) {
if v = p.GetArray(key); v == nil {
v = defval
}
return
}
// returns nil/zero-value if no such key or not a map, or if key type is not map
func (p Properties) GetMap(key string) map[string]string {
if isMapKey(key) {
if v := p[key]; v == nil {
return nil
}
return p[key].(map[string]string)
}
return nil
}
// returns prop value or default values if nil
func (p Properties) GetMapOrDefault(key string, defval map[string]string) (v map[string]string) {
if v = p.GetMap(key); v == nil {
v = defval
}
return
}
// String value property - returns nil/zero-value if no such key or not a map
func (p Properties) GetString(key string) string {
if !(isMapKey(key) || isArrayKey(key)) {
if v := p[key]; v == nil {
return ""
}
return p[key].(string)
}
return ""
}
func (p Properties) GetStringOrDefault(key string, defval string) (v string) {
if v = p.GetString(key); v == "" {
v = defval
}
return
}
func (p Properties) MustGetString(key string) (v string) {
return p.GetString(key)
}
// Returns true if provided key is a valid array property value key,
// suitable for use with GetMap(mapkey)
func isMapKey(key string) bool {
return strings.HasSuffix(key, cmap)
}
// Returns true if provided key is a valid map property value key
// suitable for use with GetArray(arrkey)
func isArrayKey(key string) bool {
return !isMapKey(key) && strings.HasSuffix(key, array)
}
// Returns a pretty print string for Properties.
// See also Properties#Print
func (p Properties) String() string {
srep := "-- properties --\n"
for k, v := range p {
srep += fmt.Sprintf("'%s' => '%s'", k, v)
srep += "\n"
}
srep += "----------------\n"
return srep
}
// Pretty print dumps the Properties content to stdout
func (p Properties) Print() | t.Print(p)
}
// ----------------------------------------------------------------------
// internal ops
// REVU: this simplistic approach to parsing places too many constraints:
// 1 - continuations for maps/arrays are redundant given the ',' element delims
// 2 - can't use ':' or '#' in k/v - these are fairly useful/common glyphs
// 3 - psuedo quoting and not true quoting
// TODO: try lexing this thing ..
// ----------------------------------------------------------------------
func loadBuffer(s string) (p Properties, e error) {
if s == empty {
e = errors.New("s is nil")
return
}
specs := splitCleanPropSpecs(s)
p = make(Properties)
for _, spec := range specs {
k, v, err := parseProperty(spec)
if err != nil {
e = fmt.Errorf("error parsing properties- %s", err)
return
}
if k != empty {
p[k] = v
}
}
return
}
// converts to []string of lines. this is mainly addressing
// comments (both flavors) & continuations (multi-line values)
// beyond a general split on crlf
func splitCleanPropSpecs(s string) (pspecs []string) {
// trim overall buffer
s = strings.Trim(s, trimset)
erase := false
cont := false
reset := false
b := make([]byte, len(s))
off := 0
s = strings.Trim(s, trimset)
for _, c := range s {
if c == rune(continuation) {
erase = true
cont = true
} else if c == comment {
erase = true
} else if c == '\n' {
if cont {
cont = false
reset = true
} else {
erase = false
}
} else if reset {
erase = false
reset = false
}
if !erase {
off += utf8.EncodeRune(b[off:], c)
}
}
s = string(b[0:off])
// split to get distinct specs.
pspecs = strings.Split(s, "\n")
return
}
// attempts to parse a single <key> = <value> property def spec.
// Returns ("", "") if comment or malformed.
// Otherwise (key, value) pair are returned.
// REVU TODO support true quotes to allow use of ':', '\', and '#' in k/v
func parseProperty(spec string) (key string, value interface{}, e error) {
if len(spec) < min_entry_len {
return empty, value, e
}
propTuple := strings.Split(strings.Trim(spec, trimset), pkv_sep)
// Verify well-formedness
if len(propTuple) != 2 || propTuple[1] == empty {
e = errors.New(fmt.Sprintf("property spec '%s' is malformed", spec))
return
}
key = strings.Trim(propTuple[0], ws)
vrep := strings.Trim(propTuple[1], ws)
// do NOT change order of parse - maps first
if isMapKey(key) {
kvmap := make(map[string]string)
kvpairs := strings.Split(vrep, val_delim)
for _, _kv := range kvpairs {
_kv = strings.Trim(_kv, ws)
_kvarr := strings.Split(_kv, kv_delim)
ek := strings.Trim(_kvarr[0], ws)
ev := strings.Trim(_kvarr[1], ws)
kvmap[strings.Trim(ek, quote)] = strings.Trim(ev, quote)
}
value = kvmap
} else if isArrayKey(key) {
arrv := strings.Split(vrep, val_delim)
for i, v := range arrv {
v = strings.Trim(v, ws)
arrv[i] = strings.Trim(v, quote)
}
value = arrv
} else {
value = strings.Trim(propTuple[1], ws)
value = strings.Trim(vrep, quote)
}
return
}
| {
fm | identifier_name |
gran_model.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import paddle.fluid as fluid
from model.graph_encoder import encoder, pre_process_layer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.info(logger.getEffectiveLevel())
class GRANModel(object):
"""
GRAN model class.
"""
def __init__(self,
input_ids,
input_mask,
edge_labels,
config,
weight_sharing=True,
use_fp16=False):
self._n_layer = config['num_hidden_layers']
self._n_head = config['num_attention_heads']
self._emb_size = config['hidden_size']
self._intermediate_size = config['intermediate_size']
self._hidden_act = config['hidden_act']
self._prepostprocess_dropout = config['hidden_dropout_prob']
self._attention_dropout = config['attention_dropout_prob']
self._voc_size = config['vocab_size']
self._n_relation = config['num_relations']
self._n_edge = config['num_edges']
self._max_seq_len = config['max_seq_len']
self._max_arity = config['max_arity']
self._e_soft_label = config['entity_soft_label']
self._r_soft_label = config['relation_soft_label']
self._weight_sharing = weight_sharing
self._node_emb_name = "node_embedding"
self._edge_emb_name_k = "edge_embedding_key"
self._edge_emb_name_v = "edge_embedding_value"
self._dtype = "float16" if use_fp16 else "float32"
# Initialize all weights by truncated normal initializer, and all biases
# will be initialized by constant zero by default.
self._param_initializer = fluid.initializer.TruncatedNormal(
scale=config['initializer_range'])
self._build_model(input_ids, input_mask, edge_labels)
def _build_model(self, input_ids, input_mask, edge_labels):
# get node embeddings of input tokens
emb_out = fluid.layers.embedding(
input=input_ids,
size=[self._voc_size, self._emb_size],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._node_emb_name, initializer=self._param_initializer),
is_sparse=False)
emb_out = pre_process_layer(
emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
# get edge embeddings between input tokens
edge_labels = fluid.layers.reshape(
x=edge_labels, shape=[-1, 1], inplace=True)
edges_key = fluid.layers.embedding(
input=edge_labels,
size=[self._n_edge, self._emb_size // self._n_head],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._edge_emb_name_k,
initializer=self._param_initializer),
is_sparse=False)
edges_value = fluid.layers.embedding(
input=edge_labels,
size=[self._n_edge, self._emb_size // self._n_head],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._edge_emb_name_v,
initializer=self._param_initializer),
is_sparse=False)
edge_mask = fluid.layers.sign(
fluid.layers.cast(
x=edge_labels, dtype='float32'))
# edge_mask.stop_gradient = True
edges_key = fluid.layers.elementwise_mul(
x=edges_key, y=edge_mask, axis=0)
edges_key = fluid.layers.reshape(
x=edges_key,
shape=[self._max_seq_len, self._max_seq_len, -1],
inplace=True)
edges_value = fluid.layers.elementwise_mul(
x=edges_value, y=edge_mask, axis=0)
edges_value = fluid.layers.reshape(
x=edges_value,
shape=[self._max_seq_len, self._max_seq_len, -1],
inplace=True)
# get multi-head self-attention mask
if self._dtype == "float16":
input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
self_attn_mask = fluid.layers.matmul(
x=input_mask, y=input_mask, transpose_y=True)
self_attn_mask = fluid.layers.scale(
x=self_attn_mask,
scale=1000000.0,
bias=-1.0,
bias_after_scale=False)
n_head_self_attn_mask = fluid.layers.stack(
x=[self_attn_mask] * self._n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
# stack of graph transformer encoders
self._enc_out = encoder(
enc_input=emb_out,
edges_key=edges_key,
edges_value=edges_value,
attn_bias=n_head_self_attn_mask,
n_layer=self._n_layer,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._intermediate_size,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=0,
hidden_act=self._hidden_act,
preprocess_cmd="",
postprocess_cmd="dan",
param_initializer=self._param_initializer,
name='encoder')
def get_sequence_output(self):
return self._enc_out
def get_mask_lm_output(self, mask_pos, mask_label, mask_type):
"""
Get the loss & logits for masked entity/relation prediction.
"""
mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')
reshaped_emb_out = fluid.layers.reshape(
x=self._enc_out, shape=[-1, self._emb_size])
# extract masked tokens' feature
mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)
# transform: fc
mask_trans_feat = fluid.layers.fc(
input=mask_feat,
size=self._emb_size,
act=self._hidden_act,
param_attr=fluid.ParamAttr(
name='mask_lm_trans_fc.w_0',
initializer=self._param_initializer),
bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
# transform: layer norm
mask_trans_feat = pre_process_layer(
mask_trans_feat, 'n', name='mask_lm_trans')
mask_lm_out_bias_attr = fluid.ParamAttr(
name="mask_lm_out_fc.b_0",
initializer=fluid.initializer.Constant(value=0.0))
if self._weight_sharing:
fc_out = fluid.layers.matmul(
x=mask_trans_feat,
y=fluid.default_main_program().global_block().var(
self._node_emb_name),
transpose_y=True)
fc_out += fluid.layers.create_parameter(
shape=[self._voc_size],
dtype=self._dtype,
attr=mask_lm_out_bias_attr,
is_bias=True)
else:
fc_out = fluid.layers.fc(input=mask_trans_feat,
size=self._voc_size,
param_attr=fluid.ParamAttr(
name="mask_lm_out_fc.w_0",
initializer=self._param_initializer),
bias_attr=mask_lm_out_bias_attr)
special_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label, shape=[-1, 2], dtype='int64', value=-1)
relation_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label,
shape=[-1, self._n_relation],
dtype='int64',
value=-1)
entity_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label,
shape=[-1, (self._voc_size - self._n_relation - 2)],
dtype='int64',
value=1)
type_indicator = fluid.layers.concat(
input=[relation_indicator, entity_indicator], axis=-1)
type_indicator = fluid.layers.elementwise_mul(
x=type_indicator, y=mask_type, axis=0)
type_indicator = fluid.layers.concat(
input=[special_indicator, type_indicator], axis=-1)
type_indicator = fluid.layers.cast(x=type_indicator, dtype='float32')
type_indicator = fluid.layers.thresholded_relu(
x=type_indicator, threshold=0.0)
fc_out_mask = fluid.layers.scale(
x=type_indicator,
scale=1000000.0,
bias=-1.0,
bias_after_scale=False)
fc_out = fluid.layers.elementwise_add(x=fc_out, y=fc_out_mask)
one_hot_labels = fluid.layers.one_hot(
input=mask_label, depth=self._voc_size)
type_indicator = fluid.layers.elementwise_sub(
x=type_indicator, y=one_hot_labels)
num_candidates = fluid.layers.reduce_sum(input=type_indicator, dim=-1)
mask_type = fluid.layers.cast(x=mask_type, dtype='float32')
soft_labels = ((1 + mask_type) * self._e_soft_label +
(1 - mask_type) * self._r_soft_label) / 2.0
soft_labels = fluid.layers.expand(soft_labels, [1, self._voc_size])
soft_labels = soft_labels * one_hot_labels + (1.0 - soft_labels) * \
fluid.layers.elementwise_div(x=type_indicator, y=num_candidates, axis=0)
soft_labels.stop_gradient = True
mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
logits=fc_out, label=soft_labels, soft_label=True)
mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)
return mean_mask_lm_loss, fc_out | # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GRAN model.""" | random_line_split |
gran_model.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GRAN model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import paddle.fluid as fluid
from model.graph_encoder import encoder, pre_process_layer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.info(logger.getEffectiveLevel())
class GRANModel(object):
"""
GRAN model class.
"""
def __init__(self,
input_ids,
input_mask,
edge_labels,
config,
weight_sharing=True,
use_fp16=False):
|
def _build_model(self, input_ids, input_mask, edge_labels):
# get node embeddings of input tokens
emb_out = fluid.layers.embedding(
input=input_ids,
size=[self._voc_size, self._emb_size],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._node_emb_name, initializer=self._param_initializer),
is_sparse=False)
emb_out = pre_process_layer(
emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
# get edge embeddings between input tokens
edge_labels = fluid.layers.reshape(
x=edge_labels, shape=[-1, 1], inplace=True)
edges_key = fluid.layers.embedding(
input=edge_labels,
size=[self._n_edge, self._emb_size // self._n_head],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._edge_emb_name_k,
initializer=self._param_initializer),
is_sparse=False)
edges_value = fluid.layers.embedding(
input=edge_labels,
size=[self._n_edge, self._emb_size // self._n_head],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._edge_emb_name_v,
initializer=self._param_initializer),
is_sparse=False)
edge_mask = fluid.layers.sign(
fluid.layers.cast(
x=edge_labels, dtype='float32'))
# edge_mask.stop_gradient = True
edges_key = fluid.layers.elementwise_mul(
x=edges_key, y=edge_mask, axis=0)
edges_key = fluid.layers.reshape(
x=edges_key,
shape=[self._max_seq_len, self._max_seq_len, -1],
inplace=True)
edges_value = fluid.layers.elementwise_mul(
x=edges_value, y=edge_mask, axis=0)
edges_value = fluid.layers.reshape(
x=edges_value,
shape=[self._max_seq_len, self._max_seq_len, -1],
inplace=True)
# get multi-head self-attention mask
if self._dtype == "float16":
input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
self_attn_mask = fluid.layers.matmul(
x=input_mask, y=input_mask, transpose_y=True)
self_attn_mask = fluid.layers.scale(
x=self_attn_mask,
scale=1000000.0,
bias=-1.0,
bias_after_scale=False)
n_head_self_attn_mask = fluid.layers.stack(
x=[self_attn_mask] * self._n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
# stack of graph transformer encoders
self._enc_out = encoder(
enc_input=emb_out,
edges_key=edges_key,
edges_value=edges_value,
attn_bias=n_head_self_attn_mask,
n_layer=self._n_layer,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._intermediate_size,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=0,
hidden_act=self._hidden_act,
preprocess_cmd="",
postprocess_cmd="dan",
param_initializer=self._param_initializer,
name='encoder')
def get_sequence_output(self):
return self._enc_out
def get_mask_lm_output(self, mask_pos, mask_label, mask_type):
"""
Get the loss & logits for masked entity/relation prediction.
"""
mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')
reshaped_emb_out = fluid.layers.reshape(
x=self._enc_out, shape=[-1, self._emb_size])
# extract masked tokens' feature
mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)
# transform: fc
mask_trans_feat = fluid.layers.fc(
input=mask_feat,
size=self._emb_size,
act=self._hidden_act,
param_attr=fluid.ParamAttr(
name='mask_lm_trans_fc.w_0',
initializer=self._param_initializer),
bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
# transform: layer norm
mask_trans_feat = pre_process_layer(
mask_trans_feat, 'n', name='mask_lm_trans')
mask_lm_out_bias_attr = fluid.ParamAttr(
name="mask_lm_out_fc.b_0",
initializer=fluid.initializer.Constant(value=0.0))
if self._weight_sharing:
fc_out = fluid.layers.matmul(
x=mask_trans_feat,
y=fluid.default_main_program().global_block().var(
self._node_emb_name),
transpose_y=True)
fc_out += fluid.layers.create_parameter(
shape=[self._voc_size],
dtype=self._dtype,
attr=mask_lm_out_bias_attr,
is_bias=True)
else:
fc_out = fluid.layers.fc(input=mask_trans_feat,
size=self._voc_size,
param_attr=fluid.ParamAttr(
name="mask_lm_out_fc.w_0",
initializer=self._param_initializer),
bias_attr=mask_lm_out_bias_attr)
special_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label, shape=[-1, 2], dtype='int64', value=-1)
relation_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label,
shape=[-1, self._n_relation],
dtype='int64',
value=-1)
entity_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label,
shape=[-1, (self._voc_size - self._n_relation - 2)],
dtype='int64',
value=1)
type_indicator = fluid.layers.concat(
input=[relation_indicator, entity_indicator], axis=-1)
type_indicator = fluid.layers.elementwise_mul(
x=type_indicator, y=mask_type, axis=0)
type_indicator = fluid.layers.concat(
input=[special_indicator, type_indicator], axis=-1)
type_indicator = fluid.layers.cast(x=type_indicator, dtype='float32')
type_indicator = fluid.layers.thresholded_relu(
x=type_indicator, threshold=0.0)
fc_out_mask = fluid.layers.scale(
x=type_indicator,
scale=1000000.0,
bias=-1.0,
bias_after_scale=False)
fc_out = fluid.layers.elementwise_add(x=fc_out, y=fc_out_mask)
one_hot_labels = fluid.layers.one_hot(
input=mask_label, depth=self._voc_size)
type_indicator = fluid.layers.elementwise_sub(
x=type_indicator, y=one_hot_labels)
num_candidates = fluid.layers.reduce_sum(input=type_indicator, dim=-1)
mask_type = fluid.layers.cast(x=mask_type, dtype='float32')
soft_labels = ((1 + mask_type) * self._e_soft_label +
(1 - mask_type) * self._r_soft_label) / 2.0
soft_labels = fluid.layers.expand(soft_labels, [1, self._voc_size])
soft_labels = soft_labels * one_hot_labels + (1.0 - soft_labels) * \
fluid.layers.elementwise_div(x=type_indicator, y=num_candidates, axis=0)
soft_labels.stop_gradient = True
mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
logits=fc_out, label=soft_labels, soft_label=True)
mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)
return mean_mask_lm_loss, fc_out
| self._n_layer = config['num_hidden_layers']
self._n_head = config['num_attention_heads']
self._emb_size = config['hidden_size']
self._intermediate_size = config['intermediate_size']
self._hidden_act = config['hidden_act']
self._prepostprocess_dropout = config['hidden_dropout_prob']
self._attention_dropout = config['attention_dropout_prob']
self._voc_size = config['vocab_size']
self._n_relation = config['num_relations']
self._n_edge = config['num_edges']
self._max_seq_len = config['max_seq_len']
self._max_arity = config['max_arity']
self._e_soft_label = config['entity_soft_label']
self._r_soft_label = config['relation_soft_label']
self._weight_sharing = weight_sharing
self._node_emb_name = "node_embedding"
self._edge_emb_name_k = "edge_embedding_key"
self._edge_emb_name_v = "edge_embedding_value"
self._dtype = "float16" if use_fp16 else "float32"
# Initialize all weights by truncated normal initializer, and all biases
# will be initialized by constant zero by default.
self._param_initializer = fluid.initializer.TruncatedNormal(
scale=config['initializer_range'])
self._build_model(input_ids, input_mask, edge_labels) | identifier_body |
gran_model.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GRAN model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import paddle.fluid as fluid
from model.graph_encoder import encoder, pre_process_layer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.info(logger.getEffectiveLevel())
class GRANModel(object):
"""
GRAN model class.
"""
def | (self,
input_ids,
input_mask,
edge_labels,
config,
weight_sharing=True,
use_fp16=False):
self._n_layer = config['num_hidden_layers']
self._n_head = config['num_attention_heads']
self._emb_size = config['hidden_size']
self._intermediate_size = config['intermediate_size']
self._hidden_act = config['hidden_act']
self._prepostprocess_dropout = config['hidden_dropout_prob']
self._attention_dropout = config['attention_dropout_prob']
self._voc_size = config['vocab_size']
self._n_relation = config['num_relations']
self._n_edge = config['num_edges']
self._max_seq_len = config['max_seq_len']
self._max_arity = config['max_arity']
self._e_soft_label = config['entity_soft_label']
self._r_soft_label = config['relation_soft_label']
self._weight_sharing = weight_sharing
self._node_emb_name = "node_embedding"
self._edge_emb_name_k = "edge_embedding_key"
self._edge_emb_name_v = "edge_embedding_value"
self._dtype = "float16" if use_fp16 else "float32"
# Initialize all weights by truncated normal initializer, and all biases
# will be initialized by constant zero by default.
self._param_initializer = fluid.initializer.TruncatedNormal(
scale=config['initializer_range'])
self._build_model(input_ids, input_mask, edge_labels)
def _build_model(self, input_ids, input_mask, edge_labels):
# get node embeddings of input tokens
emb_out = fluid.layers.embedding(
input=input_ids,
size=[self._voc_size, self._emb_size],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._node_emb_name, initializer=self._param_initializer),
is_sparse=False)
emb_out = pre_process_layer(
emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
# get edge embeddings between input tokens
edge_labels = fluid.layers.reshape(
x=edge_labels, shape=[-1, 1], inplace=True)
edges_key = fluid.layers.embedding(
input=edge_labels,
size=[self._n_edge, self._emb_size // self._n_head],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._edge_emb_name_k,
initializer=self._param_initializer),
is_sparse=False)
edges_value = fluid.layers.embedding(
input=edge_labels,
size=[self._n_edge, self._emb_size // self._n_head],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._edge_emb_name_v,
initializer=self._param_initializer),
is_sparse=False)
edge_mask = fluid.layers.sign(
fluid.layers.cast(
x=edge_labels, dtype='float32'))
# edge_mask.stop_gradient = True
edges_key = fluid.layers.elementwise_mul(
x=edges_key, y=edge_mask, axis=0)
edges_key = fluid.layers.reshape(
x=edges_key,
shape=[self._max_seq_len, self._max_seq_len, -1],
inplace=True)
edges_value = fluid.layers.elementwise_mul(
x=edges_value, y=edge_mask, axis=0)
edges_value = fluid.layers.reshape(
x=edges_value,
shape=[self._max_seq_len, self._max_seq_len, -1],
inplace=True)
# get multi-head self-attention mask
if self._dtype == "float16":
input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
self_attn_mask = fluid.layers.matmul(
x=input_mask, y=input_mask, transpose_y=True)
self_attn_mask = fluid.layers.scale(
x=self_attn_mask,
scale=1000000.0,
bias=-1.0,
bias_after_scale=False)
n_head_self_attn_mask = fluid.layers.stack(
x=[self_attn_mask] * self._n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
# stack of graph transformer encoders
self._enc_out = encoder(
enc_input=emb_out,
edges_key=edges_key,
edges_value=edges_value,
attn_bias=n_head_self_attn_mask,
n_layer=self._n_layer,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._intermediate_size,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=0,
hidden_act=self._hidden_act,
preprocess_cmd="",
postprocess_cmd="dan",
param_initializer=self._param_initializer,
name='encoder')
def get_sequence_output(self):
return self._enc_out
def get_mask_lm_output(self, mask_pos, mask_label, mask_type):
"""
Get the loss & logits for masked entity/relation prediction.
"""
mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')
reshaped_emb_out = fluid.layers.reshape(
x=self._enc_out, shape=[-1, self._emb_size])
# extract masked tokens' feature
mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)
# transform: fc
mask_trans_feat = fluid.layers.fc(
input=mask_feat,
size=self._emb_size,
act=self._hidden_act,
param_attr=fluid.ParamAttr(
name='mask_lm_trans_fc.w_0',
initializer=self._param_initializer),
bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
# transform: layer norm
mask_trans_feat = pre_process_layer(
mask_trans_feat, 'n', name='mask_lm_trans')
mask_lm_out_bias_attr = fluid.ParamAttr(
name="mask_lm_out_fc.b_0",
initializer=fluid.initializer.Constant(value=0.0))
if self._weight_sharing:
fc_out = fluid.layers.matmul(
x=mask_trans_feat,
y=fluid.default_main_program().global_block().var(
self._node_emb_name),
transpose_y=True)
fc_out += fluid.layers.create_parameter(
shape=[self._voc_size],
dtype=self._dtype,
attr=mask_lm_out_bias_attr,
is_bias=True)
else:
fc_out = fluid.layers.fc(input=mask_trans_feat,
size=self._voc_size,
param_attr=fluid.ParamAttr(
name="mask_lm_out_fc.w_0",
initializer=self._param_initializer),
bias_attr=mask_lm_out_bias_attr)
special_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label, shape=[-1, 2], dtype='int64', value=-1)
relation_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label,
shape=[-1, self._n_relation],
dtype='int64',
value=-1)
entity_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label,
shape=[-1, (self._voc_size - self._n_relation - 2)],
dtype='int64',
value=1)
type_indicator = fluid.layers.concat(
input=[relation_indicator, entity_indicator], axis=-1)
type_indicator = fluid.layers.elementwise_mul(
x=type_indicator, y=mask_type, axis=0)
type_indicator = fluid.layers.concat(
input=[special_indicator, type_indicator], axis=-1)
type_indicator = fluid.layers.cast(x=type_indicator, dtype='float32')
type_indicator = fluid.layers.thresholded_relu(
x=type_indicator, threshold=0.0)
fc_out_mask = fluid.layers.scale(
x=type_indicator,
scale=1000000.0,
bias=-1.0,
bias_after_scale=False)
fc_out = fluid.layers.elementwise_add(x=fc_out, y=fc_out_mask)
one_hot_labels = fluid.layers.one_hot(
input=mask_label, depth=self._voc_size)
type_indicator = fluid.layers.elementwise_sub(
x=type_indicator, y=one_hot_labels)
num_candidates = fluid.layers.reduce_sum(input=type_indicator, dim=-1)
mask_type = fluid.layers.cast(x=mask_type, dtype='float32')
soft_labels = ((1 + mask_type) * self._e_soft_label +
(1 - mask_type) * self._r_soft_label) / 2.0
soft_labels = fluid.layers.expand(soft_labels, [1, self._voc_size])
soft_labels = soft_labels * one_hot_labels + (1.0 - soft_labels) * \
fluid.layers.elementwise_div(x=type_indicator, y=num_candidates, axis=0)
soft_labels.stop_gradient = True
mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
logits=fc_out, label=soft_labels, soft_label=True)
mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)
return mean_mask_lm_loss, fc_out
| __init__ | identifier_name |
gran_model.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GRAN model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import paddle.fluid as fluid
from model.graph_encoder import encoder, pre_process_layer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.info(logger.getEffectiveLevel())
class GRANModel(object):
"""
GRAN model class.
"""
def __init__(self,
input_ids,
input_mask,
edge_labels,
config,
weight_sharing=True,
use_fp16=False):
self._n_layer = config['num_hidden_layers']
self._n_head = config['num_attention_heads']
self._emb_size = config['hidden_size']
self._intermediate_size = config['intermediate_size']
self._hidden_act = config['hidden_act']
self._prepostprocess_dropout = config['hidden_dropout_prob']
self._attention_dropout = config['attention_dropout_prob']
self._voc_size = config['vocab_size']
self._n_relation = config['num_relations']
self._n_edge = config['num_edges']
self._max_seq_len = config['max_seq_len']
self._max_arity = config['max_arity']
self._e_soft_label = config['entity_soft_label']
self._r_soft_label = config['relation_soft_label']
self._weight_sharing = weight_sharing
self._node_emb_name = "node_embedding"
self._edge_emb_name_k = "edge_embedding_key"
self._edge_emb_name_v = "edge_embedding_value"
self._dtype = "float16" if use_fp16 else "float32"
# Initialize all weights by truncated normal initializer, and all biases
# will be initialized by constant zero by default.
self._param_initializer = fluid.initializer.TruncatedNormal(
scale=config['initializer_range'])
self._build_model(input_ids, input_mask, edge_labels)
def _build_model(self, input_ids, input_mask, edge_labels):
# get node embeddings of input tokens
emb_out = fluid.layers.embedding(
input=input_ids,
size=[self._voc_size, self._emb_size],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._node_emb_name, initializer=self._param_initializer),
is_sparse=False)
emb_out = pre_process_layer(
emb_out, 'nd', self._prepostprocess_dropout, name='pre_encoder')
# get edge embeddings between input tokens
edge_labels = fluid.layers.reshape(
x=edge_labels, shape=[-1, 1], inplace=True)
edges_key = fluid.layers.embedding(
input=edge_labels,
size=[self._n_edge, self._emb_size // self._n_head],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._edge_emb_name_k,
initializer=self._param_initializer),
is_sparse=False)
edges_value = fluid.layers.embedding(
input=edge_labels,
size=[self._n_edge, self._emb_size // self._n_head],
dtype=self._dtype,
param_attr=fluid.ParamAttr(
name=self._edge_emb_name_v,
initializer=self._param_initializer),
is_sparse=False)
edge_mask = fluid.layers.sign(
fluid.layers.cast(
x=edge_labels, dtype='float32'))
# edge_mask.stop_gradient = True
edges_key = fluid.layers.elementwise_mul(
x=edges_key, y=edge_mask, axis=0)
edges_key = fluid.layers.reshape(
x=edges_key,
shape=[self._max_seq_len, self._max_seq_len, -1],
inplace=True)
edges_value = fluid.layers.elementwise_mul(
x=edges_value, y=edge_mask, axis=0)
edges_value = fluid.layers.reshape(
x=edges_value,
shape=[self._max_seq_len, self._max_seq_len, -1],
inplace=True)
# get multi-head self-attention mask
if self._dtype == "float16":
input_mask = fluid.layers.cast(x=input_mask, dtype=self._dtype)
self_attn_mask = fluid.layers.matmul(
x=input_mask, y=input_mask, transpose_y=True)
self_attn_mask = fluid.layers.scale(
x=self_attn_mask,
scale=1000000.0,
bias=-1.0,
bias_after_scale=False)
n_head_self_attn_mask = fluid.layers.stack(
x=[self_attn_mask] * self._n_head, axis=1)
n_head_self_attn_mask.stop_gradient = True
# stack of graph transformer encoders
self._enc_out = encoder(
enc_input=emb_out,
edges_key=edges_key,
edges_value=edges_value,
attn_bias=n_head_self_attn_mask,
n_layer=self._n_layer,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._intermediate_size,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=0,
hidden_act=self._hidden_act,
preprocess_cmd="",
postprocess_cmd="dan",
param_initializer=self._param_initializer,
name='encoder')
def get_sequence_output(self):
return self._enc_out
def get_mask_lm_output(self, mask_pos, mask_label, mask_type):
"""
Get the loss & logits for masked entity/relation prediction.
"""
mask_pos = fluid.layers.cast(x=mask_pos, dtype='int32')
reshaped_emb_out = fluid.layers.reshape(
x=self._enc_out, shape=[-1, self._emb_size])
# extract masked tokens' feature
mask_feat = fluid.layers.gather(input=reshaped_emb_out, index=mask_pos)
# transform: fc
mask_trans_feat = fluid.layers.fc(
input=mask_feat,
size=self._emb_size,
act=self._hidden_act,
param_attr=fluid.ParamAttr(
name='mask_lm_trans_fc.w_0',
initializer=self._param_initializer),
bias_attr=fluid.ParamAttr(name='mask_lm_trans_fc.b_0'))
# transform: layer norm
mask_trans_feat = pre_process_layer(
mask_trans_feat, 'n', name='mask_lm_trans')
mask_lm_out_bias_attr = fluid.ParamAttr(
name="mask_lm_out_fc.b_0",
initializer=fluid.initializer.Constant(value=0.0))
if self._weight_sharing:
fc_out = fluid.layers.matmul(
x=mask_trans_feat,
y=fluid.default_main_program().global_block().var(
self._node_emb_name),
transpose_y=True)
fc_out += fluid.layers.create_parameter(
shape=[self._voc_size],
dtype=self._dtype,
attr=mask_lm_out_bias_attr,
is_bias=True)
else:
|
special_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label, shape=[-1, 2], dtype='int64', value=-1)
relation_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label,
shape=[-1, self._n_relation],
dtype='int64',
value=-1)
entity_indicator = fluid.layers.fill_constant_batch_size_like(
input=mask_label,
shape=[-1, (self._voc_size - self._n_relation - 2)],
dtype='int64',
value=1)
type_indicator = fluid.layers.concat(
input=[relation_indicator, entity_indicator], axis=-1)
type_indicator = fluid.layers.elementwise_mul(
x=type_indicator, y=mask_type, axis=0)
type_indicator = fluid.layers.concat(
input=[special_indicator, type_indicator], axis=-1)
type_indicator = fluid.layers.cast(x=type_indicator, dtype='float32')
type_indicator = fluid.layers.thresholded_relu(
x=type_indicator, threshold=0.0)
fc_out_mask = fluid.layers.scale(
x=type_indicator,
scale=1000000.0,
bias=-1.0,
bias_after_scale=False)
fc_out = fluid.layers.elementwise_add(x=fc_out, y=fc_out_mask)
one_hot_labels = fluid.layers.one_hot(
input=mask_label, depth=self._voc_size)
type_indicator = fluid.layers.elementwise_sub(
x=type_indicator, y=one_hot_labels)
num_candidates = fluid.layers.reduce_sum(input=type_indicator, dim=-1)
mask_type = fluid.layers.cast(x=mask_type, dtype='float32')
soft_labels = ((1 + mask_type) * self._e_soft_label +
(1 - mask_type) * self._r_soft_label) / 2.0
soft_labels = fluid.layers.expand(soft_labels, [1, self._voc_size])
soft_labels = soft_labels * one_hot_labels + (1.0 - soft_labels) * \
fluid.layers.elementwise_div(x=type_indicator, y=num_candidates, axis=0)
soft_labels.stop_gradient = True
mask_lm_loss = fluid.layers.softmax_with_cross_entropy(
logits=fc_out, label=soft_labels, soft_label=True)
mean_mask_lm_loss = fluid.layers.mean(mask_lm_loss)
return mean_mask_lm_loss, fc_out
| fc_out = fluid.layers.fc(input=mask_trans_feat,
size=self._voc_size,
param_attr=fluid.ParamAttr(
name="mask_lm_out_fc.w_0",
initializer=self._param_initializer),
bias_attr=mask_lm_out_bias_attr) | conditional_block |
generate_primers.go | package main
import (
"fmt"
"os"
"strings"
"bufio"
"strconv"
"sort"
"flag"
)
type Sequence struct {
name string
seq string
isForward bool
val int
}
// Generates the primers
func generatePrimers(seqFile, fastaFile string, ignore int) (map[string]string){
degens := findDegens(seqFile, fastaFile, ignore)
primers := make(map[string]string)
// For each sequence
for key, _ := range degens {
primers[key] = ""
// For each base in the current sequence
for i := 0; i < len(degens[key]); i++ {
// If the current sequence is reverse, get the code of the reverse complement
// of the base and insert it at the beginning of the primer
if key[len(key)-7:] == "reverse" {
primers[key] = getCode(reverseComplement(degens[key][i])) + primers[key]
// If the current sequence is forward, get the code of the base and put
// it at the end of the primer
} else {
primers[key] += getCode(degens[key][i])
}
}
}
/*
for key, val := range primers {
fmt.Println(key + ":\t" + val)
}
*/
return primers
}
// Returns ambiguity code for a string of bases. The string of bases must be in
// alphabetical order. "AGT" will work but "ATG" will not.
func getCode(bases string) (string) {
if len(bases) == 1 {
return bases
}
switch bases {
case "AG":
return "R"
case "CT":
return "Y"
case "CG":
return "S"
case "AT":
return "W"
case "GT":
return "K"
case "AC":
return "M"
case "CGT":
return "B"
case "AGT":
return "D"
case "ACT":
return "H"
case "ACG":
return "V"
case "ACGT":
return "N"
default:
return "(ERROR)"
}
}
func findDegens(seqFile, fastaFile string, ignore int) (map[string]map[int]string) {
// Key is sequence name, value is another map where key is the position of a base within the
// sequence and value is a sorted string containing all bases that appeared in that position
out := make(map[string]map[int]string)
sequences := getSeqs(seqFile)
genomes := getGenomes(fastaFile)
// For each sequence
for _, sequence := range sequences {
// If sequence is a reverse, perform the reverse complement on it
if !(sequence.isForward) {
sequence.seq = reverseComplement(sequence.seq)
}
out[sequence.name] = make(map[int]string)
// Initialize the first array slot of each sequence to the correct char
for i, _ := range sequence.seq {
out[sequence.name][i] += string(sequence.seq[i])
}
// For each genome
for _, genome := range genomes {
count := 0
// If sequence is forward
if sequence.isForward {
// Iterate over each base in the sequence, looking for differences in the range specified by the sequence.val
for i, _ := range sequence.seq {
if sequence.seq[i] != genome[sequence.val + i - 1] && !(strings.Contains(out[sequence.name][i], string(genome[sequence.val + i - 1]))) {
// Don't count degeneracy if the genome has '-'
if genome[sequence.val + i - 1] != 45 {
count++
if count >= ignore {
out[sequence.name][i] += string(genome[sequence.val + i - 1])
}
}
}
}
// If the sequence is reverse
} else {
// Iterate over the sequence, performing a little bit of gymnastics to iterate forward over both the sequence.seq and the genome even when the for loop is iterating backward
for i := len(sequence.seq) - 1; i >= 0; i-- {
if sequence.seq[(len(sequence.seq)-1) - i] != genome[sequence.val - i - 1] && !(strings.Contains(out[sequence.name][(len(sequence.seq)-1) - i], string(genome[sequence.val - i - 1]))) {
// Don't count degeneracy if the genome has '-'
if genome[sequence.val - i - 1] != 45 {
count++
if count >= ignore {
out[sequence.name][(len(sequence.seq)-1) - i] += string(genome[sequence.val - i - 1])
}
}
}
}
}
}
}
// Sort each string
for key, val := range out {
for i, s := range val {
out[key][i] = sortString(s)
}
}
return out
}
// Gets the nucleotide sequences from the .fasta file and
// returns them, each as single long string in a string array
func getGenomes(fastaFile string) ([]string) {
var out []string
// Open fasta file
fi, err := os.Open(fastaFile)
if err != nil {
fmt.Println("Error - couldn't open .fasta file")
fmt.Println(err)
os.Exit(1)
}
scanner := bufio.NewScanner(fi)
var temp string
// Skip first line (Assuming it's a header)
scanner.Scan()
// For each line in the file
for scanner.Scan() {
line := scanner.Text()
// If the line begins with '>', assume it's a header
if line[0] == 62 {
out = append(out, temp)
temp = ""
// If the line doesn't begin with '>', assume it's a seuence of nucleotides
} else {
temp += line
}
}
return out
}
// Gets the sequences from the .seq file and returns them in an array of Sequence structs
// Assuming you have the following in the .seq file:
// >99_forward
// ACGT
// You will get a Sequence struct with the following fields:
// name = "99_forward"
// seq = "ACGT"
// isForward = true
// val = 99
func getSeqs(seqFile string) ([]Sequence) |
// Returns the reverse complement of a sequence of nucleotides
// Only works with 'A', 'C', 'G', 'T' chars
func reverseComplement(sequence string) (out string) {
for i := len(sequence)-1; i >= 0; i-- {
switch sequence[i] {
case 65:
out += "T"
break
case 84:
out += "A"
break
case 71:
out += "C"
break
case 67:
out += "G"
break
default:
fmt.Println("Error -- Encountered non-ATGC char in sequence")
}
}
return
}
// Return true if token is in array (int and int array respectively)
func contains(array []int, token int) (bool) {
for _, item := range array {
if item == token {
return true
}
}
return false
}
// Sorts str
func sortString(str string) (string) {
if len(str) <= 1 {
return str
}
s := strings.Split(str, "")
sort.Strings(s)
return strings.Join(s, "")
}
// Prints all relevent data in nice format
func printOutput(primers map[string]string) {
// Number of degens is the key, array of the names of sequences
// that have that number of degens is the value
forward := make(map[int][]string)
reverse := make(map[int][]string)
// Keeps track of the amount of degens found so that it can be sorted
var forwardDegens []int
var reverseDegens []int
// Name of sequence is the key, array of positions where degens occur
// within that sequence is value
positions := make(map[string][]int)
// Gather information about each primer, including the amount of degens
// and their positions
for key, val := range primers {
degens := 0
for i, char := range val {
if char != 'A' && char != 'C' && char != 'G' && char != 'T' {
degens++
positions[key] = append(positions[key], i)
}
}
data := strings.Split(key, "_")
if data[1] == "forward" {
forward[degens] = append(forward[degens], key)
if !(contains(forwardDegens, degens)){
forwardDegens = append(forwardDegens, degens)
}
} else {
reverse[degens] = append(reverse[degens], key)
if !(contains(reverseDegens, degens)){
reverseDegens = append(reverseDegens, degens)
}
}
}
sort.Ints(forwardDegens)
sort.Ints(reverseDegens)
// Print relevant data
fmt.Println("\nFORWARD SEQUENCES")
fmt.Println("Name\t\tPrimer\t\t\t# of degeneracies")
fmt.Println("---------------------------------------------------------")
for _, num := range forwardDegens {
for _, name := range forward[num] {
fmt.Println(name + "\t" + primers[name] + "\t" + strconv.Itoa(num))
var carrots []rune
for i := 0; i < len(primers[name]); i++ {
carrots = append(carrots, ' ')
}
for _, index := range positions[name] {
carrots[index] = '^'
}
fmt.Println("\t\t" + string(carrots))
fmt.Println()
}
}
fmt.Println("\nREVERSE SEQUENCES")
fmt.Println("Name\t\tPrimer\t\t\t# of degeneracies")
fmt.Println("---------------------------------------------------------")
for _, num := range reverseDegens {
for _, name := range reverse[num] {
fmt.Println(name + "\t" + primers[name] + "\t" + strconv.Itoa(num))
var carrots []rune
for i := 0; i < len(primers[name]); i++ {
carrots = append(carrots, ' ')
}
for _, index := range positions[name] {
carrots[index] = '^'
}
fmt.Println("\t\t" + string(carrots))
fmt.Println()
}
}
}
func main() {
seqPtr := flag.String("s", "", "[Required] Path to .seq file")
fastaPtr := flag.String("f", "", "[Required] Path to .fasta file")
ignorePtr := flag.Int("i", 0, "Number of SNPs to ignore before considering a position a degeneracy (default 0)")
flag.Parse()
primers := generatePrimers(*seqPtr, *fastaPtr, *ignorePtr)
printOutput(primers)
}
| {
var out []Sequence
// Open the .seq file
fi, err := os.Open(seqFile)
if err != nil {
fmt.Println("Error - couldn't open .seq file")
os.Exit(1)
}
scanner := bufio.NewScanner(fi)
// For each line in the file
for scanner.Scan() {
var temp Sequence
// Get name
line := scanner.Text()[1:]
temp.name = line
// Get value
split_line := strings.Split(line, "_")
temp.val, _ = strconv.Atoi(split_line[0])
// Get isForward
if split_line[1] == "forward" {
temp.isForward = true
} else {
temp.isForward = false
}
// Get sequence
scanner.Scan()
temp.seq = scanner.Text()
out = append(out, temp)
}
return out
} | identifier_body |
generate_primers.go | package main
import (
"fmt"
"os"
"strings"
"bufio"
"strconv"
"sort"
"flag"
)
type Sequence struct {
name string
seq string
isForward bool
val int
}
// Generates the primers
func generatePrimers(seqFile, fastaFile string, ignore int) (map[string]string){
degens := findDegens(seqFile, fastaFile, ignore)
primers := make(map[string]string)
// For each sequence
for key, _ := range degens {
primers[key] = ""
// For each base in the current sequence
for i := 0; i < len(degens[key]); i++ {
// If the current sequence is reverse, get the code of the reverse complement
// of the base and insert it at the beginning of the primer
if key[len(key)-7:] == "reverse" {
primers[key] = getCode(reverseComplement(degens[key][i])) + primers[key]
// If the current sequence is forward, get the code of the base and put
// it at the end of the primer
} else {
primers[key] += getCode(degens[key][i])
}
}
}
/*
for key, val := range primers {
fmt.Println(key + ":\t" + val)
}
*/
return primers
}
// Returns ambiguity code for a string of bases. The string of bases must be in
// alphabetical order. "AGT" will work but "ATG" will not.
func getCode(bases string) (string) {
if len(bases) == 1 {
return bases
}
switch bases {
case "AG":
return "R"
case "CT":
return "Y"
case "CG":
return "S"
case "AT":
return "W"
case "GT":
return "K"
case "AC":
return "M"
case "CGT":
return "B"
case "AGT":
return "D"
case "ACT":
return "H"
case "ACG":
return "V"
case "ACGT":
return "N"
default:
return "(ERROR)"
}
}
func findDegens(seqFile, fastaFile string, ignore int) (map[string]map[int]string) {
// Key is sequence name, value is another map where key is the position of a base within the
// sequence and value is a sorted string containing all bases that appeared in that position
out := make(map[string]map[int]string) |
sequences := getSeqs(seqFile)
genomes := getGenomes(fastaFile)
// For each sequence
for _, sequence := range sequences {
// If sequence is a reverse, perform the reverse complement on it
if !(sequence.isForward) {
sequence.seq = reverseComplement(sequence.seq)
}
out[sequence.name] = make(map[int]string)
// Initialize the first array slot of each sequence to the correct char
for i, _ := range sequence.seq {
out[sequence.name][i] += string(sequence.seq[i])
}
// For each genome
for _, genome := range genomes {
count := 0
// If sequence is forward
if sequence.isForward {
// Iterate over each base in the sequence, looking for differences in the range specified by the sequence.val
for i, _ := range sequence.seq {
if sequence.seq[i] != genome[sequence.val + i - 1] && !(strings.Contains(out[sequence.name][i], string(genome[sequence.val + i - 1]))) {
// Don't count degeneracy if the genome has '-'
if genome[sequence.val + i - 1] != 45 {
count++
if count >= ignore {
out[sequence.name][i] += string(genome[sequence.val + i - 1])
}
}
}
}
// If the sequence is reverse
} else {
// Iterate over the sequence, performing a little bit of gymnastics to iterate forward over both the sequence.seq and the genome even when the for loop is iterating backward
for i := len(sequence.seq) - 1; i >= 0; i-- {
if sequence.seq[(len(sequence.seq)-1) - i] != genome[sequence.val - i - 1] && !(strings.Contains(out[sequence.name][(len(sequence.seq)-1) - i], string(genome[sequence.val - i - 1]))) {
// Don't count degeneracy if the genome has '-'
if genome[sequence.val - i - 1] != 45 {
count++
if count >= ignore {
out[sequence.name][(len(sequence.seq)-1) - i] += string(genome[sequence.val - i - 1])
}
}
}
}
}
}
}
// Sort each string
for key, val := range out {
for i, s := range val {
out[key][i] = sortString(s)
}
}
return out
}
// Gets the nucleotide sequences from the .fasta file and
// returns them, each as single long string in a string array
func getGenomes(fastaFile string) ([]string) {
var out []string
// Open fasta file
fi, err := os.Open(fastaFile)
if err != nil {
fmt.Println("Error - couldn't open .fasta file")
fmt.Println(err)
os.Exit(1)
}
scanner := bufio.NewScanner(fi)
var temp string
// Skip first line (Assuming it's a header)
scanner.Scan()
// For each line in the file
for scanner.Scan() {
line := scanner.Text()
// If the line begins with '>', assume it's a header
if line[0] == 62 {
out = append(out, temp)
temp = ""
// If the line doesn't begin with '>', assume it's a seuence of nucleotides
} else {
temp += line
}
}
return out
}
// Gets the sequences from the .seq file and returns them in an array of Sequence structs
// Assuming you have the following in the .seq file:
// >99_forward
// ACGT
// You will get a Sequence struct with the following fields:
// name = "99_forward"
// seq = "ACGT"
// isForward = true
// val = 99
func getSeqs(seqFile string) ([]Sequence) {
var out []Sequence
// Open the .seq file
fi, err := os.Open(seqFile)
if err != nil {
fmt.Println("Error - couldn't open .seq file")
os.Exit(1)
}
scanner := bufio.NewScanner(fi)
// For each line in the file
for scanner.Scan() {
var temp Sequence
// Get name
line := scanner.Text()[1:]
temp.name = line
// Get value
split_line := strings.Split(line, "_")
temp.val, _ = strconv.Atoi(split_line[0])
// Get isForward
if split_line[1] == "forward" {
temp.isForward = true
} else {
temp.isForward = false
}
// Get sequence
scanner.Scan()
temp.seq = scanner.Text()
out = append(out, temp)
}
return out
}
// Returns the reverse complement of a sequence of nucleotides
// Only works with 'A', 'C', 'G', 'T' chars
func reverseComplement(sequence string) (out string) {
for i := len(sequence)-1; i >= 0; i-- {
switch sequence[i] {
case 65:
out += "T"
break
case 84:
out += "A"
break
case 71:
out += "C"
break
case 67:
out += "G"
break
default:
fmt.Println("Error -- Encountered non-ATGC char in sequence")
}
}
return
}
// Return true if token is in array (int and int array respectively)
func contains(array []int, token int) (bool) {
for _, item := range array {
if item == token {
return true
}
}
return false
}
// Sorts str
func sortString(str string) (string) {
if len(str) <= 1 {
return str
}
s := strings.Split(str, "")
sort.Strings(s)
return strings.Join(s, "")
}
// Prints all relevent data in nice format
func printOutput(primers map[string]string) {
// Number of degens is the key, array of the names of sequences
// that have that number of degens is the value
forward := make(map[int][]string)
reverse := make(map[int][]string)
// Keeps track of the amount of degens found so that it can be sorted
var forwardDegens []int
var reverseDegens []int
// Name of sequence is the key, array of positions where degens occur
// within that sequence is value
positions := make(map[string][]int)
// Gather information about each primer, including the amount of degens
// and their positions
for key, val := range primers {
degens := 0
for i, char := range val {
if char != 'A' && char != 'C' && char != 'G' && char != 'T' {
degens++
positions[key] = append(positions[key], i)
}
}
data := strings.Split(key, "_")
if data[1] == "forward" {
forward[degens] = append(forward[degens], key)
if !(contains(forwardDegens, degens)){
forwardDegens = append(forwardDegens, degens)
}
} else {
reverse[degens] = append(reverse[degens], key)
if !(contains(reverseDegens, degens)){
reverseDegens = append(reverseDegens, degens)
}
}
}
sort.Ints(forwardDegens)
sort.Ints(reverseDegens)
// Print relevant data
fmt.Println("\nFORWARD SEQUENCES")
fmt.Println("Name\t\tPrimer\t\t\t# of degeneracies")
fmt.Println("---------------------------------------------------------")
for _, num := range forwardDegens {
for _, name := range forward[num] {
fmt.Println(name + "\t" + primers[name] + "\t" + strconv.Itoa(num))
var carrots []rune
for i := 0; i < len(primers[name]); i++ {
carrots = append(carrots, ' ')
}
for _, index := range positions[name] {
carrots[index] = '^'
}
fmt.Println("\t\t" + string(carrots))
fmt.Println()
}
}
fmt.Println("\nREVERSE SEQUENCES")
fmt.Println("Name\t\tPrimer\t\t\t# of degeneracies")
fmt.Println("---------------------------------------------------------")
for _, num := range reverseDegens {
for _, name := range reverse[num] {
fmt.Println(name + "\t" + primers[name] + "\t" + strconv.Itoa(num))
var carrots []rune
for i := 0; i < len(primers[name]); i++ {
carrots = append(carrots, ' ')
}
for _, index := range positions[name] {
carrots[index] = '^'
}
fmt.Println("\t\t" + string(carrots))
fmt.Println()
}
}
}
func main() {
seqPtr := flag.String("s", "", "[Required] Path to .seq file")
fastaPtr := flag.String("f", "", "[Required] Path to .fasta file")
ignorePtr := flag.Int("i", 0, "Number of SNPs to ignore before considering a position a degeneracy (default 0)")
flag.Parse()
primers := generatePrimers(*seqPtr, *fastaPtr, *ignorePtr)
printOutput(primers)
} | random_line_split | |
generate_primers.go | package main
import (
"fmt"
"os"
"strings"
"bufio"
"strconv"
"sort"
"flag"
)
type Sequence struct {
name string
seq string
isForward bool
val int
}
// Generates the primers
func generatePrimers(seqFile, fastaFile string, ignore int) (map[string]string){
degens := findDegens(seqFile, fastaFile, ignore)
primers := make(map[string]string)
// For each sequence
for key, _ := range degens {
primers[key] = ""
// For each base in the current sequence
for i := 0; i < len(degens[key]); i++ {
// If the current sequence is reverse, get the code of the reverse complement
// of the base and insert it at the beginning of the primer
if key[len(key)-7:] == "reverse" {
primers[key] = getCode(reverseComplement(degens[key][i])) + primers[key]
// If the current sequence is forward, get the code of the base and put
// it at the end of the primer
} else {
primers[key] += getCode(degens[key][i])
}
}
}
/*
for key, val := range primers {
fmt.Println(key + ":\t" + val)
}
*/
return primers
}
// Returns ambiguity code for a string of bases. The string of bases must be in
// alphabetical order. "AGT" will work but "ATG" will not.
func getCode(bases string) (string) {
if len(bases) == 1 {
return bases
}
switch bases {
case "AG":
return "R"
case "CT":
return "Y"
case "CG":
return "S"
case "AT":
return "W"
case "GT":
return "K"
case "AC":
return "M"
case "CGT":
return "B"
case "AGT":
return "D"
case "ACT":
return "H"
case "ACG":
return "V"
case "ACGT":
return "N"
default:
return "(ERROR)"
}
}
func findDegens(seqFile, fastaFile string, ignore int) (map[string]map[int]string) {
// Key is sequence name, value is another map where key is the position of a base within the
// sequence and value is a sorted string containing all bases that appeared in that position
out := make(map[string]map[int]string)
sequences := getSeqs(seqFile)
genomes := getGenomes(fastaFile)
// For each sequence
for _, sequence := range sequences {
// If sequence is a reverse, perform the reverse complement on it
if !(sequence.isForward) {
sequence.seq = reverseComplement(sequence.seq)
}
out[sequence.name] = make(map[int]string)
// Initialize the first array slot of each sequence to the correct char
for i, _ := range sequence.seq {
out[sequence.name][i] += string(sequence.seq[i])
}
// For each genome
for _, genome := range genomes {
count := 0
// If sequence is forward
if sequence.isForward {
// Iterate over each base in the sequence, looking for differences in the range specified by the sequence.val
for i, _ := range sequence.seq {
if sequence.seq[i] != genome[sequence.val + i - 1] && !(strings.Contains(out[sequence.name][i], string(genome[sequence.val + i - 1]))) {
// Don't count degeneracy if the genome has '-'
if genome[sequence.val + i - 1] != 45 {
count++
if count >= ignore {
out[sequence.name][i] += string(genome[sequence.val + i - 1])
}
}
}
}
// If the sequence is reverse
} else {
// Iterate over the sequence, performing a little bit of gymnastics to iterate forward over both the sequence.seq and the genome even when the for loop is iterating backward
for i := len(sequence.seq) - 1; i >= 0; i-- {
if sequence.seq[(len(sequence.seq)-1) - i] != genome[sequence.val - i - 1] && !(strings.Contains(out[sequence.name][(len(sequence.seq)-1) - i], string(genome[sequence.val - i - 1]))) {
// Don't count degeneracy if the genome has '-'
if genome[sequence.val - i - 1] != 45 {
count++
if count >= ignore {
out[sequence.name][(len(sequence.seq)-1) - i] += string(genome[sequence.val - i - 1])
}
}
}
}
}
}
}
// Sort each string
for key, val := range out {
for i, s := range val {
out[key][i] = sortString(s)
}
}
return out
}
// Gets the nucleotide sequences from the .fasta file and
// returns them, each as single long string in a string array
func getGenomes(fastaFile string) ([]string) {
var out []string
// Open fasta file
fi, err := os.Open(fastaFile)
if err != nil {
fmt.Println("Error - couldn't open .fasta file")
fmt.Println(err)
os.Exit(1)
}
scanner := bufio.NewScanner(fi)
var temp string
// Skip first line (Assuming it's a header)
scanner.Scan()
// For each line in the file
for scanner.Scan() {
line := scanner.Text()
// If the line begins with '>', assume it's a header
if line[0] == 62 {
out = append(out, temp)
temp = ""
// If the line doesn't begin with '>', assume it's a seuence of nucleotides
} else {
temp += line
}
}
return out
}
// Gets the sequences from the .seq file and returns them in an array of Sequence structs
// Assuming you have the following in the .seq file:
// >99_forward
// ACGT
// You will get a Sequence struct with the following fields:
// name = "99_forward"
// seq = "ACGT"
// isForward = true
// val = 99
func getSeqs(seqFile string) ([]Sequence) {
var out []Sequence
// Open the .seq file
fi, err := os.Open(seqFile)
if err != nil {
fmt.Println("Error - couldn't open .seq file")
os.Exit(1)
}
scanner := bufio.NewScanner(fi)
// For each line in the file
for scanner.Scan() {
var temp Sequence
// Get name
line := scanner.Text()[1:]
temp.name = line
// Get value
split_line := strings.Split(line, "_")
temp.val, _ = strconv.Atoi(split_line[0])
// Get isForward
if split_line[1] == "forward" {
temp.isForward = true
} else {
temp.isForward = false
}
// Get sequence
scanner.Scan()
temp.seq = scanner.Text()
out = append(out, temp)
}
return out
}
// Returns the reverse complement of a sequence of nucleotides
// Only works with 'A', 'C', 'G', 'T' chars
func | (sequence string) (out string) {
for i := len(sequence)-1; i >= 0; i-- {
switch sequence[i] {
case 65:
out += "T"
break
case 84:
out += "A"
break
case 71:
out += "C"
break
case 67:
out += "G"
break
default:
fmt.Println("Error -- Encountered non-ATGC char in sequence")
}
}
return
}
// Return true if token is in array (int and int array respectively)
func contains(array []int, token int) (bool) {
for _, item := range array {
if item == token {
return true
}
}
return false
}
// Sorts str
func sortString(str string) (string) {
if len(str) <= 1 {
return str
}
s := strings.Split(str, "")
sort.Strings(s)
return strings.Join(s, "")
}
// Prints all relevent data in nice format
func printOutput(primers map[string]string) {
// Number of degens is the key, array of the names of sequences
// that have that number of degens is the value
forward := make(map[int][]string)
reverse := make(map[int][]string)
// Keeps track of the amount of degens found so that it can be sorted
var forwardDegens []int
var reverseDegens []int
// Name of sequence is the key, array of positions where degens occur
// within that sequence is value
positions := make(map[string][]int)
// Gather information about each primer, including the amount of degens
// and their positions
for key, val := range primers {
degens := 0
for i, char := range val {
if char != 'A' && char != 'C' && char != 'G' && char != 'T' {
degens++
positions[key] = append(positions[key], i)
}
}
data := strings.Split(key, "_")
if data[1] == "forward" {
forward[degens] = append(forward[degens], key)
if !(contains(forwardDegens, degens)){
forwardDegens = append(forwardDegens, degens)
}
} else {
reverse[degens] = append(reverse[degens], key)
if !(contains(reverseDegens, degens)){
reverseDegens = append(reverseDegens, degens)
}
}
}
sort.Ints(forwardDegens)
sort.Ints(reverseDegens)
// Print relevant data
fmt.Println("\nFORWARD SEQUENCES")
fmt.Println("Name\t\tPrimer\t\t\t# of degeneracies")
fmt.Println("---------------------------------------------------------")
for _, num := range forwardDegens {
for _, name := range forward[num] {
fmt.Println(name + "\t" + primers[name] + "\t" + strconv.Itoa(num))
var carrots []rune
for i := 0; i < len(primers[name]); i++ {
carrots = append(carrots, ' ')
}
for _, index := range positions[name] {
carrots[index] = '^'
}
fmt.Println("\t\t" + string(carrots))
fmt.Println()
}
}
fmt.Println("\nREVERSE SEQUENCES")
fmt.Println("Name\t\tPrimer\t\t\t# of degeneracies")
fmt.Println("---------------------------------------------------------")
for _, num := range reverseDegens {
for _, name := range reverse[num] {
fmt.Println(name + "\t" + primers[name] + "\t" + strconv.Itoa(num))
var carrots []rune
for i := 0; i < len(primers[name]); i++ {
carrots = append(carrots, ' ')
}
for _, index := range positions[name] {
carrots[index] = '^'
}
fmt.Println("\t\t" + string(carrots))
fmt.Println()
}
}
}
func main() {
seqPtr := flag.String("s", "", "[Required] Path to .seq file")
fastaPtr := flag.String("f", "", "[Required] Path to .fasta file")
ignorePtr := flag.Int("i", 0, "Number of SNPs to ignore before considering a position a degeneracy (default 0)")
flag.Parse()
primers := generatePrimers(*seqPtr, *fastaPtr, *ignorePtr)
printOutput(primers)
}
| reverseComplement | identifier_name |
generate_primers.go | package main
import (
"fmt"
"os"
"strings"
"bufio"
"strconv"
"sort"
"flag"
)
type Sequence struct {
name string
seq string
isForward bool
val int
}
// Generates the primers
func generatePrimers(seqFile, fastaFile string, ignore int) (map[string]string){
degens := findDegens(seqFile, fastaFile, ignore)
primers := make(map[string]string)
// For each sequence
for key, _ := range degens {
primers[key] = ""
// For each base in the current sequence
for i := 0; i < len(degens[key]); i++ {
// If the current sequence is reverse, get the code of the reverse complement
// of the base and insert it at the beginning of the primer
if key[len(key)-7:] == "reverse" {
primers[key] = getCode(reverseComplement(degens[key][i])) + primers[key]
// If the current sequence is forward, get the code of the base and put
// it at the end of the primer
} else {
primers[key] += getCode(degens[key][i])
}
}
}
/*
for key, val := range primers {
fmt.Println(key + ":\t" + val)
}
*/
return primers
}
// Returns ambiguity code for a string of bases. The string of bases must be in
// alphabetical order. "AGT" will work but "ATG" will not.
func getCode(bases string) (string) {
if len(bases) == 1 {
return bases
}
switch bases {
case "AG":
return "R"
case "CT":
return "Y"
case "CG":
return "S"
case "AT":
return "W"
case "GT":
return "K"
case "AC":
return "M"
case "CGT":
return "B"
case "AGT":
return "D"
case "ACT":
return "H"
case "ACG":
return "V"
case "ACGT":
return "N"
default:
return "(ERROR)"
}
}
func findDegens(seqFile, fastaFile string, ignore int) (map[string]map[int]string) {
// Key is sequence name, value is another map where key is the position of a base within the
// sequence and value is a sorted string containing all bases that appeared in that position
out := make(map[string]map[int]string)
sequences := getSeqs(seqFile)
genomes := getGenomes(fastaFile)
// For each sequence
for _, sequence := range sequences {
// If sequence is a reverse, perform the reverse complement on it
if !(sequence.isForward) {
sequence.seq = reverseComplement(sequence.seq)
}
out[sequence.name] = make(map[int]string)
// Initialize the first array slot of each sequence to the correct char
for i, _ := range sequence.seq {
out[sequence.name][i] += string(sequence.seq[i])
}
// For each genome
for _, genome := range genomes {
count := 0
// If sequence is forward
if sequence.isForward {
// Iterate over each base in the sequence, looking for differences in the range specified by the sequence.val
for i, _ := range sequence.seq {
if sequence.seq[i] != genome[sequence.val + i - 1] && !(strings.Contains(out[sequence.name][i], string(genome[sequence.val + i - 1]))) {
// Don't count degeneracy if the genome has '-'
if genome[sequence.val + i - 1] != 45 {
count++
if count >= ignore {
out[sequence.name][i] += string(genome[sequence.val + i - 1])
}
}
}
}
// If the sequence is reverse
} else {
// Iterate over the sequence, performing a little bit of gymnastics to iterate forward over both the sequence.seq and the genome even when the for loop is iterating backward
for i := len(sequence.seq) - 1; i >= 0; i-- {
if sequence.seq[(len(sequence.seq)-1) - i] != genome[sequence.val - i - 1] && !(strings.Contains(out[sequence.name][(len(sequence.seq)-1) - i], string(genome[sequence.val - i - 1]))) {
// Don't count degeneracy if the genome has '-'
if genome[sequence.val - i - 1] != 45 {
count++
if count >= ignore {
out[sequence.name][(len(sequence.seq)-1) - i] += string(genome[sequence.val - i - 1])
}
}
}
}
}
}
}
// Sort each string
for key, val := range out {
for i, s := range val {
out[key][i] = sortString(s)
}
}
return out
}
// Gets the nucleotide sequences from the .fasta file and
// returns them, each as single long string in a string array
func getGenomes(fastaFile string) ([]string) {
var out []string
// Open fasta file
fi, err := os.Open(fastaFile)
if err != nil {
fmt.Println("Error - couldn't open .fasta file")
fmt.Println(err)
os.Exit(1)
}
scanner := bufio.NewScanner(fi)
var temp string
// Skip first line (Assuming it's a header)
scanner.Scan()
// For each line in the file
for scanner.Scan() |
return out
}
// Gets the sequences from the .seq file and returns them in an array of Sequence structs
// Assuming you have the following in the .seq file:
// >99_forward
// ACGT
// You will get a Sequence struct with the following fields:
// name = "99_forward"
// seq = "ACGT"
// isForward = true
// val = 99
func getSeqs(seqFile string) ([]Sequence) {
var out []Sequence
// Open the .seq file
fi, err := os.Open(seqFile)
if err != nil {
fmt.Println("Error - couldn't open .seq file")
os.Exit(1)
}
scanner := bufio.NewScanner(fi)
// For each line in the file
for scanner.Scan() {
var temp Sequence
// Get name
line := scanner.Text()[1:]
temp.name = line
// Get value
split_line := strings.Split(line, "_")
temp.val, _ = strconv.Atoi(split_line[0])
// Get isForward
if split_line[1] == "forward" {
temp.isForward = true
} else {
temp.isForward = false
}
// Get sequence
scanner.Scan()
temp.seq = scanner.Text()
out = append(out, temp)
}
return out
}
// Returns the reverse complement of a sequence of nucleotides
// Only works with 'A', 'C', 'G', 'T' chars
func reverseComplement(sequence string) (out string) {
for i := len(sequence)-1; i >= 0; i-- {
switch sequence[i] {
case 65:
out += "T"
break
case 84:
out += "A"
break
case 71:
out += "C"
break
case 67:
out += "G"
break
default:
fmt.Println("Error -- Encountered non-ATGC char in sequence")
}
}
return
}
// Return true if token is in array (int and int array respectively)
func contains(array []int, token int) (bool) {
for _, item := range array {
if item == token {
return true
}
}
return false
}
// Sorts str
func sortString(str string) (string) {
if len(str) <= 1 {
return str
}
s := strings.Split(str, "")
sort.Strings(s)
return strings.Join(s, "")
}
// Prints all relevent data in nice format
func printOutput(primers map[string]string) {
// Number of degens is the key, array of the names of sequences
// that have that number of degens is the value
forward := make(map[int][]string)
reverse := make(map[int][]string)
// Keeps track of the amount of degens found so that it can be sorted
var forwardDegens []int
var reverseDegens []int
// Name of sequence is the key, array of positions where degens occur
// within that sequence is value
positions := make(map[string][]int)
// Gather information about each primer, including the amount of degens
// and their positions
for key, val := range primers {
degens := 0
for i, char := range val {
if char != 'A' && char != 'C' && char != 'G' && char != 'T' {
degens++
positions[key] = append(positions[key], i)
}
}
data := strings.Split(key, "_")
if data[1] == "forward" {
forward[degens] = append(forward[degens], key)
if !(contains(forwardDegens, degens)){
forwardDegens = append(forwardDegens, degens)
}
} else {
reverse[degens] = append(reverse[degens], key)
if !(contains(reverseDegens, degens)){
reverseDegens = append(reverseDegens, degens)
}
}
}
sort.Ints(forwardDegens)
sort.Ints(reverseDegens)
// Print relevant data
fmt.Println("\nFORWARD SEQUENCES")
fmt.Println("Name\t\tPrimer\t\t\t# of degeneracies")
fmt.Println("---------------------------------------------------------")
for _, num := range forwardDegens {
for _, name := range forward[num] {
fmt.Println(name + "\t" + primers[name] + "\t" + strconv.Itoa(num))
var carrots []rune
for i := 0; i < len(primers[name]); i++ {
carrots = append(carrots, ' ')
}
for _, index := range positions[name] {
carrots[index] = '^'
}
fmt.Println("\t\t" + string(carrots))
fmt.Println()
}
}
fmt.Println("\nREVERSE SEQUENCES")
fmt.Println("Name\t\tPrimer\t\t\t# of degeneracies")
fmt.Println("---------------------------------------------------------")
for _, num := range reverseDegens {
for _, name := range reverse[num] {
fmt.Println(name + "\t" + primers[name] + "\t" + strconv.Itoa(num))
var carrots []rune
for i := 0; i < len(primers[name]); i++ {
carrots = append(carrots, ' ')
}
for _, index := range positions[name] {
carrots[index] = '^'
}
fmt.Println("\t\t" + string(carrots))
fmt.Println()
}
}
}
func main() {
seqPtr := flag.String("s", "", "[Required] Path to .seq file")
fastaPtr := flag.String("f", "", "[Required] Path to .fasta file")
ignorePtr := flag.Int("i", 0, "Number of SNPs to ignore before considering a position a degeneracy (default 0)")
flag.Parse()
primers := generatePrimers(*seqPtr, *fastaPtr, *ignorePtr)
printOutput(primers)
}
| {
line := scanner.Text()
// If the line begins with '>', assume it's a header
if line[0] == 62 {
out = append(out, temp)
temp = ""
// If the line doesn't begin with '>', assume it's a seuence of nucleotides
} else {
temp += line
}
} | conditional_block |
views.py | from django.shortcuts import render_to_response, redirect
from django.views.generic.detail import DetailView
from django.views.generic import TemplateView
from django.template.context import RequestContext
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.db.models import F, Count
from django.db.models.query_utils import Q
from datetime import datetime, timedelta
import re
import smtplib
from radiothon.forms import (PledgeForm, DonorForm, AddressForm,
CreditCardForm, HokiePassportForm)
from radiothon.models import (Pledge, Premium, BusinessManager,
CreditCard, HokiePassport, Donor,
Address, PremiumChoice, PremiumAttributeOption,
PremiumAttributeRelationship)
from radiothon.forms import premium_choice_form_factory
from radiothon.settings_local import EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, EMAIL_HOST, EMAIL_PORT
class | (TemplateView):
template_name = "index.html"
class PledgeDetail(DetailView):
queryset = Pledge.objects.all()
template_name = 'pledge_detail.html'
@login_required(login_url='/radiothon/accounts/login')
def rthon_pledge(request):
pledge_form = PledgeForm(request.POST or None, prefix="pledge_form")
donor_form = DonorForm(request.POST or None, prefix="donor_form")
address_form = AddressForm(request.POST or None, prefix="address_form")
credit_form = CreditCardForm(request.POST or None, prefix="creditcard_form")
hokiepassport_form = HokiePassportForm(request.POST or None, prefix="hokiepassport_form")
errors = []
premium_choice_forms = []
if (request.POST):
pledge = None
if (pledge_form.is_valid()):
pledge = pledge_form.save(commit=False)
premiums_allowed = Premium.objects.filter(donation__lte=pledge_form.cleaned_data['amount'])
premium_choice_forms = create_premium_formsets(request, premiums_allowed)
if pledge_form.cleaned_data['payment'] == 'R':
# You can have a Credit card OR a Hokiepassport or Neither but NOT both
if credit_form.is_valid():
credit = CreditCard.objects.filter(number=credit_form.cleaned_data['number'])
credit = credit.filter(expiration=credit_form.cleaned_data['expiration'])
credit = credit.filter(type=credit_form.cleaned_data['type'])
if credit:
credit = credit[0]
else:
credit = credit_form.save()
pledge.credit = credit
else:
errors.append(credit_form.errors)
elif pledge_form.cleaned_data['payment'] == 'P':
if hokiepassport_form.is_valid():
hokiepassport = HokiePassport.objects.filter(number=hokiepassport_form.cleaned_data['number'])
if hokiepassport:
hokiepassport = hokiepassport[0]
else:
hokiepassport = hokiepassport_form.save()
pledge.hokiepassport = hokiepassport
else:
errors.append(hokiepassport_form.errors)
if (pledge.payment == 'R' or pledge.premium_delivery == 'M'):
if (address_form.is_valid()):
address = Address.objects.filter(address_line_1=address_form.cleaned_data['address_line_1'])
address = address.filter(address_line_2=address_form.cleaned_data['address_line_2'])
address = address.filter(city=address_form.cleaned_data['city'])
address = address.filter(state=address_form.cleaned_data['state'])
address = address.filter(zip=address_form.cleaned_data['zip'])
if address:
address = address[0]
else:
address = address_form.save()
donor_address = address
else:
errors.append(address_form.errors)
if (donor_form.is_valid()):
donor = Donor.objects.filter(name=donor_form.cleaned_data['name'])
donor = donor.filter(phone=donor_form.cleaned_data['phone'])
donor = donor.filter(email=donor_form.cleaned_data['email'])
donor = donor.filter(donation_list=donor_form.cleaned_data['donation_list'])
if ('donor_address' in locals()):
donor = donor.filter(address=donor_address)
if donor:
donor = donor[0]
else:
donor = donor_form.save(commit=False)
if ('donor_address' in locals()):
donor.address = donor_address
if not donor.phone and not donor.email:
errors.append('You must ask the donor for their email or their phone number')
else:
donor.save()
pledge.donor = donor
else:
errors.append(donor_form.errors)
if len(errors) == 0:
pledge.save()
if pledge.premium_delivery != 'N':
for form in premium_choice_forms:
# TODO: For some reason, even if fields are left blank,
# the premium form's is_valid remains true.
# GAH killin' me Django
if (form.is_valid() and 'premium' in form.cleaned_data.keys()): # form.fields['premium'].queryset[0]
if (form.cleaned_data['want'] is False):
continue
premium = form.cleaned_data['premium']
instance = PremiumChoice(premium=premium,
pledge=pledge)
#try:
instance.save()
#except IntegrityError:
#break
for value in form.cleaned_data.values():
if (type(value) is PremiumAttributeOption):
instance.options.add(value)
# Subtract one from the inventory of this object.
# When the count on the relationship is 0, donors will no longer
# be able to request an item with these attributes
# i.e. You run out of small red shirts. (they're dead, Jim)
# Retrieve the relationship object for this premiumchoice
#relationshipQuery = PremiumAttributeRelationship.objects.filter(premium=premium)
#for option in instance.options.all():
#relationshipQuery.filter(options=option)
# If the count is greater than 0, subtract one
#relationshipQuery.filter(count__gt=0).update(count=F('count')-1)
target_options = instance.options.all()
candidate_relationships = PremiumAttributeRelationship.objects.filter(premium=premium)
candidate_relationships = candidate_relationships.annotate(c=Count('options')).filter(c=len(target_options))
for option in target_options:
candidate_relationships = candidate_relationships.filter(options=option)
final_relationships = candidate_relationships
final_relationships.filter(count__gt=0).update(count=F('count')-1)
else:
if (len(form.errors) > 0):
errors.append(form.errors)
if len(errors) > 0:
PremiumChoice.objects.filter(pledge=pledge).delete()
if pledge.id is not None:
pledge.delete()
# If we've successfully parsed all the data
# email it to the business manager
if len(errors) == 0:
email_to_business_manager(pledge)
else:
errors.extend(['%s: %s' % (key, value) for key, value in dict(pledge_form.errors).items()])
if len(errors) == 0:
return redirect('/radiothon/pledge/%s' % str(pledge.pk))
else:
premium_choice_forms = create_premium_formsets(request)
return render_to_response('pledge_form.html', {
'errors': errors,
'pledge': pledge_form,
'donor': donor_form,
'address': address_form,
'credit': credit_form,
'hokiepassport': hokiepassport_form,
'premium_formsets': premium_choice_forms,
'sending_to': BusinessManager.objects.order_by('-terms__year', 'terms__semester')[0].email,
}, context_instance=RequestContext(request))
def create_premium_formsets(request, queryset=None):
premium_forms = []
post_data = request.POST if request is not None else None
if queryset is None:
queryset = Premium.objects.all()
# make sure there are some premium choice formsets in post
if (post_data is not None):
premium_choice_formset_post = [k for k in post_data.keys()
if 'premium_choice_formset' in k]
if (len(premium_choice_formset_post) == 0):
return []
for premium in list(queryset):
PremiumChoiceForm = premium_choice_form_factory(premium)
#FormsetClass = formset_factory(PremiumChoiceForm)
#premium_forms.append(FormsetClass(post_data, prefix='%s_premium_choice_formset' % premium.simple_name ))
premium_forms.append(PremiumChoiceForm(post_data, prefix='%s_premium_choice_formset' % premium.simple_name))
return premium_forms
def simple_send_email(sender, recipient, subject, message, server=EMAIL_HOST, port=EMAIL_PORT):
"""Sends an e-mail to the specified recipient."""
headers = ["From: " + sender,
"Subject: " + subject,
"To: " + recipient,
"MIME-Version: 1.0",
"Content-Type: text/plain"]
headers = "\r\n".join(headers)
session = smtplib.SMTP(server, port)
session.ehlo()
session.starttls()
session.ehlo()
session.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD)
session.sendmail(sender, recipient, headers + "\r\n\r\n" + message)
session.close()
def email_to_business_manager(pledge):
subject = 'Radiothon Pledge System: %s' % pledge.donor.name
message = pledge.as_email()
sender = 'WUVT.IT@gmail.com'
current_bm = BusinessManager.objects.order_by('-terms__year', 'terms__semester')[0]
simple_send_email(sender, current_bm.email, subject, message)
def rthon_plain_logs(request, timespan):
ip = get_client_ip(request)
#if (ip != '192.168.0.59'): # should probably de-hardcode this
# return HttpResponse('Error, not authorized.', content_type="text/plain")
response = HttpResponse(content_type="text/plain")
pledges = Pledge.objects.all()
datefilter = Q()
if (timespan == 'hourly'):
time_threshold = datetime.now() - timedelta(hours=1)
datefilter = Q(created__gt=time_threshold)
elif (timespan == 'daily'):
time_threshold = datetime.now() - timedelta(days=1)
datefilter = Q(created__gt=time_threshold)
else:
matches = re.search('^(\d{4})\-(\d{2})\-(\d{2})\s?(?:(\d{2}):(\d{2}))?$', timespan).groups()
matches = [int(match) for match in matches if match is not None]
if len(matches) == 3 or len(matches) == 5:
response.write(matches)
try:
matchdate = datetime(*matches)
except ValueError:
#response.write('Error in request')
return response
response.write(matchdate)
delta = timedelta(days=1) if len(matches) is 3 else timedelta(hours=1)
#response.write(matchdate + delta)
#response.write('\n')
datefilter = Q(created__gte=matchdate, created__lte=matchdate + delta)
else:
#response.write('Error in request.')
return response
pledges = pledges.filter(datefilter)
for pledge in pledges:
response.write(pledge.as_email())
return response
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def get_object_or_none(model, **kwargs):
try:
return model.objects.get(**kwargs)
except model.DoesNotExist:
return None
| MainView | identifier_name |
views.py | from django.shortcuts import render_to_response, redirect
from django.views.generic.detail import DetailView
from django.views.generic import TemplateView
from django.template.context import RequestContext
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.db.models import F, Count
from django.db.models.query_utils import Q
from datetime import datetime, timedelta
import re
import smtplib
from radiothon.forms import (PledgeForm, DonorForm, AddressForm,
CreditCardForm, HokiePassportForm)
from radiothon.models import (Pledge, Premium, BusinessManager,
CreditCard, HokiePassport, Donor,
Address, PremiumChoice, PremiumAttributeOption,
PremiumAttributeRelationship)
from radiothon.forms import premium_choice_form_factory
from radiothon.settings_local import EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, EMAIL_HOST, EMAIL_PORT
class MainView(TemplateView):
template_name = "index.html"
class PledgeDetail(DetailView):
queryset = Pledge.objects.all()
template_name = 'pledge_detail.html'
@login_required(login_url='/radiothon/accounts/login')
def rthon_pledge(request):
pledge_form = PledgeForm(request.POST or None, prefix="pledge_form")
donor_form = DonorForm(request.POST or None, prefix="donor_form")
address_form = AddressForm(request.POST or None, prefix="address_form")
credit_form = CreditCardForm(request.POST or None, prefix="creditcard_form")
hokiepassport_form = HokiePassportForm(request.POST or None, prefix="hokiepassport_form")
errors = []
premium_choice_forms = []
if (request.POST):
pledge = None
if (pledge_form.is_valid()):
pledge = pledge_form.save(commit=False)
premiums_allowed = Premium.objects.filter(donation__lte=pledge_form.cleaned_data['amount'])
premium_choice_forms = create_premium_formsets(request, premiums_allowed)
if pledge_form.cleaned_data['payment'] == 'R':
# You can have a Credit card OR a Hokiepassport or Neither but NOT both
if credit_form.is_valid():
credit = CreditCard.objects.filter(number=credit_form.cleaned_data['number'])
credit = credit.filter(expiration=credit_form.cleaned_data['expiration'])
credit = credit.filter(type=credit_form.cleaned_data['type'])
if credit:
credit = credit[0]
else:
credit = credit_form.save()
pledge.credit = credit
else:
errors.append(credit_form.errors)
elif pledge_form.cleaned_data['payment'] == 'P':
if hokiepassport_form.is_valid():
hokiepassport = HokiePassport.objects.filter(number=hokiepassport_form.cleaned_data['number'])
if hokiepassport:
hokiepassport = hokiepassport[0]
else:
hokiepassport = hokiepassport_form.save()
pledge.hokiepassport = hokiepassport
else:
errors.append(hokiepassport_form.errors)
if (pledge.payment == 'R' or pledge.premium_delivery == 'M'):
if (address_form.is_valid()):
address = Address.objects.filter(address_line_1=address_form.cleaned_data['address_line_1'])
address = address.filter(address_line_2=address_form.cleaned_data['address_line_2'])
address = address.filter(city=address_form.cleaned_data['city'])
address = address.filter(state=address_form.cleaned_data['state'])
address = address.filter(zip=address_form.cleaned_data['zip'])
if address:
address = address[0]
else:
address = address_form.save()
donor_address = address
else:
errors.append(address_form.errors)
if (donor_form.is_valid()):
donor = Donor.objects.filter(name=donor_form.cleaned_data['name'])
donor = donor.filter(phone=donor_form.cleaned_data['phone'])
donor = donor.filter(email=donor_form.cleaned_data['email'])
donor = donor.filter(donation_list=donor_form.cleaned_data['donation_list'])
if ('donor_address' in locals()):
donor = donor.filter(address=donor_address)
if donor:
donor = donor[0]
else:
donor = donor_form.save(commit=False)
if ('donor_address' in locals()):
donor.address = donor_address
if not donor.phone and not donor.email:
errors.append('You must ask the donor for their email or their phone number')
else:
donor.save()
pledge.donor = donor
else:
errors.append(donor_form.errors)
if len(errors) == 0:
pledge.save()
if pledge.premium_delivery != 'N':
for form in premium_choice_forms:
# TODO: For some reason, even if fields are left blank,
# the premium form's is_valid remains true.
# GAH killin' me Django
if (form.is_valid() and 'premium' in form.cleaned_data.keys()): # form.fields['premium'].queryset[0]
if (form.cleaned_data['want'] is False):
continue
premium = form.cleaned_data['premium']
instance = PremiumChoice(premium=premium,
pledge=pledge)
#try:
instance.save()
#except IntegrityError:
#break
for value in form.cleaned_data.values():
if (type(value) is PremiumAttributeOption):
instance.options.add(value)
# Subtract one from the inventory of this object.
# When the count on the relationship is 0, donors will no longer
# be able to request an item with these attributes
# i.e. You run out of small red shirts. (they're dead, Jim)
# Retrieve the relationship object for this premiumchoice
#relationshipQuery = PremiumAttributeRelationship.objects.filter(premium=premium)
#for option in instance.options.all():
#relationshipQuery.filter(options=option)
# If the count is greater than 0, subtract one
#relationshipQuery.filter(count__gt=0).update(count=F('count')-1)
target_options = instance.options.all()
candidate_relationships = PremiumAttributeRelationship.objects.filter(premium=premium)
candidate_relationships = candidate_relationships.annotate(c=Count('options')).filter(c=len(target_options))
for option in target_options:
candidate_relationships = candidate_relationships.filter(options=option)
final_relationships = candidate_relationships
final_relationships.filter(count__gt=0).update(count=F('count')-1)
else:
if (len(form.errors) > 0):
errors.append(form.errors)
if len(errors) > 0:
PremiumChoice.objects.filter(pledge=pledge).delete()
if pledge.id is not None:
pledge.delete()
# If we've successfully parsed all the data
# email it to the business manager
if len(errors) == 0:
email_to_business_manager(pledge)
else:
errors.extend(['%s: %s' % (key, value) for key, value in dict(pledge_form.errors).items()])
if len(errors) == 0:
return redirect('/radiothon/pledge/%s' % str(pledge.pk))
else:
premium_choice_forms = create_premium_formsets(request)
return render_to_response('pledge_form.html', {
'errors': errors,
'pledge': pledge_form,
'donor': donor_form,
'address': address_form,
'credit': credit_form,
'hokiepassport': hokiepassport_form,
'premium_formsets': premium_choice_forms,
'sending_to': BusinessManager.objects.order_by('-terms__year', 'terms__semester')[0].email,
}, context_instance=RequestContext(request))
def create_premium_formsets(request, queryset=None):
premium_forms = []
post_data = request.POST if request is not None else None
if queryset is None:
queryset = Premium.objects.all()
# make sure there are some premium choice formsets in post
if (post_data is not None):
premium_choice_formset_post = [k for k in post_data.keys()
if 'premium_choice_formset' in k]
if (len(premium_choice_formset_post) == 0):
return []
for premium in list(queryset):
PremiumChoiceForm = premium_choice_form_factory(premium)
#FormsetClass = formset_factory(PremiumChoiceForm)
#premium_forms.append(FormsetClass(post_data, prefix='%s_premium_choice_formset' % premium.simple_name ))
premium_forms.append(PremiumChoiceForm(post_data, prefix='%s_premium_choice_formset' % premium.simple_name))
return premium_forms
def simple_send_email(sender, recipient, subject, message, server=EMAIL_HOST, port=EMAIL_PORT):
"""Sends an e-mail to the specified recipient."""
headers = ["From: " + sender,
"Subject: " + subject,
"To: " + recipient,
"MIME-Version: 1.0",
"Content-Type: text/plain"]
headers = "\r\n".join(headers)
session = smtplib.SMTP(server, port)
session.ehlo()
session.starttls()
session.ehlo()
session.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD)
session.sendmail(sender, recipient, headers + "\r\n\r\n" + message)
session.close()
def email_to_business_manager(pledge):
|
def rthon_plain_logs(request, timespan):
ip = get_client_ip(request)
#if (ip != '192.168.0.59'): # should probably de-hardcode this
# return HttpResponse('Error, not authorized.', content_type="text/plain")
response = HttpResponse(content_type="text/plain")
pledges = Pledge.objects.all()
datefilter = Q()
if (timespan == 'hourly'):
time_threshold = datetime.now() - timedelta(hours=1)
datefilter = Q(created__gt=time_threshold)
elif (timespan == 'daily'):
time_threshold = datetime.now() - timedelta(days=1)
datefilter = Q(created__gt=time_threshold)
else:
matches = re.search('^(\d{4})\-(\d{2})\-(\d{2})\s?(?:(\d{2}):(\d{2}))?$', timespan).groups()
matches = [int(match) for match in matches if match is not None]
if len(matches) == 3 or len(matches) == 5:
response.write(matches)
try:
matchdate = datetime(*matches)
except ValueError:
#response.write('Error in request')
return response
response.write(matchdate)
delta = timedelta(days=1) if len(matches) is 3 else timedelta(hours=1)
#response.write(matchdate + delta)
#response.write('\n')
datefilter = Q(created__gte=matchdate, created__lte=matchdate + delta)
else:
#response.write('Error in request.')
return response
pledges = pledges.filter(datefilter)
for pledge in pledges:
response.write(pledge.as_email())
return response
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def get_object_or_none(model, **kwargs):
try:
return model.objects.get(**kwargs)
except model.DoesNotExist:
return None
| subject = 'Radiothon Pledge System: %s' % pledge.donor.name
message = pledge.as_email()
sender = 'WUVT.IT@gmail.com'
current_bm = BusinessManager.objects.order_by('-terms__year', 'terms__semester')[0]
simple_send_email(sender, current_bm.email, subject, message) | identifier_body |
views.py | from django.shortcuts import render_to_response, redirect
from django.views.generic.detail import DetailView
from django.views.generic import TemplateView
from django.template.context import RequestContext
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.db.models import F, Count
from django.db.models.query_utils import Q
from datetime import datetime, timedelta
import re
import smtplib
from radiothon.forms import (PledgeForm, DonorForm, AddressForm,
CreditCardForm, HokiePassportForm)
from radiothon.models import (Pledge, Premium, BusinessManager,
CreditCard, HokiePassport, Donor,
Address, PremiumChoice, PremiumAttributeOption,
PremiumAttributeRelationship)
from radiothon.forms import premium_choice_form_factory
from radiothon.settings_local import EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, EMAIL_HOST, EMAIL_PORT
class MainView(TemplateView):
template_name = "index.html"
class PledgeDetail(DetailView):
queryset = Pledge.objects.all()
template_name = 'pledge_detail.html'
@login_required(login_url='/radiothon/accounts/login')
def rthon_pledge(request):
pledge_form = PledgeForm(request.POST or None, prefix="pledge_form")
donor_form = DonorForm(request.POST or None, prefix="donor_form")
address_form = AddressForm(request.POST or None, prefix="address_form")
credit_form = CreditCardForm(request.POST or None, prefix="creditcard_form")
hokiepassport_form = HokiePassportForm(request.POST or None, prefix="hokiepassport_form")
errors = []
premium_choice_forms = []
if (request.POST):
pledge = None
if (pledge_form.is_valid()):
pledge = pledge_form.save(commit=False)
premiums_allowed = Premium.objects.filter(donation__lte=pledge_form.cleaned_data['amount'])
premium_choice_forms = create_premium_formsets(request, premiums_allowed)
if pledge_form.cleaned_data['payment'] == 'R':
# You can have a Credit card OR a Hokiepassport or Neither but NOT both
if credit_form.is_valid():
credit = CreditCard.objects.filter(number=credit_form.cleaned_data['number'])
credit = credit.filter(expiration=credit_form.cleaned_data['expiration'])
credit = credit.filter(type=credit_form.cleaned_data['type'])
if credit:
credit = credit[0]
else:
credit = credit_form.save()
pledge.credit = credit
else:
errors.append(credit_form.errors)
elif pledge_form.cleaned_data['payment'] == 'P':
if hokiepassport_form.is_valid():
hokiepassport = HokiePassport.objects.filter(number=hokiepassport_form.cleaned_data['number'])
if hokiepassport:
hokiepassport = hokiepassport[0]
else:
hokiepassport = hokiepassport_form.save()
pledge.hokiepassport = hokiepassport
else:
errors.append(hokiepassport_form.errors)
if (pledge.payment == 'R' or pledge.premium_delivery == 'M'):
if (address_form.is_valid()):
address = Address.objects.filter(address_line_1=address_form.cleaned_data['address_line_1'])
address = address.filter(address_line_2=address_form.cleaned_data['address_line_2'])
address = address.filter(city=address_form.cleaned_data['city'])
address = address.filter(state=address_form.cleaned_data['state'])
address = address.filter(zip=address_form.cleaned_data['zip'])
if address:
address = address[0]
else:
address = address_form.save()
donor_address = address
else:
errors.append(address_form.errors)
if (donor_form.is_valid()):
donor = Donor.objects.filter(name=donor_form.cleaned_data['name'])
donor = donor.filter(phone=donor_form.cleaned_data['phone'])
donor = donor.filter(email=donor_form.cleaned_data['email'])
donor = donor.filter(donation_list=donor_form.cleaned_data['donation_list'])
if ('donor_address' in locals()):
donor = donor.filter(address=donor_address)
if donor:
donor = donor[0]
else:
donor = donor_form.save(commit=False)
if ('donor_address' in locals()):
donor.address = donor_address
if not donor.phone and not donor.email:
errors.append('You must ask the donor for their email or their phone number')
else:
donor.save()
pledge.donor = donor
else:
errors.append(donor_form.errors)
if len(errors) == 0:
pledge.save()
if pledge.premium_delivery != 'N':
for form in premium_choice_forms:
# TODO: For some reason, even if fields are left blank,
# the premium form's is_valid remains true.
# GAH killin' me Django
if (form.is_valid() and 'premium' in form.cleaned_data.keys()): # form.fields['premium'].queryset[0]
if (form.cleaned_data['want'] is False):
continue
premium = form.cleaned_data['premium']
instance = PremiumChoice(premium=premium,
pledge=pledge)
#try:
instance.save()
#except IntegrityError:
#break
for value in form.cleaned_data.values():
if (type(value) is PremiumAttributeOption):
instance.options.add(value)
# Subtract one from the inventory of this object.
# When the count on the relationship is 0, donors will no longer
# be able to request an item with these attributes
# i.e. You run out of small red shirts. (they're dead, Jim)
# Retrieve the relationship object for this premiumchoice
#relationshipQuery = PremiumAttributeRelationship.objects.filter(premium=premium)
#for option in instance.options.all():
#relationshipQuery.filter(options=option)
# If the count is greater than 0, subtract one
#relationshipQuery.filter(count__gt=0).update(count=F('count')-1)
target_options = instance.options.all()
candidate_relationships = PremiumAttributeRelationship.objects.filter(premium=premium)
candidate_relationships = candidate_relationships.annotate(c=Count('options')).filter(c=len(target_options))
for option in target_options:
candidate_relationships = candidate_relationships.filter(options=option)
final_relationships = candidate_relationships
final_relationships.filter(count__gt=0).update(count=F('count')-1)
else:
if (len(form.errors) > 0):
errors.append(form.errors)
if len(errors) > 0:
PremiumChoice.objects.filter(pledge=pledge).delete()
if pledge.id is not None:
pledge.delete()
# If we've successfully parsed all the data
# email it to the business manager
if len(errors) == 0:
email_to_business_manager(pledge)
else:
errors.extend(['%s: %s' % (key, value) for key, value in dict(pledge_form.errors).items()])
if len(errors) == 0:
return redirect('/radiothon/pledge/%s' % str(pledge.pk))
else:
premium_choice_forms = create_premium_formsets(request)
return render_to_response('pledge_form.html', { | 'errors': errors,
'pledge': pledge_form,
'donor': donor_form,
'address': address_form,
'credit': credit_form,
'hokiepassport': hokiepassport_form,
'premium_formsets': premium_choice_forms,
'sending_to': BusinessManager.objects.order_by('-terms__year', 'terms__semester')[0].email,
}, context_instance=RequestContext(request))
def create_premium_formsets(request, queryset=None):
premium_forms = []
post_data = request.POST if request is not None else None
if queryset is None:
queryset = Premium.objects.all()
# make sure there are some premium choice formsets in post
if (post_data is not None):
premium_choice_formset_post = [k for k in post_data.keys()
if 'premium_choice_formset' in k]
if (len(premium_choice_formset_post) == 0):
return []
for premium in list(queryset):
PremiumChoiceForm = premium_choice_form_factory(premium)
#FormsetClass = formset_factory(PremiumChoiceForm)
#premium_forms.append(FormsetClass(post_data, prefix='%s_premium_choice_formset' % premium.simple_name ))
premium_forms.append(PremiumChoiceForm(post_data, prefix='%s_premium_choice_formset' % premium.simple_name))
return premium_forms
def simple_send_email(sender, recipient, subject, message, server=EMAIL_HOST, port=EMAIL_PORT):
"""Sends an e-mail to the specified recipient."""
headers = ["From: " + sender,
"Subject: " + subject,
"To: " + recipient,
"MIME-Version: 1.0",
"Content-Type: text/plain"]
headers = "\r\n".join(headers)
session = smtplib.SMTP(server, port)
session.ehlo()
session.starttls()
session.ehlo()
session.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD)
session.sendmail(sender, recipient, headers + "\r\n\r\n" + message)
session.close()
def email_to_business_manager(pledge):
subject = 'Radiothon Pledge System: %s' % pledge.donor.name
message = pledge.as_email()
sender = 'WUVT.IT@gmail.com'
current_bm = BusinessManager.objects.order_by('-terms__year', 'terms__semester')[0]
simple_send_email(sender, current_bm.email, subject, message)
def rthon_plain_logs(request, timespan):
ip = get_client_ip(request)
#if (ip != '192.168.0.59'): # should probably de-hardcode this
# return HttpResponse('Error, not authorized.', content_type="text/plain")
response = HttpResponse(content_type="text/plain")
pledges = Pledge.objects.all()
datefilter = Q()
if (timespan == 'hourly'):
time_threshold = datetime.now() - timedelta(hours=1)
datefilter = Q(created__gt=time_threshold)
elif (timespan == 'daily'):
time_threshold = datetime.now() - timedelta(days=1)
datefilter = Q(created__gt=time_threshold)
else:
matches = re.search('^(\d{4})\-(\d{2})\-(\d{2})\s?(?:(\d{2}):(\d{2}))?$', timespan).groups()
matches = [int(match) for match in matches if match is not None]
if len(matches) == 3 or len(matches) == 5:
response.write(matches)
try:
matchdate = datetime(*matches)
except ValueError:
#response.write('Error in request')
return response
response.write(matchdate)
delta = timedelta(days=1) if len(matches) is 3 else timedelta(hours=1)
#response.write(matchdate + delta)
#response.write('\n')
datefilter = Q(created__gte=matchdate, created__lte=matchdate + delta)
else:
#response.write('Error in request.')
return response
pledges = pledges.filter(datefilter)
for pledge in pledges:
response.write(pledge.as_email())
return response
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def get_object_or_none(model, **kwargs):
try:
return model.objects.get(**kwargs)
except model.DoesNotExist:
return None | random_line_split | |
views.py | from django.shortcuts import render_to_response, redirect
from django.views.generic.detail import DetailView
from django.views.generic import TemplateView
from django.template.context import RequestContext
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.db.models import F, Count
from django.db.models.query_utils import Q
from datetime import datetime, timedelta
import re
import smtplib
from radiothon.forms import (PledgeForm, DonorForm, AddressForm,
CreditCardForm, HokiePassportForm)
from radiothon.models import (Pledge, Premium, BusinessManager,
CreditCard, HokiePassport, Donor,
Address, PremiumChoice, PremiumAttributeOption,
PremiumAttributeRelationship)
from radiothon.forms import premium_choice_form_factory
from radiothon.settings_local import EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, EMAIL_HOST, EMAIL_PORT
class MainView(TemplateView):
template_name = "index.html"
class PledgeDetail(DetailView):
queryset = Pledge.objects.all()
template_name = 'pledge_detail.html'
@login_required(login_url='/radiothon/accounts/login')
def rthon_pledge(request):
pledge_form = PledgeForm(request.POST or None, prefix="pledge_form")
donor_form = DonorForm(request.POST or None, prefix="donor_form")
address_form = AddressForm(request.POST or None, prefix="address_form")
credit_form = CreditCardForm(request.POST or None, prefix="creditcard_form")
hokiepassport_form = HokiePassportForm(request.POST or None, prefix="hokiepassport_form")
errors = []
premium_choice_forms = []
if (request.POST):
pledge = None
if (pledge_form.is_valid()):
pledge = pledge_form.save(commit=False)
premiums_allowed = Premium.objects.filter(donation__lte=pledge_form.cleaned_data['amount'])
premium_choice_forms = create_premium_formsets(request, premiums_allowed)
if pledge_form.cleaned_data['payment'] == 'R':
# You can have a Credit card OR a Hokiepassport or Neither but NOT both
if credit_form.is_valid():
credit = CreditCard.objects.filter(number=credit_form.cleaned_data['number'])
credit = credit.filter(expiration=credit_form.cleaned_data['expiration'])
credit = credit.filter(type=credit_form.cleaned_data['type'])
if credit:
credit = credit[0]
else:
credit = credit_form.save()
pledge.credit = credit
else:
errors.append(credit_form.errors)
elif pledge_form.cleaned_data['payment'] == 'P':
if hokiepassport_form.is_valid():
hokiepassport = HokiePassport.objects.filter(number=hokiepassport_form.cleaned_data['number'])
if hokiepassport:
hokiepassport = hokiepassport[0]
else:
hokiepassport = hokiepassport_form.save()
pledge.hokiepassport = hokiepassport
else:
errors.append(hokiepassport_form.errors)
if (pledge.payment == 'R' or pledge.premium_delivery == 'M'):
if (address_form.is_valid()):
address = Address.objects.filter(address_line_1=address_form.cleaned_data['address_line_1'])
address = address.filter(address_line_2=address_form.cleaned_data['address_line_2'])
address = address.filter(city=address_form.cleaned_data['city'])
address = address.filter(state=address_form.cleaned_data['state'])
address = address.filter(zip=address_form.cleaned_data['zip'])
if address:
address = address[0]
else:
address = address_form.save()
donor_address = address
else:
errors.append(address_form.errors)
if (donor_form.is_valid()):
donor = Donor.objects.filter(name=donor_form.cleaned_data['name'])
donor = donor.filter(phone=donor_form.cleaned_data['phone'])
donor = donor.filter(email=donor_form.cleaned_data['email'])
donor = donor.filter(donation_list=donor_form.cleaned_data['donation_list'])
if ('donor_address' in locals()):
donor = donor.filter(address=donor_address)
if donor:
donor = donor[0]
else:
donor = donor_form.save(commit=False)
if ('donor_address' in locals()):
donor.address = donor_address
if not donor.phone and not donor.email:
|
else:
donor.save()
pledge.donor = donor
else:
errors.append(donor_form.errors)
if len(errors) == 0:
pledge.save()
if pledge.premium_delivery != 'N':
for form in premium_choice_forms:
# TODO: For some reason, even if fields are left blank,
# the premium form's is_valid remains true.
# GAH killin' me Django
if (form.is_valid() and 'premium' in form.cleaned_data.keys()): # form.fields['premium'].queryset[0]
if (form.cleaned_data['want'] is False):
continue
premium = form.cleaned_data['premium']
instance = PremiumChoice(premium=premium,
pledge=pledge)
#try:
instance.save()
#except IntegrityError:
#break
for value in form.cleaned_data.values():
if (type(value) is PremiumAttributeOption):
instance.options.add(value)
# Subtract one from the inventory of this object.
# When the count on the relationship is 0, donors will no longer
# be able to request an item with these attributes
# i.e. You run out of small red shirts. (they're dead, Jim)
# Retrieve the relationship object for this premiumchoice
#relationshipQuery = PremiumAttributeRelationship.objects.filter(premium=premium)
#for option in instance.options.all():
#relationshipQuery.filter(options=option)
# If the count is greater than 0, subtract one
#relationshipQuery.filter(count__gt=0).update(count=F('count')-1)
target_options = instance.options.all()
candidate_relationships = PremiumAttributeRelationship.objects.filter(premium=premium)
candidate_relationships = candidate_relationships.annotate(c=Count('options')).filter(c=len(target_options))
for option in target_options:
candidate_relationships = candidate_relationships.filter(options=option)
final_relationships = candidate_relationships
final_relationships.filter(count__gt=0).update(count=F('count')-1)
else:
if (len(form.errors) > 0):
errors.append(form.errors)
if len(errors) > 0:
PremiumChoice.objects.filter(pledge=pledge).delete()
if pledge.id is not None:
pledge.delete()
# If we've successfully parsed all the data
# email it to the business manager
if len(errors) == 0:
email_to_business_manager(pledge)
else:
errors.extend(['%s: %s' % (key, value) for key, value in dict(pledge_form.errors).items()])
if len(errors) == 0:
return redirect('/radiothon/pledge/%s' % str(pledge.pk))
else:
premium_choice_forms = create_premium_formsets(request)
return render_to_response('pledge_form.html', {
'errors': errors,
'pledge': pledge_form,
'donor': donor_form,
'address': address_form,
'credit': credit_form,
'hokiepassport': hokiepassport_form,
'premium_formsets': premium_choice_forms,
'sending_to': BusinessManager.objects.order_by('-terms__year', 'terms__semester')[0].email,
}, context_instance=RequestContext(request))
def create_premium_formsets(request, queryset=None):
premium_forms = []
post_data = request.POST if request is not None else None
if queryset is None:
queryset = Premium.objects.all()
# make sure there are some premium choice formsets in post
if (post_data is not None):
premium_choice_formset_post = [k for k in post_data.keys()
if 'premium_choice_formset' in k]
if (len(premium_choice_formset_post) == 0):
return []
for premium in list(queryset):
PremiumChoiceForm = premium_choice_form_factory(premium)
#FormsetClass = formset_factory(PremiumChoiceForm)
#premium_forms.append(FormsetClass(post_data, prefix='%s_premium_choice_formset' % premium.simple_name ))
premium_forms.append(PremiumChoiceForm(post_data, prefix='%s_premium_choice_formset' % premium.simple_name))
return premium_forms
def simple_send_email(sender, recipient, subject, message, server=EMAIL_HOST, port=EMAIL_PORT):
"""Sends an e-mail to the specified recipient."""
headers = ["From: " + sender,
"Subject: " + subject,
"To: " + recipient,
"MIME-Version: 1.0",
"Content-Type: text/plain"]
headers = "\r\n".join(headers)
session = smtplib.SMTP(server, port)
session.ehlo()
session.starttls()
session.ehlo()
session.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD)
session.sendmail(sender, recipient, headers + "\r\n\r\n" + message)
session.close()
def email_to_business_manager(pledge):
subject = 'Radiothon Pledge System: %s' % pledge.donor.name
message = pledge.as_email()
sender = 'WUVT.IT@gmail.com'
current_bm = BusinessManager.objects.order_by('-terms__year', 'terms__semester')[0]
simple_send_email(sender, current_bm.email, subject, message)
def rthon_plain_logs(request, timespan):
ip = get_client_ip(request)
#if (ip != '192.168.0.59'): # should probably de-hardcode this
# return HttpResponse('Error, not authorized.', content_type="text/plain")
response = HttpResponse(content_type="text/plain")
pledges = Pledge.objects.all()
datefilter = Q()
if (timespan == 'hourly'):
time_threshold = datetime.now() - timedelta(hours=1)
datefilter = Q(created__gt=time_threshold)
elif (timespan == 'daily'):
time_threshold = datetime.now() - timedelta(days=1)
datefilter = Q(created__gt=time_threshold)
else:
matches = re.search('^(\d{4})\-(\d{2})\-(\d{2})\s?(?:(\d{2}):(\d{2}))?$', timespan).groups()
matches = [int(match) for match in matches if match is not None]
if len(matches) == 3 or len(matches) == 5:
response.write(matches)
try:
matchdate = datetime(*matches)
except ValueError:
#response.write('Error in request')
return response
response.write(matchdate)
delta = timedelta(days=1) if len(matches) is 3 else timedelta(hours=1)
#response.write(matchdate + delta)
#response.write('\n')
datefilter = Q(created__gte=matchdate, created__lte=matchdate + delta)
else:
#response.write('Error in request.')
return response
pledges = pledges.filter(datefilter)
for pledge in pledges:
response.write(pledge.as_email())
return response
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def get_object_or_none(model, **kwargs):
try:
return model.objects.get(**kwargs)
except model.DoesNotExist:
return None
| errors.append('You must ask the donor for their email or their phone number') | conditional_block |
analysiscore.go | package webconnectivitylte
import (
"fmt"
"net"
"net/url"
"github.com/ooni/probe-engine/pkg/model"
"github.com/ooni/probe-engine/pkg/netxlite"
)
//
// Core analysis
//
// These flags determine the context of TestKeys.Blocking. However, while .Blocking
// is an enumeration, these flags allow to describe multiple blocking methods.
const (
// analysisFlagDNSBlocking indicates there's blocking at the DNS level.
analysisFlagDNSBlocking = 1 << iota
// analysisFlagTCPIPBlocking indicates there's blocking at the TCP/IP level.
analysisFlagTCPIPBlocking
// analysisFlagTLSBlocking indicates there were TLS issues.
analysisFlagTLSBlocking
// analysisFlagHTTPBlocking indicates there was an HTTP failure.
analysisFlagHTTPBlocking
// analysisFlagHTTPDiff indicates there's an HTTP diff.
analysisFlagHTTPDiff
// analysisFlagSuccess indicates we did not detect any blocking.
analysisFlagSuccess
)
// analysisToplevel is the toplevel function that analyses the results
// of the experiment once all network tasks have completed.
//
// The ultimate objective of this function is to set the toplevel flags
// used by the backend to score results. These flags are:
//
// - blocking (and x_blocking_flags) which contain information about
// the detected blocking method (or methods);
//
// - accessible which contains information on whether we think we
// could access the resource somehow.
//
// Originally, Web Connectivity only had a blocking scalar value so
// we could see ourselves in one of the following cases:
//
// +----------+------------+--------------------------+
// | Blocking | Accessible | Meaning |
// +----------+------------+--------------------------+
// | null | null | Probe analysis error |
// +----------+------------+--------------------------+
// | false | true | We detected no blocking |
// +----------+------------+--------------------------+
// | "..." | false | We detected blocking |
// +----------+------------+--------------------------+
//
// While it would be possible in this implementation, which has a granular
// definition of blocking (x_blocking_flags), to set accessible to mean
// whether we could access the resource in some conditions, it seems quite
// dangerous to deviate from the original behavior.
//
// Our code will NEVER set .Blocking or .Accessible outside of this function
// and we'll instead rely on XBlockingFlags. This function's job is to call
// other functions that compute the .XBlockingFlags and then to assign the value
// of .Blocking and .Accessible from the .XBlockingFlags value.
//
// Accordingly, this is how we map the value of the .XBlockingFlags to the
// values of .Blocking and .Accessible:
//
// +--------------------------------------+----------------+-------------+
// | .BlockingFlags | .Blocking | .Accessible |
// +--------------------------------------+----------------+-------------+
// | (& DNSBlocking) != 0 | "dns" | false |
// +--------------------------------------+----------------+-------------+
// | (& TCPIPBlocking) != 0 | "tcp_ip" | false |
// +--------------------------------------+----------------+-------------+
// | (& (TLSBlocking|HTTPBlocking)) != 0 | "http-failure" | false |
// +--------------------------------------+----------------+-------------+
// | (& HTTPDiff) != 0 | "http-diff" | false |
// +--------------------------------------+----------------+-------------+
// | == FlagSuccess | false | true |
// +--------------------------------------+----------------+-------------+
// | otherwise | null | null |
// +--------------------------------------+----------------+-------------+
//
// It's a very simple rule, that should preserve previous semantics.
//
// As an improvement over Web Connectivity v0.4, we also attempt to identify
// special subcases of a null, null result to provide the user with more information.
func (tk *TestKeys) analysisToplevel(logger model.Logger) {
// Since we run after all tasks have completed (or so we assume) we're
// not going to use any form of locking here.
// these functions compute the value of XBlockingFlags
tk.analysisDNSToplevel(logger)
tk.analysisTCPIPToplevel(logger)
tk.analysisTLSToplevel(logger)
tk.analysisHTTPToplevel(logger)
// now, let's determine .Accessible and .Blocking
switch {
case (tk.BlockingFlags & analysisFlagDNSBlocking) != 0:
tk.Blocking = "dns"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & analysisFlagTCPIPBlocking) != 0:
tk.Blocking = "tcp_ip"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & (analysisFlagTLSBlocking | analysisFlagHTTPBlocking)) != 0:
tk.Blocking = "http-failure"
tk.Accessible = false
logger.Warnf("ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & analysisFlagHTTPDiff) != 0:
tk.Blocking = "http-diff"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case tk.BlockingFlags == analysisFlagSuccess:
tk.Blocking = false
tk.Accessible = true
logger.Infof(
"ACCESSIBLE: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
default:
// NullNull remediation
//
// If we arrive here, the measurement has failed. However, there are a
// bunch of cases where we can still explain what happened by applying specific
// algorithms to detect edge cases.
//
// The relative order of these algorithsm matters: swapping them without
// careful consideration may produce unexpected results.
if tk.analysisNullNullDetectTHDNSNXDOMAIN(logger) {
tk.Blocking = "dns"
tk.Accessible = false
logger.Warnf(
"RESIDUAL_DNS_BLOCKING: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectNoAddrs(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_DNS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectAllConnectsFailed(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_TCP: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectTLSMisconfigured(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_TLS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectSuccessfulHTTPS(logger) {
tk.Blocking = false
tk.Accessible = true
logger.Infof(
"ACCESSIBLE_HTTPS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
tk.Blocking = nil
tk.Accessible = nil
logger.Warnf(
"UNKNOWN: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
}
}
const (
// analysisFlagNullNullNoAddrs indicates neither the probe nor the TH were
// able to get any IP addresses from any resolver.
analysisFlagNullNullNoAddrs = 1 << iota
// analysisFlagNullNullAllConnectsFailed indicates that all the connect
// attempts failed both in the probe and in the test helper.
analysisFlagNullNullAllConnectsFailed
// analysisFlagNullNullTLSMisconfigured indicates that all the TLS handshake
// attempts failed both in the probe and in the test helper.
analysisFlagNullNullTLSMisconfigured
// analysisFlagNullNullSuccessfulHTTPS indicates that we had no TH data
// but all the HTTP requests used always HTTPS and never failed.
analysisFlagNullNullSuccessfulHTTPS
// analysisFlagNullNullNXDOMAINWithCensorship indicates that we have
// seen no error with local DNS resolutions but, at the same time, the
// control failed with NXDOMAIN. When this happens, we probably have
// DNS interception locally, so all cleartext queries return the same
// bogus answers based on a rule applied on a now-expired domain.
analysisFlagNullNullNXDOMAINWithCensorship
)
// analysisNullNullDetectTHDNSNXDOMAIN runs when .Blocking = nil and
// .Accessible = nil to flag cases in which the probe resolved addresses
// but the TH thinks the address is actually NXDOMAIN. When this
// happens, we're going to give priority to the TH's DoH observation.
//
// See https://github.com/ooni/probe/issues/2308.
func (tk *TestKeys) analysisNullNullDetectTHDNSNXDOMAIN(logger model.Logger) bool {
if tk.Control == nil {
// we need the control info to continue
return false
}
// we need some cleartext successes
var cleartextSuccesses int
for _, query := range tk.Queries {
if query.Engine == "doh" {
// we skip DoH entries because they are encrypted and
// cannot be manipulated by censors
continue
}
if query.Failure != nil {
// we should stop the algorithm in case we've got any
// hard failure, but `dns_no_answer` is acceptable because
// actually it might be there's only A censorship and the
// AAAA query instead returns `dns_no_answer`.
//
// See https://explorer.ooni.org/measurement/20220914T073558Z_webconnectivity_IT_30722_n1_wroXRsBGYx0x9h0q?input=http%3A%2F%2Fitsat.info
// for a case where this was happening and fooled us
// causing us to conclude that the website was just down.
if *query.Failure == netxlite.FailureDNSNoAnswer {
continue
}
return false
}
cleartextSuccesses++
}
if cleartextSuccesses <= 0 {
return false
}
// if the TH failed with its own string representing the NXDOMAIN
// error, then we've detected our corner case
failure := tk.Control.DNS.Failure
if failure != nil && *failure == model.THDNSNameError {
logger.Info("DNS censorship: local DNS success with remote NXDOMAIN")
tk.NullNullFlags |= analysisFlagNullNullNXDOMAINWithCensorship
return true
}
// otherwise it's something else
return false
}
// analysisNullNullDetectSuccessfulHTTPS runs when .Blocking = nil and
// .Accessible = nil to flag successul HTTPS measurements chains that
// occurred regardless of whatever else could have gone wrong.
//
// We need all requests to be HTTPS because an HTTP request in the
// chain breaks the ~reasonable assumption that our custom CA bundle
// is enough to protect against MITM. Of course, when we use this
// algorithm, we're not well positioned to flag server-side blocking.
//
// Version 0.4 of the probe implemented a similar algorithm, which
// however ran before other checks. Version, 0.5 on the contrary, runs
// this algorithm if any other heuristics failed.
//
// See https://github.com/ooni/probe/issues/2307 for more info.
func (tk *TestKeys) analysisNullNullDetectSuccessfulHTTPS(logger model.Logger) bool {
// the chain is sorted from most recent to oldest but it does
// not matter much since we need to walk all of it.
//
// CAVEAT: this code assumes we have a single request chain
// inside the .Requests field, which seems fine because it's
// what Web Connectivity should be doing.
for _, req := range tk.Requests {
URL, err := url.Parse(req.Request.URL)
if err != nil {
// this looks like a bug
return false
}
if URL.Scheme != "https" {
// the whole chain must be HTTPS
return false
}
if req.Failure != nil {
// they must all succeed
return false
}
switch req.Response.Code {
case 200, 301, 302, 307, 308:
default:
// the response must be successful or redirect
return false
}
}
// only if we have at least one request
if len(tk.Requests) > 0 {
logger.Info("website likely accessible: seen successful chain of HTTPS transactions")
tk.NullNullFlags |= analysisFlagNullNullSuccessfulHTTPS
return true
}
// safety net otherwise
return false
}
// analysisNullNullDetectTLSMisconfigured runs when .Blocking = nil and
// .Accessible = nil to check whether by chance we had TLS issues both on the
// probe side and on the TH side. This problem of detecting misconfiguration
// of the server's TLS stack is discussed at https://github.com/ooni/probe/issues/2300.
func (tk *TestKeys) analysisNullNullDetectTLSMisconfigured(logger model.Logger) bool {
if tk.Control == nil || tk.Control.TLSHandshake == nil {
// we need TLS control data to say we are in this case
return false
}
for _, entry := range tk.TLSHandshakes {
if entry.Failure == nil {
// we need all attempts to fail to flag this state
return false
}
thEntry, found := tk.Control.TLSHandshake[entry.Address]
if !found {
// we need to have seen exactly the same attempts
return false
}
if thEntry.Failure == nil {
// we need all TH attempts to fail
return false
}
if *entry.Failure != *thEntry.Failure {
// we need to see the same failure to be sure, which it's
// possible to do for TLS because we have the same definition
// of failure rather than being constrained by the legacy
// implementation of the test helper and Twisted names
//
// TODO(bassosimone): this is the obvious algorithm but maybe
// it's a bit too strict and there is a more lax version of
// the same algorithm that it's still acceptable?
return false
}
}
// only if we have had some TLS handshakes for both probe and TH
if len(tk.TLSHandshakes) > 0 && len(tk.Control.TLSHandshake) > 0 {
logger.Info("website likely down: all TLS handshake attempts failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullTLSMisconfigured
return true
}
// safety net in case we've got wrong input
return false
}
// analysisNullNullDetectAllConnectsFailed attempts to detect whether we are in
// the .Blocking = nil, .Accessible = nil case because all the TCP connect
// attempts by either the probe or the TH have failed.
//
// See https://explorer.ooni.org/measurement/20220911T105037Z_webconnectivity_IT_30722_n1_ruzuQ219SmIO9SrT?input=https://doh.centraleu.pi-dns.com/dns-query?dns=q80BAAABAAAAAAAAA3d3dwdleGFtcGxlA2NvbQAAAQAB
// for an example measurement with this behavior.
//
// See https://github.com/ooni/probe/issues/2299 for the reference issue.
func (tk *TestKeys) analysisNullNullDetectAllConnectsFailed(logger model.Logger) bool {
if tk.Control == nil {
// we need control data to say we're in this case
return false
}
for _, entry := range tk.TCPConnect {
if entry.Status.Failure == nil {
// we need all connect attempts to fail
return false
}
epnt := net.JoinHostPort(entry.IP, fmt.Sprintf("%d", entry.Port))
thEntry, found := tk.Control.TCPConnect[epnt]
if !found {
// we need to have seen exactly the same attempts
return false
}
if thEntry.Failure == nil {
// we need all TH attempts to fail
return false
}
}
// only if we have had some addresses to connect
if len(tk.TCPConnect) > 0 && len(tk.Control.TCPConnect) > 0 {
logger.Info("website likely down: all TCP connect attempts failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullAllConnectsFailed
return true
}
| }
// analysisNullNullDetectNoAddrs attempts to see whether we
// ended up into the .Blocking = nil, .Accessible = nil case because
// the domain is expired and all queries returned no addresses.
//
// See https://github.com/ooni/probe/issues/2290 for further
// documentation about the issue we're solving here.
//
// It would be tempting to check specifically for NXDOMAIN here, but we
// know it is problematic do that. In fact, on Android the getaddrinfo
// resolver always returns EAI_NODATA on error, regardless of the actual
// error that may have occurred in the Android DNS backend.
//
// See https://github.com/ooni/probe/issues/2029 for more information
// on Android's getaddrinfo behavior.
func (tk *TestKeys) analysisNullNullDetectNoAddrs(logger model.Logger) bool {
if tk.Control == nil {
// we need control data to say we're in this case
return false
}
for _, query := range tk.Queries {
if len(query.Answers) > 0 {
// when a query has answers, we're not in the NoAddresses case
return false
}
}
if len(tk.TCPConnect) > 0 {
// if we attempted TCP connect, we're not in the NoAddresses case
return false
}
if len(tk.TLSHandshakes) > 0 {
// if we attempted TLS handshakes, we're not in the NoAddresses case
return false
}
if len(tk.Control.DNS.Addrs) > 0 {
// when the TH resolved addresses, we're not in the NoAddresses case
return false
}
if len(tk.Control.TCPConnect) > 0 {
// when the TH used addresses, we're not in the NoAddresses case
return false
}
logger.Infof("website likely down: all DNS lookups failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullNoAddrs
return true
} | // safety net in case we're passed empty lists/maps
return false | random_line_split |
analysiscore.go | package webconnectivitylte
import (
"fmt"
"net"
"net/url"
"github.com/ooni/probe-engine/pkg/model"
"github.com/ooni/probe-engine/pkg/netxlite"
)
//
// Core analysis
//
// These flags determine the context of TestKeys.Blocking. However, while .Blocking
// is an enumeration, these flags allow to describe multiple blocking methods.
const (
// analysisFlagDNSBlocking indicates there's blocking at the DNS level.
analysisFlagDNSBlocking = 1 << iota
// analysisFlagTCPIPBlocking indicates there's blocking at the TCP/IP level.
analysisFlagTCPIPBlocking
// analysisFlagTLSBlocking indicates there were TLS issues.
analysisFlagTLSBlocking
// analysisFlagHTTPBlocking indicates there was an HTTP failure.
analysisFlagHTTPBlocking
// analysisFlagHTTPDiff indicates there's an HTTP diff.
analysisFlagHTTPDiff
// analysisFlagSuccess indicates we did not detect any blocking.
analysisFlagSuccess
)
// analysisToplevel is the toplevel function that analyses the results
// of the experiment once all network tasks have completed.
//
// The ultimate objective of this function is to set the toplevel flags
// used by the backend to score results. These flags are:
//
// - blocking (and x_blocking_flags) which contain information about
// the detected blocking method (or methods);
//
// - accessible which contains information on whether we think we
// could access the resource somehow.
//
// Originally, Web Connectivity only had a blocking scalar value so
// we could see ourselves in one of the following cases:
//
// +----------+------------+--------------------------+
// | Blocking | Accessible | Meaning |
// +----------+------------+--------------------------+
// | null | null | Probe analysis error |
// +----------+------------+--------------------------+
// | false | true | We detected no blocking |
// +----------+------------+--------------------------+
// | "..." | false | We detected blocking |
// +----------+------------+--------------------------+
//
// While it would be possible in this implementation, which has a granular
// definition of blocking (x_blocking_flags), to set accessible to mean
// whether we could access the resource in some conditions, it seems quite
// dangerous to deviate from the original behavior.
//
// Our code will NEVER set .Blocking or .Accessible outside of this function
// and we'll instead rely on XBlockingFlags. This function's job is to call
// other functions that compute the .XBlockingFlags and then to assign the value
// of .Blocking and .Accessible from the .XBlockingFlags value.
//
// Accordingly, this is how we map the value of the .XBlockingFlags to the
// values of .Blocking and .Accessible:
//
// +--------------------------------------+----------------+-------------+
// | .BlockingFlags | .Blocking | .Accessible |
// +--------------------------------------+----------------+-------------+
// | (& DNSBlocking) != 0 | "dns" | false |
// +--------------------------------------+----------------+-------------+
// | (& TCPIPBlocking) != 0 | "tcp_ip" | false |
// +--------------------------------------+----------------+-------------+
// | (& (TLSBlocking|HTTPBlocking)) != 0 | "http-failure" | false |
// +--------------------------------------+----------------+-------------+
// | (& HTTPDiff) != 0 | "http-diff" | false |
// +--------------------------------------+----------------+-------------+
// | == FlagSuccess | false | true |
// +--------------------------------------+----------------+-------------+
// | otherwise | null | null |
// +--------------------------------------+----------------+-------------+
//
// It's a very simple rule, that should preserve previous semantics.
//
// As an improvement over Web Connectivity v0.4, we also attempt to identify
// special subcases of a null, null result to provide the user with more information.
func (tk *TestKeys) analysisToplevel(logger model.Logger) {
// Since we run after all tasks have completed (or so we assume) we're
// not going to use any form of locking here.
// these functions compute the value of XBlockingFlags
tk.analysisDNSToplevel(logger)
tk.analysisTCPIPToplevel(logger)
tk.analysisTLSToplevel(logger)
tk.analysisHTTPToplevel(logger)
// now, let's determine .Accessible and .Blocking
switch {
case (tk.BlockingFlags & analysisFlagDNSBlocking) != 0:
tk.Blocking = "dns"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & analysisFlagTCPIPBlocking) != 0:
tk.Blocking = "tcp_ip"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & (analysisFlagTLSBlocking | analysisFlagHTTPBlocking)) != 0:
tk.Blocking = "http-failure"
tk.Accessible = false
logger.Warnf("ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & analysisFlagHTTPDiff) != 0:
tk.Blocking = "http-diff"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case tk.BlockingFlags == analysisFlagSuccess:
tk.Blocking = false
tk.Accessible = true
logger.Infof(
"ACCESSIBLE: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
default:
// NullNull remediation
//
// If we arrive here, the measurement has failed. However, there are a
// bunch of cases where we can still explain what happened by applying specific
// algorithms to detect edge cases.
//
// The relative order of these algorithsm matters: swapping them without
// careful consideration may produce unexpected results.
if tk.analysisNullNullDetectTHDNSNXDOMAIN(logger) {
tk.Blocking = "dns"
tk.Accessible = false
logger.Warnf(
"RESIDUAL_DNS_BLOCKING: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectNoAddrs(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_DNS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectAllConnectsFailed(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_TCP: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectTLSMisconfigured(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_TLS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectSuccessfulHTTPS(logger) {
tk.Blocking = false
tk.Accessible = true
logger.Infof(
"ACCESSIBLE_HTTPS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
tk.Blocking = nil
tk.Accessible = nil
logger.Warnf(
"UNKNOWN: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
}
}
const (
// analysisFlagNullNullNoAddrs indicates neither the probe nor the TH were
// able to get any IP addresses from any resolver.
analysisFlagNullNullNoAddrs = 1 << iota
// analysisFlagNullNullAllConnectsFailed indicates that all the connect
// attempts failed both in the probe and in the test helper.
analysisFlagNullNullAllConnectsFailed
// analysisFlagNullNullTLSMisconfigured indicates that all the TLS handshake
// attempts failed both in the probe and in the test helper.
analysisFlagNullNullTLSMisconfigured
// analysisFlagNullNullSuccessfulHTTPS indicates that we had no TH data
// but all the HTTP requests used always HTTPS and never failed.
analysisFlagNullNullSuccessfulHTTPS
// analysisFlagNullNullNXDOMAINWithCensorship indicates that we have
// seen no error with local DNS resolutions but, at the same time, the
// control failed with NXDOMAIN. When this happens, we probably have
// DNS interception locally, so all cleartext queries return the same
// bogus answers based on a rule applied on a now-expired domain.
analysisFlagNullNullNXDOMAINWithCensorship
)
// analysisNullNullDetectTHDNSNXDOMAIN runs when .Blocking = nil and
// .Accessible = nil to flag cases in which the probe resolved addresses
// but the TH thinks the address is actually NXDOMAIN. When this
// happens, we're going to give priority to the TH's DoH observation.
//
// See https://github.com/ooni/probe/issues/2308.
func (tk *TestKeys) analysisNullNullDetectTHDNSNXDOMAIN(logger model.Logger) bool {
if tk.Control == nil {
// we need the control info to continue
return false
}
// we need some cleartext successes
var cleartextSuccesses int
for _, query := range tk.Queries {
if query.Engine == "doh" {
// we skip DoH entries because they are encrypted and
// cannot be manipulated by censors
continue
}
if query.Failure != nil {
// we should stop the algorithm in case we've got any
// hard failure, but `dns_no_answer` is acceptable because
// actually it might be there's only A censorship and the
// AAAA query instead returns `dns_no_answer`.
//
// See https://explorer.ooni.org/measurement/20220914T073558Z_webconnectivity_IT_30722_n1_wroXRsBGYx0x9h0q?input=http%3A%2F%2Fitsat.info
// for a case where this was happening and fooled us
// causing us to conclude that the website was just down.
if *query.Failure == netxlite.FailureDNSNoAnswer {
continue
}
return false
}
cleartextSuccesses++
}
if cleartextSuccesses <= 0 {
return false
}
// if the TH failed with its own string representing the NXDOMAIN
// error, then we've detected our corner case
failure := tk.Control.DNS.Failure
if failure != nil && *failure == model.THDNSNameError {
logger.Info("DNS censorship: local DNS success with remote NXDOMAIN")
tk.NullNullFlags |= analysisFlagNullNullNXDOMAINWithCensorship
return true
}
// otherwise it's something else
return false
}
// analysisNullNullDetectSuccessfulHTTPS runs when .Blocking = nil and
// .Accessible = nil to flag successul HTTPS measurements chains that
// occurred regardless of whatever else could have gone wrong.
//
// We need all requests to be HTTPS because an HTTP request in the
// chain breaks the ~reasonable assumption that our custom CA bundle
// is enough to protect against MITM. Of course, when we use this
// algorithm, we're not well positioned to flag server-side blocking.
//
// Version 0.4 of the probe implemented a similar algorithm, which
// however ran before other checks. Version, 0.5 on the contrary, runs
// this algorithm if any other heuristics failed.
//
// See https://github.com/ooni/probe/issues/2307 for more info.
func (tk *TestKeys) analysisNullNullDetectSuccessfulHTTPS(logger model.Logger) bool {
// the chain is sorted from most recent to oldest but it does
// not matter much since we need to walk all of it.
//
// CAVEAT: this code assumes we have a single request chain
// inside the .Requests field, which seems fine because it's
// what Web Connectivity should be doing.
for _, req := range tk.Requests {
URL, err := url.Parse(req.Request.URL)
if err != nil {
// this looks like a bug
return false
}
if URL.Scheme != "https" {
// the whole chain must be HTTPS
return false
}
if req.Failure != nil {
// they must all succeed
return false
}
switch req.Response.Code {
case 200, 301, 302, 307, 308:
default:
// the response must be successful or redirect
return false
}
}
// only if we have at least one request
if len(tk.Requests) > 0 {
logger.Info("website likely accessible: seen successful chain of HTTPS transactions")
tk.NullNullFlags |= analysisFlagNullNullSuccessfulHTTPS
return true
}
// safety net otherwise
return false
}
// analysisNullNullDetectTLSMisconfigured runs when .Blocking = nil and
// .Accessible = nil to check whether by chance we had TLS issues both on the
// probe side and on the TH side. This problem of detecting misconfiguration
// of the server's TLS stack is discussed at https://github.com/ooni/probe/issues/2300.
func (tk *TestKeys) analysisNullNullDetectTLSMisconfigured(logger model.Logger) bool {
if tk.Control == nil || tk.Control.TLSHandshake == nil {
// we need TLS control data to say we are in this case
return false
}
for _, entry := range tk.TLSHandshakes {
if entry.Failure == nil {
// we need all attempts to fail to flag this state
return false
}
thEntry, found := tk.Control.TLSHandshake[entry.Address]
if !found {
// we need to have seen exactly the same attempts
return false
}
if thEntry.Failure == nil {
// we need all TH attempts to fail
return false
}
if *entry.Failure != *thEntry.Failure {
// we need to see the same failure to be sure, which it's
// possible to do for TLS because we have the same definition
// of failure rather than being constrained by the legacy
// implementation of the test helper and Twisted names
//
// TODO(bassosimone): this is the obvious algorithm but maybe
// it's a bit too strict and there is a more lax version of
// the same algorithm that it's still acceptable?
return false
}
}
// only if we have had some TLS handshakes for both probe and TH
if len(tk.TLSHandshakes) > 0 && len(tk.Control.TLSHandshake) > 0 {
logger.Info("website likely down: all TLS handshake attempts failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullTLSMisconfigured
return true
}
// safety net in case we've got wrong input
return false
}
// analysisNullNullDetectAllConnectsFailed attempts to detect whether we are in
// the .Blocking = nil, .Accessible = nil case because all the TCP connect
// attempts by either the probe or the TH have failed.
//
// See https://explorer.ooni.org/measurement/20220911T105037Z_webconnectivity_IT_30722_n1_ruzuQ219SmIO9SrT?input=https://doh.centraleu.pi-dns.com/dns-query?dns=q80BAAABAAAAAAAAA3d3dwdleGFtcGxlA2NvbQAAAQAB
// for an example measurement with this behavior.
//
// See https://github.com/ooni/probe/issues/2299 for the reference issue.
func (tk *TestKeys) analysisNullNullDetectAllConnectsFailed(logger model.Logger) bool |
// analysisNullNullDetectNoAddrs attempts to see whether we
// ended up into the .Blocking = nil, .Accessible = nil case because
// the domain is expired and all queries returned no addresses.
//
// See https://github.com/ooni/probe/issues/2290 for further
// documentation about the issue we're solving here.
//
// It would be tempting to check specifically for NXDOMAIN here, but we
// know it is problematic do that. In fact, on Android the getaddrinfo
// resolver always returns EAI_NODATA on error, regardless of the actual
// error that may have occurred in the Android DNS backend.
//
// See https://github.com/ooni/probe/issues/2029 for more information
// on Android's getaddrinfo behavior.
func (tk *TestKeys) analysisNullNullDetectNoAddrs(logger model.Logger) bool {
if tk.Control == nil {
// we need control data to say we're in this case
return false
}
for _, query := range tk.Queries {
if len(query.Answers) > 0 {
// when a query has answers, we're not in the NoAddresses case
return false
}
}
if len(tk.TCPConnect) > 0 {
// if we attempted TCP connect, we're not in the NoAddresses case
return false
}
if len(tk.TLSHandshakes) > 0 {
// if we attempted TLS handshakes, we're not in the NoAddresses case
return false
}
if len(tk.Control.DNS.Addrs) > 0 {
// when the TH resolved addresses, we're not in the NoAddresses case
return false
}
if len(tk.Control.TCPConnect) > 0 {
// when the TH used addresses, we're not in the NoAddresses case
return false
}
logger.Infof("website likely down: all DNS lookups failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullNoAddrs
return true
}
| {
if tk.Control == nil {
// we need control data to say we're in this case
return false
}
for _, entry := range tk.TCPConnect {
if entry.Status.Failure == nil {
// we need all connect attempts to fail
return false
}
epnt := net.JoinHostPort(entry.IP, fmt.Sprintf("%d", entry.Port))
thEntry, found := tk.Control.TCPConnect[epnt]
if !found {
// we need to have seen exactly the same attempts
return false
}
if thEntry.Failure == nil {
// we need all TH attempts to fail
return false
}
}
// only if we have had some addresses to connect
if len(tk.TCPConnect) > 0 && len(tk.Control.TCPConnect) > 0 {
logger.Info("website likely down: all TCP connect attempts failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullAllConnectsFailed
return true
}
// safety net in case we're passed empty lists/maps
return false
} | identifier_body |
analysiscore.go | package webconnectivitylte
import (
"fmt"
"net"
"net/url"
"github.com/ooni/probe-engine/pkg/model"
"github.com/ooni/probe-engine/pkg/netxlite"
)
//
// Core analysis
//
// These flags determine the context of TestKeys.Blocking. However, while .Blocking
// is an enumeration, these flags allow to describe multiple blocking methods.
const (
// analysisFlagDNSBlocking indicates there's blocking at the DNS level.
analysisFlagDNSBlocking = 1 << iota
// analysisFlagTCPIPBlocking indicates there's blocking at the TCP/IP level.
analysisFlagTCPIPBlocking
// analysisFlagTLSBlocking indicates there were TLS issues.
analysisFlagTLSBlocking
// analysisFlagHTTPBlocking indicates there was an HTTP failure.
analysisFlagHTTPBlocking
// analysisFlagHTTPDiff indicates there's an HTTP diff.
analysisFlagHTTPDiff
// analysisFlagSuccess indicates we did not detect any blocking.
analysisFlagSuccess
)
// analysisToplevel is the toplevel function that analyses the results
// of the experiment once all network tasks have completed.
//
// The ultimate objective of this function is to set the toplevel flags
// used by the backend to score results. These flags are:
//
// - blocking (and x_blocking_flags) which contain information about
// the detected blocking method (or methods);
//
// - accessible which contains information on whether we think we
// could access the resource somehow.
//
// Originally, Web Connectivity only had a blocking scalar value so
// we could see ourselves in one of the following cases:
//
// +----------+------------+--------------------------+
// | Blocking | Accessible | Meaning |
// +----------+------------+--------------------------+
// | null | null | Probe analysis error |
// +----------+------------+--------------------------+
// | false | true | We detected no blocking |
// +----------+------------+--------------------------+
// | "..." | false | We detected blocking |
// +----------+------------+--------------------------+
//
// While it would be possible in this implementation, which has a granular
// definition of blocking (x_blocking_flags), to set accessible to mean
// whether we could access the resource in some conditions, it seems quite
// dangerous to deviate from the original behavior.
//
// Our code will NEVER set .Blocking or .Accessible outside of this function
// and we'll instead rely on XBlockingFlags. This function's job is to call
// other functions that compute the .XBlockingFlags and then to assign the value
// of .Blocking and .Accessible from the .XBlockingFlags value.
//
// Accordingly, this is how we map the value of the .XBlockingFlags to the
// values of .Blocking and .Accessible:
//
// +--------------------------------------+----------------+-------------+
// | .BlockingFlags | .Blocking | .Accessible |
// +--------------------------------------+----------------+-------------+
// | (& DNSBlocking) != 0 | "dns" | false |
// +--------------------------------------+----------------+-------------+
// | (& TCPIPBlocking) != 0 | "tcp_ip" | false |
// +--------------------------------------+----------------+-------------+
// | (& (TLSBlocking|HTTPBlocking)) != 0 | "http-failure" | false |
// +--------------------------------------+----------------+-------------+
// | (& HTTPDiff) != 0 | "http-diff" | false |
// +--------------------------------------+----------------+-------------+
// | == FlagSuccess | false | true |
// +--------------------------------------+----------------+-------------+
// | otherwise | null | null |
// +--------------------------------------+----------------+-------------+
//
// It's a very simple rule, that should preserve previous semantics.
//
// As an improvement over Web Connectivity v0.4, we also attempt to identify
// special subcases of a null, null result to provide the user with more information.
func (tk *TestKeys) analysisToplevel(logger model.Logger) {
// Since we run after all tasks have completed (or so we assume) we're
// not going to use any form of locking here.
// these functions compute the value of XBlockingFlags
tk.analysisDNSToplevel(logger)
tk.analysisTCPIPToplevel(logger)
tk.analysisTLSToplevel(logger)
tk.analysisHTTPToplevel(logger)
// now, let's determine .Accessible and .Blocking
switch {
case (tk.BlockingFlags & analysisFlagDNSBlocking) != 0:
tk.Blocking = "dns"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & analysisFlagTCPIPBlocking) != 0:
tk.Blocking = "tcp_ip"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & (analysisFlagTLSBlocking | analysisFlagHTTPBlocking)) != 0:
tk.Blocking = "http-failure"
tk.Accessible = false
logger.Warnf("ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & analysisFlagHTTPDiff) != 0:
tk.Blocking = "http-diff"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case tk.BlockingFlags == analysisFlagSuccess:
tk.Blocking = false
tk.Accessible = true
logger.Infof(
"ACCESSIBLE: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
default:
// NullNull remediation
//
// If we arrive here, the measurement has failed. However, there are a
// bunch of cases where we can still explain what happened by applying specific
// algorithms to detect edge cases.
//
// The relative order of these algorithsm matters: swapping them without
// careful consideration may produce unexpected results.
if tk.analysisNullNullDetectTHDNSNXDOMAIN(logger) {
tk.Blocking = "dns"
tk.Accessible = false
logger.Warnf(
"RESIDUAL_DNS_BLOCKING: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectNoAddrs(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_DNS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectAllConnectsFailed(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_TCP: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectTLSMisconfigured(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_TLS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectSuccessfulHTTPS(logger) {
tk.Blocking = false
tk.Accessible = true
logger.Infof(
"ACCESSIBLE_HTTPS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
tk.Blocking = nil
tk.Accessible = nil
logger.Warnf(
"UNKNOWN: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
}
}
const (
// analysisFlagNullNullNoAddrs indicates neither the probe nor the TH were
// able to get any IP addresses from any resolver.
analysisFlagNullNullNoAddrs = 1 << iota
// analysisFlagNullNullAllConnectsFailed indicates that all the connect
// attempts failed both in the probe and in the test helper.
analysisFlagNullNullAllConnectsFailed
// analysisFlagNullNullTLSMisconfigured indicates that all the TLS handshake
// attempts failed both in the probe and in the test helper.
analysisFlagNullNullTLSMisconfigured
// analysisFlagNullNullSuccessfulHTTPS indicates that we had no TH data
// but all the HTTP requests used always HTTPS and never failed.
analysisFlagNullNullSuccessfulHTTPS
// analysisFlagNullNullNXDOMAINWithCensorship indicates that we have
// seen no error with local DNS resolutions but, at the same time, the
// control failed with NXDOMAIN. When this happens, we probably have
// DNS interception locally, so all cleartext queries return the same
// bogus answers based on a rule applied on a now-expired domain.
analysisFlagNullNullNXDOMAINWithCensorship
)
// analysisNullNullDetectTHDNSNXDOMAIN runs when .Blocking = nil and
// .Accessible = nil to flag cases in which the probe resolved addresses
// but the TH thinks the address is actually NXDOMAIN. When this
// happens, we're going to give priority to the TH's DoH observation.
//
// See https://github.com/ooni/probe/issues/2308.
func (tk *TestKeys) analysisNullNullDetectTHDNSNXDOMAIN(logger model.Logger) bool {
if tk.Control == nil {
// we need the control info to continue
return false
}
// we need some cleartext successes
var cleartextSuccesses int
for _, query := range tk.Queries {
if query.Engine == "doh" {
// we skip DoH entries because they are encrypted and
// cannot be manipulated by censors
continue
}
if query.Failure != nil {
// we should stop the algorithm in case we've got any
// hard failure, but `dns_no_answer` is acceptable because
// actually it might be there's only A censorship and the
// AAAA query instead returns `dns_no_answer`.
//
// See https://explorer.ooni.org/measurement/20220914T073558Z_webconnectivity_IT_30722_n1_wroXRsBGYx0x9h0q?input=http%3A%2F%2Fitsat.info
// for a case where this was happening and fooled us
// causing us to conclude that the website was just down.
if *query.Failure == netxlite.FailureDNSNoAnswer {
continue
}
return false
}
cleartextSuccesses++
}
if cleartextSuccesses <= 0 {
return false
}
// if the TH failed with its own string representing the NXDOMAIN
// error, then we've detected our corner case
failure := tk.Control.DNS.Failure
if failure != nil && *failure == model.THDNSNameError {
logger.Info("DNS censorship: local DNS success with remote NXDOMAIN")
tk.NullNullFlags |= analysisFlagNullNullNXDOMAINWithCensorship
return true
}
// otherwise it's something else
return false
}
// analysisNullNullDetectSuccessfulHTTPS runs when .Blocking = nil and
// .Accessible = nil to flag successul HTTPS measurements chains that
// occurred regardless of whatever else could have gone wrong.
//
// We need all requests to be HTTPS because an HTTP request in the
// chain breaks the ~reasonable assumption that our custom CA bundle
// is enough to protect against MITM. Of course, when we use this
// algorithm, we're not well positioned to flag server-side blocking.
//
// Version 0.4 of the probe implemented a similar algorithm, which
// however ran before other checks. Version, 0.5 on the contrary, runs
// this algorithm if any other heuristics failed.
//
// See https://github.com/ooni/probe/issues/2307 for more info.
func (tk *TestKeys) analysisNullNullDetectSuccessfulHTTPS(logger model.Logger) bool {
// the chain is sorted from most recent to oldest but it does
// not matter much since we need to walk all of it.
//
// CAVEAT: this code assumes we have a single request chain
// inside the .Requests field, which seems fine because it's
// what Web Connectivity should be doing.
for _, req := range tk.Requests {
URL, err := url.Parse(req.Request.URL)
if err != nil {
// this looks like a bug
return false
}
if URL.Scheme != "https" {
// the whole chain must be HTTPS
return false
}
if req.Failure != nil {
// they must all succeed
return false
}
switch req.Response.Code {
case 200, 301, 302, 307, 308:
default:
// the response must be successful or redirect
return false
}
}
// only if we have at least one request
if len(tk.Requests) > 0 {
logger.Info("website likely accessible: seen successful chain of HTTPS transactions")
tk.NullNullFlags |= analysisFlagNullNullSuccessfulHTTPS
return true
}
// safety net otherwise
return false
}
// analysisNullNullDetectTLSMisconfigured runs when .Blocking = nil and
// .Accessible = nil to check whether by chance we had TLS issues both on the
// probe side and on the TH side. This problem of detecting misconfiguration
// of the server's TLS stack is discussed at https://github.com/ooni/probe/issues/2300.
func (tk *TestKeys) | (logger model.Logger) bool {
if tk.Control == nil || tk.Control.TLSHandshake == nil {
// we need TLS control data to say we are in this case
return false
}
for _, entry := range tk.TLSHandshakes {
if entry.Failure == nil {
// we need all attempts to fail to flag this state
return false
}
thEntry, found := tk.Control.TLSHandshake[entry.Address]
if !found {
// we need to have seen exactly the same attempts
return false
}
if thEntry.Failure == nil {
// we need all TH attempts to fail
return false
}
if *entry.Failure != *thEntry.Failure {
// we need to see the same failure to be sure, which it's
// possible to do for TLS because we have the same definition
// of failure rather than being constrained by the legacy
// implementation of the test helper and Twisted names
//
// TODO(bassosimone): this is the obvious algorithm but maybe
// it's a bit too strict and there is a more lax version of
// the same algorithm that it's still acceptable?
return false
}
}
// only if we have had some TLS handshakes for both probe and TH
if len(tk.TLSHandshakes) > 0 && len(tk.Control.TLSHandshake) > 0 {
logger.Info("website likely down: all TLS handshake attempts failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullTLSMisconfigured
return true
}
// safety net in case we've got wrong input
return false
}
// analysisNullNullDetectAllConnectsFailed attempts to detect whether we are in
// the .Blocking = nil, .Accessible = nil case because all the TCP connect
// attempts by either the probe or the TH have failed.
//
// See https://explorer.ooni.org/measurement/20220911T105037Z_webconnectivity_IT_30722_n1_ruzuQ219SmIO9SrT?input=https://doh.centraleu.pi-dns.com/dns-query?dns=q80BAAABAAAAAAAAA3d3dwdleGFtcGxlA2NvbQAAAQAB
// for an example measurement with this behavior.
//
// See https://github.com/ooni/probe/issues/2299 for the reference issue.
func (tk *TestKeys) analysisNullNullDetectAllConnectsFailed(logger model.Logger) bool {
if tk.Control == nil {
// we need control data to say we're in this case
return false
}
for _, entry := range tk.TCPConnect {
if entry.Status.Failure == nil {
// we need all connect attempts to fail
return false
}
epnt := net.JoinHostPort(entry.IP, fmt.Sprintf("%d", entry.Port))
thEntry, found := tk.Control.TCPConnect[epnt]
if !found {
// we need to have seen exactly the same attempts
return false
}
if thEntry.Failure == nil {
// we need all TH attempts to fail
return false
}
}
// only if we have had some addresses to connect
if len(tk.TCPConnect) > 0 && len(tk.Control.TCPConnect) > 0 {
logger.Info("website likely down: all TCP connect attempts failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullAllConnectsFailed
return true
}
// safety net in case we're passed empty lists/maps
return false
}
// analysisNullNullDetectNoAddrs attempts to see whether we
// ended up into the .Blocking = nil, .Accessible = nil case because
// the domain is expired and all queries returned no addresses.
//
// See https://github.com/ooni/probe/issues/2290 for further
// documentation about the issue we're solving here.
//
// It would be tempting to check specifically for NXDOMAIN here, but we
// know it is problematic do that. In fact, on Android the getaddrinfo
// resolver always returns EAI_NODATA on error, regardless of the actual
// error that may have occurred in the Android DNS backend.
//
// See https://github.com/ooni/probe/issues/2029 for more information
// on Android's getaddrinfo behavior.
func (tk *TestKeys) analysisNullNullDetectNoAddrs(logger model.Logger) bool {
if tk.Control == nil {
// we need control data to say we're in this case
return false
}
for _, query := range tk.Queries {
if len(query.Answers) > 0 {
// when a query has answers, we're not in the NoAddresses case
return false
}
}
if len(tk.TCPConnect) > 0 {
// if we attempted TCP connect, we're not in the NoAddresses case
return false
}
if len(tk.TLSHandshakes) > 0 {
// if we attempted TLS handshakes, we're not in the NoAddresses case
return false
}
if len(tk.Control.DNS.Addrs) > 0 {
// when the TH resolved addresses, we're not in the NoAddresses case
return false
}
if len(tk.Control.TCPConnect) > 0 {
// when the TH used addresses, we're not in the NoAddresses case
return false
}
logger.Infof("website likely down: all DNS lookups failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullNoAddrs
return true
}
| analysisNullNullDetectTLSMisconfigured | identifier_name |
analysiscore.go | package webconnectivitylte
import (
"fmt"
"net"
"net/url"
"github.com/ooni/probe-engine/pkg/model"
"github.com/ooni/probe-engine/pkg/netxlite"
)
//
// Core analysis
//
// These flags determine the context of TestKeys.Blocking. However, while .Blocking
// is an enumeration, these flags allow to describe multiple blocking methods.
const (
// analysisFlagDNSBlocking indicates there's blocking at the DNS level.
analysisFlagDNSBlocking = 1 << iota
// analysisFlagTCPIPBlocking indicates there's blocking at the TCP/IP level.
analysisFlagTCPIPBlocking
// analysisFlagTLSBlocking indicates there were TLS issues.
analysisFlagTLSBlocking
// analysisFlagHTTPBlocking indicates there was an HTTP failure.
analysisFlagHTTPBlocking
// analysisFlagHTTPDiff indicates there's an HTTP diff.
analysisFlagHTTPDiff
// analysisFlagSuccess indicates we did not detect any blocking.
analysisFlagSuccess
)
// analysisToplevel is the toplevel function that analyses the results
// of the experiment once all network tasks have completed.
//
// The ultimate objective of this function is to set the toplevel flags
// used by the backend to score results. These flags are:
//
// - blocking (and x_blocking_flags) which contain information about
// the detected blocking method (or methods);
//
// - accessible which contains information on whether we think we
// could access the resource somehow.
//
// Originally, Web Connectivity only had a blocking scalar value so
// we could see ourselves in one of the following cases:
//
// +----------+------------+--------------------------+
// | Blocking | Accessible | Meaning |
// +----------+------------+--------------------------+
// | null | null | Probe analysis error |
// +----------+------------+--------------------------+
// | false | true | We detected no blocking |
// +----------+------------+--------------------------+
// | "..." | false | We detected blocking |
// +----------+------------+--------------------------+
//
// While it would be possible in this implementation, which has a granular
// definition of blocking (x_blocking_flags), to set accessible to mean
// whether we could access the resource in some conditions, it seems quite
// dangerous to deviate from the original behavior.
//
// Our code will NEVER set .Blocking or .Accessible outside of this function
// and we'll instead rely on XBlockingFlags. This function's job is to call
// other functions that compute the .XBlockingFlags and then to assign the value
// of .Blocking and .Accessible from the .XBlockingFlags value.
//
// Accordingly, this is how we map the value of the .XBlockingFlags to the
// values of .Blocking and .Accessible:
//
// +--------------------------------------+----------------+-------------+
// | .BlockingFlags | .Blocking | .Accessible |
// +--------------------------------------+----------------+-------------+
// | (& DNSBlocking) != 0 | "dns" | false |
// +--------------------------------------+----------------+-------------+
// | (& TCPIPBlocking) != 0 | "tcp_ip" | false |
// +--------------------------------------+----------------+-------------+
// | (& (TLSBlocking|HTTPBlocking)) != 0 | "http-failure" | false |
// +--------------------------------------+----------------+-------------+
// | (& HTTPDiff) != 0 | "http-diff" | false |
// +--------------------------------------+----------------+-------------+
// | == FlagSuccess | false | true |
// +--------------------------------------+----------------+-------------+
// | otherwise | null | null |
// +--------------------------------------+----------------+-------------+
//
// It's a very simple rule, that should preserve previous semantics.
//
// As an improvement over Web Connectivity v0.4, we also attempt to identify
// special subcases of a null, null result to provide the user with more information.
func (tk *TestKeys) analysisToplevel(logger model.Logger) {
// Since we run after all tasks have completed (or so we assume) we're
// not going to use any form of locking here.
// these functions compute the value of XBlockingFlags
tk.analysisDNSToplevel(logger)
tk.analysisTCPIPToplevel(logger)
tk.analysisTLSToplevel(logger)
tk.analysisHTTPToplevel(logger)
// now, let's determine .Accessible and .Blocking
switch {
case (tk.BlockingFlags & analysisFlagDNSBlocking) != 0:
tk.Blocking = "dns"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & analysisFlagTCPIPBlocking) != 0:
tk.Blocking = "tcp_ip"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & (analysisFlagTLSBlocking | analysisFlagHTTPBlocking)) != 0:
tk.Blocking = "http-failure"
tk.Accessible = false
logger.Warnf("ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case (tk.BlockingFlags & analysisFlagHTTPDiff) != 0:
tk.Blocking = "http-diff"
tk.Accessible = false
logger.Warnf(
"ANOMALY: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
case tk.BlockingFlags == analysisFlagSuccess:
tk.Blocking = false
tk.Accessible = true
logger.Infof(
"ACCESSIBLE: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
default:
// NullNull remediation
//
// If we arrive here, the measurement has failed. However, there are a
// bunch of cases where we can still explain what happened by applying specific
// algorithms to detect edge cases.
//
// The relative order of these algorithsm matters: swapping them without
// careful consideration may produce unexpected results.
if tk.analysisNullNullDetectTHDNSNXDOMAIN(logger) {
tk.Blocking = "dns"
tk.Accessible = false
logger.Warnf(
"RESIDUAL_DNS_BLOCKING: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectNoAddrs(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_DNS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectAllConnectsFailed(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_TCP: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectTLSMisconfigured(logger) {
tk.Blocking = false
tk.Accessible = false
logger.Infof(
"WEBSITE_DOWN_TLS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
if tk.analysisNullNullDetectSuccessfulHTTPS(logger) {
tk.Blocking = false
tk.Accessible = true
logger.Infof(
"ACCESSIBLE_HTTPS: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
return
}
tk.Blocking = nil
tk.Accessible = nil
logger.Warnf(
"UNKNOWN: flags=%d, accessible=%+v, blocking=%+v",
tk.BlockingFlags, tk.Accessible, tk.Blocking,
)
}
}
const (
// analysisFlagNullNullNoAddrs indicates neither the probe nor the TH were
// able to get any IP addresses from any resolver.
analysisFlagNullNullNoAddrs = 1 << iota
// analysisFlagNullNullAllConnectsFailed indicates that all the connect
// attempts failed both in the probe and in the test helper.
analysisFlagNullNullAllConnectsFailed
// analysisFlagNullNullTLSMisconfigured indicates that all the TLS handshake
// attempts failed both in the probe and in the test helper.
analysisFlagNullNullTLSMisconfigured
// analysisFlagNullNullSuccessfulHTTPS indicates that we had no TH data
// but all the HTTP requests used always HTTPS and never failed.
analysisFlagNullNullSuccessfulHTTPS
// analysisFlagNullNullNXDOMAINWithCensorship indicates that we have
// seen no error with local DNS resolutions but, at the same time, the
// control failed with NXDOMAIN. When this happens, we probably have
// DNS interception locally, so all cleartext queries return the same
// bogus answers based on a rule applied on a now-expired domain.
analysisFlagNullNullNXDOMAINWithCensorship
)
// analysisNullNullDetectTHDNSNXDOMAIN runs when .Blocking = nil and
// .Accessible = nil to flag cases in which the probe resolved addresses
// but the TH thinks the address is actually NXDOMAIN. When this
// happens, we're going to give priority to the TH's DoH observation.
//
// See https://github.com/ooni/probe/issues/2308.
func (tk *TestKeys) analysisNullNullDetectTHDNSNXDOMAIN(logger model.Logger) bool {
if tk.Control == nil {
// we need the control info to continue
return false
}
// we need some cleartext successes
var cleartextSuccesses int
for _, query := range tk.Queries {
if query.Engine == "doh" {
// we skip DoH entries because they are encrypted and
// cannot be manipulated by censors
continue
}
if query.Failure != nil {
// we should stop the algorithm in case we've got any
// hard failure, but `dns_no_answer` is acceptable because
// actually it might be there's only A censorship and the
// AAAA query instead returns `dns_no_answer`.
//
// See https://explorer.ooni.org/measurement/20220914T073558Z_webconnectivity_IT_30722_n1_wroXRsBGYx0x9h0q?input=http%3A%2F%2Fitsat.info
// for a case where this was happening and fooled us
// causing us to conclude that the website was just down.
if *query.Failure == netxlite.FailureDNSNoAnswer {
continue
}
return false
}
cleartextSuccesses++
}
if cleartextSuccesses <= 0 {
return false
}
// if the TH failed with its own string representing the NXDOMAIN
// error, then we've detected our corner case
failure := tk.Control.DNS.Failure
if failure != nil && *failure == model.THDNSNameError {
logger.Info("DNS censorship: local DNS success with remote NXDOMAIN")
tk.NullNullFlags |= analysisFlagNullNullNXDOMAINWithCensorship
return true
}
// otherwise it's something else
return false
}
// analysisNullNullDetectSuccessfulHTTPS runs when .Blocking = nil and
// .Accessible = nil to flag successul HTTPS measurements chains that
// occurred regardless of whatever else could have gone wrong.
//
// We need all requests to be HTTPS because an HTTP request in the
// chain breaks the ~reasonable assumption that our custom CA bundle
// is enough to protect against MITM. Of course, when we use this
// algorithm, we're not well positioned to flag server-side blocking.
//
// Version 0.4 of the probe implemented a similar algorithm, which
// however ran before other checks. Version, 0.5 on the contrary, runs
// this algorithm if any other heuristics failed.
//
// See https://github.com/ooni/probe/issues/2307 for more info.
func (tk *TestKeys) analysisNullNullDetectSuccessfulHTTPS(logger model.Logger) bool {
// the chain is sorted from most recent to oldest but it does
// not matter much since we need to walk all of it.
//
// CAVEAT: this code assumes we have a single request chain
// inside the .Requests field, which seems fine because it's
// what Web Connectivity should be doing.
for _, req := range tk.Requests {
URL, err := url.Parse(req.Request.URL)
if err != nil {
// this looks like a bug
return false
}
if URL.Scheme != "https" {
// the whole chain must be HTTPS
return false
}
if req.Failure != nil {
// they must all succeed
return false
}
switch req.Response.Code {
case 200, 301, 302, 307, 308:
default:
// the response must be successful or redirect
return false
}
}
// only if we have at least one request
if len(tk.Requests) > 0 {
logger.Info("website likely accessible: seen successful chain of HTTPS transactions")
tk.NullNullFlags |= analysisFlagNullNullSuccessfulHTTPS
return true
}
// safety net otherwise
return false
}
// analysisNullNullDetectTLSMisconfigured runs when .Blocking = nil and
// .Accessible = nil to check whether by chance we had TLS issues both on the
// probe side and on the TH side. This problem of detecting misconfiguration
// of the server's TLS stack is discussed at https://github.com/ooni/probe/issues/2300.
func (tk *TestKeys) analysisNullNullDetectTLSMisconfigured(logger model.Logger) bool {
if tk.Control == nil || tk.Control.TLSHandshake == nil {
// we need TLS control data to say we are in this case
return false
}
for _, entry := range tk.TLSHandshakes {
if entry.Failure == nil {
// we need all attempts to fail to flag this state
return false
}
thEntry, found := tk.Control.TLSHandshake[entry.Address]
if !found {
// we need to have seen exactly the same attempts
return false
}
if thEntry.Failure == nil {
// we need all TH attempts to fail
return false
}
if *entry.Failure != *thEntry.Failure {
// we need to see the same failure to be sure, which it's
// possible to do for TLS because we have the same definition
// of failure rather than being constrained by the legacy
// implementation of the test helper and Twisted names
//
// TODO(bassosimone): this is the obvious algorithm but maybe
// it's a bit too strict and there is a more lax version of
// the same algorithm that it's still acceptable?
return false
}
}
// only if we have had some TLS handshakes for both probe and TH
if len(tk.TLSHandshakes) > 0 && len(tk.Control.TLSHandshake) > 0 {
logger.Info("website likely down: all TLS handshake attempts failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullTLSMisconfigured
return true
}
// safety net in case we've got wrong input
return false
}
// analysisNullNullDetectAllConnectsFailed attempts to detect whether we are in
// the .Blocking = nil, .Accessible = nil case because all the TCP connect
// attempts by either the probe or the TH have failed.
//
// See https://explorer.ooni.org/measurement/20220911T105037Z_webconnectivity_IT_30722_n1_ruzuQ219SmIO9SrT?input=https://doh.centraleu.pi-dns.com/dns-query?dns=q80BAAABAAAAAAAAA3d3dwdleGFtcGxlA2NvbQAAAQAB
// for an example measurement with this behavior.
//
// See https://github.com/ooni/probe/issues/2299 for the reference issue.
func (tk *TestKeys) analysisNullNullDetectAllConnectsFailed(logger model.Logger) bool {
if tk.Control == nil {
// we need control data to say we're in this case
return false
}
for _, entry := range tk.TCPConnect {
if entry.Status.Failure == nil |
epnt := net.JoinHostPort(entry.IP, fmt.Sprintf("%d", entry.Port))
thEntry, found := tk.Control.TCPConnect[epnt]
if !found {
// we need to have seen exactly the same attempts
return false
}
if thEntry.Failure == nil {
// we need all TH attempts to fail
return false
}
}
// only if we have had some addresses to connect
if len(tk.TCPConnect) > 0 && len(tk.Control.TCPConnect) > 0 {
logger.Info("website likely down: all TCP connect attempts failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullAllConnectsFailed
return true
}
// safety net in case we're passed empty lists/maps
return false
}
// analysisNullNullDetectNoAddrs attempts to see whether we
// ended up into the .Blocking = nil, .Accessible = nil case because
// the domain is expired and all queries returned no addresses.
//
// See https://github.com/ooni/probe/issues/2290 for further
// documentation about the issue we're solving here.
//
// It would be tempting to check specifically for NXDOMAIN here, but we
// know it is problematic do that. In fact, on Android the getaddrinfo
// resolver always returns EAI_NODATA on error, regardless of the actual
// error that may have occurred in the Android DNS backend.
//
// See https://github.com/ooni/probe/issues/2029 for more information
// on Android's getaddrinfo behavior.
func (tk *TestKeys) analysisNullNullDetectNoAddrs(logger model.Logger) bool {
if tk.Control == nil {
// we need control data to say we're in this case
return false
}
for _, query := range tk.Queries {
if len(query.Answers) > 0 {
// when a query has answers, we're not in the NoAddresses case
return false
}
}
if len(tk.TCPConnect) > 0 {
// if we attempted TCP connect, we're not in the NoAddresses case
return false
}
if len(tk.TLSHandshakes) > 0 {
// if we attempted TLS handshakes, we're not in the NoAddresses case
return false
}
if len(tk.Control.DNS.Addrs) > 0 {
// when the TH resolved addresses, we're not in the NoAddresses case
return false
}
if len(tk.Control.TCPConnect) > 0 {
// when the TH used addresses, we're not in the NoAddresses case
return false
}
logger.Infof("website likely down: all DNS lookups failed for both probe and TH")
tk.NullNullFlags |= analysisFlagNullNullNoAddrs
return true
}
| {
// we need all connect attempts to fail
return false
} | conditional_block |
lc0_analyzer.py | #!/usr/bin/env python3
#
# lc0_analyzer.py --help
#
# See https://github.com/killerducky/lc0_analyzer/README.md for description
#
# See example.sh
#
import chess
import chess.pgn
import chess.uci
import chess.svg
import re
import matplotlib.pyplot as plt
import matplotlib.axes
import pandas as pd
import numpy as np
import os
import math
import argparse
from collections import OrderedDict
import svgutils.transform as sg
from svgutils.compose import *
#import cairosvg
class Lc0InfoHandler(chess.uci.InfoHandler):
def __init__(self, board):
super().__init__()
self.reset()
self.board = board
def reset(self):
self.strings = []
self.moves = {}
def post_info(self):
if "string" in self.info:
#self.strings.append(self.info["string"])
# "c7f4 (268 ) N: 40 (+37) (P: 20.23%) (Q: -0.04164) (U: 0.08339) (Q+U: 0.04175) (V: 0.1052)"
(move, info) = self.info["string"].split(maxsplit=1)
move = self.board.san(self.board.parse_uci(move))
self.strings.append("%6s %s" % (move, self.info["string"]))
super().post_info() # Release the lock
def q2cp(q):
return 290.680623072 * math.tan(1.548090806 * q) / 100.0
def cp2q(cp):
return math.atan(cp*100.0/290.680623072)/1.548090806
def set_q2cp_ticks(ax):
ax.set_ylim(-1, 1)
ax2 = ax.twinx()
ax2.set_ylim(-1, 1)
cp_vals = [-128, -8, -4, -2, -1, 0, 1, 2, 4, 8, 128]
q_vals = [cp2q(x) for x in cp_vals]
ax2.set_yticks(q_vals)
ax2.set_yticklabels(cp_vals)
ax2.set_ylabel("CP")
def parse_info(info):
# "INFO: TN: 1 Qf4 c7f4 (268 ) N: 40 (+37) (P: 20.23%) (Q: -0.04164) (U: 0.08339) (Q+U: 0.04175) (V: 0.1052)"
m = re.match("^INFO:", info)
if not m: return None
(_, _, TN, sanmove, ucimove, info) = info.split(maxsplit=5)
floats = re.split(r"[^-.\d]+", info)
(_, _, N, _, P, Q, U, Q_U, V, _) = floats
move_infos = {}
move_infos["TN"] = int(TN)
move_infos["sanmove"] = sanmove
move_infos["ucimove"] = ucimove
move_infos["N"] = int(N)
move_infos["P"] = float(P)
move_infos["Q"] = float(Q)
move_infos["U"] = float(U)
if V == "-.----": V = 0
move_infos["V"] = float(V)
return move_infos
def getgame(pgn_filename, gamenum):
with open(pgn_filename) as pgn:
# find the game (probably a better way to do this?)
game = None
while True:
game = chess.pgn.read_game(pgn)
if not game:
break
if not gamenum or game.headers["Round"] == gamenum:
break
if not game:
raise("Game not found")
return game
# plynum = 0 = after White's move 1
# plynum = 1 = after Black's move 1
# plynum = 2(M-1)+0 = after White's move M
# plynum = 2(M-1)+1 = after Black's move M
def get_board(pgn_filename, gamenum, plynum):
game = getgame(pgn_filename, gamenum)
info = ""
info += game.headers["White"] + "\n"
info += game.headers["Black"] + "\n"
nodes = list(game.mainline())
# There must be a better way to get the list of moves up to plynum P?
ucistr = ""
sanstr = ""
if plynum >= 0:
for node in nodes[0:plynum+1]:
ucistr += " " + str(node.move)
sanstr += " " + str(node.san())
info += "position startpos moves" + ucistr + "\n"
info += sanstr + "\n" # TODO: Add move numbers. Surely python-chess can do this for me?
# Something like this will work...
#game = getgame(pgn_filename, gamenum)
#end = game.end()
#board = end.board()
#print(game.board().variation_san(board.move_stack))
node = nodes[plynum]
board = node.board()
fig = chess.svg.board(board=board, lastmove=node.move)
else:
info += "position startpos\n"
board = chess.Board()
fig = chess.svg.board(board=board)
return (board, fig, info)
# TODO:
# gamenum/plynum vs fen is a mess right now...
def analyze_and_plot(engine, info_handler, pgn_filename, gamenum, plynum, fen=None):
analyze(engine, info_handler, pgn_filename, gamenum, plynum, fen)
plot(pgn_filename, gamenum, plynum)
def analyze(engine, info_handler, pgn_filename, gamenum, plynum, fen=None):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+3)/2.0)
datafilename = "%s/data.html" % (savedir)
if os.path.exists(datafilename):
return
if not os.path.exists(savedir):
os.makedirs(savedir)
outfile = open(datafilename, "w")
outfile.write("""
<img src="board.svg" height="100%"/> <br>
<img src="Q.svg"/> <br>
<img src="Q2.svg"/> <br>
<img src="N.svg"/> <br>
<img src="P.svg"/> <br>
<pre>""")
if fen:
board = chess.Board(fen)
fig = chess.svg.board(board=board)
info = "position %s\n" % (fen)
else:
(board, fig, info) = get_board(pgn_filename, gamenum, plynum)
outfile.write(info)
outfile.write(board.fen() + "\n")
outfile.write(str(board) + "\n")
open("%s/board.svg" % (savedir), "w").write(fig)
outfile.write(str(LC0) + "\n")
engine.uci()
outfile.write(engine.name + "\n")
# Reset engine search tree, but not engine NNCache, by setting different position
engine.position(chess.Board())
info_handler.reset()
info_handler.board = chess.Board()
engine.go(nodes=1)
for nodes in NODES:
# Do our position now
info_handler.reset()
info_handler.board = board
engine.position(board)
engine.go(nodes=nodes)
for s in info_handler.strings:
outfile.write("INFO: TN: %s %s\n" % (nodes, s))
outfile.write("</pre>\n")
outfile.close()
def plot(pgn_filename, gamenum, plynum):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+3)/2.0)
# Parse data into pandas
move_infos = []
with open("%s/data.html" % savedir) as infile:
for line in infile.readlines():
info = parse_info(line)
if not info: continue
move_infos.append(info)
df = pd.DataFrame(move_infos)
# Filter top 4 moves, and get P
TNmax = df["TN"].max()
best = df[df["TN"] == TNmax].sort_values("N", ascending=False).head(NUM_MOVES)
moves = list(best["sanmove"])
bestdf = df.loc[df["sanmove"].isin(moves)]
# Plots
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="TN", y="N", logx=True, logy=True, ax=ax)
ax.legend(moves)
plt.title("Child Node Visits vs Total Nodes")
plt.xlabel("Total Nodes")
plt.ylabel("Child Nodes")
plt.savefig("%s/N.svg" % (savedir))
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="TN", y="Q", logx=True, logy=False, ax=ax)
ax.legend(moves)
plt.title("Value vs Total Nodes")
plt.xlabel("Total Nodes")
plt.ylabel("Value")
set_q2cp_ticks(ax)
plt.savefig("%s/Q.svg" % (savedir))
# This plot can have multiple entries with the same index="N", so pivot fails.
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="N", y="Q", logx=True, logy=False, ax=ax)
ax.legend(moves)
plt.title("Value vs Child Node Visits")
plt.xlabel("Child Node Visits")
plt.ylabel("Value")
set_q2cp_ticks(ax)
plt.savefig("%s/Q2.svg" % (savedir))
best.plot.bar(x="sanmove", y="P", legend=False)
plt.xlabel("")
plt.title("Policy")
plt.savefig("%s/P.svg" % (savedir))
plt.close("all")
def analyze_game(pgn_filename, gamenum, plynum, plies):
try:
# In case you have the data files already, but no lc0 exe.
engine = chess.uci.popen_engine(LC0)
info_handler = Lc0InfoHandler(None)
engine.info_handlers.append(info_handler)
except:
print("Warning: Could not open Lc0 engine.")
engine = None
info_handler = None
if not os.path.exists("plots"):
os.makedirs("plots")
outfile = open("plots/%s_%s_%0.3f_%s.html" % (pgn_filename, gamenum, (plynum+3)/2, plies), "w")
outfile.write('<table width="%d" height="500">\n' % (plies*300))
outfile.write("<tr>\n")
for svgfile in ("board", "Q", "Q2", "N", "P"):
for p in range(plies):
savedir = "%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+p+3)/2.0)
outfile.write('<td> <img src="%s/%s.svg" width="100%%"/> </td>\n' % (savedir, svgfile))
outfile.write("</tr>\n")
outfile.write("</tr>\n")
outfile.write("</table>\n")
outfile.close()
for p in range(plies):
analyze_and_plot(engine, info_handler, pgn_filename, gamenum, plynum+p)
if engine: engine.quit()
def analyze_fen(name, fen):
engine = chess.uci.popen_engine(LC0)
info_handler = Lc0InfoHandler(None)
engine.info_handlers.append(info_handler)
analyze_and_plot(engine, info_handler, name, 0, 0, fen)
engine.quit()
def compose(pgn_filename, gamenum, move_start, numplies, xsize=470, ysize=350, scale=0.6, scaleb=0.85):
outfile = open("plots/%s_%s_%05.1f_%03d.html" % (pgn_filename, gamenum, move_start, numplies), "w")
outfile.write('<table width="%d" height="500">\n' % (numplies*300))
outfile.write("<tr>\n")
for svgfile in ("board", "Q", "Q2", "N", "P"):
for move in np.arange(move_start, move_start+numplies/2, 0.5):
savedir = "%s_%s_%05.1f" % (pgn_filename, gamenum, move)
outfile.write('<td> <img src="%s/%s.svg" width="100%%"/> </td>\n' % (savedir, svgfile))
outfile.write("</tr>\n")
outfile.write("</tr>\n")
outfile.write("</table>\n")
outfile.close()
for move in np.arange(move_start, move_start+numplies/2, 0.5):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, move)
fig = Figure(xsize*scale, ysize*5*scale,
Panel(SVG("%s/board.svg" % (savedir)).scale(scale*scaleb)),
Panel(SVG("%s/Q.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/Q2.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/N.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/P.svg" % (savedir)).scale(scale)),
)
fig.tile(1,5)
fig.save("%s/all.svg" % (savedir))
panels = []
for move in np.arange(move_start, move_start+numplies/2, 0.5):
panels.append(Panel(SVG("plots/%s_%s_%05.1f/all.svg" % (pgn_filename, gamenum, move))))
fig = Figure(xsize*(numplies)*scale, ysize*5*scale, *panels)
fig.tile(numplies, 1)
filename = "plots/%s_%s_%05.1f_all" % (pgn_filename, gamenum, move_start)
fig.save("%s.svg" % (filename))
# cariosvg doesn't parse units "px"
#cairosvg.svg2png(url="%s.svg" % (filename), write_to="%s.png" % (filename))
if __name__ == "__main__":
usage_str = """
lc0_analyzer --pgn pgnfile --move 4.0 --numplies 6
lc0_analyzer --fen fenstring --numplies 6"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=usage_str)
parser.add_argument("--pgn", type=str, help="pgn file to process")
parser.add_argument("--round", type=str, help="round of pgn file, omit to pick first game")
parser.add_argument("--move", type=float, help="""
4.0 = Before white's 4th move (analyze position after black's 3rd move)
4.5 = Before blacks's 4th move (analyze position after white's 4th move)
""")
parser.add_argument("--numplies", type=int, help="number of plies to analyze")
parser.add_argument("--fen", type=str, help="number of plies to analyze")
parser.add_argument("--fen_desc", type=str, help="description of fen position")
parser.add_argument("--lc0", type=str, required=True, help="lc0 executable")
parser.add_argument("--w", type=str, required=True, help="path to weights")
parser.add_argument("--nodes", type=int, default=2**16, help="number of nodes to analyze for each position, will be rounded to nearest power of 2")
parser.add_argument("--topn", type=int, default=4, help="plot top N moves")
parser.add_argument("--ply_per_page", type=int, default=6, help="how many plies to put together in one .svg page")
args = parser.parse_args()
LC0 = [
args.lc0,
"-w", args.w,
"-l", "lc0log.txt",
#"-t", "1",
#"--max-prefetch=0",
#"--no-out-of-order-eval", # Was trying to be more accurate, but see issue #680
#"--collision-visit-limit=1",
#"--minibatch-size=1",
"--minibatch-size=16", # because of #680, use this compromise between accuracy and speed
"--smart-pruning-factor=0", # We will start and stop in loops, so disable pruning
"--nncache=1000000",
"--verbose-move-stats",
]
NODES = [ 2**n for n in range(round(math.log(args.nodes, 2))+1)]
NUM_MOVES = args.topn
if args.pgn:
game = getgame(args.pgn, args.round)
gamelen = len(game.end().board().move_stack)
plynum = round(args.move*2-3)
if plynum + args.numplies > gamelen:
|
analyze_game(args.pgn, args.round, round(args.move*2-3), args.numplies)
for m in np.arange(args.move, args.move+args.numplies/2, 0.5*args.ply_per_page):
compose(args.pgn, args.round, m, min(args.ply_per_page, min(args.ply_per_page, args.numplies-(m-args.move)*2)))
elif args.fen:
analyze_fen(args.fen_desc, args.fen)
#compose("plots/%s_%s" % (args.fen_desc, args.round), args.numplies)
else: raise(Exception("must provide --pgn or --fen"))
| args.numplies = gamelen-plynum | conditional_block |
lc0_analyzer.py | #!/usr/bin/env python3
| #
# See https://github.com/killerducky/lc0_analyzer/README.md for description
#
# See example.sh
#
import chess
import chess.pgn
import chess.uci
import chess.svg
import re
import matplotlib.pyplot as plt
import matplotlib.axes
import pandas as pd
import numpy as np
import os
import math
import argparse
from collections import OrderedDict
import svgutils.transform as sg
from svgutils.compose import *
#import cairosvg
class Lc0InfoHandler(chess.uci.InfoHandler):
def __init__(self, board):
super().__init__()
self.reset()
self.board = board
def reset(self):
self.strings = []
self.moves = {}
def post_info(self):
if "string" in self.info:
#self.strings.append(self.info["string"])
# "c7f4 (268 ) N: 40 (+37) (P: 20.23%) (Q: -0.04164) (U: 0.08339) (Q+U: 0.04175) (V: 0.1052)"
(move, info) = self.info["string"].split(maxsplit=1)
move = self.board.san(self.board.parse_uci(move))
self.strings.append("%6s %s" % (move, self.info["string"]))
super().post_info() # Release the lock
def q2cp(q):
return 290.680623072 * math.tan(1.548090806 * q) / 100.0
def cp2q(cp):
return math.atan(cp*100.0/290.680623072)/1.548090806
def set_q2cp_ticks(ax):
ax.set_ylim(-1, 1)
ax2 = ax.twinx()
ax2.set_ylim(-1, 1)
cp_vals = [-128, -8, -4, -2, -1, 0, 1, 2, 4, 8, 128]
q_vals = [cp2q(x) for x in cp_vals]
ax2.set_yticks(q_vals)
ax2.set_yticklabels(cp_vals)
ax2.set_ylabel("CP")
def parse_info(info):
# "INFO: TN: 1 Qf4 c7f4 (268 ) N: 40 (+37) (P: 20.23%) (Q: -0.04164) (U: 0.08339) (Q+U: 0.04175) (V: 0.1052)"
m = re.match("^INFO:", info)
if not m: return None
(_, _, TN, sanmove, ucimove, info) = info.split(maxsplit=5)
floats = re.split(r"[^-.\d]+", info)
(_, _, N, _, P, Q, U, Q_U, V, _) = floats
move_infos = {}
move_infos["TN"] = int(TN)
move_infos["sanmove"] = sanmove
move_infos["ucimove"] = ucimove
move_infos["N"] = int(N)
move_infos["P"] = float(P)
move_infos["Q"] = float(Q)
move_infos["U"] = float(U)
if V == "-.----": V = 0
move_infos["V"] = float(V)
return move_infos
def getgame(pgn_filename, gamenum):
with open(pgn_filename) as pgn:
# find the game (probably a better way to do this?)
game = None
while True:
game = chess.pgn.read_game(pgn)
if not game:
break
if not gamenum or game.headers["Round"] == gamenum:
break
if not game:
raise("Game not found")
return game
# plynum = 0 = after White's move 1
# plynum = 1 = after Black's move 1
# plynum = 2(M-1)+0 = after White's move M
# plynum = 2(M-1)+1 = after Black's move M
def get_board(pgn_filename, gamenum, plynum):
game = getgame(pgn_filename, gamenum)
info = ""
info += game.headers["White"] + "\n"
info += game.headers["Black"] + "\n"
nodes = list(game.mainline())
# There must be a better way to get the list of moves up to plynum P?
ucistr = ""
sanstr = ""
if plynum >= 0:
for node in nodes[0:plynum+1]:
ucistr += " " + str(node.move)
sanstr += " " + str(node.san())
info += "position startpos moves" + ucistr + "\n"
info += sanstr + "\n" # TODO: Add move numbers. Surely python-chess can do this for me?
# Something like this will work...
#game = getgame(pgn_filename, gamenum)
#end = game.end()
#board = end.board()
#print(game.board().variation_san(board.move_stack))
node = nodes[plynum]
board = node.board()
fig = chess.svg.board(board=board, lastmove=node.move)
else:
info += "position startpos\n"
board = chess.Board()
fig = chess.svg.board(board=board)
return (board, fig, info)
# TODO:
# gamenum/plynum vs fen is a mess right now...
def analyze_and_plot(engine, info_handler, pgn_filename, gamenum, plynum, fen=None):
analyze(engine, info_handler, pgn_filename, gamenum, plynum, fen)
plot(pgn_filename, gamenum, plynum)
def analyze(engine, info_handler, pgn_filename, gamenum, plynum, fen=None):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+3)/2.0)
datafilename = "%s/data.html" % (savedir)
if os.path.exists(datafilename):
return
if not os.path.exists(savedir):
os.makedirs(savedir)
outfile = open(datafilename, "w")
outfile.write("""
<img src="board.svg" height="100%"/> <br>
<img src="Q.svg"/> <br>
<img src="Q2.svg"/> <br>
<img src="N.svg"/> <br>
<img src="P.svg"/> <br>
<pre>""")
if fen:
board = chess.Board(fen)
fig = chess.svg.board(board=board)
info = "position %s\n" % (fen)
else:
(board, fig, info) = get_board(pgn_filename, gamenum, plynum)
outfile.write(info)
outfile.write(board.fen() + "\n")
outfile.write(str(board) + "\n")
open("%s/board.svg" % (savedir), "w").write(fig)
outfile.write(str(LC0) + "\n")
engine.uci()
outfile.write(engine.name + "\n")
# Reset engine search tree, but not engine NNCache, by setting different position
engine.position(chess.Board())
info_handler.reset()
info_handler.board = chess.Board()
engine.go(nodes=1)
for nodes in NODES:
# Do our position now
info_handler.reset()
info_handler.board = board
engine.position(board)
engine.go(nodes=nodes)
for s in info_handler.strings:
outfile.write("INFO: TN: %s %s\n" % (nodes, s))
outfile.write("</pre>\n")
outfile.close()
def plot(pgn_filename, gamenum, plynum):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+3)/2.0)
# Parse data into pandas
move_infos = []
with open("%s/data.html" % savedir) as infile:
for line in infile.readlines():
info = parse_info(line)
if not info: continue
move_infos.append(info)
df = pd.DataFrame(move_infos)
# Filter top 4 moves, and get P
TNmax = df["TN"].max()
best = df[df["TN"] == TNmax].sort_values("N", ascending=False).head(NUM_MOVES)
moves = list(best["sanmove"])
bestdf = df.loc[df["sanmove"].isin(moves)]
# Plots
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="TN", y="N", logx=True, logy=True, ax=ax)
ax.legend(moves)
plt.title("Child Node Visits vs Total Nodes")
plt.xlabel("Total Nodes")
plt.ylabel("Child Nodes")
plt.savefig("%s/N.svg" % (savedir))
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="TN", y="Q", logx=True, logy=False, ax=ax)
ax.legend(moves)
plt.title("Value vs Total Nodes")
plt.xlabel("Total Nodes")
plt.ylabel("Value")
set_q2cp_ticks(ax)
plt.savefig("%s/Q.svg" % (savedir))
# This plot can have multiple entries with the same index="N", so pivot fails.
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="N", y="Q", logx=True, logy=False, ax=ax)
ax.legend(moves)
plt.title("Value vs Child Node Visits")
plt.xlabel("Child Node Visits")
plt.ylabel("Value")
set_q2cp_ticks(ax)
plt.savefig("%s/Q2.svg" % (savedir))
best.plot.bar(x="sanmove", y="P", legend=False)
plt.xlabel("")
plt.title("Policy")
plt.savefig("%s/P.svg" % (savedir))
plt.close("all")
def analyze_game(pgn_filename, gamenum, plynum, plies):
try:
# In case you have the data files already, but no lc0 exe.
engine = chess.uci.popen_engine(LC0)
info_handler = Lc0InfoHandler(None)
engine.info_handlers.append(info_handler)
except:
print("Warning: Could not open Lc0 engine.")
engine = None
info_handler = None
if not os.path.exists("plots"):
os.makedirs("plots")
outfile = open("plots/%s_%s_%0.3f_%s.html" % (pgn_filename, gamenum, (plynum+3)/2, plies), "w")
outfile.write('<table width="%d" height="500">\n' % (plies*300))
outfile.write("<tr>\n")
for svgfile in ("board", "Q", "Q2", "N", "P"):
for p in range(plies):
savedir = "%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+p+3)/2.0)
outfile.write('<td> <img src="%s/%s.svg" width="100%%"/> </td>\n' % (savedir, svgfile))
outfile.write("</tr>\n")
outfile.write("</tr>\n")
outfile.write("</table>\n")
outfile.close()
for p in range(plies):
analyze_and_plot(engine, info_handler, pgn_filename, gamenum, plynum+p)
if engine: engine.quit()
def analyze_fen(name, fen):
engine = chess.uci.popen_engine(LC0)
info_handler = Lc0InfoHandler(None)
engine.info_handlers.append(info_handler)
analyze_and_plot(engine, info_handler, name, 0, 0, fen)
engine.quit()
def compose(pgn_filename, gamenum, move_start, numplies, xsize=470, ysize=350, scale=0.6, scaleb=0.85):
outfile = open("plots/%s_%s_%05.1f_%03d.html" % (pgn_filename, gamenum, move_start, numplies), "w")
outfile.write('<table width="%d" height="500">\n' % (numplies*300))
outfile.write("<tr>\n")
for svgfile in ("board", "Q", "Q2", "N", "P"):
for move in np.arange(move_start, move_start+numplies/2, 0.5):
savedir = "%s_%s_%05.1f" % (pgn_filename, gamenum, move)
outfile.write('<td> <img src="%s/%s.svg" width="100%%"/> </td>\n' % (savedir, svgfile))
outfile.write("</tr>\n")
outfile.write("</tr>\n")
outfile.write("</table>\n")
outfile.close()
for move in np.arange(move_start, move_start+numplies/2, 0.5):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, move)
fig = Figure(xsize*scale, ysize*5*scale,
Panel(SVG("%s/board.svg" % (savedir)).scale(scale*scaleb)),
Panel(SVG("%s/Q.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/Q2.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/N.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/P.svg" % (savedir)).scale(scale)),
)
fig.tile(1,5)
fig.save("%s/all.svg" % (savedir))
panels = []
for move in np.arange(move_start, move_start+numplies/2, 0.5):
panels.append(Panel(SVG("plots/%s_%s_%05.1f/all.svg" % (pgn_filename, gamenum, move))))
fig = Figure(xsize*(numplies)*scale, ysize*5*scale, *panels)
fig.tile(numplies, 1)
filename = "plots/%s_%s_%05.1f_all" % (pgn_filename, gamenum, move_start)
fig.save("%s.svg" % (filename))
# cariosvg doesn't parse units "px"
#cairosvg.svg2png(url="%s.svg" % (filename), write_to="%s.png" % (filename))
if __name__ == "__main__":
usage_str = """
lc0_analyzer --pgn pgnfile --move 4.0 --numplies 6
lc0_analyzer --fen fenstring --numplies 6"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=usage_str)
parser.add_argument("--pgn", type=str, help="pgn file to process")
parser.add_argument("--round", type=str, help="round of pgn file, omit to pick first game")
parser.add_argument("--move", type=float, help="""
4.0 = Before white's 4th move (analyze position after black's 3rd move)
4.5 = Before blacks's 4th move (analyze position after white's 4th move)
""")
parser.add_argument("--numplies", type=int, help="number of plies to analyze")
parser.add_argument("--fen", type=str, help="number of plies to analyze")
parser.add_argument("--fen_desc", type=str, help="description of fen position")
parser.add_argument("--lc0", type=str, required=True, help="lc0 executable")
parser.add_argument("--w", type=str, required=True, help="path to weights")
parser.add_argument("--nodes", type=int, default=2**16, help="number of nodes to analyze for each position, will be rounded to nearest power of 2")
parser.add_argument("--topn", type=int, default=4, help="plot top N moves")
parser.add_argument("--ply_per_page", type=int, default=6, help="how many plies to put together in one .svg page")
args = parser.parse_args()
LC0 = [
args.lc0,
"-w", args.w,
"-l", "lc0log.txt",
#"-t", "1",
#"--max-prefetch=0",
#"--no-out-of-order-eval", # Was trying to be more accurate, but see issue #680
#"--collision-visit-limit=1",
#"--minibatch-size=1",
"--minibatch-size=16", # because of #680, use this compromise between accuracy and speed
"--smart-pruning-factor=0", # We will start and stop in loops, so disable pruning
"--nncache=1000000",
"--verbose-move-stats",
]
NODES = [ 2**n for n in range(round(math.log(args.nodes, 2))+1)]
NUM_MOVES = args.topn
if args.pgn:
game = getgame(args.pgn, args.round)
gamelen = len(game.end().board().move_stack)
plynum = round(args.move*2-3)
if plynum + args.numplies > gamelen:
args.numplies = gamelen-plynum
analyze_game(args.pgn, args.round, round(args.move*2-3), args.numplies)
for m in np.arange(args.move, args.move+args.numplies/2, 0.5*args.ply_per_page):
compose(args.pgn, args.round, m, min(args.ply_per_page, min(args.ply_per_page, args.numplies-(m-args.move)*2)))
elif args.fen:
analyze_fen(args.fen_desc, args.fen)
#compose("plots/%s_%s" % (args.fen_desc, args.round), args.numplies)
else: raise(Exception("must provide --pgn or --fen")) | #
# lc0_analyzer.py --help | random_line_split |
lc0_analyzer.py | #!/usr/bin/env python3
#
# lc0_analyzer.py --help
#
# See https://github.com/killerducky/lc0_analyzer/README.md for description
#
# See example.sh
#
import chess
import chess.pgn
import chess.uci
import chess.svg
import re
import matplotlib.pyplot as plt
import matplotlib.axes
import pandas as pd
import numpy as np
import os
import math
import argparse
from collections import OrderedDict
import svgutils.transform as sg
from svgutils.compose import *
#import cairosvg
class Lc0InfoHandler(chess.uci.InfoHandler):
def __init__(self, board):
super().__init__()
self.reset()
self.board = board
def reset(self):
self.strings = []
self.moves = {}
def post_info(self):
if "string" in self.info:
#self.strings.append(self.info["string"])
# "c7f4 (268 ) N: 40 (+37) (P: 20.23%) (Q: -0.04164) (U: 0.08339) (Q+U: 0.04175) (V: 0.1052)"
(move, info) = self.info["string"].split(maxsplit=1)
move = self.board.san(self.board.parse_uci(move))
self.strings.append("%6s %s" % (move, self.info["string"]))
super().post_info() # Release the lock
def q2cp(q):
return 290.680623072 * math.tan(1.548090806 * q) / 100.0
def cp2q(cp):
return math.atan(cp*100.0/290.680623072)/1.548090806
def set_q2cp_ticks(ax):
ax.set_ylim(-1, 1)
ax2 = ax.twinx()
ax2.set_ylim(-1, 1)
cp_vals = [-128, -8, -4, -2, -1, 0, 1, 2, 4, 8, 128]
q_vals = [cp2q(x) for x in cp_vals]
ax2.set_yticks(q_vals)
ax2.set_yticklabels(cp_vals)
ax2.set_ylabel("CP")
def parse_info(info):
# "INFO: TN: 1 Qf4 c7f4 (268 ) N: 40 (+37) (P: 20.23%) (Q: -0.04164) (U: 0.08339) (Q+U: 0.04175) (V: 0.1052)"
m = re.match("^INFO:", info)
if not m: return None
(_, _, TN, sanmove, ucimove, info) = info.split(maxsplit=5)
floats = re.split(r"[^-.\d]+", info)
(_, _, N, _, P, Q, U, Q_U, V, _) = floats
move_infos = {}
move_infos["TN"] = int(TN)
move_infos["sanmove"] = sanmove
move_infos["ucimove"] = ucimove
move_infos["N"] = int(N)
move_infos["P"] = float(P)
move_infos["Q"] = float(Q)
move_infos["U"] = float(U)
if V == "-.----": V = 0
move_infos["V"] = float(V)
return move_infos
def getgame(pgn_filename, gamenum):
with open(pgn_filename) as pgn:
# find the game (probably a better way to do this?)
game = None
while True:
game = chess.pgn.read_game(pgn)
if not game:
break
if not gamenum or game.headers["Round"] == gamenum:
break
if not game:
raise("Game not found")
return game
# plynum = 0 = after White's move 1
# plynum = 1 = after Black's move 1
# plynum = 2(M-1)+0 = after White's move M
# plynum = 2(M-1)+1 = after Black's move M
def get_board(pgn_filename, gamenum, plynum):
game = getgame(pgn_filename, gamenum)
info = ""
info += game.headers["White"] + "\n"
info += game.headers["Black"] + "\n"
nodes = list(game.mainline())
# There must be a better way to get the list of moves up to plynum P?
ucistr = ""
sanstr = ""
if plynum >= 0:
for node in nodes[0:plynum+1]:
ucistr += " " + str(node.move)
sanstr += " " + str(node.san())
info += "position startpos moves" + ucistr + "\n"
info += sanstr + "\n" # TODO: Add move numbers. Surely python-chess can do this for me?
# Something like this will work...
#game = getgame(pgn_filename, gamenum)
#end = game.end()
#board = end.board()
#print(game.board().variation_san(board.move_stack))
node = nodes[plynum]
board = node.board()
fig = chess.svg.board(board=board, lastmove=node.move)
else:
info += "position startpos\n"
board = chess.Board()
fig = chess.svg.board(board=board)
return (board, fig, info)
# TODO:
# gamenum/plynum vs fen is a mess right now...
def analyze_and_plot(engine, info_handler, pgn_filename, gamenum, plynum, fen=None):
analyze(engine, info_handler, pgn_filename, gamenum, plynum, fen)
plot(pgn_filename, gamenum, plynum)
def analyze(engine, info_handler, pgn_filename, gamenum, plynum, fen=None):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+3)/2.0)
datafilename = "%s/data.html" % (savedir)
if os.path.exists(datafilename):
return
if not os.path.exists(savedir):
os.makedirs(savedir)
outfile = open(datafilename, "w")
outfile.write("""
<img src="board.svg" height="100%"/> <br>
<img src="Q.svg"/> <br>
<img src="Q2.svg"/> <br>
<img src="N.svg"/> <br>
<img src="P.svg"/> <br>
<pre>""")
if fen:
board = chess.Board(fen)
fig = chess.svg.board(board=board)
info = "position %s\n" % (fen)
else:
(board, fig, info) = get_board(pgn_filename, gamenum, plynum)
outfile.write(info)
outfile.write(board.fen() + "\n")
outfile.write(str(board) + "\n")
open("%s/board.svg" % (savedir), "w").write(fig)
outfile.write(str(LC0) + "\n")
engine.uci()
outfile.write(engine.name + "\n")
# Reset engine search tree, but not engine NNCache, by setting different position
engine.position(chess.Board())
info_handler.reset()
info_handler.board = chess.Board()
engine.go(nodes=1)
for nodes in NODES:
# Do our position now
info_handler.reset()
info_handler.board = board
engine.position(board)
engine.go(nodes=nodes)
for s in info_handler.strings:
outfile.write("INFO: TN: %s %s\n" % (nodes, s))
outfile.write("</pre>\n")
outfile.close()
def plot(pgn_filename, gamenum, plynum):
|
def analyze_game(pgn_filename, gamenum, plynum, plies):
try:
# In case you have the data files already, but no lc0 exe.
engine = chess.uci.popen_engine(LC0)
info_handler = Lc0InfoHandler(None)
engine.info_handlers.append(info_handler)
except:
print("Warning: Could not open Lc0 engine.")
engine = None
info_handler = None
if not os.path.exists("plots"):
os.makedirs("plots")
outfile = open("plots/%s_%s_%0.3f_%s.html" % (pgn_filename, gamenum, (plynum+3)/2, plies), "w")
outfile.write('<table width="%d" height="500">\n' % (plies*300))
outfile.write("<tr>\n")
for svgfile in ("board", "Q", "Q2", "N", "P"):
for p in range(plies):
savedir = "%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+p+3)/2.0)
outfile.write('<td> <img src="%s/%s.svg" width="100%%"/> </td>\n' % (savedir, svgfile))
outfile.write("</tr>\n")
outfile.write("</tr>\n")
outfile.write("</table>\n")
outfile.close()
for p in range(plies):
analyze_and_plot(engine, info_handler, pgn_filename, gamenum, plynum+p)
if engine: engine.quit()
def analyze_fen(name, fen):
engine = chess.uci.popen_engine(LC0)
info_handler = Lc0InfoHandler(None)
engine.info_handlers.append(info_handler)
analyze_and_plot(engine, info_handler, name, 0, 0, fen)
engine.quit()
def compose(pgn_filename, gamenum, move_start, numplies, xsize=470, ysize=350, scale=0.6, scaleb=0.85):
outfile = open("plots/%s_%s_%05.1f_%03d.html" % (pgn_filename, gamenum, move_start, numplies), "w")
outfile.write('<table width="%d" height="500">\n' % (numplies*300))
outfile.write("<tr>\n")
for svgfile in ("board", "Q", "Q2", "N", "P"):
for move in np.arange(move_start, move_start+numplies/2, 0.5):
savedir = "%s_%s_%05.1f" % (pgn_filename, gamenum, move)
outfile.write('<td> <img src="%s/%s.svg" width="100%%"/> </td>\n' % (savedir, svgfile))
outfile.write("</tr>\n")
outfile.write("</tr>\n")
outfile.write("</table>\n")
outfile.close()
for move in np.arange(move_start, move_start+numplies/2, 0.5):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, move)
fig = Figure(xsize*scale, ysize*5*scale,
Panel(SVG("%s/board.svg" % (savedir)).scale(scale*scaleb)),
Panel(SVG("%s/Q.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/Q2.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/N.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/P.svg" % (savedir)).scale(scale)),
)
fig.tile(1,5)
fig.save("%s/all.svg" % (savedir))
panels = []
for move in np.arange(move_start, move_start+numplies/2, 0.5):
panels.append(Panel(SVG("plots/%s_%s_%05.1f/all.svg" % (pgn_filename, gamenum, move))))
fig = Figure(xsize*(numplies)*scale, ysize*5*scale, *panels)
fig.tile(numplies, 1)
filename = "plots/%s_%s_%05.1f_all" % (pgn_filename, gamenum, move_start)
fig.save("%s.svg" % (filename))
# cariosvg doesn't parse units "px"
#cairosvg.svg2png(url="%s.svg" % (filename), write_to="%s.png" % (filename))
if __name__ == "__main__":
usage_str = """
lc0_analyzer --pgn pgnfile --move 4.0 --numplies 6
lc0_analyzer --fen fenstring --numplies 6"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=usage_str)
parser.add_argument("--pgn", type=str, help="pgn file to process")
parser.add_argument("--round", type=str, help="round of pgn file, omit to pick first game")
parser.add_argument("--move", type=float, help="""
4.0 = Before white's 4th move (analyze position after black's 3rd move)
4.5 = Before blacks's 4th move (analyze position after white's 4th move)
""")
parser.add_argument("--numplies", type=int, help="number of plies to analyze")
parser.add_argument("--fen", type=str, help="number of plies to analyze")
parser.add_argument("--fen_desc", type=str, help="description of fen position")
parser.add_argument("--lc0", type=str, required=True, help="lc0 executable")
parser.add_argument("--w", type=str, required=True, help="path to weights")
parser.add_argument("--nodes", type=int, default=2**16, help="number of nodes to analyze for each position, will be rounded to nearest power of 2")
parser.add_argument("--topn", type=int, default=4, help="plot top N moves")
parser.add_argument("--ply_per_page", type=int, default=6, help="how many plies to put together in one .svg page")
args = parser.parse_args()
LC0 = [
args.lc0,
"-w", args.w,
"-l", "lc0log.txt",
#"-t", "1",
#"--max-prefetch=0",
#"--no-out-of-order-eval", # Was trying to be more accurate, but see issue #680
#"--collision-visit-limit=1",
#"--minibatch-size=1",
"--minibatch-size=16", # because of #680, use this compromise between accuracy and speed
"--smart-pruning-factor=0", # We will start and stop in loops, so disable pruning
"--nncache=1000000",
"--verbose-move-stats",
]
NODES = [ 2**n for n in range(round(math.log(args.nodes, 2))+1)]
NUM_MOVES = args.topn
if args.pgn:
game = getgame(args.pgn, args.round)
gamelen = len(game.end().board().move_stack)
plynum = round(args.move*2-3)
if plynum + args.numplies > gamelen:
args.numplies = gamelen-plynum
analyze_game(args.pgn, args.round, round(args.move*2-3), args.numplies)
for m in np.arange(args.move, args.move+args.numplies/2, 0.5*args.ply_per_page):
compose(args.pgn, args.round, m, min(args.ply_per_page, min(args.ply_per_page, args.numplies-(m-args.move)*2)))
elif args.fen:
analyze_fen(args.fen_desc, args.fen)
#compose("plots/%s_%s" % (args.fen_desc, args.round), args.numplies)
else: raise(Exception("must provide --pgn or --fen"))
| savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+3)/2.0)
# Parse data into pandas
move_infos = []
with open("%s/data.html" % savedir) as infile:
for line in infile.readlines():
info = parse_info(line)
if not info: continue
move_infos.append(info)
df = pd.DataFrame(move_infos)
# Filter top 4 moves, and get P
TNmax = df["TN"].max()
best = df[df["TN"] == TNmax].sort_values("N", ascending=False).head(NUM_MOVES)
moves = list(best["sanmove"])
bestdf = df.loc[df["sanmove"].isin(moves)]
# Plots
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="TN", y="N", logx=True, logy=True, ax=ax)
ax.legend(moves)
plt.title("Child Node Visits vs Total Nodes")
plt.xlabel("Total Nodes")
plt.ylabel("Child Nodes")
plt.savefig("%s/N.svg" % (savedir))
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="TN", y="Q", logx=True, logy=False, ax=ax)
ax.legend(moves)
plt.title("Value vs Total Nodes")
plt.xlabel("Total Nodes")
plt.ylabel("Value")
set_q2cp_ticks(ax)
plt.savefig("%s/Q.svg" % (savedir))
# This plot can have multiple entries with the same index="N", so pivot fails.
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="N", y="Q", logx=True, logy=False, ax=ax)
ax.legend(moves)
plt.title("Value vs Child Node Visits")
plt.xlabel("Child Node Visits")
plt.ylabel("Value")
set_q2cp_ticks(ax)
plt.savefig("%s/Q2.svg" % (savedir))
best.plot.bar(x="sanmove", y="P", legend=False)
plt.xlabel("")
plt.title("Policy")
plt.savefig("%s/P.svg" % (savedir))
plt.close("all") | identifier_body |
lc0_analyzer.py | #!/usr/bin/env python3
#
# lc0_analyzer.py --help
#
# See https://github.com/killerducky/lc0_analyzer/README.md for description
#
# See example.sh
#
import chess
import chess.pgn
import chess.uci
import chess.svg
import re
import matplotlib.pyplot as plt
import matplotlib.axes
import pandas as pd
import numpy as np
import os
import math
import argparse
from collections import OrderedDict
import svgutils.transform as sg
from svgutils.compose import *
#import cairosvg
class Lc0InfoHandler(chess.uci.InfoHandler):
def __init__(self, board):
super().__init__()
self.reset()
self.board = board
def reset(self):
self.strings = []
self.moves = {}
def | (self):
if "string" in self.info:
#self.strings.append(self.info["string"])
# "c7f4 (268 ) N: 40 (+37) (P: 20.23%) (Q: -0.04164) (U: 0.08339) (Q+U: 0.04175) (V: 0.1052)"
(move, info) = self.info["string"].split(maxsplit=1)
move = self.board.san(self.board.parse_uci(move))
self.strings.append("%6s %s" % (move, self.info["string"]))
super().post_info() # Release the lock
def q2cp(q):
return 290.680623072 * math.tan(1.548090806 * q) / 100.0
def cp2q(cp):
return math.atan(cp*100.0/290.680623072)/1.548090806
def set_q2cp_ticks(ax):
ax.set_ylim(-1, 1)
ax2 = ax.twinx()
ax2.set_ylim(-1, 1)
cp_vals = [-128, -8, -4, -2, -1, 0, 1, 2, 4, 8, 128]
q_vals = [cp2q(x) for x in cp_vals]
ax2.set_yticks(q_vals)
ax2.set_yticklabels(cp_vals)
ax2.set_ylabel("CP")
def parse_info(info):
# "INFO: TN: 1 Qf4 c7f4 (268 ) N: 40 (+37) (P: 20.23%) (Q: -0.04164) (U: 0.08339) (Q+U: 0.04175) (V: 0.1052)"
m = re.match("^INFO:", info)
if not m: return None
(_, _, TN, sanmove, ucimove, info) = info.split(maxsplit=5)
floats = re.split(r"[^-.\d]+", info)
(_, _, N, _, P, Q, U, Q_U, V, _) = floats
move_infos = {}
move_infos["TN"] = int(TN)
move_infos["sanmove"] = sanmove
move_infos["ucimove"] = ucimove
move_infos["N"] = int(N)
move_infos["P"] = float(P)
move_infos["Q"] = float(Q)
move_infos["U"] = float(U)
if V == "-.----": V = 0
move_infos["V"] = float(V)
return move_infos
def getgame(pgn_filename, gamenum):
with open(pgn_filename) as pgn:
# find the game (probably a better way to do this?)
game = None
while True:
game = chess.pgn.read_game(pgn)
if not game:
break
if not gamenum or game.headers["Round"] == gamenum:
break
if not game:
raise("Game not found")
return game
# plynum = 0 = after White's move 1
# plynum = 1 = after Black's move 1
# plynum = 2(M-1)+0 = after White's move M
# plynum = 2(M-1)+1 = after Black's move M
def get_board(pgn_filename, gamenum, plynum):
game = getgame(pgn_filename, gamenum)
info = ""
info += game.headers["White"] + "\n"
info += game.headers["Black"] + "\n"
nodes = list(game.mainline())
# There must be a better way to get the list of moves up to plynum P?
ucistr = ""
sanstr = ""
if plynum >= 0:
for node in nodes[0:plynum+1]:
ucistr += " " + str(node.move)
sanstr += " " + str(node.san())
info += "position startpos moves" + ucistr + "\n"
info += sanstr + "\n" # TODO: Add move numbers. Surely python-chess can do this for me?
# Something like this will work...
#game = getgame(pgn_filename, gamenum)
#end = game.end()
#board = end.board()
#print(game.board().variation_san(board.move_stack))
node = nodes[plynum]
board = node.board()
fig = chess.svg.board(board=board, lastmove=node.move)
else:
info += "position startpos\n"
board = chess.Board()
fig = chess.svg.board(board=board)
return (board, fig, info)
# TODO:
# gamenum/plynum vs fen is a mess right now...
def analyze_and_plot(engine, info_handler, pgn_filename, gamenum, plynum, fen=None):
analyze(engine, info_handler, pgn_filename, gamenum, plynum, fen)
plot(pgn_filename, gamenum, plynum)
def analyze(engine, info_handler, pgn_filename, gamenum, plynum, fen=None):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+3)/2.0)
datafilename = "%s/data.html" % (savedir)
if os.path.exists(datafilename):
return
if not os.path.exists(savedir):
os.makedirs(savedir)
outfile = open(datafilename, "w")
outfile.write("""
<img src="board.svg" height="100%"/> <br>
<img src="Q.svg"/> <br>
<img src="Q2.svg"/> <br>
<img src="N.svg"/> <br>
<img src="P.svg"/> <br>
<pre>""")
if fen:
board = chess.Board(fen)
fig = chess.svg.board(board=board)
info = "position %s\n" % (fen)
else:
(board, fig, info) = get_board(pgn_filename, gamenum, plynum)
outfile.write(info)
outfile.write(board.fen() + "\n")
outfile.write(str(board) + "\n")
open("%s/board.svg" % (savedir), "w").write(fig)
outfile.write(str(LC0) + "\n")
engine.uci()
outfile.write(engine.name + "\n")
# Reset engine search tree, but not engine NNCache, by setting different position
engine.position(chess.Board())
info_handler.reset()
info_handler.board = chess.Board()
engine.go(nodes=1)
for nodes in NODES:
# Do our position now
info_handler.reset()
info_handler.board = board
engine.position(board)
engine.go(nodes=nodes)
for s in info_handler.strings:
outfile.write("INFO: TN: %s %s\n" % (nodes, s))
outfile.write("</pre>\n")
outfile.close()
def plot(pgn_filename, gamenum, plynum):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+3)/2.0)
# Parse data into pandas
move_infos = []
with open("%s/data.html" % savedir) as infile:
for line in infile.readlines():
info = parse_info(line)
if not info: continue
move_infos.append(info)
df = pd.DataFrame(move_infos)
# Filter top 4 moves, and get P
TNmax = df["TN"].max()
best = df[df["TN"] == TNmax].sort_values("N", ascending=False).head(NUM_MOVES)
moves = list(best["sanmove"])
bestdf = df.loc[df["sanmove"].isin(moves)]
# Plots
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="TN", y="N", logx=True, logy=True, ax=ax)
ax.legend(moves)
plt.title("Child Node Visits vs Total Nodes")
plt.xlabel("Total Nodes")
plt.ylabel("Child Nodes")
plt.savefig("%s/N.svg" % (savedir))
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="TN", y="Q", logx=True, logy=False, ax=ax)
ax.legend(moves)
plt.title("Value vs Total Nodes")
plt.xlabel("Total Nodes")
plt.ylabel("Value")
set_q2cp_ticks(ax)
plt.savefig("%s/Q.svg" % (savedir))
# This plot can have multiple entries with the same index="N", so pivot fails.
fig, ax = plt.subplots()
for move in moves:
tmp = bestdf[bestdf["sanmove"] == move]
tmp.plot.line(x="N", y="Q", logx=True, logy=False, ax=ax)
ax.legend(moves)
plt.title("Value vs Child Node Visits")
plt.xlabel("Child Node Visits")
plt.ylabel("Value")
set_q2cp_ticks(ax)
plt.savefig("%s/Q2.svg" % (savedir))
best.plot.bar(x="sanmove", y="P", legend=False)
plt.xlabel("")
plt.title("Policy")
plt.savefig("%s/P.svg" % (savedir))
plt.close("all")
def analyze_game(pgn_filename, gamenum, plynum, plies):
try:
# In case you have the data files already, but no lc0 exe.
engine = chess.uci.popen_engine(LC0)
info_handler = Lc0InfoHandler(None)
engine.info_handlers.append(info_handler)
except:
print("Warning: Could not open Lc0 engine.")
engine = None
info_handler = None
if not os.path.exists("plots"):
os.makedirs("plots")
outfile = open("plots/%s_%s_%0.3f_%s.html" % (pgn_filename, gamenum, (plynum+3)/2, plies), "w")
outfile.write('<table width="%d" height="500">\n' % (plies*300))
outfile.write("<tr>\n")
for svgfile in ("board", "Q", "Q2", "N", "P"):
for p in range(plies):
savedir = "%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+p+3)/2.0)
outfile.write('<td> <img src="%s/%s.svg" width="100%%"/> </td>\n' % (savedir, svgfile))
outfile.write("</tr>\n")
outfile.write("</tr>\n")
outfile.write("</table>\n")
outfile.close()
for p in range(plies):
analyze_and_plot(engine, info_handler, pgn_filename, gamenum, plynum+p)
if engine: engine.quit()
def analyze_fen(name, fen):
engine = chess.uci.popen_engine(LC0)
info_handler = Lc0InfoHandler(None)
engine.info_handlers.append(info_handler)
analyze_and_plot(engine, info_handler, name, 0, 0, fen)
engine.quit()
def compose(pgn_filename, gamenum, move_start, numplies, xsize=470, ysize=350, scale=0.6, scaleb=0.85):
outfile = open("plots/%s_%s_%05.1f_%03d.html" % (pgn_filename, gamenum, move_start, numplies), "w")
outfile.write('<table width="%d" height="500">\n' % (numplies*300))
outfile.write("<tr>\n")
for svgfile in ("board", "Q", "Q2", "N", "P"):
for move in np.arange(move_start, move_start+numplies/2, 0.5):
savedir = "%s_%s_%05.1f" % (pgn_filename, gamenum, move)
outfile.write('<td> <img src="%s/%s.svg" width="100%%"/> </td>\n' % (savedir, svgfile))
outfile.write("</tr>\n")
outfile.write("</tr>\n")
outfile.write("</table>\n")
outfile.close()
for move in np.arange(move_start, move_start+numplies/2, 0.5):
savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, move)
fig = Figure(xsize*scale, ysize*5*scale,
Panel(SVG("%s/board.svg" % (savedir)).scale(scale*scaleb)),
Panel(SVG("%s/Q.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/Q2.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/N.svg" % (savedir)).scale(scale)),
Panel(SVG("%s/P.svg" % (savedir)).scale(scale)),
)
fig.tile(1,5)
fig.save("%s/all.svg" % (savedir))
panels = []
for move in np.arange(move_start, move_start+numplies/2, 0.5):
panels.append(Panel(SVG("plots/%s_%s_%05.1f/all.svg" % (pgn_filename, gamenum, move))))
fig = Figure(xsize*(numplies)*scale, ysize*5*scale, *panels)
fig.tile(numplies, 1)
filename = "plots/%s_%s_%05.1f_all" % (pgn_filename, gamenum, move_start)
fig.save("%s.svg" % (filename))
# cariosvg doesn't parse units "px"
#cairosvg.svg2png(url="%s.svg" % (filename), write_to="%s.png" % (filename))
if __name__ == "__main__":
usage_str = """
lc0_analyzer --pgn pgnfile --move 4.0 --numplies 6
lc0_analyzer --fen fenstring --numplies 6"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=usage_str)
parser.add_argument("--pgn", type=str, help="pgn file to process")
parser.add_argument("--round", type=str, help="round of pgn file, omit to pick first game")
parser.add_argument("--move", type=float, help="""
4.0 = Before white's 4th move (analyze position after black's 3rd move)
4.5 = Before blacks's 4th move (analyze position after white's 4th move)
""")
parser.add_argument("--numplies", type=int, help="number of plies to analyze")
parser.add_argument("--fen", type=str, help="number of plies to analyze")
parser.add_argument("--fen_desc", type=str, help="description of fen position")
parser.add_argument("--lc0", type=str, required=True, help="lc0 executable")
parser.add_argument("--w", type=str, required=True, help="path to weights")
parser.add_argument("--nodes", type=int, default=2**16, help="number of nodes to analyze for each position, will be rounded to nearest power of 2")
parser.add_argument("--topn", type=int, default=4, help="plot top N moves")
parser.add_argument("--ply_per_page", type=int, default=6, help="how many plies to put together in one .svg page")
args = parser.parse_args()
LC0 = [
args.lc0,
"-w", args.w,
"-l", "lc0log.txt",
#"-t", "1",
#"--max-prefetch=0",
#"--no-out-of-order-eval", # Was trying to be more accurate, but see issue #680
#"--collision-visit-limit=1",
#"--minibatch-size=1",
"--minibatch-size=16", # because of #680, use this compromise between accuracy and speed
"--smart-pruning-factor=0", # We will start and stop in loops, so disable pruning
"--nncache=1000000",
"--verbose-move-stats",
]
NODES = [ 2**n for n in range(round(math.log(args.nodes, 2))+1)]
NUM_MOVES = args.topn
if args.pgn:
game = getgame(args.pgn, args.round)
gamelen = len(game.end().board().move_stack)
plynum = round(args.move*2-3)
if plynum + args.numplies > gamelen:
args.numplies = gamelen-plynum
analyze_game(args.pgn, args.round, round(args.move*2-3), args.numplies)
for m in np.arange(args.move, args.move+args.numplies/2, 0.5*args.ply_per_page):
compose(args.pgn, args.round, m, min(args.ply_per_page, min(args.ply_per_page, args.numplies-(m-args.move)*2)))
elif args.fen:
analyze_fen(args.fen_desc, args.fen)
#compose("plots/%s_%s" % (args.fen_desc, args.round), args.numplies)
else: raise(Exception("must provide --pgn or --fen"))
| post_info | identifier_name |
connection_test.go | package test
import (
"context"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/arbostestcontracts"
"github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/rpc"
utils2 "github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/utils"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/rs/zerolog/pkgerrors"
golog "log"
"math/big"
"math/rand"
"net"
"os"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types"
gethlog "github.com/ethereum/go-ethereum/log"
"github.com/offchainlabs/arbitrum/packages/arb-util/arbos"
"github.com/offchainlabs/arbitrum/packages/arb-util/common"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/arbbridge"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/ethbridge"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/ethutils"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/test"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/valprotocol"
"github.com/offchainlabs/arbitrum/packages/arb-validator/chainlistener"
"github.com/offchainlabs/arbitrum/packages/arb-validator/loader"
"github.com/offchainlabs/arbitrum/packages/arb-validator/rollupmanager"
)
var logger zerolog.Logger
var db = "./testman"
var contract = arbos.Path()
func setupRollup(ctx context.Context, authClient *ethbridge.EthArbAuthClient) (common.Address, error) {
config := valprotocol.ChainParams{
StakeRequirement: big.NewInt(10),
StakeToken: common.Address{},
GracePeriod: common.TimeTicks{Val: big.NewInt(13000 * 2)},
MaxExecutionSteps: 10000000000,
ArbGasSpeedLimitPerTick: 200000,
}
factoryAddr, err := ethbridge.DeployRollupFactory(ctx, authClient)
if err != nil {
return common.Address{}, err
}
factory, err := authClient.NewArbFactory(common.NewAddressFromEth(factoryAddr))
if err != nil {
return common.Address{}, err
}
mach, err := loader.LoadMachineFromFile(contract, false, "cpp")
if err != nil {
return common.Address{}, err
}
rollupAddress, _, err := factory.CreateRollup(
ctx,
mach.Hash(),
config,
common.Address{},
)
return rollupAddress, err
}
/********************************************/
/* Validators */
/********************************************/
func setupValidators(ctx context.Context, rollupAddress common.Address, authClients []*ethbridge.EthArbAuthClient) error {
if len(authClients) < 1 {
panic("must have at least 1 authClient")
}
seed := time.Now().UnixNano()
// seed := int64(1559616168133477000)
rand.Seed(seed)
managers := make([]*rollupmanager.Manager, 0, len(authClients))
for _, authClient := range authClients {
rollupActor, err := authClient.NewRollup(rollupAddress)
if err != nil {
return err
}
dbName := db + "/" + authClient.Address().String()
manager, err := rollupmanager.CreateManager(
ctx,
rollupAddress,
arbbridge.NewStressTestClient(authClient, time.Second*15),
contract,
dbName,
)
if err != nil {
return err
}
manager.AddListener(ctx, chainlistener.NewAnnouncerListener(authClient.Address().String()))
validatorListener := chainlistener.NewValidatorChainListener(
ctx,
rollupAddress,
rollupActor,
)
err = validatorListener.AddStaker(authClient)
if err != nil {
return err
}
manager.AddListener(ctx, validatorListener)
managers = append(managers, manager)
}
_ = managers
return nil
}
func launchAggregator(client ethutils.EthClient, auth *bind.TransactOpts, rollupAddress common.Address) error {
go func() {
if err := rpc.LaunchAggregator(
context.Background(),
client,
rollupAddress,
contract,
db+"/aggregator",
"9546",
"9547",
utils2.RPCFlags{},
time.Second,
rpc.StatelessBatcherMode{Auth: auth},
); err != nil {
logger.Fatal().Stack().Err(err).Msg("LaunchAggregator failed")
}
}()
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
conn, err := net.DialTimeout(
"tcp",
net.JoinHostPort("127.0.0.1", "9546"),
time.Second,
)
if err != nil || conn == nil {
break
}
if err := conn.Close(); err != nil {
return err
}
conn, err = net.DialTimeout(
"tcp",
net.JoinHostPort("127.0.0.1", "9546"),
time.Second,
)
if err != nil || conn == nil {
break
}
if err := conn.Close(); err != nil {
return err
}
// Wait for the validator to catch up to head
time.Sleep(time.Second * 2)
return nil
case <-time.After(time.Second * 5):
return errors.New("couldn't connect to rpc")
}
}
}
func waitForReceipt(
client bind.DeployBackend,
tx *types.Transaction,
timeout time.Duration,
) (*types.Receipt, error) {
ticker := time.NewTicker(timeout)
for {
select {
case <-ticker.C:
return nil, errors.Errorf("timed out waiting for receipt for tx %v", tx.Hash().Hex())
default:
}
receipt, err := client.TransactionReceipt(
context.Background(),
tx.Hash(),
)
if err != nil {
if err.Error() == "not found" {
continue
}
logger.Error().Stack().Err(err).Msg("Failure getting TransactionReceipt")
return nil, err
}
return receipt, nil
}
}
func TestFib(t *testing.T) {
// TODO
return
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
defer func() {
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
}()
gethlog.Root().SetHandler(gethlog.LvlFilterHandler(gethlog.LvlInfo, gethlog.StreamHandler(os.Stderr, gethlog.TerminalFormat(true))))
// Enable line numbers in logging
golog.SetFlags(golog.LstdFlags | golog.Lshortfile)
// Print stack trace when `.Error().Stack().Err(err).` is added to zerolog call
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
// Print line number that log was created on
logger = log.With().Caller().Str("component", "connection-test").Logger()
ctx := context.Background()
l1Backend, pks := test.SimulatedBackend()
l1Client := ðutils.SimulatedEthClient{SimulatedBackend: l1Backend}
// pks[0]: setupRollup (L1)
// pks[1,2]: setupValidators (L1)
// pks[3]: launchAggregator (not tied to client)
// pks[4]: DeployFibonacci and session (L2, not tied to client)
auths := make([]*bind.TransactOpts, 0)
authClients := make([]*ethbridge.EthArbAuthClient, 0)
// 0-3 are on L1
for _, pk := range pks[0:3] {
auth := bind.NewKeyedTransactor(pk)
auths = append(auths, auth)
authClient, err := ethbridge.NewEthAuthClient(ctx, l1Client, auth)
if err != nil {
t.Fatal(err)
}
authClients = append(authClients, authClient)
}
// 3 just uses auth, authClient created inside launchAggregator
auths = append(auths, bind.NewKeyedTransactor(pks[3]))
// 4 is on L2, doesn't use ethbridge
auths = append(auths, bind.NewKeyedTransactor(pks[4]))
go func() {
t := time.NewTicker(time.Second * 2)
for range t.C {
logger.Info().Msg("Commit")
l1Client.Commit()
}
}()
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
if err := os.Mkdir(db, 0700); err != nil {
t.Fatal(err)
}
rollupAddress, err := setupRollup(ctx, authClients[0])
if err != nil {
t.Fatal(err)
}
t.Log("Created rollup chain", rollupAddress)
if err := setupValidators(ctx, rollupAddress, authClients[1:3]); err != nil {
t.Fatalf("Validator setup error %v", err)
}
logger.Info().Msg("Validators setup, launching aggregator")
if err := launchAggregator(
l1Client,
auths[3],
rollupAddress,
); err != nil |
logger.Info().Msg("Launched aggregator, connecting to RPC")
l2Client, err := ethclient.Dial("http://localhost:9546")
if err != nil {
t.Fatal(err)
}
t.Log("Connected to aggregator")
logger.Info().Hex("account4", auths[4].From.Bytes()).Msg("Account being used to deploy fibonacci")
// Do not wrap with MakeContract because auth is wrapped in session below
auths[4].Nonce = big.NewInt(0)
_, tx, _, err := arbostestcontracts.DeployFibonacci(auths[4], l2Client)
if err != nil {
t.Fatal("DeployFibonacci failed", err)
}
auths[4].Nonce = auths[4].Nonce.Add(auths[4].Nonce, big.NewInt(1))
logger.Info().Hex("tx", tx.Hash().Bytes()).Msg("Fibonacci deployed")
receipt, err := waitForReceipt(
l2Client,
tx,
time.Second*20,
)
if err != nil {
t.Fatal("DeployFibonacci receipt error", err)
}
if receipt.Status != 1 {
t.Fatal("tx deploying fib failed")
}
logger.Info().Hex("address", receipt.ContractAddress.Bytes()).Msg("Contract address found")
t.Log("Fib contract is at", receipt.ContractAddress.Hex())
fib, err := arbostestcontracts.NewFibonacci(receipt.ContractAddress, l2Client)
if err != nil {
t.Fatal("connect fib failed", err)
}
// Wrap the Token contract instance into a session
session := &arbostestcontracts.FibonacciSession{
Contract: fib,
CallOpts: bind.CallOpts{
From: auths[4].From,
Pending: true,
},
TransactOpts: *auths[4],
}
fibsize := 15
fibnum := 11
tx, err = session.GenerateFib(big.NewInt(int64(fibsize)))
if err != nil {
t.Fatal("GenerateFib error", err)
}
receipt, err = waitForReceipt(
l2Client,
tx,
time.Second*20,
)
if err != nil {
t.Fatal("GenerateFib receipt error", err)
}
if receipt.Status != 1 {
t.Fatal("tx generating numbers failed")
}
fibval, err := session.GetFib(big.NewInt(int64(fibnum)))
if err != nil {
t.Fatal("GetFib error", err)
}
if fibval.Cmp(big.NewInt(144)) != 0 { // 11th fibanocci number
t.Fatalf(
"GetFib error - expected %v got %v",
big.NewInt(int64(144)),
fibval,
)
}
start := uint64(0)
Loop:
for {
select {
case <-time.After(time.Second * 20):
return
default:
}
filter := &bind.FilterOpts{
Start: start,
End: nil,
Context: ctx,
}
it, err := session.Contract.FilterTestEvent(filter)
if err != nil {
t.Fatalf("FilterTestEvent error %v", err)
return
}
for it.Next() {
if it.Event.Number.Cmp(big.NewInt(int64(fibsize))) != 0 {
t.Error("test event had wrong number")
}
break Loop
}
}
}
| {
t.Fatal(err)
} | conditional_block |
connection_test.go | package test
import (
"context"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/arbostestcontracts"
"github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/rpc"
utils2 "github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/utils"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/rs/zerolog/pkgerrors"
golog "log"
"math/big"
"math/rand"
"net"
"os"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types"
gethlog "github.com/ethereum/go-ethereum/log"
"github.com/offchainlabs/arbitrum/packages/arb-util/arbos"
"github.com/offchainlabs/arbitrum/packages/arb-util/common"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/arbbridge"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/ethbridge"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/ethutils"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/test"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/valprotocol"
"github.com/offchainlabs/arbitrum/packages/arb-validator/chainlistener"
"github.com/offchainlabs/arbitrum/packages/arb-validator/loader"
"github.com/offchainlabs/arbitrum/packages/arb-validator/rollupmanager"
)
var logger zerolog.Logger
var db = "./testman"
var contract = arbos.Path()
func setupRollup(ctx context.Context, authClient *ethbridge.EthArbAuthClient) (common.Address, error) {
config := valprotocol.ChainParams{
StakeRequirement: big.NewInt(10),
StakeToken: common.Address{},
GracePeriod: common.TimeTicks{Val: big.NewInt(13000 * 2)},
MaxExecutionSteps: 10000000000,
ArbGasSpeedLimitPerTick: 200000,
}
factoryAddr, err := ethbridge.DeployRollupFactory(ctx, authClient)
if err != nil {
return common.Address{}, err
}
factory, err := authClient.NewArbFactory(common.NewAddressFromEth(factoryAddr))
if err != nil {
return common.Address{}, err
}
mach, err := loader.LoadMachineFromFile(contract, false, "cpp")
if err != nil {
return common.Address{}, err
}
rollupAddress, _, err := factory.CreateRollup(
ctx,
mach.Hash(),
config,
common.Address{},
)
return rollupAddress, err
}
/********************************************/
/* Validators */
/********************************************/
func setupValidators(ctx context.Context, rollupAddress common.Address, authClients []*ethbridge.EthArbAuthClient) error {
if len(authClients) < 1 {
panic("must have at least 1 authClient")
}
seed := time.Now().UnixNano()
// seed := int64(1559616168133477000)
rand.Seed(seed)
managers := make([]*rollupmanager.Manager, 0, len(authClients))
for _, authClient := range authClients {
rollupActor, err := authClient.NewRollup(rollupAddress)
if err != nil {
return err
}
dbName := db + "/" + authClient.Address().String()
manager, err := rollupmanager.CreateManager(
ctx,
rollupAddress,
arbbridge.NewStressTestClient(authClient, time.Second*15),
contract,
dbName,
)
if err != nil {
return err
}
manager.AddListener(ctx, chainlistener.NewAnnouncerListener(authClient.Address().String()))
validatorListener := chainlistener.NewValidatorChainListener(
ctx,
rollupAddress,
rollupActor,
)
err = validatorListener.AddStaker(authClient)
if err != nil {
return err
}
manager.AddListener(ctx, validatorListener)
managers = append(managers, manager)
}
_ = managers
return nil
}
func launchAggregator(client ethutils.EthClient, auth *bind.TransactOpts, rollupAddress common.Address) error {
go func() {
if err := rpc.LaunchAggregator(
context.Background(),
client,
rollupAddress,
contract,
db+"/aggregator",
"9546",
"9547",
utils2.RPCFlags{},
time.Second,
rpc.StatelessBatcherMode{Auth: auth},
); err != nil {
logger.Fatal().Stack().Err(err).Msg("LaunchAggregator failed")
}
}()
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
conn, err := net.DialTimeout(
"tcp",
net.JoinHostPort("127.0.0.1", "9546"),
time.Second,
)
if err != nil || conn == nil {
break
}
if err := conn.Close(); err != nil {
return err
}
conn, err = net.DialTimeout(
"tcp",
net.JoinHostPort("127.0.0.1", "9546"),
time.Second,
)
if err != nil || conn == nil {
break
}
if err := conn.Close(); err != nil {
return err
}
// Wait for the validator to catch up to head
time.Sleep(time.Second * 2)
return nil
case <-time.After(time.Second * 5):
return errors.New("couldn't connect to rpc")
}
}
}
func | (
client bind.DeployBackend,
tx *types.Transaction,
timeout time.Duration,
) (*types.Receipt, error) {
ticker := time.NewTicker(timeout)
for {
select {
case <-ticker.C:
return nil, errors.Errorf("timed out waiting for receipt for tx %v", tx.Hash().Hex())
default:
}
receipt, err := client.TransactionReceipt(
context.Background(),
tx.Hash(),
)
if err != nil {
if err.Error() == "not found" {
continue
}
logger.Error().Stack().Err(err).Msg("Failure getting TransactionReceipt")
return nil, err
}
return receipt, nil
}
}
func TestFib(t *testing.T) {
// TODO
return
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
defer func() {
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
}()
gethlog.Root().SetHandler(gethlog.LvlFilterHandler(gethlog.LvlInfo, gethlog.StreamHandler(os.Stderr, gethlog.TerminalFormat(true))))
// Enable line numbers in logging
golog.SetFlags(golog.LstdFlags | golog.Lshortfile)
// Print stack trace when `.Error().Stack().Err(err).` is added to zerolog call
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
// Print line number that log was created on
logger = log.With().Caller().Str("component", "connection-test").Logger()
ctx := context.Background()
l1Backend, pks := test.SimulatedBackend()
l1Client := ðutils.SimulatedEthClient{SimulatedBackend: l1Backend}
// pks[0]: setupRollup (L1)
// pks[1,2]: setupValidators (L1)
// pks[3]: launchAggregator (not tied to client)
// pks[4]: DeployFibonacci and session (L2, not tied to client)
auths := make([]*bind.TransactOpts, 0)
authClients := make([]*ethbridge.EthArbAuthClient, 0)
// 0-3 are on L1
for _, pk := range pks[0:3] {
auth := bind.NewKeyedTransactor(pk)
auths = append(auths, auth)
authClient, err := ethbridge.NewEthAuthClient(ctx, l1Client, auth)
if err != nil {
t.Fatal(err)
}
authClients = append(authClients, authClient)
}
// 3 just uses auth, authClient created inside launchAggregator
auths = append(auths, bind.NewKeyedTransactor(pks[3]))
// 4 is on L2, doesn't use ethbridge
auths = append(auths, bind.NewKeyedTransactor(pks[4]))
go func() {
t := time.NewTicker(time.Second * 2)
for range t.C {
logger.Info().Msg("Commit")
l1Client.Commit()
}
}()
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
if err := os.Mkdir(db, 0700); err != nil {
t.Fatal(err)
}
rollupAddress, err := setupRollup(ctx, authClients[0])
if err != nil {
t.Fatal(err)
}
t.Log("Created rollup chain", rollupAddress)
if err := setupValidators(ctx, rollupAddress, authClients[1:3]); err != nil {
t.Fatalf("Validator setup error %v", err)
}
logger.Info().Msg("Validators setup, launching aggregator")
if err := launchAggregator(
l1Client,
auths[3],
rollupAddress,
); err != nil {
t.Fatal(err)
}
logger.Info().Msg("Launched aggregator, connecting to RPC")
l2Client, err := ethclient.Dial("http://localhost:9546")
if err != nil {
t.Fatal(err)
}
t.Log("Connected to aggregator")
logger.Info().Hex("account4", auths[4].From.Bytes()).Msg("Account being used to deploy fibonacci")
// Do not wrap with MakeContract because auth is wrapped in session below
auths[4].Nonce = big.NewInt(0)
_, tx, _, err := arbostestcontracts.DeployFibonacci(auths[4], l2Client)
if err != nil {
t.Fatal("DeployFibonacci failed", err)
}
auths[4].Nonce = auths[4].Nonce.Add(auths[4].Nonce, big.NewInt(1))
logger.Info().Hex("tx", tx.Hash().Bytes()).Msg("Fibonacci deployed")
receipt, err := waitForReceipt(
l2Client,
tx,
time.Second*20,
)
if err != nil {
t.Fatal("DeployFibonacci receipt error", err)
}
if receipt.Status != 1 {
t.Fatal("tx deploying fib failed")
}
logger.Info().Hex("address", receipt.ContractAddress.Bytes()).Msg("Contract address found")
t.Log("Fib contract is at", receipt.ContractAddress.Hex())
fib, err := arbostestcontracts.NewFibonacci(receipt.ContractAddress, l2Client)
if err != nil {
t.Fatal("connect fib failed", err)
}
// Wrap the Token contract instance into a session
session := &arbostestcontracts.FibonacciSession{
Contract: fib,
CallOpts: bind.CallOpts{
From: auths[4].From,
Pending: true,
},
TransactOpts: *auths[4],
}
fibsize := 15
fibnum := 11
tx, err = session.GenerateFib(big.NewInt(int64(fibsize)))
if err != nil {
t.Fatal("GenerateFib error", err)
}
receipt, err = waitForReceipt(
l2Client,
tx,
time.Second*20,
)
if err != nil {
t.Fatal("GenerateFib receipt error", err)
}
if receipt.Status != 1 {
t.Fatal("tx generating numbers failed")
}
fibval, err := session.GetFib(big.NewInt(int64(fibnum)))
if err != nil {
t.Fatal("GetFib error", err)
}
if fibval.Cmp(big.NewInt(144)) != 0 { // 11th fibanocci number
t.Fatalf(
"GetFib error - expected %v got %v",
big.NewInt(int64(144)),
fibval,
)
}
start := uint64(0)
Loop:
for {
select {
case <-time.After(time.Second * 20):
return
default:
}
filter := &bind.FilterOpts{
Start: start,
End: nil,
Context: ctx,
}
it, err := session.Contract.FilterTestEvent(filter)
if err != nil {
t.Fatalf("FilterTestEvent error %v", err)
return
}
for it.Next() {
if it.Event.Number.Cmp(big.NewInt(int64(fibsize))) != 0 {
t.Error("test event had wrong number")
}
break Loop
}
}
}
| waitForReceipt | identifier_name |
connection_test.go | package test
import (
"context"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/arbostestcontracts"
"github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/rpc"
utils2 "github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/utils"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/rs/zerolog/pkgerrors"
golog "log"
"math/big"
"math/rand"
"net"
"os"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types"
gethlog "github.com/ethereum/go-ethereum/log"
"github.com/offchainlabs/arbitrum/packages/arb-util/arbos"
"github.com/offchainlabs/arbitrum/packages/arb-util/common"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/arbbridge"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/ethbridge"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/ethutils"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/test"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/valprotocol"
"github.com/offchainlabs/arbitrum/packages/arb-validator/chainlistener"
"github.com/offchainlabs/arbitrum/packages/arb-validator/loader"
"github.com/offchainlabs/arbitrum/packages/arb-validator/rollupmanager"
)
var logger zerolog.Logger
var db = "./testman"
var contract = arbos.Path()
func setupRollup(ctx context.Context, authClient *ethbridge.EthArbAuthClient) (common.Address, error) {
config := valprotocol.ChainParams{
StakeRequirement: big.NewInt(10),
StakeToken: common.Address{},
GracePeriod: common.TimeTicks{Val: big.NewInt(13000 * 2)},
MaxExecutionSteps: 10000000000,
ArbGasSpeedLimitPerTick: 200000,
}
factoryAddr, err := ethbridge.DeployRollupFactory(ctx, authClient)
if err != nil {
return common.Address{}, err
}
factory, err := authClient.NewArbFactory(common.NewAddressFromEth(factoryAddr))
if err != nil {
return common.Address{}, err
}
mach, err := loader.LoadMachineFromFile(contract, false, "cpp")
if err != nil {
return common.Address{}, err
}
rollupAddress, _, err := factory.CreateRollup(
ctx,
mach.Hash(),
config,
common.Address{},
)
return rollupAddress, err
}
/********************************************/
/* Validators */
/********************************************/
func setupValidators(ctx context.Context, rollupAddress common.Address, authClients []*ethbridge.EthArbAuthClient) error {
if len(authClients) < 1 {
panic("must have at least 1 authClient")
}
seed := time.Now().UnixNano()
// seed := int64(1559616168133477000)
rand.Seed(seed)
managers := make([]*rollupmanager.Manager, 0, len(authClients))
for _, authClient := range authClients {
rollupActor, err := authClient.NewRollup(rollupAddress)
if err != nil {
return err
}
dbName := db + "/" + authClient.Address().String()
manager, err := rollupmanager.CreateManager(
ctx,
rollupAddress,
arbbridge.NewStressTestClient(authClient, time.Second*15),
contract,
dbName,
)
if err != nil {
return err
}
manager.AddListener(ctx, chainlistener.NewAnnouncerListener(authClient.Address().String()))
validatorListener := chainlistener.NewValidatorChainListener(
ctx,
rollupAddress,
rollupActor,
)
err = validatorListener.AddStaker(authClient)
if err != nil {
return err
}
manager.AddListener(ctx, validatorListener)
managers = append(managers, manager)
}
_ = managers
return nil
}
func launchAggregator(client ethutils.EthClient, auth *bind.TransactOpts, rollupAddress common.Address) error |
func waitForReceipt(
client bind.DeployBackend,
tx *types.Transaction,
timeout time.Duration,
) (*types.Receipt, error) {
ticker := time.NewTicker(timeout)
for {
select {
case <-ticker.C:
return nil, errors.Errorf("timed out waiting for receipt for tx %v", tx.Hash().Hex())
default:
}
receipt, err := client.TransactionReceipt(
context.Background(),
tx.Hash(),
)
if err != nil {
if err.Error() == "not found" {
continue
}
logger.Error().Stack().Err(err).Msg("Failure getting TransactionReceipt")
return nil, err
}
return receipt, nil
}
}
func TestFib(t *testing.T) {
// TODO
return
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
defer func() {
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
}()
gethlog.Root().SetHandler(gethlog.LvlFilterHandler(gethlog.LvlInfo, gethlog.StreamHandler(os.Stderr, gethlog.TerminalFormat(true))))
// Enable line numbers in logging
golog.SetFlags(golog.LstdFlags | golog.Lshortfile)
// Print stack trace when `.Error().Stack().Err(err).` is added to zerolog call
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
// Print line number that log was created on
logger = log.With().Caller().Str("component", "connection-test").Logger()
ctx := context.Background()
l1Backend, pks := test.SimulatedBackend()
l1Client := ðutils.SimulatedEthClient{SimulatedBackend: l1Backend}
// pks[0]: setupRollup (L1)
// pks[1,2]: setupValidators (L1)
// pks[3]: launchAggregator (not tied to client)
// pks[4]: DeployFibonacci and session (L2, not tied to client)
auths := make([]*bind.TransactOpts, 0)
authClients := make([]*ethbridge.EthArbAuthClient, 0)
// 0-3 are on L1
for _, pk := range pks[0:3] {
auth := bind.NewKeyedTransactor(pk)
auths = append(auths, auth)
authClient, err := ethbridge.NewEthAuthClient(ctx, l1Client, auth)
if err != nil {
t.Fatal(err)
}
authClients = append(authClients, authClient)
}
// 3 just uses auth, authClient created inside launchAggregator
auths = append(auths, bind.NewKeyedTransactor(pks[3]))
// 4 is on L2, doesn't use ethbridge
auths = append(auths, bind.NewKeyedTransactor(pks[4]))
go func() {
t := time.NewTicker(time.Second * 2)
for range t.C {
logger.Info().Msg("Commit")
l1Client.Commit()
}
}()
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
if err := os.Mkdir(db, 0700); err != nil {
t.Fatal(err)
}
rollupAddress, err := setupRollup(ctx, authClients[0])
if err != nil {
t.Fatal(err)
}
t.Log("Created rollup chain", rollupAddress)
if err := setupValidators(ctx, rollupAddress, authClients[1:3]); err != nil {
t.Fatalf("Validator setup error %v", err)
}
logger.Info().Msg("Validators setup, launching aggregator")
if err := launchAggregator(
l1Client,
auths[3],
rollupAddress,
); err != nil {
t.Fatal(err)
}
logger.Info().Msg("Launched aggregator, connecting to RPC")
l2Client, err := ethclient.Dial("http://localhost:9546")
if err != nil {
t.Fatal(err)
}
t.Log("Connected to aggregator")
logger.Info().Hex("account4", auths[4].From.Bytes()).Msg("Account being used to deploy fibonacci")
// Do not wrap with MakeContract because auth is wrapped in session below
auths[4].Nonce = big.NewInt(0)
_, tx, _, err := arbostestcontracts.DeployFibonacci(auths[4], l2Client)
if err != nil {
t.Fatal("DeployFibonacci failed", err)
}
auths[4].Nonce = auths[4].Nonce.Add(auths[4].Nonce, big.NewInt(1))
logger.Info().Hex("tx", tx.Hash().Bytes()).Msg("Fibonacci deployed")
receipt, err := waitForReceipt(
l2Client,
tx,
time.Second*20,
)
if err != nil {
t.Fatal("DeployFibonacci receipt error", err)
}
if receipt.Status != 1 {
t.Fatal("tx deploying fib failed")
}
logger.Info().Hex("address", receipt.ContractAddress.Bytes()).Msg("Contract address found")
t.Log("Fib contract is at", receipt.ContractAddress.Hex())
fib, err := arbostestcontracts.NewFibonacci(receipt.ContractAddress, l2Client)
if err != nil {
t.Fatal("connect fib failed", err)
}
// Wrap the Token contract instance into a session
session := &arbostestcontracts.FibonacciSession{
Contract: fib,
CallOpts: bind.CallOpts{
From: auths[4].From,
Pending: true,
},
TransactOpts: *auths[4],
}
fibsize := 15
fibnum := 11
tx, err = session.GenerateFib(big.NewInt(int64(fibsize)))
if err != nil {
t.Fatal("GenerateFib error", err)
}
receipt, err = waitForReceipt(
l2Client,
tx,
time.Second*20,
)
if err != nil {
t.Fatal("GenerateFib receipt error", err)
}
if receipt.Status != 1 {
t.Fatal("tx generating numbers failed")
}
fibval, err := session.GetFib(big.NewInt(int64(fibnum)))
if err != nil {
t.Fatal("GetFib error", err)
}
if fibval.Cmp(big.NewInt(144)) != 0 { // 11th fibanocci number
t.Fatalf(
"GetFib error - expected %v got %v",
big.NewInt(int64(144)),
fibval,
)
}
start := uint64(0)
Loop:
for {
select {
case <-time.After(time.Second * 20):
return
default:
}
filter := &bind.FilterOpts{
Start: start,
End: nil,
Context: ctx,
}
it, err := session.Contract.FilterTestEvent(filter)
if err != nil {
t.Fatalf("FilterTestEvent error %v", err)
return
}
for it.Next() {
if it.Event.Number.Cmp(big.NewInt(int64(fibsize))) != 0 {
t.Error("test event had wrong number")
}
break Loop
}
}
}
| {
go func() {
if err := rpc.LaunchAggregator(
context.Background(),
client,
rollupAddress,
contract,
db+"/aggregator",
"9546",
"9547",
utils2.RPCFlags{},
time.Second,
rpc.StatelessBatcherMode{Auth: auth},
); err != nil {
logger.Fatal().Stack().Err(err).Msg("LaunchAggregator failed")
}
}()
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
conn, err := net.DialTimeout(
"tcp",
net.JoinHostPort("127.0.0.1", "9546"),
time.Second,
)
if err != nil || conn == nil {
break
}
if err := conn.Close(); err != nil {
return err
}
conn, err = net.DialTimeout(
"tcp",
net.JoinHostPort("127.0.0.1", "9546"),
time.Second,
)
if err != nil || conn == nil {
break
}
if err := conn.Close(); err != nil {
return err
}
// Wait for the validator to catch up to head
time.Sleep(time.Second * 2)
return nil
case <-time.After(time.Second * 5):
return errors.New("couldn't connect to rpc")
}
}
} | identifier_body |
connection_test.go | package test
import (
"context"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/arbostestcontracts"
"github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/rpc"
utils2 "github.com/offchainlabs/arbitrum/packages/arb-tx-aggregator/utils"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/rs/zerolog/pkgerrors"
golog "log"
"math/big"
"math/rand"
"net"
"os"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/core/types"
gethlog "github.com/ethereum/go-ethereum/log"
"github.com/offchainlabs/arbitrum/packages/arb-util/arbos"
"github.com/offchainlabs/arbitrum/packages/arb-util/common"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/arbbridge"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/ethbridge"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/ethutils"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/test"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/valprotocol"
"github.com/offchainlabs/arbitrum/packages/arb-validator/chainlistener"
"github.com/offchainlabs/arbitrum/packages/arb-validator/loader"
"github.com/offchainlabs/arbitrum/packages/arb-validator/rollupmanager"
)
var logger zerolog.Logger
var db = "./testman"
var contract = arbos.Path()
func setupRollup(ctx context.Context, authClient *ethbridge.EthArbAuthClient) (common.Address, error) {
config := valprotocol.ChainParams{
StakeRequirement: big.NewInt(10),
StakeToken: common.Address{},
GracePeriod: common.TimeTicks{Val: big.NewInt(13000 * 2)},
MaxExecutionSteps: 10000000000,
ArbGasSpeedLimitPerTick: 200000,
}
factoryAddr, err := ethbridge.DeployRollupFactory(ctx, authClient)
if err != nil {
return common.Address{}, err
}
factory, err := authClient.NewArbFactory(common.NewAddressFromEth(factoryAddr))
if err != nil {
return common.Address{}, err
}
mach, err := loader.LoadMachineFromFile(contract, false, "cpp")
if err != nil {
return common.Address{}, err
}
rollupAddress, _, err := factory.CreateRollup(
ctx,
mach.Hash(),
config,
common.Address{},
)
return rollupAddress, err
}
/********************************************/
/* Validators */
/********************************************/
func setupValidators(ctx context.Context, rollupAddress common.Address, authClients []*ethbridge.EthArbAuthClient) error {
if len(authClients) < 1 {
panic("must have at least 1 authClient")
}
seed := time.Now().UnixNano()
// seed := int64(1559616168133477000)
rand.Seed(seed)
managers := make([]*rollupmanager.Manager, 0, len(authClients))
for _, authClient := range authClients {
rollupActor, err := authClient.NewRollup(rollupAddress)
if err != nil {
return err
}
dbName := db + "/" + authClient.Address().String()
manager, err := rollupmanager.CreateManager(
ctx,
rollupAddress,
arbbridge.NewStressTestClient(authClient, time.Second*15),
contract,
dbName,
)
if err != nil {
return err
}
manager.AddListener(ctx, chainlistener.NewAnnouncerListener(authClient.Address().String()))
validatorListener := chainlistener.NewValidatorChainListener(
ctx,
rollupAddress,
rollupActor,
)
err = validatorListener.AddStaker(authClient)
if err != nil {
return err
}
manager.AddListener(ctx, validatorListener)
managers = append(managers, manager)
} |
return nil
}
func launchAggregator(client ethutils.EthClient, auth *bind.TransactOpts, rollupAddress common.Address) error {
go func() {
if err := rpc.LaunchAggregator(
context.Background(),
client,
rollupAddress,
contract,
db+"/aggregator",
"9546",
"9547",
utils2.RPCFlags{},
time.Second,
rpc.StatelessBatcherMode{Auth: auth},
); err != nil {
logger.Fatal().Stack().Err(err).Msg("LaunchAggregator failed")
}
}()
ticker := time.NewTicker(time.Second)
for {
select {
case <-ticker.C:
conn, err := net.DialTimeout(
"tcp",
net.JoinHostPort("127.0.0.1", "9546"),
time.Second,
)
if err != nil || conn == nil {
break
}
if err := conn.Close(); err != nil {
return err
}
conn, err = net.DialTimeout(
"tcp",
net.JoinHostPort("127.0.0.1", "9546"),
time.Second,
)
if err != nil || conn == nil {
break
}
if err := conn.Close(); err != nil {
return err
}
// Wait for the validator to catch up to head
time.Sleep(time.Second * 2)
return nil
case <-time.After(time.Second * 5):
return errors.New("couldn't connect to rpc")
}
}
}
func waitForReceipt(
client bind.DeployBackend,
tx *types.Transaction,
timeout time.Duration,
) (*types.Receipt, error) {
ticker := time.NewTicker(timeout)
for {
select {
case <-ticker.C:
return nil, errors.Errorf("timed out waiting for receipt for tx %v", tx.Hash().Hex())
default:
}
receipt, err := client.TransactionReceipt(
context.Background(),
tx.Hash(),
)
if err != nil {
if err.Error() == "not found" {
continue
}
logger.Error().Stack().Err(err).Msg("Failure getting TransactionReceipt")
return nil, err
}
return receipt, nil
}
}
func TestFib(t *testing.T) {
// TODO
return
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
defer func() {
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
}()
gethlog.Root().SetHandler(gethlog.LvlFilterHandler(gethlog.LvlInfo, gethlog.StreamHandler(os.Stderr, gethlog.TerminalFormat(true))))
// Enable line numbers in logging
golog.SetFlags(golog.LstdFlags | golog.Lshortfile)
// Print stack trace when `.Error().Stack().Err(err).` is added to zerolog call
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
// Print line number that log was created on
logger = log.With().Caller().Str("component", "connection-test").Logger()
ctx := context.Background()
l1Backend, pks := test.SimulatedBackend()
l1Client := ðutils.SimulatedEthClient{SimulatedBackend: l1Backend}
// pks[0]: setupRollup (L1)
// pks[1,2]: setupValidators (L1)
// pks[3]: launchAggregator (not tied to client)
// pks[4]: DeployFibonacci and session (L2, not tied to client)
auths := make([]*bind.TransactOpts, 0)
authClients := make([]*ethbridge.EthArbAuthClient, 0)
// 0-3 are on L1
for _, pk := range pks[0:3] {
auth := bind.NewKeyedTransactor(pk)
auths = append(auths, auth)
authClient, err := ethbridge.NewEthAuthClient(ctx, l1Client, auth)
if err != nil {
t.Fatal(err)
}
authClients = append(authClients, authClient)
}
// 3 just uses auth, authClient created inside launchAggregator
auths = append(auths, bind.NewKeyedTransactor(pks[3]))
// 4 is on L2, doesn't use ethbridge
auths = append(auths, bind.NewKeyedTransactor(pks[4]))
go func() {
t := time.NewTicker(time.Second * 2)
for range t.C {
logger.Info().Msg("Commit")
l1Client.Commit()
}
}()
if err := os.RemoveAll(db); err != nil {
t.Fatal(err)
}
if err := os.Mkdir(db, 0700); err != nil {
t.Fatal(err)
}
rollupAddress, err := setupRollup(ctx, authClients[0])
if err != nil {
t.Fatal(err)
}
t.Log("Created rollup chain", rollupAddress)
if err := setupValidators(ctx, rollupAddress, authClients[1:3]); err != nil {
t.Fatalf("Validator setup error %v", err)
}
logger.Info().Msg("Validators setup, launching aggregator")
if err := launchAggregator(
l1Client,
auths[3],
rollupAddress,
); err != nil {
t.Fatal(err)
}
logger.Info().Msg("Launched aggregator, connecting to RPC")
l2Client, err := ethclient.Dial("http://localhost:9546")
if err != nil {
t.Fatal(err)
}
t.Log("Connected to aggregator")
logger.Info().Hex("account4", auths[4].From.Bytes()).Msg("Account being used to deploy fibonacci")
// Do not wrap with MakeContract because auth is wrapped in session below
auths[4].Nonce = big.NewInt(0)
_, tx, _, err := arbostestcontracts.DeployFibonacci(auths[4], l2Client)
if err != nil {
t.Fatal("DeployFibonacci failed", err)
}
auths[4].Nonce = auths[4].Nonce.Add(auths[4].Nonce, big.NewInt(1))
logger.Info().Hex("tx", tx.Hash().Bytes()).Msg("Fibonacci deployed")
receipt, err := waitForReceipt(
l2Client,
tx,
time.Second*20,
)
if err != nil {
t.Fatal("DeployFibonacci receipt error", err)
}
if receipt.Status != 1 {
t.Fatal("tx deploying fib failed")
}
logger.Info().Hex("address", receipt.ContractAddress.Bytes()).Msg("Contract address found")
t.Log("Fib contract is at", receipt.ContractAddress.Hex())
fib, err := arbostestcontracts.NewFibonacci(receipt.ContractAddress, l2Client)
if err != nil {
t.Fatal("connect fib failed", err)
}
// Wrap the Token contract instance into a session
session := &arbostestcontracts.FibonacciSession{
Contract: fib,
CallOpts: bind.CallOpts{
From: auths[4].From,
Pending: true,
},
TransactOpts: *auths[4],
}
fibsize := 15
fibnum := 11
tx, err = session.GenerateFib(big.NewInt(int64(fibsize)))
if err != nil {
t.Fatal("GenerateFib error", err)
}
receipt, err = waitForReceipt(
l2Client,
tx,
time.Second*20,
)
if err != nil {
t.Fatal("GenerateFib receipt error", err)
}
if receipt.Status != 1 {
t.Fatal("tx generating numbers failed")
}
fibval, err := session.GetFib(big.NewInt(int64(fibnum)))
if err != nil {
t.Fatal("GetFib error", err)
}
if fibval.Cmp(big.NewInt(144)) != 0 { // 11th fibanocci number
t.Fatalf(
"GetFib error - expected %v got %v",
big.NewInt(int64(144)),
fibval,
)
}
start := uint64(0)
Loop:
for {
select {
case <-time.After(time.Second * 20):
return
default:
}
filter := &bind.FilterOpts{
Start: start,
End: nil,
Context: ctx,
}
it, err := session.Contract.FilterTestEvent(filter)
if err != nil {
t.Fatalf("FilterTestEvent error %v", err)
return
}
for it.Next() {
if it.Event.Number.Cmp(big.NewInt(int64(fibsize))) != 0 {
t.Error("test event had wrong number")
}
break Loop
}
}
} |
_ = managers | random_line_split |
partition.rs | use arrow_deps::{
arrow::record_batch::RecordBatch, datafusion::logical_plan::Expr,
datafusion::logical_plan::Operator, datafusion::optimizer::utils::expr_to_column_names,
datafusion::scalar::ScalarValue,
};
use generated_types::wal as wb;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use wal::{Entry as WalEntry, Result as WalResult};
use data_types::TIME_COLUMN_NAME;
use storage::{
predicate::{Predicate, TimestampRange},
util::{visit_expression, AndExprBuilder, ExpressionVisitor},
};
use crate::dictionary::Dictionary;
use crate::table::Table;
use snafu::{OptionExt, ResultExt, Snafu};
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Could not read WAL entry: {}", source))]
WalEntryRead { source: wal::Error },
#[snafu(display("Partition {} not found", partition))]
PartitionNotFound { partition: String },
#[snafu(display(
"Column name {} not found in dictionary of partition {}",
column,
partition
))]
ColumnNameNotFoundInDictionary {
column: String,
partition: String,
source: crate::dictionary::Error,
},
#[snafu(display("Error writing table '{}': {}", table_name, source))]
TableWrite {
table_name: String,
source: crate::table::Error,
},
#[snafu(display("Table Error in '{}': {}", table_name, source))]
NamedTableError {
table_name: String,
source: crate::table::Error,
},
#[snafu(display(
"Table name {} not found in dictionary of partition {}",
table,
partition
))]
TableNameNotFoundInDictionary {
table: String,
partition: String,
source: crate::dictionary::Error,
},
#[snafu(display("Table {} not found in partition {}", table, partition))]
TableNotFoundInPartition { table: u32, partition: String },
#[snafu(display("Attempt to write table batch without a name"))]
TableWriteWithoutName,
#[snafu(display("Error restoring WAL entry, missing partition key"))]
MissingPartitionKey,
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Debug)]
pub struct Partition {
pub key: String,
/// `dictionary` maps &str -> u32. The u32s are used in place of String or str to avoid slow
/// string operations. The same dictionary is used for table names, tag names, tag values, and
/// column names.
// TODO: intern string field values too?
pub dictionary: Dictionary,
/// map of the dictionary ID for the table name to the table
pub tables: HashMap<u32, Table>,
pub is_open: bool,
}
/// Describes the result of translating a set of strings into
/// partition specific ids
#[derive(Debug, PartialEq, Eq)]
pub enum PartitionIdSet {
/// At least one of the strings was not present in the partitions'
/// dictionary.
///
/// This is important when testing for the presence of all ids in
/// a set, as we know they can not all be present
AtLeastOneMissing,
/// All strings existed in this partition's dictionary
Present(BTreeSet<u32>),
}
/// a 'Compiled' set of predicates / filters that can be evaluated on
/// this partition (where strings have been translated to partition
/// specific u32 ids)
#[derive(Debug)]
pub struct PartitionPredicate {
/// If present, restrict the request to just those tables whose
/// names are in table_names. If present but empty, means there
/// was a predicate but no tables named that way exist in the
/// partition (so no table can pass)
pub table_name_predicate: Option<BTreeSet<u32>>,
// Optional field column selection. If present, further restrict
// any field columns returnedto only those named
pub field_restriction: Option<BTreeSet<u32>>,
/// General DataFusion expressions (arbitrary predicates) applied
/// as a filter using logical conjuction (aka are 'AND'ed
/// together). Only rows that evaluate to TRUE for all these
/// expressions should be returned.
pub partition_exprs: Vec<Expr>,
/// If Some, then the table must contain all columns specified
/// to pass the predicate
pub required_columns: Option<PartitionIdSet>,
/// The id of the "time" column in this partition
pub time_column_id: u32,
/// Timestamp range: only rows within this range should be considered
pub range: Option<TimestampRange>,
}
impl PartitionPredicate {
/// Creates and adds a datafuson predicate representing the
/// combination of predicate and timestamp.
pub fn filter_expr(&self) -> Option<Expr> {
// build up a list of expressions
let mut builder =
AndExprBuilder::default().append_opt(self.make_timestamp_predicate_expr());
for expr in &self.partition_exprs {
builder = builder.append_expr(expr.clone());
}
builder.build()
}
/// Return true if there is a non empty field restriction
pub fn has_field_restriction(&self) -> bool {
match &self.field_restriction {
None => false,
Some(field_restiction) => !field_restiction.is_empty(),
}
}
/// For plans which select a subset of fields, returns true if
/// the field should be included in the results
pub fn should_include_field(&self, field_id: u32) -> bool {
match &self.field_restriction {
None => true,
Some(field_restriction) => field_restriction.contains(&field_id),
}
}
/// Return true if this column is the time column
pub fn is_time_column(&self, id: u32) -> bool {
self.time_column_id == id
}
/// Creates a DataFusion predicate for appliying a timestamp range:
///
/// range.start <= time and time < range.end`
fn make_timestamp_predicate_expr(&self) -> Option<Expr> {
self.range.map(|range| make_range_expr(&range))
}
}
/// Creates expression like:
/// range.low <= time && time < range.high
fn make_range_expr(range: &TimestampRange) -> Expr {
let ts_low = Expr::BinaryExpr {
left: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.start)))),
op: Operator::LtEq,
right: Box::new(Expr::Column(TIME_COLUMN_NAME.into())),
};
let ts_high = Expr::BinaryExpr {
left: Box::new(Expr::Column(TIME_COLUMN_NAME.into())),
op: Operator::Lt,
right: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.end)))),
};
AndExprBuilder::default()
.append_expr(ts_low)
.append_expr(ts_high)
.build()
.unwrap()
}
impl Partition {
pub fn new(key: impl Into<String>) -> Self {
Self {
key: key.into(),
dictionary: Dictionary::new(),
tables: HashMap::new(),
is_open: true,
}
}
pub fn write_entry(&mut self, entry: &wb::WriteBufferEntry<'_>) -> Result<()> {
if let Some(table_batches) = entry.table_batches() {
for batch in table_batches {
self.write_table_batch(&batch)?;
}
}
Ok(())
}
fn write_table_batch(&mut self, batch: &wb::TableWriteBatch<'_>) -> Result<()> {
let table_name = batch.name().context(TableWriteWithoutName)?;
let table_id = self.dictionary.lookup_value_or_insert(table_name);
let table = self
.tables
.entry(table_id)
.or_insert_with(|| Table::new(table_id));
if let Some(rows) = batch.rows() {
table
.append_rows(&mut self.dictionary, &rows)
.context(TableWrite { table_name })?;
}
Ok(())
}
/// Translates `predicate` into per-partition ids that can be
/// directly evaluated against tables in this partition
pub fn compile_predicate(&self, predicate: &Predicate) -> Result<PartitionPredicate> {
let table_name_predicate = self.compile_string_list(predicate.table_names.as_ref());
let field_restriction = self.compile_string_list(predicate.field_columns.as_ref());
let time_column_id = self
.dictionary
.lookup_value(TIME_COLUMN_NAME)
.expect("time is in the partition dictionary");
let range = predicate.range;
// it would be nice to avoid cloning all the exprs here.
let partition_exprs = predicate.exprs.clone();
// In order to evaluate expressions in the table, all columns
// referenced in the expression must appear (I think, not sure
// about NOT, etc so panic if we see one of those);
let mut visitor = SupportVisitor {};
let mut predicate_columns: HashSet<String> = HashSet::new();
for expr in &partition_exprs {
visit_expression(expr, &mut visitor);
expr_to_column_names(&expr, &mut predicate_columns).unwrap();
}
// if there are any column references in the expression, ensure they appear in any table
let required_columns = if predicate_columns.is_empty() {
None
} else {
Some(self.make_partition_ids(predicate_columns.iter()))
};
Ok(PartitionPredicate {
table_name_predicate,
field_restriction,
partition_exprs,
required_columns,
time_column_id,
range,
})
}
/// Converts a potential set of strings into a set of ids in terms
/// of this dictionary. If there are no matching Strings in the
/// partitions dictionary, those strings are ignored and a
/// (potentially empty) set is returned.
fn compile_string_list(&self, names: Option<&BTreeSet<String>>) -> Option<BTreeSet<u32>> {
names.map(|names| {
names
.iter()
.filter_map(|name| self.dictionary.id(name))
.collect::<BTreeSet<_>>()
})
}
/// Adds the ids of any columns in additional_required_columns to the required columns of predicate
pub fn add_required_columns_to_predicate(
&self,
additional_required_columns: &HashSet<String>,
predicate: &mut PartitionPredicate,
) {
for column_name in additional_required_columns {
// Once know we have missing columns, no need to try
// and figure out if these any additional columns are needed
if Some(PartitionIdSet::AtLeastOneMissing) == predicate.required_columns {
return;
}
let column_id = self.dictionary.id(column_name);
// Update the required colunm list
predicate.required_columns = Some(match predicate.required_columns.take() {
None => {
if let Some(column_id) = column_id {
let mut symbols = BTreeSet::new();
symbols.insert(column_id);
PartitionIdSet::Present(symbols)
} else {
PartitionIdSet::AtLeastOneMissing
}
}
Some(PartitionIdSet::Present(mut symbols)) => {
if let Some(column_id) = column_id {
symbols.insert(column_id);
PartitionIdSet::Present(symbols)
} else {
PartitionIdSet::AtLeastOneMissing
}
}
Some(PartitionIdSet::AtLeastOneMissing) => {
unreachable!("Covered by case above while adding required columns to predicate")
}
});
}
}
/// returns true if data with partition key `key` should be
/// written to this partition,
pub fn should_write(&self, key: &str) -> bool {
self.key.starts_with(key) && self.is_open
}
/// Convert the table specified in this partition into an arrow record batch
pub fn table_to_arrow(&self, table_name: &str, columns: &[&str]) -> Result<RecordBatch> {
let table_id =
self.dictionary
.lookup_value(table_name)
.context(TableNameNotFoundInDictionary {
table: table_name,
partition: &self.key,
})?;
let table = self
.tables
.get(&table_id)
.context(TableNotFoundInPartition {
table: table_id,
partition: &self.key,
})?;
table
.to_arrow(&self, columns)
.context(NamedTableError { table_name })
}
/// Translate a bunch of strings into a set of ids relative to this partition
pub fn make_partition_ids<'a, I>(&self, predicate_columns: I) -> PartitionIdSet
where
I: Iterator<Item = &'a String>,
{
let mut symbols = BTreeSet::new();
for column_name in predicate_columns {
if let Some(column_id) = self.dictionary.id(column_name) {
symbols.insert(column_id);
} else {
return PartitionIdSet::AtLeastOneMissing;
}
}
PartitionIdSet::Present(symbols)
}
}
/// Used to figure out if we know how to deal with this kind of
/// predicate in the write buffer
struct SupportVisitor {}
impl ExpressionVisitor for SupportVisitor {
fn pre_visit(&mut self, expr: &Expr) {
match expr {
Expr::Literal(..) => {}
Expr::Column(..) => |
Expr::BinaryExpr { op, .. } => {
match op {
Operator::Eq
| Operator::Lt
| Operator::LtEq
| Operator::Gt
| Operator::GtEq
| Operator::Plus
| Operator::Minus
| Operator::Multiply
| Operator::Divide
| Operator::And
| Operator::Or => {}
// Unsupported (need to think about ramifications)
Operator::NotEq | Operator::Modulus | Operator::Like | Operator::NotLike => {
panic!("Unsupported binary operator in expression: {:?}", expr)
}
}
}
_ => panic!(
"Unsupported expression in write_buffer database: {:?}",
expr
),
}
}
}
#[derive(Default, Debug)]
pub struct RestorationStats {
pub row_count: usize,
pub tables: BTreeSet<String>,
}
/// Given a set of WAL entries, restore them into a set of Partitions.
pub fn restore_partitions_from_wal(
wal_entries: impl Iterator<Item = WalResult<WalEntry>>,
) -> Result<(Vec<Partition>, RestorationStats)> {
let mut stats = RestorationStats::default();
let mut partitions = BTreeMap::new();
for wal_entry in wal_entries {
let wal_entry = wal_entry.context(WalEntryRead)?;
let bytes = wal_entry.as_data();
let batch = flatbuffers::get_root::<wb::WriteBufferBatch<'_>>(&bytes);
if let Some(entries) = batch.entries() {
for entry in entries {
let partition_key = entry.partition_key().context(MissingPartitionKey)?;
if !partitions.contains_key(partition_key) {
partitions.insert(
partition_key.to_string(),
Partition::new(partition_key.to_string()),
);
}
let partition = partitions
.get_mut(partition_key)
.context(PartitionNotFound {
partition: partition_key,
})?;
partition.write_entry(&entry)?;
}
}
}
let partitions = partitions
.into_iter()
.map(|(_, p)| p)
.collect::<Vec<Partition>>();
// compute the stats
for p in &partitions {
for (id, table) in &p.tables {
let name = p
.dictionary
.lookup_id(*id)
.expect("table id wasn't inserted into dictionary on restore");
if !stats.tables.contains(name) {
stats.tables.insert(name.to_string());
}
stats.row_count += table.row_count();
}
}
Ok((partitions, stats))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_make_range_expr() {
// Test that the generated predicate is correct
let range = TimestampRange::new(101, 202);
let ts_predicate_expr = make_range_expr(&range);
let expected_string = "Int64(101) LtEq #time And #time Lt Int64(202)";
let actual_string = format!("{:?}", ts_predicate_expr);
assert_eq!(actual_string, expected_string);
}
}
| {} | conditional_block |
partition.rs | use arrow_deps::{
arrow::record_batch::RecordBatch, datafusion::logical_plan::Expr,
datafusion::logical_plan::Operator, datafusion::optimizer::utils::expr_to_column_names,
datafusion::scalar::ScalarValue,
};
use generated_types::wal as wb;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use wal::{Entry as WalEntry, Result as WalResult};
use data_types::TIME_COLUMN_NAME;
use storage::{
predicate::{Predicate, TimestampRange},
util::{visit_expression, AndExprBuilder, ExpressionVisitor},
};
use crate::dictionary::Dictionary;
use crate::table::Table;
use snafu::{OptionExt, ResultExt, Snafu};
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Could not read WAL entry: {}", source))]
WalEntryRead { source: wal::Error },
#[snafu(display("Partition {} not found", partition))]
PartitionNotFound { partition: String },
#[snafu(display(
"Column name {} not found in dictionary of partition {}",
column,
partition
))]
ColumnNameNotFoundInDictionary {
column: String,
partition: String,
source: crate::dictionary::Error,
},
#[snafu(display("Error writing table '{}': {}", table_name, source))]
TableWrite {
table_name: String,
source: crate::table::Error,
},
#[snafu(display("Table Error in '{}': {}", table_name, source))]
NamedTableError {
table_name: String,
source: crate::table::Error,
},
#[snafu(display(
"Table name {} not found in dictionary of partition {}",
table,
partition
))]
TableNameNotFoundInDictionary {
table: String,
partition: String,
source: crate::dictionary::Error,
},
#[snafu(display("Table {} not found in partition {}", table, partition))]
TableNotFoundInPartition { table: u32, partition: String },
#[snafu(display("Attempt to write table batch without a name"))]
TableWriteWithoutName,
#[snafu(display("Error restoring WAL entry, missing partition key"))]
MissingPartitionKey,
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Debug)]
pub struct Partition {
pub key: String,
/// `dictionary` maps &str -> u32. The u32s are used in place of String or str to avoid slow
/// string operations. The same dictionary is used for table names, tag names, tag values, and
/// column names.
// TODO: intern string field values too?
pub dictionary: Dictionary,
/// map of the dictionary ID for the table name to the table
pub tables: HashMap<u32, Table>,
pub is_open: bool,
}
/// Describes the result of translating a set of strings into
/// partition specific ids
#[derive(Debug, PartialEq, Eq)]
pub enum PartitionIdSet {
/// At least one of the strings was not present in the partitions'
/// dictionary.
///
/// This is important when testing for the presence of all ids in
/// a set, as we know they can not all be present
AtLeastOneMissing,
/// All strings existed in this partition's dictionary
Present(BTreeSet<u32>),
}
/// a 'Compiled' set of predicates / filters that can be evaluated on
/// this partition (where strings have been translated to partition
/// specific u32 ids)
#[derive(Debug)]
pub struct PartitionPredicate {
/// If present, restrict the request to just those tables whose
/// names are in table_names. If present but empty, means there
/// was a predicate but no tables named that way exist in the
/// partition (so no table can pass)
pub table_name_predicate: Option<BTreeSet<u32>>,
// Optional field column selection. If present, further restrict
// any field columns returnedto only those named
pub field_restriction: Option<BTreeSet<u32>>,
/// General DataFusion expressions (arbitrary predicates) applied
/// as a filter using logical conjuction (aka are 'AND'ed
/// together). Only rows that evaluate to TRUE for all these
/// expressions should be returned.
pub partition_exprs: Vec<Expr>,
/// If Some, then the table must contain all columns specified
/// to pass the predicate
pub required_columns: Option<PartitionIdSet>,
/// The id of the "time" column in this partition
pub time_column_id: u32,
/// Timestamp range: only rows within this range should be considered
pub range: Option<TimestampRange>,
}
impl PartitionPredicate {
/// Creates and adds a datafuson predicate representing the
/// combination of predicate and timestamp.
pub fn filter_expr(&self) -> Option<Expr> {
// build up a list of expressions
let mut builder =
AndExprBuilder::default().append_opt(self.make_timestamp_predicate_expr());
for expr in &self.partition_exprs {
builder = builder.append_expr(expr.clone());
}
builder.build()
}
/// Return true if there is a non empty field restriction
pub fn has_field_restriction(&self) -> bool {
match &self.field_restriction {
None => false,
Some(field_restiction) => !field_restiction.is_empty(),
}
}
/// For plans which select a subset of fields, returns true if
/// the field should be included in the results
pub fn should_include_field(&self, field_id: u32) -> bool |
/// Return true if this column is the time column
pub fn is_time_column(&self, id: u32) -> bool {
self.time_column_id == id
}
/// Creates a DataFusion predicate for appliying a timestamp range:
///
/// range.start <= time and time < range.end`
fn make_timestamp_predicate_expr(&self) -> Option<Expr> {
self.range.map(|range| make_range_expr(&range))
}
}
/// Creates expression like:
/// range.low <= time && time < range.high
fn make_range_expr(range: &TimestampRange) -> Expr {
let ts_low = Expr::BinaryExpr {
left: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.start)))),
op: Operator::LtEq,
right: Box::new(Expr::Column(TIME_COLUMN_NAME.into())),
};
let ts_high = Expr::BinaryExpr {
left: Box::new(Expr::Column(TIME_COLUMN_NAME.into())),
op: Operator::Lt,
right: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.end)))),
};
AndExprBuilder::default()
.append_expr(ts_low)
.append_expr(ts_high)
.build()
.unwrap()
}
impl Partition {
pub fn new(key: impl Into<String>) -> Self {
Self {
key: key.into(),
dictionary: Dictionary::new(),
tables: HashMap::new(),
is_open: true,
}
}
pub fn write_entry(&mut self, entry: &wb::WriteBufferEntry<'_>) -> Result<()> {
if let Some(table_batches) = entry.table_batches() {
for batch in table_batches {
self.write_table_batch(&batch)?;
}
}
Ok(())
}
fn write_table_batch(&mut self, batch: &wb::TableWriteBatch<'_>) -> Result<()> {
let table_name = batch.name().context(TableWriteWithoutName)?;
let table_id = self.dictionary.lookup_value_or_insert(table_name);
let table = self
.tables
.entry(table_id)
.or_insert_with(|| Table::new(table_id));
if let Some(rows) = batch.rows() {
table
.append_rows(&mut self.dictionary, &rows)
.context(TableWrite { table_name })?;
}
Ok(())
}
/// Translates `predicate` into per-partition ids that can be
/// directly evaluated against tables in this partition
pub fn compile_predicate(&self, predicate: &Predicate) -> Result<PartitionPredicate> {
let table_name_predicate = self.compile_string_list(predicate.table_names.as_ref());
let field_restriction = self.compile_string_list(predicate.field_columns.as_ref());
let time_column_id = self
.dictionary
.lookup_value(TIME_COLUMN_NAME)
.expect("time is in the partition dictionary");
let range = predicate.range;
// it would be nice to avoid cloning all the exprs here.
let partition_exprs = predicate.exprs.clone();
// In order to evaluate expressions in the table, all columns
// referenced in the expression must appear (I think, not sure
// about NOT, etc so panic if we see one of those);
let mut visitor = SupportVisitor {};
let mut predicate_columns: HashSet<String> = HashSet::new();
for expr in &partition_exprs {
visit_expression(expr, &mut visitor);
expr_to_column_names(&expr, &mut predicate_columns).unwrap();
}
// if there are any column references in the expression, ensure they appear in any table
let required_columns = if predicate_columns.is_empty() {
None
} else {
Some(self.make_partition_ids(predicate_columns.iter()))
};
Ok(PartitionPredicate {
table_name_predicate,
field_restriction,
partition_exprs,
required_columns,
time_column_id,
range,
})
}
/// Converts a potential set of strings into a set of ids in terms
/// of this dictionary. If there are no matching Strings in the
/// partitions dictionary, those strings are ignored and a
/// (potentially empty) set is returned.
fn compile_string_list(&self, names: Option<&BTreeSet<String>>) -> Option<BTreeSet<u32>> {
names.map(|names| {
names
.iter()
.filter_map(|name| self.dictionary.id(name))
.collect::<BTreeSet<_>>()
})
}
/// Adds the ids of any columns in additional_required_columns to the required columns of predicate
pub fn add_required_columns_to_predicate(
&self,
additional_required_columns: &HashSet<String>,
predicate: &mut PartitionPredicate,
) {
for column_name in additional_required_columns {
// Once know we have missing columns, no need to try
// and figure out if these any additional columns are needed
if Some(PartitionIdSet::AtLeastOneMissing) == predicate.required_columns {
return;
}
let column_id = self.dictionary.id(column_name);
// Update the required colunm list
predicate.required_columns = Some(match predicate.required_columns.take() {
None => {
if let Some(column_id) = column_id {
let mut symbols = BTreeSet::new();
symbols.insert(column_id);
PartitionIdSet::Present(symbols)
} else {
PartitionIdSet::AtLeastOneMissing
}
}
Some(PartitionIdSet::Present(mut symbols)) => {
if let Some(column_id) = column_id {
symbols.insert(column_id);
PartitionIdSet::Present(symbols)
} else {
PartitionIdSet::AtLeastOneMissing
}
}
Some(PartitionIdSet::AtLeastOneMissing) => {
unreachable!("Covered by case above while adding required columns to predicate")
}
});
}
}
/// returns true if data with partition key `key` should be
/// written to this partition,
pub fn should_write(&self, key: &str) -> bool {
self.key.starts_with(key) && self.is_open
}
/// Convert the table specified in this partition into an arrow record batch
pub fn table_to_arrow(&self, table_name: &str, columns: &[&str]) -> Result<RecordBatch> {
let table_id =
self.dictionary
.lookup_value(table_name)
.context(TableNameNotFoundInDictionary {
table: table_name,
partition: &self.key,
})?;
let table = self
.tables
.get(&table_id)
.context(TableNotFoundInPartition {
table: table_id,
partition: &self.key,
})?;
table
.to_arrow(&self, columns)
.context(NamedTableError { table_name })
}
/// Translate a bunch of strings into a set of ids relative to this partition
pub fn make_partition_ids<'a, I>(&self, predicate_columns: I) -> PartitionIdSet
where
I: Iterator<Item = &'a String>,
{
let mut symbols = BTreeSet::new();
for column_name in predicate_columns {
if let Some(column_id) = self.dictionary.id(column_name) {
symbols.insert(column_id);
} else {
return PartitionIdSet::AtLeastOneMissing;
}
}
PartitionIdSet::Present(symbols)
}
}
/// Used to figure out if we know how to deal with this kind of
/// predicate in the write buffer
struct SupportVisitor {}
impl ExpressionVisitor for SupportVisitor {
fn pre_visit(&mut self, expr: &Expr) {
match expr {
Expr::Literal(..) => {}
Expr::Column(..) => {}
Expr::BinaryExpr { op, .. } => {
match op {
Operator::Eq
| Operator::Lt
| Operator::LtEq
| Operator::Gt
| Operator::GtEq
| Operator::Plus
| Operator::Minus
| Operator::Multiply
| Operator::Divide
| Operator::And
| Operator::Or => {}
// Unsupported (need to think about ramifications)
Operator::NotEq | Operator::Modulus | Operator::Like | Operator::NotLike => {
panic!("Unsupported binary operator in expression: {:?}", expr)
}
}
}
_ => panic!(
"Unsupported expression in write_buffer database: {:?}",
expr
),
}
}
}
#[derive(Default, Debug)]
pub struct RestorationStats {
pub row_count: usize,
pub tables: BTreeSet<String>,
}
/// Given a set of WAL entries, restore them into a set of Partitions.
pub fn restore_partitions_from_wal(
wal_entries: impl Iterator<Item = WalResult<WalEntry>>,
) -> Result<(Vec<Partition>, RestorationStats)> {
let mut stats = RestorationStats::default();
let mut partitions = BTreeMap::new();
for wal_entry in wal_entries {
let wal_entry = wal_entry.context(WalEntryRead)?;
let bytes = wal_entry.as_data();
let batch = flatbuffers::get_root::<wb::WriteBufferBatch<'_>>(&bytes);
if let Some(entries) = batch.entries() {
for entry in entries {
let partition_key = entry.partition_key().context(MissingPartitionKey)?;
if !partitions.contains_key(partition_key) {
partitions.insert(
partition_key.to_string(),
Partition::new(partition_key.to_string()),
);
}
let partition = partitions
.get_mut(partition_key)
.context(PartitionNotFound {
partition: partition_key,
})?;
partition.write_entry(&entry)?;
}
}
}
let partitions = partitions
.into_iter()
.map(|(_, p)| p)
.collect::<Vec<Partition>>();
// compute the stats
for p in &partitions {
for (id, table) in &p.tables {
let name = p
.dictionary
.lookup_id(*id)
.expect("table id wasn't inserted into dictionary on restore");
if !stats.tables.contains(name) {
stats.tables.insert(name.to_string());
}
stats.row_count += table.row_count();
}
}
Ok((partitions, stats))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_make_range_expr() {
// Test that the generated predicate is correct
let range = TimestampRange::new(101, 202);
let ts_predicate_expr = make_range_expr(&range);
let expected_string = "Int64(101) LtEq #time And #time Lt Int64(202)";
let actual_string = format!("{:?}", ts_predicate_expr);
assert_eq!(actual_string, expected_string);
}
}
| {
match &self.field_restriction {
None => true,
Some(field_restriction) => field_restriction.contains(&field_id),
}
} | identifier_body |
partition.rs | use arrow_deps::{
arrow::record_batch::RecordBatch, datafusion::logical_plan::Expr,
datafusion::logical_plan::Operator, datafusion::optimizer::utils::expr_to_column_names,
datafusion::scalar::ScalarValue,
};
use generated_types::wal as wb;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use wal::{Entry as WalEntry, Result as WalResult};
use data_types::TIME_COLUMN_NAME;
use storage::{
predicate::{Predicate, TimestampRange},
util::{visit_expression, AndExprBuilder, ExpressionVisitor},
};
use crate::dictionary::Dictionary;
use crate::table::Table;
use snafu::{OptionExt, ResultExt, Snafu};
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Could not read WAL entry: {}", source))]
WalEntryRead { source: wal::Error },
#[snafu(display("Partition {} not found", partition))]
PartitionNotFound { partition: String },
#[snafu(display(
"Column name {} not found in dictionary of partition {}",
column,
partition
))]
ColumnNameNotFoundInDictionary {
column: String,
partition: String,
source: crate::dictionary::Error,
},
#[snafu(display("Error writing table '{}': {}", table_name, source))]
TableWrite {
table_name: String,
source: crate::table::Error,
},
#[snafu(display("Table Error in '{}': {}", table_name, source))]
NamedTableError {
table_name: String,
source: crate::table::Error,
},
#[snafu(display(
"Table name {} not found in dictionary of partition {}",
table,
partition
))]
TableNameNotFoundInDictionary {
table: String,
partition: String,
source: crate::dictionary::Error,
},
#[snafu(display("Table {} not found in partition {}", table, partition))]
TableNotFoundInPartition { table: u32, partition: String },
#[snafu(display("Attempt to write table batch without a name"))]
TableWriteWithoutName,
#[snafu(display("Error restoring WAL entry, missing partition key"))]
MissingPartitionKey,
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Debug)]
pub struct Partition {
pub key: String,
/// `dictionary` maps &str -> u32. The u32s are used in place of String or str to avoid slow
/// string operations. The same dictionary is used for table names, tag names, tag values, and
/// column names.
// TODO: intern string field values too?
pub dictionary: Dictionary,
/// map of the dictionary ID for the table name to the table
pub tables: HashMap<u32, Table>,
pub is_open: bool,
}
/// Describes the result of translating a set of strings into
/// partition specific ids
#[derive(Debug, PartialEq, Eq)]
pub enum PartitionIdSet {
/// At least one of the strings was not present in the partitions'
/// dictionary.
///
/// This is important when testing for the presence of all ids in
/// a set, as we know they can not all be present
AtLeastOneMissing,
/// All strings existed in this partition's dictionary
Present(BTreeSet<u32>),
}
/// a 'Compiled' set of predicates / filters that can be evaluated on
/// this partition (where strings have been translated to partition
/// specific u32 ids)
#[derive(Debug)]
pub struct PartitionPredicate {
/// If present, restrict the request to just those tables whose
/// names are in table_names. If present but empty, means there
/// was a predicate but no tables named that way exist in the
/// partition (so no table can pass)
pub table_name_predicate: Option<BTreeSet<u32>>,
// Optional field column selection. If present, further restrict
// any field columns returnedto only those named
pub field_restriction: Option<BTreeSet<u32>>,
/// General DataFusion expressions (arbitrary predicates) applied
/// as a filter using logical conjuction (aka are 'AND'ed
/// together). Only rows that evaluate to TRUE for all these
/// expressions should be returned.
pub partition_exprs: Vec<Expr>,
/// If Some, then the table must contain all columns specified
/// to pass the predicate
pub required_columns: Option<PartitionIdSet>,
/// The id of the "time" column in this partition
pub time_column_id: u32,
/// Timestamp range: only rows within this range should be considered
pub range: Option<TimestampRange>,
}
impl PartitionPredicate {
/// Creates and adds a datafuson predicate representing the
/// combination of predicate and timestamp.
pub fn filter_expr(&self) -> Option<Expr> {
// build up a list of expressions
let mut builder =
AndExprBuilder::default().append_opt(self.make_timestamp_predicate_expr());
for expr in &self.partition_exprs {
builder = builder.append_expr(expr.clone());
}
builder.build()
}
/// Return true if there is a non empty field restriction
pub fn has_field_restriction(&self) -> bool {
match &self.field_restriction {
None => false,
Some(field_restiction) => !field_restiction.is_empty(),
}
}
/// For plans which select a subset of fields, returns true if
/// the field should be included in the results
pub fn should_include_field(&self, field_id: u32) -> bool {
match &self.field_restriction {
None => true,
Some(field_restriction) => field_restriction.contains(&field_id),
}
}
/// Return true if this column is the time column
pub fn is_time_column(&self, id: u32) -> bool {
self.time_column_id == id
}
/// Creates a DataFusion predicate for appliying a timestamp range:
///
/// range.start <= time and time < range.end`
fn make_timestamp_predicate_expr(&self) -> Option<Expr> {
self.range.map(|range| make_range_expr(&range))
}
}
/// Creates expression like:
/// range.low <= time && time < range.high
fn make_range_expr(range: &TimestampRange) -> Expr {
let ts_low = Expr::BinaryExpr {
left: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.start)))),
op: Operator::LtEq,
right: Box::new(Expr::Column(TIME_COLUMN_NAME.into())),
};
let ts_high = Expr::BinaryExpr {
left: Box::new(Expr::Column(TIME_COLUMN_NAME.into())),
op: Operator::Lt,
right: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.end)))),
};
AndExprBuilder::default()
.append_expr(ts_low)
.append_expr(ts_high)
.build()
.unwrap()
}
impl Partition {
pub fn new(key: impl Into<String>) -> Self {
Self {
key: key.into(),
dictionary: Dictionary::new(),
tables: HashMap::new(),
is_open: true,
}
}
pub fn write_entry(&mut self, entry: &wb::WriteBufferEntry<'_>) -> Result<()> {
if let Some(table_batches) = entry.table_batches() {
for batch in table_batches {
self.write_table_batch(&batch)?;
}
}
Ok(())
}
fn write_table_batch(&mut self, batch: &wb::TableWriteBatch<'_>) -> Result<()> {
let table_name = batch.name().context(TableWriteWithoutName)?;
let table_id = self.dictionary.lookup_value_or_insert(table_name);
let table = self
.tables
.entry(table_id)
.or_insert_with(|| Table::new(table_id));
if let Some(rows) = batch.rows() {
table
.append_rows(&mut self.dictionary, &rows)
.context(TableWrite { table_name })?;
}
Ok(())
}
/// Translates `predicate` into per-partition ids that can be
/// directly evaluated against tables in this partition
pub fn compile_predicate(&self, predicate: &Predicate) -> Result<PartitionPredicate> {
let table_name_predicate = self.compile_string_list(predicate.table_names.as_ref());
let field_restriction = self.compile_string_list(predicate.field_columns.as_ref());
let time_column_id = self
.dictionary
.lookup_value(TIME_COLUMN_NAME)
.expect("time is in the partition dictionary");
let range = predicate.range;
// it would be nice to avoid cloning all the exprs here.
let partition_exprs = predicate.exprs.clone();
// In order to evaluate expressions in the table, all columns
// referenced in the expression must appear (I think, not sure
// about NOT, etc so panic if we see one of those);
let mut visitor = SupportVisitor {};
let mut predicate_columns: HashSet<String> = HashSet::new();
for expr in &partition_exprs {
visit_expression(expr, &mut visitor);
expr_to_column_names(&expr, &mut predicate_columns).unwrap();
}
// if there are any column references in the expression, ensure they appear in any table
let required_columns = if predicate_columns.is_empty() {
None
} else {
Some(self.make_partition_ids(predicate_columns.iter()))
};
Ok(PartitionPredicate {
table_name_predicate,
field_restriction,
partition_exprs,
required_columns,
time_column_id, | /// of this dictionary. If there are no matching Strings in the
/// partitions dictionary, those strings are ignored and a
/// (potentially empty) set is returned.
fn compile_string_list(&self, names: Option<&BTreeSet<String>>) -> Option<BTreeSet<u32>> {
names.map(|names| {
names
.iter()
.filter_map(|name| self.dictionary.id(name))
.collect::<BTreeSet<_>>()
})
}
/// Adds the ids of any columns in additional_required_columns to the required columns of predicate
pub fn add_required_columns_to_predicate(
&self,
additional_required_columns: &HashSet<String>,
predicate: &mut PartitionPredicate,
) {
for column_name in additional_required_columns {
// Once know we have missing columns, no need to try
// and figure out if these any additional columns are needed
if Some(PartitionIdSet::AtLeastOneMissing) == predicate.required_columns {
return;
}
let column_id = self.dictionary.id(column_name);
// Update the required colunm list
predicate.required_columns = Some(match predicate.required_columns.take() {
None => {
if let Some(column_id) = column_id {
let mut symbols = BTreeSet::new();
symbols.insert(column_id);
PartitionIdSet::Present(symbols)
} else {
PartitionIdSet::AtLeastOneMissing
}
}
Some(PartitionIdSet::Present(mut symbols)) => {
if let Some(column_id) = column_id {
symbols.insert(column_id);
PartitionIdSet::Present(symbols)
} else {
PartitionIdSet::AtLeastOneMissing
}
}
Some(PartitionIdSet::AtLeastOneMissing) => {
unreachable!("Covered by case above while adding required columns to predicate")
}
});
}
}
/// returns true if data with partition key `key` should be
/// written to this partition,
pub fn should_write(&self, key: &str) -> bool {
self.key.starts_with(key) && self.is_open
}
/// Convert the table specified in this partition into an arrow record batch
pub fn table_to_arrow(&self, table_name: &str, columns: &[&str]) -> Result<RecordBatch> {
let table_id =
self.dictionary
.lookup_value(table_name)
.context(TableNameNotFoundInDictionary {
table: table_name,
partition: &self.key,
})?;
let table = self
.tables
.get(&table_id)
.context(TableNotFoundInPartition {
table: table_id,
partition: &self.key,
})?;
table
.to_arrow(&self, columns)
.context(NamedTableError { table_name })
}
/// Translate a bunch of strings into a set of ids relative to this partition
pub fn make_partition_ids<'a, I>(&self, predicate_columns: I) -> PartitionIdSet
where
I: Iterator<Item = &'a String>,
{
let mut symbols = BTreeSet::new();
for column_name in predicate_columns {
if let Some(column_id) = self.dictionary.id(column_name) {
symbols.insert(column_id);
} else {
return PartitionIdSet::AtLeastOneMissing;
}
}
PartitionIdSet::Present(symbols)
}
}
/// Used to figure out if we know how to deal with this kind of
/// predicate in the write buffer
struct SupportVisitor {}
impl ExpressionVisitor for SupportVisitor {
fn pre_visit(&mut self, expr: &Expr) {
match expr {
Expr::Literal(..) => {}
Expr::Column(..) => {}
Expr::BinaryExpr { op, .. } => {
match op {
Operator::Eq
| Operator::Lt
| Operator::LtEq
| Operator::Gt
| Operator::GtEq
| Operator::Plus
| Operator::Minus
| Operator::Multiply
| Operator::Divide
| Operator::And
| Operator::Or => {}
// Unsupported (need to think about ramifications)
Operator::NotEq | Operator::Modulus | Operator::Like | Operator::NotLike => {
panic!("Unsupported binary operator in expression: {:?}", expr)
}
}
}
_ => panic!(
"Unsupported expression in write_buffer database: {:?}",
expr
),
}
}
}
#[derive(Default, Debug)]
pub struct RestorationStats {
pub row_count: usize,
pub tables: BTreeSet<String>,
}
/// Given a set of WAL entries, restore them into a set of Partitions.
pub fn restore_partitions_from_wal(
wal_entries: impl Iterator<Item = WalResult<WalEntry>>,
) -> Result<(Vec<Partition>, RestorationStats)> {
let mut stats = RestorationStats::default();
let mut partitions = BTreeMap::new();
for wal_entry in wal_entries {
let wal_entry = wal_entry.context(WalEntryRead)?;
let bytes = wal_entry.as_data();
let batch = flatbuffers::get_root::<wb::WriteBufferBatch<'_>>(&bytes);
if let Some(entries) = batch.entries() {
for entry in entries {
let partition_key = entry.partition_key().context(MissingPartitionKey)?;
if !partitions.contains_key(partition_key) {
partitions.insert(
partition_key.to_string(),
Partition::new(partition_key.to_string()),
);
}
let partition = partitions
.get_mut(partition_key)
.context(PartitionNotFound {
partition: partition_key,
})?;
partition.write_entry(&entry)?;
}
}
}
let partitions = partitions
.into_iter()
.map(|(_, p)| p)
.collect::<Vec<Partition>>();
// compute the stats
for p in &partitions {
for (id, table) in &p.tables {
let name = p
.dictionary
.lookup_id(*id)
.expect("table id wasn't inserted into dictionary on restore");
if !stats.tables.contains(name) {
stats.tables.insert(name.to_string());
}
stats.row_count += table.row_count();
}
}
Ok((partitions, stats))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_make_range_expr() {
// Test that the generated predicate is correct
let range = TimestampRange::new(101, 202);
let ts_predicate_expr = make_range_expr(&range);
let expected_string = "Int64(101) LtEq #time And #time Lt Int64(202)";
let actual_string = format!("{:?}", ts_predicate_expr);
assert_eq!(actual_string, expected_string);
}
} | range,
})
}
/// Converts a potential set of strings into a set of ids in terms | random_line_split |
partition.rs | use arrow_deps::{
arrow::record_batch::RecordBatch, datafusion::logical_plan::Expr,
datafusion::logical_plan::Operator, datafusion::optimizer::utils::expr_to_column_names,
datafusion::scalar::ScalarValue,
};
use generated_types::wal as wb;
use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
use wal::{Entry as WalEntry, Result as WalResult};
use data_types::TIME_COLUMN_NAME;
use storage::{
predicate::{Predicate, TimestampRange},
util::{visit_expression, AndExprBuilder, ExpressionVisitor},
};
use crate::dictionary::Dictionary;
use crate::table::Table;
use snafu::{OptionExt, ResultExt, Snafu};
/// Errors that can occur while writing to, querying, or restoring a
/// write buffer partition.
#[derive(Debug, Snafu)]
pub enum Error {
    /// Reading an entry out of the WAL failed.
    #[snafu(display("Could not read WAL entry: {}", source))]
    WalEntryRead { source: wal::Error },

    /// Lookup of a partition by key failed during WAL restoration.
    #[snafu(display("Partition {} not found", partition))]
    PartitionNotFound { partition: String },

    /// A column name referenced by a query was never interned in this
    /// partition's dictionary.
    #[snafu(display(
        "Column name {} not found in dictionary of partition {}",
        column,
        partition
    ))]
    ColumnNameNotFoundInDictionary {
        column: String,
        partition: String,
        source: crate::dictionary::Error,
    },

    /// Appending rows to a table failed.
    #[snafu(display("Error writing table '{}': {}", table_name, source))]
    TableWrite {
        table_name: String,
        source: crate::table::Error,
    },

    /// A table-level operation (e.g. conversion to Arrow) failed.
    #[snafu(display("Table Error in '{}': {}", table_name, source))]
    NamedTableError {
        table_name: String,
        source: crate::table::Error,
    },

    /// A table name referenced by a query was never interned in this
    /// partition's dictionary.
    #[snafu(display(
        "Table name {} not found in dictionary of partition {}",
        table,
        partition
    ))]
    TableNameNotFoundInDictionary {
        table: String,
        partition: String,
        source: crate::dictionary::Error,
    },

    /// The dictionary knew the table id, but no table with that id exists.
    #[snafu(display("Table {} not found in partition {}", table, partition))]
    TableNotFoundInPartition { table: u32, partition: String },

    /// A WAL table batch arrived without a table name.
    #[snafu(display("Attempt to write table batch without a name"))]
    TableWriteWithoutName,

    /// A WAL entry arrived without a partition key.
    #[snafu(display("Error restoring WAL entry, missing partition key"))]
    MissingPartitionKey,
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// A logical chunk of the write buffer: all data written under a single
/// partition key, organized as a set of tables with interned names.
#[derive(Debug)]
pub struct Partition {
    /// The partition key shared by all data in this partition.
    pub key: String,
    /// `dictionary` maps &str -> u32. The u32s are used in place of String or str to avoid slow
    /// string operations. The same dictionary is used for table names, tag names, tag values, and
    /// column names.
    // TODO: intern string field values too?
    pub dictionary: Dictionary,
    /// map of the dictionary ID for the table name to the table
    pub tables: HashMap<u32, Table>,
    /// Whether new writes may still be routed to this partition
    /// (see `Partition::should_write`).
    pub is_open: bool,
}
/// Describes the result of translating a set of strings into
/// partition specific ids
#[derive(Debug, PartialEq, Eq)]
pub enum | {
/// At least one of the strings was not present in the partitions'
/// dictionary.
///
/// This is important when testing for the presence of all ids in
/// a set, as we know they can not all be present
AtLeastOneMissing,
/// All strings existed in this partition's dictionary
Present(BTreeSet<u32>),
}
/// a 'Compiled' set of predicates / filters that can be evaluated on
/// this partition (where strings have been translated to partition
/// specific u32 ids)
#[derive(Debug)]
pub struct PartitionPredicate {
/// If present, restrict the request to just those tables whose
/// names are in table_names. If present but empty, means there
/// was a predicate but no tables named that way exist in the
/// partition (so no table can pass)
pub table_name_predicate: Option<BTreeSet<u32>>,
// Optional field column selection. If present, further restrict
// any field columns returnedto only those named
pub field_restriction: Option<BTreeSet<u32>>,
/// General DataFusion expressions (arbitrary predicates) applied
/// as a filter using logical conjuction (aka are 'AND'ed
/// together). Only rows that evaluate to TRUE for all these
/// expressions should be returned.
pub partition_exprs: Vec<Expr>,
/// If Some, then the table must contain all columns specified
/// to pass the predicate
pub required_columns: Option<PartitionIdSet>,
/// The id of the "time" column in this partition
pub time_column_id: u32,
/// Timestamp range: only rows within this range should be considered
pub range: Option<TimestampRange>,
}
impl PartitionPredicate {
    /// Builds a single DataFusion expression combining the timestamp range
    /// (when present) with all general partition expressions, ANDed together.
    /// Returns `None` when there is nothing to filter on.
    pub fn filter_expr(&self) -> Option<Expr> {
        // Seed the conjunction with the (optional) timestamp bound, then
        // fold every general expression into it.
        let seed = AndExprBuilder::default().append_opt(self.make_timestamp_predicate_expr());
        self.partition_exprs
            .iter()
            .fold(seed, |builder, expr| builder.append_expr(expr.clone()))
            .build()
    }

    /// Returns true when a non-empty field restriction is present.
    pub fn has_field_restriction(&self) -> bool {
        self.field_restriction
            .as_ref()
            .map_or(false, |restriction| !restriction.is_empty())
    }

    /// For plans which select a subset of fields, returns true if the field
    /// should be included in the results. No restriction means "include all".
    pub fn should_include_field(&self, field_id: u32) -> bool {
        self.field_restriction
            .as_ref()
            .map_or(true, |restriction| restriction.contains(&field_id))
    }

    /// Returns true if `id` names the time column of this partition.
    pub fn is_time_column(&self, id: u32) -> bool {
        id == self.time_column_id
    }

    /// Creates a DataFusion predicate for applying a timestamp range:
    ///
    /// `range.start <= time and time < range.end`
    fn make_timestamp_predicate_expr(&self) -> Option<Expr> {
        match self.range {
            Some(range) => Some(make_range_expr(&range)),
            None => None,
        }
    }
}
/// Creates an expression of the form:
/// `range.start <= time && time < range.end`
fn make_range_expr(range: &TimestampRange) -> Expr {
    // Small factories keep the two bound expressions symmetric.
    let time_col = || Box::new(Expr::Column(TIME_COLUMN_NAME.into()));
    let int_lit = |v| Box::new(Expr::Literal(ScalarValue::Int64(Some(v))));

    let lower_bound = Expr::BinaryExpr {
        left: int_lit(range.start),
        op: Operator::LtEq,
        right: time_col(),
    };
    let upper_bound = Expr::BinaryExpr {
        left: time_col(),
        op: Operator::Lt,
        right: int_lit(range.end),
    };

    AndExprBuilder::default()
        .append_expr(lower_bound)
        .append_expr(upper_bound)
        .build()
        .unwrap()
}
impl Partition {
    /// Creates a new, open partition for the given partition key.
    pub fn new(key: impl Into<String>) -> Self {
        Self {
            key: key.into(),
            dictionary: Dictionary::new(),
            tables: HashMap::new(),
            is_open: true,
        }
    }

    /// Writes every table batch contained in a WAL write-buffer entry
    /// into this partition.
    pub fn write_entry(&mut self, entry: &wb::WriteBufferEntry<'_>) -> Result<()> {
        if let Some(table_batches) = entry.table_batches() {
            for batch in table_batches {
                self.write_table_batch(&batch)?;
            }
        }
        Ok(())
    }

    /// Appends the rows of a single table batch, creating the table on
    /// first use. The table name is interned in the partition dictionary.
    fn write_table_batch(&mut self, batch: &wb::TableWriteBatch<'_>) -> Result<()> {
        let table_name = batch.name().context(TableWriteWithoutName)?;
        let table_id = self.dictionary.lookup_value_or_insert(table_name);
        let table = self
            .tables
            .entry(table_id)
            .or_insert_with(|| Table::new(table_id));
        if let Some(rows) = batch.rows() {
            table
                .append_rows(&mut self.dictionary, &rows)
                .context(TableWrite { table_name })?;
        }
        Ok(())
    }

    /// Translates `predicate` into per-partition ids that can be
    /// directly evaluated against tables in this partition
    pub fn compile_predicate(&self, predicate: &Predicate) -> Result<PartitionPredicate> {
        let table_name_predicate = self.compile_string_list(predicate.table_names.as_ref());
        let field_restriction = self.compile_string_list(predicate.field_columns.as_ref());
        // Every partition is expected to have a time column; failure here
        // indicates a broken invariant, not a user error.
        let time_column_id = self
            .dictionary
            .lookup_value(TIME_COLUMN_NAME)
            .expect("time is in the partition dictionary");
        let range = predicate.range;
        // it would be nice to avoid cloning all the exprs here.
        let partition_exprs = predicate.exprs.clone();
        // In order to evaluate expressions in the table, all columns
        // referenced in the expression must appear (I think, not sure
        // about NOT, etc so panic if we see one of those);
        let mut visitor = SupportVisitor {};
        let mut predicate_columns: HashSet<String> = HashSet::new();
        for expr in &partition_exprs {
            visit_expression(expr, &mut visitor);
            expr_to_column_names(&expr, &mut predicate_columns).unwrap();
        }
        // if there are any column references in the expression, ensure they appear in any table
        let required_columns = if predicate_columns.is_empty() {
            None
        } else {
            Some(self.make_partition_ids(predicate_columns.iter()))
        };
        Ok(PartitionPredicate {
            table_name_predicate,
            field_restriction,
            partition_exprs,
            required_columns,
            time_column_id,
            range,
        })
    }

    /// Converts a potential set of strings into a set of ids in terms
    /// of this dictionary. If there are no matching Strings in the
    /// partitions dictionary, those strings are ignored and a
    /// (potentially empty) set is returned.
    fn compile_string_list(&self, names: Option<&BTreeSet<String>>) -> Option<BTreeSet<u32>> {
        names.map(|names| {
            names
                .iter()
                .filter_map(|name| self.dictionary.id(name))
                .collect::<BTreeSet<_>>()
        })
    }

    /// Adds the ids of any columns in additional_required_columns to the required columns of predicate
    pub fn add_required_columns_to_predicate(
        &self,
        additional_required_columns: &HashSet<String>,
        predicate: &mut PartitionPredicate,
    ) {
        for column_name in additional_required_columns {
            // Once know we have missing columns, no need to try
            // and figure out if these any additional columns are needed
            if Some(PartitionIdSet::AtLeastOneMissing) == predicate.required_columns {
                return;
            }
            let column_id = self.dictionary.id(column_name);
            // Update the required colunm list; `take()` moves the current
            // set out so it can be consumed and replaced in one pass.
            predicate.required_columns = Some(match predicate.required_columns.take() {
                None => {
                    if let Some(column_id) = column_id {
                        let mut symbols = BTreeSet::new();
                        symbols.insert(column_id);
                        PartitionIdSet::Present(symbols)
                    } else {
                        PartitionIdSet::AtLeastOneMissing
                    }
                }
                Some(PartitionIdSet::Present(mut symbols)) => {
                    if let Some(column_id) = column_id {
                        symbols.insert(column_id);
                        PartitionIdSet::Present(symbols)
                    } else {
                        PartitionIdSet::AtLeastOneMissing
                    }
                }
                Some(PartitionIdSet::AtLeastOneMissing) => {
                    unreachable!("Covered by case above while adding required columns to predicate")
                }
            });
        }
    }

    /// returns true if data with partition key `key` should be
    /// written to this partition,
    pub fn should_write(&self, key: &str) -> bool {
        self.key.starts_with(key) && self.is_open
    }

    /// Convert the table specified in this partition into an arrow record batch
    pub fn table_to_arrow(&self, table_name: &str, columns: &[&str]) -> Result<RecordBatch> {
        let table_id =
            self.dictionary
                .lookup_value(table_name)
                .context(TableNameNotFoundInDictionary {
                    table: table_name,
                    partition: &self.key,
                })?;
        let table = self
            .tables
            .get(&table_id)
            .context(TableNotFoundInPartition {
                table: table_id,
                partition: &self.key,
            })?;
        table
            .to_arrow(&self, columns)
            .context(NamedTableError { table_name })
    }

    /// Translate a bunch of strings into a set of ids relative to this partition
    pub fn make_partition_ids<'a, I>(&self, predicate_columns: I) -> PartitionIdSet
    where
        I: Iterator<Item = &'a String>,
    {
        let mut symbols = BTreeSet::new();
        for column_name in predicate_columns {
            if let Some(column_id) = self.dictionary.id(column_name) {
                symbols.insert(column_id);
            } else {
                // One unknown column means the full set can never match.
                return PartitionIdSet::AtLeastOneMissing;
            }
        }
        PartitionIdSet::Present(symbols)
    }
}
/// Used to figure out if we know how to deal with this kind of
/// predicate in the write buffer
struct SupportVisitor {}

impl ExpressionVisitor for SupportVisitor {
    /// Panics on any expression node the write buffer cannot evaluate;
    /// visiting completes silently when the whole tree is supported.
    fn pre_visit(&mut self, expr: &Expr) {
        match expr {
            Expr::Literal(..) => {}
            Expr::Column(..) => {}
            Expr::BinaryExpr { op, .. } => {
                match op {
                    Operator::Eq
                    | Operator::Lt
                    | Operator::LtEq
                    | Operator::Gt
                    | Operator::GtEq
                    | Operator::Plus
                    | Operator::Minus
                    | Operator::Multiply
                    | Operator::Divide
                    | Operator::And
                    | Operator::Or => {}
                    // Unsupported (need to think about ramifications)
                    Operator::NotEq | Operator::Modulus | Operator::Like | Operator::NotLike => {
                        panic!("Unsupported binary operator in expression: {:?}", expr)
                    }
                }
            }
            _ => panic!(
                "Unsupported expression in write_buffer database: {:?}",
                expr
            ),
        }
    }
}
/// Statistics about data recovered from the WAL.
#[derive(Default, Debug)]
pub struct RestorationStats {
    /// Total number of rows restored across all partitions.
    pub row_count: usize,
    /// Names of all tables that had at least one row restored.
    pub tables: BTreeSet<String>,
}
/// Given a set of WAL entries, restore them into a set of Partitions.
///
/// Returns the restored partitions along with statistics about how many
/// rows and which tables were recovered.
///
/// # Errors
///
/// Returns an error if a WAL entry cannot be read, an entry is missing its
/// partition key, or writing an entry into its partition fails.
pub fn restore_partitions_from_wal(
    wal_entries: impl Iterator<Item = WalResult<WalEntry>>,
) -> Result<(Vec<Partition>, RestorationStats)> {
    let mut stats = RestorationStats::default();
    let mut partitions = BTreeMap::new();
    for wal_entry in wal_entries {
        let wal_entry = wal_entry.context(WalEntryRead)?;
        let bytes = wal_entry.as_data();
        let batch = flatbuffers::get_root::<wb::WriteBufferBatch<'_>>(&bytes);
        if let Some(entries) = batch.entries() {
            for entry in entries {
                let partition_key = entry.partition_key().context(MissingPartitionKey)?;
                // The entry API makes lookup-or-create a single tree
                // traversal (the original did contains_key + insert +
                // get_mut, three lookups) and removes the unreachable
                // PartitionNotFound error path.
                let partition = partitions
                    .entry(partition_key.to_string())
                    .or_insert_with(|| Partition::new(partition_key));
                partition.write_entry(&entry)?;
            }
        }
    }
    // The map was only needed to group by key; keep just the partitions.
    let partitions = partitions.into_values().collect::<Vec<Partition>>();
    // compute the stats
    for p in &partitions {
        for (id, table) in &p.tables {
            let name = p
                .dictionary
                .lookup_id(*id)
                .expect("table id wasn't inserted into dictionary on restore");
            // BTreeSet::insert is a no-op for duplicates, so no
            // contains() pre-check is needed.
            stats.tables.insert(name.to_string());
            stats.row_count += table.row_count();
        }
    }
    Ok((partitions, stats))
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_make_range_expr() {
        // Test that the generated predicate is correct
        let range = TimestampRange::new(101, 202);
        let ts_predicate_expr = make_range_expr(&range);
        // Compare against the Debug rendering of the expression tree,
        // which pins both operators and operand order.
        let expected_string = "Int64(101) LtEq #time And #time Lt Int64(202)";
        let actual_string = format!("{:?}", ts_predicate_expr);
        assert_eq!(actual_string, expected_string);
    }
}
| PartitionIdSet | identifier_name |
mod.rs | pub mod canned_histories;
mod history_builder;
mod history_info;
pub(crate) use history_builder::{TestHistoryBuilder, DEFAULT_WORKFLOW_TYPE};
use crate::{
pollers::{
BoxedActPoller, BoxedPoller, BoxedWFPoller, MockManualPoller, MockPoller,
MockServerGatewayApis,
},
task_token::TaskToken,
workflow::WorkflowCachingPolicy,
Core, CoreInitOptionsBuilder, CoreSDK, ServerGatewayApis, ServerGatewayOptions, Url,
WorkerConfig, WorkerConfigBuilder,
};
use bimap::BiMap;
use futures::FutureExt;
use mockall::TimesRange;
use parking_lot::RwLock;
use rand::{thread_rng, Rng};
use std::{
collections::{BTreeMap, HashMap, HashSet, VecDeque},
ops::RangeFull,
str::FromStr,
sync::Arc,
};
use temporal_sdk_core_protos::{
coresdk::{
workflow_activation::WfActivation,
workflow_commands::workflow_command,
workflow_completion::{self, wf_activation_completion, WfActivationCompletion},
},
temporal::api::{
common::v1::{WorkflowExecution, WorkflowType},
enums::v1::{TaskQueueKind, WorkflowTaskFailedCause},
failure::v1::Failure,
history::v1::History,
taskqueue::v1::TaskQueue,
workflowservice::v1::{
PollActivityTaskQueueResponse, PollWorkflowTaskQueueResponse,
RespondWorkflowTaskCompletedResponse,
},
},
};
pub type Result<T, E = anyhow::Error> = std::result::Result<T, E>;
pub const TEST_Q: &str = "q";
/// When constructing responses for mocks, indicates how a given response should be built
#[derive(derive_more::From, Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum ResponseType {
ToTaskNum(usize),
AllHistory,
}
impl From<&usize> for ResponseType {
fn from(u: &usize) -> Self {
ResponseType::ToTaskNum(*u)
}
}
// :shrug:
impl From<&ResponseType> for ResponseType {
fn from(r: &ResponseType) -> Self {
*r
}
}
/// Given identifiers for a workflow/run, and a test history builder, construct an instance of
/// the core SDK with a mock server gateway that will produce the responses as appropriate.
///
/// `response_batches` is used to control the fake [PollWorkflowTaskQueueResponse]s returned. For
/// each number in the input list, a fake response will be prepared which includes history up to the
/// workflow task with that number, as in [TestHistoryBuilder::get_history_info].
pub(crate) fn build_fake_core(
    wf_id: &str,
    t: TestHistoryBuilder,
    response_batches: impl IntoIterator<Item = impl Into<ResponseType>>,
) -> CoreSDK {
    // Normalize the caller-supplied batch specifiers into `ResponseType`s.
    let response_batches = response_batches.into_iter().map(Into::into).collect();
    // Single-workflow case: one fake history served on the default test
    // queue, with strict enforcement of the expected number of polls.
    let mock_gateway = build_multihist_mock_sg(
        vec![FakeWfResponses {
            wf_id: wf_id.to_owned(),
            hist: t,
            response_batches,
            task_q: TEST_Q.to_owned(),
        }],
        true,
        None,
    );
    mock_core(mock_gateway)
}
pub(crate) fn mock_core<SG>(mocks: MocksHolder<SG>) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
mock_core_with_opts(mocks, CoreInitOptionsBuilder::default())
}
pub(crate) fn mock_core_with_opts<SG>(
mocks: MocksHolder<SG>,
opts: CoreInitOptionsBuilder,
) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
let mut core = mock_core_with_opts_no_workers(mocks.sg, opts);
register_mock_workers(&mut core, mocks.mock_pollers.into_values());
core
}
pub(crate) fn register_mock_workers(
core: &mut CoreSDK,
mocks: impl IntoIterator<Item = MockWorker>,
) {
for worker in mocks {
core.reg_worker_sync(worker);
}
}
pub(crate) fn mock_core_with_opts_no_workers<SG>(
sg: SG,
mut opts: CoreInitOptionsBuilder,
) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
CoreSDK::new(sg, opts.gateway_opts(fake_sg_opts()).build().unwrap())
}
pub struct FakeWfResponses {
pub wf_id: String,
pub hist: TestHistoryBuilder,
pub response_batches: Vec<ResponseType>,
pub task_q: String,
}
// TODO: turn this into a builder or make a new one? to make all these different build fns simpler
pub struct MocksHolder<SG> {
sg: SG,
mock_pollers: HashMap<String, MockWorker>,
pub outstanding_task_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>,
}
impl<SG> MocksHolder<SG> {
pub fn worker_cfg(&mut self, task_q: &str, mutator: impl FnOnce(&mut WorkerConfig)) {
if let Some(w) = self.mock_pollers.get_mut(task_q) {
mutator(&mut w.config);
}
}
pub fn take_pollers(self) -> HashMap<String, MockWorker> {
self.mock_pollers
}
}
pub struct MockWorker {
pub wf_poller: BoxedWFPoller,
pub act_poller: Option<BoxedActPoller>,
pub config: WorkerConfig,
}
impl Default for MockWorker {
fn default() -> Self {
MockWorker {
wf_poller: Box::from(mock_poller()),
act_poller: None,
config: WorkerConfig::default_test_q(),
}
}
}
impl MockWorker {
    /// Creates a mock worker for task queue `q` using the provided workflow
    /// task poller and no activity poller.
    pub fn new(q: &str, wf_poller: BoxedWFPoller) -> Self {
        MockWorker {
            wf_poller,
            act_poller: None,
            config: WorkerConfig::default(q),
        }
    }

    /// Creates a mock worker for task queue `q` whose workflow poller is a
    /// default mock (only the standard shutdown expectations registered).
    pub fn for_queue(q: &str) -> Self {
        // Delegate to `new` so the two constructors cannot drift apart;
        // the original duplicated the struct literal in both.
        Self::new(q, Box::from(mock_poller()))
    }
}
impl<SG> MocksHolder<SG>
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
pub fn from_mock_workers(
sg: SG,
mock_workers: impl IntoIterator<Item = MockWorker>,
) -> MocksHolder<SG> {
let mock_pollers = mock_workers
.into_iter()
.map(|w| (w.config.task_queue.clone(), w))
.collect();
MocksHolder {
sg,
mock_pollers,
outstanding_task_map: None,
}
}
/// Uses the provided list of tasks to create a mock poller for the `TEST_Q`
pub fn from_gateway_with_responses(
sg: SG,
wf_tasks: VecDeque<PollWorkflowTaskQueueResponse>,
act_tasks: VecDeque<PollActivityTaskQueueResponse>,
) -> MocksHolder<SG> {
let mut mock_pollers = HashMap::new();
let mock_poller = mock_poller_from_resps(wf_tasks);
let mock_act_poller = mock_poller_from_resps(act_tasks);
mock_pollers.insert(
TEST_Q.to_string(),
MockWorker {
wf_poller: mock_poller,
act_poller: Some(mock_act_poller),
config: WorkerConfigBuilder::default()
.task_queue(TEST_Q)
.build()
.unwrap(),
},
);
MocksHolder {
sg,
mock_pollers,
outstanding_task_map: None,
}
}
}
/// Builds a boxed poller that hands out the supplied tasks in order and
/// then yields an out-of-range error once they are exhausted.
pub fn mock_poller_from_resps<T>(mut tasks: VecDeque<T>) -> BoxedPoller<T>
where
    T: Send + Sync + 'static,
{
    let mut poller = mock_poller();
    poller.expect_poll().returning(move || {
        // Drain the queue front-to-back; after that every poll reports
        // the "ran out" error rather than blocking.
        let next = match tasks.pop_front() {
            Some(task) => Ok(task),
            None => Err(tonic::Status::out_of_range(
                "Ran out of mock responses!",
            )),
        };
        Some(next)
    });
    Box::new(poller) as BoxedPoller<T>
}
pub fn mock_poller<T>() -> MockPoller<T>
where
T: Send + Sync + 'static,
{
let mut mock_poller = MockPoller::new();
mock_poller.expect_shutdown_box().return_const(());
mock_poller.expect_notify_shutdown().return_const(());
mock_poller
}
/// Builds a manual (future-returning) mock poller with the standard
/// shutdown expectations pre-registered; callers add their own `poll`
/// expectations.
pub fn mock_manual_poller<T>() -> MockManualPoller<T>
where
    T: Send + Sync + 'static,
{
    let mut mock_poller = MockManualPoller::new();
    // Shutdown resolves immediately with a no-op future.
    mock_poller
        .expect_shutdown_box()
        .returning(|| async {}.boxed());
    mock_poller.expect_notify_shutdown().return_const(());
    mock_poller
}
/// Build a mock server gateway capable of returning multiple different histories for different
/// workflows. It does so by tracking outstanding workflow tasks like is also happening in core
/// (which is unfortunately a bit redundant, we could provide hooks in core but that feels a little
/// nasty). If there is an outstanding task for a given workflow, new chunks of its history are not
/// returned. If there is not, the next batch of history is returned for any workflow without an
/// outstanding task. Outstanding tasks are cleared on completion, failure, or eviction.
///
/// `num_expected_fails` can be provided to set a specific number of expected failed workflow tasks
/// sent to the server.
pub fn build_multihist_mock_sg(
hists: impl IntoIterator<Item = FakeWfResponses>,
enforce_correct_number_of_polls: bool,
num_expected_fails: Option<usize>,
) -> MocksHolder<MockServerGatewayApis> {
let mh = MockPollCfg::new(
hists.into_iter().collect(),
enforce_correct_number_of_polls,
num_expected_fails,
);
build_mock_pollers(mh)
}
/// See [build_multihist_mock_sg] -- one history convenience version
pub fn single_hist_mock_sg(
wf_id: &str,
t: TestHistoryBuilder,
response_batches: impl IntoIterator<Item = impl Into<ResponseType>>,
mock_gateway: MockServerGatewayApis,
enforce_num_polls: bool,
) -> MocksHolder<MockServerGatewayApis> {
let mut mh = MockPollCfg::from_resp_batches(wf_id, t, response_batches, mock_gateway);
mh.enforce_correct_number_of_polls = enforce_num_polls;
build_mock_pollers(mh)
}
pub struct MockPollCfg {
pub hists: Vec<FakeWfResponses>,
pub enforce_correct_number_of_polls: bool,
pub num_expected_fails: Option<usize>,
pub mock_gateway: MockServerGatewayApis,
/// All calls to fail WFTs must match this predicate
pub expect_fail_wft_matcher:
Box<dyn Fn(&TaskToken, &WorkflowTaskFailedCause, &Option<Failure>) -> bool + Send>,
}
impl MockPollCfg {
pub fn new(
hists: Vec<FakeWfResponses>,
enforce_correct_number_of_polls: bool, | num_expected_fails,
mock_gateway: MockServerGatewayApis::new(),
expect_fail_wft_matcher: Box::new(|_, _, _| true),
}
}
pub fn from_resp_batches(
wf_id: &str,
t: TestHistoryBuilder,
resps: impl IntoIterator<Item = impl Into<ResponseType>>,
mock_gateway: MockServerGatewayApis,
) -> Self {
Self {
hists: vec![FakeWfResponses {
wf_id: wf_id.to_owned(),
hist: t,
response_batches: resps.into_iter().map(Into::into).collect(),
task_q: TEST_Q.to_owned(),
}],
enforce_correct_number_of_polls: true,
num_expected_fails: None,
mock_gateway,
expect_fail_wft_matcher: Box::new(|_, _, _| true),
}
}
}
/// Given an iterable of fake responses, return the mocks & associated data to work with them
pub fn build_mock_pollers(mut cfg: MockPollCfg) -> MocksHolder<MockServerGatewayApis> {
// Maps task queues to maps of wfid -> responses
let mut task_queues_to_resps: HashMap<String, BTreeMap<String, VecDeque<_>>> = HashMap::new();
let outstanding_wf_task_tokens = Arc::new(RwLock::new(BiMap::new()));
let mut correct_num_polls = None;
for hist in cfg.hists {
let full_hist_info = hist.hist.get_full_history_info().unwrap();
// Ensure no response batch is trying to return more tasks than the history contains
for respt in &hist.response_batches {
if let ResponseType::ToTaskNum(rb_wf_num) = respt {
assert!(
*rb_wf_num <= full_hist_info.wf_task_count(),
"Wf task count {} is not <= total task count {}",
rb_wf_num,
full_hist_info.wf_task_count()
);
}
}
// TODO: Fix -- or not? Sticky invalidation could make this pointless anyway
// Verify response batches only ever return longer histories (IE: Are sorted ascending)
// assert!(
// hist.response_batches
// .as_slice()
// .windows(2)
// .all(|w| w[0] <= w[1]),
// "response batches must have increasing wft numbers"
// );
if cfg.enforce_correct_number_of_polls {
*correct_num_polls.get_or_insert(0) += hist.response_batches.len();
}
// Convert history batches into poll responses, while also tracking how many times a given
// history has been returned so we can increment the associated attempt number on the WFT.
// NOTE: This is hard to use properly with the `AfterEveryReply` testing eviction mode.
// Such usages need a history different from other eviction modes which would include
// WFT timeouts or something to simulate the task getting dropped.
let mut attempts_at_task_num = HashMap::new();
let responses: Vec<_> = hist
.response_batches
.iter()
.map(|to_task_num| {
let cur_attempt = attempts_at_task_num.entry(to_task_num).or_insert(1);
let mut r = hist_to_poll_resp(
&hist.hist,
hist.wf_id.to_owned(),
*to_task_num,
hist.task_q.clone(),
);
r.attempt = *cur_attempt;
*cur_attempt += 1;
r
})
.collect();
let tasks = VecDeque::from(responses);
task_queues_to_resps
.entry(hist.task_q)
.or_default()
.insert(hist.wf_id, tasks);
}
let mut mock_pollers = HashMap::new();
for (task_q, mut queue_tasks) in task_queues_to_resps.into_iter() {
let mut mock_poller = mock_poller();
// The poller will return history from any workflow runs that do not have currently
// outstanding tasks.
let outstanding = outstanding_wf_task_tokens.clone();
mock_poller
.expect_poll()
.times(
correct_num_polls
.map::<TimesRange, _>(Into::into)
.unwrap_or_else(|| RangeFull.into()),
)
.returning(move || {
for (_, tasks) in queue_tasks.iter_mut() {
// Must extract run id from a workflow task associated with this workflow
// TODO: Case where run id changes for same workflow id is not handled here
if let Some(t) = tasks.get(0) {
let rid = t.workflow_execution.as_ref().unwrap().run_id.clone();
if !outstanding.read().contains_left(&rid) {
let t = tasks.pop_front().unwrap();
outstanding
.write()
.insert(rid, TaskToken(t.task_token.clone()));
return Some(Ok(t));
}
}
}
Some(Err(tonic::Status::cancelled("No more work to do")))
});
let mw = MockWorker::new(&task_q, Box::from(mock_poller));
mock_pollers.insert(task_q, mw);
}
let outstanding = outstanding_wf_task_tokens.clone();
cfg.mock_gateway
.expect_complete_workflow_task()
.returning(move |comp| {
outstanding.write().remove_by_right(&comp.task_token);
Ok(RespondWorkflowTaskCompletedResponse::default())
});
let outstanding = outstanding_wf_task_tokens.clone();
cfg.mock_gateway
.expect_fail_workflow_task()
.withf(cfg.expect_fail_wft_matcher)
.times(
cfg.num_expected_fails
.map::<TimesRange, _>(Into::into)
.unwrap_or_else(|| RangeFull.into()),
)
.returning(move |tt, _, _| {
outstanding.write().remove_by_right(&tt);
Ok(Default::default())
});
cfg.mock_gateway
.expect_start_workflow()
.returning(|_, _, _, _, _| Ok(Default::default()));
MocksHolder {
sg: cfg.mock_gateway,
mock_pollers,
outstanding_task_map: Some(outstanding_wf_task_tokens),
}
}
pub fn hist_to_poll_resp(
t: &TestHistoryBuilder,
wf_id: String,
response_type: ResponseType,
task_queue: String,
) -> PollWorkflowTaskQueueResponse {
let run_id = t.get_orig_run_id();
let wf = WorkflowExecution {
workflow_id: wf_id,
run_id: run_id.to_string(),
};
let hist_info = match response_type {
ResponseType::ToTaskNum(tn) => t.get_history_info(tn).unwrap(),
ResponseType::AllHistory => t.get_full_history_info().unwrap(),
};
let batch = hist_info.events().to_vec();
let task_token: [u8; 16] = thread_rng().gen();
PollWorkflowTaskQueueResponse {
history: Some(History { events: batch }),
workflow_execution: Some(wf),
task_token: task_token.to_vec(),
workflow_type: Some(WorkflowType {
name: DEFAULT_WORKFLOW_TYPE.to_owned(),
}),
workflow_execution_task_queue: Some(TaskQueue {
name: task_queue,
kind: TaskQueueKind::Normal as i32,
}),
previous_started_event_id: hist_info.previous_started_event_id,
started_event_id: hist_info.workflow_task_started_event_id,
..Default::default()
}
}
/// Server gateway options suitable for tests: a fake target URL and
/// empty/default values for everything else.
pub fn fake_sg_opts() -> ServerGatewayOptions {
    let empty = String::new;
    ServerGatewayOptions {
        target_url: Url::from_str("https://fake").unwrap(),
        namespace: empty(),
        client_name: empty(),
        client_version: empty(),
        static_headers: Default::default(),
        identity: empty(),
        worker_binary_id: empty(),
        tls_cfg: None,
        retry_config: Default::default(),
    }
}
type AsserterWithReply<'a> = (&'a dyn Fn(&WfActivation), wf_activation_completion::Status);
/// This function accepts a list of asserts and replies to workflow activations to run against the
/// provided instance of fake core.
///
/// It handles the business of re-sending the same activation replies over again in the event
/// of eviction or workflow activation failure. Activation failures specifically are only run once,
/// since they clearly can't be returned every time we replay the workflow, or it could never
/// proceed
pub(crate) async fn poll_and_reply<'a>(
core: &'a CoreSDK,
eviction_mode: WorkflowCachingPolicy,
expect_and_reply: &'a [AsserterWithReply<'a>],
) {
poll_and_reply_clears_outstanding_evicts(core, None, eviction_mode, expect_and_reply).await
}
pub(crate) async fn poll_and_reply_clears_outstanding_evicts<'a>(
core: &'a CoreSDK,
outstanding_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>,
eviction_mode: WorkflowCachingPolicy,
expect_and_reply: &'a [AsserterWithReply<'a>],
) {
let mut evictions = 0;
let expected_evictions = expect_and_reply.len() - 1;
let mut executed_failures = HashSet::new();
let expected_fail_count = expect_and_reply
.iter()
.filter(|(_, reply)| !reply.is_success())
.count();
'outer: loop {
let expect_iter = expect_and_reply.iter();
for (i, interaction) in expect_iter.enumerate() {
let (asserter, reply) = interaction;
let complete_is_failure = !reply.is_success();
// Only send activation failures once
if executed_failures.contains(&i) {
continue;
}
let mut res = core.poll_workflow_activation(TEST_Q).await.unwrap();
let contains_eviction = res.eviction_index();
if let Some(eviction_job_ix) = contains_eviction {
// If the job list has an eviction, make sure it was the last item in the list
// then remove it, since in the tests we don't explicitly specify evict assertions
assert_eq!(
eviction_job_ix,
res.jobs.len() - 1,
"Eviction job was not last job in job list"
);
res.jobs.remove(eviction_job_ix);
if let Some(omap) = outstanding_map.as_ref() {
omap.write().remove_by_left(&res.run_id);
}
}
// TODO: Can remove this if?
if !res.jobs.is_empty() {
asserter(&res);
}
let reply = if res.jobs.is_empty() {
// Just an eviction
WfActivationCompletion::empty(TEST_Q, res.run_id.clone())
} else {
// Eviction plus some work, we still want to issue the reply
WfActivationCompletion {
task_queue: TEST_Q.to_string(),
run_id: res.run_id.clone(),
status: Some(reply.clone()),
}
};
core.complete_workflow_activation(reply).await.unwrap();
// Restart assertions from the beginning if it was an eviction
if contains_eviction.is_some() {
continue 'outer;
}
if complete_is_failure {
executed_failures.insert(i);
}
match eviction_mode {
WorkflowCachingPolicy::Sticky { .. } => unimplemented!(),
WorkflowCachingPolicy::NonSticky => (),
WorkflowCachingPolicy::AfterEveryReply => {
if evictions < expected_evictions {
core.request_workflow_eviction(TEST_Q, &res.run_id);
evictions += 1;
}
}
}
}
break;
}
assert_eq!(expected_fail_count, executed_failures.len());
// TODO: Really need a worker abstraction for testing
// assert_eq!(core.wft_manager.outstanding_wft(), 0);
}
/// Builds an assert/reply pair for [poll_and_reply] that replies with a
/// successful completion carrying `reply_commands`.
pub(crate) fn gen_assert_and_reply(
    asserter: &dyn Fn(&WfActivation),
    reply_commands: Vec<workflow_command::Variant>,
) -> AsserterWithReply<'_> {
    (
        asserter,
        workflow_completion::Success::from_variants(reply_commands).into(),
    )
}
/// Builds an assert/reply pair for [poll_and_reply] that replies with a
/// workflow activation *failure* (used to exercise failure/replay paths;
/// [poll_and_reply] sends such failures only once).
pub(crate) fn gen_assert_and_fail(asserter: &dyn Fn(&WfActivation)) -> AsserterWithReply<'_> {
    (
        asserter,
        workflow_completion::Failure {
            failure: Some(Failure {
                message: "Intentional test failure".to_string(),
                ..Default::default()
            }),
        }
        .into(),
    )
}
/// Generate asserts for [poll_and_reply] by passing patterns to match against the job list
#[macro_export]
macro_rules! job_assert {
($($pat:pat),+) => {
|res| {
assert_matches!(
res.jobs.as_slice(),
[$(WfActivationJob {
variant: Some($pat),
}),+]
);
}
};
} | num_expected_fails: Option<usize>,
) -> Self {
Self {
hists,
enforce_correct_number_of_polls, | random_line_split |
mod.rs | pub mod canned_histories;
mod history_builder;
mod history_info;
pub(crate) use history_builder::{TestHistoryBuilder, DEFAULT_WORKFLOW_TYPE};
use crate::{
pollers::{
BoxedActPoller, BoxedPoller, BoxedWFPoller, MockManualPoller, MockPoller,
MockServerGatewayApis,
},
task_token::TaskToken,
workflow::WorkflowCachingPolicy,
Core, CoreInitOptionsBuilder, CoreSDK, ServerGatewayApis, ServerGatewayOptions, Url,
WorkerConfig, WorkerConfigBuilder,
};
use bimap::BiMap;
use futures::FutureExt;
use mockall::TimesRange;
use parking_lot::RwLock;
use rand::{thread_rng, Rng};
use std::{
collections::{BTreeMap, HashMap, HashSet, VecDeque},
ops::RangeFull,
str::FromStr,
sync::Arc,
};
use temporal_sdk_core_protos::{
coresdk::{
workflow_activation::WfActivation,
workflow_commands::workflow_command,
workflow_completion::{self, wf_activation_completion, WfActivationCompletion},
},
temporal::api::{
common::v1::{WorkflowExecution, WorkflowType},
enums::v1::{TaskQueueKind, WorkflowTaskFailedCause},
failure::v1::Failure,
history::v1::History,
taskqueue::v1::TaskQueue,
workflowservice::v1::{
PollActivityTaskQueueResponse, PollWorkflowTaskQueueResponse,
RespondWorkflowTaskCompletedResponse,
},
},
};
pub type Result<T, E = anyhow::Error> = std::result::Result<T, E>;
pub const TEST_Q: &str = "q";
/// When constructing responses for mocks, indicates how a given response should be built
#[derive(derive_more::From, Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum ResponseType {
ToTaskNum(usize),
AllHistory,
}
impl From<&usize> for ResponseType {
fn from(u: &usize) -> Self {
ResponseType::ToTaskNum(*u)
}
}
// :shrug:
impl From<&ResponseType> for ResponseType {
fn from(r: &ResponseType) -> Self {
*r
}
}
/// Given identifiers for a workflow/run, and a test history builder, construct an instance of
/// the core SDK with a mock server gateway that will produce the responses as appropriate.
///
/// `response_batches` is used to control the fake [PollWorkflowTaskQueueResponse]s returned. For
/// each number in the input list, a fake response will be prepared which includes history up to the
/// workflow task with that number, as in [TestHistoryBuilder::get_history_info].
pub(crate) fn build_fake_core(
wf_id: &str,
t: TestHistoryBuilder,
response_batches: impl IntoIterator<Item = impl Into<ResponseType>>,
) -> CoreSDK {
let response_batches = response_batches.into_iter().map(Into::into).collect();
let mock_gateway = build_multihist_mock_sg(
vec![FakeWfResponses {
wf_id: wf_id.to_owned(),
hist: t,
response_batches,
task_q: TEST_Q.to_owned(),
}],
true,
None,
);
mock_core(mock_gateway)
}
pub(crate) fn mock_core<SG>(mocks: MocksHolder<SG>) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
mock_core_with_opts(mocks, CoreInitOptionsBuilder::default())
}
pub(crate) fn mock_core_with_opts<SG>(
mocks: MocksHolder<SG>,
opts: CoreInitOptionsBuilder,
) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
let mut core = mock_core_with_opts_no_workers(mocks.sg, opts);
register_mock_workers(&mut core, mocks.mock_pollers.into_values());
core
}
pub(crate) fn register_mock_workers(
core: &mut CoreSDK,
mocks: impl IntoIterator<Item = MockWorker>,
) {
for worker in mocks {
core.reg_worker_sync(worker);
}
}
pub(crate) fn mock_core_with_opts_no_workers<SG>(
sg: SG,
mut opts: CoreInitOptionsBuilder,
) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
CoreSDK::new(sg, opts.gateway_opts(fake_sg_opts()).build().unwrap())
}
pub struct FakeWfResponses {
pub wf_id: String,
pub hist: TestHistoryBuilder,
pub response_batches: Vec<ResponseType>,
pub task_q: String,
}
// TODO: turn this into a builder or make a new one? to make all these different build fns simpler
pub struct MocksHolder<SG> {
sg: SG,
mock_pollers: HashMap<String, MockWorker>,
pub outstanding_task_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>,
}
impl<SG> MocksHolder<SG> {
pub fn worker_cfg(&mut self, task_q: &str, mutator: impl FnOnce(&mut WorkerConfig)) {
if let Some(w) = self.mock_pollers.get_mut(task_q) {
mutator(&mut w.config);
}
}
pub fn take_pollers(self) -> HashMap<String, MockWorker> {
self.mock_pollers
}
}
pub struct MockWorker {
pub wf_poller: BoxedWFPoller,
pub act_poller: Option<BoxedActPoller>,
pub config: WorkerConfig,
}
impl Default for MockWorker {
fn default() -> Self {
MockWorker {
wf_poller: Box::from(mock_poller()),
act_poller: None,
config: WorkerConfig::default_test_q(),
}
}
}
impl MockWorker {
pub fn new(q: &str, wf_poller: BoxedWFPoller) -> Self {
MockWorker {
wf_poller,
act_poller: None,
config: WorkerConfig::default(q),
}
}
pub fn for_queue(q: &str) -> Self {
MockWorker {
wf_poller: Box::from(mock_poller()),
act_poller: None,
config: WorkerConfig::default(q),
}
}
}
impl<SG> MocksHolder<SG>
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
pub fn from_mock_workers(
sg: SG,
mock_workers: impl IntoIterator<Item = MockWorker>,
) -> MocksHolder<SG> {
let mock_pollers = mock_workers
.into_iter()
.map(|w| (w.config.task_queue.clone(), w))
.collect();
MocksHolder {
sg,
mock_pollers,
outstanding_task_map: None,
}
}
/// Uses the provided list of tasks to create a mock poller for the `TEST_Q`
pub fn from_gateway_with_responses(
sg: SG,
wf_tasks: VecDeque<PollWorkflowTaskQueueResponse>,
act_tasks: VecDeque<PollActivityTaskQueueResponse>,
) -> MocksHolder<SG> {
let mut mock_pollers = HashMap::new();
let mock_poller = mock_poller_from_resps(wf_tasks);
let mock_act_poller = mock_poller_from_resps(act_tasks);
mock_pollers.insert(
TEST_Q.to_string(),
MockWorker {
wf_poller: mock_poller,
act_poller: Some(mock_act_poller),
config: WorkerConfigBuilder::default()
.task_queue(TEST_Q)
.build()
.unwrap(),
},
);
MocksHolder {
sg,
mock_pollers,
outstanding_task_map: None,
}
}
}
pub fn mock_poller_from_resps<T>(mut tasks: VecDeque<T>) -> BoxedPoller<T>
where
T: Send + Sync + 'static,
{
let mut mock_poller = mock_poller();
mock_poller.expect_poll().returning(move || {
if let Some(t) = tasks.pop_front() | else {
Some(Err(tonic::Status::out_of_range(
"Ran out of mock responses!",
)))
}
});
Box::new(mock_poller) as BoxedPoller<T>
}
pub fn mock_poller<T>() -> MockPoller<T>
where
T: Send + Sync + 'static,
{
let mut mock_poller = MockPoller::new();
mock_poller.expect_shutdown_box().return_const(());
mock_poller.expect_notify_shutdown().return_const(());
mock_poller
}
pub fn mock_manual_poller<T>() -> MockManualPoller<T>
where
T: Send + Sync + 'static,
{
let mut mock_poller = MockManualPoller::new();
mock_poller
.expect_shutdown_box()
.returning(|| async {}.boxed());
mock_poller.expect_notify_shutdown().return_const(());
mock_poller
}
/// Build a mock server gateway capable of returning multiple different histories for different
/// workflows. It does so by tracking outstanding workflow tasks like is also happening in core
/// (which is unfortunately a bit redundant, we could provide hooks in core but that feels a little
/// nasty). If there is an outstanding task for a given workflow, new chunks of its history are not
/// returned. If there is not, the next batch of history is returned for any workflow without an
/// outstanding task. Outstanding tasks are cleared on completion, failure, or eviction.
///
/// `num_expected_fails` can be provided to set a specific number of expected failed workflow tasks
/// sent to the server.
pub fn build_multihist_mock_sg(
hists: impl IntoIterator<Item = FakeWfResponses>,
enforce_correct_number_of_polls: bool,
num_expected_fails: Option<usize>,
) -> MocksHolder<MockServerGatewayApis> {
let mh = MockPollCfg::new(
hists.into_iter().collect(),
enforce_correct_number_of_polls,
num_expected_fails,
);
build_mock_pollers(mh)
}
/// See [build_multihist_mock_sg] -- one history convenience version
pub fn single_hist_mock_sg(
wf_id: &str,
t: TestHistoryBuilder,
response_batches: impl IntoIterator<Item = impl Into<ResponseType>>,
mock_gateway: MockServerGatewayApis,
enforce_num_polls: bool,
) -> MocksHolder<MockServerGatewayApis> {
let mut mh = MockPollCfg::from_resp_batches(wf_id, t, response_batches, mock_gateway);
mh.enforce_correct_number_of_polls = enforce_num_polls;
build_mock_pollers(mh)
}
pub struct MockPollCfg {
pub hists: Vec<FakeWfResponses>,
pub enforce_correct_number_of_polls: bool,
pub num_expected_fails: Option<usize>,
pub mock_gateway: MockServerGatewayApis,
/// All calls to fail WFTs must match this predicate
pub expect_fail_wft_matcher:
Box<dyn Fn(&TaskToken, &WorkflowTaskFailedCause, &Option<Failure>) -> bool + Send>,
}
impl MockPollCfg {
pub fn new(
hists: Vec<FakeWfResponses>,
enforce_correct_number_of_polls: bool,
num_expected_fails: Option<usize>,
) -> Self {
Self {
hists,
enforce_correct_number_of_polls,
num_expected_fails,
mock_gateway: MockServerGatewayApis::new(),
expect_fail_wft_matcher: Box::new(|_, _, _| true),
}
}
pub fn from_resp_batches(
wf_id: &str,
t: TestHistoryBuilder,
resps: impl IntoIterator<Item = impl Into<ResponseType>>,
mock_gateway: MockServerGatewayApis,
) -> Self {
Self {
hists: vec![FakeWfResponses {
wf_id: wf_id.to_owned(),
hist: t,
response_batches: resps.into_iter().map(Into::into).collect(),
task_q: TEST_Q.to_owned(),
}],
enforce_correct_number_of_polls: true,
num_expected_fails: None,
mock_gateway,
expect_fail_wft_matcher: Box::new(|_, _, _| true),
}
}
}
/// Given an iterable of fake responses, return the mocks & associated data to work with them
pub fn build_mock_pollers(mut cfg: MockPollCfg) -> MocksHolder<MockServerGatewayApis> {
// Maps task queues to maps of wfid -> responses
let mut task_queues_to_resps: HashMap<String, BTreeMap<String, VecDeque<_>>> = HashMap::new();
let outstanding_wf_task_tokens = Arc::new(RwLock::new(BiMap::new()));
let mut correct_num_polls = None;
for hist in cfg.hists {
let full_hist_info = hist.hist.get_full_history_info().unwrap();
// Ensure no response batch is trying to return more tasks than the history contains
for respt in &hist.response_batches {
if let ResponseType::ToTaskNum(rb_wf_num) = respt {
assert!(
*rb_wf_num <= full_hist_info.wf_task_count(),
"Wf task count {} is not <= total task count {}",
rb_wf_num,
full_hist_info.wf_task_count()
);
}
}
// TODO: Fix -- or not? Sticky invalidation could make this pointless anyway
// Verify response batches only ever return longer histories (IE: Are sorted ascending)
// assert!(
// hist.response_batches
// .as_slice()
// .windows(2)
// .all(|w| w[0] <= w[1]),
// "response batches must have increasing wft numbers"
// );
if cfg.enforce_correct_number_of_polls {
*correct_num_polls.get_or_insert(0) += hist.response_batches.len();
}
// Convert history batches into poll responses, while also tracking how many times a given
// history has been returned so we can increment the associated attempt number on the WFT.
// NOTE: This is hard to use properly with the `AfterEveryReply` testing eviction mode.
// Such usages need a history different from other eviction modes which would include
// WFT timeouts or something to simulate the task getting dropped.
let mut attempts_at_task_num = HashMap::new();
let responses: Vec<_> = hist
.response_batches
.iter()
.map(|to_task_num| {
let cur_attempt = attempts_at_task_num.entry(to_task_num).or_insert(1);
let mut r = hist_to_poll_resp(
&hist.hist,
hist.wf_id.to_owned(),
*to_task_num,
hist.task_q.clone(),
);
r.attempt = *cur_attempt;
*cur_attempt += 1;
r
})
.collect();
let tasks = VecDeque::from(responses);
task_queues_to_resps
.entry(hist.task_q)
.or_default()
.insert(hist.wf_id, tasks);
}
let mut mock_pollers = HashMap::new();
for (task_q, mut queue_tasks) in task_queues_to_resps.into_iter() {
let mut mock_poller = mock_poller();
// The poller will return history from any workflow runs that do not have currently
// outstanding tasks.
let outstanding = outstanding_wf_task_tokens.clone();
mock_poller
.expect_poll()
.times(
correct_num_polls
.map::<TimesRange, _>(Into::into)
.unwrap_or_else(|| RangeFull.into()),
)
.returning(move || {
for (_, tasks) in queue_tasks.iter_mut() {
// Must extract run id from a workflow task associated with this workflow
// TODO: Case where run id changes for same workflow id is not handled here
if let Some(t) = tasks.get(0) {
let rid = t.workflow_execution.as_ref().unwrap().run_id.clone();
if !outstanding.read().contains_left(&rid) {
let t = tasks.pop_front().unwrap();
outstanding
.write()
.insert(rid, TaskToken(t.task_token.clone()));
return Some(Ok(t));
}
}
}
Some(Err(tonic::Status::cancelled("No more work to do")))
});
let mw = MockWorker::new(&task_q, Box::from(mock_poller));
mock_pollers.insert(task_q, mw);
}
let outstanding = outstanding_wf_task_tokens.clone();
cfg.mock_gateway
.expect_complete_workflow_task()
.returning(move |comp| {
outstanding.write().remove_by_right(&comp.task_token);
Ok(RespondWorkflowTaskCompletedResponse::default())
});
let outstanding = outstanding_wf_task_tokens.clone();
cfg.mock_gateway
.expect_fail_workflow_task()
.withf(cfg.expect_fail_wft_matcher)
.times(
cfg.num_expected_fails
.map::<TimesRange, _>(Into::into)
.unwrap_or_else(|| RangeFull.into()),
)
.returning(move |tt, _, _| {
outstanding.write().remove_by_right(&tt);
Ok(Default::default())
});
cfg.mock_gateway
.expect_start_workflow()
.returning(|_, _, _, _, _| Ok(Default::default()));
MocksHolder {
sg: cfg.mock_gateway,
mock_pollers,
outstanding_task_map: Some(outstanding_wf_task_tokens),
}
}
pub fn hist_to_poll_resp(
t: &TestHistoryBuilder,
wf_id: String,
response_type: ResponseType,
task_queue: String,
) -> PollWorkflowTaskQueueResponse {
let run_id = t.get_orig_run_id();
let wf = WorkflowExecution {
workflow_id: wf_id,
run_id: run_id.to_string(),
};
let hist_info = match response_type {
ResponseType::ToTaskNum(tn) => t.get_history_info(tn).unwrap(),
ResponseType::AllHistory => t.get_full_history_info().unwrap(),
};
let batch = hist_info.events().to_vec();
let task_token: [u8; 16] = thread_rng().gen();
PollWorkflowTaskQueueResponse {
history: Some(History { events: batch }),
workflow_execution: Some(wf),
task_token: task_token.to_vec(),
workflow_type: Some(WorkflowType {
name: DEFAULT_WORKFLOW_TYPE.to_owned(),
}),
workflow_execution_task_queue: Some(TaskQueue {
name: task_queue,
kind: TaskQueueKind::Normal as i32,
}),
previous_started_event_id: hist_info.previous_started_event_id,
started_event_id: hist_info.workflow_task_started_event_id,
..Default::default()
}
}
pub fn fake_sg_opts() -> ServerGatewayOptions {
ServerGatewayOptions {
target_url: Url::from_str("https://fake").unwrap(),
namespace: "".to_string(),
client_name: "".to_string(),
client_version: "".to_string(),
static_headers: Default::default(),
identity: "".to_string(),
worker_binary_id: "".to_string(),
tls_cfg: None,
retry_config: Default::default(),
}
}
type AsserterWithReply<'a> = (&'a dyn Fn(&WfActivation), wf_activation_completion::Status);
/// This function accepts a list of asserts and replies to workflow activations to run against the
/// provided instance of fake core.
///
/// It handles the business of re-sending the same activation replies over again in the event
/// of eviction or workflow activation failure. Activation failures specifically are only run once,
/// since they clearly can't be returned every time we replay the workflow, or it could never
/// proceed
pub(crate) async fn poll_and_reply<'a>(
core: &'a CoreSDK,
eviction_mode: WorkflowCachingPolicy,
expect_and_reply: &'a [AsserterWithReply<'a>],
) {
poll_and_reply_clears_outstanding_evicts(core, None, eviction_mode, expect_and_reply).await
}
pub(crate) async fn poll_and_reply_clears_outstanding_evicts<'a>(
core: &'a CoreSDK,
outstanding_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>,
eviction_mode: WorkflowCachingPolicy,
expect_and_reply: &'a [AsserterWithReply<'a>],
) {
let mut evictions = 0;
let expected_evictions = expect_and_reply.len() - 1;
let mut executed_failures = HashSet::new();
let expected_fail_count = expect_and_reply
.iter()
.filter(|(_, reply)| !reply.is_success())
.count();
'outer: loop {
let expect_iter = expect_and_reply.iter();
for (i, interaction) in expect_iter.enumerate() {
let (asserter, reply) = interaction;
let complete_is_failure = !reply.is_success();
// Only send activation failures once
if executed_failures.contains(&i) {
continue;
}
let mut res = core.poll_workflow_activation(TEST_Q).await.unwrap();
let contains_eviction = res.eviction_index();
if let Some(eviction_job_ix) = contains_eviction {
// If the job list has an eviction, make sure it was the last item in the list
// then remove it, since in the tests we don't explicitly specify evict assertions
assert_eq!(
eviction_job_ix,
res.jobs.len() - 1,
"Eviction job was not last job in job list"
);
res.jobs.remove(eviction_job_ix);
if let Some(omap) = outstanding_map.as_ref() {
omap.write().remove_by_left(&res.run_id);
}
}
// TODO: Can remove this if?
if !res.jobs.is_empty() {
asserter(&res);
}
let reply = if res.jobs.is_empty() {
// Just an eviction
WfActivationCompletion::empty(TEST_Q, res.run_id.clone())
} else {
// Eviction plus some work, we still want to issue the reply
WfActivationCompletion {
task_queue: TEST_Q.to_string(),
run_id: res.run_id.clone(),
status: Some(reply.clone()),
}
};
core.complete_workflow_activation(reply).await.unwrap();
// Restart assertions from the beginning if it was an eviction
if contains_eviction.is_some() {
continue 'outer;
}
if complete_is_failure {
executed_failures.insert(i);
}
match eviction_mode {
WorkflowCachingPolicy::Sticky { .. } => unimplemented!(),
WorkflowCachingPolicy::NonSticky => (),
WorkflowCachingPolicy::AfterEveryReply => {
if evictions < expected_evictions {
core.request_workflow_eviction(TEST_Q, &res.run_id);
evictions += 1;
}
}
}
}
break;
}
assert_eq!(expected_fail_count, executed_failures.len());
// TODO: Really need a worker abstraction for testing
// assert_eq!(core.wft_manager.outstanding_wft(), 0);
}
pub(crate) fn gen_assert_and_reply(
asserter: &dyn Fn(&WfActivation),
reply_commands: Vec<workflow_command::Variant>,
) -> AsserterWithReply<'_> {
(
asserter,
workflow_completion::Success::from_variants(reply_commands).into(),
)
}
pub(crate) fn gen_assert_and_fail(asserter: &dyn Fn(&WfActivation)) -> AsserterWithReply<'_> {
(
asserter,
workflow_completion::Failure {
failure: Some(Failure {
message: "Intentional test failure".to_string(),
..Default::default()
}),
}
.into(),
)
}
/// Generate asserts for [poll_and_reply] by passing patterns to match against the job list
#[macro_export]
macro_rules! job_assert {
($($pat:pat),+) => {
|res| {
assert_matches!(
res.jobs.as_slice(),
[$(WfActivationJob {
variant: Some($pat),
}),+]
);
}
};
}
| {
Some(Ok(t))
} | conditional_block |
mod.rs | pub mod canned_histories;
mod history_builder;
mod history_info;
pub(crate) use history_builder::{TestHistoryBuilder, DEFAULT_WORKFLOW_TYPE};
use crate::{
pollers::{
BoxedActPoller, BoxedPoller, BoxedWFPoller, MockManualPoller, MockPoller,
MockServerGatewayApis,
},
task_token::TaskToken,
workflow::WorkflowCachingPolicy,
Core, CoreInitOptionsBuilder, CoreSDK, ServerGatewayApis, ServerGatewayOptions, Url,
WorkerConfig, WorkerConfigBuilder,
};
use bimap::BiMap;
use futures::FutureExt;
use mockall::TimesRange;
use parking_lot::RwLock;
use rand::{thread_rng, Rng};
use std::{
collections::{BTreeMap, HashMap, HashSet, VecDeque},
ops::RangeFull,
str::FromStr,
sync::Arc,
};
use temporal_sdk_core_protos::{
coresdk::{
workflow_activation::WfActivation,
workflow_commands::workflow_command,
workflow_completion::{self, wf_activation_completion, WfActivationCompletion},
},
temporal::api::{
common::v1::{WorkflowExecution, WorkflowType},
enums::v1::{TaskQueueKind, WorkflowTaskFailedCause},
failure::v1::Failure,
history::v1::History,
taskqueue::v1::TaskQueue,
workflowservice::v1::{
PollActivityTaskQueueResponse, PollWorkflowTaskQueueResponse,
RespondWorkflowTaskCompletedResponse,
},
},
};
pub type Result<T, E = anyhow::Error> = std::result::Result<T, E>;
pub const TEST_Q: &str = "q";
/// When constructing responses for mocks, indicates how a given response should be built
#[derive(derive_more::From, Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum ResponseType {
ToTaskNum(usize),
AllHistory,
}
impl From<&usize> for ResponseType {
fn from(u: &usize) -> Self {
ResponseType::ToTaskNum(*u)
}
}
// :shrug:
impl From<&ResponseType> for ResponseType {
fn from(r: &ResponseType) -> Self {
*r
}
}
/// Given identifiers for a workflow/run, and a test history builder, construct an instance of
/// the core SDK with a mock server gateway that will produce the responses as appropriate.
///
/// `response_batches` is used to control the fake [PollWorkflowTaskQueueResponse]s returned. For
/// each number in the input list, a fake response will be prepared which includes history up to the
/// workflow task with that number, as in [TestHistoryBuilder::get_history_info].
pub(crate) fn build_fake_core(
wf_id: &str,
t: TestHistoryBuilder,
response_batches: impl IntoIterator<Item = impl Into<ResponseType>>,
) -> CoreSDK {
let response_batches = response_batches.into_iter().map(Into::into).collect();
let mock_gateway = build_multihist_mock_sg(
vec![FakeWfResponses {
wf_id: wf_id.to_owned(),
hist: t,
response_batches,
task_q: TEST_Q.to_owned(),
}],
true,
None,
);
mock_core(mock_gateway)
}
pub(crate) fn mock_core<SG>(mocks: MocksHolder<SG>) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
mock_core_with_opts(mocks, CoreInitOptionsBuilder::default())
}
pub(crate) fn mock_core_with_opts<SG>(
mocks: MocksHolder<SG>,
opts: CoreInitOptionsBuilder,
) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
let mut core = mock_core_with_opts_no_workers(mocks.sg, opts);
register_mock_workers(&mut core, mocks.mock_pollers.into_values());
core
}
pub(crate) fn register_mock_workers(
core: &mut CoreSDK,
mocks: impl IntoIterator<Item = MockWorker>,
) {
for worker in mocks {
core.reg_worker_sync(worker);
}
}
pub(crate) fn mock_core_with_opts_no_workers<SG>(
sg: SG,
mut opts: CoreInitOptionsBuilder,
) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
CoreSDK::new(sg, opts.gateway_opts(fake_sg_opts()).build().unwrap())
}
pub struct FakeWfResponses {
pub wf_id: String,
pub hist: TestHistoryBuilder,
pub response_batches: Vec<ResponseType>,
pub task_q: String,
}
// TODO: turn this into a builder or make a new one? to make all these different build fns simpler
pub struct MocksHolder<SG> {
sg: SG,
mock_pollers: HashMap<String, MockWorker>,
pub outstanding_task_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>,
}
impl<SG> MocksHolder<SG> {
pub fn worker_cfg(&mut self, task_q: &str, mutator: impl FnOnce(&mut WorkerConfig)) {
if let Some(w) = self.mock_pollers.get_mut(task_q) {
mutator(&mut w.config);
}
}
pub fn take_pollers(self) -> HashMap<String, MockWorker> {
self.mock_pollers
}
}
pub struct MockWorker {
pub wf_poller: BoxedWFPoller,
pub act_poller: Option<BoxedActPoller>,
pub config: WorkerConfig,
}
impl Default for MockWorker {
fn default() -> Self {
MockWorker {
wf_poller: Box::from(mock_poller()),
act_poller: None,
config: WorkerConfig::default_test_q(),
}
}
}
impl MockWorker {
pub fn new(q: &str, wf_poller: BoxedWFPoller) -> Self {
MockWorker {
wf_poller,
act_poller: None,
config: WorkerConfig::default(q),
}
}
pub fn for_queue(q: &str) -> Self {
MockWorker {
wf_poller: Box::from(mock_poller()),
act_poller: None,
config: WorkerConfig::default(q),
}
}
}
impl<SG> MocksHolder<SG>
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
pub fn from_mock_workers(
sg: SG,
mock_workers: impl IntoIterator<Item = MockWorker>,
) -> MocksHolder<SG> {
let mock_pollers = mock_workers
.into_iter()
.map(|w| (w.config.task_queue.clone(), w))
.collect();
MocksHolder {
sg,
mock_pollers,
outstanding_task_map: None,
}
}
/// Uses the provided list of tasks to create a mock poller for the `TEST_Q`
pub fn from_gateway_with_responses(
sg: SG,
wf_tasks: VecDeque<PollWorkflowTaskQueueResponse>,
act_tasks: VecDeque<PollActivityTaskQueueResponse>,
) -> MocksHolder<SG> {
let mut mock_pollers = HashMap::new();
let mock_poller = mock_poller_from_resps(wf_tasks);
let mock_act_poller = mock_poller_from_resps(act_tasks);
mock_pollers.insert(
TEST_Q.to_string(),
MockWorker {
wf_poller: mock_poller,
act_poller: Some(mock_act_poller),
config: WorkerConfigBuilder::default()
.task_queue(TEST_Q)
.build()
.unwrap(),
},
);
MocksHolder {
sg,
mock_pollers,
outstanding_task_map: None,
}
}
}
pub fn mock_poller_from_resps<T>(mut tasks: VecDeque<T>) -> BoxedPoller<T>
where
T: Send + Sync + 'static,
{
let mut mock_poller = mock_poller();
mock_poller.expect_poll().returning(move || {
if let Some(t) = tasks.pop_front() {
Some(Ok(t))
} else {
Some(Err(tonic::Status::out_of_range(
"Ran out of mock responses!",
)))
}
});
Box::new(mock_poller) as BoxedPoller<T>
}
pub fn mock_poller<T>() -> MockPoller<T>
where
T: Send + Sync + 'static,
{
let mut mock_poller = MockPoller::new();
mock_poller.expect_shutdown_box().return_const(());
mock_poller.expect_notify_shutdown().return_const(());
mock_poller
}
pub fn mock_manual_poller<T>() -> MockManualPoller<T>
where
T: Send + Sync + 'static,
{
let mut mock_poller = MockManualPoller::new();
mock_poller
.expect_shutdown_box()
.returning(|| async {}.boxed());
mock_poller.expect_notify_shutdown().return_const(());
mock_poller
}
/// Build a mock server gateway capable of returning multiple different histories for different
/// workflows. It does so by tracking outstanding workflow tasks like is also happening in core
/// (which is unfortunately a bit redundant, we could provide hooks in core but that feels a little
/// nasty). If there is an outstanding task for a given workflow, new chunks of its history are not
/// returned. If there is not, the next batch of history is returned for any workflow without an
/// outstanding task. Outstanding tasks are cleared on completion, failure, or eviction.
///
/// `num_expected_fails` can be provided to set a specific number of expected failed workflow tasks
/// sent to the server.
pub fn build_multihist_mock_sg(
hists: impl IntoIterator<Item = FakeWfResponses>,
enforce_correct_number_of_polls: bool,
num_expected_fails: Option<usize>,
) -> MocksHolder<MockServerGatewayApis> {
let mh = MockPollCfg::new(
hists.into_iter().collect(),
enforce_correct_number_of_polls,
num_expected_fails,
);
build_mock_pollers(mh)
}
/// See [build_multihist_mock_sg] -- one history convenience version
pub fn single_hist_mock_sg(
wf_id: &str,
t: TestHistoryBuilder,
response_batches: impl IntoIterator<Item = impl Into<ResponseType>>,
mock_gateway: MockServerGatewayApis,
enforce_num_polls: bool,
) -> MocksHolder<MockServerGatewayApis> {
let mut mh = MockPollCfg::from_resp_batches(wf_id, t, response_batches, mock_gateway);
mh.enforce_correct_number_of_polls = enforce_num_polls;
build_mock_pollers(mh)
}
pub struct MockPollCfg {
pub hists: Vec<FakeWfResponses>,
pub enforce_correct_number_of_polls: bool,
pub num_expected_fails: Option<usize>,
pub mock_gateway: MockServerGatewayApis,
/// All calls to fail WFTs must match this predicate
pub expect_fail_wft_matcher:
Box<dyn Fn(&TaskToken, &WorkflowTaskFailedCause, &Option<Failure>) -> bool + Send>,
}
impl MockPollCfg {
pub fn new(
hists: Vec<FakeWfResponses>,
enforce_correct_number_of_polls: bool,
num_expected_fails: Option<usize>,
) -> Self {
Self {
hists,
enforce_correct_number_of_polls,
num_expected_fails,
mock_gateway: MockServerGatewayApis::new(),
expect_fail_wft_matcher: Box::new(|_, _, _| true),
}
}
pub fn from_resp_batches(
wf_id: &str,
t: TestHistoryBuilder,
resps: impl IntoIterator<Item = impl Into<ResponseType>>,
mock_gateway: MockServerGatewayApis,
) -> Self {
Self {
hists: vec![FakeWfResponses {
wf_id: wf_id.to_owned(),
hist: t,
response_batches: resps.into_iter().map(Into::into).collect(),
task_q: TEST_Q.to_owned(),
}],
enforce_correct_number_of_polls: true,
num_expected_fails: None,
mock_gateway,
expect_fail_wft_matcher: Box::new(|_, _, _| true),
}
}
}
/// Given an iterable of fake responses, return the mocks & associated data to work with them
pub fn | (mut cfg: MockPollCfg) -> MocksHolder<MockServerGatewayApis> {
// Maps task queues to maps of wfid -> responses
let mut task_queues_to_resps: HashMap<String, BTreeMap<String, VecDeque<_>>> = HashMap::new();
let outstanding_wf_task_tokens = Arc::new(RwLock::new(BiMap::new()));
let mut correct_num_polls = None;
for hist in cfg.hists {
let full_hist_info = hist.hist.get_full_history_info().unwrap();
// Ensure no response batch is trying to return more tasks than the history contains
for respt in &hist.response_batches {
if let ResponseType::ToTaskNum(rb_wf_num) = respt {
assert!(
*rb_wf_num <= full_hist_info.wf_task_count(),
"Wf task count {} is not <= total task count {}",
rb_wf_num,
full_hist_info.wf_task_count()
);
}
}
// TODO: Fix -- or not? Sticky invalidation could make this pointless anyway
// Verify response batches only ever return longer histories (IE: Are sorted ascending)
// assert!(
// hist.response_batches
// .as_slice()
// .windows(2)
// .all(|w| w[0] <= w[1]),
// "response batches must have increasing wft numbers"
// );
if cfg.enforce_correct_number_of_polls {
*correct_num_polls.get_or_insert(0) += hist.response_batches.len();
}
// Convert history batches into poll responses, while also tracking how many times a given
// history has been returned so we can increment the associated attempt number on the WFT.
// NOTE: This is hard to use properly with the `AfterEveryReply` testing eviction mode.
// Such usages need a history different from other eviction modes which would include
// WFT timeouts or something to simulate the task getting dropped.
let mut attempts_at_task_num = HashMap::new();
let responses: Vec<_> = hist
.response_batches
.iter()
.map(|to_task_num| {
let cur_attempt = attempts_at_task_num.entry(to_task_num).or_insert(1);
let mut r = hist_to_poll_resp(
&hist.hist,
hist.wf_id.to_owned(),
*to_task_num,
hist.task_q.clone(),
);
r.attempt = *cur_attempt;
*cur_attempt += 1;
r
})
.collect();
let tasks = VecDeque::from(responses);
task_queues_to_resps
.entry(hist.task_q)
.or_default()
.insert(hist.wf_id, tasks);
}
let mut mock_pollers = HashMap::new();
for (task_q, mut queue_tasks) in task_queues_to_resps.into_iter() {
let mut mock_poller = mock_poller();
// The poller will return history from any workflow runs that do not have currently
// outstanding tasks.
let outstanding = outstanding_wf_task_tokens.clone();
mock_poller
.expect_poll()
.times(
correct_num_polls
.map::<TimesRange, _>(Into::into)
.unwrap_or_else(|| RangeFull.into()),
)
.returning(move || {
for (_, tasks) in queue_tasks.iter_mut() {
// Must extract run id from a workflow task associated with this workflow
// TODO: Case where run id changes for same workflow id is not handled here
if let Some(t) = tasks.get(0) {
let rid = t.workflow_execution.as_ref().unwrap().run_id.clone();
if !outstanding.read().contains_left(&rid) {
let t = tasks.pop_front().unwrap();
outstanding
.write()
.insert(rid, TaskToken(t.task_token.clone()));
return Some(Ok(t));
}
}
}
Some(Err(tonic::Status::cancelled("No more work to do")))
});
let mw = MockWorker::new(&task_q, Box::from(mock_poller));
mock_pollers.insert(task_q, mw);
}
let outstanding = outstanding_wf_task_tokens.clone();
cfg.mock_gateway
.expect_complete_workflow_task()
.returning(move |comp| {
outstanding.write().remove_by_right(&comp.task_token);
Ok(RespondWorkflowTaskCompletedResponse::default())
});
let outstanding = outstanding_wf_task_tokens.clone();
cfg.mock_gateway
.expect_fail_workflow_task()
.withf(cfg.expect_fail_wft_matcher)
.times(
cfg.num_expected_fails
.map::<TimesRange, _>(Into::into)
.unwrap_or_else(|| RangeFull.into()),
)
.returning(move |tt, _, _| {
outstanding.write().remove_by_right(&tt);
Ok(Default::default())
});
cfg.mock_gateway
.expect_start_workflow()
.returning(|_, _, _, _, _| Ok(Default::default()));
MocksHolder {
sg: cfg.mock_gateway,
mock_pollers,
outstanding_task_map: Some(outstanding_wf_task_tokens),
}
}
pub fn hist_to_poll_resp(
t: &TestHistoryBuilder,
wf_id: String,
response_type: ResponseType,
task_queue: String,
) -> PollWorkflowTaskQueueResponse {
let run_id = t.get_orig_run_id();
let wf = WorkflowExecution {
workflow_id: wf_id,
run_id: run_id.to_string(),
};
let hist_info = match response_type {
ResponseType::ToTaskNum(tn) => t.get_history_info(tn).unwrap(),
ResponseType::AllHistory => t.get_full_history_info().unwrap(),
};
let batch = hist_info.events().to_vec();
let task_token: [u8; 16] = thread_rng().gen();
PollWorkflowTaskQueueResponse {
history: Some(History { events: batch }),
workflow_execution: Some(wf),
task_token: task_token.to_vec(),
workflow_type: Some(WorkflowType {
name: DEFAULT_WORKFLOW_TYPE.to_owned(),
}),
workflow_execution_task_queue: Some(TaskQueue {
name: task_queue,
kind: TaskQueueKind::Normal as i32,
}),
previous_started_event_id: hist_info.previous_started_event_id,
started_event_id: hist_info.workflow_task_started_event_id,
..Default::default()
}
}
pub fn fake_sg_opts() -> ServerGatewayOptions {
ServerGatewayOptions {
target_url: Url::from_str("https://fake").unwrap(),
namespace: "".to_string(),
client_name: "".to_string(),
client_version: "".to_string(),
static_headers: Default::default(),
identity: "".to_string(),
worker_binary_id: "".to_string(),
tls_cfg: None,
retry_config: Default::default(),
}
}
type AsserterWithReply<'a> = (&'a dyn Fn(&WfActivation), wf_activation_completion::Status);
/// This function accepts a list of asserts and replies to workflow activations to run against the
/// provided instance of fake core.
///
/// It handles the business of re-sending the same activation replies over again in the event
/// of eviction or workflow activation failure. Activation failures specifically are only run once,
/// since they clearly can't be returned every time we replay the workflow, or it could never
/// proceed
pub(crate) async fn poll_and_reply<'a>(
core: &'a CoreSDK,
eviction_mode: WorkflowCachingPolicy,
expect_and_reply: &'a [AsserterWithReply<'a>],
) {
poll_and_reply_clears_outstanding_evicts(core, None, eviction_mode, expect_and_reply).await
}
pub(crate) async fn poll_and_reply_clears_outstanding_evicts<'a>(
core: &'a CoreSDK,
outstanding_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>,
eviction_mode: WorkflowCachingPolicy,
expect_and_reply: &'a [AsserterWithReply<'a>],
) {
let mut evictions = 0;
let expected_evictions = expect_and_reply.len() - 1;
let mut executed_failures = HashSet::new();
let expected_fail_count = expect_and_reply
.iter()
.filter(|(_, reply)| !reply.is_success())
.count();
'outer: loop {
let expect_iter = expect_and_reply.iter();
for (i, interaction) in expect_iter.enumerate() {
let (asserter, reply) = interaction;
let complete_is_failure = !reply.is_success();
// Only send activation failures once
if executed_failures.contains(&i) {
continue;
}
let mut res = core.poll_workflow_activation(TEST_Q).await.unwrap();
let contains_eviction = res.eviction_index();
if let Some(eviction_job_ix) = contains_eviction {
// If the job list has an eviction, make sure it was the last item in the list
// then remove it, since in the tests we don't explicitly specify evict assertions
assert_eq!(
eviction_job_ix,
res.jobs.len() - 1,
"Eviction job was not last job in job list"
);
res.jobs.remove(eviction_job_ix);
if let Some(omap) = outstanding_map.as_ref() {
omap.write().remove_by_left(&res.run_id);
}
}
// TODO: Can remove this if?
if !res.jobs.is_empty() {
asserter(&res);
}
let reply = if res.jobs.is_empty() {
// Just an eviction
WfActivationCompletion::empty(TEST_Q, res.run_id.clone())
} else {
// Eviction plus some work, we still want to issue the reply
WfActivationCompletion {
task_queue: TEST_Q.to_string(),
run_id: res.run_id.clone(),
status: Some(reply.clone()),
}
};
core.complete_workflow_activation(reply).await.unwrap();
// Restart assertions from the beginning if it was an eviction
if contains_eviction.is_some() {
continue 'outer;
}
if complete_is_failure {
executed_failures.insert(i);
}
match eviction_mode {
WorkflowCachingPolicy::Sticky { .. } => unimplemented!(),
WorkflowCachingPolicy::NonSticky => (),
WorkflowCachingPolicy::AfterEveryReply => {
if evictions < expected_evictions {
core.request_workflow_eviction(TEST_Q, &res.run_id);
evictions += 1;
}
}
}
}
break;
}
assert_eq!(expected_fail_count, executed_failures.len());
// TODO: Really need a worker abstraction for testing
// assert_eq!(core.wft_manager.outstanding_wft(), 0);
}
pub(crate) fn gen_assert_and_reply(
asserter: &dyn Fn(&WfActivation),
reply_commands: Vec<workflow_command::Variant>,
) -> AsserterWithReply<'_> {
(
asserter,
workflow_completion::Success::from_variants(reply_commands).into(),
)
}
pub(crate) fn gen_assert_and_fail(asserter: &dyn Fn(&WfActivation)) -> AsserterWithReply<'_> {
(
asserter,
workflow_completion::Failure {
failure: Some(Failure {
message: "Intentional test failure".to_string(),
..Default::default()
}),
}
.into(),
)
}
/// Generate asserts for [poll_and_reply] by passing patterns to match against the job list
#[macro_export]
macro_rules! job_assert {
($($pat:pat),+) => {
|res| {
assert_matches!(
res.jobs.as_slice(),
[$(WfActivationJob {
variant: Some($pat),
}),+]
);
}
};
}
| build_mock_pollers | identifier_name |
mod.rs | pub mod canned_histories;
mod history_builder;
mod history_info;
pub(crate) use history_builder::{TestHistoryBuilder, DEFAULT_WORKFLOW_TYPE};
use crate::{
pollers::{
BoxedActPoller, BoxedPoller, BoxedWFPoller, MockManualPoller, MockPoller,
MockServerGatewayApis,
},
task_token::TaskToken,
workflow::WorkflowCachingPolicy,
Core, CoreInitOptionsBuilder, CoreSDK, ServerGatewayApis, ServerGatewayOptions, Url,
WorkerConfig, WorkerConfigBuilder,
};
use bimap::BiMap;
use futures::FutureExt;
use mockall::TimesRange;
use parking_lot::RwLock;
use rand::{thread_rng, Rng};
use std::{
collections::{BTreeMap, HashMap, HashSet, VecDeque},
ops::RangeFull,
str::FromStr,
sync::Arc,
};
use temporal_sdk_core_protos::{
coresdk::{
workflow_activation::WfActivation,
workflow_commands::workflow_command,
workflow_completion::{self, wf_activation_completion, WfActivationCompletion},
},
temporal::api::{
common::v1::{WorkflowExecution, WorkflowType},
enums::v1::{TaskQueueKind, WorkflowTaskFailedCause},
failure::v1::Failure,
history::v1::History,
taskqueue::v1::TaskQueue,
workflowservice::v1::{
PollActivityTaskQueueResponse, PollWorkflowTaskQueueResponse,
RespondWorkflowTaskCompletedResponse,
},
},
};
pub type Result<T, E = anyhow::Error> = std::result::Result<T, E>;
pub const TEST_Q: &str = "q";
/// When constructing responses for mocks, indicates how a given response should be built
#[derive(derive_more::From, Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum ResponseType {
ToTaskNum(usize),
AllHistory,
}
impl From<&usize> for ResponseType {
fn from(u: &usize) -> Self {
ResponseType::ToTaskNum(*u)
}
}
// :shrug:
impl From<&ResponseType> for ResponseType {
fn from(r: &ResponseType) -> Self {
*r
}
}
/// Given identifiers for a workflow/run, and a test history builder, construct an instance of
/// the core SDK with a mock server gateway that will produce the responses as appropriate.
///
/// `response_batches` is used to control the fake [PollWorkflowTaskQueueResponse]s returned. For
/// each number in the input list, a fake response will be prepared which includes history up to the
/// workflow task with that number, as in [TestHistoryBuilder::get_history_info].
pub(crate) fn build_fake_core(
wf_id: &str,
t: TestHistoryBuilder,
response_batches: impl IntoIterator<Item = impl Into<ResponseType>>,
) -> CoreSDK {
let response_batches = response_batches.into_iter().map(Into::into).collect();
let mock_gateway = build_multihist_mock_sg(
vec![FakeWfResponses {
wf_id: wf_id.to_owned(),
hist: t,
response_batches,
task_q: TEST_Q.to_owned(),
}],
true,
None,
);
mock_core(mock_gateway)
}
pub(crate) fn mock_core<SG>(mocks: MocksHolder<SG>) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
mock_core_with_opts(mocks, CoreInitOptionsBuilder::default())
}
pub(crate) fn mock_core_with_opts<SG>(
mocks: MocksHolder<SG>,
opts: CoreInitOptionsBuilder,
) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
let mut core = mock_core_with_opts_no_workers(mocks.sg, opts);
register_mock_workers(&mut core, mocks.mock_pollers.into_values());
core
}
pub(crate) fn register_mock_workers(
core: &mut CoreSDK,
mocks: impl IntoIterator<Item = MockWorker>,
) {
for worker in mocks {
core.reg_worker_sync(worker);
}
}
pub(crate) fn mock_core_with_opts_no_workers<SG>(
sg: SG,
mut opts: CoreInitOptionsBuilder,
) -> CoreSDK
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
CoreSDK::new(sg, opts.gateway_opts(fake_sg_opts()).build().unwrap())
}
pub struct FakeWfResponses {
pub wf_id: String,
pub hist: TestHistoryBuilder,
pub response_batches: Vec<ResponseType>,
pub task_q: String,
}
// TODO: turn this into a builder or make a new one? to make all these different build fns simpler
pub struct MocksHolder<SG> {
sg: SG,
mock_pollers: HashMap<String, MockWorker>,
pub outstanding_task_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>,
}
impl<SG> MocksHolder<SG> {
pub fn worker_cfg(&mut self, task_q: &str, mutator: impl FnOnce(&mut WorkerConfig)) {
if let Some(w) = self.mock_pollers.get_mut(task_q) {
mutator(&mut w.config);
}
}
pub fn take_pollers(self) -> HashMap<String, MockWorker> {
self.mock_pollers
}
}
pub struct MockWorker {
pub wf_poller: BoxedWFPoller,
pub act_poller: Option<BoxedActPoller>,
pub config: WorkerConfig,
}
impl Default for MockWorker {
fn default() -> Self {
MockWorker {
wf_poller: Box::from(mock_poller()),
act_poller: None,
config: WorkerConfig::default_test_q(),
}
}
}
impl MockWorker {
pub fn new(q: &str, wf_poller: BoxedWFPoller) -> Self {
MockWorker {
wf_poller,
act_poller: None,
config: WorkerConfig::default(q),
}
}
pub fn for_queue(q: &str) -> Self {
MockWorker {
wf_poller: Box::from(mock_poller()),
act_poller: None,
config: WorkerConfig::default(q),
}
}
}
impl<SG> MocksHolder<SG>
where
SG: ServerGatewayApis + Send + Sync + 'static,
{
pub fn from_mock_workers(
sg: SG,
mock_workers: impl IntoIterator<Item = MockWorker>,
) -> MocksHolder<SG> {
let mock_pollers = mock_workers
.into_iter()
.map(|w| (w.config.task_queue.clone(), w))
.collect();
MocksHolder {
sg,
mock_pollers,
outstanding_task_map: None,
}
}
/// Uses the provided list of tasks to create a mock poller for the `TEST_Q`
pub fn from_gateway_with_responses(
sg: SG,
wf_tasks: VecDeque<PollWorkflowTaskQueueResponse>,
act_tasks: VecDeque<PollActivityTaskQueueResponse>,
) -> MocksHolder<SG> {
let mut mock_pollers = HashMap::new();
let mock_poller = mock_poller_from_resps(wf_tasks);
let mock_act_poller = mock_poller_from_resps(act_tasks);
mock_pollers.insert(
TEST_Q.to_string(),
MockWorker {
wf_poller: mock_poller,
act_poller: Some(mock_act_poller),
config: WorkerConfigBuilder::default()
.task_queue(TEST_Q)
.build()
.unwrap(),
},
);
MocksHolder {
sg,
mock_pollers,
outstanding_task_map: None,
}
}
}
pub fn mock_poller_from_resps<T>(mut tasks: VecDeque<T>) -> BoxedPoller<T>
where
T: Send + Sync + 'static,
{
let mut mock_poller = mock_poller();
mock_poller.expect_poll().returning(move || {
if let Some(t) = tasks.pop_front() {
Some(Ok(t))
} else {
Some(Err(tonic::Status::out_of_range(
"Ran out of mock responses!",
)))
}
});
Box::new(mock_poller) as BoxedPoller<T>
}
pub fn mock_poller<T>() -> MockPoller<T>
where
T: Send + Sync + 'static,
{
let mut mock_poller = MockPoller::new();
mock_poller.expect_shutdown_box().return_const(());
mock_poller.expect_notify_shutdown().return_const(());
mock_poller
}
pub fn mock_manual_poller<T>() -> MockManualPoller<T>
where
T: Send + Sync + 'static,
{
let mut mock_poller = MockManualPoller::new();
mock_poller
.expect_shutdown_box()
.returning(|| async {}.boxed());
mock_poller.expect_notify_shutdown().return_const(());
mock_poller
}
/// Build a mock server gateway capable of returning multiple different histories for different
/// workflows. It does so by tracking outstanding workflow tasks like is also happening in core
/// (which is unfortunately a bit redundant, we could provide hooks in core but that feels a little
/// nasty). If there is an outstanding task for a given workflow, new chunks of its history are not
/// returned. If there is not, the next batch of history is returned for any workflow without an
/// outstanding task. Outstanding tasks are cleared on completion, failure, or eviction.
///
/// `num_expected_fails` can be provided to set a specific number of expected failed workflow tasks
/// sent to the server.
pub fn build_multihist_mock_sg(
hists: impl IntoIterator<Item = FakeWfResponses>,
enforce_correct_number_of_polls: bool,
num_expected_fails: Option<usize>,
) -> MocksHolder<MockServerGatewayApis> {
let mh = MockPollCfg::new(
hists.into_iter().collect(),
enforce_correct_number_of_polls,
num_expected_fails,
);
build_mock_pollers(mh)
}
/// See [build_multihist_mock_sg] -- one history convenience version
pub fn single_hist_mock_sg(
wf_id: &str,
t: TestHistoryBuilder,
response_batches: impl IntoIterator<Item = impl Into<ResponseType>>,
mock_gateway: MockServerGatewayApis,
enforce_num_polls: bool,
) -> MocksHolder<MockServerGatewayApis> {
let mut mh = MockPollCfg::from_resp_batches(wf_id, t, response_batches, mock_gateway);
mh.enforce_correct_number_of_polls = enforce_num_polls;
build_mock_pollers(mh)
}
pub struct MockPollCfg {
pub hists: Vec<FakeWfResponses>,
pub enforce_correct_number_of_polls: bool,
pub num_expected_fails: Option<usize>,
pub mock_gateway: MockServerGatewayApis,
/// All calls to fail WFTs must match this predicate
pub expect_fail_wft_matcher:
Box<dyn Fn(&TaskToken, &WorkflowTaskFailedCause, &Option<Failure>) -> bool + Send>,
}
impl MockPollCfg {
pub fn new(
hists: Vec<FakeWfResponses>,
enforce_correct_number_of_polls: bool,
num_expected_fails: Option<usize>,
) -> Self {
Self {
hists,
enforce_correct_number_of_polls,
num_expected_fails,
mock_gateway: MockServerGatewayApis::new(),
expect_fail_wft_matcher: Box::new(|_, _, _| true),
}
}
pub fn from_resp_batches(
wf_id: &str,
t: TestHistoryBuilder,
resps: impl IntoIterator<Item = impl Into<ResponseType>>,
mock_gateway: MockServerGatewayApis,
) -> Self {
Self {
hists: vec![FakeWfResponses {
wf_id: wf_id.to_owned(),
hist: t,
response_batches: resps.into_iter().map(Into::into).collect(),
task_q: TEST_Q.to_owned(),
}],
enforce_correct_number_of_polls: true,
num_expected_fails: None,
mock_gateway,
expect_fail_wft_matcher: Box::new(|_, _, _| true),
}
}
}
/// Given an iterable of fake responses, return the mocks & associated data to work with them
pub fn build_mock_pollers(mut cfg: MockPollCfg) -> MocksHolder<MockServerGatewayApis> {
// Maps task queues to maps of wfid -> responses
let mut task_queues_to_resps: HashMap<String, BTreeMap<String, VecDeque<_>>> = HashMap::new();
let outstanding_wf_task_tokens = Arc::new(RwLock::new(BiMap::new()));
let mut correct_num_polls = None;
for hist in cfg.hists {
let full_hist_info = hist.hist.get_full_history_info().unwrap();
// Ensure no response batch is trying to return more tasks than the history contains
for respt in &hist.response_batches {
if let ResponseType::ToTaskNum(rb_wf_num) = respt {
assert!(
*rb_wf_num <= full_hist_info.wf_task_count(),
"Wf task count {} is not <= total task count {}",
rb_wf_num,
full_hist_info.wf_task_count()
);
}
}
// TODO: Fix -- or not? Sticky invalidation could make this pointless anyway
// Verify response batches only ever return longer histories (IE: Are sorted ascending)
// assert!(
// hist.response_batches
// .as_slice()
// .windows(2)
// .all(|w| w[0] <= w[1]),
// "response batches must have increasing wft numbers"
// );
if cfg.enforce_correct_number_of_polls {
*correct_num_polls.get_or_insert(0) += hist.response_batches.len();
}
// Convert history batches into poll responses, while also tracking how many times a given
// history has been returned so we can increment the associated attempt number on the WFT.
// NOTE: This is hard to use properly with the `AfterEveryReply` testing eviction mode.
// Such usages need a history different from other eviction modes which would include
// WFT timeouts or something to simulate the task getting dropped.
let mut attempts_at_task_num = HashMap::new();
let responses: Vec<_> = hist
.response_batches
.iter()
.map(|to_task_num| {
let cur_attempt = attempts_at_task_num.entry(to_task_num).or_insert(1);
let mut r = hist_to_poll_resp(
&hist.hist,
hist.wf_id.to_owned(),
*to_task_num,
hist.task_q.clone(),
);
r.attempt = *cur_attempt;
*cur_attempt += 1;
r
})
.collect();
let tasks = VecDeque::from(responses);
task_queues_to_resps
.entry(hist.task_q)
.or_default()
.insert(hist.wf_id, tasks);
}
let mut mock_pollers = HashMap::new();
for (task_q, mut queue_tasks) in task_queues_to_resps.into_iter() {
let mut mock_poller = mock_poller();
// The poller will return history from any workflow runs that do not have currently
// outstanding tasks.
let outstanding = outstanding_wf_task_tokens.clone();
mock_poller
.expect_poll()
.times(
correct_num_polls
.map::<TimesRange, _>(Into::into)
.unwrap_or_else(|| RangeFull.into()),
)
.returning(move || {
for (_, tasks) in queue_tasks.iter_mut() {
// Must extract run id from a workflow task associated with this workflow
// TODO: Case where run id changes for same workflow id is not handled here
if let Some(t) = tasks.get(0) {
let rid = t.workflow_execution.as_ref().unwrap().run_id.clone();
if !outstanding.read().contains_left(&rid) {
let t = tasks.pop_front().unwrap();
outstanding
.write()
.insert(rid, TaskToken(t.task_token.clone()));
return Some(Ok(t));
}
}
}
Some(Err(tonic::Status::cancelled("No more work to do")))
});
let mw = MockWorker::new(&task_q, Box::from(mock_poller));
mock_pollers.insert(task_q, mw);
}
let outstanding = outstanding_wf_task_tokens.clone();
cfg.mock_gateway
.expect_complete_workflow_task()
.returning(move |comp| {
outstanding.write().remove_by_right(&comp.task_token);
Ok(RespondWorkflowTaskCompletedResponse::default())
});
let outstanding = outstanding_wf_task_tokens.clone();
cfg.mock_gateway
.expect_fail_workflow_task()
.withf(cfg.expect_fail_wft_matcher)
.times(
cfg.num_expected_fails
.map::<TimesRange, _>(Into::into)
.unwrap_or_else(|| RangeFull.into()),
)
.returning(move |tt, _, _| {
outstanding.write().remove_by_right(&tt);
Ok(Default::default())
});
cfg.mock_gateway
.expect_start_workflow()
.returning(|_, _, _, _, _| Ok(Default::default()));
MocksHolder {
sg: cfg.mock_gateway,
mock_pollers,
outstanding_task_map: Some(outstanding_wf_task_tokens),
}
}
pub fn hist_to_poll_resp(
t: &TestHistoryBuilder,
wf_id: String,
response_type: ResponseType,
task_queue: String,
) -> PollWorkflowTaskQueueResponse {
let run_id = t.get_orig_run_id();
let wf = WorkflowExecution {
workflow_id: wf_id,
run_id: run_id.to_string(),
};
let hist_info = match response_type {
ResponseType::ToTaskNum(tn) => t.get_history_info(tn).unwrap(),
ResponseType::AllHistory => t.get_full_history_info().unwrap(),
};
let batch = hist_info.events().to_vec();
let task_token: [u8; 16] = thread_rng().gen();
PollWorkflowTaskQueueResponse {
history: Some(History { events: batch }),
workflow_execution: Some(wf),
task_token: task_token.to_vec(),
workflow_type: Some(WorkflowType {
name: DEFAULT_WORKFLOW_TYPE.to_owned(),
}),
workflow_execution_task_queue: Some(TaskQueue {
name: task_queue,
kind: TaskQueueKind::Normal as i32,
}),
previous_started_event_id: hist_info.previous_started_event_id,
started_event_id: hist_info.workflow_task_started_event_id,
..Default::default()
}
}
pub fn fake_sg_opts() -> ServerGatewayOptions {
ServerGatewayOptions {
target_url: Url::from_str("https://fake").unwrap(),
namespace: "".to_string(),
client_name: "".to_string(),
client_version: "".to_string(),
static_headers: Default::default(),
identity: "".to_string(),
worker_binary_id: "".to_string(),
tls_cfg: None,
retry_config: Default::default(),
}
}
type AsserterWithReply<'a> = (&'a dyn Fn(&WfActivation), wf_activation_completion::Status);
/// This function accepts a list of asserts and replies to workflow activations to run against the
/// provided instance of fake core.
///
/// It handles the business of re-sending the same activation replies over again in the event
/// of eviction or workflow activation failure. Activation failures specifically are only run once,
/// since they clearly can't be returned every time we replay the workflow, or it could never
/// proceed
pub(crate) async fn poll_and_reply<'a>(
core: &'a CoreSDK,
eviction_mode: WorkflowCachingPolicy,
expect_and_reply: &'a [AsserterWithReply<'a>],
) {
poll_and_reply_clears_outstanding_evicts(core, None, eviction_mode, expect_and_reply).await
}
pub(crate) async fn poll_and_reply_clears_outstanding_evicts<'a>(
core: &'a CoreSDK,
outstanding_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>,
eviction_mode: WorkflowCachingPolicy,
expect_and_reply: &'a [AsserterWithReply<'a>],
) |
pub(crate) fn gen_assert_and_reply(
asserter: &dyn Fn(&WfActivation),
reply_commands: Vec<workflow_command::Variant>,
) -> AsserterWithReply<'_> {
(
asserter,
workflow_completion::Success::from_variants(reply_commands).into(),
)
}
pub(crate) fn gen_assert_and_fail(asserter: &dyn Fn(&WfActivation)) -> AsserterWithReply<'_> {
(
asserter,
workflow_completion::Failure {
failure: Some(Failure {
message: "Intentional test failure".to_string(),
..Default::default()
}),
}
.into(),
)
}
/// Generate asserts for [poll_and_reply] by passing patterns to match against the job list
#[macro_export]
macro_rules! job_assert {
($($pat:pat),+) => {
|res| {
assert_matches!(
res.jobs.as_slice(),
[$(WfActivationJob {
variant: Some($pat),
}),+]
);
}
};
}
| {
let mut evictions = 0;
let expected_evictions = expect_and_reply.len() - 1;
let mut executed_failures = HashSet::new();
let expected_fail_count = expect_and_reply
.iter()
.filter(|(_, reply)| !reply.is_success())
.count();
'outer: loop {
let expect_iter = expect_and_reply.iter();
for (i, interaction) in expect_iter.enumerate() {
let (asserter, reply) = interaction;
let complete_is_failure = !reply.is_success();
// Only send activation failures once
if executed_failures.contains(&i) {
continue;
}
let mut res = core.poll_workflow_activation(TEST_Q).await.unwrap();
let contains_eviction = res.eviction_index();
if let Some(eviction_job_ix) = contains_eviction {
// If the job list has an eviction, make sure it was the last item in the list
// then remove it, since in the tests we don't explicitly specify evict assertions
assert_eq!(
eviction_job_ix,
res.jobs.len() - 1,
"Eviction job was not last job in job list"
);
res.jobs.remove(eviction_job_ix);
if let Some(omap) = outstanding_map.as_ref() {
omap.write().remove_by_left(&res.run_id);
}
}
// TODO: Can remove this if?
if !res.jobs.is_empty() {
asserter(&res);
}
let reply = if res.jobs.is_empty() {
// Just an eviction
WfActivationCompletion::empty(TEST_Q, res.run_id.clone())
} else {
// Eviction plus some work, we still want to issue the reply
WfActivationCompletion {
task_queue: TEST_Q.to_string(),
run_id: res.run_id.clone(),
status: Some(reply.clone()),
}
};
core.complete_workflow_activation(reply).await.unwrap();
// Restart assertions from the beginning if it was an eviction
if contains_eviction.is_some() {
continue 'outer;
}
if complete_is_failure {
executed_failures.insert(i);
}
match eviction_mode {
WorkflowCachingPolicy::Sticky { .. } => unimplemented!(),
WorkflowCachingPolicy::NonSticky => (),
WorkflowCachingPolicy::AfterEveryReply => {
if evictions < expected_evictions {
core.request_workflow_eviction(TEST_Q, &res.run_id);
evictions += 1;
}
}
}
}
break;
}
assert_eq!(expected_fail_count, executed_failures.len());
// TODO: Really need a worker abstraction for testing
// assert_eq!(core.wft_manager.outstanding_wft(), 0);
} | identifier_body |
main.rs | #[macro_use] extern crate clap;
extern crate curl;
extern crate formdata;
extern crate hex;
extern crate hmac;
extern crate hyper;
#[macro_use] extern crate log;
extern crate pipe;
extern crate rand;
extern crate sha_1;
#[macro_use] extern crate serde_json;
#[macro_use] extern crate serde_derive;
extern crate stderrlog;
extern crate url;
use clap::{Arg, App, SubCommand};
use formdata::{FormData, FilePart, write_formdata};
use hmac::{Hmac, Mac};
use log::LogLevel;
use rand::{thread_rng, Rng};
use hex::ToHex;
use sha_1::Sha1;
use std::env;
use std::error::Error;
use std::fmt; | use std::io::{BufReader, BufWriter, Read};
use std::str;
use std::thread::spawn;
use url::Url;
use curl::easy::{Easy, List};
const FORM_MAX_FILE_SIZE: u64 = 1099511627776;
const FORM_MAX_FILE_COUNT: usize = 1048576;
const FORM_EXPIRES: u64 = 4102444800;
#[derive(Debug)]
struct OpenStackConfig {
auth_url: String,
project_domain: String,
project_name: String,
user_domain: String,
username: String,
password: String,
region_name: String
}
#[derive(Debug)]
struct SwiftAuthInfo {
token: String,
url: String
}
#[derive(Debug, Serialize, Deserialize)]
struct FormTemplate {
url: String,
redirect: String,
max_file_size: u64,
max_file_count: usize,
expires: u64,
signature: String
}
#[derive(Debug)]
struct MissingToken;
impl fmt::Display for MissingToken {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Token not found in Keystone response headers")
}
}
impl Error for MissingToken {
fn description(&self) -> &str {
"Token not found in Keystone response headers"
}
fn cause(&self) -> Option<&Error> {
None
}
}
#[derive(Debug)]
struct MissingSwiftUrl;
impl fmt::Display for MissingSwiftUrl {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Swift service endpoint URL not found in Keystone JSON catalog")
}
}
impl Error for MissingSwiftUrl {
fn description(&self) -> &str {
"Swift service endpoint URL not found in Keystone JSON catalog"
}
fn cause(&self) -> Option<&Error> {
None
}
}
#[derive(Debug)]
struct MissingTempUrlKey;
impl fmt::Display for MissingTempUrlKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Temp URL key not found in Swift response headers")
}
}
impl Error for MissingTempUrlKey {
fn description(&self) -> &str {
"Temp URL key not found in Swift response headers"
}
fn cause(&self) -> Option<&Error> {
None
}
}
#[derive(Debug)]
struct UnableToCreateContainer;
impl fmt::Display for UnableToCreateContainer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Unable to create Swift container")
}
}
impl Error for UnableToCreateContainer {
fn description(&self) -> &str {
"Unable to create Swift container"
}
fn cause(&self) -> Option<&Error> {
None
}
}
fn main() {
let matches =
App::new("backup2swift")
.version(crate_version!())
.arg(Arg::with_name("verbosity")
.short("v")
.multiple(true)
.help("Increase message verbosity"))
.arg(Arg::with_name("quiet")
.short("q")
.help("Silence all output"))
.subcommand(SubCommand::with_name("setup")
.about("Setup container and create signed form template")
.arg(Arg::with_name("container")
.takes_value(true)
.required(true)
.help("destination container name")))
.subcommand(SubCommand::with_name("backup")
.about("Backup files to container")
.arg(Arg::with_name("form_template")
.short("c")
.long("form-template")
.required(true)
.takes_value(true)
.help("signed POST form template (JSON) created with \"setup\""))
.arg(Arg::with_name("delete_after")
.short("t")
.long("delete-after")
.takes_value(true)
.help("seconds to keep file for"))
.arg(Arg::with_name("files")
.takes_value(true)
.multiple(true)
.required(true)
.help("destination container name")))
.get_matches();
let verbose = matches.occurrences_of("verbosity") as usize;
let quiet = matches.is_present("quiet");
stderrlog::new()
.module(module_path!())
.quiet(quiet)
.verbosity(verbose)
.init()
.unwrap();
if let Some(matches) = matches.subcommand_matches("setup") {
setup(matches.value_of("container").unwrap());
} else if let Some(matches) = matches.subcommand_matches("backup") {
let form_template_file = Path::new(matches.value_of("form_template").unwrap());
assert!(form_template_file.is_file());
let expire_after = value_t!(matches, "delete_after", u64).ok();
let file_paths = matches.values_of_lossy("files").unwrap();
let files: &Vec<&Path> = & file_paths.iter().map(|f| Path::new(f)).collect::<Vec<&Path>>();
assert!(files.into_iter().all(|f: &&Path| f.is_file()));
backup(form_template_file, expire_after, files);
} else {
println!("try 'backup2swift --help' for more information");
::std::process::exit(2)
}
}
fn setup(container_name: &str) -> () {
let settings = get_os_settings();
let auth_info = get_token(settings).unwrap();
let temp_url_key =
get_temp_url_key(&auth_info)
.or_else(|_| set_temp_url_key(&auth_info, &create_random_key()))
.unwrap();
ensure_container_exists(&auth_info, container_name).unwrap();
let form_template = backup_config(&auth_info, container_name, &temp_url_key);
println!("{}", serde_json::to_string_pretty(&form_template).unwrap());
}
fn backup<'a>(
form_template_file: &'a Path,
delete_after: Option<u64>,
files: &'a Vec<&Path>) -> () {
let form_template = read_form_template_file(form_template_file).unwrap();
let file_count = files.len();
info!("{:?}", form_template);
assert!(form_template.max_file_count >= file_count);
let file_parts: Vec<(String, FilePart)> =
files.into_iter()
.zip(std::ops::Range { start: 0, end: file_count })
.map(|(f,i): (&&Path, usize)| {
let mut headers = hyper::header::Headers::new();
headers.append_raw("Content-Type", "application/octet-stream".to_owned().into_bytes());
let output: (String, FilePart) = (
format!("file{}", i),
formdata::FilePart::new(headers, f)
);
output
})
.collect::<Vec<(String, FilePart)>>();
info!("{:?}", file_parts);
let mut fields = vec![
("redirect".to_owned(), form_template.redirect.to_owned()),
("max_file_size".to_owned(), format!("{}", form_template.max_file_size)),
("max_file_count".to_owned(), format!("{}", form_template.max_file_count)),
("expires".to_owned(), format!("{}", form_template.expires)),
("signature".to_owned(), format!("{}", form_template.signature))
];
match delete_after {
Some(n) => fields.push(("x_delete_after".to_owned(), format!("{}", n))),
None => ()
};
let form_data = FormData { fields: fields, files: file_parts };
send_data(form_template, form_data).unwrap();
}
fn get_env(name: &str) -> String {
env::var(name).expect(& format!("{} environment variable not defined", name))
}
fn get_os_settings() -> OpenStackConfig {
let auth_url = get_env("OS_AUTH_URL");
info!("OS_AUTH_URL: {}", &auth_url);
let user_domain = get_env("OS_USER_DOMAIN_NAME");
info!("OS_PROJECT_NAME: {}", &user_domain);
let username = get_env("OS_USERNAME");
info!("OS_USERNAME: {}", &username);
let project_domain = get_env("OS_PROJECT_DOMAIN_NAME");
info!("OS_PROJECT_NAME: {}", &project_domain);
let project_name = get_env("OS_PROJECT_NAME");
info!("OS_PROJECT_NAME: {}", &project_name);
let password = get_env("OS_PASSWORD");
info!("OS_PASSWORD: {}", &("*".repeat(password.len())));
let region_name = get_env("OS_REGION_NAME");
info!("OS_REGION_NAME: {}", ®ion_name);
OpenStackConfig {
auth_url,
user_domain,
username,
project_domain,
project_name,
password,
region_name
}
}
fn get_token(config: OpenStackConfig) -> Result<SwiftAuthInfo, Box<Error>> {
let mut dst = Vec::new();
let mut easy = Easy::new();
let json = json!({
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
"domain": {
"name": config.user_domain
},
"name": config.username,
"password": config.password
}
}
},
"scope": {
"project": {
"domain": {
"name": config.project_domain
},
"name": config.project_name
}
}
}
});
let json_bytes = serde_json::to_vec_pretty(&json).unwrap();
let mut req_reader = BufReader::new(json_bytes.as_slice());
let mut headers = List::new();
let mut opt_token: Option<String> = None;
headers.append("Content-Type: application/json")?;
headers.append(format!("Content-Length: {}", json_bytes.len()).as_ref())?;
headers.append("Accept: application/json")?;
headers.append("Expect: ")?;
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.post(true)?;
easy.url(& format!("{}auth/tokens", config.auth_url))?;
easy.http_headers(headers)?;
{
let mut transfer = easy.transfer();
transfer.header_function(|header| {
let mut splitter = str::from_utf8(header).unwrap().splitn(2, ": ");
match splitter.next() {
Some(name) if name.to_lowercase() == "x-subject-token" => {
splitter.next().map(|s| s.to_owned()).map(|t| {
opt_token = Some(t.trim().to_owned());
}); ()
}
_ => ()
}
true
})?;
transfer.read_function(|into| {
Ok(req_reader.read(into).unwrap())
})?;
transfer.write_function(|data| {
dst.extend_from_slice(data);
Ok(data.len())
})?;
transfer.perform()?
}
match opt_token {
Some(token) => {
let response_json: Result<serde_json::Value, serde_json::Error> =
serde_json::from_slice(dst.as_slice());
response_json
.map(|j| {
j.get("token")
.and_then(|v| v.get("catalog"))
.and_then(|v| v.as_array())
.and_then(|catalog| get_swift_endpoint(catalog.iter(), config.region_name.to_owned()))
})
.map_err(|e| From::from(e))
.and_then(|opt_url| {
opt_url
.map(|url| SwiftAuthInfo { token, url })
.ok_or(MissingSwiftUrl)
.map_err(|e| From::from(e))
})
},
None => Err(From::from(MissingToken))
}
}
fn get_swift_endpoint<'a,I>(
catalog: I,
region_name: String) -> Option<String> where I: Iterator<Item=&'a serde_json::Value> {
catalog
.filter_map(|item| {
match item.get("type").and_then(|v| v.as_str()) {
Some(t) if t == "object-store" =>
item.get("endpoints").and_then(|v| v.as_array()).map(|v| v.into_iter()),
_ => None
}
})
.flat_map(|endpoints| endpoints)
.find(|endpoint| {
(match endpoint.get("interface").and_then(|v| v.as_str()) {
Some(i) if i == "public" => true,
_ => false
}) && (
match endpoint.get("region").and_then(|v| v.as_str()) {
Some(region) if region == region_name => true,
_ => false
})
})
.and_then(|endpoint| endpoint.get("url").and_then(|v| v.as_str())).map(|s| s.to_owned())
}
fn get_temp_url_key(info: &SwiftAuthInfo) -> Result<String, Box<Error>> {
let mut opt_temp_url_key: Option<String> = None;
let mut easy = Easy::new();
let mut headers = List::new();
headers.append(& format!("X-Auth-Token: {}", info.token))?;
headers.append("Expect: ")?;
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.nobody(true)?;
easy.url(& format!("{}", info.url))?;
easy.http_headers(headers)?;
{
let mut transfer = easy.transfer();
transfer.header_function(|header| {
let mut splitter = str::from_utf8(header).unwrap().splitn(2, ": ");
match splitter.next() {
Some(name) if name.to_lowercase() == "x-account-meta-temp-url-key" => {
splitter.next().map(|s| s.to_owned()).map(|t| {
opt_temp_url_key = Some(t.trim().to_owned());
}); ()
}
_ => ()
}
true
})?;
transfer.perform()?
}
opt_temp_url_key
.ok_or(MissingToken)
.map_err(|e| From::from(e))
}
fn create_random_key() -> String {
thread_rng().gen_ascii_chars().take(32).collect()
}
fn set_temp_url_key(info: &SwiftAuthInfo, temp_url_key: &str) -> Result<String, Box<Error>> {
let mut easy = Easy::new();
let mut headers = List::new();
headers.append(& format!("X-Auth-Token: {}", info.token))?;
headers.append(& format!("X-Account-Meta-Temp-Url-Key: {}", temp_url_key))?;
headers.append("Expect: ")?;
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.post(true)?;
easy.url(& format!("{}", info.url))?;
easy.http_headers(headers)?;
easy.perform()?;
easy.response_code()
.map_err(|e| From::from(e))
.and_then(|code| {
match code {
200...299 => Ok(temp_url_key.to_owned()),
_ => Err(From::from(MissingTempUrlKey))
}
})
}
fn ensure_container_exists(info: &SwiftAuthInfo, container: &str) -> Result<(), Box<Error>> {
let mut easy = Easy::new();
let mut headers = List::new();
headers.append(& format!("X-Auth-Token: {}", info.token))?;
headers.append("Expect: ")?;
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.nobody(true)?;
easy.url(& format!("{}/{}", info.url, container))?;
easy.http_headers(headers)?;
easy.perform()?;
easy.response_code()
.map_err(|e| From::from(e))
.and_then(|code| {
match code {
200...299 => Ok(()),
_ => create_container(info, container)
}
})
}
fn create_container(info: &SwiftAuthInfo, container: &str) -> Result<(), Box<Error>> {
let mut easy = Easy::new();
let mut headers = List::new();
headers.append("Content-Length: 0")?;
headers.append(& format!("X-Auth-Token: {}", info.token))?;
headers.append("Expect: ")?;
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.put(true)?;
easy.url(& format!("{}/{}", info.url, container))?;
easy.http_headers(headers)?;
easy.perform()?;
easy.response_code()
.map_err(|e| From::from(e))
.and_then(|response_code| {
match response_code {
200...299 => Ok(()),
_ => Err(From::from(UnableToCreateContainer))
}
})
}
fn form_post_url(info: &SwiftAuthInfo, container: &str) -> Url {
Url::parse(& format!("{}/{}/", info.url, container)).unwrap()
}
fn signature(
signature_path: &str,
redirect: &str,
max_file_size: &u64,
max_file_count: &usize,
expires: &u64,
temp_url_key: &str) -> String {
let input = format!(
"{}\n{}\n{}\n{}\n{}",
signature_path,
redirect,
max_file_size,
max_file_count,
expires
);
// Create `Mac` trait implementation, namely HMAC-SHA256
let mut mac = Hmac::<Sha1>::new(temp_url_key.as_bytes());
mac.input(input.as_bytes());
mac.result().code().to_hex()
}
fn backup_config(info: &SwiftAuthInfo, container: &str, temp_url_key: &str) -> FormTemplate {
let url: Url = form_post_url(info, container);
let redirect = "";
let max_file_size = FORM_MAX_FILE_SIZE;
let max_file_count = FORM_MAX_FILE_COUNT;
let expires = FORM_EXPIRES;
FormTemplate {
url: url.as_str().to_owned(),
redirect: redirect.to_owned(),
max_file_size: max_file_size,
max_file_count: max_file_count,
expires: expires,
signature: signature(
url.path(),
redirect,
&max_file_size,
&max_file_count,
&expires,
temp_url_key)
}
}
fn read_form_template_file<'a>(config_file: &'a Path) -> Result<FormTemplate, Box<Error>> {
let f = File::open(config_file)?;
let rdr = BufReader::new(f);
serde_json::from_reader(rdr).map_err(|e| From::from(e))
}
fn send_data(form_template: FormTemplate, form_data: FormData) -> Result<(), Box<Error>> {
let mut headers = List::new();
let boundary_str: &str = & {
let rand_str: String = thread_rng().gen_ascii_chars().take(20).collect();
"-".repeat(20).to_string() + &rand_str
};
let boundary: Vec<u8> = boundary_str.to_owned().into_bytes();
let mut sink = std::io::sink();
let content_length = write_formdata(&mut sink, &boundary, &form_data)?;
headers.append(& format!("Content-Length: {}", content_length))?;
headers.append(& format!("Content-Type: multipart/form-data; boundary={}", boundary_str))?;
let mut easy = Easy::new();
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.post(true)?;
easy.url(& form_template.url)?;
easy.http_headers(headers)?;
{
const BUFFER_SIZE: usize = 524288;
let (r, w) = pipe::pipe();
let mut br = BufReader::with_capacity(BUFFER_SIZE, r);
let mut bw = BufWriter::with_capacity(BUFFER_SIZE, w);
spawn(move || write_formdata(&mut bw, &boundary, &form_data));
let mut transfer = easy.transfer();
transfer.read_function(|into| {
Ok(br.read(into).unwrap_or(0))
})?;
transfer.perform()?;
}
Ok(())
} | use std::fs::File;
use std::path::Path; | random_line_split |
main.rs | #[macro_use] extern crate clap;
extern crate curl;
extern crate formdata;
extern crate hex;
extern crate hmac;
extern crate hyper;
#[macro_use] extern crate log;
extern crate pipe;
extern crate rand;
extern crate sha_1;
#[macro_use] extern crate serde_json;
#[macro_use] extern crate serde_derive;
extern crate stderrlog;
extern crate url;
use clap::{Arg, App, SubCommand};
use formdata::{FormData, FilePart, write_formdata};
use hmac::{Hmac, Mac};
use log::LogLevel;
use rand::{thread_rng, Rng};
use hex::ToHex;
use sha_1::Sha1;
use std::env;
use std::error::Error;
use std::fmt;
use std::fs::File;
use std::path::Path;
use std::io::{BufReader, BufWriter, Read};
use std::str;
use std::thread::spawn;
use url::Url;
use curl::easy::{Easy, List};
const FORM_MAX_FILE_SIZE: u64 = 1099511627776;
const FORM_MAX_FILE_COUNT: usize = 1048576;
const FORM_EXPIRES: u64 = 4102444800;
#[derive(Debug)]
struct OpenStackConfig {
auth_url: String,
project_domain: String,
project_name: String,
user_domain: String,
username: String,
password: String,
region_name: String
}
#[derive(Debug)]
struct SwiftAuthInfo {
token: String,
url: String
}
#[derive(Debug, Serialize, Deserialize)]
struct | {
url: String,
redirect: String,
max_file_size: u64,
max_file_count: usize,
expires: u64,
signature: String
}
#[derive(Debug)]
struct MissingToken;
impl fmt::Display for MissingToken {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Token not found in Keystone response headers")
}
}
impl Error for MissingToken {
fn description(&self) -> &str {
"Token not found in Keystone response headers"
}
fn cause(&self) -> Option<&Error> {
None
}
}
#[derive(Debug)]
struct MissingSwiftUrl;
impl fmt::Display for MissingSwiftUrl {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Swift service endpoint URL not found in Keystone JSON catalog")
}
}
impl Error for MissingSwiftUrl {
fn description(&self) -> &str {
"Swift service endpoint URL not found in Keystone JSON catalog"
}
fn cause(&self) -> Option<&Error> {
None
}
}
#[derive(Debug)]
struct MissingTempUrlKey;
impl fmt::Display for MissingTempUrlKey {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Temp URL key not found in Swift response headers")
}
}
impl Error for MissingTempUrlKey {
fn description(&self) -> &str {
"Temp URL key not found in Swift response headers"
}
fn cause(&self) -> Option<&Error> {
None
}
}
#[derive(Debug)]
struct UnableToCreateContainer;
impl fmt::Display for UnableToCreateContainer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Unable to create Swift container")
}
}
impl Error for UnableToCreateContainer {
fn description(&self) -> &str {
"Unable to create Swift container"
}
fn cause(&self) -> Option<&Error> {
None
}
}
fn main() {
let matches =
App::new("backup2swift")
.version(crate_version!())
.arg(Arg::with_name("verbosity")
.short("v")
.multiple(true)
.help("Increase message verbosity"))
.arg(Arg::with_name("quiet")
.short("q")
.help("Silence all output"))
.subcommand(SubCommand::with_name("setup")
.about("Setup container and create signed form template")
.arg(Arg::with_name("container")
.takes_value(true)
.required(true)
.help("destination container name")))
.subcommand(SubCommand::with_name("backup")
.about("Backup files to container")
.arg(Arg::with_name("form_template")
.short("c")
.long("form-template")
.required(true)
.takes_value(true)
.help("signed POST form template (JSON) created with \"setup\""))
.arg(Arg::with_name("delete_after")
.short("t")
.long("delete-after")
.takes_value(true)
.help("seconds to keep file for"))
.arg(Arg::with_name("files")
.takes_value(true)
.multiple(true)
.required(true)
.help("destination container name")))
.get_matches();
let verbose = matches.occurrences_of("verbosity") as usize;
let quiet = matches.is_present("quiet");
stderrlog::new()
.module(module_path!())
.quiet(quiet)
.verbosity(verbose)
.init()
.unwrap();
if let Some(matches) = matches.subcommand_matches("setup") {
setup(matches.value_of("container").unwrap());
} else if let Some(matches) = matches.subcommand_matches("backup") {
let form_template_file = Path::new(matches.value_of("form_template").unwrap());
assert!(form_template_file.is_file());
let expire_after = value_t!(matches, "delete_after", u64).ok();
let file_paths = matches.values_of_lossy("files").unwrap();
let files: &Vec<&Path> = & file_paths.iter().map(|f| Path::new(f)).collect::<Vec<&Path>>();
assert!(files.into_iter().all(|f: &&Path| f.is_file()));
backup(form_template_file, expire_after, files);
} else {
println!("try 'backup2swift --help' for more information");
::std::process::exit(2)
}
}
fn setup(container_name: &str) -> () {
let settings = get_os_settings();
let auth_info = get_token(settings).unwrap();
let temp_url_key =
get_temp_url_key(&auth_info)
.or_else(|_| set_temp_url_key(&auth_info, &create_random_key()))
.unwrap();
ensure_container_exists(&auth_info, container_name).unwrap();
let form_template = backup_config(&auth_info, container_name, &temp_url_key);
println!("{}", serde_json::to_string_pretty(&form_template).unwrap());
}
fn backup<'a>(
form_template_file: &'a Path,
delete_after: Option<u64>,
files: &'a Vec<&Path>) -> () {
let form_template = read_form_template_file(form_template_file).unwrap();
let file_count = files.len();
info!("{:?}", form_template);
assert!(form_template.max_file_count >= file_count);
let file_parts: Vec<(String, FilePart)> =
files.into_iter()
.zip(std::ops::Range { start: 0, end: file_count })
.map(|(f,i): (&&Path, usize)| {
let mut headers = hyper::header::Headers::new();
headers.append_raw("Content-Type", "application/octet-stream".to_owned().into_bytes());
let output: (String, FilePart) = (
format!("file{}", i),
formdata::FilePart::new(headers, f)
);
output
})
.collect::<Vec<(String, FilePart)>>();
info!("{:?}", file_parts);
let mut fields = vec![
("redirect".to_owned(), form_template.redirect.to_owned()),
("max_file_size".to_owned(), format!("{}", form_template.max_file_size)),
("max_file_count".to_owned(), format!("{}", form_template.max_file_count)),
("expires".to_owned(), format!("{}", form_template.expires)),
("signature".to_owned(), format!("{}", form_template.signature))
];
match delete_after {
Some(n) => fields.push(("x_delete_after".to_owned(), format!("{}", n))),
None => ()
};
let form_data = FormData { fields: fields, files: file_parts };
send_data(form_template, form_data).unwrap();
}
fn get_env(name: &str) -> String {
env::var(name).expect(& format!("{} environment variable not defined", name))
}
fn get_os_settings() -> OpenStackConfig {
let auth_url = get_env("OS_AUTH_URL");
info!("OS_AUTH_URL: {}", &auth_url);
let user_domain = get_env("OS_USER_DOMAIN_NAME");
info!("OS_PROJECT_NAME: {}", &user_domain);
let username = get_env("OS_USERNAME");
info!("OS_USERNAME: {}", &username);
let project_domain = get_env("OS_PROJECT_DOMAIN_NAME");
info!("OS_PROJECT_NAME: {}", &project_domain);
let project_name = get_env("OS_PROJECT_NAME");
info!("OS_PROJECT_NAME: {}", &project_name);
let password = get_env("OS_PASSWORD");
info!("OS_PASSWORD: {}", &("*".repeat(password.len())));
let region_name = get_env("OS_REGION_NAME");
info!("OS_REGION_NAME: {}", ®ion_name);
OpenStackConfig {
auth_url,
user_domain,
username,
project_domain,
project_name,
password,
region_name
}
}
fn get_token(config: OpenStackConfig) -> Result<SwiftAuthInfo, Box<Error>> {
let mut dst = Vec::new();
let mut easy = Easy::new();
let json = json!({
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
"domain": {
"name": config.user_domain
},
"name": config.username,
"password": config.password
}
}
},
"scope": {
"project": {
"domain": {
"name": config.project_domain
},
"name": config.project_name
}
}
}
});
let json_bytes = serde_json::to_vec_pretty(&json).unwrap();
let mut req_reader = BufReader::new(json_bytes.as_slice());
let mut headers = List::new();
let mut opt_token: Option<String> = None;
headers.append("Content-Type: application/json")?;
headers.append(format!("Content-Length: {}", json_bytes.len()).as_ref())?;
headers.append("Accept: application/json")?;
headers.append("Expect: ")?;
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.post(true)?;
easy.url(& format!("{}auth/tokens", config.auth_url))?;
easy.http_headers(headers)?;
{
let mut transfer = easy.transfer();
transfer.header_function(|header| {
let mut splitter = str::from_utf8(header).unwrap().splitn(2, ": ");
match splitter.next() {
Some(name) if name.to_lowercase() == "x-subject-token" => {
splitter.next().map(|s| s.to_owned()).map(|t| {
opt_token = Some(t.trim().to_owned());
}); ()
}
_ => ()
}
true
})?;
transfer.read_function(|into| {
Ok(req_reader.read(into).unwrap())
})?;
transfer.write_function(|data| {
dst.extend_from_slice(data);
Ok(data.len())
})?;
transfer.perform()?
}
match opt_token {
Some(token) => {
let response_json: Result<serde_json::Value, serde_json::Error> =
serde_json::from_slice(dst.as_slice());
response_json
.map(|j| {
j.get("token")
.and_then(|v| v.get("catalog"))
.and_then(|v| v.as_array())
.and_then(|catalog| get_swift_endpoint(catalog.iter(), config.region_name.to_owned()))
})
.map_err(|e| From::from(e))
.and_then(|opt_url| {
opt_url
.map(|url| SwiftAuthInfo { token, url })
.ok_or(MissingSwiftUrl)
.map_err(|e| From::from(e))
})
},
None => Err(From::from(MissingToken))
}
}
fn get_swift_endpoint<'a,I>(
catalog: I,
region_name: String) -> Option<String> where I: Iterator<Item=&'a serde_json::Value> {
catalog
.filter_map(|item| {
match item.get("type").and_then(|v| v.as_str()) {
Some(t) if t == "object-store" =>
item.get("endpoints").and_then(|v| v.as_array()).map(|v| v.into_iter()),
_ => None
}
})
.flat_map(|endpoints| endpoints)
.find(|endpoint| {
(match endpoint.get("interface").and_then(|v| v.as_str()) {
Some(i) if i == "public" => true,
_ => false
}) && (
match endpoint.get("region").and_then(|v| v.as_str()) {
Some(region) if region == region_name => true,
_ => false
})
})
.and_then(|endpoint| endpoint.get("url").and_then(|v| v.as_str())).map(|s| s.to_owned())
}
fn get_temp_url_key(info: &SwiftAuthInfo) -> Result<String, Box<Error>> {
let mut opt_temp_url_key: Option<String> = None;
let mut easy = Easy::new();
let mut headers = List::new();
headers.append(& format!("X-Auth-Token: {}", info.token))?;
headers.append("Expect: ")?;
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.nobody(true)?;
easy.url(& format!("{}", info.url))?;
easy.http_headers(headers)?;
{
let mut transfer = easy.transfer();
transfer.header_function(|header| {
let mut splitter = str::from_utf8(header).unwrap().splitn(2, ": ");
match splitter.next() {
Some(name) if name.to_lowercase() == "x-account-meta-temp-url-key" => {
splitter.next().map(|s| s.to_owned()).map(|t| {
opt_temp_url_key = Some(t.trim().to_owned());
}); ()
}
_ => ()
}
true
})?;
transfer.perform()?
}
opt_temp_url_key
.ok_or(MissingToken)
.map_err(|e| From::from(e))
}
fn create_random_key() -> String {
thread_rng().gen_ascii_chars().take(32).collect()
}
fn set_temp_url_key(info: &SwiftAuthInfo, temp_url_key: &str) -> Result<String, Box<Error>> {
let mut easy = Easy::new();
let mut headers = List::new();
headers.append(& format!("X-Auth-Token: {}", info.token))?;
headers.append(& format!("X-Account-Meta-Temp-Url-Key: {}", temp_url_key))?;
headers.append("Expect: ")?;
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.post(true)?;
easy.url(& format!("{}", info.url))?;
easy.http_headers(headers)?;
easy.perform()?;
easy.response_code()
.map_err(|e| From::from(e))
.and_then(|code| {
match code {
200...299 => Ok(temp_url_key.to_owned()),
_ => Err(From::from(MissingTempUrlKey))
}
})
}
fn ensure_container_exists(info: &SwiftAuthInfo, container: &str) -> Result<(), Box<Error>> {
let mut easy = Easy::new();
let mut headers = List::new();
headers.append(& format!("X-Auth-Token: {}", info.token))?;
headers.append("Expect: ")?;
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.nobody(true)?;
easy.url(& format!("{}/{}", info.url, container))?;
easy.http_headers(headers)?;
easy.perform()?;
easy.response_code()
.map_err(|e| From::from(e))
.and_then(|code| {
match code {
200...299 => Ok(()),
_ => create_container(info, container)
}
})
}
fn create_container(info: &SwiftAuthInfo, container: &str) -> Result<(), Box<Error>> {
let mut easy = Easy::new();
let mut headers = List::new();
headers.append("Content-Length: 0")?;
headers.append(& format!("X-Auth-Token: {}", info.token))?;
headers.append("Expect: ")?;
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.put(true)?;
easy.url(& format!("{}/{}", info.url, container))?;
easy.http_headers(headers)?;
easy.perform()?;
easy.response_code()
.map_err(|e| From::from(e))
.and_then(|response_code| {
match response_code {
200...299 => Ok(()),
_ => Err(From::from(UnableToCreateContainer))
}
})
}
fn form_post_url(info: &SwiftAuthInfo, container: &str) -> Url {
Url::parse(& format!("{}/{}/", info.url, container)).unwrap()
}
fn signature(
signature_path: &str,
redirect: &str,
max_file_size: &u64,
max_file_count: &usize,
expires: &u64,
temp_url_key: &str) -> String {
let input = format!(
"{}\n{}\n{}\n{}\n{}",
signature_path,
redirect,
max_file_size,
max_file_count,
expires
);
// Create `Mac` trait implementation, namely HMAC-SHA256
let mut mac = Hmac::<Sha1>::new(temp_url_key.as_bytes());
mac.input(input.as_bytes());
mac.result().code().to_hex()
}
fn backup_config(info: &SwiftAuthInfo, container: &str, temp_url_key: &str) -> FormTemplate {
let url: Url = form_post_url(info, container);
let redirect = "";
let max_file_size = FORM_MAX_FILE_SIZE;
let max_file_count = FORM_MAX_FILE_COUNT;
let expires = FORM_EXPIRES;
FormTemplate {
url: url.as_str().to_owned(),
redirect: redirect.to_owned(),
max_file_size: max_file_size,
max_file_count: max_file_count,
expires: expires,
signature: signature(
url.path(),
redirect,
&max_file_size,
&max_file_count,
&expires,
temp_url_key)
}
}
fn read_form_template_file<'a>(config_file: &'a Path) -> Result<FormTemplate, Box<Error>> {
let f = File::open(config_file)?;
let rdr = BufReader::new(f);
serde_json::from_reader(rdr).map_err(|e| From::from(e))
}
fn send_data(form_template: FormTemplate, form_data: FormData) -> Result<(), Box<Error>> {
let mut headers = List::new();
let boundary_str: &str = & {
let rand_str: String = thread_rng().gen_ascii_chars().take(20).collect();
"-".repeat(20).to_string() + &rand_str
};
let boundary: Vec<u8> = boundary_str.to_owned().into_bytes();
let mut sink = std::io::sink();
let content_length = write_formdata(&mut sink, &boundary, &form_data)?;
headers.append(& format!("Content-Length: {}", content_length))?;
headers.append(& format!("Content-Type: multipart/form-data; boundary={}", boundary_str))?;
let mut easy = Easy::new();
easy.verbose(log_enabled!(LogLevel::Debug))?;
easy.post(true)?;
easy.url(& form_template.url)?;
easy.http_headers(headers)?;
{
const BUFFER_SIZE: usize = 524288;
let (r, w) = pipe::pipe();
let mut br = BufReader::with_capacity(BUFFER_SIZE, r);
let mut bw = BufWriter::with_capacity(BUFFER_SIZE, w);
spawn(move || write_formdata(&mut bw, &boundary, &form_data));
let mut transfer = easy.transfer();
transfer.read_function(|into| {
Ok(br.read(into).unwrap_or(0))
})?;
transfer.perform()?;
}
Ok(())
}
| FormTemplate | identifier_name |
transaction_builder_test.go | package cnlib
import "testing"
import "github.com/stretchr/testify/assert"
func TestTransactionBuilderBuildsTxCorrect(t *testing.T) {
inputPath := NewDerivationPath(BaseCoinBip49MainNet, 1, 53)
utxo := NewUTXO("1a08dafe993fdc17fdc661988c88f97a9974013291e759b9b5766b8e97c78f87", 1, 2788424, inputPath, nil, true)
amount := 13584
feeAmount := 3000
changeAmount := 2771840
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 56)
toAddress := "3BgxxADLtnoKu9oytQiiVzYUqvo8weCVy9"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 539943)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101878fc7978e6b76b5b959e791320174997af9888c9861c6fd17dc3f99feda081a0100000017160014509060a6bedf13087124c0aeafc6e3db4e1e9a08fdffffff02103500000000000017a9146daec6ddb6faaf01f83f515045822a94d0c2331e87804b2a000000000017a914e0bc3e6f5f4080b4f007c6307ba579595e459a06870247304402205a9d97a269cefe296a746dc07e898d19889567e910339f31e12268703079a45a0220537145228842a020a16894006c7e50ae5109672ea13135a02b354f66838f9676012103d447f34dd13359a8fc64ed3977fcecea3f6802f842f9a9f857de07453b715735273d0800"
expectedTxid := "20d9d7eae4283573e042de272c0fc6af7df5a1100c4871127fa07c9022da1945"
expectedChangeAddress := "3NBJnvo9U5YbJnr1pALFqQEur1wXWJrjoM"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 56, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
}
func TestTransactionBuilder_TwoInputs_BuildsTransaction(t *testing.T) {
path1 := NewDerivationPath(BaseCoinBip49MainNet, 1, 56)
path2 := NewDerivationPath(BaseCoinBip49MainNet, 1, 57)
utxo1 := NewUTXO("24cc9150963a2369d7f413af8b18c3d0243b438ba742d6d083ec8ed492d312f9", 1, 2769977, path1, nil, true)
utxo2 := NewUTXO("ed611c20fc9088aa5ec1c86de88dd017965358c150c58f71eda721cdb2ac0a48", 1, 314605, path2, nil, true)
amount := 3000000
feeAmount := 4000
changeAmount := 80582
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 58)
toAddress := "3CkiUcj5vU4TGZJeDcrmYGWH8GYJ5vKcQq"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 540220)
data.AddUTXO(utxo1)
data.AddUTXO(utxo2)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000102f912d392d48eec83d0d642a78b433b24d0c3188baf13f4d769233a965091cc24010000001716001436386ac950d557ae06bfffc51e7b8fa08474c05ffdffffff480aacb2cd21a7ed718fc550c158539617d08de86dc8c15eaa8890fc201c61ed010000001716001480e1e7dc2f6436a60abec5e9e7f6b62b0b9985c4fdffffff02c0c62d000000000017a914795c7bc23aebac7ddea222bb13c5357b32ed0cd487c63a01000000000017a914a4a2fab6264d22efbfc997f30738ccc6db0f8c05870247304402202a1dfa92a9dba16fa476c738197316009665f1b705e5626b2729b136bb64aaa102203041d91270d91124cb9341c6d1bfb2c7aa3372ef85f412fa00b8bf4fa7091f2b0121027c3fde52baba263e526ee5acc051f7fd69000eb633b8cf7decd1334db8fb44ee02483045022100a3843ddb39dd088e8d9657eaed5454a27737112c821eb6f674414e02f295d39402206de16b7c5b1ff054d102451a9242b10fccf81828003c377046bd11fa6c025179012103cbd9a8066a39e1d05ec26b72116e84b8b852b6784a6359ebb35f5794445245883c3e0800"
expectedTxid := "f94e7111736dd2a5fd1c5bbcced153f90d17ee1b032f166dda785354f4063651"
expectedChangeAddress := "3GhXz1NGhwQusEiBYKKhTqQYE6MKt2utDN"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 58, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
}
func TestTransactionBuilder_BuildsNativeSegwitTransaction(t *testing.T) {
path := NewDerivationPath(BaseCoinBip84MainNet, 0, 1)
utxo := NewUTXO("a89a9bed1f2daca01a0dca58f7fd0f2f0bf114d762b38e65845c5d1489339a69", 0, 96537, path, nil, true)
amount := 9755
feeAmount := 846
changeAmount := 85936
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 1)
toAddress := "bc1qjv79zewlvyyyd5y0qfk3svexzrqnammllj7mw6"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 590582)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101699a3389145d5c84658eb362d714f10b2f0ffdf758ca0d1aa0ac2d1fed9b9aa80000000000fdffffff021b26000000000000160014933c5165df610846d08f026d18332610c13eef7fb04f0100000000001600144227d834f1aae95273f0c87495f4ff0cb366545202473044022024b8f49fddcc119fc30990d6c970d8a1e0fa56d951d31591bed76c0867dbd11d0220755bb57af82993facbf413e523a8fa6fbccf8055ec95d1764da5e98b54e16bf2012103e775fd51f0dfb8cd865d9ff1cca2a158cf651fe997fdc9fee9c1d3b5e995ea77f6020900"
expectedTxid := "fe7f9a6de3203eb300cc66159e762251d675b5555dbd215c3574e75a762ca402"
expectedChangeAddress := "bc1qggnasd834t54yulsep6fta8lpjekv4zj6gv5rf"
wallet := NewHDWalletFromWords(w, BaseCoinBip84MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 1, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
}
func TestTransactionBuilder_BuildP2KH_NoChange(t *testing.T) {
path := NewDerivationPath(BaseCoinBip49MainNet, 1, 7)
utxo := NewUTXO("f14914f76ad26e0c1aa5a68c82b021b854c93850fde12f8e3188c14be6dc384e", 1, 33255, path, nil, true)
amount := 23147
feeAmount := 10108
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 2)
toAddress := "1HT6WtD5CAToc8wZdacCgY4XjJR4jV5Q5d"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 500000)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "010000000001014e38dce64bc188318e2fe1fd5038c954b821b0828ca6a51a0c6ed26af71449f10100000017160014b4381165b195b3286079d46eb2dc8058e6f02241fdffffff016b5a0000000000001976a914b4716e71b900b957e49f749c8432b910417788e888ac0247304402204147d25961e7ea6f88df58878aa38167fe6f8ae04c3625485dc594ff716f18a002200c08aabefae62d59568155cfb7ca8df1a4d54c01e5abd767d59e7b982663db23012103a45ef894ab9e6f2e55683561181be9e69b20207af746d60b95fab33476dc932420a10700"
expectedTxid := "86a9dc5bef7933df26d2b081376084e456a5bd3c2f2df28e758ff062b05a8c17"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Nil(t, meta.TransactionChangeMetadata)
}
func TestTransationBuilder_BuildSingleUTXO(t *testing.T) {
path := NewDerivationPath(BaseCoinBip49MainNet, 0, 0)
utxo := NewUTXO("3480e31ea00efeb570472983ff914694f62804e768a6c6b4d1b6cd70a1cd3efa", 1, 449893, path, nil, true)
amount := 218384
feeAmount := 668
changeAmount := 230841
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 0)
toAddress := "3ERQiyXSeUYmxxqKyg8XwqGo4W7utgDrTR"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 500000)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101fa3ecda170cdb6d1b4c6a668e70428f6944691ff83294770b5fe0ea01ee380340100000017160014f990679acafe25c27615373b40bf22446d24ff44fdffffff02105503000000000017a9148ba60342bf59f73327fecab2bef17c1612888c3587b98503000000000017a9141cc1e09a63d1ae795a7130e099b28a0b1d8e4fae8702473044022026f508a317df64f935c43f135280f9f0e95617c22d0f80df77c333656d9303a802206a1c16bd7957e49ddac990f6151065cab326e55d011418e24333d2a979f963d60121039b3b694b8fc5b5e07fb069c783cac754f5d38c3e08bed1960e31fdb1dda35c2420a10700"
expectedTxid := "221ced4e8784290dea336afa1b0a06fa868812e51abbdca3126ce8d99335a6e2"
expectedChangeAddress := "34K56kSjgUCUSD8GTtuF7c9Zzwokbs6uZ7"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 0, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
}
func TestTransactionBuilder_TestNet(t *testing.T) {
path := NewDerivationPath(BaseCoinBip49TestNet, 0, 0)
utxo := NewUTXO("1cfd000efbe248c48b499b0a5d76ea7687ee76cad8481f71277ee283df32af26", 0, 1250000000, path, nil, true)
amount := 9523810
feeAmount := 830
changeAmount := 1240475360
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 0)
toAddress := "2N8o4Mu5PRAR27TC2eai62CRXarTbQmjyCx"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49TestNet, amount, feeAmount, changePath, 644)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "0100000000010126af32df83e27e27711f48d8ca76ee8776ea765d0a9b498bc448e2fb0e00fd1c000000001716001438971f73930f6c141d977ac4fd4a727c854935b3fdffffff02625291000000000017a914aa8f293a04a7df8794b743e14ffb96c2a30a1b2787e026f0490000000017a914251dd11457a259c3ba47e5cca3717fe4214e02988702483045022100f24650e94fd022459920770af43f7b630654a85caca68fa73060a7c2422840fc022079267209fb416538e3d471d108f95c90e71e23d7628448f8a3e8c036e93849a1012103a1af804ac108a8a51782198c2d034b28bf90c8803f5a53f76276fa69a4eae77f84020000"
expectedTxid := "5eb44c7faaa9c17c886588a1e20461d60fbfe1e504e7bac5af3469fdd9039837"
expectedChangeAddress := "2MvdUi5o3f2tnEFh9yGvta6FzptTZtkPJC8"
wallet := NewHDWalletFromWords(w, BaseCoinBip49TestNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 0, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
}
func TestTransactionBuilder_SendToNativeSegwit_BuildsProperly(t *testing.T) | {
path := NewDerivationPath(BaseCoinBip49MainNet, 0, 80)
utxo := NewUTXO("94b5bcfbd52a405b291d906e636c8e133407e68a75b0a1ccc492e131ff5d8f90", 0, 10261, path, nil, true)
amount := 5000
feeAmount := 1000
changeAmount := 4261
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 102)
toAddress := "bc1ql2sdag2nm9csz4wmlj735jxw88ym3yukyzmrpj"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 500000)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101908f5dff31e192c4cca1b0758ae60734138e6c636e901d295b402ad5fbbcb594000000001716001442288ee31111f7187e8cfe8c82917c4734da4c2efdffffff028813000000000000160014faa0dea153d9710155dbfcbd1a48ce39c9b89396a51000000000000017a914aa71651e8f7c618a4576873254ec80c4dfaa068b8702483045022100b3c3d02e7f455503447e70138bcf2f3e928af0d7b9640631e086a56d43740199022018906455f9f7314109e73489bb12c169b3a59302c8456b1b154e894466039f8d01210270d4003d27b5340df1895ef3a5aee2ae2fe3ed7383c01ba623723e702b6c83c120a10700"
expectedTxid := "1f1ffca0eda219b09116743d2c9b9dcf8eefd10d240bdc4e66678d72a6e4614d"
expectedChangeAddress := "3HEEdyeVwoGZf86jq8ovUhw9FiXkwCdY79"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 102, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
} | identifier_body | |
transaction_builder_test.go | package cnlib
import "testing"
import "github.com/stretchr/testify/assert"
func TestTransactionBuilderBuildsTxCorrect(t *testing.T) {
inputPath := NewDerivationPath(BaseCoinBip49MainNet, 1, 53)
utxo := NewUTXO("1a08dafe993fdc17fdc661988c88f97a9974013291e759b9b5766b8e97c78f87", 1, 2788424, inputPath, nil, true)
amount := 13584
feeAmount := 3000
changeAmount := 2771840
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 56)
toAddress := "3BgxxADLtnoKu9oytQiiVzYUqvo8weCVy9"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 539943)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101878fc7978e6b76b5b959e791320174997af9888c9861c6fd17dc3f99feda081a0100000017160014509060a6bedf13087124c0aeafc6e3db4e1e9a08fdffffff02103500000000000017a9146daec6ddb6faaf01f83f515045822a94d0c2331e87804b2a000000000017a914e0bc3e6f5f4080b4f007c6307ba579595e459a06870247304402205a9d97a269cefe296a746dc07e898d19889567e910339f31e12268703079a45a0220537145228842a020a16894006c7e50ae5109672ea13135a02b354f66838f9676012103d447f34dd13359a8fc64ed3977fcecea3f6802f842f9a9f857de07453b715735273d0800"
expectedTxid := "20d9d7eae4283573e042de272c0fc6af7df5a1100c4871127fa07c9022da1945"
expectedChangeAddress := "3NBJnvo9U5YbJnr1pALFqQEur1wXWJrjoM"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 56, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
}
func TestTransactionBuilder_TwoInputs_BuildsTransaction(t *testing.T) {
path1 := NewDerivationPath(BaseCoinBip49MainNet, 1, 56)
path2 := NewDerivationPath(BaseCoinBip49MainNet, 1, 57)
utxo1 := NewUTXO("24cc9150963a2369d7f413af8b18c3d0243b438ba742d6d083ec8ed492d312f9", 1, 2769977, path1, nil, true)
utxo2 := NewUTXO("ed611c20fc9088aa5ec1c86de88dd017965358c150c58f71eda721cdb2ac0a48", 1, 314605, path2, nil, true)
amount := 3000000
feeAmount := 4000
changeAmount := 80582
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 58)
toAddress := "3CkiUcj5vU4TGZJeDcrmYGWH8GYJ5vKcQq"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 540220)
data.AddUTXO(utxo1)
data.AddUTXO(utxo2)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000102f912d392d48eec83d0d642a78b433b24d0c3188baf13f4d769233a965091cc24010000001716001436386ac950d557ae06bfffc51e7b8fa08474c05ffdffffff480aacb2cd21a7ed718fc550c158539617d08de86dc8c15eaa8890fc201c61ed010000001716001480e1e7dc2f6436a60abec5e9e7f6b62b0b9985c4fdffffff02c0c62d000000000017a914795c7bc23aebac7ddea222bb13c5357b32ed0cd487c63a01000000000017a914a4a2fab6264d22efbfc997f30738ccc6db0f8c05870247304402202a1dfa92a9dba16fa476c738197316009665f1b705e5626b2729b136bb64aaa102203041d91270d91124cb9341c6d1bfb2c7aa3372ef85f412fa00b8bf4fa7091f2b0121027c3fde52baba263e526ee5acc051f7fd69000eb633b8cf7decd1334db8fb44ee02483045022100a3843ddb39dd088e8d9657eaed5454a27737112c821eb6f674414e02f295d39402206de16b7c5b1ff054d102451a9242b10fccf81828003c377046bd11fa6c025179012103cbd9a8066a39e1d05ec26b72116e84b8b852b6784a6359ebb35f5794445245883c3e0800"
expectedTxid := "f94e7111736dd2a5fd1c5bbcced153f90d17ee1b032f166dda785354f4063651"
expectedChangeAddress := "3GhXz1NGhwQusEiBYKKhTqQYE6MKt2utDN"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 58, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
}
func TestTransactionBuilder_BuildsNativeSegwitTransaction(t *testing.T) {
path := NewDerivationPath(BaseCoinBip84MainNet, 0, 1)
utxo := NewUTXO("a89a9bed1f2daca01a0dca58f7fd0f2f0bf114d762b38e65845c5d1489339a69", 0, 96537, path, nil, true)
amount := 9755
feeAmount := 846
changeAmount := 85936
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 1)
toAddress := "bc1qjv79zewlvyyyd5y0qfk3svexzrqnammllj7mw6"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 590582)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101699a3389145d5c84658eb362d714f10b2f0ffdf758ca0d1aa0ac2d1fed9b9aa80000000000fdffffff021b26000000000000160014933c5165df610846d08f026d18332610c13eef7fb04f0100000000001600144227d834f1aae95273f0c87495f4ff0cb366545202473044022024b8f49fddcc119fc30990d6c970d8a1e0fa56d951d31591bed76c0867dbd11d0220755bb57af82993facbf413e523a8fa6fbccf8055ec95d1764da5e98b54e16bf2012103e775fd51f0dfb8cd865d9ff1cca2a158cf651fe997fdc9fee9c1d3b5e995ea77f6020900"
expectedTxid := "fe7f9a6de3203eb300cc66159e762251d675b5555dbd215c3574e75a762ca402"
expectedChangeAddress := "bc1qggnasd834t54yulsep6fta8lpjekv4zj6gv5rf"
wallet := NewHDWalletFromWords(w, BaseCoinBip84MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 1, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
}
func TestTransactionBuilder_BuildP2KH_NoChange(t *testing.T) {
path := NewDerivationPath(BaseCoinBip49MainNet, 1, 7)
utxo := NewUTXO("f14914f76ad26e0c1aa5a68c82b021b854c93850fde12f8e3188c14be6dc384e", 1, 33255, path, nil, true)
amount := 23147
feeAmount := 10108
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 2)
toAddress := "1HT6WtD5CAToc8wZdacCgY4XjJR4jV5Q5d"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 500000)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "010000000001014e38dce64bc188318e2fe1fd5038c954b821b0828ca6a51a0c6ed26af71449f10100000017160014b4381165b195b3286079d46eb2dc8058e6f02241fdffffff016b5a0000000000001976a914b4716e71b900b957e49f749c8432b910417788e888ac0247304402204147d25961e7ea6f88df58878aa38167fe6f8ae04c3625485dc594ff716f18a002200c08aabefae62d59568155cfb7ca8df1a4d54c01e5abd767d59e7b982663db23012103a45ef894ab9e6f2e55683561181be9e69b20207af746d60b95fab33476dc932420a10700"
expectedTxid := "86a9dc5bef7933df26d2b081376084e456a5bd3c2f2df28e758ff062b05a8c17"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Nil(t, meta.TransactionChangeMetadata)
}
func TestTransationBuilder_BuildSingleUTXO(t *testing.T) {
path := NewDerivationPath(BaseCoinBip49MainNet, 0, 0)
utxo := NewUTXO("3480e31ea00efeb570472983ff914694f62804e768a6c6b4d1b6cd70a1cd3efa", 1, 449893, path, nil, true)
amount := 218384
feeAmount := 668
changeAmount := 230841
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 0)
toAddress := "3ERQiyXSeUYmxxqKyg8XwqGo4W7utgDrTR"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 500000)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101fa3ecda170cdb6d1b4c6a668e70428f6944691ff83294770b5fe0ea01ee380340100000017160014f990679acafe25c27615373b40bf22446d24ff44fdffffff02105503000000000017a9148ba60342bf59f73327fecab2bef17c1612888c3587b98503000000000017a9141cc1e09a63d1ae795a7130e099b28a0b1d8e4fae8702473044022026f508a317df64f935c43f135280f9f0e95617c22d0f80df77c333656d9303a802206a1c16bd7957e49ddac990f6151065cab326e55d011418e24333d2a979f963d60121039b3b694b8fc5b5e07fb069c783cac754f5d38c3e08bed1960e31fdb1dda35c2420a10700"
expectedTxid := "221ced4e8784290dea336afa1b0a06fa868812e51abbdca3126ce8d99335a6e2"
expectedChangeAddress := "34K56kSjgUCUSD8GTtuF7c9Zzwokbs6uZ7"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 0, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
}
func TestTransactionBuilder_TestNet(t *testing.T) {
path := NewDerivationPath(BaseCoinBip49TestNet, 0, 0)
utxo := NewUTXO("1cfd000efbe248c48b499b0a5d76ea7687ee76cad8481f71277ee283df32af26", 0, 1250000000, path, nil, true)
amount := 9523810
feeAmount := 830
changeAmount := 1240475360
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 0)
toAddress := "2N8o4Mu5PRAR27TC2eai62CRXarTbQmjyCx"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49TestNet, amount, feeAmount, changePath, 644)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
| expectedTxid := "5eb44c7faaa9c17c886588a1e20461d60fbfe1e504e7bac5af3469fdd9039837"
expectedChangeAddress := "2MvdUi5o3f2tnEFh9yGvta6FzptTZtkPJC8"
wallet := NewHDWalletFromWords(w, BaseCoinBip49TestNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 0, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
}
func TestTransactionBuilder_SendToNativeSegwit_BuildsProperly(t *testing.T) {
path := NewDerivationPath(BaseCoinBip49MainNet, 0, 80)
utxo := NewUTXO("94b5bcfbd52a405b291d906e636c8e133407e68a75b0a1ccc492e131ff5d8f90", 0, 10261, path, nil, true)
amount := 5000
feeAmount := 1000
changeAmount := 4261
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 102)
toAddress := "bc1ql2sdag2nm9csz4wmlj735jxw88ym3yukyzmrpj"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 500000)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101908f5dff31e192c4cca1b0758ae60734138e6c636e901d295b402ad5fbbcb594000000001716001442288ee31111f7187e8cfe8c82917c4734da4c2efdffffff028813000000000000160014faa0dea153d9710155dbfcbd1a48ce39c9b89396a51000000000000017a914aa71651e8f7c618a4576873254ec80c4dfaa068b8702483045022100b3c3d02e7f455503447e70138bcf2f3e928af0d7b9640631e086a56d43740199022018906455f9f7314109e73489bb12c169b3a59302c8456b1b154e894466039f8d01210270d4003d27b5340df1895ef3a5aee2ae2fe3ed7383c01ba623723e702b6c83c120a10700"
expectedTxid := "1f1ffca0eda219b09116743d2c9b9dcf8eefd10d240bdc4e66678d72a6e4614d"
expectedChangeAddress := "3HEEdyeVwoGZf86jq8ovUhw9FiXkwCdY79"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 102, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
} | expectedEncodedTx := "0100000000010126af32df83e27e27711f48d8ca76ee8776ea765d0a9b498bc448e2fb0e00fd1c000000001716001438971f73930f6c141d977ac4fd4a727c854935b3fdffffff02625291000000000017a914aa8f293a04a7df8794b743e14ffb96c2a30a1b2787e026f0490000000017a914251dd11457a259c3ba47e5cca3717fe4214e02988702483045022100f24650e94fd022459920770af43f7b630654a85caca68fa73060a7c2422840fc022079267209fb416538e3d471d108f95c90e71e23d7628448f8a3e8c036e93849a1012103a1af804ac108a8a51782198c2d034b28bf90c8803f5a53f76276fa69a4eae77f84020000" | random_line_split |
transaction_builder_test.go | package cnlib
import "testing"
import "github.com/stretchr/testify/assert"
func TestTransactionBuilderBuildsTxCorrect(t *testing.T) {
inputPath := NewDerivationPath(BaseCoinBip49MainNet, 1, 53)
utxo := NewUTXO("1a08dafe993fdc17fdc661988c88f97a9974013291e759b9b5766b8e97c78f87", 1, 2788424, inputPath, nil, true)
amount := 13584
feeAmount := 3000
changeAmount := 2771840
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 56)
toAddress := "3BgxxADLtnoKu9oytQiiVzYUqvo8weCVy9"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 539943)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101878fc7978e6b76b5b959e791320174997af9888c9861c6fd17dc3f99feda081a0100000017160014509060a6bedf13087124c0aeafc6e3db4e1e9a08fdffffff02103500000000000017a9146daec6ddb6faaf01f83f515045822a94d0c2331e87804b2a000000000017a914e0bc3e6f5f4080b4f007c6307ba579595e459a06870247304402205a9d97a269cefe296a746dc07e898d19889567e910339f31e12268703079a45a0220537145228842a020a16894006c7e50ae5109672ea13135a02b354f66838f9676012103d447f34dd13359a8fc64ed3977fcecea3f6802f842f9a9f857de07453b715735273d0800"
expectedTxid := "20d9d7eae4283573e042de272c0fc6af7df5a1100c4871127fa07c9022da1945"
expectedChangeAddress := "3NBJnvo9U5YbJnr1pALFqQEur1wXWJrjoM"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 56, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
}
func TestTransactionBuilder_TwoInputs_BuildsTransaction(t *testing.T) {
path1 := NewDerivationPath(BaseCoinBip49MainNet, 1, 56)
path2 := NewDerivationPath(BaseCoinBip49MainNet, 1, 57)
utxo1 := NewUTXO("24cc9150963a2369d7f413af8b18c3d0243b438ba742d6d083ec8ed492d312f9", 1, 2769977, path1, nil, true)
utxo2 := NewUTXO("ed611c20fc9088aa5ec1c86de88dd017965358c150c58f71eda721cdb2ac0a48", 1, 314605, path2, nil, true)
amount := 3000000
feeAmount := 4000
changeAmount := 80582
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 58)
toAddress := "3CkiUcj5vU4TGZJeDcrmYGWH8GYJ5vKcQq"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 540220)
data.AddUTXO(utxo1)
data.AddUTXO(utxo2)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000102f912d392d48eec83d0d642a78b433b24d0c3188baf13f4d769233a965091cc24010000001716001436386ac950d557ae06bfffc51e7b8fa08474c05ffdffffff480aacb2cd21a7ed718fc550c158539617d08de86dc8c15eaa8890fc201c61ed010000001716001480e1e7dc2f6436a60abec5e9e7f6b62b0b9985c4fdffffff02c0c62d000000000017a914795c7bc23aebac7ddea222bb13c5357b32ed0cd487c63a01000000000017a914a4a2fab6264d22efbfc997f30738ccc6db0f8c05870247304402202a1dfa92a9dba16fa476c738197316009665f1b705e5626b2729b136bb64aaa102203041d91270d91124cb9341c6d1bfb2c7aa3372ef85f412fa00b8bf4fa7091f2b0121027c3fde52baba263e526ee5acc051f7fd69000eb633b8cf7decd1334db8fb44ee02483045022100a3843ddb39dd088e8d9657eaed5454a27737112c821eb6f674414e02f295d39402206de16b7c5b1ff054d102451a9242b10fccf81828003c377046bd11fa6c025179012103cbd9a8066a39e1d05ec26b72116e84b8b852b6784a6359ebb35f5794445245883c3e0800"
expectedTxid := "f94e7111736dd2a5fd1c5bbcced153f90d17ee1b032f166dda785354f4063651"
expectedChangeAddress := "3GhXz1NGhwQusEiBYKKhTqQYE6MKt2utDN"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 58, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
}
func TestTransactionBuilder_BuildsNativeSegwitTransaction(t *testing.T) {
path := NewDerivationPath(BaseCoinBip84MainNet, 0, 1)
utxo := NewUTXO("a89a9bed1f2daca01a0dca58f7fd0f2f0bf114d762b38e65845c5d1489339a69", 0, 96537, path, nil, true)
amount := 9755
feeAmount := 846
changeAmount := 85936
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 1)
toAddress := "bc1qjv79zewlvyyyd5y0qfk3svexzrqnammllj7mw6"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 590582)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101699a3389145d5c84658eb362d714f10b2f0ffdf758ca0d1aa0ac2d1fed9b9aa80000000000fdffffff021b26000000000000160014933c5165df610846d08f026d18332610c13eef7fb04f0100000000001600144227d834f1aae95273f0c87495f4ff0cb366545202473044022024b8f49fddcc119fc30990d6c970d8a1e0fa56d951d31591bed76c0867dbd11d0220755bb57af82993facbf413e523a8fa6fbccf8055ec95d1764da5e98b54e16bf2012103e775fd51f0dfb8cd865d9ff1cca2a158cf651fe997fdc9fee9c1d3b5e995ea77f6020900"
expectedTxid := "fe7f9a6de3203eb300cc66159e762251d675b5555dbd215c3574e75a762ca402"
expectedChangeAddress := "bc1qggnasd834t54yulsep6fta8lpjekv4zj6gv5rf"
wallet := NewHDWalletFromWords(w, BaseCoinBip84MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 1, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
}
func | (t *testing.T) {
path := NewDerivationPath(BaseCoinBip49MainNet, 1, 7)
utxo := NewUTXO("f14914f76ad26e0c1aa5a68c82b021b854c93850fde12f8e3188c14be6dc384e", 1, 33255, path, nil, true)
amount := 23147
feeAmount := 10108
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 2)
toAddress := "1HT6WtD5CAToc8wZdacCgY4XjJR4jV5Q5d"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 500000)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "010000000001014e38dce64bc188318e2fe1fd5038c954b821b0828ca6a51a0c6ed26af71449f10100000017160014b4381165b195b3286079d46eb2dc8058e6f02241fdffffff016b5a0000000000001976a914b4716e71b900b957e49f749c8432b910417788e888ac0247304402204147d25961e7ea6f88df58878aa38167fe6f8ae04c3625485dc594ff716f18a002200c08aabefae62d59568155cfb7ca8df1a4d54c01e5abd767d59e7b982663db23012103a45ef894ab9e6f2e55683561181be9e69b20207af746d60b95fab33476dc932420a10700"
expectedTxid := "86a9dc5bef7933df26d2b081376084e456a5bd3c2f2df28e758ff062b05a8c17"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Nil(t, meta.TransactionChangeMetadata)
}
func TestTransationBuilder_BuildSingleUTXO(t *testing.T) {
path := NewDerivationPath(BaseCoinBip49MainNet, 0, 0)
utxo := NewUTXO("3480e31ea00efeb570472983ff914694f62804e768a6c6b4d1b6cd70a1cd3efa", 1, 449893, path, nil, true)
amount := 218384
feeAmount := 668
changeAmount := 230841
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 0)
toAddress := "3ERQiyXSeUYmxxqKyg8XwqGo4W7utgDrTR"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 500000)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101fa3ecda170cdb6d1b4c6a668e70428f6944691ff83294770b5fe0ea01ee380340100000017160014f990679acafe25c27615373b40bf22446d24ff44fdffffff02105503000000000017a9148ba60342bf59f73327fecab2bef17c1612888c3587b98503000000000017a9141cc1e09a63d1ae795a7130e099b28a0b1d8e4fae8702473044022026f508a317df64f935c43f135280f9f0e95617c22d0f80df77c333656d9303a802206a1c16bd7957e49ddac990f6151065cab326e55d011418e24333d2a979f963d60121039b3b694b8fc5b5e07fb069c783cac754f5d38c3e08bed1960e31fdb1dda35c2420a10700"
expectedTxid := "221ced4e8784290dea336afa1b0a06fa868812e51abbdca3126ce8d99335a6e2"
expectedChangeAddress := "34K56kSjgUCUSD8GTtuF7c9Zzwokbs6uZ7"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 0, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
}
func TestTransactionBuilder_TestNet(t *testing.T) {
path := NewDerivationPath(BaseCoinBip49TestNet, 0, 0)
utxo := NewUTXO("1cfd000efbe248c48b499b0a5d76ea7687ee76cad8481f71277ee283df32af26", 0, 1250000000, path, nil, true)
amount := 9523810
feeAmount := 830
changeAmount := 1240475360
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 0)
toAddress := "2N8o4Mu5PRAR27TC2eai62CRXarTbQmjyCx"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49TestNet, amount, feeAmount, changePath, 644)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "0100000000010126af32df83e27e27711f48d8ca76ee8776ea765d0a9b498bc448e2fb0e00fd1c000000001716001438971f73930f6c141d977ac4fd4a727c854935b3fdffffff02625291000000000017a914aa8f293a04a7df8794b743e14ffb96c2a30a1b2787e026f0490000000017a914251dd11457a259c3ba47e5cca3717fe4214e02988702483045022100f24650e94fd022459920770af43f7b630654a85caca68fa73060a7c2422840fc022079267209fb416538e3d471d108f95c90e71e23d7628448f8a3e8c036e93849a1012103a1af804ac108a8a51782198c2d034b28bf90c8803f5a53f76276fa69a4eae77f84020000"
expectedTxid := "5eb44c7faaa9c17c886588a1e20461d60fbfe1e504e7bac5af3469fdd9039837"
expectedChangeAddress := "2MvdUi5o3f2tnEFh9yGvta6FzptTZtkPJC8"
wallet := NewHDWalletFromWords(w, BaseCoinBip49TestNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 0, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
}
func TestTransactionBuilder_SendToNativeSegwit_BuildsProperly(t *testing.T) {
path := NewDerivationPath(BaseCoinBip49MainNet, 0, 80)
utxo := NewUTXO("94b5bcfbd52a405b291d906e636c8e133407e68a75b0a1ccc492e131ff5d8f90", 0, 10261, path, nil, true)
amount := 5000
feeAmount := 1000
changeAmount := 4261
changePath := NewDerivationPath(BaseCoinBip49MainNet, 1, 102)
toAddress := "bc1ql2sdag2nm9csz4wmlj735jxw88ym3yukyzmrpj"
data := NewTransactionDataFlatFee(toAddress, BaseCoinBip49MainNet, amount, feeAmount, changePath, 500000)
data.AddUTXO(utxo)
err := data.Generate()
assert.Nil(t, err)
expectedEncodedTx := "01000000000101908f5dff31e192c4cca1b0758ae60734138e6c636e901d295b402ad5fbbcb594000000001716001442288ee31111f7187e8cfe8c82917c4734da4c2efdffffff028813000000000000160014faa0dea153d9710155dbfcbd1a48ce39c9b89396a51000000000000017a914aa71651e8f7c618a4576873254ec80c4dfaa068b8702483045022100b3c3d02e7f455503447e70138bcf2f3e928af0d7b9640631e086a56d43740199022018906455f9f7314109e73489bb12c169b3a59302c8456b1b154e894466039f8d01210270d4003d27b5340df1895ef3a5aee2ae2fe3ed7383c01ba623723e702b6c83c120a10700"
expectedTxid := "1f1ffca0eda219b09116743d2c9b9dcf8eefd10d240bdc4e66678d72a6e4614d"
expectedChangeAddress := "3HEEdyeVwoGZf86jq8ovUhw9FiXkwCdY79"
wallet := NewHDWalletFromWords(w, BaseCoinBip49MainNet)
meta, err := wallet.BuildTransactionMetadata(data.TransactionData)
assert.Nil(t, err)
assert.Equal(t, toAddress, data.TransactionData.PaymentAddress)
assert.Equal(t, expectedEncodedTx, meta.EncodedTx)
assert.Equal(t, expectedTxid, meta.Txid)
assert.Equal(t, expectedChangeAddress, meta.TransactionChangeMetadata.Address)
assert.Equal(t, 1, meta.TransactionChangeMetadata.VoutIndex)
assert.Equal(t, 102, meta.TransactionChangeMetadata.Path.Index)
assert.Equal(t, changeAmount, data.TransactionData.ChangeAmount)
}
| TestTransactionBuilder_BuildP2KH_NoChange | identifier_name |
interpolation.rs | /*
* Copyright 2018 The Starlark in Rust Authors.
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! String interpolation-related code.
//! Based on <https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting>
use crate::values::{tuple::Tuple, Heap, Value, ValueLike};
use gazebo::prelude::*;
use std::{fmt::Write, iter};
use thiserror::Error;
const AFTER_PERCENT: &str = "'%' must be followed by an optional name and a specifier ('s', 'r', 'd', 'i', 'o', 'x', 'X', 'c') or '%'";
/// Operator `%` format or evaluation errors
#[derive(Clone, Dupe, Debug, Error)]
enum StringInterpolationError {
#[error(
"Unexpected EOF in format string. Could not find ')' when parsing '%(name)f' expression"
)]
UnexpectedEOFClosingParen,
/// `%` must be followed by specifier.
#[error("Unexpected EOF in format string. {}", AFTER_PERCENT)]
UnexpectedEOFPercent,
#[error("Unknown format string specifier '{}'. {}", .0.escape_default(), AFTER_PERCENT)]
UnknownSpecifier(char),
#[error("Invalid UTF-8 codepoint 0x{:x} passed for %c formatter", .0)]
ValueNotInUTFRange(u32),
/// Interpolation parameter is too big for the format string.
#[error("Too many arguments for format string")]
TooManyParameters,
/// Interpolation parameter is too small for the format string.
#[error("Not enough arguments for format string")]
NotEnoughParameters,
#[error("'%c' formatter requires a single-character string")]
ValueNotChar,
}
/// Format char
enum ArgFormat {
// str(x)
Str,
// repr(x)
Repr,
// signed integer decimal
Dec,
// signed octal
Oct,
// signed hexadecimal, lowercase
HexLower,
// signed hexadecimal, uppercase
HexUpper,
// x for string, chr(x) for int
Char,
// `%` sign
Percent,
}
impl ArgFormat {
fn format_arg(&self, out: &mut String, arg: Value) -> anyhow::Result<()> {
match self {
// Equivalent to `write!(out, "{}", arg.to_str()).unwrap()`, but avoid
// allocating a separate `String` on the way.
ArgFormat::Str => match arg.unpack_str() {
None => arg.collect_repr(out),
Some(v) => out.push_str(v),
},
ArgFormat::Repr => arg.collect_repr(out),
ArgFormat::Dec => write!(out, "{}", arg.to_int()?).unwrap(),
ArgFormat::Oct => {
let v = arg.to_int()?;
write!(
out,
"{}{:o}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::HexLower => {
let v = arg.to_int()?;
write!(
out,
"{}{:x}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::HexUpper => {
let v = arg.to_int()?;
write!(
out,
"{}{:X}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::Char => match arg.unpack_str() {
Some(arg) => {
let mut chars = arg.chars();
let c = chars.next();
match c {
Some(c) if chars.next().is_none() => out.push(c),
_ => return Err(StringInterpolationError::ValueNotChar.into()),
}
}
None => {
let i = arg.to_int()? as u32;
match std::char::from_u32(i) {
Some(c) => write!(out, "{}", c).unwrap(),
None => {
return Err(StringInterpolationError::ValueNotInUTFRange(i).into());
}
}
}
},
ArgFormat::Percent => {
out.push('%');
}
}
Ok(())
}
}
// %(name)s or %s
enum NamedOrPositional {
Named(String),
Positional,
}
/// Implement Python `%` format strings.
pub struct Interpolation {
/// String before first parameter
init: String,
/// Number of positional arguments
positional_count: usize,
/// Number of named arguments
named_count: usize,
/// Arguments followed by uninterpreted strings
parameters: Vec<(NamedOrPositional, ArgFormat, String)>,
}
impl Interpolation {
fn append_literal(&mut self, c: char) {
if let Some(p) = self.parameters.last_mut() {
p.2.push(c);
} else {
self.init.push(c)
}
}
/// Parse a percent-interpolation string, returning an `Err` if the string is invalid.
pub fn parse(format: &str) -> anyhow::Result<Self> {
let mut result = Self {
init: String::new(),
positional_count: 0,
named_count: 0,
parameters: Vec::new(),
};
let mut chars = format.chars();
while let Some(c) = chars.next() {
if c != '%' {
result.append_literal(c);
} else {
let next = chars
.next()
.ok_or(StringInterpolationError::UnexpectedEOFPercent)?;
let (named_or_positional, format_char) = if next == '(' {
let mut name = String::new();
loop {
match chars.next() {
None => {
return Err(
StringInterpolationError::UnexpectedEOFClosingParen.into()
);
}
Some(')') => {
break;
}
Some(c) => name.push(c),
}
}
(
NamedOrPositional::Named(name),
chars
.next()
.ok_or(StringInterpolationError::UnexpectedEOFPercent)?,
)
} else {
(NamedOrPositional::Positional, next)
};
let format = match format_char {
's' => ArgFormat::Str,
'r' => ArgFormat::Repr,
'd' | 'i' => ArgFormat::Dec,
'o' => ArgFormat::Oct,
'x' => ArgFormat::HexLower,
'X' => ArgFormat::HexUpper,
'c' => ArgFormat::Char,
'%' => match named_or_positional {
NamedOrPositional::Positional => {
result.append_literal('%');
continue;
}
NamedOrPositional::Named(_) => {
// In both Python and Starlark Go implementations
// `%(n)%` consumes named argument, but
// `%%` does not consume positional argument.
// So `Percent` variant is added only when `ArgFormat` is `Named`.
ArgFormat::Percent
}
},
c => return Err(StringInterpolationError::UnknownSpecifier(c).into()),
};
match named_or_positional {
NamedOrPositional::Positional => |
NamedOrPositional::Named(..) => {
result.named_count += 1;
}
}
result
.parameters
.push((named_or_positional, format, String::new()));
}
}
Ok(result)
}
/// Apply a percent-interpolation string to a value.
pub fn apply<'v>(self, argument: Value<'v>, heap: &'v Heap) -> anyhow::Result<String> {
let mut r = self.init;
let owned_tuple;
let mut arg_iter: Box<dyn Iterator<Item = Value>> =
if self.named_count > 0 && self.positional_count == 0 {
box iter::empty()
} else {
match Tuple::from_value(argument) {
Some(x) => {
owned_tuple = x;
box owned_tuple.iter()
}
None => box iter::once(argument),
}
};
for (named_or_positional, format, tail) in self.parameters {
let arg = match named_or_positional {
NamedOrPositional::Positional => match arg_iter.next() {
Some(a) => a,
None => return Err(StringInterpolationError::NotEnoughParameters.into()),
},
NamedOrPositional::Named(name) => argument.at(heap.alloc(name), heap)?,
};
format.format_arg(&mut r, arg)?;
r.push_str(&tail);
}
if arg_iter.next().is_some() {
return Err(StringInterpolationError::TooManyParameters.into());
}
Ok(r)
}
}
| {
result.positional_count += 1;
} | conditional_block |
interpolation.rs | /*
* Copyright 2018 The Starlark in Rust Authors.
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! String interpolation-related code.
//! Based on <https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting>
use crate::values::{tuple::Tuple, Heap, Value, ValueLike};
use gazebo::prelude::*;
use std::{fmt::Write, iter};
use thiserror::Error;
const AFTER_PERCENT: &str = "'%' must be followed by an optional name and a specifier ('s', 'r', 'd', 'i', 'o', 'x', 'X', 'c') or '%'";
/// Operator `%` format or evaluation errors
#[derive(Clone, Dupe, Debug, Error)]
enum StringInterpolationError {
#[error(
"Unexpected EOF in format string. Could not find ')' when parsing '%(name)f' expression"
)]
UnexpectedEOFClosingParen,
/// `%` must be followed by specifier.
#[error("Unexpected EOF in format string. {}", AFTER_PERCENT)]
UnexpectedEOFPercent,
#[error("Unknown format string specifier '{}'. {}", .0.escape_default(), AFTER_PERCENT)]
UnknownSpecifier(char),
#[error("Invalid UTF-8 codepoint 0x{:x} passed for %c formatter", .0)]
ValueNotInUTFRange(u32),
/// Interpolation parameter is too big for the format string.
#[error("Too many arguments for format string")]
TooManyParameters,
/// Interpolation parameter is too small for the format string.
#[error("Not enough arguments for format string")]
NotEnoughParameters,
#[error("'%c' formatter requires a single-character string")]
ValueNotChar,
}
/// Format char
enum ArgFormat {
// str(x)
Str,
// repr(x)
Repr,
// signed integer decimal
Dec,
// signed octal
Oct,
// signed hexadecimal, lowercase
HexLower,
// signed hexadecimal, uppercase
HexUpper,
// x for string, chr(x) for int
Char,
// `%` sign
Percent,
}
impl ArgFormat {
fn format_arg(&self, out: &mut String, arg: Value) -> anyhow::Result<()> {
match self {
// Equivalent to `write!(out, "{}", arg.to_str()).unwrap()`, but avoid
// allocating a separate `String` on the way.
ArgFormat::Str => match arg.unpack_str() {
None => arg.collect_repr(out),
Some(v) => out.push_str(v),
},
ArgFormat::Repr => arg.collect_repr(out),
ArgFormat::Dec => write!(out, "{}", arg.to_int()?).unwrap(),
ArgFormat::Oct => {
let v = arg.to_int()?;
write!(
out,
"{}{:o}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::HexLower => {
let v = arg.to_int()?;
write!(
out,
"{}{:x}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::HexUpper => {
let v = arg.to_int()?;
write!(
out,
"{}{:X}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::Char => match arg.unpack_str() {
Some(arg) => {
let mut chars = arg.chars();
let c = chars.next();
match c {
Some(c) if chars.next().is_none() => out.push(c),
_ => return Err(StringInterpolationError::ValueNotChar.into()),
}
}
None => {
let i = arg.to_int()? as u32;
match std::char::from_u32(i) {
Some(c) => write!(out, "{}", c).unwrap(),
None => {
return Err(StringInterpolationError::ValueNotInUTFRange(i).into());
}
}
}
},
ArgFormat::Percent => {
out.push('%');
}
}
Ok(())
}
}
// %(name)s or %s
enum | {
Named(String),
Positional,
}
/// Implement Python `%` format strings.
pub struct Interpolation {
/// String before first parameter
init: String,
/// Number of positional arguments
positional_count: usize,
/// Number of named arguments
named_count: usize,
/// Arguments followed by uninterpreted strings
parameters: Vec<(NamedOrPositional, ArgFormat, String)>,
}
impl Interpolation {
fn append_literal(&mut self, c: char) {
if let Some(p) = self.parameters.last_mut() {
p.2.push(c);
} else {
self.init.push(c)
}
}
/// Parse a percent-interpolation string, returning an `Err` if the string is invalid.
pub fn parse(format: &str) -> anyhow::Result<Self> {
let mut result = Self {
init: String::new(),
positional_count: 0,
named_count: 0,
parameters: Vec::new(),
};
let mut chars = format.chars();
while let Some(c) = chars.next() {
if c != '%' {
result.append_literal(c);
} else {
let next = chars
.next()
.ok_or(StringInterpolationError::UnexpectedEOFPercent)?;
let (named_or_positional, format_char) = if next == '(' {
let mut name = String::new();
loop {
match chars.next() {
None => {
return Err(
StringInterpolationError::UnexpectedEOFClosingParen.into()
);
}
Some(')') => {
break;
}
Some(c) => name.push(c),
}
}
(
NamedOrPositional::Named(name),
chars
.next()
.ok_or(StringInterpolationError::UnexpectedEOFPercent)?,
)
} else {
(NamedOrPositional::Positional, next)
};
let format = match format_char {
's' => ArgFormat::Str,
'r' => ArgFormat::Repr,
'd' | 'i' => ArgFormat::Dec,
'o' => ArgFormat::Oct,
'x' => ArgFormat::HexLower,
'X' => ArgFormat::HexUpper,
'c' => ArgFormat::Char,
'%' => match named_or_positional {
NamedOrPositional::Positional => {
result.append_literal('%');
continue;
}
NamedOrPositional::Named(_) => {
// In both Python and Starlark Go implementations
// `%(n)%` consumes named argument, but
// `%%` does not consume positional argument.
// So `Percent` variant is added only when `ArgFormat` is `Named`.
ArgFormat::Percent
}
},
c => return Err(StringInterpolationError::UnknownSpecifier(c).into()),
};
match named_or_positional {
NamedOrPositional::Positional => {
result.positional_count += 1;
}
NamedOrPositional::Named(..) => {
result.named_count += 1;
}
}
result
.parameters
.push((named_or_positional, format, String::new()));
}
}
Ok(result)
}
/// Apply a percent-interpolation string to a value.
pub fn apply<'v>(self, argument: Value<'v>, heap: &'v Heap) -> anyhow::Result<String> {
let mut r = self.init;
let owned_tuple;
let mut arg_iter: Box<dyn Iterator<Item = Value>> =
if self.named_count > 0 && self.positional_count == 0 {
box iter::empty()
} else {
match Tuple::from_value(argument) {
Some(x) => {
owned_tuple = x;
box owned_tuple.iter()
}
None => box iter::once(argument),
}
};
for (named_or_positional, format, tail) in self.parameters {
let arg = match named_or_positional {
NamedOrPositional::Positional => match arg_iter.next() {
Some(a) => a,
None => return Err(StringInterpolationError::NotEnoughParameters.into()),
},
NamedOrPositional::Named(name) => argument.at(heap.alloc(name), heap)?,
};
format.format_arg(&mut r, arg)?;
r.push_str(&tail);
}
if arg_iter.next().is_some() {
return Err(StringInterpolationError::TooManyParameters.into());
}
Ok(r)
}
}
| NamedOrPositional | identifier_name |
interpolation.rs | /*
* Copyright 2018 The Starlark in Rust Authors.
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! String interpolation-related code.
//! Based on <https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting>
use crate::values::{tuple::Tuple, Heap, Value, ValueLike};
use gazebo::prelude::*;
use std::{fmt::Write, iter};
use thiserror::Error;
const AFTER_PERCENT: &str = "'%' must be followed by an optional name and a specifier ('s', 'r', 'd', 'i', 'o', 'x', 'X', 'c') or '%'";
/// Operator `%` format or evaluation errors
#[derive(Clone, Dupe, Debug, Error)]
enum StringInterpolationError {
#[error(
"Unexpected EOF in format string. Could not find ')' when parsing '%(name)f' expression"
)]
UnexpectedEOFClosingParen,
/// `%` must be followed by specifier.
#[error("Unexpected EOF in format string. {}", AFTER_PERCENT)]
UnexpectedEOFPercent,
#[error("Unknown format string specifier '{}'. {}", .0.escape_default(), AFTER_PERCENT)]
UnknownSpecifier(char),
#[error("Invalid UTF-8 codepoint 0x{:x} passed for %c formatter", .0)]
ValueNotInUTFRange(u32),
/// Interpolation parameter is too big for the format string.
#[error("Too many arguments for format string")]
TooManyParameters,
/// Interpolation parameter is too small for the format string.
#[error("Not enough arguments for format string")]
NotEnoughParameters,
#[error("'%c' formatter requires a single-character string")]
ValueNotChar,
}
/// Format char
enum ArgFormat {
// str(x)
Str,
// repr(x)
Repr,
// signed integer decimal
Dec,
// signed octal
Oct,
// signed hexadecimal, lowercase
HexLower,
// signed hexadecimal, uppercase
HexUpper,
// x for string, chr(x) for int
Char,
// `%` sign
Percent,
}
impl ArgFormat {
fn format_arg(&self, out: &mut String, arg: Value) -> anyhow::Result<()> {
match self {
// Equivalent to `write!(out, "{}", arg.to_str()).unwrap()`, but avoid
// allocating a separate `String` on the way.
ArgFormat::Str => match arg.unpack_str() {
None => arg.collect_repr(out),
Some(v) => out.push_str(v),
},
ArgFormat::Repr => arg.collect_repr(out),
ArgFormat::Dec => write!(out, "{}", arg.to_int()?).unwrap(),
ArgFormat::Oct => {
let v = arg.to_int()?;
write!(
out,
"{}{:o}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::HexLower => {
let v = arg.to_int()?;
write!(
out,
"{}{:x}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::HexUpper => {
let v = arg.to_int()?;
write!(
out,
"{}{:X}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::Char => match arg.unpack_str() {
Some(arg) => {
let mut chars = arg.chars();
let c = chars.next();
match c {
Some(c) if chars.next().is_none() => out.push(c),
_ => return Err(StringInterpolationError::ValueNotChar.into()),
}
}
None => {
let i = arg.to_int()? as u32;
match std::char::from_u32(i) {
Some(c) => write!(out, "{}", c).unwrap(),
None => {
return Err(StringInterpolationError::ValueNotInUTFRange(i).into());
}
}
}
},
ArgFormat::Percent => {
out.push('%');
}
}
Ok(())
}
}
// %(name)s or %s
enum NamedOrPositional {
Named(String),
Positional,
}
/// Implement Python `%` format strings.
pub struct Interpolation {
/// String before first parameter
init: String,
/// Number of positional arguments
positional_count: usize,
/// Number of named arguments
named_count: usize,
/// Arguments followed by uninterpreted strings
parameters: Vec<(NamedOrPositional, ArgFormat, String)>,
}
impl Interpolation {
fn append_literal(&mut self, c: char) {
if let Some(p) = self.parameters.last_mut() {
p.2.push(c);
} else {
self.init.push(c)
}
}
/// Parse a percent-interpolation string, returning an `Err` if the string is invalid.
pub fn parse(format: &str) -> anyhow::Result<Self> {
let mut result = Self {
init: String::new(),
positional_count: 0,
named_count: 0,
parameters: Vec::new(),
};
let mut chars = format.chars();
while let Some(c) = chars.next() {
if c != '%' {
result.append_literal(c);
} else {
let next = chars
.next()
.ok_or(StringInterpolationError::UnexpectedEOFPercent)?;
let (named_or_positional, format_char) = if next == '(' {
let mut name = String::new();
loop {
match chars.next() {
None => {
return Err(
StringInterpolationError::UnexpectedEOFClosingParen.into()
);
}
Some(')') => {
break;
}
Some(c) => name.push(c),
}
}
(
NamedOrPositional::Named(name),
chars
.next()
.ok_or(StringInterpolationError::UnexpectedEOFPercent)?,
)
} else {
(NamedOrPositional::Positional, next)
};
let format = match format_char {
's' => ArgFormat::Str,
'r' => ArgFormat::Repr,
'd' | 'i' => ArgFormat::Dec,
'o' => ArgFormat::Oct,
'x' => ArgFormat::HexLower,
'X' => ArgFormat::HexUpper,
'c' => ArgFormat::Char,
'%' => match named_or_positional {
NamedOrPositional::Positional => {
result.append_literal('%');
continue;
}
NamedOrPositional::Named(_) => {
// In both Python and Starlark Go implementations
// `%(n)%` consumes named argument, but
// `%%` does not consume positional argument.
// So `Percent` variant is added only when `ArgFormat` is `Named`.
ArgFormat::Percent
}
},
c => return Err(StringInterpolationError::UnknownSpecifier(c).into()),
};
match named_or_positional {
NamedOrPositional::Positional => {
result.positional_count += 1;
}
NamedOrPositional::Named(..) => {
result.named_count += 1;
}
}
result
.parameters
.push((named_or_positional, format, String::new()));
}
}
Ok(result)
}
/// Apply a percent-interpolation string to a value.
pub fn apply<'v>(self, argument: Value<'v>, heap: &'v Heap) -> anyhow::Result<String> {
let mut r = self.init;
let owned_tuple;
let mut arg_iter: Box<dyn Iterator<Item = Value>> =
if self.named_count > 0 && self.positional_count == 0 {
box iter::empty()
} else {
match Tuple::from_value(argument) {
Some(x) => {
owned_tuple = x; | box owned_tuple.iter()
}
None => box iter::once(argument),
}
};
for (named_or_positional, format, tail) in self.parameters {
let arg = match named_or_positional {
NamedOrPositional::Positional => match arg_iter.next() {
Some(a) => a,
None => return Err(StringInterpolationError::NotEnoughParameters.into()),
},
NamedOrPositional::Named(name) => argument.at(heap.alloc(name), heap)?,
};
format.format_arg(&mut r, arg)?;
r.push_str(&tail);
}
if arg_iter.next().is_some() {
return Err(StringInterpolationError::TooManyParameters.into());
}
Ok(r)
}
} | random_line_split | |
interpolation.rs | /*
* Copyright 2018 The Starlark in Rust Authors.
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! String interpolation-related code.
//! Based on <https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting>
use crate::values::{tuple::Tuple, Heap, Value, ValueLike};
use gazebo::prelude::*;
use std::{fmt::Write, iter};
use thiserror::Error;
const AFTER_PERCENT: &str = "'%' must be followed by an optional name and a specifier ('s', 'r', 'd', 'i', 'o', 'x', 'X', 'c') or '%'";
/// Operator `%` format or evaluation errors
#[derive(Clone, Dupe, Debug, Error)]
enum StringInterpolationError {
#[error(
"Unexpected EOF in format string. Could not find ')' when parsing '%(name)f' expression"
)]
UnexpectedEOFClosingParen,
/// `%` must be followed by specifier.
#[error("Unexpected EOF in format string. {}", AFTER_PERCENT)]
UnexpectedEOFPercent,
#[error("Unknown format string specifier '{}'. {}", .0.escape_default(), AFTER_PERCENT)]
UnknownSpecifier(char),
#[error("Invalid UTF-8 codepoint 0x{:x} passed for %c formatter", .0)]
ValueNotInUTFRange(u32),
/// Interpolation parameter is too big for the format string.
#[error("Too many arguments for format string")]
TooManyParameters,
/// Interpolation parameter is too small for the format string.
#[error("Not enough arguments for format string")]
NotEnoughParameters,
#[error("'%c' formatter requires a single-character string")]
ValueNotChar,
}
/// Format char
enum ArgFormat {
// str(x)
Str,
// repr(x)
Repr,
// signed integer decimal
Dec,
// signed octal
Oct,
// signed hexadecimal, lowercase
HexLower,
// signed hexadecimal, uppercase
HexUpper,
// x for string, chr(x) for int
Char,
// `%` sign
Percent,
}
impl ArgFormat {
fn format_arg(&self, out: &mut String, arg: Value) -> anyhow::Result<()> {
match self {
// Equivalent to `write!(out, "{}", arg.to_str()).unwrap()`, but avoid
// allocating a separate `String` on the way.
ArgFormat::Str => match arg.unpack_str() {
None => arg.collect_repr(out),
Some(v) => out.push_str(v),
},
ArgFormat::Repr => arg.collect_repr(out),
ArgFormat::Dec => write!(out, "{}", arg.to_int()?).unwrap(),
ArgFormat::Oct => {
let v = arg.to_int()?;
write!(
out,
"{}{:o}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::HexLower => {
let v = arg.to_int()?;
write!(
out,
"{}{:x}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::HexUpper => {
let v = arg.to_int()?;
write!(
out,
"{}{:X}",
if v < 0 { "-" } else { "" },
v.wrapping_abs() as u64
)
.unwrap();
}
ArgFormat::Char => match arg.unpack_str() {
Some(arg) => {
let mut chars = arg.chars();
let c = chars.next();
match c {
Some(c) if chars.next().is_none() => out.push(c),
_ => return Err(StringInterpolationError::ValueNotChar.into()),
}
}
None => {
let i = arg.to_int()? as u32;
match std::char::from_u32(i) {
Some(c) => write!(out, "{}", c).unwrap(),
None => {
return Err(StringInterpolationError::ValueNotInUTFRange(i).into());
}
}
}
},
ArgFormat::Percent => {
out.push('%');
}
}
Ok(())
}
}
// %(name)s or %s
enum NamedOrPositional {
Named(String),
Positional,
}
/// Implement Python `%` format strings.
pub struct Interpolation {
/// String before first parameter
init: String,
/// Number of positional arguments
positional_count: usize,
/// Number of named arguments
named_count: usize,
/// Arguments followed by uninterpreted strings
parameters: Vec<(NamedOrPositional, ArgFormat, String)>,
}
impl Interpolation {
fn append_literal(&mut self, c: char) {
if let Some(p) = self.parameters.last_mut() {
p.2.push(c);
} else {
self.init.push(c)
}
}
/// Parse a percent-interpolation string, returning an `Err` if the string is invalid.
pub fn parse(format: &str) -> anyhow::Result<Self> |
/// Apply a percent-interpolation string to a value.
pub fn apply<'v>(self, argument: Value<'v>, heap: &'v Heap) -> anyhow::Result<String> {
let mut r = self.init;
let owned_tuple;
let mut arg_iter: Box<dyn Iterator<Item = Value>> =
if self.named_count > 0 && self.positional_count == 0 {
box iter::empty()
} else {
match Tuple::from_value(argument) {
Some(x) => {
owned_tuple = x;
box owned_tuple.iter()
}
None => box iter::once(argument),
}
};
for (named_or_positional, format, tail) in self.parameters {
let arg = match named_or_positional {
NamedOrPositional::Positional => match arg_iter.next() {
Some(a) => a,
None => return Err(StringInterpolationError::NotEnoughParameters.into()),
},
NamedOrPositional::Named(name) => argument.at(heap.alloc(name), heap)?,
};
format.format_arg(&mut r, arg)?;
r.push_str(&tail);
}
if arg_iter.next().is_some() {
return Err(StringInterpolationError::TooManyParameters.into());
}
Ok(r)
}
}
| {
let mut result = Self {
init: String::new(),
positional_count: 0,
named_count: 0,
parameters: Vec::new(),
};
let mut chars = format.chars();
while let Some(c) = chars.next() {
if c != '%' {
result.append_literal(c);
} else {
let next = chars
.next()
.ok_or(StringInterpolationError::UnexpectedEOFPercent)?;
let (named_or_positional, format_char) = if next == '(' {
let mut name = String::new();
loop {
match chars.next() {
None => {
return Err(
StringInterpolationError::UnexpectedEOFClosingParen.into()
);
}
Some(')') => {
break;
}
Some(c) => name.push(c),
}
}
(
NamedOrPositional::Named(name),
chars
.next()
.ok_or(StringInterpolationError::UnexpectedEOFPercent)?,
)
} else {
(NamedOrPositional::Positional, next)
};
let format = match format_char {
's' => ArgFormat::Str,
'r' => ArgFormat::Repr,
'd' | 'i' => ArgFormat::Dec,
'o' => ArgFormat::Oct,
'x' => ArgFormat::HexLower,
'X' => ArgFormat::HexUpper,
'c' => ArgFormat::Char,
'%' => match named_or_positional {
NamedOrPositional::Positional => {
result.append_literal('%');
continue;
}
NamedOrPositional::Named(_) => {
// In both Python and Starlark Go implementations
// `%(n)%` consumes named argument, but
// `%%` does not consume positional argument.
// So `Percent` variant is added only when `ArgFormat` is `Named`.
ArgFormat::Percent
}
},
c => return Err(StringInterpolationError::UnknownSpecifier(c).into()),
};
match named_or_positional {
NamedOrPositional::Positional => {
result.positional_count += 1;
}
NamedOrPositional::Named(..) => {
result.named_count += 1;
}
}
result
.parameters
.push((named_or_positional, format, String::new()));
}
}
Ok(result)
} | identifier_body |
play.go | // Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package play provides common code for playing videos on Chrome.
package play
import (
"context"
"image"
"image/color"
"image/png"
"math"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"time"
"chromiumos/tast/common/perf"
"chromiumos/tast/errors"
"chromiumos/tast/local/audio/crastestclient"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/colorcmp"
"chromiumos/tast/local/graphics"
"chromiumos/tast/local/input"
"chromiumos/tast/local/media/devtools"
"chromiumos/tast/local/media/logging"
"chromiumos/tast/local/screenshot"
"chromiumos/tast/testing"
"chromiumos/tast/timing"
)
// VideoType represents a type of video played in TestPlay.
type VideoType int
const (
// NormalVideo represents a normal video. (i.e. non-MSE video.)
NormalVideo VideoType = iota
// MSEVideo represents a video requiring Media Source Extensions (MSE).
MSEVideo
// DRMVideo represents a video requiring Digital Rights Management (DRM).
DRMVideo
)
// VerifyHWAcceleratorMode represents a mode of TestPlay.
type VerifyHWAcceleratorMode int
const (
// NoVerifyHWAcceleratorUsed is a mode that plays a video without verifying
// hardware accelerator usage.
NoVerifyHWAcceleratorUsed VerifyHWAcceleratorMode = iota
// VerifyHWAcceleratorUsed is a mode that verifies a video is played using a
// hardware accelerator.
VerifyHWAcceleratorUsed
// VerifyNoHWAcceleratorUsed is a mode that verifies a video is not played
// using a hardware accelerator, i.e. it's using software decoding.
VerifyNoHWAcceleratorUsed
// VerifyHWDRMUsed is a mode that verifies a video is played using a hardware
// accelerator with HW DRM protection.
VerifyHWDRMUsed
)
// This is how long we need to wait before taking a screenshot in the
// TestPlayAndScreenshot case. This is necessary to ensure the video is on the screen
// and to let the "Press Esc to exit full screen" message disappear.
const delayToScreenshot = 7 * time.Second
// MSEDataFiles returns a list of required files for tests that play MSE videos.
func MSEDataFiles() []string {
return []string{
"shaka.html",
"third_party/shaka-player/shaka-player.compiled.debug.js",
"third_party/shaka-player/shaka-player.compiled.debug.map",
}
}
// DRMDataFiles returns a list of required files for tests that play DRM videos.
func DRMDataFiles() []string {
return []string{
"shaka_drm.html",
"third_party/shaka-player/shaka-player.compiled.debug.js",
"third_party/shaka-player/shaka-player.compiled.debug.map",
}
}
// loadPage opens a new tab to load the specified webpage.
// Note that if err != nil, conn is nil.
func loadPage(ctx context.Context, cs ash.ConnSource, url string) (*chrome.Conn, error) {
ctx, st := timing.Start(ctx, "load_page")
defer st.End()
conn, err := cs.NewConn(ctx, url)
if err != nil {
return nil, errors.Wrapf(err, "failed to open %v", url)
}
return conn, err
}
// playVideo invokes loadVideo(), plays a normal video in video.html, and checks if it has progress.
// videoFile is the file name which is played there.
// url is the URL of the video playback testing webpage.
func playVideo(ctx context.Context, cs ash.ConnSource, videoFile, url string, unmutePlayer bool) (bool, error) {
ctx, st := timing.Start(ctx, "play_video")
defer st.End()
conn, err := loadPage(ctx, cs, url)
if err != nil {
return false, err
}
defer conn.Close()
defer conn.CloseTarget(ctx)
observer, err := conn.GetMediaPropertiesChangedObserver(ctx)
if err != nil {
return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
}
if err := conn.Call(ctx, nil, "playUntilEnd", videoFile, unmutePlayer); err != nil {
return false, err
}
isPlatform, _, err := devtools.GetVideoDecoder(ctx, observer, url)
return isPlatform, err
}
// playMSEVideo plays an MSE video stream via Shaka player, and checks its play progress.
// mpdFile is the name of MPD file for the video stream.
// url is the URL of the shaka player webpage.
func playMSEVideo(ctx context.Context, cs ash.ConnSource, mpdFile, url string) (bool, error) {
ctx, st := timing.Start(ctx, "play_mse_video")
defer st.End()
conn, err := loadPage(ctx, cs, url)
if err != nil {
return false, err
}
defer conn.Close()
defer conn.CloseTarget(ctx)
observer, err := conn.GetMediaPropertiesChangedObserver(ctx)
if err != nil {
return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
}
if err := conn.Call(ctx, nil, "play_shaka", mpdFile); err != nil {
return false, err
}
isPlatform, _, err := devtools.GetVideoDecoder(ctx, observer, url)
return isPlatform, err
}
// playDRMVideo plays a DRM-protected MSE video stream via Shaka player, and
// checks its play progress. After it's done, it goes full screen and takes a
// screenshot and verifies the contents are all black.
// mpdFile is the name of MPD file for the video stream.cs ash.ConnSource,
// url is the URL of the shaka player webpage.
func playDRMVideo(ctx context.Context, s *testing.State, cs ash.ConnSource, cr *chrome.Chrome, mpdFile, url string) (bool, error) {
ctx, st := timing.Start(ctx, "play_drm_video")
defer st.End()
conn, err := loadPage(ctx, cs, url)
if err != nil {
return false, err
}
defer conn.Close()
defer conn.CloseTarget(ctx)
observer, err := conn.GetMediaPropertiesChangedObserver(ctx)
if err != nil {
return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
}
if err := conn.Call(ctx, nil, "play_shaka_drm", mpdFile); err != nil {
return false, err
}
// Now go full screen, take a screenshot and verify it's all black.
// Make the video go to full screen mode by pressing 'f': requestFullScreen() needs a user gesture.
ew, err := input.Keyboard(ctx)
if err != nil {
return false, errors.Wrap(err, "failed to initialize the keyboard writer")
}
defer ew.Close()
if err := ew.Type(ctx, "f"); err != nil {
return false, errors.Wrap(err, "failed to inject the 'f' key")
}
tconn, err := cr.TestAPIConn(ctx)
if err != nil {
return false, errors.Wrap(err, "failed to connect to test API")
}
if err := ash.WaitForFullScreen(ctx, tconn); err != nil {
return false, errors.Wrap(err, "failed waiting for full screen")
}
// Take the screenshot, we don't need to wait because we are only verifying
// that the vast majority is black, so things like 'hit Esc to exist full screen'
// won't be an issue.
im, err := screenshot.GrabScreenshot(ctx, cr)
if err != nil {
return false, errors.Wrap(err, "failed taking screenshot")
}
// Verify that over 92% of the image is solid black. This is true because for
// HW DRM, you cannot actually screenshot the video and it will be replaced by
// solid black in the compositor. From testing, we have seen this be as low as
// 0.94, so set the threshold at 0.92.
color, ratio := colorcmp.DominantColor(im)
if ratio < 0.92 || !colorcmp.ColorsMatch(color, colorcmp.RGB(0, 0, 0), 1) {
return false, errors.Errorf("screenshot did not have solid black, instead got %v at ratio %0.2f",
colorcmp.ColorStr(color), ratio)
}
return devtools.CheckHWDRMPipeline(ctx, observer, url)
}
// seekVideoRepeatedly seeks the video controlled by conn up to numSeeks times,
// saving performance metrics (average seek time, percentage of completed
// seeks) to outDir.
// Returns an error if a seek fails or if the metrics cannot be saved.
func seekVideoRepeatedly(ctx context.Context, conn *chrome.Conn, outDir string, numSeeks int) error {
	ctx, st := timing.Start(ctx, "seek_video_repeatedly")
	defer st.End()
	p := perf.NewValues()
	startTime := time.Now()
	prevSeekCount := 0
	seekCount := 0
	for i := 0; i < numSeeks; i++ {
		if err := conn.Call(ctx, &seekCount, "randomSeek"); err != nil {
			// If the test times out, Call() might be interrupted and return a
			// zero seekCount; in that case use the last known good amount.
			if seekCount == 0 {
				seekCount = prevSeekCount
			}
			return errors.Wrapf(err, "error while seeking, completed %d/%d seeks", seekCount, numSeeks)
		}
		if seekCount == numSeeks {
			break
		}
		prevSeekCount = seekCount
	}
	elapsed := time.Since(startTime).Seconds()
	// NOTE(review): the +1 presumably accounts for seekCount being reported
	// before the final seek completes; clamp to numSeeks so the percentage
	// below never exceeds 100 — confirm against the randomSeek JS helper.
	completedSeeks := math.Min(float64(seekCount+1), float64(numSeeks))
	p.Set(perf.Metric{
		Name:      "average_seek_time",
		Unit:      "s",
		Direction: perf.SmallerIsBetter,
	}, elapsed/completedSeeks)
	p.Set(perf.Metric{
		Name:      "completed_seeks",
		Unit:      "percent",
		Direction: perf.BiggerIsBetter,
	}, float64(100.0*completedSeeks/float64(numSeeks)))
	testing.ContextLog(ctx, p)
	// The previous code ignored Save()'s error, which could silently drop the
	// metrics this function exists to record.
	if err := p.Save(outDir); err != nil {
		return errors.Wrap(err, "failed to save perf values")
	}
	return nil
}
// playSeekVideo plays the video referenced by videoFile while repeatedly and
// randomly seeking into it numSeeks times, writing seek metrics to outDir.
// videoFile is the file name which is played and seeked there.
// baseURL is the base URL which serves the video playback testing webpage.
func playSeekVideo(ctx context.Context, cs ash.ConnSource, videoFile, baseURL, outDir string, numSeeks int) error {
	ctx, st := timing.Start(ctx, "play_seek_video")
	defer st.End()
	// Open the video playback test page in a new tab.
	conn, err := loadPage(ctx, cs, baseURL+"/video.html")
	if err != nil {
		return err
	}
	defer conn.Close()
	defer conn.CloseTarget(ctx)
	// Start looping playback of the requested file.
	if err := conn.Call(ctx, nil, "playRepeatedly", videoFile); err != nil {
		return err
	}
	// Wait until the video element has advanced so that chrome:media-internals
	// has time to fill in its fields.
	const playbackAdvanced = "document.getElementsByTagName('video')[0].currentTime > 1"
	if err := conn.WaitForExpr(ctx, playbackAdvanced); err != nil {
		return errors.Wrap(err, "failed waiting for video to advance playback")
	}
	return seekVideoRepeatedly(ctx, conn, outDir, numSeeks)
}
// ColorDistance returns the maximum absolute difference between each component of a and b.
// Both a and b are assumed to be RGBA colors.
func ColorDistance(a, b color.Color) int {
aR, aG, aB, aA := a.RGBA()
bR, bG, bB, bA := b.RGBA()
abs := func(a int) int {
if a < 0 {
return -a
}
return a
}
max := func(nums ...int) int {
m := 0
for _, n := range nums {
if n > m {
m = n
}
}
return m
}
// Interestingly, the RGBA method returns components in the range [0, 0xFFFF] corresponding
// to the 8-bit values multiplied by 0x101 (see https://blog.golang.org/image). Therefore,
// we must shift them to the right by 8 so that they are in the more typical [0, 255] range.
return max(abs(int(aR>>8)-int(bR>>8)),
abs(int(aG>>8)-int(bG>>8)),
abs(int(aB>>8)-int(bB>>8)),
abs(int(aA>>8)-int(bA>>8)))
}
// ColorSamplingPointsForStillColorsVideo returns a map of points that are considered to be
// interesting in the rendering of the still-colors-*.mp4 test videos. The key in the map is
// a name for the corresponding point. There are two categories of points:
//
// - Outer corners: the four absolute corners of the video offset by 1 to ignore acceptable
// color blending artifacts on the edges. However, the outer bottom-right is not offset
// because we never expect blending artifacts there.
//
// - Inner corners: 4 stencils (one for each corner of the video). Each stencil is composed
// of 4 sampling points arranged as a square. The expectation is that for each stencil, 3
// of its points fall on the interior border of the test video while the remaining point
// falls inside one of the color rectangles. This helps us detect undesired
// stretching/shifting/rotation/mirroring. The naming convention for each point of a
// stencil is as follows:
//
// inner_Y_X_00: the corner of the stencil closest to the Y-X corner of the video.
// inner_Y_X_01: the corner of the stencil that's in the interior X border of the video.
// inner_Y_X_10: the corner of the stencil that's in the interior Y border of the video.
// inner_Y_X_11: the only corner of the stencil that's not on the border strip.
//
// For example, the top-right corner of the test video looks like this:
//
// MMMMMMMMMMMMMMMM
// MMMMMMMMMM2MMM0M
// MMMMMMMMMMMMMMMM
// 3 M1M
// MMM
//
// Where 'M' is the magenta interior border. So the names of each of the points 0, 1, 2, 3
// are:
//
// 0: inner_top_right_00
// 1: inner_top_right_01
// 2: inner_top_right_10
// 3: inner_top_right_11
func ColorSamplingPointsForStillColorsVideo(videoW, videoH int) map[string]image.Point {
outerCorners := map[string]image.Point{
"outer_top_left": {1, 1},
"outer_top_right": {(videoW - 1) - 1, 1},
"outer_bottom_right": {videoW - 1, videoH - 1},
"outer_bottom_left": {1, (videoH - 1) - 1},
}
edgeOffset := 5
stencilW := 5
innerCorners := map[string]image.Point{
"inner_top_left_00": {edgeOffset, edgeOffset},
"inner_top_left_01": {edgeOffset, edgeOffset + stencilW},
"inner_top_left_10": {edgeOffset + stencilW, edgeOffset},
"inner_top_left_11": {edgeOffset + stencilW, edgeOffset + stencilW},
"inner_top_right_00": {(videoW - 1) - edgeOffset, edgeOffset},
"inner_top_right_01": {(videoW - 1) - edgeOffset, edgeOffset + stencilW},
"inner_top_right_10": {(videoW - 1) - edgeOffset - stencilW, edgeOffset},
"inner_top_right_11": {(videoW - 1) - edgeOffset - stencilW, edgeOffset + stencilW},
"inner_bottom_right_00": {(videoW - 1) - edgeOffset, (videoH - 1) - edgeOffset},
"inner_bottom_right_01": {(videoW - 1) - edgeOffset, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_right_10": {(videoW - 1) - edgeOffset - stencilW, (videoH - 1) - edgeOffset},
"inner_bottom_right_11": {(videoW - 1) - edgeOffset - stencilW, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_left_00": {edgeOffset, (videoH - 1) - edgeOffset},
"inner_bottom_left_01": {edgeOffset, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_left_10": {edgeOffset + stencilW, (videoH - 1) - edgeOffset},
"inner_bottom_left_11": {edgeOffset + stencilW, (videoH - 1) - edgeOffset - stencilW},
}
samples := map[string]image.Point{}
for k, v := range innerCorners {
samples[k] = v
}
for k, v := range outerCorners {
samples[k] = v
}
return samples
}
// isVideoPadding returns true iff c corresponds to the expected color of the padding that a
// video gets when in full screen so that it appears centered: black within a tolerance.
func isVideoPadding(c color.Color) bool {
	// The tolerance was picked empirically. For example, on kukui, the first
	// padding row below the video has a color of (20, 1, 22, 255).
	const tolerance = 25
	return ColorDistance(c, color.RGBA{A: 255}) < tolerance
}
// TestPlay checks that the video file named filename can be played using Chrome.
// videotype represents a type of a given video. If it is MSEVideo, filename is a name
// of MPD file.
// If mode is VerifyHWAcceleratorUsed, this function also checks if hardware accelerator was used.
func TestPlay(ctx context.Context, s *testing.State, cs ash.ConnSource, cr *chrome.Chrome,
filename string, videotype VideoType, mode VerifyHWAcceleratorMode, unmutePlayer bool) error {
if unmutePlayer && videotype != NormalVideo {
return errors.New("got unmutePlayer = true, expected false: unmutePlayer " +
"is only implemented for videoType = NormalVideo")
}
vl, err := logging.NewVideoLogger()
if err != nil {
return err
}
defer vl.Close()
if err := crastestclient.Mute(ctx); err != nil {
return err
}
defer crastestclient.Unmute(ctx)
server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
defer server.Close()
var playErr error
var url string
usesPlatformVideoDecoder, isHwDrmPipeline := false, false
switch videotype {
case NormalVideo:
url = server.URL + "/video.html"
usesPlatformVideoDecoder, playErr = playVideo(ctx, cs, filename, url, unmutePlayer)
case MSEVideo:
url = server.URL + "/shaka.html"
usesPlatformVideoDecoder, playErr = playMSEVideo(ctx, cs, filename, url)
case DRMVideo:
url = server.URL + "/shaka_drm.html"
isHwDrmPipeline, playErr = playDRMVideo(ctx, s, cs, cr, filename, url)
}
if playErr != nil {
return errors.Wrapf(err, "failed to play %v (%v): %v", filename, url, playErr)
}
if mode == NoVerifyHWAcceleratorUsed {
// Early return when no verification is needed.
return nil
}
if mode == VerifyHWAcceleratorUsed && !usesPlatformVideoDecoder |
if mode == VerifyNoHWAcceleratorUsed && usesPlatformVideoDecoder {
return errors.New("software decoding was not used when it was expected to")
}
if mode == VerifyHWDRMUsed && !isHwDrmPipeline {
return errors.New("HW DRM video pipeline was not used when it was expected to")
}
return nil
}
// TestSeek checks that the video file named filename can be seeked around.
// It will play the video and seek randomly into it numSeeks times, serving
// the data files via httpHandler and writing metrics to outDir.
func TestSeek(ctx context.Context, httpHandler http.Handler, cs ash.ConnSource, filename, outDir string, numSeeks int) error {
	vl, err := logging.NewVideoLogger()
	if err != nil {
		return err
	}
	defer vl.Close()
	server := httptest.NewServer(httpHandler)
	defer server.Close()
	if err := playSeekVideo(ctx, cs, filename, server.URL, outDir, numSeeks); err != nil {
		// Don't also interpolate err with %v: Wrapf already appends the
		// wrapped error, so the old message printed the error twice.
		return errors.Wrapf(err, "failed to play %v (%v)", filename, server.URL)
	}
	return nil
}
// TestPlayAndScreenshot plays the filename video, switches it to full
// screen mode, takes a screenshot and analyzes the resulting image to
// sample the colors of a few interesting points and compare them against
// expectations. The expectations are defined by refFilename which is a
// PNG file corresponding to the ideally rendered video frame in the absence
// of scaling or artifacts.
//
// Caveat: this test does not disable night light. Night light doesn't
// seem to affect the output of the screenshot tool, but this might
// not hold in the future in case we decide to apply night light at
// compositing time if the hardware does not support the color
// transform matrix.
func TestPlayAndScreenshot(ctx context.Context, s *testing.State, tconn *chrome.TestConn, cs ash.ConnSource, filename, refFilename string) error {
	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()
	// Don't use path.Join() to build the URL: path.Join cleans the result and
	// collapses the "//" in "http://", yielding a malformed "http:/..." URL.
	url := server.URL + "/video.html"
	conn, err := cs.NewConn(ctx, url)
	if err != nil {
		return errors.Wrapf(err, "failed to open %v", url)
	}
	defer conn.Close()
	// For consistency across test runs, ensure that the device is in landscape-primary orientation.
	if err = graphics.RotateDisplayToLandscapePrimary(ctx, tconn); err != nil {
		return errors.Wrap(err, "failed to set display to landscape-primary orientation")
	}
	// Make the video go to full screen mode by pressing 'f': requestFullScreen() needs a user gesture.
	ew, err := input.Keyboard(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to initialize the keyboard writer")
	}
	// Release the keyboard writer; the previous code leaked it.
	defer ew.Close()
	if err := ew.Type(ctx, "f"); err != nil {
		return errors.Wrap(err, "failed to inject the 'f' key")
	}
	// Start playing the video indefinitely.
	if err := conn.Call(ctx, nil, "playRepeatedly", filename); err != nil {
		return errors.Wrapf(err, "failed to play %v", filename)
	}
	// TODO(andrescj): this sleep is here to wait prior to taking the screenshot to make sure the video
	// is on the screen and to let the "Press Esc to exit full screen" message disappear. This is to
	// make sure the video is the only thing on the screen and thus minimize the excuses Chrome would
	// have to not promote it to a HW overlay. Poll instead for two conditions:
	// 1) The screenshot is correct (i.e., do the checks below), and
	// 2) There is a HW overlay.
	if err := testing.Sleep(ctx, delayToScreenshot); err != nil {
		return errors.Wrap(err, "failed to sleep prior to taking screenshot")
	}
	sshotPath := filepath.Join(s.OutDir(), "screenshot.png")
	if err := screenshot.Capture(ctx, sshotPath); err != nil {
		return errors.Wrap(err, "failed to capture screen")
	}
	// Decode the screenshot and rotate it if necessary to make later steps easier.
	f, err := os.Open(sshotPath)
	if err != nil {
		return errors.Wrapf(err, "failed to open %v", sshotPath)
	}
	img, _, err := image.Decode(f)
	// Close the file now because we might open it for writing again later.
	if err := f.Close(); err != nil {
		return errors.Wrapf(err, "failed to close %v", sshotPath)
	}
	if err != nil {
		return errors.Wrapf(err, "could not decode %v", sshotPath)
	}
	if img.Bounds().Dx() < img.Bounds().Dy() {
		s.Log("The screenshot is in portrait orientation; rotating it")
		rotImg := image.NewRGBA(image.Rectangle{image.Point{}, image.Point{img.Bounds().Max.Y, img.Bounds().Max.X}})
		for dstY := 0; dstY < rotImg.Bounds().Dy(); dstY++ {
			for dstX := 0; dstX < rotImg.Bounds().Dx(); dstX++ {
				srcColor := img.At(dstY, img.Bounds().Dy()-1-dstX)
				rotImg.Set(dstX, dstY, srcColor)
			}
		}
		f, err := os.Create(sshotPath)
		if err != nil {
			return errors.Wrapf(err, "could not create the rotated screenshot (%v)", sshotPath)
		}
		defer f.Close()
		if err := png.Encode(f, rotImg); err != nil {
			return errors.Wrapf(err, "could not encode the rotated screenshot (%v)", sshotPath)
		}
		img = rotImg
	}
	// Find the bounds of the video by excluding the black strips on each side.
	xMiddle := img.Bounds().Dx() / 2
	yMiddle := img.Bounds().Dy() / 2
	top := 0
	for ; top < img.Bounds().Dy(); top++ {
		if !isVideoPadding(img.At(xMiddle, top)) {
			break
		}
	}
	bottom := img.Bounds().Dy() - 1
	for ; bottom >= 0; bottom-- {
		if !isVideoPadding(img.At(xMiddle, bottom)) {
			break
		}
	}
	if bottom <= top {
		return errors.New("could not find the top or bottom boundary of the video")
	}
	left := 0
	for ; left < img.Bounds().Dx(); left++ {
		if !isVideoPadding(img.At(left, yMiddle)) {
			break
		}
	}
	right := img.Bounds().Dx() - 1
	for ; right >= 0; right-- {
		if !isVideoPadding(img.At(right, yMiddle)) {
			break
		}
	}
	if right <= left {
		return errors.New("could not find the left or right boundary of the video")
	}
	s.Logf("Video bounds: (left, top) = (%d, %d); (right, bottom) = (%d, %d)",
		left, top, right, bottom)
	// Open the reference file to assert expectations on the screenshot later.
	refPath := s.DataPath(refFilename)
	f, err = os.Open(refPath)
	if err != nil {
		return errors.Wrapf(err, "failed to open %v", refPath)
	}
	defer f.Close()
	refImg, _, err := image.Decode(f)
	if err != nil {
		return errors.Wrapf(err, "could not decode %v", refPath)
	}
	videoW := refImg.Bounds().Dx()
	videoH := refImg.Bounds().Dy()
	// Measurement 1:
	// We'll sample a few interesting pixels and report the color distance with
	// respect to the reference image.
	samples := ColorSamplingPointsForStillColorsVideo(videoW, videoH)
	p := perf.NewValues()
	maxDistance := -1
	maxDistancePoint := ""
	for k, v := range samples {
		// First convert the coordinates from video space to screenshot space.
		videoX := v.X
		videoY := v.Y
		screenX := left + (right-left)*v.X/(videoW-1)
		screenY := top + (bottom-top)*v.Y/(videoH-1)
		// Then report the distance between the expected and actual colors at this location.
		expectedColor := refImg.At(videoX, videoY)
		actualColor := img.At(screenX, screenY)
		distance := ColorDistance(expectedColor, actualColor)
		if distance > maxDistance {
			maxDistance = distance
			maxDistancePoint = k
		}
		if distance != 0 {
			s.Logf("At %v (video space = (%d, %d), screen space = (%d, %d)): expected RGBA = %v; got RGBA = %v; distance = %d",
				k, videoX, videoY, screenX, screenY, expectedColor, actualColor, distance)
		}
		p.Set(perf.Metric{
			Name:      k,
			Unit:      "None",
			Direction: perf.SmallerIsBetter,
		}, float64(distance))
	}
	// The distance threshold was decided by analyzing the data reported above
	// across many devices. It should ideally be smaller, but for now, it seems we
	// have color space handling issues. Nonetheless, this threshold should be
	// enough for detecting major video rendering issues. Note that:
	//
	// 1) We still report the distances as perf values so we can continue to
	// analyze and improve.
	// 2) We don't bother to report a total distance if this threshold is exceeded
	// because it would just make email alerts very noisy.
	if maxDistance > 100 {
		// Best-effort save: the color-distance error below matters more, so
		// only log a save failure instead of masking the real error.
		if err := p.Save(s.OutDir()); err != nil {
			s.Log("Failed to save perf values: ", err)
		}
		return errors.Errorf("the color distance for %v = %d exceeds the threshold (100)", maxDistancePoint, maxDistance)
	}
	// Measurement 2:
	// We report an aggregate distance for the image: we go through all the pixels
	// in the screenshot video to add up all the distances and then normalize by
	// the number of pixels at the end.
	totalDistance := 0.0
	for row := top; row <= bottom; row++ {
		for col := left; col <= right; col++ {
			// First convert the coordinates from screenshot space to video space.
			videoX := (col - left) * (videoW - 1) / (right - left)
			videoY := (row - top) * (videoH - 1) / (bottom - top)
			expectedColor := refImg.At(videoX, videoY)
			actualColor := img.At(col, row)
			totalDistance += float64(ColorDistance(expectedColor, actualColor))
		}
	}
	totalDistance /= float64((right - left + 1) * (bottom - top + 1))
	s.Log("The total distance for the entire image is ", totalDistance)
	p.Set(perf.Metric{
		Name:      "total_distance",
		Unit:      "None",
		Direction: perf.SmallerIsBetter,
	}, totalDistance)
	// The previous code ignored Save()'s error; surface it so missing metrics
	// don't go unnoticed.
	if err := p.Save(s.OutDir()); err != nil {
		return errors.Wrap(err, "failed to save perf values")
	}
	return nil
}
| {
return errors.New("video decode acceleration was not used when it was expected to")
} | conditional_block |
play.go | // Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package play provides common code for playing videos on Chrome.
package play
import (
"context"
"image"
"image/color"
"image/png"
"math"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"time"
"chromiumos/tast/common/perf"
"chromiumos/tast/errors"
"chromiumos/tast/local/audio/crastestclient"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/colorcmp"
"chromiumos/tast/local/graphics"
"chromiumos/tast/local/input"
"chromiumos/tast/local/media/devtools"
"chromiumos/tast/local/media/logging"
"chromiumos/tast/local/screenshot"
"chromiumos/tast/testing"
"chromiumos/tast/timing"
)
// VideoType represents a type of video played in TestPlay.
type VideoType int

const (
	// NormalVideo represents a normal video. (i.e. non-MSE video.)
	NormalVideo VideoType = iota
	// MSEVideo represents a video requiring Media Source Extensions (MSE).
	MSEVideo
	// DRMVideo represents a video requiring Digital Rights Management (DRM).
	DRMVideo
)

// VerifyHWAcceleratorMode represents a mode of TestPlay: which (if any)
// decoding-pipeline property is verified after playback succeeds.
type VerifyHWAcceleratorMode int

const (
	// NoVerifyHWAcceleratorUsed is a mode that plays a video without verifying
	// hardware accelerator usage.
	NoVerifyHWAcceleratorUsed VerifyHWAcceleratorMode = iota
	// VerifyHWAcceleratorUsed is a mode that verifies a video is played using a
	// hardware accelerator.
	VerifyHWAcceleratorUsed
	// VerifyNoHWAcceleratorUsed is a mode that verifies a video is not played
	// using a hardware accelerator, i.e. it's using software decoding.
	VerifyNoHWAcceleratorUsed
	// VerifyHWDRMUsed is a mode that verifies a video is played using a hardware
	// accelerator with HW DRM protection.
	VerifyHWDRMUsed
)

// delayToScreenshot is how long we need to wait before taking a screenshot in
// the TestPlayAndScreenshot case. This is necessary to ensure the video is on
// the screen and to let the "Press Esc to exit full screen" message disappear.
const delayToScreenshot = 7 * time.Second
// MSEDataFiles returns a list of required files for tests that play MSE videos.
func MSEDataFiles() []string {
	const shakaDir = "third_party/shaka-player/"
	return []string{
		"shaka.html",
		shakaDir + "shaka-player.compiled.debug.js",
		shakaDir + "shaka-player.compiled.debug.map",
	}
}
// DRMDataFiles returns a list of required files for tests that play DRM videos.
func DRMDataFiles() []string {
	files := []string{"shaka_drm.html"}
	for _, f := range []string{
		"shaka-player.compiled.debug.js",
		"shaka-player.compiled.debug.map",
	} {
		files = append(files, "third_party/shaka-player/"+f)
	}
	return files
}
// loadPage opens a new tab showing the webpage at url.
// On failure the returned connection is nil.
func loadPage(ctx context.Context, cs ash.ConnSource, url string) (*chrome.Conn, error) {
	ctx, st := timing.Start(ctx, "load_page")
	defer st.End()
	c, err := cs.NewConn(ctx, url)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to open %v", url)
	}
	return c, nil
}
// playVideo plays a normal video in video.html and checks that playback makes
// progress. It reports whether a platform (hardware) video decoder was used.
// videoFile is the file name which is played there.
// url is the URL of the video playback testing webpage.
func playVideo(ctx context.Context, cs ash.ConnSource, videoFile, url string, unmutePlayer bool) (bool, error) {
	ctx, st := timing.Start(ctx, "play_video")
	defer st.End()
	conn, err := loadPage(ctx, cs, url)
	if err != nil {
		return false, err
	}
	defer conn.Close()
	defer conn.CloseTarget(ctx)
	// Attach the media DevTools observer before playback starts so that no
	// property-change events are missed.
	obs, err := conn.GetMediaPropertiesChangedObserver(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
	}
	if err := conn.Call(ctx, nil, "playUntilEnd", videoFile, unmutePlayer); err != nil {
		return false, err
	}
	usedPlatformDecoder, _, err := devtools.GetVideoDecoder(ctx, obs, url)
	return usedPlatformDecoder, err
}
// playMSEVideo plays an MSE video stream via Shaka player and checks its play
// progress. It reports whether a platform (hardware) video decoder was used.
// mpdFile is the name of the MPD file for the video stream.
// url is the URL of the shaka player webpage.
func playMSEVideo(ctx context.Context, cs ash.ConnSource, mpdFile, url string) (bool, error) {
	ctx, st := timing.Start(ctx, "play_mse_video")
	defer st.End()
	conn, err := loadPage(ctx, cs, url)
	if err != nil {
		return false, err
	}
	defer conn.Close()
	defer conn.CloseTarget(ctx)
	// Attach the media DevTools observer before playback starts so that no
	// property-change events are missed.
	obs, err := conn.GetMediaPropertiesChangedObserver(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
	}
	if err := conn.Call(ctx, nil, "play_shaka", mpdFile); err != nil {
		return false, err
	}
	usedPlatformDecoder, _, err := devtools.GetVideoDecoder(ctx, obs, url)
	return usedPlatformDecoder, err
}
// playDRMVideo plays a DRM-protected MSE video stream via Shaka player, and
// checks its play progress. After it's done, it goes full screen, takes a
// screenshot and verifies the contents are all black.
// mpdFile is the name of the MPD file for the video stream.
// url is the URL of the shaka player webpage.
// The returned bool reports whether the HW DRM pipeline was used (as
// determined by devtools.CheckHWDRMPipeline).
func playDRMVideo(ctx context.Context, s *testing.State, cs ash.ConnSource, cr *chrome.Chrome, mpdFile, url string) (bool, error) {
	ctx, st := timing.Start(ctx, "play_drm_video")
	defer st.End()
	conn, err := loadPage(ctx, cs, url)
	if err != nil {
		return false, err
	}
	defer conn.Close()
	defer conn.CloseTarget(ctx)
	// Attach the media DevTools observer before playback so no events are missed.
	observer, err := conn.GetMediaPropertiesChangedObserver(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
	}
	if err := conn.Call(ctx, nil, "play_shaka_drm", mpdFile); err != nil {
		return false, err
	}
	// Now go full screen, take a screenshot and verify it's all black.
	// Make the video go to full screen mode by pressing 'f': requestFullScreen() needs a user gesture.
	ew, err := input.Keyboard(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to initialize the keyboard writer")
	}
	defer ew.Close()
	if err := ew.Type(ctx, "f"); err != nil {
		return false, errors.Wrap(err, "failed to inject the 'f' key")
	}
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to connect to test API")
	}
	if err := ash.WaitForFullScreen(ctx, tconn); err != nil {
		return false, errors.Wrap(err, "failed waiting for full screen")
	}
	// Take the screenshot; we don't need to wait because we are only verifying
	// that the vast majority is black, so things like the 'hit Esc to exit full
	// screen' message won't be an issue.
	im, err := screenshot.GrabScreenshot(ctx, cr)
	if err != nil {
		return false, errors.Wrap(err, "failed taking screenshot")
	}
	// Verify that over 92% of the image is solid black. This is true because for
	// HW DRM, you cannot actually screenshot the video and it will be replaced by
	// solid black in the compositor. From testing, we have seen this be as low as
	// 0.94, so set the threshold at 0.92.
	color, ratio := colorcmp.DominantColor(im)
	if ratio < 0.92 || !colorcmp.ColorsMatch(color, colorcmp.RGB(0, 0, 0), 1) {
		return false, errors.Errorf("screenshot did not have solid black, instead got %v at ratio %0.2f",
			colorcmp.ColorStr(color), ratio)
	}
	return devtools.CheckHWDRMPipeline(ctx, observer, url)
}
// seekVideoRepeatedly seeks the video controlled by conn up to numSeeks times,
// saving performance metrics (average seek time, percentage of completed
// seeks) to outDir.
// Returns an error if a seek fails or if the metrics cannot be saved.
func seekVideoRepeatedly(ctx context.Context, conn *chrome.Conn, outDir string, numSeeks int) error {
	ctx, st := timing.Start(ctx, "seek_video_repeatedly")
	defer st.End()
	p := perf.NewValues()
	startTime := time.Now()
	prevSeekCount := 0
	seekCount := 0
	for i := 0; i < numSeeks; i++ {
		if err := conn.Call(ctx, &seekCount, "randomSeek"); err != nil {
			// If the test times out, Call() might be interrupted and return a
			// zero seekCount; in that case use the last known good amount.
			if seekCount == 0 {
				seekCount = prevSeekCount
			}
			return errors.Wrapf(err, "error while seeking, completed %d/%d seeks", seekCount, numSeeks)
		}
		if seekCount == numSeeks {
			break
		}
		prevSeekCount = seekCount
	}
	elapsed := time.Since(startTime).Seconds()
	// NOTE(review): the +1 presumably accounts for seekCount being reported
	// before the final seek completes; clamp to numSeeks so the percentage
	// below never exceeds 100 — confirm against the randomSeek JS helper.
	completedSeeks := math.Min(float64(seekCount+1), float64(numSeeks))
	p.Set(perf.Metric{
		Name:      "average_seek_time",
		Unit:      "s",
		Direction: perf.SmallerIsBetter,
	}, elapsed/completedSeeks)
	p.Set(perf.Metric{
		Name:      "completed_seeks",
		Unit:      "percent",
		Direction: perf.BiggerIsBetter,
	}, float64(100.0*completedSeeks/float64(numSeeks)))
	testing.ContextLog(ctx, p)
	// The previous code ignored Save()'s error, which could silently drop the
	// metrics this function exists to record.
	if err := p.Save(outDir); err != nil {
		return errors.Wrap(err, "failed to save perf values")
	}
	return nil
}
// playSeekVideo invokes loadVideo() then plays the video referenced by videoFile
// while repeatedly and randomly seeking into it numSeeks. It returns an error if
// seeking did not succeed for some reason.
// videoFile is the file name which is played and seeked there.
// baseURL is the base URL which serves video playback testing webpage.
func playSeekVideo(ctx context.Context, cs ash.ConnSource, videoFile, baseURL, outDir string, numSeeks int) error |
// ColorDistance returns the maximum absolute difference between each component of a and b.
// Both a and b are assumed to be RGBA colors.
func ColorDistance(a, b color.Color) int {
aR, aG, aB, aA := a.RGBA()
bR, bG, bB, bA := b.RGBA()
abs := func(a int) int {
if a < 0 {
return -a
}
return a
}
max := func(nums ...int) int {
m := 0
for _, n := range nums {
if n > m {
m = n
}
}
return m
}
// Interestingly, the RGBA method returns components in the range [0, 0xFFFF] corresponding
// to the 8-bit values multiplied by 0x101 (see https://blog.golang.org/image). Therefore,
// we must shift them to the right by 8 so that they are in the more typical [0, 255] range.
return max(abs(int(aR>>8)-int(bR>>8)),
abs(int(aG>>8)-int(bG>>8)),
abs(int(aB>>8)-int(bB>>8)),
abs(int(aA>>8)-int(bA>>8)))
}
// ColorSamplingPointsForStillColorsVideo returns a map of points that are considered to be
// interesting in the rendering of the still-colors-*.mp4 test videos. The key in the map is
// a name for the corresponding point. There are two categories of points:
//
// - Outer corners: the four absolute corners of the video offset by 1 to ignore acceptable
// color blending artifacts on the edges. However, the outer bottom-right is not offset
// because we never expect blending artifacts there.
//
// - Inner corners: 4 stencils (one for each corner of the video). Each stencil is composed
// of 4 sampling points arranged as a square. The expectation is that for each stencil, 3
// of its points fall on the interior border of the test video while the remaining point
// falls inside one of the color rectangles. This helps us detect undesired
// stretching/shifting/rotation/mirroring. The naming convention for each point of a
// stencil is as follows:
//
// inner_Y_X_00: the corner of the stencil closest to the Y-X corner of the video.
// inner_Y_X_01: the corner of the stencil that's in the interior X border of the video.
// inner_Y_X_10: the corner of the stencil that's in the interior Y border of the video.
// inner_Y_X_11: the only corner of the stencil that's not on the border strip.
//
// For example, the top-right corner of the test video looks like this:
//
// MMMMMMMMMMMMMMMM
// MMMMMMMMMM2MMM0M
// MMMMMMMMMMMMMMMM
// 3 M1M
// MMM
//
// Where 'M' is the magenta interior border. So the names of each of the points 0, 1, 2, 3
// are:
//
// 0: inner_top_right_00
// 1: inner_top_right_01
// 2: inner_top_right_10
// 3: inner_top_right_11
func ColorSamplingPointsForStillColorsVideo(videoW, videoH int) map[string]image.Point {
outerCorners := map[string]image.Point{
"outer_top_left": {1, 1},
"outer_top_right": {(videoW - 1) - 1, 1},
"outer_bottom_right": {videoW - 1, videoH - 1},
"outer_bottom_left": {1, (videoH - 1) - 1},
}
edgeOffset := 5
stencilW := 5
innerCorners := map[string]image.Point{
"inner_top_left_00": {edgeOffset, edgeOffset},
"inner_top_left_01": {edgeOffset, edgeOffset + stencilW},
"inner_top_left_10": {edgeOffset + stencilW, edgeOffset},
"inner_top_left_11": {edgeOffset + stencilW, edgeOffset + stencilW},
"inner_top_right_00": {(videoW - 1) - edgeOffset, edgeOffset},
"inner_top_right_01": {(videoW - 1) - edgeOffset, edgeOffset + stencilW},
"inner_top_right_10": {(videoW - 1) - edgeOffset - stencilW, edgeOffset},
"inner_top_right_11": {(videoW - 1) - edgeOffset - stencilW, edgeOffset + stencilW},
"inner_bottom_right_00": {(videoW - 1) - edgeOffset, (videoH - 1) - edgeOffset},
"inner_bottom_right_01": {(videoW - 1) - edgeOffset, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_right_10": {(videoW - 1) - edgeOffset - stencilW, (videoH - 1) - edgeOffset},
"inner_bottom_right_11": {(videoW - 1) - edgeOffset - stencilW, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_left_00": {edgeOffset, (videoH - 1) - edgeOffset},
"inner_bottom_left_01": {edgeOffset, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_left_10": {edgeOffset + stencilW, (videoH - 1) - edgeOffset},
"inner_bottom_left_11": {edgeOffset + stencilW, (videoH - 1) - edgeOffset - stencilW},
}
samples := map[string]image.Point{}
for k, v := range innerCorners {
samples[k] = v
}
for k, v := range outerCorners {
samples[k] = v
}
return samples
}
// isVideoPadding returns true iff c corresponds to the expected color of the padding that a
// video gets when in full screen so that it appears centered: black within a tolerance.
func isVideoPadding(c color.Color) bool {
	// The tolerance was picked empirically. For example, on kukui, the first
	// padding row below the video has a color of (20, 1, 22, 255).
	const tolerance = 25
	black := color.RGBA{A: 255} // opaque black
	return ColorDistance(c, black) < tolerance
}
// TestPlay checks that the video file named filename can be played using Chrome.
// videotype represents a type of a given video. If it is MSEVideo, filename is a name
// of MPD file.
// If mode is VerifyHWAcceleratorUsed, this function also checks if hardware accelerator was used.
func TestPlay(ctx context.Context, s *testing.State, cs ash.ConnSource, cr *chrome.Chrome,
	filename string, videotype VideoType, mode VerifyHWAcceleratorMode, unmutePlayer bool) error {
	if unmutePlayer && videotype != NormalVideo {
		return errors.New("got unmutePlayer = true, expected false: unmutePlayer " +
			"is only implemented for videoType = NormalVideo")
	}
	vl, err := logging.NewVideoLogger()
	if err != nil {
		return err
	}
	defer vl.Close()
	// Mute audio for the duration of the test.
	if err := crastestclient.Mute(ctx); err != nil {
		return err
	}
	defer crastestclient.Unmute(ctx)
	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()
	var playErr error
	var url string
	usesPlatformVideoDecoder, isHwDrmPipeline := false, false
	switch videotype {
	case NormalVideo:
		url = server.URL + "/video.html"
		usesPlatformVideoDecoder, playErr = playVideo(ctx, cs, filename, url, unmutePlayer)
	case MSEVideo:
		url = server.URL + "/shaka.html"
		usesPlatformVideoDecoder, playErr = playMSEVideo(ctx, cs, filename, url)
	case DRMVideo:
		url = server.URL + "/shaka_drm.html"
		isHwDrmPipeline, playErr = playDRMVideo(ctx, s, cs, cr, filename, url)
	}
	if playErr != nil {
		// Wrap playErr itself. The previous code wrapped err, which is nil at
		// this point (last assigned by NewVideoLogger), so Wrapf returned nil
		// and play failures were silently swallowed.
		return errors.Wrapf(playErr, "failed to play %v (%v)", filename, url)
	}
	if mode == NoVerifyHWAcceleratorUsed {
		// Early return when no verification is needed.
		return nil
	}
	if mode == VerifyHWAcceleratorUsed && !usesPlatformVideoDecoder {
		return errors.New("video decode acceleration was not used when it was expected to")
	}
	if mode == VerifyNoHWAcceleratorUsed && usesPlatformVideoDecoder {
		return errors.New("software decoding was not used when it was expected to")
	}
	if mode == VerifyHWDRMUsed && !isHwDrmPipeline {
		return errors.New("HW DRM video pipeline was not used when it was expected to")
	}
	return nil
}
// TestSeek checks that the video file named filename can be seeked around.
// It will play the video and seek randomly into it numSeeks times.
func TestSeek(ctx context.Context, httpHandler http.Handler, cs ash.ConnSource, filename, outDir string, numSeeks int) error {
	vl, err := logging.NewVideoLogger()
	if err != nil {
		return err
	}
	defer vl.Close()
	server := httptest.NewServer(httpHandler)
	defer server.Close()
	if err := playSeekVideo(ctx, cs, filename, server.URL, outDir, numSeeks); err != nil {
		// Fix: Wrapf already records err as the cause; the original message also
		// appended ": %v" with the same err, duplicating it in the output.
		return errors.Wrapf(err, "failed to play %v (%v)", filename, server.URL)
	}
	return nil
}
// TestPlayAndScreenshot plays the filename video, switches it to full
// screen mode, takes a screenshot and analyzes the resulting image to
// sample the colors of a few interesting points and compare them against
// expectations. The expectations are defined by refFilename which is a
// PNG file corresponding to the ideally rendered video frame in the absence
// of scaling or artifacts.
//
// Caveat: this test does not disable night light. Night light doesn't
// seem to affect the output of the screenshot tool, but this might
// not hold in the future in case we decide to apply night light at
// compositing time if the hardware does not support the color
// transform matrix.
func TestPlayAndScreenshot(ctx context.Context, s *testing.State, tconn *chrome.TestConn, cs ash.ConnSource, filename, refFilename string) error {
	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()
	// Fix: build the URL by concatenation like the other helpers in this file.
	// path.Join() cleans the result and collapses the "//" in "http://",
	// producing a malformed "http:/host/..." URL.
	url := server.URL + "/video.html"
	conn, err := cs.NewConn(ctx, url)
	if err != nil {
		return errors.Wrapf(err, "failed to open %v", url)
	}
	defer conn.Close()
	// For consistency across test runs, ensure that the device is in landscape-primary orientation.
	if err = graphics.RotateDisplayToLandscapePrimary(ctx, tconn); err != nil {
		return errors.Wrap(err, "failed to set display to landscape-primary orientation")
	}
	// Make the video go to full screen mode by pressing 'f': requestFullScreen() needs a user gesture.
	ew, err := input.Keyboard(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to initialize the keyboard writer")
	}
	// Fix: release the virtual keyboard device; the original leaked it
	// (compare with the defer ew.Close() in playDRMVideo).
	defer ew.Close()
	if err := ew.Type(ctx, "f"); err != nil {
		return errors.Wrap(err, "failed to inject the 'f' key")
	}
	// Start playing the video indefinitely.
	if err := conn.Call(ctx, nil, "playRepeatedly", filename); err != nil {
		return errors.Wrapf(err, "failed to play %v", filename)
	}
	// TODO(andrescj): this sleep is here to wait prior to taking the screenshot to make sure the video
	// is on the screen and to let the "Press Esc to exit full screen" message disappear. This is to
	// make sure the video is the only thing on the screen and thus minimize the excuses Chrome would
	// have to not promote it to a HW overlay. Poll instead for two conditions:
	// 1) The screenshot is correct (i.e., do the checks below), and
	// 2) There is a HW overlay.
	if err := testing.Sleep(ctx, delayToScreenshot); err != nil {
		return errors.Wrap(err, "failed to sleep prior to taking screenshot")
	}
	sshotPath := filepath.Join(s.OutDir(), "screenshot.png")
	if err := screenshot.Capture(ctx, sshotPath); err != nil {
		return errors.Wrap(err, "failed to capture screen")
	}
	// Decode the screenshot and rotate it if necessary to make later steps easier.
	f, err := os.Open(sshotPath)
	if err != nil {
		return errors.Wrapf(err, "failed to open %v", sshotPath)
	}
	img, _, err := image.Decode(f)
	// Close the file now because we might open it for writing again later.
	if err := f.Close(); err != nil {
		return errors.Wrapf(err, "failed to close %v", sshotPath)
	}
	if err != nil {
		return errors.Wrapf(err, "could not decode %v", sshotPath)
	}
	if img.Bounds().Dx() < img.Bounds().Dy() {
		s.Log("The screenshot is in portrait orientation; rotating it")
		rotImg := image.NewRGBA(image.Rectangle{image.Point{}, image.Point{img.Bounds().Max.Y, img.Bounds().Max.X}})
		for dstY := 0; dstY < rotImg.Bounds().Dy(); dstY++ {
			for dstX := 0; dstX < rotImg.Bounds().Dx(); dstX++ {
				srcColor := img.At(dstY, img.Bounds().Dy()-1-dstX)
				rotImg.Set(dstX, dstY, srcColor)
			}
		}
		f, err := os.Create(sshotPath)
		if err != nil {
			return errors.Wrapf(err, "could not create the rotated screenshot (%v)", sshotPath)
		}
		defer f.Close()
		if err := png.Encode(f, rotImg); err != nil {
			return errors.Wrapf(err, "could not encode the rotated screenshot (%v)", sshotPath)
		}
		img = rotImg
	}
	// Find the bounds of the video by excluding the black strips on each side.
	xMiddle := img.Bounds().Dx() / 2
	yMiddle := img.Bounds().Dy() / 2
	top := 0
	for ; top < img.Bounds().Dy(); top++ {
		if !isVideoPadding(img.At(xMiddle, top)) {
			break
		}
	}
	bottom := img.Bounds().Dy() - 1
	for ; bottom >= 0; bottom-- {
		if !isVideoPadding(img.At(xMiddle, bottom)) {
			break
		}
	}
	if bottom <= top {
		return errors.New("could not find the top or bottom boundary of the video")
	}
	left := 0
	for ; left < img.Bounds().Dx(); left++ {
		if !isVideoPadding(img.At(left, yMiddle)) {
			break
		}
	}
	right := img.Bounds().Dx() - 1
	for ; right >= 0; right-- {
		if !isVideoPadding(img.At(right, yMiddle)) {
			break
		}
	}
	if right <= left {
		return errors.New("could not find the left or right boundary of the video")
	}
	s.Logf("Video bounds: (left, top) = (%d, %d); (right, bottom) = (%d, %d)",
		left, top, right, bottom)
	// Open the reference file to assert expectations on the screenshot later.
	refPath := s.DataPath(refFilename)
	f, err = os.Open(refPath)
	if err != nil {
		return errors.Wrapf(err, "failed to open %v", refPath)
	}
	defer f.Close()
	refImg, _, err := image.Decode(f)
	if err != nil {
		return errors.Wrapf(err, "could not decode %v", refPath)
	}
	videoW := refImg.Bounds().Dx()
	videoH := refImg.Bounds().Dy()
	// Measurement 1:
	// We'll sample a few interesting pixels and report the color distance with
	// respect to the reference image.
	samples := ColorSamplingPointsForStillColorsVideo(videoW, videoH)
	p := perf.NewValues()
	maxDistance := -1
	maxDistancePoint := ""
	for k, v := range samples {
		// First convert the coordinates from video space to screenshot space.
		videoX := v.X
		videoY := v.Y
		screenX := left + (right-left)*v.X/(videoW-1)
		screenY := top + (bottom-top)*v.Y/(videoH-1)
		// Then report the distance between the expected and actual colors at this location.
		expectedColor := refImg.At(videoX, videoY)
		actualColor := img.At(screenX, screenY)
		distance := ColorDistance(expectedColor, actualColor)
		if distance > maxDistance {
			maxDistance = distance
			maxDistancePoint = k
		}
		if distance != 0 {
			s.Logf("At %v (video space = (%d, %d), screen space = (%d, %d)): expected RGBA = %v; got RGBA = %v; distance = %d",
				k, videoX, videoY, screenX, screenY, expectedColor, actualColor, distance)
		}
		p.Set(perf.Metric{
			Name:      k,
			Unit:      "None",
			Direction: perf.SmallerIsBetter,
		}, float64(distance))
	}
	// The distance threshold was decided by analyzing the data reported above
	// across many devices. It should ideally be smaller, but for now, it seems we
	// have color space handling issues. Nonetheless, this threshold should be
	// enough for detecting major video rendering issues. Note that:
	//
	// 1) We still report the distances as perf values so we can continue to
	// analyze and improve.
	// 2) We don't bother to report a total distance if this threshold is exceeded
	// because it would just make email alerts very noisy.
	if maxDistance > 100 {
		p.Save(s.OutDir())
		return errors.Errorf("the color distance for %v = %d exceeds the threshold (100)", maxDistancePoint, maxDistance)
	}
	// Measurement 2:
	// We report an aggregate distance for the image: we go through all the pixels
	// in the screenshot video to add up all the distances and then normalize by
	// the number of pixels at the end.
	totalDistance := 0.0
	for row := top; row <= bottom; row++ {
		for col := left; col <= right; col++ {
			// First convert the coordinates from screenshot space to video space.
			videoX := (col - left) * (videoW - 1) / (right - left)
			videoY := (row - top) * (videoH - 1) / (bottom - top)
			expectedColor := refImg.At(videoX, videoY)
			actualColor := img.At(col, row)
			totalDistance += float64(ColorDistance(expectedColor, actualColor))
		}
	}
	totalDistance /= float64((right - left + 1) * (bottom - top + 1))
	s.Log("The total distance for the entire image is ", totalDistance)
	p.Set(perf.Metric{
		Name:      "total_distance",
		Unit:      "None",
		Direction: perf.SmallerIsBetter,
	}, totalDistance)
	// Fix: the original discarded the error returned by Save.
	if err := p.Save(s.OutDir()); err != nil {
		return errors.Wrap(err, "failed to save perf values")
	}
	return nil
}
| {
ctx, st := timing.Start(ctx, "play_seek_video")
defer st.End()
// Establish a connection to a video play page
conn, err := loadPage(ctx, cs, baseURL+"/video.html")
if err != nil {
return err
}
defer conn.Close()
defer conn.CloseTarget(ctx)
if err := conn.Call(ctx, nil, "playRepeatedly", videoFile); err != nil {
return err
}
// Wait until videoElement has advanced so that chrome:media-internals has
// time to fill in their fields.
if err := conn.WaitForExpr(ctx, "document.getElementsByTagName('video')[0].currentTime > 1"); err != nil {
return errors.Wrap(err, "failed waiting for video to advance playback")
}
if err := seekVideoRepeatedly(ctx, conn, outDir, numSeeks); err != nil {
return err
}
return nil
} | identifier_body |
play.go | // Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package play provides common code for playing videos on Chrome.
package play
import (
"context"
"image"
"image/color"
"image/png"
"math"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"time"
"chromiumos/tast/common/perf"
"chromiumos/tast/errors"
"chromiumos/tast/local/audio/crastestclient"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/colorcmp"
"chromiumos/tast/local/graphics"
"chromiumos/tast/local/input"
"chromiumos/tast/local/media/devtools"
"chromiumos/tast/local/media/logging"
"chromiumos/tast/local/screenshot"
"chromiumos/tast/testing"
"chromiumos/tast/timing"
)
// VideoType represents a type of video played in TestPlay.
type VideoType int

const (
	// NormalVideo represents a normal video. (i.e. non-MSE video.)
	NormalVideo VideoType = iota
	// MSEVideo represents a video requiring Media Source Extensions (MSE).
	// For this type, TestPlay's filename argument is the name of an MPD file.
	MSEVideo
	// DRMVideo represents a video requiring Digital Rights Management (DRM).
	DRMVideo
)

// VerifyHWAcceleratorMode represents a mode of TestPlay.
type VerifyHWAcceleratorMode int

const (
	// NoVerifyHWAcceleratorUsed is a mode that plays a video without verifying
	// hardware accelerator usage.
	NoVerifyHWAcceleratorUsed VerifyHWAcceleratorMode = iota
	// VerifyHWAcceleratorUsed is a mode that verifies a video is played using a
	// hardware accelerator.
	VerifyHWAcceleratorUsed
	// VerifyNoHWAcceleratorUsed is a mode that verifies a video is not played
	// using a hardware accelerator, i.e. it's using software decoding.
	VerifyNoHWAcceleratorUsed
	// VerifyHWDRMUsed is a mode that verifies a video is played using a hardware
	// accelerator with HW DRM protection.
	VerifyHWDRMUsed
)

// delayToScreenshot is how long we need to wait before taking a screenshot in
// the TestPlayAndScreenshot case. This is necessary to ensure the video is on
// the screen and to let the "Press Esc to exit full screen" message disappear.
// The value was picked empirically; see the TODO in TestPlayAndScreenshot about
// replacing this sleep with polling.
const delayToScreenshot = 7 * time.Second
// MSEDataFiles returns a list of required files for tests that play MSE videos.
func MSEDataFiles() []string {
	const shakaDir = "third_party/shaka-player/"
	return []string{
		"shaka.html",
		shakaDir + "shaka-player.compiled.debug.js",
		shakaDir + "shaka-player.compiled.debug.map",
	}
}
// DRMDataFiles returns a list of required files for tests that play DRM videos.
func DRMDataFiles() []string {
	const shakaDir = "third_party/shaka-player/"
	return []string{
		"shaka_drm.html",
		shakaDir + "shaka-player.compiled.debug.js",
		shakaDir + "shaka-player.compiled.debug.map",
	}
}
// loadPage opens a new tab to load the specified webpage.
// Note that if err != nil, conn is nil.
func loadPage(ctx context.Context, cs ash.ConnSource, url string) (*chrome.Conn, error) {
	ctx, st := timing.Start(ctx, "load_page")
	defer st.End()
	c, err := cs.NewConn(ctx, url)
	if err == nil {
		return c, nil
	}
	return nil, errors.Wrapf(err, "failed to open %v", url)
}
// playVideo invokes loadVideo(), plays a normal video in video.html, and checks if it has progress.
// videoFile is the file name which is played there.
// url is the URL of the video playback testing webpage.
// It returns whether the platform (hardware-accelerated) video decoder was
// used, as reported by the DevTools media observer.
func playVideo(ctx context.Context, cs ash.ConnSource, videoFile, url string, unmutePlayer bool) (bool, error) {
	ctx, st := timing.Start(ctx, "play_video")
	defer st.End()
	conn, err := loadPage(ctx, cs, url)
	if err != nil {
		return false, err
	}
	// Defers run LIFO: the tab is closed (CloseTarget) before the connection (Close).
	defer conn.Close()
	defer conn.CloseTarget(ctx)
	// Register the observer before starting playback so media property events
	// emitted during playback are not missed.
	observer, err := conn.GetMediaPropertiesChangedObserver(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
	}
	if err := conn.Call(ctx, nil, "playUntilEnd", videoFile, unmutePlayer); err != nil {
		return false, err
	}
	isPlatform, _, err := devtools.GetVideoDecoder(ctx, observer, url)
	return isPlatform, err
}
// playMSEVideo plays an MSE video stream via Shaka player, and checks its play progress.
// mpdFile is the name of MPD file for the video stream.
// url is the URL of the shaka player webpage.
// It returns whether the platform (hardware-accelerated) video decoder was
// used, as reported by the DevTools media observer.
func playMSEVideo(ctx context.Context, cs ash.ConnSource, mpdFile, url string) (bool, error) {
	ctx, st := timing.Start(ctx, "play_mse_video")
	defer st.End()
	conn, err := loadPage(ctx, cs, url)
	if err != nil {
		return false, err
	}
	// Defers run LIFO: the tab is closed (CloseTarget) before the connection (Close).
	defer conn.Close()
	defer conn.CloseTarget(ctx)
	// Register the observer before starting playback so media property events
	// emitted during playback are not missed.
	observer, err := conn.GetMediaPropertiesChangedObserver(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
	}
	if err := conn.Call(ctx, nil, "play_shaka", mpdFile); err != nil {
		return false, err
	}
	isPlatform, _, err := devtools.GetVideoDecoder(ctx, observer, url)
	return isPlatform, err
}
// playDRMVideo plays a DRM-protected MSE video stream via Shaka player, and
// checks its play progress. After it's done, it goes full screen and takes a
// screenshot and verifies the contents are all black.
// mpdFile is the name of MPD file for the video stream.
// url is the URL of the shaka player webpage.
// It returns whether a HW DRM pipeline was in use, as reported by DevTools.
func playDRMVideo(ctx context.Context, s *testing.State, cs ash.ConnSource, cr *chrome.Chrome, mpdFile, url string) (bool, error) {
	ctx, st := timing.Start(ctx, "play_drm_video")
	defer st.End()
	conn, err := loadPage(ctx, cs, url)
	if err != nil {
		return false, err
	}
	// Defers run LIFO: the tab is closed (CloseTarget) before the connection (Close).
	defer conn.Close()
	defer conn.CloseTarget(ctx)
	// Register the observer before starting playback so media property events
	// emitted during playback are not missed.
	observer, err := conn.GetMediaPropertiesChangedObserver(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
	}
	if err := conn.Call(ctx, nil, "play_shaka_drm", mpdFile); err != nil {
		return false, err
	}
	// Now go full screen, take a screenshot and verify it's all black.
	// Make the video go to full screen mode by pressing 'f': requestFullScreen() needs a user gesture.
	ew, err := input.Keyboard(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to initialize the keyboard writer")
	}
	defer ew.Close()
	if err := ew.Type(ctx, "f"); err != nil {
		return false, errors.Wrap(err, "failed to inject the 'f' key")
	}
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to connect to test API")
	}
	// Block until the window actually reaches full screen before screenshotting.
	if err := ash.WaitForFullScreen(ctx, tconn); err != nil {
		return false, errors.Wrap(err, "failed waiting for full screen")
	}
	// Take the screenshot, we don't need to wait because we are only verifying
	// that the vast majority is black, so things like 'hit Esc to exist full screen'
	// won't be an issue.
	im, err := screenshot.GrabScreenshot(ctx, cr)
	if err != nil {
		return false, errors.Wrap(err, "failed taking screenshot")
	}
	// Verify that over 92% of the image is solid black. This is true because for
	// HW DRM, you cannot actually screenshot the video and it will be replaced by
	// solid black in the compositor. From testing, we have seen this be as low as
	// 0.94, so set the threshold at 0.92.
	color, ratio := colorcmp.DominantColor(im)
	if ratio < 0.92 || !colorcmp.ColorsMatch(color, colorcmp.RGB(0, 0, 0), 1) {
		return false, errors.Errorf("screenshot did not have solid black, instead got %v at ratio %0.2f",
			colorcmp.ColorStr(color), ratio)
	}
	return devtools.CheckHWDRMPipeline(ctx, observer, url)
}
// seekVideoRepeatedly seeks video numSeeks times, saving some performance
// metrics (elapsed time, number of completed seeks) in outDir.
func seekVideoRepeatedly(ctx context.Context, conn *chrome.Conn, outDir string, numSeeks int) error {
	ctx, st := timing.Start(ctx, "seek_video_repeatedly")
	defer st.End()
	p := perf.NewValues()
	startTime := time.Now()
	prevSeekCount := 0
	seekCount := 0
	for i := 0; i < numSeeks; i++ {
		if err := conn.Call(ctx, &seekCount, "randomSeek"); err != nil {
			// If the test times out, Call() might be interrupted and return
			// zero seekCount, in that case used the last known good amount.
			if seekCount == 0 {
				seekCount = prevSeekCount
			}
			return errors.Wrapf(err, "error while seeking, completed %d/%d seeks", seekCount, numSeeks)
		}
		if seekCount == numSeeks {
			break
		}
		prevSeekCount = seekCount
	}
	elapsed := time.Since(startTime).Seconds()
	// Clamp to numSeeks: the page may report one more seek than we asked for.
	completedSeeks := math.Min(float64(seekCount+1), float64(numSeeks))
	p.Set(perf.Metric{
		Name:      "average_seek_time",
		Unit:      "s",
		Direction: perf.SmallerIsBetter,
	}, elapsed/completedSeeks)
	p.Set(perf.Metric{
		Name:      "completed_seeks",
		Unit:      "percent",
		Direction: perf.BiggerIsBetter,
	}, float64(100.0*completedSeeks/float64(numSeeks)))
	testing.ContextLog(ctx, p)
	// Fix: the original discarded the error returned by Save.
	if err := p.Save(outDir); err != nil {
		return errors.Wrap(err, "failed to save perf values")
	}
	return nil
}
// playSeekVideo invokes loadVideo() then plays the video referenced by videoFile
// while repeatedly and randomly seeking into it numSeeks. It returns an error if
// seeking did not succeed for some reason.
// videoFile is the file name which is played and seeked there.
// baseURL is the base URL which serves video playback testing webpage.
// Performance metrics gathered while seeking are written under outDir.
func playSeekVideo(ctx context.Context, cs ash.ConnSource, videoFile, baseURL, outDir string, numSeeks int) error {
	ctx, st := timing.Start(ctx, "play_seek_video")
	defer st.End()
	// Establish a connection to a video play page
	conn, err := loadPage(ctx, cs, baseURL+"/video.html")
	if err != nil {
		return err
	}
	// Defers run LIFO: the tab is closed (CloseTarget) before the connection (Close).
	defer conn.Close()
	defer conn.CloseTarget(ctx)
	if err := conn.Call(ctx, nil, "playRepeatedly", videoFile); err != nil {
		return err
	}
	// Wait until videoElement has advanced so that chrome:media-internals has
	// time to fill in their fields.
	if err := conn.WaitForExpr(ctx, "document.getElementsByTagName('video')[0].currentTime > 1"); err != nil {
		return errors.Wrap(err, "failed waiting for video to advance playback")
	}
	if err := seekVideoRepeatedly(ctx, conn, outDir, numSeeks); err != nil {
		return err
	}
	return nil
}
// ColorDistance returns the maximum absolute difference between each component of a and b.
// Both a and b are assumed to be RGBA colors.
func ColorDistance(a, b color.Color) int {
aR, aG, aB, aA := a.RGBA()
bR, bG, bB, bA := b.RGBA()
abs := func(a int) int {
if a < 0 {
return -a
}
return a
}
max := func(nums ...int) int {
m := 0
for _, n := range nums {
if n > m {
m = n
}
}
return m
}
// Interestingly, the RGBA method returns components in the range [0, 0xFFFF] corresponding
// to the 8-bit values multiplied by 0x101 (see https://blog.golang.org/image). Therefore,
// we must shift them to the right by 8 so that they are in the more typical [0, 255] range.
return max(abs(int(aR>>8)-int(bR>>8)),
abs(int(aG>>8)-int(bG>>8)),
abs(int(aB>>8)-int(bB>>8)),
abs(int(aA>>8)-int(bA>>8)))
}
// ColorSamplingPointsForStillColorsVideo returns a map of points that are considered to be
// interesting in the rendering of the still-colors-*.mp4 test videos. The key in the map is
// a name for the corresponding point. There are two categories of points:
//
// - Outer corners: the four absolute corners of the video offset by 1 to ignore acceptable
// color blending artifacts on the edges. However, the outer bottom-right is not offset
// because we never expect blending artifacts there.
//
// - Inner corners: 4 stencils (one for each corner of the video). Each stencil is composed
// of 4 sampling points arranged as a square. The expectation is that for each stencil, 3
// of its points fall on the interior border of the test video while the remaining point
// falls inside one of the color rectangles. This helps us detect undesired
// stretching/shifting/rotation/mirroring. The naming convention for each point of a
// stencil is as follows:
//
// inner_Y_X_00: the corner of the stencil closest to the Y-X corner of the video.
// inner_Y_X_01: the corner of the stencil that's in the interior X border of the video.
// inner_Y_X_10: the corner of the stencil that's in the interior Y border of the video.
// inner_Y_X_11: the only corner of the stencil that's not on the border strip.
//
// For example, the top-right corner of the test video looks like this:
//
// MMMMMMMMMMMMMMMM
// MMMMMMMMMM2MMM0M
// MMMMMMMMMMMMMMMM
// 3 M1M
// MMM
//
// Where 'M' is the magenta interior border. So the names of each of the points 0, 1, 2, 3
// are:
//
// 0: inner_top_right_00
// 1: inner_top_right_01
// 2: inner_top_right_10
// 3: inner_top_right_11
func ColorSamplingPointsForStillColorsVideo(videoW, videoH int) map[string]image.Point {
outerCorners := map[string]image.Point{
"outer_top_left": {1, 1},
"outer_top_right": {(videoW - 1) - 1, 1},
"outer_bottom_right": {videoW - 1, videoH - 1},
"outer_bottom_left": {1, (videoH - 1) - 1},
}
edgeOffset := 5
stencilW := 5
innerCorners := map[string]image.Point{
"inner_top_left_00": {edgeOffset, edgeOffset},
"inner_top_left_01": {edgeOffset, edgeOffset + stencilW},
"inner_top_left_10": {edgeOffset + stencilW, edgeOffset},
"inner_top_left_11": {edgeOffset + stencilW, edgeOffset + stencilW},
"inner_top_right_00": {(videoW - 1) - edgeOffset, edgeOffset},
"inner_top_right_01": {(videoW - 1) - edgeOffset, edgeOffset + stencilW},
"inner_top_right_10": {(videoW - 1) - edgeOffset - stencilW, edgeOffset},
"inner_top_right_11": {(videoW - 1) - edgeOffset - stencilW, edgeOffset + stencilW},
"inner_bottom_right_00": {(videoW - 1) - edgeOffset, (videoH - 1) - edgeOffset},
"inner_bottom_right_01": {(videoW - 1) - edgeOffset, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_right_10": {(videoW - 1) - edgeOffset - stencilW, (videoH - 1) - edgeOffset},
"inner_bottom_right_11": {(videoW - 1) - edgeOffset - stencilW, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_left_00": {edgeOffset, (videoH - 1) - edgeOffset},
"inner_bottom_left_01": {edgeOffset, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_left_10": {edgeOffset + stencilW, (videoH - 1) - edgeOffset},
"inner_bottom_left_11": {edgeOffset + stencilW, (videoH - 1) - edgeOffset - stencilW},
}
samples := map[string]image.Point{}
for k, v := range innerCorners {
samples[k] = v
}
for k, v := range outerCorners {
samples[k] = v
}
return samples
}
// isVideoPadding reports whether c corresponds to the expected color of the
// padding that a video gets when in full screen so that it appears centered:
// black within a certain tolerance.
func isVideoPadding(c color.Color) bool {
	black := color.RGBA{R: 0, G: 0, B: 0, A: 255}
	// 25 was picked empirically. For example, on kukui, the first padding row
	// below the video has a color of (20, 1, 22, 255).
	return ColorDistance(c, black) < 25
}
// TestPlay checks that the video file named filename can be played using Chrome.
// videotype represents a type of a given video. If it is MSEVideo, filename is a name
// of MPD file.
// If mode is VerifyHWAcceleratorUsed, this function also checks if hardware accelerator was used.
func TestPlay(ctx context.Context, s *testing.State, cs ash.ConnSource, cr *chrome.Chrome,
	filename string, videotype VideoType, mode VerifyHWAcceleratorMode, unmutePlayer bool) error {
	if unmutePlayer && videotype != NormalVideo {
		return errors.New("got unmutePlayer = true, expected false: unmutePlayer " +
			"is only implemented for videoType = NormalVideo")
	}
	vl, err := logging.NewVideoLogger()
	if err != nil {
		return err
	}
	defer vl.Close()
	// Mute audio for the duration of the test; unmute on the way out.
	if err := crastestclient.Mute(ctx); err != nil {
		return err
	}
	defer crastestclient.Unmute(ctx)
	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()
	var playErr error
	var url string
	usesPlatformVideoDecoder, isHwDrmPipeline := false, false
	switch videotype {
	case NormalVideo:
		url = server.URL + "/video.html"
		usesPlatformVideoDecoder, playErr = playVideo(ctx, cs, filename, url, unmutePlayer)
	case MSEVideo:
		url = server.URL + "/shaka.html"
		usesPlatformVideoDecoder, playErr = playMSEVideo(ctx, cs, filename, url)
	case DRMVideo:
		url = server.URL + "/shaka_drm.html"
		isHwDrmPipeline, playErr = playDRMVideo(ctx, s, cs, cr, filename, url)
	}
	if playErr != nil {
		// Fix: wrap playErr (the actual playback failure). The original wrapped
		// err, which is nil here (NewVideoLogger succeeded), so the real cause
		// was not attached to the returned error chain.
		return errors.Wrapf(playErr, "failed to play %v (%v)", filename, url)
	}
	if mode == NoVerifyHWAcceleratorUsed {
		// Early return when no verification is needed.
		return nil
	}
	if mode == VerifyHWAcceleratorUsed && !usesPlatformVideoDecoder {
		return errors.New("video decode acceleration was not used when it was expected to")
	}
	if mode == VerifyNoHWAcceleratorUsed && usesPlatformVideoDecoder {
		return errors.New("software decoding was not used when it was expected to")
	}
	if mode == VerifyHWDRMUsed && !isHwDrmPipeline {
		return errors.New("HW DRM video pipeline was not used when it was expected to")
	}
	return nil
}
// TestSeek checks that the video file named filename can be seeked around.
// It will play the video and seek randomly into it numSeeks times.
func TestSeek(ctx context.Context, httpHandler http.Handler, cs ash.ConnSource, filename, outDir string, numSeeks int) error {
	vl, err := logging.NewVideoLogger()
	if err != nil {
		return err
	}
	defer vl.Close()
	server := httptest.NewServer(httpHandler)
	defer server.Close()
	if err := playSeekVideo(ctx, cs, filename, server.URL, outDir, numSeeks); err != nil {
		// Fix: Wrapf already records err as the cause; the original message also
		// appended ": %v" with the same err, duplicating it in the output.
		return errors.Wrapf(err, "failed to play %v (%v)", filename, server.URL)
	}
	return nil
}
// TestPlayAndScreenshot plays the filename video, switches it to full
// screen mode, takes a screenshot and analyzes the resulting image to
// sample the colors of a few interesting points and compare them against
// expectations. The expectations are defined by refFilename which is a
// PNG file corresponding to the ideally rendered video frame in the absence
// of scaling or artifacts.
//
// Caveat: this test does not disable night light. Night light doesn't
// seem to affect the output of the screenshot tool, but this might
// not hold in the future in case we decide to apply night light at
// compositing time if the hardware does not support the color
// transform matrix.
func | (ctx context.Context, s *testing.State, tconn *chrome.TestConn, cs ash.ConnSource, filename, refFilename string) error {
server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
defer server.Close()
url := path.Join(server.URL, "video.html")
conn, err := cs.NewConn(ctx, url)
if err != nil {
return errors.Wrapf(err, "failed to open %v", url)
}
defer conn.Close()
// For consistency across test runs, ensure that the device is in landscape-primary orientation.
if err = graphics.RotateDisplayToLandscapePrimary(ctx, tconn); err != nil {
return errors.Wrap(err, "failed to set display to landscape-primary orientation")
}
// Make the video go to full screen mode by pressing 'f': requestFullScreen() needs a user gesture.
ew, err := input.Keyboard(ctx)
if err != nil {
return errors.Wrap(err, "failed to initialize the keyboard writer")
}
if err := ew.Type(ctx, "f"); err != nil {
return errors.Wrap(err, "failed to inject the 'f' key")
}
// Start playing the video indefinitely.
if err := conn.Call(ctx, nil, "playRepeatedly", filename); err != nil {
return errors.Wrapf(err, "failed to play %v", filename)
}
// TODO(andrescj): this sleep is here to wait prior to taking the screenshot to make sure the video
// is on the screen and to let the "Press Esc to exit full screen" message disappear. This is to
// make sure the video is the only thing on the screen and thus minimize the excuses Chrome would
// have to not promote it to a HW overlay. Poll instead for two conditions:
// 1) The screenshot is correct (i.e., do the checks below), and
// 2) There is a HW overlay.
if err := testing.Sleep(ctx, delayToScreenshot); err != nil {
return errors.Wrap(err, "failed to sleep prior to taking screenshot")
}
sshotPath := filepath.Join(s.OutDir(), "screenshot.png")
if err := screenshot.Capture(ctx, sshotPath); err != nil {
return errors.Wrap(err, "failed to capture screen")
}
// Decode the screenshot and rotate it if necessary to make later steps easier.
f, err := os.Open(sshotPath)
if err != nil {
return errors.Wrapf(err, "failed to open %v", sshotPath)
}
img, _, err := image.Decode(f)
// Close the file now because we might open it for writing again later.
if err := f.Close(); err != nil {
return errors.Wrapf(err, "failed to close %v", sshotPath)
}
if err != nil {
return errors.Wrapf(err, "could not decode %v", sshotPath)
}
if img.Bounds().Dx() < img.Bounds().Dy() {
s.Log("The screenshot is in portrait orientation; rotating it")
rotImg := image.NewRGBA(image.Rectangle{image.Point{}, image.Point{img.Bounds().Max.Y, img.Bounds().Max.X}})
for dstY := 0; dstY < rotImg.Bounds().Dy(); dstY++ {
for dstX := 0; dstX < rotImg.Bounds().Dx(); dstX++ {
srcColor := img.At(dstY, img.Bounds().Dy()-1-dstX)
rotImg.Set(dstX, dstY, srcColor)
}
}
f, err := os.Create(sshotPath)
if err != nil {
return errors.Wrapf(err, "could not create the rotated screenshot (%v)", sshotPath)
}
defer f.Close()
if err := png.Encode(f, rotImg); err != nil {
return errors.Wrapf(err, "could not encode the rotated screenshot (%v)", sshotPath)
}
img = rotImg
}
// Find the bounds of the video by excluding the black strips on each side.
xMiddle := img.Bounds().Dx() / 2
yMiddle := img.Bounds().Dy() / 2
top := 0
for ; top < img.Bounds().Dy(); top++ {
if !isVideoPadding(img.At(xMiddle, top)) {
break
}
}
bottom := img.Bounds().Dy() - 1
for ; bottom >= 0; bottom-- {
if !isVideoPadding(img.At(xMiddle, bottom)) {
break
}
}
if bottom <= top {
return errors.New("could not find the top or bottom boundary of the video")
}
left := 0
for ; left < img.Bounds().Dx(); left++ {
if !isVideoPadding(img.At(left, yMiddle)) {
break
}
}
right := img.Bounds().Dx() - 1
for ; right >= 0; right-- {
if !isVideoPadding(img.At(right, yMiddle)) {
break
}
}
if right <= left {
return errors.New("could not find the left or right boundary of the video")
}
s.Logf("Video bounds: (left, top) = (%d, %d); (right, bottom) = (%d, %d)",
left, top, right, bottom)
// Open the reference file to assert expectations on the screenshot later.
refPath := s.DataPath(refFilename)
f, err = os.Open(refPath)
if err != nil {
return errors.Wrapf(err, "failed to open %v", refPath)
}
defer f.Close()
refImg, _, err := image.Decode(f)
if err != nil {
return errors.Wrapf(err, "could not decode %v", refPath)
}
videoW := refImg.Bounds().Dx()
videoH := refImg.Bounds().Dy()
// Measurement 1:
// We'll sample a few interesting pixels and report the color distance with
// respect to the reference image.
samples := ColorSamplingPointsForStillColorsVideo(videoW, videoH)
p := perf.NewValues()
maxDistance := -1
maxDistancePoint := ""
for k, v := range samples {
// First convert the coordinates from video space to screenshot space.
videoX := v.X
videoY := v.Y
screenX := left + (right-left)*v.X/(videoW-1)
screenY := top + (bottom-top)*v.Y/(videoH-1)
// Then report the distance between the expected and actual colors at this location.
expectedColor := refImg.At(videoX, videoY)
actualColor := img.At(screenX, screenY)
distance := ColorDistance(expectedColor, actualColor)
if distance > maxDistance {
maxDistance = distance
maxDistancePoint = k
}
if distance != 0 {
s.Logf("At %v (video space = (%d, %d), screen space = (%d, %d)): expected RGBA = %v; got RGBA = %v; distance = %d",
k, videoX, videoY, screenX, screenY, expectedColor, actualColor, distance)
}
p.Set(perf.Metric{
Name: k,
Unit: "None",
Direction: perf.SmallerIsBetter,
}, float64(distance))
}
// The distance threshold was decided by analyzing the data reported above
// across many devices. It should ideally be smaller, but for now, it seems we
// have color space handling issues. Nonetheless, this threshold should be
// enough for detecting major video rendering issues. Note that:
//
// 1) We still report the distances as perf values so we can continue to
// analyze and improve.
// 2) We don't bother to report a total distance if this threshold is exceeded
// because it would just make email alerts very noisy.
if maxDistance > 100 {
p.Save(s.OutDir())
return errors.Errorf("the color distance for %v = %d exceeds the threshold (100)", maxDistancePoint, maxDistance)
}
// Measurement 2:
// We report an aggregate distance for the image: we go through all the pixels
// in the screenshot video to add up all the distances and then normalize by
// the number of pixels at the end.
totalDistance := 0.0
for row := top; row <= bottom; row++ {
for col := left; col <= right; col++ {
// First convert the coordinates from screenshot space to video space.
videoX := (col - left) * (videoW - 1) / (right - left)
videoY := (row - top) * (videoH - 1) / (bottom - top)
expectedColor := refImg.At(videoX, videoY)
actualColor := img.At(col, row)
totalDistance += float64(ColorDistance(expectedColor, actualColor))
}
}
totalDistance /= float64((right - left + 1) * (bottom - top + 1))
s.Log("The total distance for the entire image is ", totalDistance)
p.Set(perf.Metric{
Name: "total_distance",
Unit: "None",
Direction: perf.SmallerIsBetter,
}, totalDistance)
p.Save(s.OutDir())
return nil
}
| TestPlayAndScreenshot | identifier_name |
play.go | // Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package play provides common code for playing videos on Chrome.
package play
import (
"context"
"image"
"image/color" | "path"
"path/filepath"
"time"
"chromiumos/tast/common/perf"
"chromiumos/tast/errors"
"chromiumos/tast/local/audio/crastestclient"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/colorcmp"
"chromiumos/tast/local/graphics"
"chromiumos/tast/local/input"
"chromiumos/tast/local/media/devtools"
"chromiumos/tast/local/media/logging"
"chromiumos/tast/local/screenshot"
"chromiumos/tast/testing"
"chromiumos/tast/timing"
)
// VideoType represents a type of video played in TestPlay.
type VideoType int

const (
	// NormalVideo represents a normal video. (i.e. non-MSE video.)
	NormalVideo VideoType = iota
	// MSEVideo represents a video requiring Media Source Extensions (MSE).
	MSEVideo
	// DRMVideo represents a video requiring Digital Rights Management (DRM).
	DRMVideo
)

// VerifyHWAcceleratorMode represents a mode of TestPlay: whether and how to
// verify the decoding pipeline that was actually used during playback.
type VerifyHWAcceleratorMode int

const (
	// NoVerifyHWAcceleratorUsed is a mode that plays a video without verifying
	// hardware accelerator usage.
	NoVerifyHWAcceleratorUsed VerifyHWAcceleratorMode = iota
	// VerifyHWAcceleratorUsed is a mode that verifies a video is played using a
	// hardware accelerator.
	VerifyHWAcceleratorUsed
	// VerifyNoHWAcceleratorUsed is a mode that verifies a video is not played
	// using a hardware accelerator, i.e. it's using software decoding.
	VerifyNoHWAcceleratorUsed
	// VerifyHWDRMUsed is a mode that verifies a video is played using a hardware
	// accelerator with HW DRM protection.
	VerifyHWDRMUsed
)

// delayToScreenshot is how long we need to wait before taking a screenshot in
// the TestPlayAndScreenshot case. This is necessary to ensure the video is on
// the screen and to let the "Press Esc to exit full screen" message disappear.
// The 7s value was picked empirically; see the TODO in TestPlayAndScreenshot
// about polling instead of sleeping.
const delayToScreenshot = 7 * time.Second
// MSEDataFiles returns the list of data files required by tests that play MSE videos:
// the Shaka player page plus its compiled player script and source map.
func MSEDataFiles() []string {
	files := []string{"shaka.html"}
	for _, name := range []string{
		"shaka-player.compiled.debug.js",
		"shaka-player.compiled.debug.map",
	} {
		files = append(files, "third_party/shaka-player/"+name)
	}
	return files
}
// DRMDataFiles returns the list of data files required by tests that play DRM videos:
// the DRM-enabled Shaka player page plus its compiled player script and source map.
func DRMDataFiles() []string {
	files := []string{"shaka_drm.html"}
	for _, name := range []string{
		"shaka-player.compiled.debug.js",
		"shaka-player.compiled.debug.map",
	} {
		files = append(files, "third_party/shaka-player/"+name)
	}
	return files
}
// loadPage opens a new tab that navigates to the specified webpage url.
// Note that if an error is returned, the connection is nil.
func loadPage(ctx context.Context, cs ash.ConnSource, url string) (*chrome.Conn, error) {
	ctx, st := timing.Start(ctx, "load_page")
	defer st.End()
	c, err := cs.NewConn(ctx, url)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to open %v", url)
	}
	return c, nil
}
// playVideo plays a normal video in video.html until its end and reports
// whether a platform (hardware) video decoder was used.
// videoFile is the file name which is played there.
// url is the URL of the video playback testing webpage.
func playVideo(ctx context.Context, cs ash.ConnSource, videoFile, url string, unmutePlayer bool) (bool, error) {
	ctx, st := timing.Start(ctx, "play_video")
	defer st.End()
	page, err := loadPage(ctx, cs, url)
	if err != nil {
		return false, err
	}
	// Registered in this order so the target is closed before the connection.
	defer page.Close()
	defer page.CloseTarget(ctx)
	// The observer must be attached before playback starts to capture decoder info.
	observer, err := page.GetMediaPropertiesChangedObserver(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
	}
	if err := page.Call(ctx, nil, "playUntilEnd", videoFile, unmutePlayer); err != nil {
		return false, err
	}
	usesPlatformDecoder, _, err := devtools.GetVideoDecoder(ctx, observer, url)
	return usesPlatformDecoder, err
}
// playMSEVideo plays an MSE video stream via the Shaka player and checks its
// play progress, reporting whether a platform (hardware) decoder was used.
// mpdFile is the name of the MPD file for the video stream.
// url is the URL of the Shaka player webpage.
func playMSEVideo(ctx context.Context, cs ash.ConnSource, mpdFile, url string) (bool, error) {
	ctx, st := timing.Start(ctx, "play_mse_video")
	defer st.End()
	page, err := loadPage(ctx, cs, url)
	if err != nil {
		return false, err
	}
	// Registered in this order so the target is closed before the connection.
	defer page.Close()
	defer page.CloseTarget(ctx)
	// The observer must be attached before playback starts to capture decoder info.
	observer, err := page.GetMediaPropertiesChangedObserver(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
	}
	if err := page.Call(ctx, nil, "play_shaka", mpdFile); err != nil {
		return false, err
	}
	usesPlatformDecoder, _, err := devtools.GetVideoDecoder(ctx, observer, url)
	return usesPlatformDecoder, err
}
// playDRMVideo plays a DRM-protected MSE video stream via Shaka player, and
// checks its play progress. After it's done, it goes full screen, takes a
// screenshot and verifies the contents are (almost) all black, which is what a
// HW DRM pipeline is expected to render into a screen capture.
// mpdFile is the name of the MPD file for the video stream.
// url is the URL of the shaka player webpage.
// It returns whether the HW DRM pipeline was used, per DevTools.
func playDRMVideo(ctx context.Context, s *testing.State, cs ash.ConnSource, cr *chrome.Chrome, mpdFile, url string) (bool, error) {
	ctx, st := timing.Start(ctx, "play_drm_video")
	defer st.End()
	conn, err := loadPage(ctx, cs, url)
	if err != nil {
		return false, err
	}
	// Registered in this order so the target is closed before the connection.
	defer conn.Close()
	defer conn.CloseTarget(ctx)
	// The observer must be attached before playback starts so DevTools captures
	// the pipeline properties.
	observer, err := conn.GetMediaPropertiesChangedObserver(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to retrieve a media DevTools observer")
	}
	if err := conn.Call(ctx, nil, "play_shaka_drm", mpdFile); err != nil {
		return false, err
	}
	// Now go full screen, take a screenshot and verify it's all black.
	// Make the video go to full screen mode by pressing 'f': requestFullScreen() needs a user gesture.
	ew, err := input.Keyboard(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to initialize the keyboard writer")
	}
	defer ew.Close()
	if err := ew.Type(ctx, "f"); err != nil {
		return false, errors.Wrap(err, "failed to inject the 'f' key")
	}
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		return false, errors.Wrap(err, "failed to connect to test API")
	}
	if err := ash.WaitForFullScreen(ctx, tconn); err != nil {
		return false, errors.Wrap(err, "failed waiting for full screen")
	}
	// Take the screenshot, we don't need to wait because we are only verifying
	// that the vast majority is black, so things like 'hit Esc to exist full screen'
	// won't be an issue.
	im, err := screenshot.GrabScreenshot(ctx, cr)
	if err != nil {
		return false, errors.Wrap(err, "failed taking screenshot")
	}
	// Verify that over 92% of the image is solid black. This is true because for
	// HW DRM, you cannot actually screenshot the video and it will be replaced by
	// solid black in the compositor. From testing, we have seen this be as low as
	// 0.94, so set the threshold at 0.92.
	color, ratio := colorcmp.DominantColor(im)
	if ratio < 0.92 || !colorcmp.ColorsMatch(color, colorcmp.RGB(0, 0, 0), 1) {
		return false, errors.Errorf("screenshot did not have solid black, instead got %v at ratio %0.2f",
			colorcmp.ColorStr(color), ratio)
	}
	return devtools.CheckHWDRMPipeline(ctx, observer, url)
}
// seekVideoRepeatedly seeks video numSeeks times, saving some performance
// metrics (average seek time, percentage of completed seeks) in outDir.
// conn must already be connected to a page exposing a randomSeek() JS function
// that performs one random seek and returns the running seek count.
func seekVideoRepeatedly(ctx context.Context, conn *chrome.Conn, outDir string, numSeeks int) error {
	ctx, st := timing.Start(ctx, "seek_video_repeatedly")
	defer st.End()
	p := perf.NewValues()

	startTime := time.Now()
	// Track the last successfully reported count so a failed Call() (which may
	// leave seekCount at zero) doesn't erase our progress information.
	prevSeekCount := 0
	seekCount := 0
	for i := 0; i < numSeeks; i++ {
		if err := conn.Call(ctx, &seekCount, "randomSeek"); err != nil {
			// If the test times out, Call() might be interrupted and return
			// zero seekCount, in that case used the last known good amount.
			if seekCount == 0 {
				seekCount = prevSeekCount
			}
			// NOTE(review): metrics collected so far are not saved on this path.
			return errors.Wrapf(err, "error while seeking, completed %d/%d seeks", seekCount, numSeeks)
		}
		if seekCount == numSeeks {
			break
		}
		prevSeekCount = seekCount
	}
	elapsed := time.Since(startTime).Seconds()

	// Clamp to numSeeks; the +1 accounts for the seek in flight when the loop
	// exits early — TODO confirm this off-by-one convention against randomSeek().
	completedSeeks := math.Min(float64(seekCount+1), float64(numSeeks))
	p.Set(perf.Metric{
		Name:      "average_seek_time",
		Unit:      "s",
		Direction: perf.SmallerIsBetter,
	}, elapsed/completedSeeks)
	p.Set(perf.Metric{
		Name:      "completed_seeks",
		Unit:      "percent",
		Direction: perf.BiggerIsBetter,
	}, float64(100.0*completedSeeks/float64(numSeeks)))

	testing.ContextLog(ctx, p)
	p.Save(outDir)
	return nil
}
// playSeekVideo plays the video referenced by videoFile while repeatedly and
// randomly seeking into it numSeeks times. It returns an error if seeking did
// not succeed for some reason.
// videoFile is the file name which is played and seeked there.
// baseURL is the base URL which serves the video playback testing webpage.
func playSeekVideo(ctx context.Context, cs ash.ConnSource, videoFile, baseURL, outDir string, numSeeks int) error {
	ctx, st := timing.Start(ctx, "play_seek_video")
	defer st.End()

	// Establish a connection to the video playback page.
	conn, err := loadPage(ctx, cs, baseURL+"/video.html")
	if err != nil {
		return err
	}
	defer conn.Close()
	defer conn.CloseTarget(ctx)

	if err := conn.Call(ctx, nil, "playRepeatedly", videoFile); err != nil {
		return err
	}

	// Wait until the video element has advanced so that chrome:media-internals
	// has had time to fill in its fields.
	const playbackAdvanced = "document.getElementsByTagName('video')[0].currentTime > 1"
	if err := conn.WaitForExpr(ctx, playbackAdvanced); err != nil {
		return errors.Wrap(err, "failed waiting for video to advance playback")
	}

	return seekVideoRepeatedly(ctx, conn, outDir, numSeeks)
}
// ColorDistance returns the maximum absolute difference between each component of a and b.
// Both a and b are assumed to be RGBA colors.
func ColorDistance(a, b color.Color) int {
aR, aG, aB, aA := a.RGBA()
bR, bG, bB, bA := b.RGBA()
abs := func(a int) int {
if a < 0 {
return -a
}
return a
}
max := func(nums ...int) int {
m := 0
for _, n := range nums {
if n > m {
m = n
}
}
return m
}
// Interestingly, the RGBA method returns components in the range [0, 0xFFFF] corresponding
// to the 8-bit values multiplied by 0x101 (see https://blog.golang.org/image). Therefore,
// we must shift them to the right by 8 so that they are in the more typical [0, 255] range.
return max(abs(int(aR>>8)-int(bR>>8)),
abs(int(aG>>8)-int(bG>>8)),
abs(int(aB>>8)-int(bB>>8)),
abs(int(aA>>8)-int(bA>>8)))
}
// ColorSamplingPointsForStillColorsVideo returns a map of points that are considered to be
// interesting in the rendering of the still-colors-*.mp4 test videos. The key in the map is
// a name for the corresponding point. There are two categories of points:
//
// - Outer corners: the four absolute corners of the video offset by 1 to ignore acceptable
// color blending artifacts on the edges. However, the outer bottom-right is not offset
// because we never expect blending artifacts there.
//
// - Inner corners: 4 stencils (one for each corner of the video). Each stencil is composed
// of 4 sampling points arranged as a square. The expectation is that for each stencil, 3
// of its points fall on the interior border of the test video while the remaining point
// falls inside one of the color rectangles. This helps us detect undesired
// stretching/shifting/rotation/mirroring. The naming convention for each point of a
// stencil is as follows:
//
// inner_Y_X_00: the corner of the stencil closest to the Y-X corner of the video.
// inner_Y_X_01: the corner of the stencil that's in the interior X border of the video.
// inner_Y_X_10: the corner of the stencil that's in the interior Y border of the video.
// inner_Y_X_11: the only corner of the stencil that's not on the border strip.
//
// For example, the top-right corner of the test video looks like this:
//
// MMMMMMMMMMMMMMMM
// MMMMMMMMMM2MMM0M
// MMMMMMMMMMMMMMMM
// 3 M1M
// MMM
//
// Where 'M' is the magenta interior border. So the names of each of the points 0, 1, 2, 3
// are:
//
// 0: inner_top_right_00
// 1: inner_top_right_01
// 2: inner_top_right_10
// 3: inner_top_right_11
func ColorSamplingPointsForStillColorsVideo(videoW, videoH int) map[string]image.Point {
outerCorners := map[string]image.Point{
"outer_top_left": {1, 1},
"outer_top_right": {(videoW - 1) - 1, 1},
"outer_bottom_right": {videoW - 1, videoH - 1},
"outer_bottom_left": {1, (videoH - 1) - 1},
}
edgeOffset := 5
stencilW := 5
innerCorners := map[string]image.Point{
"inner_top_left_00": {edgeOffset, edgeOffset},
"inner_top_left_01": {edgeOffset, edgeOffset + stencilW},
"inner_top_left_10": {edgeOffset + stencilW, edgeOffset},
"inner_top_left_11": {edgeOffset + stencilW, edgeOffset + stencilW},
"inner_top_right_00": {(videoW - 1) - edgeOffset, edgeOffset},
"inner_top_right_01": {(videoW - 1) - edgeOffset, edgeOffset + stencilW},
"inner_top_right_10": {(videoW - 1) - edgeOffset - stencilW, edgeOffset},
"inner_top_right_11": {(videoW - 1) - edgeOffset - stencilW, edgeOffset + stencilW},
"inner_bottom_right_00": {(videoW - 1) - edgeOffset, (videoH - 1) - edgeOffset},
"inner_bottom_right_01": {(videoW - 1) - edgeOffset, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_right_10": {(videoW - 1) - edgeOffset - stencilW, (videoH - 1) - edgeOffset},
"inner_bottom_right_11": {(videoW - 1) - edgeOffset - stencilW, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_left_00": {edgeOffset, (videoH - 1) - edgeOffset},
"inner_bottom_left_01": {edgeOffset, (videoH - 1) - edgeOffset - stencilW},
"inner_bottom_left_10": {edgeOffset + stencilW, (videoH - 1) - edgeOffset},
"inner_bottom_left_11": {edgeOffset + stencilW, (videoH - 1) - edgeOffset - stencilW},
}
samples := map[string]image.Point{}
for k, v := range innerCorners {
samples[k] = v
}
for k, v := range outerCorners {
samples[k] = v
}
return samples
}
// isVideoPadding reports whether c corresponds to the expected color of the padding that a
// video gets when in full screen so that it appears centered. This color is black within a
// certain tolerance.
func isVideoPadding(c color.Color) bool {
	// The tolerance was picked empirically. For example, on kukui, the first padding row below
	// the video has a color of (20, 1, 22, 255).
	const tolerance = 25
	return ColorDistance(c, color.RGBA{A: 255}) < tolerance
}
// TestPlay checks that the video file named filename can be played using Chrome.
// videotype represents a type of a given video. If it is MSEVideo, filename is a name
// of MPD file.
// If mode is VerifyHWAcceleratorUsed, this function also checks if hardware accelerator was used;
// VerifyNoHWAcceleratorUsed and VerifyHWDRMUsed check the complementary/HW DRM conditions.
func TestPlay(ctx context.Context, s *testing.State, cs ash.ConnSource, cr *chrome.Chrome,
	filename string, videotype VideoType, mode VerifyHWAcceleratorMode, unmutePlayer bool) error {
	if unmutePlayer && videotype != NormalVideo {
		return errors.New("got unmutePlayer = true, expected false: unmutePlayer " +
			"is only implemented for videoType = NormalVideo")
	}
	vl, err := logging.NewVideoLogger()
	if err != nil {
		return err
	}
	defer vl.Close()

	// Mute audio for the duration of the test; restore on exit.
	if err := crastestclient.Mute(ctx); err != nil {
		return err
	}
	defer crastestclient.Unmute(ctx)

	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()

	var playErr error
	var url string
	usesPlatformVideoDecoder, isHwDrmPipeline := false, false
	switch videotype {
	case NormalVideo:
		url = server.URL + "/video.html"
		usesPlatformVideoDecoder, playErr = playVideo(ctx, cs, filename, url, unmutePlayer)
	case MSEVideo:
		url = server.URL + "/shaka.html"
		usesPlatformVideoDecoder, playErr = playMSEVideo(ctx, cs, filename, url)
	case DRMVideo:
		url = server.URL + "/shaka_drm.html"
		isHwDrmPipeline, playErr = playDRMVideo(ctx, s, cs, cr, filename, url)
	}
	if playErr != nil {
		// Wrap the actual playback error. (Previously this wrapped the stale
		// err from NewVideoLogger — nil at this point — which could discard
		// the real failure.)
		return errors.Wrapf(playErr, "failed to play %v (%v)", filename, url)
	}

	if mode == NoVerifyHWAcceleratorUsed {
		// Early return when no verification is needed.
		return nil
	}
	if mode == VerifyHWAcceleratorUsed && !usesPlatformVideoDecoder {
		return errors.New("video decode acceleration was not used when it was expected to")
	}
	if mode == VerifyNoHWAcceleratorUsed && usesPlatformVideoDecoder {
		return errors.New("software decoding was not used when it was expected to")
	}
	if mode == VerifyHWDRMUsed && !isHwDrmPipeline {
		return errors.New("HW DRM video pipeline was not used when it was expected to")
	}
	return nil
}
// TestSeek checks that the video file named filename can be seeked around.
// It will play the video and seek randomly into it numSeeks times.
func TestSeek(ctx context.Context, httpHandler http.Handler, cs ash.ConnSource, filename, outDir string, numSeeks int) error {
	vl, err := logging.NewVideoLogger()
	if err != nil {
		return err
	}
	defer vl.Close()

	server := httptest.NewServer(httpHandler)
	defer server.Close()

	if err := playSeekVideo(ctx, cs, filename, server.URL, outDir, numSeeks); err != nil {
		// Wrapf already chains err; formatting it into the message as well
		// (as the previous code did) duplicated the error text.
		return errors.Wrapf(err, "failed to play %v (%v)", filename, server.URL)
	}
	return nil
}
// TestPlayAndScreenshot plays the filename video, switches it to full
// screen mode, takes a screenshot and analyzes the resulting image to
// sample the colors of a few interesting points and compare them against
// expectations. The expectations are defined by refFilename which is a
// PNG file corresponding to the ideally rendered video frame in the absence
// of scaling or artifacts.
//
// Caveat: this test does not disable night light. Night light doesn't
// seem to affect the output of the screenshot tool, but this might
// not hold in the future in case we decide to apply night light at
// compositing time if the hardware does not support the color
// transform matrix.
func TestPlayAndScreenshot(ctx context.Context, s *testing.State, tconn *chrome.TestConn, cs ash.ConnSource, filename, refFilename string) error {
	server := httptest.NewServer(http.FileServer(s.DataFileSystem()))
	defer server.Close()

	// Note: path.Join must not be applied to the full URL — it collapses the
	// "//" in "http://" and yields a malformed "http:/..." address. Join only
	// the path component instead.
	url := server.URL + path.Join("/", "video.html")
	conn, err := cs.NewConn(ctx, url)
	if err != nil {
		return errors.Wrapf(err, "failed to open %v", url)
	}
	defer conn.Close()

	// For consistency across test runs, ensure that the device is in landscape-primary orientation.
	if err = graphics.RotateDisplayToLandscapePrimary(ctx, tconn); err != nil {
		return errors.Wrap(err, "failed to set display to landscape-primary orientation")
	}

	// Make the video go to full screen mode by pressing 'f': requestFullScreen() needs a user gesture.
	ew, err := input.Keyboard(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to initialize the keyboard writer")
	}
	// Release the keyboard device when done (this was previously leaked).
	defer ew.Close()
	if err := ew.Type(ctx, "f"); err != nil {
		return errors.Wrap(err, "failed to inject the 'f' key")
	}

	// Start playing the video indefinitely.
	if err := conn.Call(ctx, nil, "playRepeatedly", filename); err != nil {
		return errors.Wrapf(err, "failed to play %v", filename)
	}

	// TODO(andrescj): this sleep is here to wait prior to taking the screenshot to make sure the video
	// is on the screen and to let the "Press Esc to exit full screen" message disappear. This is to
	// make sure the video is the only thing on the screen and thus minimize the excuses Chrome would
	// have to not promote it to a HW overlay. Poll instead for two conditions:
	// 1) The screenshot is correct (i.e., do the checks below), and
	// 2) There is a HW overlay.
	if err := testing.Sleep(ctx, delayToScreenshot); err != nil {
		return errors.Wrap(err, "failed to sleep prior to taking screenshot")
	}
	sshotPath := filepath.Join(s.OutDir(), "screenshot.png")
	if err := screenshot.Capture(ctx, sshotPath); err != nil {
		return errors.Wrap(err, "failed to capture screen")
	}

	// Decode the screenshot and rotate it if necessary to make later steps easier.
	f, err := os.Open(sshotPath)
	if err != nil {
		return errors.Wrapf(err, "failed to open %v", sshotPath)
	}
	img, _, err := image.Decode(f)
	// Close the file now because we might open it for writing again later.
	if err := f.Close(); err != nil {
		return errors.Wrapf(err, "failed to close %v", sshotPath)
	}
	if err != nil {
		return errors.Wrapf(err, "could not decode %v", sshotPath)
	}
	if img.Bounds().Dx() < img.Bounds().Dy() {
		s.Log("The screenshot is in portrait orientation; rotating it")
		rotImg := image.NewRGBA(image.Rectangle{image.Point{}, image.Point{img.Bounds().Max.Y, img.Bounds().Max.X}})
		for dstY := 0; dstY < rotImg.Bounds().Dy(); dstY++ {
			for dstX := 0; dstX < rotImg.Bounds().Dx(); dstX++ {
				srcColor := img.At(dstY, img.Bounds().Dy()-1-dstX)
				rotImg.Set(dstX, dstY, srcColor)
			}
		}
		f, err := os.Create(sshotPath)
		if err != nil {
			return errors.Wrapf(err, "could not create the rotated screenshot (%v)", sshotPath)
		}
		defer f.Close()
		if err := png.Encode(f, rotImg); err != nil {
			return errors.Wrapf(err, "could not encode the rotated screenshot (%v)", sshotPath)
		}
		img = rotImg
	}

	// Find the bounds of the video by excluding the black strips on each side.
	xMiddle := img.Bounds().Dx() / 2
	yMiddle := img.Bounds().Dy() / 2
	top := 0
	for ; top < img.Bounds().Dy(); top++ {
		if !isVideoPadding(img.At(xMiddle, top)) {
			break
		}
	}
	bottom := img.Bounds().Dy() - 1
	for ; bottom >= 0; bottom-- {
		if !isVideoPadding(img.At(xMiddle, bottom)) {
			break
		}
	}
	if bottom <= top {
		return errors.New("could not find the top or bottom boundary of the video")
	}
	left := 0
	for ; left < img.Bounds().Dx(); left++ {
		if !isVideoPadding(img.At(left, yMiddle)) {
			break
		}
	}
	right := img.Bounds().Dx() - 1
	for ; right >= 0; right-- {
		if !isVideoPadding(img.At(right, yMiddle)) {
			break
		}
	}
	if right <= left {
		return errors.New("could not find the left or right boundary of the video")
	}
	s.Logf("Video bounds: (left, top) = (%d, %d); (right, bottom) = (%d, %d)",
		left, top, right, bottom)

	// Open the reference file to assert expectations on the screenshot later.
	refPath := s.DataPath(refFilename)
	f, err = os.Open(refPath)
	if err != nil {
		return errors.Wrapf(err, "failed to open %v", refPath)
	}
	defer f.Close()
	refImg, _, err := image.Decode(f)
	if err != nil {
		return errors.Wrapf(err, "could not decode %v", refPath)
	}
	videoW := refImg.Bounds().Dx()
	videoH := refImg.Bounds().Dy()

	// Measurement 1:
	// We'll sample a few interesting pixels and report the color distance with
	// respect to the reference image.
	samples := ColorSamplingPointsForStillColorsVideo(videoW, videoH)
	p := perf.NewValues()
	maxDistance := -1
	maxDistancePoint := ""
	for k, v := range samples {
		// First convert the coordinates from video space to screenshot space.
		videoX := v.X
		videoY := v.Y
		screenX := left + (right-left)*v.X/(videoW-1)
		screenY := top + (bottom-top)*v.Y/(videoH-1)
		// Then report the distance between the expected and actual colors at this location.
		expectedColor := refImg.At(videoX, videoY)
		actualColor := img.At(screenX, screenY)
		distance := ColorDistance(expectedColor, actualColor)
		if distance > maxDistance {
			maxDistance = distance
			maxDistancePoint = k
		}
		if distance != 0 {
			s.Logf("At %v (video space = (%d, %d), screen space = (%d, %d)): expected RGBA = %v; got RGBA = %v; distance = %d",
				k, videoX, videoY, screenX, screenY, expectedColor, actualColor, distance)
		}
		p.Set(perf.Metric{
			Name:      k,
			Unit:      "None",
			Direction: perf.SmallerIsBetter,
		}, float64(distance))
	}
	// The distance threshold was decided by analyzing the data reported above
	// across many devices. It should ideally be smaller, but for now, it seems we
	// have color space handling issues. Nonetheless, this threshold should be
	// enough for detecting major video rendering issues. Note that:
	//
	// 1) We still report the distances as perf values so we can continue to
	// analyze and improve.
	// 2) We don't bother to report a total distance if this threshold is exceeded
	// because it would just make email alerts very noisy.
	if maxDistance > 100 {
		p.Save(s.OutDir())
		return errors.Errorf("the color distance for %v = %d exceeds the threshold (100)", maxDistancePoint, maxDistance)
	}

	// Measurement 2:
	// We report an aggregate distance for the image: we go through all the pixels
	// in the screenshot video to add up all the distances and then normalize by
	// the number of pixels at the end.
	totalDistance := 0.0
	for row := top; row <= bottom; row++ {
		for col := left; col <= right; col++ {
			// First convert the coordinates from screenshot space to video space.
			videoX := (col - left) * (videoW - 1) / (right - left)
			videoY := (row - top) * (videoH - 1) / (bottom - top)
			expectedColor := refImg.At(videoX, videoY)
			actualColor := img.At(col, row)
			totalDistance += float64(ColorDistance(expectedColor, actualColor))
		}
	}
	totalDistance /= float64((right - left + 1) * (bottom - top + 1))
	s.Log("The total distance for the entire image is ", totalDistance)
	p.Set(perf.Metric{
		Name:      "total_distance",
		Unit:      "None",
		Direction: perf.SmallerIsBetter,
	}, totalDistance)
	p.Save(s.OutDir())
	return nil
} | "image/png"
"math"
"net/http"
"net/http/httptest"
"os" | random_line_split |
redisquota.go | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package redisquota provides a quota implementation with redis as backend.
// The prerequisite is to have a redis server running.
//
//
// nolint: lll
//go:generate $GOPATH/src/istio.io/istio/bin/mixer_codegen.sh -a mixer/adapter/redisquota/config/config.proto -x "-n redisquota -t quota"
package redisquota // import "istio.io/istio/mixer/adapter/redisquota"
import (
"context"
"fmt"
"hash/fnv"
"io"
"sort"
"strconv"
"time"
"github.com/go-redis/redis"
"istio.io/istio/mixer/adapter/redisquota/config"
"istio.io/istio/mixer/pkg/adapter"
"istio.io/istio/mixer/pkg/status"
"istio.io/istio/mixer/template/quota"
)
var (
	// rateLimitingLUAScripts maps each supported quota algorithm to the LUA
	// script source that implements it inside redis (the lua* constants are
	// presumably defined elsewhere in this package — not visible here).
	rateLimitingLUAScripts = map[config.Params_QuotaAlgorithm]string{
		config.FIXED_WINDOW:   luaFixedWindow,
		config.ROLLING_WINDOW: luaRollingWindow,
	}
)
type (
	// builder accumulates configuration from the framework before Build()
	// produces a handler.
	builder struct {
		quotaTypes    map[string]*quota.Type
		adapterConfig *config.Params
	}

	// handler serves quota requests against a redis backend.
	handler struct {
		// go-redis client
		// connection pool with redis
		client *redis.Client

		// the limits we know about
		limits map[string]*config.Params_Quota

		// dimension hash map, keyed by the address of each override's
		// Dimensions map (precomputed in Build for cheap lookup).
		dimensionHash map[*map[string]string]string

		// list of algorithm LUA scripts
		scripts map[config.Params_QuotaAlgorithm]*redis.Script

		// indirection to support fast deterministic tests
		getTime func() time.Time

		// logger provided by the framework
		logger adapter.Logger
	}
)
// ensure our types implement the requisite interfaces
var _ quota.HandlerBuilder = &builder{}
var _ quota.Handler = &handler{}
///////////////// Configuration Methods ///////////////
// SetQuotaTypes caches the inferred quota types so Validate/Build can check
// that every declared quota has a configured limit. (quota.HandlerBuilder)
func (b *builder) SetQuotaTypes(quotaTypes map[string]*quota.Type) {
	b.quotaTypes = quotaTypes
}
// SetAdapterConfig stores the adapter configuration. (adapter.HandlerBuilder)
// NOTE(review): unchecked type assertion — assumes the framework always passes
// a *config.Params here; confirm against the Mixer adapter contract.
func (b *builder) SetAdapterConfig(cfg adapter.Config) {
	b.adapterConfig = cfg.(*config.Params)
}
// Validate checks the adapter configuration: per-quota settings (name,
// durations, overrides), that every declared quota type has a limit, and that
// the configured redis server is reachable and accepts the LUA scripts.
// (adapter.HandlerBuilder)
func (b *builder) Validate() (ce *adapter.ConfigErrors) {
	info := GetInfo()

	if len(b.adapterConfig.Quotas) == 0 {
		ce = ce.Appendf("quotas", "quota should not be empty")
	}

	limits := make(map[string]*config.Params_Quota, len(b.adapterConfig.Quotas))
	for idx := range b.adapterConfig.Quotas {
		quotas := &b.adapterConfig.Quotas[idx]
		if len(quotas.Name) == 0 {
			ce = ce.Appendf("name", "quotas.name should not be empty")
			continue
		}
		limits[quotas.Name] = quotas

		if quotas.ValidDuration == 0 {
			// NOTE(review): the message text reads oddly ("should be bigger must be > 0").
			ce = ce.Appendf("valid_duration", "quotas.valid_duration should be bigger must be > 0")
			continue
		}

		if quotas.RateLimitAlgorithm == config.ROLLING_WINDOW {
			// NOTE(review): the next line appears truncated in this copy of the
			// source (the body handling BucketDuration == 0 is missing) — restore
			// it from the upstream file before building.
			if quotas.BucketDuration == 0 |
			if quotas.ValidDuration > 0 && quotas.BucketDuration > 0 &&
				quotas.ValidDuration <= quotas.BucketDuration {
				ce = ce.Appendf("valid_duration", "quotas.valid_duration: %v should be longer than quotas.bucket_duration: %v for ROLLING_WINDOW algorithm",
					quotas.ValidDuration, quotas.BucketDuration)
				continue
			}
		}

		for index := range quotas.Overrides {
			if quotas.Overrides[index].MaxAmount <= 0 {
				ce = ce.Appendf("max_amount", "quotas.overrides.max_amount must be > 0")
				continue
			}
			if len(quotas.Overrides[index].Dimensions) == 0 {
				ce = ce.Appendf("dimensions", "quotas.overrides.dimensions is empty")
				continue
			}
		}
	}

	// Every quota type declared in the template must have a configured limit.
	for k := range b.quotaTypes {
		if _, ok := limits[k]; !ok {
			ce = ce.Appendf("quotas", "did not find limit defined for quota %v", k)
		}
	}

	// check redis related configuration
	if b.adapterConfig.ConnectionPoolSize < 0 {
		ce = ce.Appendf("connection_pool_size", "connection_pool_size of %v is invalid, must be > 0",
			b.adapterConfig.ConnectionPoolSize)
	}

	if len(b.adapterConfig.RedisServerUrl) == 0 {
		ce = ce.Appendf("redis_server_url", "redis_server_url should not be empty")
	}

	// test redis connection
	option := redis.Options{
		Addr: b.adapterConfig.RedisServerUrl,
	}
	if b.adapterConfig.ConnectionPoolSize > 0 {
		option.PoolSize = int(b.adapterConfig.ConnectionPoolSize)
	}

	client := redis.NewClient(&option)
	if _, err := client.Ping().Result(); err != nil {
		ce = ce.Appendf(info.Name, "could not create a connection to redis server: %v", err)
		return
	}

	// check scripts loading to redis
	scripts := make(map[config.Params_QuotaAlgorithm]*redis.Script, 2)
	for algorithm, script := range rateLimitingLUAScripts {
		scripts[algorithm] = redis.NewScript(script)
		if _, err := scripts[algorithm].Load(client).Result(); err != nil {
			ce = ce.Appendf(info.Name, "unable to initialized redis service: %v", err)
			return
		}
	}

	_ = client.Close()
	return
}
// getDimensionHash returns a hash key of the given dimensions, iterating the
// keys in sorted order so the result is independent of map iteration order.
func getDimensionHash(dimensions map[string]string) string {
	keys := make([]string, 0, len(dimensions))
	for key := range dimensions {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	hasher := fnv.New32a()
	for _, key := range keys {
		_, _ = io.WriteString(hasher, key+"\t"+dimensions[key]+"\n")
	}
	return strconv.Itoa(int(hasher.Sum32()))
}
func (b *builder) Build(context context.Context, env adapter.Env) (adapter.Handler, error) {
limits := make(map[string]*config.Params_Quota, len(b.adapterConfig.Quotas))
for idx := range b.adapterConfig.Quotas {
limits[b.adapterConfig.Quotas[idx].Name] = &b.adapterConfig.Quotas[idx]
}
// Build memory address of dimensions to hash map
dimensionHash := make(map[*map[string]string]string)
for key := range limits {
for index := range limits[key].Overrides {
dimensionHash[&(limits[key].Overrides[index].Dimensions)] =
getDimensionHash(limits[key].Overrides[index].Dimensions)
}
}
// initialize redis client
option := redis.Options{
Addr: b.adapterConfig.RedisServerUrl,
}
if b.adapterConfig.ConnectionPoolSize > 0 {
option.PoolSize = int(b.adapterConfig.ConnectionPoolSize)
}
client := redis.NewClient(&option)
if _, err := client.Ping().Result(); err != nil {
return nil, fmt.Errorf("could not create a connection to redis server: %v", err)
}
// load scripts into redis
scripts := make(map[config.Params_QuotaAlgorithm]*redis.Script, 2)
for algorithm, script := range rateLimitingLUAScripts {
scripts[algorithm] = redis.NewScript(script)
}
h := &handler{
client: client,
limits: limits,
scripts: scripts,
logger: env.Logger(),
getTime: time.Now,
dimensionHash: dimensionHash,
}
return h, nil
}
////////////////// Runtime Methods //////////////////////////
// matchDimensions matches configured dimensions with dimensions of the instance.
func matchDimensions(cfg *map[string]string, inst *map[string]interface{}) bool {
for k, val := range *cfg {
if rval, ok := (*inst)[k]; ok {
if adapter.StringEquals(rval, val) { // this dimension matches, on to next comparison.
continue
}
}
// rval does not match val.
return false
}
return true
}
func getAllocatedTokenFromResult(result *interface{}) (int64, time.Duration, error) {
if res, ok := (*result).([]interface{}); ok {
if len(res) != 2 {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", *result)
}
// read token
tokenValue, tokenOk := res[0].(int64)
if !tokenOk {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
// read expiration
expValue, expOk := res[1].(int64)
if !expOk {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
return tokenValue, time.Duration(expValue) * time.Nanosecond, nil
}
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
// find override
func (h *handler) getKeyAndQuotaAmount(instance *quota.Instance, quota *config.Params_Quota) (string, int64, error) {
maxAmount := quota.MaxAmount
key := quota.Name
for idx := range quota.Overrides {
if matchDimensions("a.Overrides[idx].Dimensions, &instance.Dimensions) {
h.logger.Debugf("quota override: %v selected for %v", quota.Overrides[idx], *instance)
if hash, ok := h.dimensionHash["a.Overrides[idx].Dimensions]; ok {
// override key and max amount
key = key + "-" + hash
maxAmount = quota.Overrides[idx].MaxAmount
return key, maxAmount, nil
}
// This should not be happen
return "", 0, fmt.Errorf("quota override dimension hash lookup failed: %v in %v",
h.limits[instance.Name].Overrides[idx].Dimensions, h.dimensionHash)
}
}
return key, maxAmount, nil
}
func (h *handler) HandleQuota(context context.Context, instance *quota.Instance, args adapter.QuotaArgs) (adapter.QuotaResult, error) {
now := h.getTime()
if limit, ok := h.limits[instance.Name]; ok {
if script, ok := h.scripts[limit.RateLimitAlgorithm]; ok {
ret := status.OK
// get overridden quotaAmount and quotaKey
key, maxAmount, err := h.getKeyAndQuotaAmount(instance, limit)
if err != nil {
_ = h.logger.Errorf("%v", err.Error())
return adapter.QuotaResult{}, nil
}
h.logger.Debugf("key: %v maxAmount: %v", key, maxAmount)
// execute lua algorithm script
result, err := script.Run(
h.client,
[]string{
key + ".meta", // KEY[1]
key + ".data", // KEY[2]
},
maxAmount, // ARGV[1] credit
limit.GetValidDuration().Nanoseconds(), // ARGV[2] window length
limit.GetBucketDuration().Nanoseconds(), // ARGV[3] bucket length
args.BestEffort, // ARGV[4] best effort
args.QuotaAmount, // ARGV[5] token
now.UnixNano(), // ARGV[6] timestamp
args.DeduplicationID, // ARGS[8] deduplication id
).Result()
if err != nil {
_ = h.logger.Errorf("failed to run quota script: %v", err)
return adapter.QuotaResult{}, nil
}
allocated, expiration, err := getAllocatedTokenFromResult(&result)
if err != nil {
_ = h.logger.Errorf("%v", err)
return adapter.QuotaResult{}, nil
}
if allocated <= 0 {
ret = status.WithResourceExhausted("redisquota: Resource exhausted")
}
return adapter.QuotaResult{
Status: ret,
Amount: allocated,
ValidDuration: expiration * time.Nanosecond,
}, nil
}
}
return adapter.QuotaResult{}, nil
}
func (h handler) Close() error {
return h.client.Close()
}
////////////////// Bootstrap //////////////////////////
// GetInfo returns the Info associated with this adapter implementation.
func GetInfo() adapter.Info {
return adapter.Info{
Name: "redisquota",
Impl: "istio.io/mixer/adapter/redisquota",
Description: "Redis-based quotas.",
SupportedTemplates: []string{
quota.TemplateName,
},
DefaultConfig: &config.Params{
RedisServerUrl: "localhost:6379",
ConnectionPoolSize: 10,
},
NewBuilder: func() adapter.HandlerBuilder { return &builder{} },
}
}
///////////////////////////////////////////////////////
| {
ce = ce.Appendf("bucket_duration", "quotas.bucket_duration should be > 0 for ROLLING_WINDOW algorithm")
continue
} | conditional_block |
redisquota.go | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package redisquota provides a quota implementation with redis as backend.
// The prerequisite is to have a redis server running.
//
//
// nolint: lll
//go:generate $GOPATH/src/istio.io/istio/bin/mixer_codegen.sh -a mixer/adapter/redisquota/config/config.proto -x "-n redisquota -t quota"
package redisquota // import "istio.io/istio/mixer/adapter/redisquota"
import (
"context"
"fmt"
"hash/fnv"
"io"
"sort"
"strconv"
"time"
"github.com/go-redis/redis"
"istio.io/istio/mixer/adapter/redisquota/config"
"istio.io/istio/mixer/pkg/adapter"
"istio.io/istio/mixer/pkg/status"
"istio.io/istio/mixer/template/quota"
)
var (
// LUA rate-limiting algorithm scripts
rateLimitingLUAScripts = map[config.Params_QuotaAlgorithm]string{
config.FIXED_WINDOW: luaFixedWindow,
config.ROLLING_WINDOW: luaRollingWindow,
}
)
type (
builder struct {
quotaTypes map[string]*quota.Type
adapterConfig *config.Params
}
handler struct {
// go-redis client
// connection pool with redis
client *redis.Client
// the limits we know about
limits map[string]*config.Params_Quota
// dimension hash map
dimensionHash map[*map[string]string]string
// list of algorithm LUA scripts
scripts map[config.Params_QuotaAlgorithm]*redis.Script
// indirection to support fast deterministic tests
getTime func() time.Time
// logger provided by the framework
logger adapter.Logger
}
)
// ensure our types implement the requisite interfaces
var _ quota.HandlerBuilder = &builder{}
var _ quota.Handler = &handler{}
///////////////// Configuration Methods ///////////////
func (b *builder) SetQuotaTypes(quotaTypes map[string]*quota.Type) {
b.quotaTypes = quotaTypes
}
func (b *builder) SetAdapterConfig(cfg adapter.Config) {
b.adapterConfig = cfg.(*config.Params)
}
func (b *builder) Validate() (ce *adapter.ConfigErrors) |
// getOverrideHash returns hash key of the given dimension in sorted by key
func getDimensionHash(dimensions map[string]string) string {
var keys []string
for k := range dimensions {
keys = append(keys, k)
}
sort.Strings(keys)
h := fnv.New32a()
for _, key := range keys {
_, _ = io.WriteString(h, key+"\t"+dimensions[key]+"\n")
}
return strconv.Itoa(int(h.Sum32()))
}
func (b *builder) Build(context context.Context, env adapter.Env) (adapter.Handler, error) {
limits := make(map[string]*config.Params_Quota, len(b.adapterConfig.Quotas))
for idx := range b.adapterConfig.Quotas {
limits[b.adapterConfig.Quotas[idx].Name] = &b.adapterConfig.Quotas[idx]
}
// Build memory address of dimensions to hash map
dimensionHash := make(map[*map[string]string]string)
for key := range limits {
for index := range limits[key].Overrides {
dimensionHash[&(limits[key].Overrides[index].Dimensions)] =
getDimensionHash(limits[key].Overrides[index].Dimensions)
}
}
// initialize redis client
option := redis.Options{
Addr: b.adapterConfig.RedisServerUrl,
}
if b.adapterConfig.ConnectionPoolSize > 0 {
option.PoolSize = int(b.adapterConfig.ConnectionPoolSize)
}
client := redis.NewClient(&option)
if _, err := client.Ping().Result(); err != nil {
return nil, fmt.Errorf("could not create a connection to redis server: %v", err)
}
// load scripts into redis
scripts := make(map[config.Params_QuotaAlgorithm]*redis.Script, 2)
for algorithm, script := range rateLimitingLUAScripts {
scripts[algorithm] = redis.NewScript(script)
}
h := &handler{
client: client,
limits: limits,
scripts: scripts,
logger: env.Logger(),
getTime: time.Now,
dimensionHash: dimensionHash,
}
return h, nil
}
////////////////// Runtime Methods //////////////////////////
// matchDimensions matches configured dimensions with dimensions of the instance.
func matchDimensions(cfg *map[string]string, inst *map[string]interface{}) bool {
for k, val := range *cfg {
if rval, ok := (*inst)[k]; ok {
if adapter.StringEquals(rval, val) { // this dimension matches, on to next comparison.
continue
}
}
// rval does not match val.
return false
}
return true
}
func getAllocatedTokenFromResult(result *interface{}) (int64, time.Duration, error) {
if res, ok := (*result).([]interface{}); ok {
if len(res) != 2 {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", *result)
}
// read token
tokenValue, tokenOk := res[0].(int64)
if !tokenOk {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
// read expiration
expValue, expOk := res[1].(int64)
if !expOk {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
return tokenValue, time.Duration(expValue) * time.Nanosecond, nil
}
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
// find override
func (h *handler) getKeyAndQuotaAmount(instance *quota.Instance, quota *config.Params_Quota) (string, int64, error) {
maxAmount := quota.MaxAmount
key := quota.Name
for idx := range quota.Overrides {
if matchDimensions("a.Overrides[idx].Dimensions, &instance.Dimensions) {
h.logger.Debugf("quota override: %v selected for %v", quota.Overrides[idx], *instance)
if hash, ok := h.dimensionHash["a.Overrides[idx].Dimensions]; ok {
// override key and max amount
key = key + "-" + hash
maxAmount = quota.Overrides[idx].MaxAmount
return key, maxAmount, nil
}
// This should not be happen
return "", 0, fmt.Errorf("quota override dimension hash lookup failed: %v in %v",
h.limits[instance.Name].Overrides[idx].Dimensions, h.dimensionHash)
}
}
return key, maxAmount, nil
}
func (h *handler) HandleQuota(context context.Context, instance *quota.Instance, args adapter.QuotaArgs) (adapter.QuotaResult, error) {
now := h.getTime()
if limit, ok := h.limits[instance.Name]; ok {
if script, ok := h.scripts[limit.RateLimitAlgorithm]; ok {
ret := status.OK
// get overridden quotaAmount and quotaKey
key, maxAmount, err := h.getKeyAndQuotaAmount(instance, limit)
if err != nil {
_ = h.logger.Errorf("%v", err.Error())
return adapter.QuotaResult{}, nil
}
h.logger.Debugf("key: %v maxAmount: %v", key, maxAmount)
// execute lua algorithm script
result, err := script.Run(
h.client,
[]string{
key + ".meta", // KEY[1]
key + ".data", // KEY[2]
},
maxAmount, // ARGV[1] credit
limit.GetValidDuration().Nanoseconds(), // ARGV[2] window length
limit.GetBucketDuration().Nanoseconds(), // ARGV[3] bucket length
args.BestEffort, // ARGV[4] best effort
args.QuotaAmount, // ARGV[5] token
now.UnixNano(), // ARGV[6] timestamp
args.DeduplicationID, // ARGS[8] deduplication id
).Result()
if err != nil {
_ = h.logger.Errorf("failed to run quota script: %v", err)
return adapter.QuotaResult{}, nil
}
allocated, expiration, err := getAllocatedTokenFromResult(&result)
if err != nil {
_ = h.logger.Errorf("%v", err)
return adapter.QuotaResult{}, nil
}
if allocated <= 0 {
ret = status.WithResourceExhausted("redisquota: Resource exhausted")
}
return adapter.QuotaResult{
Status: ret,
Amount: allocated,
ValidDuration: expiration * time.Nanosecond,
}, nil
}
}
return adapter.QuotaResult{}, nil
}
func (h handler) Close() error {
return h.client.Close()
}
////////////////// Bootstrap //////////////////////////
// GetInfo returns the Info associated with this adapter implementation.
func GetInfo() adapter.Info {
return adapter.Info{
Name: "redisquota",
Impl: "istio.io/mixer/adapter/redisquota",
Description: "Redis-based quotas.",
SupportedTemplates: []string{
quota.TemplateName,
},
DefaultConfig: &config.Params{
RedisServerUrl: "localhost:6379",
ConnectionPoolSize: 10,
},
NewBuilder: func() adapter.HandlerBuilder { return &builder{} },
}
}
///////////////////////////////////////////////////////
| {
info := GetInfo()
if len(b.adapterConfig.Quotas) == 0 {
ce = ce.Appendf("quotas", "quota should not be empty")
}
limits := make(map[string]*config.Params_Quota, len(b.adapterConfig.Quotas))
for idx := range b.adapterConfig.Quotas {
quotas := &b.adapterConfig.Quotas[idx]
if len(quotas.Name) == 0 {
ce = ce.Appendf("name", "quotas.name should not be empty")
continue
}
limits[quotas.Name] = quotas
if quotas.ValidDuration == 0 {
ce = ce.Appendf("valid_duration", "quotas.valid_duration should be bigger must be > 0")
continue
}
if quotas.RateLimitAlgorithm == config.ROLLING_WINDOW {
if quotas.BucketDuration == 0 {
ce = ce.Appendf("bucket_duration", "quotas.bucket_duration should be > 0 for ROLLING_WINDOW algorithm")
continue
}
if quotas.ValidDuration > 0 && quotas.BucketDuration > 0 &&
quotas.ValidDuration <= quotas.BucketDuration {
ce = ce.Appendf("valid_duration", "quotas.valid_duration: %v should be longer than quotas.bucket_duration: %v for ROLLING_WINDOW algorithm",
quotas.ValidDuration, quotas.BucketDuration)
continue
}
}
for index := range quotas.Overrides {
if quotas.Overrides[index].MaxAmount <= 0 {
ce = ce.Appendf("max_amount", "quotas.overrides.max_amount must be > 0")
continue
}
if len(quotas.Overrides[index].Dimensions) == 0 {
ce = ce.Appendf("dimensions", "quotas.overrides.dimensions is empty")
continue
}
}
}
for k := range b.quotaTypes {
if _, ok := limits[k]; !ok {
ce = ce.Appendf("quotas", "did not find limit defined for quota %v", k)
}
}
// check redis related configuration
if b.adapterConfig.ConnectionPoolSize < 0 {
ce = ce.Appendf("connection_pool_size", "connection_pool_size of %v is invalid, must be > 0",
b.adapterConfig.ConnectionPoolSize)
}
if len(b.adapterConfig.RedisServerUrl) == 0 {
ce = ce.Appendf("redis_server_url", "redis_server_url should not be empty")
}
// test redis connection
option := redis.Options{
Addr: b.adapterConfig.RedisServerUrl,
}
if b.adapterConfig.ConnectionPoolSize > 0 {
option.PoolSize = int(b.adapterConfig.ConnectionPoolSize)
}
client := redis.NewClient(&option)
if _, err := client.Ping().Result(); err != nil {
ce = ce.Appendf(info.Name, "could not create a connection to redis server: %v", err)
return
}
// check scripts loading to redis
scripts := make(map[config.Params_QuotaAlgorithm]*redis.Script, 2)
for algorithm, script := range rateLimitingLUAScripts {
scripts[algorithm] = redis.NewScript(script)
if _, err := scripts[algorithm].Load(client).Result(); err != nil {
ce = ce.Appendf(info.Name, "unable to initialized redis service: %v", err)
return
}
}
_ = client.Close()
return
} | identifier_body |
redisquota.go | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package redisquota provides a quota implementation with redis as backend.
// The prerequisite is to have a redis server running.
//
//
// nolint: lll
//go:generate $GOPATH/src/istio.io/istio/bin/mixer_codegen.sh -a mixer/adapter/redisquota/config/config.proto -x "-n redisquota -t quota"
package redisquota // import "istio.io/istio/mixer/adapter/redisquota"
import (
"context"
"fmt"
"hash/fnv"
"io"
"sort"
"strconv"
"time"
"github.com/go-redis/redis"
"istio.io/istio/mixer/adapter/redisquota/config"
"istio.io/istio/mixer/pkg/adapter"
"istio.io/istio/mixer/pkg/status"
"istio.io/istio/mixer/template/quota"
)
var (
// LUA rate-limiting algorithm scripts
rateLimitingLUAScripts = map[config.Params_QuotaAlgorithm]string{
config.FIXED_WINDOW: luaFixedWindow,
config.ROLLING_WINDOW: luaRollingWindow,
}
)
type (
builder struct {
quotaTypes map[string]*quota.Type
adapterConfig *config.Params
}
handler struct {
// go-redis client
// connection pool with redis
client *redis.Client
// the limits we know about
limits map[string]*config.Params_Quota
// dimension hash map
dimensionHash map[*map[string]string]string
// list of algorithm LUA scripts
scripts map[config.Params_QuotaAlgorithm]*redis.Script
// indirection to support fast deterministic tests
getTime func() time.Time
// logger provided by the framework
logger adapter.Logger
}
)
// ensure our types implement the requisite interfaces
var _ quota.HandlerBuilder = &builder{}
var _ quota.Handler = &handler{}
///////////////// Configuration Methods ///////////////
func (b *builder) SetQuotaTypes(quotaTypes map[string]*quota.Type) {
b.quotaTypes = quotaTypes
}
func (b *builder) SetAdapterConfig(cfg adapter.Config) {
b.adapterConfig = cfg.(*config.Params)
}
func (b *builder) Validate() (ce *adapter.ConfigErrors) {
info := GetInfo()
if len(b.adapterConfig.Quotas) == 0 {
ce = ce.Appendf("quotas", "quota should not be empty")
}
limits := make(map[string]*config.Params_Quota, len(b.adapterConfig.Quotas))
for idx := range b.adapterConfig.Quotas {
quotas := &b.adapterConfig.Quotas[idx]
if len(quotas.Name) == 0 {
ce = ce.Appendf("name", "quotas.name should not be empty")
continue
}
limits[quotas.Name] = quotas
if quotas.ValidDuration == 0 {
ce = ce.Appendf("valid_duration", "quotas.valid_duration should be bigger must be > 0")
continue
}
if quotas.RateLimitAlgorithm == config.ROLLING_WINDOW {
if quotas.BucketDuration == 0 {
ce = ce.Appendf("bucket_duration", "quotas.bucket_duration should be > 0 for ROLLING_WINDOW algorithm")
continue
}
if quotas.ValidDuration > 0 && quotas.BucketDuration > 0 &&
quotas.ValidDuration <= quotas.BucketDuration {
ce = ce.Appendf("valid_duration", "quotas.valid_duration: %v should be longer than quotas.bucket_duration: %v for ROLLING_WINDOW algorithm",
quotas.ValidDuration, quotas.BucketDuration)
continue
}
}
for index := range quotas.Overrides {
if quotas.Overrides[index].MaxAmount <= 0 {
ce = ce.Appendf("max_amount", "quotas.overrides.max_amount must be > 0")
continue
}
if len(quotas.Overrides[index].Dimensions) == 0 {
ce = ce.Appendf("dimensions", "quotas.overrides.dimensions is empty")
continue
}
}
}
for k := range b.quotaTypes {
if _, ok := limits[k]; !ok {
ce = ce.Appendf("quotas", "did not find limit defined for quota %v", k)
}
}
// check redis related configuration
if b.adapterConfig.ConnectionPoolSize < 0 {
ce = ce.Appendf("connection_pool_size", "connection_pool_size of %v is invalid, must be > 0",
b.adapterConfig.ConnectionPoolSize)
}
if len(b.adapterConfig.RedisServerUrl) == 0 {
ce = ce.Appendf("redis_server_url", "redis_server_url should not be empty")
}
// test redis connection
option := redis.Options{
Addr: b.adapterConfig.RedisServerUrl,
}
if b.adapterConfig.ConnectionPoolSize > 0 {
option.PoolSize = int(b.adapterConfig.ConnectionPoolSize)
}
client := redis.NewClient(&option)
if _, err := client.Ping().Result(); err != nil {
ce = ce.Appendf(info.Name, "could not create a connection to redis server: %v", err)
return
}
// check scripts loading to redis
scripts := make(map[config.Params_QuotaAlgorithm]*redis.Script, 2)
for algorithm, script := range rateLimitingLUAScripts {
scripts[algorithm] = redis.NewScript(script)
if _, err := scripts[algorithm].Load(client).Result(); err != nil {
ce = ce.Appendf(info.Name, "unable to initialized redis service: %v", err)
return
}
}
_ = client.Close()
return
}
// getOverrideHash returns hash key of the given dimension in sorted by key
func getDimensionHash(dimensions map[string]string) string {
var keys []string
for k := range dimensions {
keys = append(keys, k)
}
sort.Strings(keys)
h := fnv.New32a()
for _, key := range keys {
_, _ = io.WriteString(h, key+"\t"+dimensions[key]+"\n")
}
return strconv.Itoa(int(h.Sum32()))
}
func (b *builder) Build(context context.Context, env adapter.Env) (adapter.Handler, error) {
limits := make(map[string]*config.Params_Quota, len(b.adapterConfig.Quotas))
for idx := range b.adapterConfig.Quotas {
limits[b.adapterConfig.Quotas[idx].Name] = &b.adapterConfig.Quotas[idx]
}
// Build memory address of dimensions to hash map
dimensionHash := make(map[*map[string]string]string)
for key := range limits {
for index := range limits[key].Overrides {
dimensionHash[&(limits[key].Overrides[index].Dimensions)] =
getDimensionHash(limits[key].Overrides[index].Dimensions)
}
}
// initialize redis client
option := redis.Options{
Addr: b.adapterConfig.RedisServerUrl,
}
if b.adapterConfig.ConnectionPoolSize > 0 {
option.PoolSize = int(b.adapterConfig.ConnectionPoolSize)
}
client := redis.NewClient(&option)
if _, err := client.Ping().Result(); err != nil {
return nil, fmt.Errorf("could not create a connection to redis server: %v", err)
}
// load scripts into redis
scripts := make(map[config.Params_QuotaAlgorithm]*redis.Script, 2)
for algorithm, script := range rateLimitingLUAScripts {
scripts[algorithm] = redis.NewScript(script)
}
h := &handler{
client: client,
limits: limits,
scripts: scripts,
logger: env.Logger(),
getTime: time.Now,
dimensionHash: dimensionHash,
}
return h, nil
}
////////////////// Runtime Methods //////////////////////////
// matchDimensions matches configured dimensions with dimensions of the instance.
func matchDimensions(cfg *map[string]string, inst *map[string]interface{}) bool {
for k, val := range *cfg {
if rval, ok := (*inst)[k]; ok {
if adapter.StringEquals(rval, val) { // this dimension matches, on to next comparison.
continue
}
}
// rval does not match val.
return false
}
return true
}
func getAllocatedTokenFromResult(result *interface{}) (int64, time.Duration, error) {
if res, ok := (*result).([]interface{}); ok {
if len(res) != 2 {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", *result)
}
// read token
tokenValue, tokenOk := res[0].(int64)
if !tokenOk {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
// read expiration
expValue, expOk := res[1].(int64)
if !expOk {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
return tokenValue, time.Duration(expValue) * time.Nanosecond, nil
}
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
// find override
func (h *handler) getKeyAndQuotaAmount(instance *quota.Instance, quota *config.Params_Quota) (string, int64, error) {
maxAmount := quota.MaxAmount
key := quota.Name
for idx := range quota.Overrides {
if matchDimensions("a.Overrides[idx].Dimensions, &instance.Dimensions) {
h.logger.Debugf("quota override: %v selected for %v", quota.Overrides[idx], *instance)
if hash, ok := h.dimensionHash["a.Overrides[idx].Dimensions]; ok {
// override key and max amount
key = key + "-" + hash
maxAmount = quota.Overrides[idx].MaxAmount
return key, maxAmount, nil
}
// This should not be happen
return "", 0, fmt.Errorf("quota override dimension hash lookup failed: %v in %v",
h.limits[instance.Name].Overrides[idx].Dimensions, h.dimensionHash)
}
}
return key, maxAmount, nil
}
func (h *handler) HandleQuota(context context.Context, instance *quota.Instance, args adapter.QuotaArgs) (adapter.QuotaResult, error) {
now := h.getTime()
if limit, ok := h.limits[instance.Name]; ok {
if script, ok := h.scripts[limit.RateLimitAlgorithm]; ok {
ret := status.OK
// get overridden quotaAmount and quotaKey
key, maxAmount, err := h.getKeyAndQuotaAmount(instance, limit)
if err != nil {
_ = h.logger.Errorf("%v", err.Error())
return adapter.QuotaResult{}, nil
}
h.logger.Debugf("key: %v maxAmount: %v", key, maxAmount)
// execute lua algorithm script | key + ".data", // KEY[2]
},
maxAmount, // ARGV[1] credit
limit.GetValidDuration().Nanoseconds(), // ARGV[2] window length
limit.GetBucketDuration().Nanoseconds(), // ARGV[3] bucket length
args.BestEffort, // ARGV[4] best effort
args.QuotaAmount, // ARGV[5] token
now.UnixNano(), // ARGV[6] timestamp
args.DeduplicationID, // ARGS[8] deduplication id
).Result()
if err != nil {
_ = h.logger.Errorf("failed to run quota script: %v", err)
return adapter.QuotaResult{}, nil
}
allocated, expiration, err := getAllocatedTokenFromResult(&result)
if err != nil {
_ = h.logger.Errorf("%v", err)
return adapter.QuotaResult{}, nil
}
if allocated <= 0 {
ret = status.WithResourceExhausted("redisquota: Resource exhausted")
}
return adapter.QuotaResult{
Status: ret,
Amount: allocated,
ValidDuration: expiration * time.Nanosecond,
}, nil
}
}
return adapter.QuotaResult{}, nil
}
func (h handler) Close() error {
return h.client.Close()
}
////////////////// Bootstrap //////////////////////////
// GetInfo returns the Info associated with this adapter implementation.
func GetInfo() adapter.Info {
return adapter.Info{
Name: "redisquota",
Impl: "istio.io/mixer/adapter/redisquota",
Description: "Redis-based quotas.",
SupportedTemplates: []string{
quota.TemplateName,
},
DefaultConfig: &config.Params{
RedisServerUrl: "localhost:6379",
ConnectionPoolSize: 10,
},
NewBuilder: func() adapter.HandlerBuilder { return &builder{} },
}
}
/////////////////////////////////////////////////////// | result, err := script.Run(
h.client,
[]string{
key + ".meta", // KEY[1] | random_line_split |
redisquota.go | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package redisquota provides a quota implementation with redis as backend.
// The prerequisite is to have a redis server running.
//
//
// nolint: lll
//go:generate $GOPATH/src/istio.io/istio/bin/mixer_codegen.sh -a mixer/adapter/redisquota/config/config.proto -x "-n redisquota -t quota"
package redisquota // import "istio.io/istio/mixer/adapter/redisquota"
import (
"context"
"fmt"
"hash/fnv"
"io"
"sort"
"strconv"
"time"
"github.com/go-redis/redis"
"istio.io/istio/mixer/adapter/redisquota/config"
"istio.io/istio/mixer/pkg/adapter"
"istio.io/istio/mixer/pkg/status"
"istio.io/istio/mixer/template/quota"
)
var (
// LUA rate-limiting algorithm scripts
rateLimitingLUAScripts = map[config.Params_QuotaAlgorithm]string{
config.FIXED_WINDOW: luaFixedWindow,
config.ROLLING_WINDOW: luaRollingWindow,
}
)
type (
builder struct {
quotaTypes map[string]*quota.Type
adapterConfig *config.Params
}
handler struct {
// go-redis client
// connection pool with redis
client *redis.Client
// the limits we know about
limits map[string]*config.Params_Quota
// dimension hash map
dimensionHash map[*map[string]string]string
// list of algorithm LUA scripts
scripts map[config.Params_QuotaAlgorithm]*redis.Script
// indirection to support fast deterministic tests
getTime func() time.Time
// logger provided by the framework
logger adapter.Logger
}
)
// ensure our types implement the requisite interfaces
var _ quota.HandlerBuilder = &builder{}
var _ quota.Handler = &handler{}
///////////////// Configuration Methods ///////////////
func (b *builder) SetQuotaTypes(quotaTypes map[string]*quota.Type) {
b.quotaTypes = quotaTypes
}
func (b *builder) SetAdapterConfig(cfg adapter.Config) {
b.adapterConfig = cfg.(*config.Params)
}
func (b *builder) Validate() (ce *adapter.ConfigErrors) {
info := GetInfo()
if len(b.adapterConfig.Quotas) == 0 {
ce = ce.Appendf("quotas", "quota should not be empty")
}
limits := make(map[string]*config.Params_Quota, len(b.adapterConfig.Quotas))
for idx := range b.adapterConfig.Quotas {
quotas := &b.adapterConfig.Quotas[idx]
if len(quotas.Name) == 0 {
ce = ce.Appendf("name", "quotas.name should not be empty")
continue
}
limits[quotas.Name] = quotas
if quotas.ValidDuration == 0 {
ce = ce.Appendf("valid_duration", "quotas.valid_duration should be bigger must be > 0")
continue
}
if quotas.RateLimitAlgorithm == config.ROLLING_WINDOW {
if quotas.BucketDuration == 0 {
ce = ce.Appendf("bucket_duration", "quotas.bucket_duration should be > 0 for ROLLING_WINDOW algorithm")
continue
}
if quotas.ValidDuration > 0 && quotas.BucketDuration > 0 &&
quotas.ValidDuration <= quotas.BucketDuration {
ce = ce.Appendf("valid_duration", "quotas.valid_duration: %v should be longer than quotas.bucket_duration: %v for ROLLING_WINDOW algorithm",
quotas.ValidDuration, quotas.BucketDuration)
continue
}
}
for index := range quotas.Overrides {
if quotas.Overrides[index].MaxAmount <= 0 {
ce = ce.Appendf("max_amount", "quotas.overrides.max_amount must be > 0")
continue
}
if len(quotas.Overrides[index].Dimensions) == 0 {
ce = ce.Appendf("dimensions", "quotas.overrides.dimensions is empty")
continue
}
}
}
for k := range b.quotaTypes {
if _, ok := limits[k]; !ok {
ce = ce.Appendf("quotas", "did not find limit defined for quota %v", k)
}
}
// check redis related configuration
if b.adapterConfig.ConnectionPoolSize < 0 {
ce = ce.Appendf("connection_pool_size", "connection_pool_size of %v is invalid, must be > 0",
b.adapterConfig.ConnectionPoolSize)
}
if len(b.adapterConfig.RedisServerUrl) == 0 {
ce = ce.Appendf("redis_server_url", "redis_server_url should not be empty")
}
// test redis connection
option := redis.Options{
Addr: b.adapterConfig.RedisServerUrl,
}
if b.adapterConfig.ConnectionPoolSize > 0 {
option.PoolSize = int(b.adapterConfig.ConnectionPoolSize)
}
client := redis.NewClient(&option)
if _, err := client.Ping().Result(); err != nil {
ce = ce.Appendf(info.Name, "could not create a connection to redis server: %v", err)
return
}
// check scripts loading to redis
scripts := make(map[config.Params_QuotaAlgorithm]*redis.Script, 2)
for algorithm, script := range rateLimitingLUAScripts {
scripts[algorithm] = redis.NewScript(script)
if _, err := scripts[algorithm].Load(client).Result(); err != nil {
ce = ce.Appendf(info.Name, "unable to initialized redis service: %v", err)
return
}
}
_ = client.Close()
return
}
// getOverrideHash returns hash key of the given dimension in sorted by key
func | (dimensions map[string]string) string {
var keys []string
for k := range dimensions {
keys = append(keys, k)
}
sort.Strings(keys)
h := fnv.New32a()
for _, key := range keys {
_, _ = io.WriteString(h, key+"\t"+dimensions[key]+"\n")
}
return strconv.Itoa(int(h.Sum32()))
}
func (b *builder) Build(context context.Context, env adapter.Env) (adapter.Handler, error) {
limits := make(map[string]*config.Params_Quota, len(b.adapterConfig.Quotas))
for idx := range b.adapterConfig.Quotas {
limits[b.adapterConfig.Quotas[idx].Name] = &b.adapterConfig.Quotas[idx]
}
// Build memory address of dimensions to hash map
dimensionHash := make(map[*map[string]string]string)
for key := range limits {
for index := range limits[key].Overrides {
dimensionHash[&(limits[key].Overrides[index].Dimensions)] =
getDimensionHash(limits[key].Overrides[index].Dimensions)
}
}
// initialize redis client
option := redis.Options{
Addr: b.adapterConfig.RedisServerUrl,
}
if b.adapterConfig.ConnectionPoolSize > 0 {
option.PoolSize = int(b.adapterConfig.ConnectionPoolSize)
}
client := redis.NewClient(&option)
if _, err := client.Ping().Result(); err != nil {
return nil, fmt.Errorf("could not create a connection to redis server: %v", err)
}
// load scripts into redis
scripts := make(map[config.Params_QuotaAlgorithm]*redis.Script, 2)
for algorithm, script := range rateLimitingLUAScripts {
scripts[algorithm] = redis.NewScript(script)
}
h := &handler{
client: client,
limits: limits,
scripts: scripts,
logger: env.Logger(),
getTime: time.Now,
dimensionHash: dimensionHash,
}
return h, nil
}
////////////////// Runtime Methods //////////////////////////
// matchDimensions matches configured dimensions with dimensions of the instance.
func matchDimensions(cfg *map[string]string, inst *map[string]interface{}) bool {
for k, val := range *cfg {
if rval, ok := (*inst)[k]; ok {
if adapter.StringEquals(rval, val) { // this dimension matches, on to next comparison.
continue
}
}
// rval does not match val.
return false
}
return true
}
func getAllocatedTokenFromResult(result *interface{}) (int64, time.Duration, error) {
if res, ok := (*result).([]interface{}); ok {
if len(res) != 2 {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", *result)
}
// read token
tokenValue, tokenOk := res[0].(int64)
if !tokenOk {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
// read expiration
expValue, expOk := res[1].(int64)
if !expOk {
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
return tokenValue, time.Duration(expValue) * time.Nanosecond, nil
}
return 0, 0, fmt.Errorf("invalid response from the redis server: %v", result)
}
// find override
func (h *handler) getKeyAndQuotaAmount(instance *quota.Instance, quota *config.Params_Quota) (string, int64, error) {
maxAmount := quota.MaxAmount
key := quota.Name
for idx := range quota.Overrides {
if matchDimensions("a.Overrides[idx].Dimensions, &instance.Dimensions) {
h.logger.Debugf("quota override: %v selected for %v", quota.Overrides[idx], *instance)
if hash, ok := h.dimensionHash["a.Overrides[idx].Dimensions]; ok {
// override key and max amount
key = key + "-" + hash
maxAmount = quota.Overrides[idx].MaxAmount
return key, maxAmount, nil
}
// This should not be happen
return "", 0, fmt.Errorf("quota override dimension hash lookup failed: %v in %v",
h.limits[instance.Name].Overrides[idx].Dimensions, h.dimensionHash)
}
}
return key, maxAmount, nil
}
func (h *handler) HandleQuota(context context.Context, instance *quota.Instance, args adapter.QuotaArgs) (adapter.QuotaResult, error) {
now := h.getTime()
if limit, ok := h.limits[instance.Name]; ok {
if script, ok := h.scripts[limit.RateLimitAlgorithm]; ok {
ret := status.OK
// get overridden quotaAmount and quotaKey
key, maxAmount, err := h.getKeyAndQuotaAmount(instance, limit)
if err != nil {
_ = h.logger.Errorf("%v", err.Error())
return adapter.QuotaResult{}, nil
}
h.logger.Debugf("key: %v maxAmount: %v", key, maxAmount)
// execute lua algorithm script
result, err := script.Run(
h.client,
[]string{
key + ".meta", // KEY[1]
key + ".data", // KEY[2]
},
maxAmount, // ARGV[1] credit
limit.GetValidDuration().Nanoseconds(), // ARGV[2] window length
limit.GetBucketDuration().Nanoseconds(), // ARGV[3] bucket length
args.BestEffort, // ARGV[4] best effort
args.QuotaAmount, // ARGV[5] token
now.UnixNano(), // ARGV[6] timestamp
args.DeduplicationID, // ARGS[8] deduplication id
).Result()
if err != nil {
_ = h.logger.Errorf("failed to run quota script: %v", err)
return adapter.QuotaResult{}, nil
}
allocated, expiration, err := getAllocatedTokenFromResult(&result)
if err != nil {
_ = h.logger.Errorf("%v", err)
return adapter.QuotaResult{}, nil
}
if allocated <= 0 {
ret = status.WithResourceExhausted("redisquota: Resource exhausted")
}
return adapter.QuotaResult{
Status: ret,
Amount: allocated,
ValidDuration: expiration * time.Nanosecond,
}, nil
}
}
return adapter.QuotaResult{}, nil
}
func (h handler) Close() error {
return h.client.Close()
}
////////////////// Bootstrap //////////////////////////
// GetInfo returns the Info associated with this adapter implementation.
func GetInfo() adapter.Info {
return adapter.Info{
Name: "redisquota",
Impl: "istio.io/mixer/adapter/redisquota",
Description: "Redis-based quotas.",
SupportedTemplates: []string{
quota.TemplateName,
},
DefaultConfig: &config.Params{
RedisServerUrl: "localhost:6379",
ConnectionPoolSize: 10,
},
NewBuilder: func() adapter.HandlerBuilder { return &builder{} },
}
}
///////////////////////////////////////////////////////
| getDimensionHash | identifier_name |
answer_identification.old.py | from src.question_classifier import *
from nltk.corpus import stopwords
import nltk
import text_analyzer
import re
import string
def num_occurrences_time_regex(tokens):
dates_pattern = r'[[0-9]{1,2}/]*[0-9]{1,2}/[0-9]{2,4}|[0-9]{4}|january|february|march|april|may|june|july|' \
r'august|september|october|november|december|jan|feb|mar|apr|may|jun|jul|aug|sep|' \
r'sept|oct|nov|dec|[0-2]?[0-9]'
time_pattern = r"\s*(\d{1,2}\:\d{2}\s?(?:AM|PM|am|pm)?)|\d{1,2}\s*(?:o'clock)"
span_pattern = r'(?:last|next|this)?\s*(?:week|month|yesterday|today|tomorrow|year)'
begin_pattern = r"first|last|since|ago"
end_pattern = r"start|begin|since|year"
if isinstance(tokens, list):
tokens = " ".join(tokens)
tokens = tokens.lower()
return len(re.findall(dates_pattern, tokens)) + len(re.findall(time_pattern, tokens)) + len(
re.findall(span_pattern, tokens))
def num_occurrences_quant_regex(tokens):
much_pattern = r'\$\s*\d+[,]?\d+[.]?\d*'
much_pattern2 = r'\d+[,]?\d*\s(?:dollars|cents|crowns|pounds|euros|pesos|yen|yuan|usd|eur|gbp|cad|aud)'
much_pattern3 = r'(?:dollar|cent|penny|pennies|euro|peso)[s]?'
if isinstance(tokens, list):
tokens = " ".join(tokens)
tokens = tokens.lower()
return len(re.findall(much_pattern, tokens)) + len(re.findall(much_pattern2, tokens)) + len(
re.findall(much_pattern3, tokens))
def get_parse_tree(sentence_text):
return next(CoreNLPParser().raw_parse(sentence_text))
def get_parse_trees_with_tag(sentence_text, tag):
parse_tree = next(CoreNLPParser().raw_parse(sentence_text))
phrases = []
for subtree in parse_tree.subtrees():
if subtree.label() == tag:
phrases.append(subtree)
return phrases
def get_dep_trees_with_tag(root_node, tag):
tagged = []
for node in root_node.get_nodes:
if node['tag'].lower() == tag.lower():
tagged.append(node)
return tagged
def calculate_overlap(sequence1, sequence2, eliminate_stopwords=True):
overlap = 0
for word in sequence1:
if word in sequence2 and (word not in stopwords.words('english') or eliminate_stopwords):
overlap += 1
return overlap
def overlap_indices(target_words, sentence):
indices = []
for i, word in enumerate(sentence):
if word in target_words and word not in stopwords.words('english'):
indices.append(i)
return indices
def get_top_ner_chunk_of_each_tag(sentence,
accepted_tags=("PERSON", "GPE", "ORGANIZATION")):
named_question_chunks = text_analyzer.squash_with_ne(
nltk.ne_chunk(nltk.pos_tag(
text_analyzer.lemmatize(sentence)),
binary=False
)
)
top_chunks = {}
for tag in accepted_tags:
question_chunks = [
x.split() for x in text_analyzer.get_contiguous_x_phrases(
named_question_chunks, tag
)
]
if question_chunks:
top_question_chunk = max(question_chunks, key=lambda x: len(x))
if len(top_question_chunk) > 0:
top_chunks[tag] = [(tag, top_question_chunk)]
return top_chunks
def to_sentence(tokens, index=0):
if isinstance(tokens, str):
return tokens
elif isinstance(tokens, list):
if isinstance(tokens[index], tuple):
return " ".join([
token[index] for token in tokens
])
else:
return " ".join(tokens)
def remove_punctuation(s):
return ''.join(c for c in s if c not in set(string.punctuation))
# todo: look through all of Carlos' stuff and make sure I'm implementing anything useful that he has
# todo: consider adding a "bad" tag in last-resort-y responses... or just don't return... idk
# todo: re-capitalize text when returning?
def get_answer_phrase(question_sentence, answer_sentence):
"""
Extract the narrowest phrase from the answer sentence containing the full answer to the question sentence
:param question_sentence: an answer sentence
:param answer_sentence: a question sentence
:return: the narrowest phrase containing the full answer
"""
# TODO: UNCOMMENT TRY/CATCH BLOCK!
try:
question_sentence = remove_punctuation(question_sentence)
answer_sentence = remove_punctuation(answer_sentence)
question = formulate_question(question_sentence)
answer = get_sentence(answer_sentence)
# todo!!!!
if question['qword'][0].lower() in ["what", "which"]:
best_phrase = None
for subtree in [
tree.subtrees() for tree in
get_parse_trees_with_tag(answer_sentence, "NP") +
get_parse_trees_with_tag(answer_sentence, "NX")
]:
for tree in subtree:
baseline = text_analyzer.sentence_similarity(question_sentence, " ".join(tree.leaves()))
baseline = text_analyzer.sentence_similarity(question_sentence)
elif question['qword'][0].lower() == "when":
# get prepositional phrases
prep_nodes = [d for d in answer.get_nodes if d['tag'] == "prep"]
if prep_nodes:
# todo: should this be the uppermost node (which'll be [0], always)?
top_prep_string = " ".join([x[0] for x in prep_nodes[0].get_pairs])
if num_occurrences_time_regex(top_prep_string) > 0:
return top_prep_string
# todo: find a way to use my dependency parse here?
prep_phrases = [x.leaves() for x in get_parse_trees_with_tag(answer_sentence, "PP")]
if prep_phrases:
return to_sentence(
max(
prep_phrases, key=lambda x: num_occurrences_time_regex(x)
)
)
else:
# todo: perhaps reconsider which one to return here. sentence length may be the wrong idea.
if prep_phrases:
return to_sentence(max(prep_phrases, key=lambda x: len(x)))
elif question['qword'][0].lower() == "where":
answer_chunks = get_top_ner_chunk_of_each_tag(answer_sentence, {"GPE"})
untagged = [
tagged[0][1] for tagged in [
answer_chunks[tag] for tag in answer_chunks
]
]
# get_dep_trees_with_tag(answer, "prep")
prep_phrases = [tree.leaves() for tree in get_parse_trees_with_tag(answer_sentence, "PP")]
# todo: strip preposition (e.g. "in") out of the answer
if prep_phrases:
return to_sentence(max(
prep_phrases,
key=lambda x: calculate_overlap(x, untagged, False)
))
elif question['qword'][0].lower() in ["who", "whose", "whom"]:
question_chunks = get_top_ner_chunk_of_each_tag(question_sentence)
answer_chunks = get_top_ner_chunk_of_each_tag(answer_sentence)
# todo: try something with checking the question tag with the answer tag
# todo: consider stripping out the part of the answer with question entity in it...?
untagged = [
tagged[0][1] for tagged in [
answer_chunks[tag] for tag in answer_chunks
]
]
# todo: figure out what to do if not untagged
if untagged:
return to_sentence(max(untagged, key=lambda x: len(x)))
elif question['qword'][0].lower() == "why":
# q_verb = question.tuple
# a_verb = answer.tuple
parse_tree = next(CoreNLPParser().raw_parse(answer_sentence))
to_vp_phrases = []
prev_was_to = False
for tree in parse_tree.subtrees():
if tree.label() == "VP":
for subtree in tree.subtrees():
if prev_was_to:
to_vp_phrases.append(subtree)
prev_was_to = False
elif subtree.label() == "TO":
prev_was_to = True
# todo: potentially strip out "to", and might consider including object?
# todo: honestly, might just pick out things after "to"
# if to_vp_phrases:
# return to_sentence(min(
# [tree.leaves() for tree in to_vp_phrases],
# key=lambda x: calculate_overlap(to_vp_phrases, x)
# ))
# todo: finish debugging
# vp_phrases = get_parse_trees_with_tag(answer_sentence, "VP")
# to_phrases = []
# if to_phrases:
# return to_sentence(max(
# to_phrases,
# key=lambda x: len([])
# ))
# todo: soup up this absolute trash
for i, word in enumerate(answer_sentence.split()):
if word in ["to", "so", "because"]:
return to_sentence(answer_sentence.split()[:i])
# todo: try things with conjunctions, potentially? test.
# conj_phrases = [tree.leaves() for tree in get_parse_trees_with_tag(answer_sentence, "PP")]
elif question['qword'][0].lower() == "how":
# TODO: look at "QP" parse tag for this!
if any([
# 'advmod' in [
# pair[1] for pair in [
# node.get_pairs[1] for node in question.get_nodes if node['tag'][0].lower() == 'w'
# ]
# ],
get_parse_trees_with_tag(question_sentence, "WHADJP"),
re.search(r"much|many|tall|long", question_sentence)
]):
qp_phrases = get_parse_trees_with_tag(answer_sentence, "QP")
if qp_phrases:
return to_sentence(min(
[tree.leaves() for tree in qp_phrases],
key=lambda x: num_occurrences_quant_regex(x)
))
# todo: non-measure cases! (mostly thinking about "how did/does")
except:
pass
def test_who1():
question_sentence = "Who is the principal of South Queens Junior High School?"
answer_sentence = "Principal Betty Jean Aucoin says the club is a first for a Nova Scotia public school."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_who2():
question_sentence = "Who said \"the effects were top notch\" when he was talking about \"The Phantom Menace\"?"
answer_sentence = "Mark Churchill and Ken Green were at the St. John's screening."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_where():
question_sentence = "Where is South Queens Junior High School located?"
answer_sentence = "A middle school in Liverpool, Nova Scotia is pumping up bodies as well as minds."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_when():
|
def test_why_to():
question_sentence = "Why did someone sleep in a tent on a sidewalk in front of a theater in Montreal?"
answer_sentence = "In Montreal someone actually slept in a tent out on the sidewalk in front of a movie " \
"theatre to make sure he got the first ticket."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_why_other():
question_sentence = "Why will diabetics have to be patient, despite Dr. Ji-Won Yoon's discovery?"
answer_sentence = "But, diabetics will have to be patient -- a cure for humans is between five and 10 years away."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_how_does():
question_sentence = "How does Newfoundland intend to use a film of seals feasting on cod?"
answer_sentence = "The Newfoundland government has a new weapon in its fight to increase the seal hunt: film of cod carnage."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_how_much():
question_sentence = "How much was sealing worth to the Newfoundland economy in 1996?"
answer_sentence = "In 1996 alone it was worth in excess of $11 million, with seal products being sold in Canada, Norway and Asia."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_what1():
question_sentence = "What has South Queens Junior High School done with its old metal shop?"
answer_sentence = "The school has turned its one-time metal shop - lost to budget cuts almost two years ago - into a money-making professional fitness club."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def template():
question_sentence = 0
answer_sentence = 0
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
if __name__ == "__main__":
# test_who1()
# test_who2()
# test_where()
# test_when()
# test_why_to()
# test_why_other()
# test_how_does()
# test_how_much()
test_what1()
| question_sentence = "When did Babe play for \"the finest basketball team that ever stepped out on a floor\"?"
answer_sentence = "Babe Belanger played with the Grads from 1929 to 1937."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test) | identifier_body |
answer_identification.old.py | from src.question_classifier import *
from nltk.corpus import stopwords
import nltk
import text_analyzer
import re
import string
def num_occurrences_time_regex(tokens):
dates_pattern = r'[[0-9]{1,2}/]*[0-9]{1,2}/[0-9]{2,4}|[0-9]{4}|january|february|march|april|may|june|july|' \
r'august|september|october|november|december|jan|feb|mar|apr|may|jun|jul|aug|sep|' \
r'sept|oct|nov|dec|[0-2]?[0-9]'
time_pattern = r"\s*(\d{1,2}\:\d{2}\s?(?:AM|PM|am|pm)?)|\d{1,2}\s*(?:o'clock)"
span_pattern = r'(?:last|next|this)?\s*(?:week|month|yesterday|today|tomorrow|year)'
begin_pattern = r"first|last|since|ago"
end_pattern = r"start|begin|since|year"
if isinstance(tokens, list):
tokens = " ".join(tokens)
tokens = tokens.lower()
return len(re.findall(dates_pattern, tokens)) + len(re.findall(time_pattern, tokens)) + len(
re.findall(span_pattern, tokens))
def num_occurrences_quant_regex(tokens):
much_pattern = r'\$\s*\d+[,]?\d+[.]?\d*'
much_pattern2 = r'\d+[,]?\d*\s(?:dollars|cents|crowns|pounds|euros|pesos|yen|yuan|usd|eur|gbp|cad|aud)'
much_pattern3 = r'(?:dollar|cent|penny|pennies|euro|peso)[s]?'
if isinstance(tokens, list):
tokens = " ".join(tokens)
tokens = tokens.lower()
return len(re.findall(much_pattern, tokens)) + len(re.findall(much_pattern2, tokens)) + len(
re.findall(much_pattern3, tokens))
def get_parse_tree(sentence_text):
return next(CoreNLPParser().raw_parse(sentence_text))
def get_parse_trees_with_tag(sentence_text, tag):
parse_tree = next(CoreNLPParser().raw_parse(sentence_text))
phrases = []
for subtree in parse_tree.subtrees():
if subtree.label() == tag:
phrases.append(subtree)
return phrases
def get_dep_trees_with_tag(root_node, tag):
tagged = []
for node in root_node.get_nodes:
if node['tag'].lower() == tag.lower():
tagged.append(node)
return tagged
def calculate_overlap(sequence1, sequence2, eliminate_stopwords=True):
overlap = 0
for word in sequence1:
if word in sequence2 and (word not in stopwords.words('english') or eliminate_stopwords):
overlap += 1
return overlap
def overlap_indices(target_words, sentence):
indices = []
for i, word in enumerate(sentence):
if word in target_words and word not in stopwords.words('english'):
indices.append(i)
return indices
def get_top_ner_chunk_of_each_tag(sentence,
accepted_tags=("PERSON", "GPE", "ORGANIZATION")):
named_question_chunks = text_analyzer.squash_with_ne(
nltk.ne_chunk(nltk.pos_tag(
text_analyzer.lemmatize(sentence)),
binary=False
)
)
top_chunks = {}
for tag in accepted_tags:
question_chunks = [
x.split() for x in text_analyzer.get_contiguous_x_phrases(
named_question_chunks, tag
)
]
if question_chunks:
top_question_chunk = max(question_chunks, key=lambda x: len(x))
if len(top_question_chunk) > 0:
top_chunks[tag] = [(tag, top_question_chunk)]
return top_chunks
def to_sentence(tokens, index=0):
if isinstance(tokens, str):
return tokens
elif isinstance(tokens, list):
if isinstance(tokens[index], tuple):
return " ".join([
token[index] for token in tokens
])
else:
return " ".join(tokens)
def remove_punctuation(s):
return ''.join(c for c in s if c not in set(string.punctuation))
# todo: look through all of Carlos' stuff and make sure I'm implementing anything useful that he has
# todo: consider adding a "bad" tag in last-resort-y responses... or just don't return... idk
# todo: re-capitalize text when returning?
def get_answer_phrase(question_sentence, answer_sentence):
"""
Extract the narrowest phrase from the answer sentence containing the full answer to the question sentence
:param question_sentence: an answer sentence
:param answer_sentence: a question sentence
:return: the narrowest phrase containing the full answer
"""
# TODO: UNCOMMENT TRY/CATCH BLOCK!
try:
question_sentence = remove_punctuation(question_sentence)
answer_sentence = remove_punctuation(answer_sentence)
question = formulate_question(question_sentence)
answer = get_sentence(answer_sentence)
# todo!!!!
if question['qword'][0].lower() in ["what", "which"]:
best_phrase = None
for subtree in [
tree.subtrees() for tree in
get_parse_trees_with_tag(answer_sentence, "NP") +
get_parse_trees_with_tag(answer_sentence, "NX")
]:
for tree in subtree:
baseline = text_analyzer.sentence_similarity(question_sentence, " ".join(tree.leaves()))
baseline = text_analyzer.sentence_similarity(question_sentence)
elif question['qword'][0].lower() == "when":
# get prepositional phrases
prep_nodes = [d for d in answer.get_nodes if d['tag'] == "prep"]
if prep_nodes:
# todo: should this be the uppermost node (which'll be [0], always)?
top_prep_string = " ".join([x[0] for x in prep_nodes[0].get_pairs])
if num_occurrences_time_regex(top_prep_string) > 0:
return top_prep_string
# todo: find a way to use my dependency parse here?
prep_phrases = [x.leaves() for x in get_parse_trees_with_tag(answer_sentence, "PP")]
if prep_phrases:
return to_sentence(
max(
prep_phrases, key=lambda x: num_occurrences_time_regex(x)
)
)
else:
# todo: perhaps reconsider which one to return here. sentence length may be the wrong idea.
if prep_phrases:
return to_sentence(max(prep_phrases, key=lambda x: len(x)))
elif question['qword'][0].lower() == "where":
answer_chunks = get_top_ner_chunk_of_each_tag(answer_sentence, {"GPE"})
untagged = [
tagged[0][1] for tagged in [
answer_chunks[tag] for tag in answer_chunks
]
]
# get_dep_trees_with_tag(answer, "prep")
prep_phrases = [tree.leaves() for tree in get_parse_trees_with_tag(answer_sentence, "PP")]
# todo: strip preposition (e.g. "in") out of the answer
if prep_phrases:
return to_sentence(max(
prep_phrases,
key=lambda x: calculate_overlap(x, untagged, False)
))
elif question['qword'][0].lower() in ["who", "whose", "whom"]:
|
elif question['qword'][0].lower() == "why":
# q_verb = question.tuple
# a_verb = answer.tuple
parse_tree = next(CoreNLPParser().raw_parse(answer_sentence))
to_vp_phrases = []
prev_was_to = False
for tree in parse_tree.subtrees():
if tree.label() == "VP":
for subtree in tree.subtrees():
if prev_was_to:
to_vp_phrases.append(subtree)
prev_was_to = False
elif subtree.label() == "TO":
prev_was_to = True
# todo: potentially strip out "to", and might consider including object?
# todo: honestly, might just pick out things after "to"
# if to_vp_phrases:
# return to_sentence(min(
# [tree.leaves() for tree in to_vp_phrases],
# key=lambda x: calculate_overlap(to_vp_phrases, x)
# ))
# todo: finish debugging
# vp_phrases = get_parse_trees_with_tag(answer_sentence, "VP")
# to_phrases = []
# if to_phrases:
# return to_sentence(max(
# to_phrases,
# key=lambda x: len([])
# ))
# todo: soup up this absolute trash
for i, word in enumerate(answer_sentence.split()):
if word in ["to", "so", "because"]:
return to_sentence(answer_sentence.split()[:i])
# todo: try things with conjunctions, potentially? test.
# conj_phrases = [tree.leaves() for tree in get_parse_trees_with_tag(answer_sentence, "PP")]
elif question['qword'][0].lower() == "how":
# TODO: look at "QP" parse tag for this!
if any([
# 'advmod' in [
# pair[1] for pair in [
# node.get_pairs[1] for node in question.get_nodes if node['tag'][0].lower() == 'w'
# ]
# ],
get_parse_trees_with_tag(question_sentence, "WHADJP"),
re.search(r"much|many|tall|long", question_sentence)
]):
qp_phrases = get_parse_trees_with_tag(answer_sentence, "QP")
if qp_phrases:
return to_sentence(min(
[tree.leaves() for tree in qp_phrases],
key=lambda x: num_occurrences_quant_regex(x)
))
# todo: non-measure cases! (mostly thinking about "how did/does")
except:
pass
def test_who1():
question_sentence = "Who is the principal of South Queens Junior High School?"
answer_sentence = "Principal Betty Jean Aucoin says the club is a first for a Nova Scotia public school."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_who2():
question_sentence = "Who said \"the effects were top notch\" when he was talking about \"The Phantom Menace\"?"
answer_sentence = "Mark Churchill and Ken Green were at the St. John's screening."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_where():
question_sentence = "Where is South Queens Junior High School located?"
answer_sentence = "A middle school in Liverpool, Nova Scotia is pumping up bodies as well as minds."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_when():
question_sentence = "When did Babe play for \"the finest basketball team that ever stepped out on a floor\"?"
answer_sentence = "Babe Belanger played with the Grads from 1929 to 1937."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_why_to():
question_sentence = "Why did someone sleep in a tent on a sidewalk in front of a theater in Montreal?"
answer_sentence = "In Montreal someone actually slept in a tent out on the sidewalk in front of a movie " \
"theatre to make sure he got the first ticket."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_why_other():
question_sentence = "Why will diabetics have to be patient, despite Dr. Ji-Won Yoon's discovery?"
answer_sentence = "But, diabetics will have to be patient -- a cure for humans is between five and 10 years away."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_how_does():
question_sentence = "How does Newfoundland intend to use a film of seals feasting on cod?"
answer_sentence = "The Newfoundland government has a new weapon in its fight to increase the seal hunt: film of cod carnage."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_how_much():
question_sentence = "How much was sealing worth to the Newfoundland economy in 1996?"
answer_sentence = "In 1996 alone it was worth in excess of $11 million, with seal products being sold in Canada, Norway and Asia."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def test_what1():
question_sentence = "What has South Queens Junior High School done with its old metal shop?"
answer_sentence = "The school has turned its one-time metal shop - lost to budget cuts almost two years ago - into a money-making professional fitness club."
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
def template():
question_sentence = 0
answer_sentence = 0
test = get_answer_phrase(question_sentence, answer_sentence)
print(test)
if __name__ == "__main__":
# test_who1()
# test_who2()
# test_where()
# test_when()
# test_why_to()
# test_why_other()
# test_how_does()
# test_how_much()
test_what1()
| question_chunks = get_top_ner_chunk_of_each_tag(question_sentence)
answer_chunks = get_top_ner_chunk_of_each_tag(answer_sentence)
# todo: try something with checking the question tag with the answer tag
# todo: consider stripping out the part of the answer with question entity in it...?
untagged = [
tagged[0][1] for tagged in [
answer_chunks[tag] for tag in answer_chunks
]
]
# todo: figure out what to do if not untagged
if untagged:
return to_sentence(max(untagged, key=lambda x: len(x))) | conditional_block |
answer_identification.old.py | from src.question_classifier import *
from nltk.corpus import stopwords
import nltk
import text_analyzer
import re
import string
def num_occurrences_time_regex(tokens):
dates_pattern = r'[[0-9]{1,2}/]*[0-9]{1,2}/[0-9]{2,4}|[0-9]{4}|january|february|march|april|may|june|july|' \
r'august|september|october|november|december|jan|feb|mar|apr|may|jun|jul|aug|sep|' \
r'sept|oct|nov|dec|[0-2]?[0-9]'
time_pattern = r"\s*(\d{1,2}\:\d{2}\s?(?:AM|PM|am|pm)?)|\d{1,2}\s*(?:o'clock)"
span_pattern = r'(?:last|next|this)?\s*(?:week|month|yesterday|today|tomorrow|year)'
begin_pattern = r"first|last|since|ago"
end_pattern = r"start|begin|since|year"
if isinstance(tokens, list):
tokens = " ".join(tokens)
tokens = tokens.lower()
return len(re.findall(dates_pattern, tokens)) + len(re.findall(time_pattern, tokens)) + len(
re.findall(span_pattern, tokens))
def num_occurrences_quant_regex(tokens):
much_pattern = r'\$\s*\d+[,]?\d+[.]?\d*'
much_pattern2 = r'\d+[,]?\d*\s(?:dollars|cents|crowns|pounds|euros|pesos|yen|yuan|usd|eur|gbp|cad|aud)'
much_pattern3 = r'(?:dollar|cent|penny|pennies|euro|peso)[s]?'
if isinstance(tokens, list):
tokens = " ".join(tokens)
tokens = tokens.lower()
return len(re.findall(much_pattern, tokens)) + len(re.findall(much_pattern2, tokens)) + len(
re.findall(much_pattern3, tokens))
def get_parse_tree(sentence_text):
return next(CoreNLPParser().raw_parse(sentence_text))
def get_parse_trees_with_tag(sentence_text, tag):
parse_tree = next(CoreNLPParser().raw_parse(sentence_text))
phrases = []
for subtree in parse_tree.subtrees():
if subtree.label() == tag:
phrases.append(subtree)
return phrases
def get_dep_trees_with_tag(root_node, tag):
tagged = []
for node in root_node.get_nodes:
if node['tag'].lower() == tag.lower():
tagged.append(node)
return tagged
def calculate_overlap(sequence1, sequence2, eliminate_stopwords=True):
overlap = 0
for word in sequence1:
if word in sequence2 and (word not in stopwords.words('english') or eliminate_stopwords):
overlap += 1
return overlap
def overlap_indices(target_words, sentence):
indices = []
for i, word in enumerate(sentence):
if word in target_words and word not in stopwords.words('english'):
indices.append(i)
return indices
def get_top_ner_chunk_of_each_tag(sentence,
accepted_tags=("PERSON", "GPE", "ORGANIZATION")):
named_question_chunks = text_analyzer.squash_with_ne(
nltk.ne_chunk(nltk.pos_tag(
text_analyzer.lemmatize(sentence)),
binary=False
)
)
top_chunks = {}
for tag in accepted_tags:
question_chunks = [
x.split() for x in text_analyzer.get_contiguous_x_phrases(
named_question_chunks, tag
)
]
if question_chunks:
top_question_chunk = max(question_chunks, key=lambda x: len(x))
if len(top_question_chunk) > 0:
top_chunks[tag] = [(tag, top_question_chunk)]
return top_chunks
def to_sentence(tokens, index=0):
if isinstance(tokens, str):
return tokens
elif isinstance(tokens, list):
if isinstance(tokens[index], tuple):
return " ".join([
token[index] for token in tokens
])
else:
return " ".join(tokens)
def remove_punctuation(s):
return ''.join(c for c in s if c not in set(string.punctuation))
# todo: look through all of Carlos' stuff and make sure I'm implementing anything useful that he has
# todo: consider adding a "bad" tag in last-resort-y responses... or just don't return... idk
# todo: re-capitalize text when returning?
def get_answer_phrase(question_sentence, answer_sentence):
    """
    Extract the narrowest phrase from the answer sentence containing the full answer to the question sentence.

    Dispatches on the question word (what/which, when, where, who, why, how) and
    applies a per-type heuristic over constituency parses, dependency nodes and
    named-entity chunks. Any branch may fall through without returning, in which
    case the function returns None. NOTE(review): indentation here was
    reconstructed from a whitespace-mangled source — confirm nesting against the
    original file.

    :param question_sentence: a question sentence
    :param answer_sentence: an answer sentence believed to contain the answer
    :return: the narrowest phrase containing the full answer, or None
    """
    # TODO: UNCOMMENT TRY/CATCH BLOCK!
    try:
        # Strip punctuation from both sides before any parsing/matching.
        question_sentence = remove_punctuation(question_sentence)
        answer_sentence = remove_punctuation(answer_sentence)
        question = formulate_question(question_sentence)
        answer = get_sentence(answer_sentence)
        # todo!!!!
        if question['qword'][0].lower() in ["what", "which"]:
            # NOTE(review): this branch is incomplete — `baseline` is immediately
            # overwritten (second call even drops an argument) and nothing is
            # ever returned from here.
            best_phrase = None
            # Walk every subtree of every NP/NX phrase in the answer's parse.
            for subtree in [
                tree.subtrees() for tree in
                get_parse_trees_with_tag(answer_sentence, "NP") +
                get_parse_trees_with_tag(answer_sentence, "NX")
            ]:
                for tree in subtree:
                    baseline = text_analyzer.sentence_similarity(question_sentence, " ".join(tree.leaves()))
                    baseline = text_analyzer.sentence_similarity(question_sentence)
        elif question['qword'][0].lower() == "when":
            # get prepositional phrases
            prep_nodes = [d for d in answer.get_nodes if d['tag'] == "prep"]
            if prep_nodes:
                # todo: should this be the uppermost node (which'll be [0], always)?
                top_prep_string = " ".join([x[0] for x in prep_nodes[0].get_pairs])
                # Accept the dependency-based phrase only if it mentions a time expression.
                if num_occurrences_time_regex(top_prep_string) > 0:
                    return top_prep_string
                # todo: find a way to use my dependency parse here?
                prep_phrases = [x.leaves() for x in get_parse_trees_with_tag(answer_sentence, "PP")]
                if prep_phrases:
                    # Fall back to the PP with the most time-expression matches.
                    return to_sentence(
                        max(
                            prep_phrases, key=lambda x: num_occurrences_time_regex(x)
                        )
                    )
            else:
                # todo: perhaps reconsider which one to return here. sentence length may be the wrong idea.
                # NOTE(review): `prep_phrases` is unbound on this path (it is only
                # assigned inside the `if prep_nodes:` branch) — the resulting
                # NameError is silently swallowed by the bare except below.
                if prep_phrases:
                    return to_sentence(max(prep_phrases, key=lambda x: len(x)))
        elif question['qword'][0].lower() == "where":
            # Only location-like (GPE) entity chunks are relevant for "where".
            answer_chunks = get_top_ner_chunk_of_each_tag(answer_sentence, {"GPE"})
            # Flatten the per-tag mapping into plain word lists (tags discarded).
            untagged = [
                tagged[0][1] for tagged in [
                    answer_chunks[tag] for tag in answer_chunks
                ]
            ]
            # get_dep_trees_with_tag(answer, "prep")
            prep_phrases = [tree.leaves() for tree in get_parse_trees_with_tag(answer_sentence, "PP")]
            # todo: strip preposition (e.g. "in") out of the answer
            if prep_phrases:
                # Pick the PP that overlaps the GPE chunks most (stopwords included:
                # third argument False disables the stopword filter).
                return to_sentence(max(
                    prep_phrases,
                    key=lambda x: calculate_overlap(x, untagged, False)
                ))
        elif question['qword'][0].lower() in ["who", "whose", "whom"]:
            question_chunks = get_top_ner_chunk_of_each_tag(question_sentence)
            answer_chunks = get_top_ner_chunk_of_each_tag(answer_sentence)
            # todo: try something with checking the question tag with the answer tag
            # todo: consider stripping out the part of the answer with question entity in it...?
            untagged = [
                tagged[0][1] for tagged in [
                    answer_chunks[tag] for tag in answer_chunks
                ]
            ]
            # todo: figure out what to do if not untagged
            if untagged:
                # Longest named-entity chunk in the answer is taken as the answer phrase.
                return to_sentence(max(untagged, key=lambda x: len(x)))
        elif question['qword'][0].lower() == "why":
            # q_verb = question.tuple
            # a_verb = answer.tuple
            parse_tree = next(CoreNLPParser().raw_parse(answer_sentence))
            # Collect VP subtrees that immediately follow a "TO" node
            # ("to <verb> ..." purpose clauses).
            to_vp_phrases = []
            prev_was_to = False
            for tree in parse_tree.subtrees():
                if tree.label() == "VP":
                    for subtree in tree.subtrees():
                        if prev_was_to:
                            to_vp_phrases.append(subtree)
                            prev_was_to = False
                        elif subtree.label() == "TO":
                            prev_was_to = True
            # todo: potentially strip out "to", and might consider including object?
            # todo: honestly, might just pick out things after "to"
            # if to_vp_phrases:
            #     return to_sentence(min(
            #         [tree.leaves() for tree in to_vp_phrases],
            #         key=lambda x: calculate_overlap(to_vp_phrases, x)
            #     ))
            # todo: finish debugging
            # vp_phrases = get_parse_trees_with_tag(answer_sentence, "VP")
            # to_phrases = []
            # if to_phrases:
            #     return to_sentence(max(
            #         to_phrases,
            #         key=lambda x: len([])
            #     ))
            # todo: soup up this absolute trash
            # Fallback: truncate the answer at the first purpose/causal marker.
            # NOTE(review): this returns the words BEFORE "to"/"so"/"because",
            # i.e. the event rather than the reason — confirm that is intended.
            for i, word in enumerate(answer_sentence.split()):
                if word in ["to", "so", "because"]:
                    return to_sentence(answer_sentence.split()[:i])
            # todo: try things with conjunctions, potentially? test.
            # conj_phrases = [tree.leaves() for tree in get_parse_trees_with_tag(answer_sentence, "PP")]
        elif question['qword'][0].lower() == "how":
            # TODO: look at "QP" parse tag for this!
            # Only measure questions ("how much/many/tall/long", or a WHADJP in the
            # question parse) are handled; "how did/does" falls through to None.
            if any([
                # 'advmod' in [
                #     pair[1] for pair in [
                #         node.get_pairs[1] for node in question.get_nodes if node['tag'][0].lower() == 'w'
                #     ]
                # ],
                get_parse_trees_with_tag(question_sentence, "WHADJP"),
                re.search(r"much|many|tall|long", question_sentence)
            ]):
                qp_phrases = get_parse_trees_with_tag(answer_sentence, "QP")
                if qp_phrases:
                    # NOTE(review): `min` with a quantity-occurrence key looks
                    # suspicious — presumably `max` was intended; confirm.
                    return to_sentence(min(
                        [tree.leaves() for tree in qp_phrases],
                        key=lambda x: num_occurrences_quant_regex(x)
                    ))
            # todo: non-measure cases! (mostly thinking about "how did/does")
    except:
        # NOTE(review): bare except hides all failures (including the unbound-name
        # paths above) and turns every error into a silent None return.
        pass
def test_who1():
    """Manual smoke test: a 'who' question against a known article sentence."""
    q = "Who is the principal of South Queens Junior High School?"
    a = "Principal Betty Jean Aucoin says the club is a first for a Nova Scotia public school."
    print(get_answer_phrase(q, a))
def test_who2():
    """Manual smoke test: a 'who' question whose text contains quoted material."""
    q = "Who said \"the effects were top notch\" when he was talking about \"The Phantom Menace\"?"
    a = "Mark Churchill and Ken Green were at the St. John's screening."
    print(get_answer_phrase(q, a))
def test_where():
    """Manual smoke test: a 'where' question expecting a location phrase."""
    q = "Where is South Queens Junior High School located?"
    a = "A middle school in Liverpool, Nova Scotia is pumping up bodies as well as minds."
    print(get_answer_phrase(q, a))
def test_when():
    """Manual smoke test: a 'when' question expecting a time phrase.

    BUG FIX: the function name was missing (`def |():` — a syntax error);
    restored as `test_when`, matching the commented call in the __main__ block.
    """
    question_sentence = "When did Babe play for \"the finest basketball team that ever stepped out on a floor\"?"
    answer_sentence = "Babe Belanger played with the Grads from 1929 to 1937."
    test = get_answer_phrase(question_sentence, answer_sentence)
    print(test)
def test_why_to():
    """Manual smoke test: a 'why' question answered by a 'to ...' purpose clause."""
    q = "Why did someone sleep in a tent on a sidewalk in front of a theater in Montreal?"
    a = ("In Montreal someone actually slept in a tent out on the sidewalk in front of a movie "
         "theatre to make sure he got the first ticket.")
    print(get_answer_phrase(q, a))
def test_why_other():
    """Manual smoke test: a 'why' question with no explicit purpose marker."""
    q = "Why will diabetics have to be patient, despite Dr. Ji-Won Yoon's discovery?"
    a = "But, diabetics will have to be patient -- a cure for humans is between five and 10 years away."
    print(get_answer_phrase(q, a))
def test_how_does():
    """Manual smoke test: a non-measure 'how' question (currently unhandled)."""
    q = "How does Newfoundland intend to use a film of seals feasting on cod?"
    a = "The Newfoundland government has a new weapon in its fight to increase the seal hunt: film of cod carnage."
    print(get_answer_phrase(q, a))
def test_how_much():
    """Manual smoke test: a 'how much' measure question expecting a quantity phrase."""
    q = "How much was sealing worth to the Newfoundland economy in 1996?"
    a = "In 1996 alone it was worth in excess of $11 million, with seal products being sold in Canada, Norway and Asia."
    print(get_answer_phrase(q, a))
def test_what1():
    """Manual smoke test: a 'what' question (branch known to be incomplete)."""
    q = "What has South Queens Junior High School done with its old metal shop?"
    a = "The school has turned its one-time metal shop - lost to budget cuts almost two years ago - into a money-making professional fitness club."
    print(get_answer_phrase(q, a))
def template():
    """Skeleton for new manual test cases; the 0 placeholders are not valid inputs
    (get_answer_phrase's bare except makes it return None for them)."""
    question_sentence = 0
    answer_sentence = 0
    print(get_answer_phrase(question_sentence, answer_sentence))
if __name__ == "__main__":
    # Run one manual check at a time; uncomment the case under investigation.
    # test_who1()
    # test_who2()
    # test_where()
    # test_when()
    # test_why_to()
    # test_why_other()
    # test_how_does()
    # test_how_much()
    test_what1()
| test_when | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.