file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
instance.rs | use crate::{
create_idx_struct,
data_structures::{cont_idx_vec::ContiguousIdxVec, skipvec::SkipVec},
small_indices::SmallIdx,
};
use anyhow::{anyhow, ensure, Error, Result};
use log::{info, trace};
use serde::Deserialize;
use std::{
fmt::{self, Display, Write as _},
io::{BufRead, Write},
mem,
time::Instant,
};
create_idx_struct!(pub NodeIdx);
create_idx_struct!(pub EdgeIdx);
create_idx_struct!(pub EntryIdx);
#[derive(Debug)]
struct CompressedIlpName<T>(T);
impl<T: SmallIdx> Display for CompressedIlpName<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
const CHARS: &[u8] = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
let mut val = self.0.idx();
while val != 0 {
f.write_char(char::from(CHARS[val % CHARS.len()]))?;
val /= CHARS.len();
}
Ok(())
}
}
#[derive(Debug)]
struct ParsedEdgeHandler {
edge_incidences: Vec<SkipVec<(NodeIdx, EntryIdx)>>,
node_degrees: Vec<usize>,
}
impl ParsedEdgeHandler {
fn handle_edge(&mut self, node_indices: impl IntoIterator<Item = Result<usize>>) -> Result<()> {
let incidences = SkipVec::try_sorted_from(node_indices.into_iter().map(|idx_result| {
idx_result.and_then(|node_idx| {
ensure!(
node_idx < self.node_degrees.len(),
"invalid node idx in edge: {}",
node_idx
);
Ok((NodeIdx::from(node_idx), EntryIdx::INVALID))
})
}))?;
ensure!(incidences.len() > 0, "edges may not be empty");
for (_, (node, _)) in &incidences {
self.node_degrees[node.idx()] += 1;
}
self.edge_incidences.push(incidences);
Ok(())
}
}
#[derive(Debug, Deserialize)]
struct JsonInstance {
num_nodes: usize,
edges: Vec<Vec<usize>>,
}
#[derive(Clone, Debug)]
pub struct Instance {
nodes: ContiguousIdxVec<NodeIdx>,
edges: ContiguousIdxVec<EdgeIdx>,
node_incidences: Vec<SkipVec<(EdgeIdx, EntryIdx)>>,
edge_incidences: Vec<SkipVec<(NodeIdx, EntryIdx)>>,
}
impl Instance {
fn load(
num_nodes: usize,
num_edges: usize,
read_edges: impl FnOnce(&mut ParsedEdgeHandler) -> Result<()>,
) -> Result<Self> {
let mut handler = ParsedEdgeHandler {
edge_incidences: Vec::with_capacity(num_edges),
node_degrees: vec![0; num_nodes],
};
read_edges(&mut handler)?;
let ParsedEdgeHandler {
mut edge_incidences,
node_degrees,
} = handler;
let mut node_incidences: Vec<_> = node_degrees
.iter()
.map(|&len| SkipVec::with_len(len))
.collect();
let mut rem_node_degrees = node_degrees;
for (edge, incidences) in edge_incidences.iter_mut().enumerate() {
let edge = EdgeIdx::from(edge);
for (edge_entry_idx, edge_entry) in incidences.iter_mut() {
let node = edge_entry.0.idx();
let node_entry_idx = node_incidences[node].len() - rem_node_degrees[node];
rem_node_degrees[node] -= 1;
edge_entry.1 = EntryIdx::from(node_entry_idx);
node_incidences[node][node_entry_idx] = (edge, EntryIdx::from(edge_entry_idx));
}
}
Ok(Self {
nodes: (0..num_nodes).map(NodeIdx::from).collect(),
edges: (0..num_edges).map(EdgeIdx::from).collect(),
node_incidences,
edge_incidences,
})
}
pub fn load_from_text(mut reader: impl BufRead) -> Result<Self> {
let time_before = Instant::now();
let mut line = String::new();
reader.read_line(&mut line)?;
let mut numbers = line.split_ascii_whitespace().map(str::parse);
let num_nodes = numbers
.next()
.ok_or_else(|| anyhow!("Missing node count"))??;
let num_edges = numbers
.next()
.ok_or_else(|| anyhow!("Missing edge count"))??;
ensure!(
numbers.next().is_none(),
"Too many numbers in first input line"
);
let instance = Self::load(num_nodes, num_edges, |handler| {
for _ in 0..num_edges {
line.clear();
reader.read_line(&mut line)?;
let mut numbers = line
.split_ascii_whitespace()
.map(|s| s.parse::<usize>().map_err(Error::from));
// Skip degree
numbers
.next()
.ok_or_else(|| anyhow!("empty edge line in input, expected degree"))??;
handler.handle_edge(numbers)?;
}
Ok(())
})?;
info!(
"Loaded text instance with {} nodes, {} edges in {:.2?}",
num_nodes,
num_edges,
time_before.elapsed(),
);
Ok(instance)
}
pub fn load_from_json(mut reader: impl BufRead) -> Result<Self> {
let time_before = Instant::now();
// Usually faster for large inputs, see https://github.com/serde-rs/json/issues/160
let mut text = String::new();
reader.read_to_string(&mut text)?;
let JsonInstance { num_nodes, edges } = serde_json::from_str(&text)?;
let num_edges = edges.len();
let instance = Self::load(num_nodes, num_edges, |handler| {
for edge in edges {
handler.handle_edge(edge.into_iter().map(Ok))?;
}
Ok(())
})?;
info!(
"Loaded json instance with {} nodes, {} edges in {:.2?}",
num_nodes,
num_edges,
time_before.elapsed(),
);
Ok(instance)
}
pub fn num_edges(&self) -> usize {
self.edges.len()
}
pub fn num_nodes_total(&self) -> usize {
self.node_incidences.len()
}
pub fn num_edges_total(&self) -> usize {
self.edge_incidences.len()
}
/// Edges incident to a node, sorted by increasing indices.
pub fn node(
&self,
node: NodeIdx,
) -> impl Iterator<Item = EdgeIdx> + ExactSizeIterator + Clone + '_ {
self.node_incidences[node.idx()]
.iter()
.map(|(_, (edge, _))| *edge)
}
/// Nodes incident to an edge, sorted by increasing indices.
pub fn edge(
&self,
edge: EdgeIdx,
) -> impl Iterator<Item = NodeIdx> + ExactSizeIterator + Clone + '_ {
self.edge_incidences[edge.idx()]
.iter()
.map(|(_, (node, _))| *node)
}
/// Alive nodes in the instance, in arbitrary order.
pub fn nodes(&self) -> &[NodeIdx] {
&self.nodes
}
/// Alive edges in the instance, in arbitrary order.
pub fn edges(&self) -> &[EdgeIdx] {
&self.edges
}
pub fn node_degree(&self, node: NodeIdx) -> usize {
self.node_incidences[node.idx()].len()
}
pub fn edge_size(&self, edge: EdgeIdx) -> usize {
self.edge_incidences[edge.idx()].len()
}
/// Deletes a node from the instance.
pub fn delete_node(&mut self, node: NodeIdx) {
trace!("Deleting node {}", node);
for (_idx, (edge, entry_idx)) in &self.node_incidences[node.idx()] {
self.edge_incidences[edge.idx()].delete(entry_idx.idx());
}
self.nodes.delete(node.idx());
}
/// Deletes an edge from the instance.
pub fn delete_edge(&mut self, edge: EdgeIdx) {
trace!("Deleting edge {}", edge);
for (_idx, (node, entry_idx)) in &self.edge_incidences[edge.idx()] {
self.node_incidences[node.idx()].delete(entry_idx.idx());
}
self.edges.delete(edge.idx());
}
/// Restores a previously deleted node.
///
/// All restore operations (node or edge) must be done in reverse order of
/// the corresponding deletions to produce sensible results.
pub fn restore_node(&mut self, node: NodeIdx) {
trace!("Restoring node {}", node);
for (_idx, (edge, entry_idx)) in self.node_incidences[node.idx()].iter().rev() {
self.edge_incidences[edge.idx()].restore(entry_idx.idx());
}
self.nodes.restore(node.idx());
}
/// Restores a previously deleted edge.
///
/// All restore operations (node or edge) must be done in reverse order of
/// the corresponding deletions to produce sensible results.
pub fn restore_edge(&mut self, edge: EdgeIdx) {
trace!("Restoring edge {}", edge);
for (_idx, (node, entry_idx)) in self.edge_incidences[edge.idx()].iter().rev() {
self.node_incidences[node.idx()].restore(entry_idx.idx());
}
self.edges.restore(edge.idx());
}
/// Deletes all edges incident to a node.
///
/// The node itself must have already been deleted.
pub fn delete_incident_edges(&mut self, node: NodeIdx) {
// We want to iterate over the incidence of `node` while deleting
// edges, which in turn changes node incidences. This is safe, since
// `node` itself was already deleted. To make the borrow checker
// accept this, we temporarily move `node` incidence to a local
// variable, replacing it with an empty list. This should not be much
// slower than unsafe alternatives, since an incidence list is only
// 28 bytes large.
trace!("Deleting all edges incident to {}", node);
debug_assert!(
self.nodes.is_deleted(node.idx()),
"Node passed to delete_incident_edges must be deleted"
);
let incidence = mem::take(&mut self.node_incidences[node.idx()]);
for (_, (edge, _)) in &incidence {
self.delete_edge(*edge);
}
self.node_incidences[node.idx()] = incidence;
}
/// Restores all incident edges to a node.
///
/// This reverses the effect of `delete_incident_edges`. As with all other
/// `restore_*` methods, this must be done in reverse order of deletions.
/// In particular, the node itself must still be deleted.
pub fn restore_incident_edges(&mut self, node: NodeIdx) {
trace!("Restoring all edges incident to {}", node);
debug_assert!(
self.nodes.is_deleted(node.idx()),
"Node passed to restore_incident_edges must be deleted"
);
// See `delete_incident_edges` for an explanation of this swapping around
let incidence = mem::take(&mut self.node_incidences[node.idx()]);
// It is important that we restore the edges in reverse order
for (_, (edge, _)) in incidence.iter().rev() {
self.restore_edge(*edge);
}
self.node_incidences[node.idx()] = incidence;
}
pub fn export_as_ilp(&self, mut writer: impl Write) -> Result<()> {
writeln!(writer, "Minimize")?;
write!(writer, " v{}", CompressedIlpName(self.nodes()[0]))?;
for &node in &self.nodes()[1..] {
write!(writer, " + v{}", CompressedIlpName(node))?;
}
writeln!(writer)?;
writeln!(writer, "Subject To")?;
for &edge in self.edges() {
write!(writer, " e{}: ", CompressedIlpName(edge))?;
for (idx, node) in self.edge(edge).enumerate() {
if idx > 0 |
write!(writer, "v{}", CompressedIlpName(node))?;
}
writeln!(writer, " >= 1")?;
}
writeln!(writer, "Binaries")?;
write!(writer, " v{}", CompressedIlpName(self.nodes()[0]))?;
for &node in &self.nodes()[1..] {
write!(writer, " v{}", CompressedIlpName(node))?;
}
writeln!(writer)?;
writeln!(writer, "End")?;
Ok(())
}
}
| {
write!(writer, " + ")?;
} | conditional_block |
instance.rs | use crate::{
create_idx_struct,
data_structures::{cont_idx_vec::ContiguousIdxVec, skipvec::SkipVec},
small_indices::SmallIdx,
};
use anyhow::{anyhow, ensure, Error, Result};
use log::{info, trace};
use serde::Deserialize;
use std::{
fmt::{self, Display, Write as _},
io::{BufRead, Write},
mem,
time::Instant,
};
create_idx_struct!(pub NodeIdx);
create_idx_struct!(pub EdgeIdx);
create_idx_struct!(pub EntryIdx);
#[derive(Debug)]
struct CompressedIlpName<T>(T);
impl<T: SmallIdx> Display for CompressedIlpName<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
const CHARS: &[u8] = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
let mut val = self.0.idx();
while val != 0 {
f.write_char(char::from(CHARS[val % CHARS.len()]))?;
val /= CHARS.len();
}
Ok(())
}
}
#[derive(Debug)]
struct ParsedEdgeHandler {
edge_incidences: Vec<SkipVec<(NodeIdx, EntryIdx)>>,
node_degrees: Vec<usize>,
}
impl ParsedEdgeHandler {
fn handle_edge(&mut self, node_indices: impl IntoIterator<Item = Result<usize>>) -> Result<()> {
let incidences = SkipVec::try_sorted_from(node_indices.into_iter().map(|idx_result| {
idx_result.and_then(|node_idx| {
ensure!(
node_idx < self.node_degrees.len(),
"invalid node idx in edge: {}",
node_idx
);
Ok((NodeIdx::from(node_idx), EntryIdx::INVALID))
})
}))?;
ensure!(incidences.len() > 0, "edges may not be empty");
for (_, (node, _)) in &incidences {
self.node_degrees[node.idx()] += 1;
}
self.edge_incidences.push(incidences);
Ok(())
}
}
#[derive(Debug, Deserialize)]
struct JsonInstance {
num_nodes: usize,
edges: Vec<Vec<usize>>,
}
#[derive(Clone, Debug)]
pub struct Instance {
nodes: ContiguousIdxVec<NodeIdx>,
edges: ContiguousIdxVec<EdgeIdx>,
node_incidences: Vec<SkipVec<(EdgeIdx, EntryIdx)>>,
edge_incidences: Vec<SkipVec<(NodeIdx, EntryIdx)>>,
}
impl Instance {
fn load(
num_nodes: usize,
num_edges: usize,
read_edges: impl FnOnce(&mut ParsedEdgeHandler) -> Result<()>,
) -> Result<Self> {
let mut handler = ParsedEdgeHandler {
edge_incidences: Vec::with_capacity(num_edges),
node_degrees: vec![0; num_nodes],
};
read_edges(&mut handler)?;
let ParsedEdgeHandler {
mut edge_incidences,
node_degrees,
} = handler;
let mut node_incidences: Vec<_> = node_degrees
.iter()
.map(|&len| SkipVec::with_len(len))
.collect();
let mut rem_node_degrees = node_degrees;
for (edge, incidences) in edge_incidences.iter_mut().enumerate() {
let edge = EdgeIdx::from(edge);
for (edge_entry_idx, edge_entry) in incidences.iter_mut() {
let node = edge_entry.0.idx();
let node_entry_idx = node_incidences[node].len() - rem_node_degrees[node];
rem_node_degrees[node] -= 1;
edge_entry.1 = EntryIdx::from(node_entry_idx);
node_incidences[node][node_entry_idx] = (edge, EntryIdx::from(edge_entry_idx));
}
}
Ok(Self {
nodes: (0..num_nodes).map(NodeIdx::from).collect(),
edges: (0..num_edges).map(EdgeIdx::from).collect(),
node_incidences,
edge_incidences,
})
}
pub fn load_from_text(mut reader: impl BufRead) -> Result<Self> {
let time_before = Instant::now();
let mut line = String::new();
reader.read_line(&mut line)?;
let mut numbers = line.split_ascii_whitespace().map(str::parse);
let num_nodes = numbers
.next()
.ok_or_else(|| anyhow!("Missing node count"))??;
let num_edges = numbers
.next()
.ok_or_else(|| anyhow!("Missing edge count"))??;
ensure!(
numbers.next().is_none(),
"Too many numbers in first input line"
);
let instance = Self::load(num_nodes, num_edges, |handler| {
for _ in 0..num_edges {
line.clear();
reader.read_line(&mut line)?;
let mut numbers = line
.split_ascii_whitespace()
.map(|s| s.parse::<usize>().map_err(Error::from));
// Skip degree
numbers
.next()
.ok_or_else(|| anyhow!("empty edge line in input, expected degree"))??;
handler.handle_edge(numbers)?;
}
Ok(())
})?;
info!(
"Loaded text instance with {} nodes, {} edges in {:.2?}",
num_nodes,
num_edges,
time_before.elapsed(),
);
Ok(instance)
}
pub fn load_from_json(mut reader: impl BufRead) -> Result<Self> {
let time_before = Instant::now();
// Usually faster for large inputs, see https://github.com/serde-rs/json/issues/160
let mut text = String::new();
reader.read_to_string(&mut text)?;
let JsonInstance { num_nodes, edges } = serde_json::from_str(&text)?;
let num_edges = edges.len();
let instance = Self::load(num_nodes, num_edges, |handler| {
for edge in edges {
handler.handle_edge(edge.into_iter().map(Ok))?;
}
Ok(())
})?;
info!(
"Loaded json instance with {} nodes, {} edges in {:.2?}",
num_nodes,
num_edges,
time_before.elapsed(),
);
Ok(instance)
}
pub fn num_edges(&self) -> usize {
self.edges.len()
}
pub fn num_nodes_total(&self) -> usize {
self.node_incidences.len()
}
pub fn num_edges_total(&self) -> usize {
self.edge_incidences.len()
}
/// Edges incident to a node, sorted by increasing indices.
pub fn node(
&self,
node: NodeIdx,
) -> impl Iterator<Item = EdgeIdx> + ExactSizeIterator + Clone + '_ {
self.node_incidences[node.idx()]
.iter()
.map(|(_, (edge, _))| *edge)
}
/// Nodes incident to an edge, sorted by increasing indices.
pub fn edge(
&self,
edge: EdgeIdx,
) -> impl Iterator<Item = NodeIdx> + ExactSizeIterator + Clone + '_ {
self.edge_incidences[edge.idx()]
.iter()
.map(|(_, (node, _))| *node)
}
/// Alive nodes in the instance, in arbitrary order.
pub fn nodes(&self) -> &[NodeIdx] {
&self.nodes
}
/// Alive edges in the instance, in arbitrary order.
pub fn edges(&self) -> &[EdgeIdx] {
&self.edges
}
pub fn node_degree(&self, node: NodeIdx) -> usize {
self.node_incidences[node.idx()].len()
}
pub fn edge_size(&self, edge: EdgeIdx) -> usize {
self.edge_incidences[edge.idx()].len()
}
/// Deletes a node from the instance.
pub fn delete_node(&mut self, node: NodeIdx) {
trace!("Deleting node {}", node);
for (_idx, (edge, entry_idx)) in &self.node_incidences[node.idx()] {
self.edge_incidences[edge.idx()].delete(entry_idx.idx());
}
self.nodes.delete(node.idx());
}
/// Deletes an edge from the instance.
pub fn delete_edge(&mut self, edge: EdgeIdx) {
trace!("Deleting edge {}", edge);
for (_idx, (node, entry_idx)) in &self.edge_incidences[edge.idx()] {
self.node_incidences[node.idx()].delete(entry_idx.idx());
}
self.edges.delete(edge.idx());
}
/// Restores a previously deleted node.
///
/// All restore operations (node or edge) must be done in reverse order of
/// the corresponding deletions to produce sensible results.
pub fn restore_node(&mut self, node: NodeIdx) {
trace!("Restoring node {}", node);
for (_idx, (edge, entry_idx)) in self.node_incidences[node.idx()].iter().rev() {
self.edge_incidences[edge.idx()].restore(entry_idx.idx());
}
self.nodes.restore(node.idx());
}
/// Restores a previously deleted edge.
///
/// All restore operations (node or edge) must be done in reverse order of
/// the corresponding deletions to produce sensible results.
pub fn | (&mut self, edge: EdgeIdx) {
trace!("Restoring edge {}", edge);
for (_idx, (node, entry_idx)) in self.edge_incidences[edge.idx()].iter().rev() {
self.node_incidences[node.idx()].restore(entry_idx.idx());
}
self.edges.restore(edge.idx());
}
/// Deletes all edges incident to a node.
///
/// The node itself must have already been deleted.
pub fn delete_incident_edges(&mut self, node: NodeIdx) {
// We want to iterate over the incidence of `node` while deleting
// edges, which in turn changes node incidences. This is safe, since
// `node` itself was already deleted. To make the borrow checker
// accept this, we temporarily move `node` incidence to a local
// variable, replacing it with an empty list. This should not be much
// slower than unsafe alternatives, since an incidence list is only
// 28 bytes large.
trace!("Deleting all edges incident to {}", node);
debug_assert!(
self.nodes.is_deleted(node.idx()),
"Node passed to delete_incident_edges must be deleted"
);
let incidence = mem::take(&mut self.node_incidences[node.idx()]);
for (_, (edge, _)) in &incidence {
self.delete_edge(*edge);
}
self.node_incidences[node.idx()] = incidence;
}
/// Restores all incident edges to a node.
///
/// This reverses the effect of `delete_incident_edges`. As with all other
/// `restore_*` methods, this must be done in reverse order of deletions.
/// In particular, the node itself must still be deleted.
pub fn restore_incident_edges(&mut self, node: NodeIdx) {
trace!("Restoring all edges incident to {}", node);
debug_assert!(
self.nodes.is_deleted(node.idx()),
"Node passed to restore_incident_edges must be deleted"
);
// See `delete_incident_edges` for an explanation of this swapping around
let incidence = mem::take(&mut self.node_incidences[node.idx()]);
// It is important that we restore the edges in reverse order
for (_, (edge, _)) in incidence.iter().rev() {
self.restore_edge(*edge);
}
self.node_incidences[node.idx()] = incidence;
}
pub fn export_as_ilp(&self, mut writer: impl Write) -> Result<()> {
writeln!(writer, "Minimize")?;
write!(writer, " v{}", CompressedIlpName(self.nodes()[0]))?;
for &node in &self.nodes()[1..] {
write!(writer, " + v{}", CompressedIlpName(node))?;
}
writeln!(writer)?;
writeln!(writer, "Subject To")?;
for &edge in self.edges() {
write!(writer, " e{}: ", CompressedIlpName(edge))?;
for (idx, node) in self.edge(edge).enumerate() {
if idx > 0 {
write!(writer, " + ")?;
}
write!(writer, "v{}", CompressedIlpName(node))?;
}
writeln!(writer, " >= 1")?;
}
writeln!(writer, "Binaries")?;
write!(writer, " v{}", CompressedIlpName(self.nodes()[0]))?;
for &node in &self.nodes()[1..] {
write!(writer, " v{}", CompressedIlpName(node))?;
}
writeln!(writer)?;
writeln!(writer, "End")?;
Ok(())
}
}
| restore_edge | identifier_name |
jcontroller_service.py | #!/usr/bin/env python3
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
import array
import math
import socket
import struct
import threading
import time
import traceback
from enum import Enum
from fcntl import ioctl
import pyroslib as pyros
DEBUG_AXES = False
DEBUG_BUTTONS = False
DEBUG_JOYSTICK = False
DEBUG_UDP = False
EXPO = 0.5
MAX_STOPPING = 10
JCONTROLLER_UDP_PORT = 1880
lastDividerL = 1
lastDividerR = 1
dividerL = 1
dividerR = 1
gyroAngle = 0
gyroDeltaAngle = 0
class modes(Enum):
NONE = 0
NORMAL = ' X'
GOLF = 2
PINOON = 3
DUCK_SHOOT = 4
OBSTICLE_COURSE = 5
mode = modes.DUCK_SHOOT
speeds = [25, 50, 100, 150, 300]
speed_index = 2
mode = modes.GOLF
wobble = False
wobble_alpha = 0
# We'll store the states here.
axis_states = {}
button_states = {}
haveJoystickEvent = False
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'trottle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'select',
0x121: 'lbutton',
0x122: 'rbutton',
0x123: 'start',
0x124: 'lup',
0x125: 'lright',
0x126: 'ldown',
0x127: 'lleft',
0x128: 'tl1',
0x129: 'tr1',
0x12a: 'tl2',
0x12b: 'tr2',
0x12c: 'by',
0x12d: 'ba',
0x12e: 'bb',
0x12f: 'bx',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
# 0x13a: 'select',
# 0x13b: 'start',
# 0x13c: 'mode',
# 0x13d: 'thumbl',
# 0x13e: 'thumbr',
# 0x220: 'dpad_up',
# 0x221: 'dpad_down',
# 0x222: 'dpad_left',
# 0x223: 'dpad_right',
# XBo 360 controller uses these codes.
# 0x2c0: 'dpad_left',
# 0x2c1: 'dpad_right',
# 0x2c2: 'dpad_up',
# 0x2c3: 'dpad_down',
}
orbitDistance = 0
axis_map = []
button_map = []
# Open the joystick device.
lunge_back_time = 0
def handleGyroData(topic, message, groups):
global gyroAngle, gyroDeltaAngle
data = message.split(",")
gyroChange = float(data[2])
gyroDeltaAngle = gyroChange
gyroAngle += gyroChange
gyroDeltaTime = float(data[3])
lastGyroReceivedTime = time.time()
# print("gyro angle: " + str(gyroAngle))
def connectToJoystick(printError):
global fn, jsdev
try:
fn = '/dev/input/js0'
# print('Opening %s...' % fn)
jsdev = open(fn, 'rb')
# Get the device name.
# buf = bytearray(63)
# buf = array.array('c', ['\0'] * 64)
buf = array.array('b', [0] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tostring()
print('Device name: %s' % js_name)
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS |
for axis in buf[:num_axes]:
axis_name = axis_names.get(axis, '0x%02x' % axis)
axis_map.append(axis_name)
axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = button_names.get(btn, '0x%03x' % btn)
button_map.append(btn_name)
button_states[btn_name] = 0
print('%d axes found: %s' % (num_axes, ', '.join(axis_map)))
print('%d buttons found: %s' % (num_buttons, ', '.join(button_map)))
return True
except Exception as e:
if printError:
print("Failed to connect to joystick" + str(e))
return False
except BaseException as e:
if printError:
print("Failed to connect to joystick - no exception given " + str(e))
return False
def readEvents():
global haveJoystickEvent
reconnect = True
noError = True
while True:
if reconnect:
connected = connectToJoystick(noError)
if connected:
reconnect = False
noError = True
else:
noError = False
time.sleep(0.5)
else:
try:
evbuf = jsdev.read(8)
if evbuf:
time_of_event, value, event_type, number = struct.unpack('IhBB', evbuf)
if event_type & 0x01:
button = button_map[number]
if button:
button_states[button] = value
haveJoystickEvent = True
if event_type & 0x02:
selected_axis = axis_map[number]
if selected_axis:
fvalue = value / 32767.0
axis_states[selected_axis] = fvalue
haveJoystickEvent = True
except BaseException as e:
print("Failed to read joystick " + str(e))
reconnect = True
time.sleep(0.2)
def readUDPEvents():
global haveJoystickEvent
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', JCONTROLLER_UDP_PORT))
s.settimeout(10)
print(" Started receive thread...")
while True:
try:
data, addr = s.recvfrom(1024)
p = str(data, 'utf-8')
if p.startswith("J#"):
if DEBUG_UDP:
print(" received " + p)
kvps = p[2:].split(";")
for kvp in kvps:
kv = kvp.split("=")
if len(kv) == 2:
key = kv[0]
value = kv[1]
if key in axis_states:
axis_states[key] = float(value)
haveJoystickEvent = True
elif key in button_states:
button_states[key] = int(value)
haveJoystickEvent = True
except:
pass
def startReadEventsLoopThread():
thread = threading.Thread(target=readEvents, args=())
thread.daemon = True
thread.start()
def startReadUDPEventsLoopThread():
thread = threading.Thread(target=readUDPEvents, args=())
thread.daemon = True
thread.start()
startReadEventsLoopThread()
startReadUDPEventsLoopThread()
topSpeed = 50
sensorDistance = 200
directionLock = False
alreadyStopped = 0
lastX1 = 0
lastY1 = 0
lastX2 = 0
lastY2 = 0
lastX3 = 0
lastY3 = 0
lastSelect = False
lastStart = False
lastTL = False
lastTL2 = False
lastTR = False
lastTR2 = False
lastA = False
lastB = False
lastBX = False
lastBY = False
lastLButton = False
lastRButton = False
lastTopSpeed = topSpeed
doOrbit = False
prepareToOrbit = False
continueToReadDistance = False
boost = False
kick = 0
fullSpeed = False
axis_states["x"] = 0
axis_states["y"] = 0
axis_states["rx"] = 0
axis_states["ry"] = 0
# for button_name in button_names:
# button_states[button_names[button_name]] = 0
lastBoost = False
balLocked = False
target_charge = 0
charge = 0
last_charge = 0
elevation = 0
target_angle = 0
def moveServo(servoid, angle):
# TODO move this out to separate service
f = open("/dev/servoblaster", 'w')
f.write(str(servoid) + "=" + str(angle) + "\n")
f.close()
pyros.publish("servo/" + str(servoid), str(int(angle)))
def setCharge(value):
global charge, lastCharge
lastCharge = charge
charge = value
if not charge == lastCharge:
motorSpeed = int(85 + charge * (105 - 85) / 100)
print("DUCK motor speed: " + str(motorSpeed) + " charge:" + str(charge))
# moveServo(13, motorSpeed)
pyros.publish("servo/13", str(motorSpeed))
def addCharge(ammount):
global charge
setCharge(charge + ammount)
def lockDirectionLoop():
global gyroAngle, target_angle, directionLock
if directionLock:
difference = target_angle - gyroAngle
multiplier = 1
turn_speed = difference * multiplier
if turn_speed < -150:
turn_speed = -150
elif turn_speed > 150:
turn_speed = 150
print("turning at speed " + str(turn_speed) + " to match the angle " + str(gyroAngle) + " to " + str(target_angle))
pyros.publish("move/rotate", str(int(turn_speed)))
def processButtons():
global lastX3, lastY3, lastSelect, lastStart
global lastTL, lastTL2, lastTR, lastTR2, lastA, lastB, lastBX, lastBY, lastDividerL, lastDividerR, target_charge
global lastLButton, lastRButton, dividerL, dividerR, directionLock
global topSpeed, prepareToOrbit, continueToReadDistance, doOrbit, boost, kick, lastBoost, lastTL, balLocked, charge, mode, elevation, fullSpeed, target_angle, speed_index, speeds
global wobble
global lup
# print("Axis states: " + str(axis_states))
# 4 ly up: "TopBtn2", lx r 5: "PinkieBtn", ly down 6: "BaseBtn", lx left 7:"BaseBtn2"
# x3 = int(axis_states["hat0x"])
# y3 = int(axis_states["hat0y"])
try:
lup = button_states["lup"]
lright = button_states["lright"]
ldown = button_states["ldown"]
lleft = button_states["lleft"]
x3 = 0
y3 = 0
if lup:
y3 = -1
if ldown:
y3 = 1
if lleft:
x3 = -1
if lright:
x3 = 1
tl = button_states["tl1"]
tl2 = button_states["tl2"]
tr = button_states["tr1"]
tr2 = button_states["tr2"]
a = button_states["ba"]
bb = button_states["bb"]
bx = button_states["bx"]
by = button_states["by"]
start = button_states["start"]
select = button_states["select"]
lbutton = button_states["lbutton"]
rbutton = button_states["rbutton"]
lastDividerR = dividerR
if rbutton:
dividerR = 4
else:
dividerR = 1
lastDividerL = dividerL
if lbutton:
dividerL = 4
else:
dividerL = 1
if y3 != lastY3:
if y3 < 0:
if speed_index < len(speeds) :
speed_index += 1
topSpeed = speeds[speed_index]
elif y3 > 0:
if speed_index > 0:
speed_index -= 1
topSpeed = speeds[speed_index]
if x3 != lastX3:
if x3 > 0:
topSpeed += 100
if topSpeed > 300:
topSpeed = 300
elif x3 < 0:
if topSpeed >= 100:
topSpeed -= 100
if topSpeed < 30:
topSpeed = 30
elif topSpeed > 50:
topSpeed = 50
wobble = False
# print("mode: " + str(mode))
# if mode == modes.PINOON:
# fullSpeed = tl
# lastBoost = boost
# boost = tr
#
# wobble = tr2
# if not boost:
# if tl2 and not lastTL2:
# print("prepared to do orbit")
# doOrbit = True
# pyros.publish("sensor/distance/read", "0")
#
# doOrbit = tl2
# # if tl2:
# # pyros.publish("sensor/distance/read", "0")
# else:
# doOrbit = False
# if mode == modes.OBSTICAL_COURSE:
fullSpeed = tl
pyros.publish("sensor/gyro/continuous", "continue")
if tr2 and not lastTR2:
directionLock = True
target_angle = gyroAngle
elif not tr2 and lastTR2:
directionLock = False
pyros.publish("move/stop", "0")
lockDirectionLoop()
# if mode == modes.GOLF:
# print("golf")
#
# fullSpeed = tl
# print("tr2: " + str(tr2))
# if tr2 and not lastTR2:
# balLocked = not balLocked
#
# if balLocked:
# # moveServo(9, 220)
# moveServo(9, 217)
#
# print("locke")
#
#
# if tr:
# moveServo(9, 100)
# print("tr")
#
# balLocked = False
# else:
# if not balLocked:
# print("not locked")
#
# moveServo(9, 150)
#
# if bx and bx != lastBX:
# kick = 1
# print("kick")
# pyros.publish("move/drive", "0 300")
# pyros.sleep(1)
# pyros.publish("move/drive", "0 0")
# if mode == modes.DUCK_SHOOT:
# print("shooting ducks")
# if tr:
# pyros.publish("servo/9", "115")
# else:
# pyros.publish("servo/9", "175")
#
# if tl and not lastTL:
# target_charge = 100
# print("charging")
# elif not tl and lastTL:
# target_charge = 65
# print("decharging")
#
#
# if charge > target_charge:
# addCharge(-1)
# elif charge < target_charge:
# addCharge(1)
# setCharge(charge)
#
# if tr2:
# if elevation > -25:
# print("waaaa")
# elevation -= 1
# if tl2:
# if elevation < 25:
# print("weeeee")
# elevation += 1
#
# servoValue = 150 + elevation
# # print("elevation: " + str(elevation) + " servo: " + str(servoValue))
# print("targetcharge: " + str(target_charge) + " charge: " + str(charge))
# pyros.publish("servo/12", str(servoValue))
# else:
# fullSpeed = tl
# if mode != modes.DUCK_SHOOT:
# target_charge = 0
# setCharge(0)
if a:
mode = modes.OBSTICAL_COURSE
print("obsitcal")
elif bb:
mode = modes.DUCK_SHOOT
target_charge = 0
setCharge(0)
elevation = 0
elif bx:
mode = modes.GOLF
print("golf")
elif by:
mode = modes.PINOON
lastX3 = x3
lastY3 = y3
lastStart = start
lastTL = tl
lastTL2 = tl2
lastTR = tr
lastTR2 = tr2
lastA = a
lastB = bb
lastBX = bx
lastBY = by
lastSelect = select
lastLButton = lbutton
lastRButton = rbutton
if DEBUG_BUTTONS:
print("OK Button states: " + str(button_states))
if DEBUG_AXES:
print("OK Axis states: " + str(axis_states))
except Exception as e:
if DEBUG_BUTTONS:
print("ERR Button states: " + str(button_states) + str(e))
if DEBUG_AXES:
print("ERR Axis states: " + str(axis_states) + str(e))
def calcRoverSpeed(speed):
global fullSpeed
spd = speed
if boost or lunge_back_time > 0 or fullSpeed:
# spd = int(speed * topSpeed * 2)
# if spd > 300:
if speed > 0:
spd = 300
elif speed < 0:
spd = -300
else:
spd = 0
else:
spd = int(speed * topSpeed)
if spd > 300:
spd = 300
elif spd < -300:
spd = -300
return spd
def calculateExpo(v, expoPercentage):
if v >= 0:
return v * v * expoPercentage + v * (1.0 - expoPercentage)
else:
return - v * v * expoPercentage + v * (1.0 - expoPercentage)
def calcRoverDistance(distance):
if distance >= 0:
distance = abs(distance)
distance = 1.0 - distance
distance += 0.2
distance *= 500
else:
distance = abs(distance)
distance = 1.0 - distance
distance += 0.2
distance = - distance * 500
return int(distance)
def processJoysticks():
global kick, dividerR, dividerL, lastDividerR, lastDividerL, boost, lunge_back_time, alreadyStopped, orbitDistance, directionLock, target_angle, wobble, wobble_alpha
lx = float(axis_states["x"])
ly = float(axis_states["y"])
rx = float(axis_states["rx"])
ry = float(axis_states["ry"])
if wobble:
print("wobble")
rx = float(math.sin(wobble_alpha * 0.9))
if ry < 0.1 and ry > -0.1 and rx < 0.1 and rx > -0.1:
if boost:
lunge_back_time += 1
if lunge_back_time > 6:
lunge_back_time = 6
ry = -1
lx = 0
ly = 0
else:
if lunge_back_time > 0:
lunge_back_time -= 1
ry = 1
lx = 0
ly = 0
else:
if not ry > -0:
lunge_back_time = 0
ld = math.sqrt(lx * lx + ly * ly)
rd = math.sqrt(rx * rx + ry * ry)
ra = math.atan2(rx, -ry) * 180 / math.pi
if not directionLock:
if ld < 0.1 < rd:
distance = rd
distance = calculateExpo(distance, EXPO)
roverSpeed = calcRoverSpeed(distance)
pyros.publish("move/drive", str(round(ra, 1)) + " " + str(int(roverSpeed / dividerR)))
if DEBUG_JOYSTICK:
print("Driving a:" + str(round(ra, 1)) + " s:" + str(roverSpeed) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
elif ld > 0.1 and rd > 0.1:
ory = ry
olx = lx
ry = calculateExpo(ry, EXPO)
lx = calculateExpo(lx, EXPO)
roverSpeed = -calcRoverSpeed(ry) * 1.3
roverTurningDistance = calcRoverDistance(lx)
pyros.publish("move/steer", str(roverTurningDistance) + " " + str(int(roverSpeed / dividerR)))
if DEBUG_JOYSTICK:
print("Steering d:" + str(roverTurningDistance) + " s:" + str(roverSpeed) + " ry: " + str(ory) + " lx:" + str(olx) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
elif ld > 0.1:
if doOrbit:
orbitDistance = sensorDistance + (ly * 5)
roverSpeed = calcRoverSpeed(lx) / 1
# print("speed: " + str(roverSpeed))
# print(str(int(orbitDistance + 70)) + " " + str(int(roverSpeed)))
pyros.publish("move/orbit", str(int(orbitDistance + 70)) + " " + str(int(roverSpeed)))
if DEBUG_JOYSTICK:
print("Orbit sen:" + str(int(orbitDistance + 70)) + " s:" + str(roverSpeed) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
else:
olx = lx
lx = calculateExpo(lx, EXPO) / 2
roverSpeed = calcRoverSpeed(lx)
pyros.publish("move/rotate", int(roverSpeed / dividerL))
if DEBUG_JOYSTICK:
print("Rotate s:" + str(roverSpeed) + " lx:" + str(olx) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
# elif kick > 0:
# if DEBUG_JOYSTICK:
# print("Kick stop: ld:" + str(ld) + " rd:" + str(rd))
# pass
#
# alreadyStopped = 0
else:
# pyros.publish("move/drive", str(ra) + " 0")
# if ra != 0:
# print("-> move/drive " + str(ra))
roverSpeed = 0
if alreadyStopped < MAX_STOPPING:
pyros.publish("move/stop", "0")
alreadyStopped += 1
if DEBUG_JOYSTICK:
print("Rotate stop: ld:" + str(ld) + " rd:" + str(rd))
else:
target_angle += lx * 4
def handleDistance(topic, message, groups):
global sensorDistance, prepareToOrbit, orbitDistance
# print("** distance = " + message)
if "," in message:
pass
else:
split = message.split(":")
d = float(split[1])
print("d: " + str(d))
if d >= 0:
sensorDistance = d
orbitDistance = sensorDistance
if prepareToOrbit:
prepareToOrbit = False
# Main event loop
def loop():
global wobble_alpha
wobble_alpha += 1
processButtons()
if haveJoystickEvent:
processJoysticks()
if __name__ == "__main__":
try:
print("Starting jcontroller service...")
pyros.subscribe("sensor/gyro", handleGyroData)
pyros.subscribe("sensor/distance", handleDistance)
pyros.init("jcontroller-service")
print("Started jcontroller service.")
pyros.publish("servo/9", "175")
pyros.forever(0.1, loop)
except Exception as ex:
print("ERROR: " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__))) | num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf) # JSIOCGAXMAP | random_line_split |
jcontroller_service.py | #!/usr/bin/env python3
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
import array
import math
import socket
import struct
import threading
import time
import traceback
from enum import Enum
from fcntl import ioctl
import pyroslib as pyros
DEBUG_AXES = False
DEBUG_BUTTONS = False
DEBUG_JOYSTICK = False
DEBUG_UDP = False
EXPO = 0.5
MAX_STOPPING = 10
JCONTROLLER_UDP_PORT = 1880
lastDividerL = 1
lastDividerR = 1
dividerL = 1
dividerR = 1
gyroAngle = 0
gyroDeltaAngle = 0
class modes(Enum):
NONE = 0
NORMAL = ' X'
GOLF = 2
PINOON = 3
DUCK_SHOOT = 4
OBSTICLE_COURSE = 5
mode = modes.DUCK_SHOOT
speeds = [25, 50, 100, 150, 300]
speed_index = 2
mode = modes.GOLF
wobble = False
wobble_alpha = 0
# We'll store the states here.
axis_states = {}
button_states = {}
haveJoystickEvent = False
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'trottle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'select',
0x121: 'lbutton',
0x122: 'rbutton',
0x123: 'start',
0x124: 'lup',
0x125: 'lright',
0x126: 'ldown',
0x127: 'lleft',
0x128: 'tl1',
0x129: 'tr1',
0x12a: 'tl2',
0x12b: 'tr2',
0x12c: 'by',
0x12d: 'ba',
0x12e: 'bb',
0x12f: 'bx',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
# 0x13a: 'select',
# 0x13b: 'start',
# 0x13c: 'mode',
# 0x13d: 'thumbl',
# 0x13e: 'thumbr',
# 0x220: 'dpad_up',
# 0x221: 'dpad_down',
# 0x222: 'dpad_left',
# 0x223: 'dpad_right',
# XBo 360 controller uses these codes.
# 0x2c0: 'dpad_left',
# 0x2c1: 'dpad_right',
# 0x2c2: 'dpad_up',
# 0x2c3: 'dpad_down',
}
orbitDistance = 0
axis_map = []
button_map = []
# Open the joystick device.
lunge_back_time = 0
def handleGyroData(topic, message, groups):
global gyroAngle, gyroDeltaAngle
data = message.split(",")
gyroChange = float(data[2])
gyroDeltaAngle = gyroChange
gyroAngle += gyroChange
gyroDeltaTime = float(data[3])
lastGyroReceivedTime = time.time()
# print("gyro angle: " + str(gyroAngle))
def connectToJoystick(printError):
global fn, jsdev
try:
fn = '/dev/input/js0'
# print('Opening %s...' % fn)
jsdev = open(fn, 'rb')
# Get the device name.
# buf = bytearray(63)
# buf = array.array('c', ['\0'] * 64)
buf = array.array('b', [0] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tostring()
print('Device name: %s' % js_name)
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = axis_names.get(axis, '0x%02x' % axis)
axis_map.append(axis_name)
axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = button_names.get(btn, '0x%03x' % btn)
button_map.append(btn_name)
button_states[btn_name] = 0
print('%d axes found: %s' % (num_axes, ', '.join(axis_map)))
print('%d buttons found: %s' % (num_buttons, ', '.join(button_map)))
return True
except Exception as e:
if printError:
print("Failed to connect to joystick" + str(e))
return False
except BaseException as e:
if printError:
print("Failed to connect to joystick - no exception given " + str(e))
return False
def readEvents():
|
def readUDPEvents():
global haveJoystickEvent
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', JCONTROLLER_UDP_PORT))
s.settimeout(10)
print(" Started receive thread...")
while True:
try:
data, addr = s.recvfrom(1024)
p = str(data, 'utf-8')
if p.startswith("J#"):
if DEBUG_UDP:
print(" received " + p)
kvps = p[2:].split(";")
for kvp in kvps:
kv = kvp.split("=")
if len(kv) == 2:
key = kv[0]
value = kv[1]
if key in axis_states:
axis_states[key] = float(value)
haveJoystickEvent = True
elif key in button_states:
button_states[key] = int(value)
haveJoystickEvent = True
except:
pass
def startReadEventsLoopThread():
thread = threading.Thread(target=readEvents, args=())
thread.daemon = True
thread.start()
def startReadUDPEventsLoopThread():
thread = threading.Thread(target=readUDPEvents, args=())
thread.daemon = True
thread.start()
startReadEventsLoopThread()
startReadUDPEventsLoopThread()
topSpeed = 50
sensorDistance = 200
directionLock = False
alreadyStopped = 0
lastX1 = 0
lastY1 = 0
lastX2 = 0
lastY2 = 0
lastX3 = 0
lastY3 = 0
lastSelect = False
lastStart = False
lastTL = False
lastTL2 = False
lastTR = False
lastTR2 = False
lastA = False
lastB = False
lastBX = False
lastBY = False
lastLButton = False
lastRButton = False
lastTopSpeed = topSpeed
doOrbit = False
prepareToOrbit = False
continueToReadDistance = False
boost = False
kick = 0
fullSpeed = False
axis_states["x"] = 0
axis_states["y"] = 0
axis_states["rx"] = 0
axis_states["ry"] = 0
# for button_name in button_names:
# button_states[button_names[button_name]] = 0
lastBoost = False
balLocked = False
target_charge = 0
charge = 0
last_charge = 0
elevation = 0
target_angle = 0
def moveServo(servoid, angle):
# TODO move this out to separate service
f = open("/dev/servoblaster", 'w')
f.write(str(servoid) + "=" + str(angle) + "\n")
f.close()
pyros.publish("servo/" + str(servoid), str(int(angle)))
def setCharge(value):
global charge, lastCharge
lastCharge = charge
charge = value
if not charge == lastCharge:
motorSpeed = int(85 + charge * (105 - 85) / 100)
print("DUCK motor speed: " + str(motorSpeed) + " charge:" + str(charge))
# moveServo(13, motorSpeed)
pyros.publish("servo/13", str(motorSpeed))
def addCharge(ammount):
global charge
setCharge(charge + ammount)
def lockDirectionLoop():
global gyroAngle, target_angle, directionLock
if directionLock:
difference = target_angle - gyroAngle
multiplier = 1
turn_speed = difference * multiplier
if turn_speed < -150:
turn_speed = -150
elif turn_speed > 150:
turn_speed = 150
print("turning at speed " + str(turn_speed) + " to match the angle " + str(gyroAngle) + " to " + str(target_angle))
pyros.publish("move/rotate", str(int(turn_speed)))
def processButtons():
global lastX3, lastY3, lastSelect, lastStart
global lastTL, lastTL2, lastTR, lastTR2, lastA, lastB, lastBX, lastBY, lastDividerL, lastDividerR, target_charge
global lastLButton, lastRButton, dividerL, dividerR, directionLock
global topSpeed, prepareToOrbit, continueToReadDistance, doOrbit, boost, kick, lastBoost, lastTL, balLocked, charge, mode, elevation, fullSpeed, target_angle, speed_index, speeds
global wobble
global lup
# print("Axis states: " + str(axis_states))
# 4 ly up: "TopBtn2", lx r 5: "PinkieBtn", ly down 6: "BaseBtn", lx left 7:"BaseBtn2"
# x3 = int(axis_states["hat0x"])
# y3 = int(axis_states["hat0y"])
try:
lup = button_states["lup"]
lright = button_states["lright"]
ldown = button_states["ldown"]
lleft = button_states["lleft"]
x3 = 0
y3 = 0
if lup:
y3 = -1
if ldown:
y3 = 1
if lleft:
x3 = -1
if lright:
x3 = 1
tl = button_states["tl1"]
tl2 = button_states["tl2"]
tr = button_states["tr1"]
tr2 = button_states["tr2"]
a = button_states["ba"]
bb = button_states["bb"]
bx = button_states["bx"]
by = button_states["by"]
start = button_states["start"]
select = button_states["select"]
lbutton = button_states["lbutton"]
rbutton = button_states["rbutton"]
lastDividerR = dividerR
if rbutton:
dividerR = 4
else:
dividerR = 1
lastDividerL = dividerL
if lbutton:
dividerL = 4
else:
dividerL = 1
if y3 != lastY3:
if y3 < 0:
if speed_index < len(speeds) :
speed_index += 1
topSpeed = speeds[speed_index]
elif y3 > 0:
if speed_index > 0:
speed_index -= 1
topSpeed = speeds[speed_index]
if x3 != lastX3:
if x3 > 0:
topSpeed += 100
if topSpeed > 300:
topSpeed = 300
elif x3 < 0:
if topSpeed >= 100:
topSpeed -= 100
if topSpeed < 30:
topSpeed = 30
elif topSpeed > 50:
topSpeed = 50
wobble = False
# print("mode: " + str(mode))
# if mode == modes.PINOON:
# fullSpeed = tl
# lastBoost = boost
# boost = tr
#
# wobble = tr2
# if not boost:
# if tl2 and not lastTL2:
# print("prepared to do orbit")
# doOrbit = True
# pyros.publish("sensor/distance/read", "0")
#
# doOrbit = tl2
# # if tl2:
# # pyros.publish("sensor/distance/read", "0")
# else:
# doOrbit = False
# if mode == modes.OBSTICAL_COURSE:
fullSpeed = tl
pyros.publish("sensor/gyro/continuous", "continue")
if tr2 and not lastTR2:
directionLock = True
target_angle = gyroAngle
elif not tr2 and lastTR2:
directionLock = False
pyros.publish("move/stop", "0")
lockDirectionLoop()
# if mode == modes.GOLF:
# print("golf")
#
# fullSpeed = tl
# print("tr2: " + str(tr2))
# if tr2 and not lastTR2:
# balLocked = not balLocked
#
# if balLocked:
# # moveServo(9, 220)
# moveServo(9, 217)
#
# print("locke")
#
#
# if tr:
# moveServo(9, 100)
# print("tr")
#
# balLocked = False
# else:
# if not balLocked:
# print("not locked")
#
# moveServo(9, 150)
#
# if bx and bx != lastBX:
# kick = 1
# print("kick")
# pyros.publish("move/drive", "0 300")
# pyros.sleep(1)
# pyros.publish("move/drive", "0 0")
# if mode == modes.DUCK_SHOOT:
# print("shooting ducks")
# if tr:
# pyros.publish("servo/9", "115")
# else:
# pyros.publish("servo/9", "175")
#
# if tl and not lastTL:
# target_charge = 100
# print("charging")
# elif not tl and lastTL:
# target_charge = 65
# print("decharging")
#
#
# if charge > target_charge:
# addCharge(-1)
# elif charge < target_charge:
# addCharge(1)
# setCharge(charge)
#
# if tr2:
# if elevation > -25:
# print("waaaa")
# elevation -= 1
# if tl2:
# if elevation < 25:
# print("weeeee")
# elevation += 1
#
# servoValue = 150 + elevation
# # print("elevation: " + str(elevation) + " servo: " + str(servoValue))
# print("targetcharge: " + str(target_charge) + " charge: " + str(charge))
# pyros.publish("servo/12", str(servoValue))
# else:
# fullSpeed = tl
# if mode != modes.DUCK_SHOOT:
# target_charge = 0
# setCharge(0)
if a:
mode = modes.OBSTICAL_COURSE
print("obsitcal")
elif bb:
mode = modes.DUCK_SHOOT
target_charge = 0
setCharge(0)
elevation = 0
elif bx:
mode = modes.GOLF
print("golf")
elif by:
mode = modes.PINOON
lastX3 = x3
lastY3 = y3
lastStart = start
lastTL = tl
lastTL2 = tl2
lastTR = tr
lastTR2 = tr2
lastA = a
lastB = bb
lastBX = bx
lastBY = by
lastSelect = select
lastLButton = lbutton
lastRButton = rbutton
if DEBUG_BUTTONS:
print("OK Button states: " + str(button_states))
if DEBUG_AXES:
print("OK Axis states: " + str(axis_states))
except Exception as e:
if DEBUG_BUTTONS:
print("ERR Button states: " + str(button_states) + str(e))
if DEBUG_AXES:
print("ERR Axis states: " + str(axis_states) + str(e))
def calcRoverSpeed(speed):
global fullSpeed
spd = speed
if boost or lunge_back_time > 0 or fullSpeed:
# spd = int(speed * topSpeed * 2)
# if spd > 300:
if speed > 0:
spd = 300
elif speed < 0:
spd = -300
else:
spd = 0
else:
spd = int(speed * topSpeed)
if spd > 300:
spd = 300
elif spd < -300:
spd = -300
return spd
def calculateExpo(v, expoPercentage):
if v >= 0:
return v * v * expoPercentage + v * (1.0 - expoPercentage)
else:
return - v * v * expoPercentage + v * (1.0 - expoPercentage)
def calcRoverDistance(distance):
if distance >= 0:
distance = abs(distance)
distance = 1.0 - distance
distance += 0.2
distance *= 500
else:
distance = abs(distance)
distance = 1.0 - distance
distance += 0.2
distance = - distance * 500
return int(distance)
def processJoysticks():
global kick, dividerR, dividerL, lastDividerR, lastDividerL, boost, lunge_back_time, alreadyStopped, orbitDistance, directionLock, target_angle, wobble, wobble_alpha
lx = float(axis_states["x"])
ly = float(axis_states["y"])
rx = float(axis_states["rx"])
ry = float(axis_states["ry"])
if wobble:
print("wobble")
rx = float(math.sin(wobble_alpha * 0.9))
if ry < 0.1 and ry > -0.1 and rx < 0.1 and rx > -0.1:
if boost:
lunge_back_time += 1
if lunge_back_time > 6:
lunge_back_time = 6
ry = -1
lx = 0
ly = 0
else:
if lunge_back_time > 0:
lunge_back_time -= 1
ry = 1
lx = 0
ly = 0
else:
if not ry > -0:
lunge_back_time = 0
ld = math.sqrt(lx * lx + ly * ly)
rd = math.sqrt(rx * rx + ry * ry)
ra = math.atan2(rx, -ry) * 180 / math.pi
if not directionLock:
if ld < 0.1 < rd:
distance = rd
distance = calculateExpo(distance, EXPO)
roverSpeed = calcRoverSpeed(distance)
pyros.publish("move/drive", str(round(ra, 1)) + " " + str(int(roverSpeed / dividerR)))
if DEBUG_JOYSTICK:
print("Driving a:" + str(round(ra, 1)) + " s:" + str(roverSpeed) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
elif ld > 0.1 and rd > 0.1:
ory = ry
olx = lx
ry = calculateExpo(ry, EXPO)
lx = calculateExpo(lx, EXPO)
roverSpeed = -calcRoverSpeed(ry) * 1.3
roverTurningDistance = calcRoverDistance(lx)
pyros.publish("move/steer", str(roverTurningDistance) + " " + str(int(roverSpeed / dividerR)))
if DEBUG_JOYSTICK:
print("Steering d:" + str(roverTurningDistance) + " s:" + str(roverSpeed) + " ry: " + str(ory) + " lx:" + str(olx) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
elif ld > 0.1:
if doOrbit:
orbitDistance = sensorDistance + (ly * 5)
roverSpeed = calcRoverSpeed(lx) / 1
# print("speed: " + str(roverSpeed))
# print(str(int(orbitDistance + 70)) + " " + str(int(roverSpeed)))
pyros.publish("move/orbit", str(int(orbitDistance + 70)) + " " + str(int(roverSpeed)))
if DEBUG_JOYSTICK:
print("Orbit sen:" + str(int(orbitDistance + 70)) + " s:" + str(roverSpeed) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
else:
olx = lx
lx = calculateExpo(lx, EXPO) / 2
roverSpeed = calcRoverSpeed(lx)
pyros.publish("move/rotate", int(roverSpeed / dividerL))
if DEBUG_JOYSTICK:
print("Rotate s:" + str(roverSpeed) + " lx:" + str(olx) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
# elif kick > 0:
# if DEBUG_JOYSTICK:
# print("Kick stop: ld:" + str(ld) + " rd:" + str(rd))
# pass
#
# alreadyStopped = 0
else:
# pyros.publish("move/drive", str(ra) + " 0")
# if ra != 0:
# print("-> move/drive " + str(ra))
roverSpeed = 0
if alreadyStopped < MAX_STOPPING:
pyros.publish("move/stop", "0")
alreadyStopped += 1
if DEBUG_JOYSTICK:
print("Rotate stop: ld:" + str(ld) + " rd:" + str(rd))
else:
target_angle += lx * 4
def handleDistance(topic, message, groups):
global sensorDistance, prepareToOrbit, orbitDistance
# print("** distance = " + message)
if "," in message:
pass
else:
split = message.split(":")
d = float(split[1])
print("d: " + str(d))
if d >= 0:
sensorDistance = d
orbitDistance = sensorDistance
if prepareToOrbit:
prepareToOrbit = False
# Main event loop
def loop():
global wobble_alpha
wobble_alpha += 1
processButtons()
if haveJoystickEvent:
processJoysticks()
if __name__ == "__main__":
try:
print("Starting jcontroller service...")
pyros.subscribe("sensor/gyro", handleGyroData)
pyros.subscribe("sensor/distance", handleDistance)
pyros.init("jcontroller-service")
print("Started jcontroller service.")
pyros.publish("servo/9", "175")
pyros.forever(0.1, loop)
except Exception as ex:
print("ERROR: " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))
| global haveJoystickEvent
reconnect = True
noError = True
while True:
if reconnect:
connected = connectToJoystick(noError)
if connected:
reconnect = False
noError = True
else:
noError = False
time.sleep(0.5)
else:
try:
evbuf = jsdev.read(8)
if evbuf:
time_of_event, value, event_type, number = struct.unpack('IhBB', evbuf)
if event_type & 0x01:
button = button_map[number]
if button:
button_states[button] = value
haveJoystickEvent = True
if event_type & 0x02:
selected_axis = axis_map[number]
if selected_axis:
fvalue = value / 32767.0
axis_states[selected_axis] = fvalue
haveJoystickEvent = True
except BaseException as e:
print("Failed to read joystick " + str(e))
reconnect = True
time.sleep(0.2) | identifier_body |
jcontroller_service.py | #!/usr/bin/env python3
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
import array
import math
import socket
import struct
import threading
import time
import traceback
from enum import Enum
from fcntl import ioctl
import pyroslib as pyros
DEBUG_AXES = False
DEBUG_BUTTONS = False
DEBUG_JOYSTICK = False
DEBUG_UDP = False
EXPO = 0.5
MAX_STOPPING = 10
JCONTROLLER_UDP_PORT = 1880
lastDividerL = 1
lastDividerR = 1
dividerL = 1
dividerR = 1
gyroAngle = 0
gyroDeltaAngle = 0
class modes(Enum):
NONE = 0
NORMAL = ' X'
GOLF = 2
PINOON = 3
DUCK_SHOOT = 4
OBSTICLE_COURSE = 5
mode = modes.DUCK_SHOOT
speeds = [25, 50, 100, 150, 300]
speed_index = 2
mode = modes.GOLF
wobble = False
wobble_alpha = 0
# We'll store the states here.
axis_states = {}
button_states = {}
haveJoystickEvent = False
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'trottle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'select',
0x121: 'lbutton',
0x122: 'rbutton',
0x123: 'start',
0x124: 'lup',
0x125: 'lright',
0x126: 'ldown',
0x127: 'lleft',
0x128: 'tl1',
0x129: 'tr1',
0x12a: 'tl2',
0x12b: 'tr2',
0x12c: 'by',
0x12d: 'ba',
0x12e: 'bb',
0x12f: 'bx',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
# 0x13a: 'select',
# 0x13b: 'start',
# 0x13c: 'mode',
# 0x13d: 'thumbl',
# 0x13e: 'thumbr',
# 0x220: 'dpad_up',
# 0x221: 'dpad_down',
# 0x222: 'dpad_left',
# 0x223: 'dpad_right',
# XBo 360 controller uses these codes.
# 0x2c0: 'dpad_left',
# 0x2c1: 'dpad_right',
# 0x2c2: 'dpad_up',
# 0x2c3: 'dpad_down',
}
orbitDistance = 0
axis_map = []
button_map = []
# Open the joystick device.
lunge_back_time = 0
def handleGyroData(topic, message, groups):
global gyroAngle, gyroDeltaAngle
data = message.split(",")
gyroChange = float(data[2])
gyroDeltaAngle = gyroChange
gyroAngle += gyroChange
gyroDeltaTime = float(data[3])
lastGyroReceivedTime = time.time()
# print("gyro angle: " + str(gyroAngle))
def connectToJoystick(printError):
global fn, jsdev
try:
fn = '/dev/input/js0'
# print('Opening %s...' % fn)
jsdev = open(fn, 'rb')
# Get the device name.
# buf = bytearray(63)
# buf = array.array('c', ['\0'] * 64)
buf = array.array('b', [0] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tostring()
print('Device name: %s' % js_name)
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = axis_names.get(axis, '0x%02x' % axis)
axis_map.append(axis_name)
axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = button_names.get(btn, '0x%03x' % btn)
button_map.append(btn_name)
button_states[btn_name] = 0
print('%d axes found: %s' % (num_axes, ', '.join(axis_map)))
print('%d buttons found: %s' % (num_buttons, ', '.join(button_map)))
return True
except Exception as e:
if printError:
print("Failed to connect to joystick" + str(e))
return False
except BaseException as e:
if printError:
print("Failed to connect to joystick - no exception given " + str(e))
return False
def readEvents():
global haveJoystickEvent
reconnect = True
noError = True
while True:
if reconnect:
connected = connectToJoystick(noError)
if connected:
reconnect = False
noError = True
else:
noError = False
time.sleep(0.5)
else:
try:
evbuf = jsdev.read(8)
if evbuf:
time_of_event, value, event_type, number = struct.unpack('IhBB', evbuf)
if event_type & 0x01:
button = button_map[number]
if button:
button_states[button] = value
haveJoystickEvent = True
if event_type & 0x02:
selected_axis = axis_map[number]
if selected_axis:
fvalue = value / 32767.0
axis_states[selected_axis] = fvalue
haveJoystickEvent = True
except BaseException as e:
print("Failed to read joystick " + str(e))
reconnect = True
time.sleep(0.2)
def readUDPEvents():
global haveJoystickEvent
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', JCONTROLLER_UDP_PORT))
s.settimeout(10)
print(" Started receive thread...")
while True:
try:
data, addr = s.recvfrom(1024)
p = str(data, 'utf-8')
if p.startswith("J#"):
if DEBUG_UDP:
print(" received " + p)
kvps = p[2:].split(";")
for kvp in kvps:
kv = kvp.split("=")
if len(kv) == 2:
key = kv[0]
value = kv[1]
if key in axis_states:
axis_states[key] = float(value)
haveJoystickEvent = True
elif key in button_states:
button_states[key] = int(value)
haveJoystickEvent = True
except:
pass
def startReadEventsLoopThread():
thread = threading.Thread(target=readEvents, args=())
thread.daemon = True
thread.start()
def startReadUDPEventsLoopThread():
thread = threading.Thread(target=readUDPEvents, args=())
thread.daemon = True
thread.start()
startReadEventsLoopThread()
startReadUDPEventsLoopThread()
topSpeed = 50
sensorDistance = 200
directionLock = False
alreadyStopped = 0
lastX1 = 0
lastY1 = 0
lastX2 = 0
lastY2 = 0
lastX3 = 0
lastY3 = 0
lastSelect = False
lastStart = False
lastTL = False
lastTL2 = False
lastTR = False
lastTR2 = False
lastA = False
lastB = False
lastBX = False
lastBY = False
lastLButton = False
lastRButton = False
lastTopSpeed = topSpeed
doOrbit = False
prepareToOrbit = False
continueToReadDistance = False
boost = False
kick = 0
fullSpeed = False
axis_states["x"] = 0
axis_states["y"] = 0
axis_states["rx"] = 0
axis_states["ry"] = 0
# for button_name in button_names:
# button_states[button_names[button_name]] = 0
lastBoost = False
balLocked = False
target_charge = 0
charge = 0
last_charge = 0
elevation = 0
target_angle = 0
def moveServo(servoid, angle):
# TODO move this out to separate service
f = open("/dev/servoblaster", 'w')
f.write(str(servoid) + "=" + str(angle) + "\n")
f.close()
pyros.publish("servo/" + str(servoid), str(int(angle)))
def setCharge(value):
global charge, lastCharge
lastCharge = charge
charge = value
if not charge == lastCharge:
motorSpeed = int(85 + charge * (105 - 85) / 100)
print("DUCK motor speed: " + str(motorSpeed) + " charge:" + str(charge))
# moveServo(13, motorSpeed)
pyros.publish("servo/13", str(motorSpeed))
def addCharge(ammount):
global charge
setCharge(charge + ammount)
def lockDirectionLoop():
global gyroAngle, target_angle, directionLock
if directionLock:
difference = target_angle - gyroAngle
multiplier = 1
turn_speed = difference * multiplier
if turn_speed < -150:
turn_speed = -150
elif turn_speed > 150:
turn_speed = 150
print("turning at speed " + str(turn_speed) + " to match the angle " + str(gyroAngle) + " to " + str(target_angle))
pyros.publish("move/rotate", str(int(turn_speed)))
def processButtons():
global lastX3, lastY3, lastSelect, lastStart
global lastTL, lastTL2, lastTR, lastTR2, lastA, lastB, lastBX, lastBY, lastDividerL, lastDividerR, target_charge
global lastLButton, lastRButton, dividerL, dividerR, directionLock
global topSpeed, prepareToOrbit, continueToReadDistance, doOrbit, boost, kick, lastBoost, lastTL, balLocked, charge, mode, elevation, fullSpeed, target_angle, speed_index, speeds
global wobble
global lup
# print("Axis states: " + str(axis_states))
# 4 ly up: "TopBtn2", lx r 5: "PinkieBtn", ly down 6: "BaseBtn", lx left 7:"BaseBtn2"
# x3 = int(axis_states["hat0x"])
# y3 = int(axis_states["hat0y"])
try:
lup = button_states["lup"]
lright = button_states["lright"]
ldown = button_states["ldown"]
lleft = button_states["lleft"]
x3 = 0
y3 = 0
if lup:
y3 = -1
if ldown:
y3 = 1
if lleft:
x3 = -1
if lright:
x3 = 1
tl = button_states["tl1"]
tl2 = button_states["tl2"]
tr = button_states["tr1"]
tr2 = button_states["tr2"]
a = button_states["ba"]
bb = button_states["bb"]
bx = button_states["bx"]
by = button_states["by"]
start = button_states["start"]
select = button_states["select"]
lbutton = button_states["lbutton"]
rbutton = button_states["rbutton"]
lastDividerR = dividerR
if rbutton:
dividerR = 4
else:
dividerR = 1
lastDividerL = dividerL
if lbutton:
dividerL = 4
else:
dividerL = 1
if y3 != lastY3:
if y3 < 0:
if speed_index < len(speeds) :
speed_index += 1
topSpeed = speeds[speed_index]
elif y3 > 0:
if speed_index > 0:
speed_index -= 1
topSpeed = speeds[speed_index]
if x3 != lastX3:
if x3 > 0:
topSpeed += 100
if topSpeed > 300:
topSpeed = 300
elif x3 < 0:
if topSpeed >= 100:
topSpeed -= 100
if topSpeed < 30:
topSpeed = 30
elif topSpeed > 50:
topSpeed = 50
wobble = False
# print("mode: " + str(mode))
# if mode == modes.PINOON:
# fullSpeed = tl
# lastBoost = boost
# boost = tr
#
# wobble = tr2
# if not boost:
# if tl2 and not lastTL2:
# print("prepared to do orbit")
# doOrbit = True
# pyros.publish("sensor/distance/read", "0")
#
# doOrbit = tl2
# # if tl2:
# # pyros.publish("sensor/distance/read", "0")
# else:
# doOrbit = False
# if mode == modes.OBSTICAL_COURSE:
fullSpeed = tl
pyros.publish("sensor/gyro/continuous", "continue")
if tr2 and not lastTR2:
directionLock = True
target_angle = gyroAngle
elif not tr2 and lastTR2:
directionLock = False
pyros.publish("move/stop", "0")
lockDirectionLoop()
# if mode == modes.GOLF:
# print("golf")
#
# fullSpeed = tl
# print("tr2: " + str(tr2))
# if tr2 and not lastTR2:
# balLocked = not balLocked
#
# if balLocked:
# # moveServo(9, 220)
# moveServo(9, 217)
#
# print("locke")
#
#
# if tr:
# moveServo(9, 100)
# print("tr")
#
# balLocked = False
# else:
# if not balLocked:
# print("not locked")
#
# moveServo(9, 150)
#
# if bx and bx != lastBX:
# kick = 1
# print("kick")
# pyros.publish("move/drive", "0 300")
# pyros.sleep(1)
# pyros.publish("move/drive", "0 0")
# if mode == modes.DUCK_SHOOT:
# print("shooting ducks")
# if tr:
# pyros.publish("servo/9", "115")
# else:
# pyros.publish("servo/9", "175")
#
# if tl and not lastTL:
# target_charge = 100
# print("charging")
# elif not tl and lastTL:
# target_charge = 65
# print("decharging")
#
#
# if charge > target_charge:
# addCharge(-1)
# elif charge < target_charge:
# addCharge(1)
# setCharge(charge)
#
# if tr2:
# if elevation > -25:
# print("waaaa")
# elevation -= 1
# if tl2:
# if elevation < 25:
# print("weeeee")
# elevation += 1
#
# servoValue = 150 + elevation
# # print("elevation: " + str(elevation) + " servo: " + str(servoValue))
# print("targetcharge: " + str(target_charge) + " charge: " + str(charge))
# pyros.publish("servo/12", str(servoValue))
# else:
# fullSpeed = tl
# if mode != modes.DUCK_SHOOT:
# target_charge = 0
# setCharge(0)
if a:
mode = modes.OBSTICAL_COURSE
print("obsitcal")
elif bb:
mode = modes.DUCK_SHOOT
target_charge = 0
setCharge(0)
elevation = 0
elif bx:
mode = modes.GOLF
print("golf")
elif by:
mode = modes.PINOON
lastX3 = x3
lastY3 = y3
lastStart = start
lastTL = tl
lastTL2 = tl2
lastTR = tr
lastTR2 = tr2
lastA = a
lastB = bb
lastBX = bx
lastBY = by
lastSelect = select
lastLButton = lbutton
lastRButton = rbutton
if DEBUG_BUTTONS:
print("OK Button states: " + str(button_states))
if DEBUG_AXES:
print("OK Axis states: " + str(axis_states))
except Exception as e:
if DEBUG_BUTTONS:
print("ERR Button states: " + str(button_states) + str(e))
if DEBUG_AXES:
print("ERR Axis states: " + str(axis_states) + str(e))
def | (speed):
global fullSpeed
spd = speed
if boost or lunge_back_time > 0 or fullSpeed:
# spd = int(speed * topSpeed * 2)
# if spd > 300:
if speed > 0:
spd = 300
elif speed < 0:
spd = -300
else:
spd = 0
else:
spd = int(speed * topSpeed)
if spd > 300:
spd = 300
elif spd < -300:
spd = -300
return spd
def calculateExpo(v, expoPercentage):
if v >= 0:
return v * v * expoPercentage + v * (1.0 - expoPercentage)
else:
return - v * v * expoPercentage + v * (1.0 - expoPercentage)
def calcRoverDistance(distance):
if distance >= 0:
distance = abs(distance)
distance = 1.0 - distance
distance += 0.2
distance *= 500
else:
distance = abs(distance)
distance = 1.0 - distance
distance += 0.2
distance = - distance * 500
return int(distance)
def processJoysticks():
global kick, dividerR, dividerL, lastDividerR, lastDividerL, boost, lunge_back_time, alreadyStopped, orbitDistance, directionLock, target_angle, wobble, wobble_alpha
lx = float(axis_states["x"])
ly = float(axis_states["y"])
rx = float(axis_states["rx"])
ry = float(axis_states["ry"])
if wobble:
print("wobble")
rx = float(math.sin(wobble_alpha * 0.9))
if ry < 0.1 and ry > -0.1 and rx < 0.1 and rx > -0.1:
if boost:
lunge_back_time += 1
if lunge_back_time > 6:
lunge_back_time = 6
ry = -1
lx = 0
ly = 0
else:
if lunge_back_time > 0:
lunge_back_time -= 1
ry = 1
lx = 0
ly = 0
else:
if not ry > -0:
lunge_back_time = 0
ld = math.sqrt(lx * lx + ly * ly)
rd = math.sqrt(rx * rx + ry * ry)
ra = math.atan2(rx, -ry) * 180 / math.pi
if not directionLock:
if ld < 0.1 < rd:
distance = rd
distance = calculateExpo(distance, EXPO)
roverSpeed = calcRoverSpeed(distance)
pyros.publish("move/drive", str(round(ra, 1)) + " " + str(int(roverSpeed / dividerR)))
if DEBUG_JOYSTICK:
print("Driving a:" + str(round(ra, 1)) + " s:" + str(roverSpeed) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
elif ld > 0.1 and rd > 0.1:
ory = ry
olx = lx
ry = calculateExpo(ry, EXPO)
lx = calculateExpo(lx, EXPO)
roverSpeed = -calcRoverSpeed(ry) * 1.3
roverTurningDistance = calcRoverDistance(lx)
pyros.publish("move/steer", str(roverTurningDistance) + " " + str(int(roverSpeed / dividerR)))
if DEBUG_JOYSTICK:
print("Steering d:" + str(roverTurningDistance) + " s:" + str(roverSpeed) + " ry: " + str(ory) + " lx:" + str(olx) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
elif ld > 0.1:
if doOrbit:
orbitDistance = sensorDistance + (ly * 5)
roverSpeed = calcRoverSpeed(lx) / 1
# print("speed: " + str(roverSpeed))
# print(str(int(orbitDistance + 70)) + " " + str(int(roverSpeed)))
pyros.publish("move/orbit", str(int(orbitDistance + 70)) + " " + str(int(roverSpeed)))
if DEBUG_JOYSTICK:
print("Orbit sen:" + str(int(orbitDistance + 70)) + " s:" + str(roverSpeed) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
else:
olx = lx
lx = calculateExpo(lx, EXPO) / 2
roverSpeed = calcRoverSpeed(lx)
pyros.publish("move/rotate", int(roverSpeed / dividerL))
if DEBUG_JOYSTICK:
print("Rotate s:" + str(roverSpeed) + " lx:" + str(olx) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
# elif kick > 0:
# if DEBUG_JOYSTICK:
# print("Kick stop: ld:" + str(ld) + " rd:" + str(rd))
# pass
#
# alreadyStopped = 0
else:
# pyros.publish("move/drive", str(ra) + " 0")
# if ra != 0:
# print("-> move/drive " + str(ra))
roverSpeed = 0
if alreadyStopped < MAX_STOPPING:
pyros.publish("move/stop", "0")
alreadyStopped += 1
if DEBUG_JOYSTICK:
print("Rotate stop: ld:" + str(ld) + " rd:" + str(rd))
else:
target_angle += lx * 4
def handleDistance(topic, message, groups):
global sensorDistance, prepareToOrbit, orbitDistance
# print("** distance = " + message)
if "," in message:
pass
else:
split = message.split(":")
d = float(split[1])
print("d: " + str(d))
if d >= 0:
sensorDistance = d
orbitDistance = sensorDistance
if prepareToOrbit:
prepareToOrbit = False
# Main event loop
def loop():
global wobble_alpha
wobble_alpha += 1
processButtons()
if haveJoystickEvent:
processJoysticks()
if __name__ == "__main__":
try:
print("Starting jcontroller service...")
pyros.subscribe("sensor/gyro", handleGyroData)
pyros.subscribe("sensor/distance", handleDistance)
pyros.init("jcontroller-service")
print("Started jcontroller service.")
pyros.publish("servo/9", "175")
pyros.forever(0.1, loop)
except Exception as ex:
print("ERROR: " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))
| calcRoverSpeed | identifier_name |
jcontroller_service.py | #!/usr/bin/env python3
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
import array
import math
import socket
import struct
import threading
import time
import traceback
from enum import Enum
from fcntl import ioctl
import pyroslib as pyros
DEBUG_AXES = False
DEBUG_BUTTONS = False
DEBUG_JOYSTICK = False
DEBUG_UDP = False
EXPO = 0.5
MAX_STOPPING = 10
JCONTROLLER_UDP_PORT = 1880
lastDividerL = 1
lastDividerR = 1
dividerL = 1
dividerR = 1
gyroAngle = 0
gyroDeltaAngle = 0
class modes(Enum):
NONE = 0
NORMAL = ' X'
GOLF = 2
PINOON = 3
DUCK_SHOOT = 4
OBSTICLE_COURSE = 5
mode = modes.DUCK_SHOOT
speeds = [25, 50, 100, 150, 300]
speed_index = 2
mode = modes.GOLF
wobble = False
wobble_alpha = 0
# We'll store the states here.
axis_states = {}
button_states = {}
haveJoystickEvent = False
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'trottle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'select',
0x121: 'lbutton',
0x122: 'rbutton',
0x123: 'start',
0x124: 'lup',
0x125: 'lright',
0x126: 'ldown',
0x127: 'lleft',
0x128: 'tl1',
0x129: 'tr1',
0x12a: 'tl2',
0x12b: 'tr2',
0x12c: 'by',
0x12d: 'ba',
0x12e: 'bb',
0x12f: 'bx',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
# 0x13a: 'select',
# 0x13b: 'start',
# 0x13c: 'mode',
# 0x13d: 'thumbl',
# 0x13e: 'thumbr',
# 0x220: 'dpad_up',
# 0x221: 'dpad_down',
# 0x222: 'dpad_left',
# 0x223: 'dpad_right',
# XBo 360 controller uses these codes.
# 0x2c0: 'dpad_left',
# 0x2c1: 'dpad_right',
# 0x2c2: 'dpad_up',
# 0x2c3: 'dpad_down',
}
orbitDistance = 0
axis_map = []
button_map = []
# Open the joystick device.
lunge_back_time = 0
def handleGyroData(topic, message, groups):
global gyroAngle, gyroDeltaAngle
data = message.split(",")
gyroChange = float(data[2])
gyroDeltaAngle = gyroChange
gyroAngle += gyroChange
gyroDeltaTime = float(data[3])
lastGyroReceivedTime = time.time()
# print("gyro angle: " + str(gyroAngle))
def connectToJoystick(printError):
global fn, jsdev
try:
fn = '/dev/input/js0'
# print('Opening %s...' % fn)
jsdev = open(fn, 'rb')
# Get the device name.
# buf = bytearray(63)
# buf = array.array('c', ['\0'] * 64)
buf = array.array('b', [0] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tostring()
print('Device name: %s' % js_name)
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = axis_names.get(axis, '0x%02x' % axis)
axis_map.append(axis_name)
axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = button_names.get(btn, '0x%03x' % btn)
button_map.append(btn_name)
button_states[btn_name] = 0
print('%d axes found: %s' % (num_axes, ', '.join(axis_map)))
print('%d buttons found: %s' % (num_buttons, ', '.join(button_map)))
return True
except Exception as e:
if printError:
print("Failed to connect to joystick" + str(e))
return False
except BaseException as e:
if printError:
print("Failed to connect to joystick - no exception given " + str(e))
return False
def readEvents():
global haveJoystickEvent
reconnect = True
noError = True
while True:
if reconnect:
connected = connectToJoystick(noError)
if connected:
reconnect = False
noError = True
else:
noError = False
time.sleep(0.5)
else:
try:
evbuf = jsdev.read(8)
if evbuf:
time_of_event, value, event_type, number = struct.unpack('IhBB', evbuf)
if event_type & 0x01:
button = button_map[number]
if button:
button_states[button] = value
haveJoystickEvent = True
if event_type & 0x02:
selected_axis = axis_map[number]
if selected_axis:
fvalue = value / 32767.0
axis_states[selected_axis] = fvalue
haveJoystickEvent = True
except BaseException as e:
print("Failed to read joystick " + str(e))
reconnect = True
time.sleep(0.2)
def readUDPEvents():
global haveJoystickEvent
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', JCONTROLLER_UDP_PORT))
s.settimeout(10)
print(" Started receive thread...")
while True:
try:
data, addr = s.recvfrom(1024)
p = str(data, 'utf-8')
if p.startswith("J#"):
if DEBUG_UDP:
print(" received " + p)
kvps = p[2:].split(";")
for kvp in kvps:
kv = kvp.split("=")
if len(kv) == 2:
key = kv[0]
value = kv[1]
if key in axis_states:
axis_states[key] = float(value)
haveJoystickEvent = True
elif key in button_states:
button_states[key] = int(value)
haveJoystickEvent = True
except:
pass
def startReadEventsLoopThread():
thread = threading.Thread(target=readEvents, args=())
thread.daemon = True
thread.start()
def startReadUDPEventsLoopThread():
thread = threading.Thread(target=readUDPEvents, args=())
thread.daemon = True
thread.start()
startReadEventsLoopThread()
startReadUDPEventsLoopThread()
topSpeed = 50
sensorDistance = 200
directionLock = False
alreadyStopped = 0
lastX1 = 0
lastY1 = 0
lastX2 = 0
lastY2 = 0
lastX3 = 0
lastY3 = 0
lastSelect = False
lastStart = False
lastTL = False
lastTL2 = False
lastTR = False
lastTR2 = False
lastA = False
lastB = False
lastBX = False
lastBY = False
lastLButton = False
lastRButton = False
lastTopSpeed = topSpeed
doOrbit = False
prepareToOrbit = False
continueToReadDistance = False
boost = False
kick = 0
fullSpeed = False
axis_states["x"] = 0
axis_states["y"] = 0
axis_states["rx"] = 0
axis_states["ry"] = 0
# for button_name in button_names:
# button_states[button_names[button_name]] = 0
lastBoost = False
balLocked = False
target_charge = 0
charge = 0
last_charge = 0
elevation = 0
target_angle = 0
def moveServo(servoid, angle):
# TODO move this out to separate service
f = open("/dev/servoblaster", 'w')
f.write(str(servoid) + "=" + str(angle) + "\n")
f.close()
pyros.publish("servo/" + str(servoid), str(int(angle)))
def setCharge(value):
global charge, lastCharge
lastCharge = charge
charge = value
if not charge == lastCharge:
motorSpeed = int(85 + charge * (105 - 85) / 100)
print("DUCK motor speed: " + str(motorSpeed) + " charge:" + str(charge))
# moveServo(13, motorSpeed)
pyros.publish("servo/13", str(motorSpeed))
def addCharge(ammount):
global charge
setCharge(charge + ammount)
def lockDirectionLoop():
global gyroAngle, target_angle, directionLock
if directionLock:
difference = target_angle - gyroAngle
multiplier = 1
turn_speed = difference * multiplier
if turn_speed < -150:
turn_speed = -150
elif turn_speed > 150:
turn_speed = 150
print("turning at speed " + str(turn_speed) + " to match the angle " + str(gyroAngle) + " to " + str(target_angle))
pyros.publish("move/rotate", str(int(turn_speed)))
def processButtons():
global lastX3, lastY3, lastSelect, lastStart
global lastTL, lastTL2, lastTR, lastTR2, lastA, lastB, lastBX, lastBY, lastDividerL, lastDividerR, target_charge
global lastLButton, lastRButton, dividerL, dividerR, directionLock
global topSpeed, prepareToOrbit, continueToReadDistance, doOrbit, boost, kick, lastBoost, lastTL, balLocked, charge, mode, elevation, fullSpeed, target_angle, speed_index, speeds
global wobble
global lup
# print("Axis states: " + str(axis_states))
# 4 ly up: "TopBtn2", lx r 5: "PinkieBtn", ly down 6: "BaseBtn", lx left 7:"BaseBtn2"
# x3 = int(axis_states["hat0x"])
# y3 = int(axis_states["hat0y"])
try:
lup = button_states["lup"]
lright = button_states["lright"]
ldown = button_states["ldown"]
lleft = button_states["lleft"]
x3 = 0
y3 = 0
if lup:
y3 = -1
if ldown:
y3 = 1
if lleft:
x3 = -1
if lright:
x3 = 1
tl = button_states["tl1"]
tl2 = button_states["tl2"]
tr = button_states["tr1"]
tr2 = button_states["tr2"]
a = button_states["ba"]
bb = button_states["bb"]
bx = button_states["bx"]
by = button_states["by"]
start = button_states["start"]
select = button_states["select"]
lbutton = button_states["lbutton"]
rbutton = button_states["rbutton"]
lastDividerR = dividerR
if rbutton:
dividerR = 4
else:
dividerR = 1
lastDividerL = dividerL
if lbutton:
dividerL = 4
else:
dividerL = 1
if y3 != lastY3:
if y3 < 0:
if speed_index < len(speeds) :
speed_index += 1
topSpeed = speeds[speed_index]
elif y3 > 0:
if speed_index > 0:
speed_index -= 1
topSpeed = speeds[speed_index]
if x3 != lastX3:
if x3 > 0:
topSpeed += 100
if topSpeed > 300:
topSpeed = 300
elif x3 < 0:
if topSpeed >= 100:
topSpeed -= 100
if topSpeed < 30:
topSpeed = 30
elif topSpeed > 50:
topSpeed = 50
wobble = False
# print("mode: " + str(mode))
# if mode == modes.PINOON:
# fullSpeed = tl
# lastBoost = boost
# boost = tr
#
# wobble = tr2
# if not boost:
# if tl2 and not lastTL2:
# print("prepared to do orbit")
# doOrbit = True
# pyros.publish("sensor/distance/read", "0")
#
# doOrbit = tl2
# # if tl2:
# # pyros.publish("sensor/distance/read", "0")
# else:
# doOrbit = False
# if mode == modes.OBSTICAL_COURSE:
fullSpeed = tl
pyros.publish("sensor/gyro/continuous", "continue")
if tr2 and not lastTR2:
directionLock = True
target_angle = gyroAngle
elif not tr2 and lastTR2:
directionLock = False
pyros.publish("move/stop", "0")
lockDirectionLoop()
# if mode == modes.GOLF:
# print("golf")
#
# fullSpeed = tl
# print("tr2: " + str(tr2))
# if tr2 and not lastTR2:
# balLocked = not balLocked
#
# if balLocked:
# # moveServo(9, 220)
# moveServo(9, 217)
#
# print("locke")
#
#
# if tr:
# moveServo(9, 100)
# print("tr")
#
# balLocked = False
# else:
# if not balLocked:
# print("not locked")
#
# moveServo(9, 150)
#
# if bx and bx != lastBX:
# kick = 1
# print("kick")
# pyros.publish("move/drive", "0 300")
# pyros.sleep(1)
# pyros.publish("move/drive", "0 0")
# if mode == modes.DUCK_SHOOT:
# print("shooting ducks")
# if tr:
# pyros.publish("servo/9", "115")
# else:
# pyros.publish("servo/9", "175")
#
# if tl and not lastTL:
# target_charge = 100
# print("charging")
# elif not tl and lastTL:
# target_charge = 65
# print("decharging")
#
#
# if charge > target_charge:
# addCharge(-1)
# elif charge < target_charge:
# addCharge(1)
# setCharge(charge)
#
# if tr2:
# if elevation > -25:
# print("waaaa")
# elevation -= 1
# if tl2:
# if elevation < 25:
# print("weeeee")
# elevation += 1
#
# servoValue = 150 + elevation
# # print("elevation: " + str(elevation) + " servo: " + str(servoValue))
# print("targetcharge: " + str(target_charge) + " charge: " + str(charge))
# pyros.publish("servo/12", str(servoValue))
# else:
# fullSpeed = tl
# if mode != modes.DUCK_SHOOT:
# target_charge = 0
# setCharge(0)
if a:
mode = modes.OBSTICAL_COURSE
print("obsitcal")
elif bb:
mode = modes.DUCK_SHOOT
target_charge = 0
setCharge(0)
elevation = 0
elif bx:
mode = modes.GOLF
print("golf")
elif by:
mode = modes.PINOON
lastX3 = x3
lastY3 = y3
lastStart = start
lastTL = tl
lastTL2 = tl2
lastTR = tr
lastTR2 = tr2
lastA = a
lastB = bb
lastBX = bx
lastBY = by
lastSelect = select
lastLButton = lbutton
lastRButton = rbutton
if DEBUG_BUTTONS:
print("OK Button states: " + str(button_states))
if DEBUG_AXES:
print("OK Axis states: " + str(axis_states))
except Exception as e:
if DEBUG_BUTTONS:
print("ERR Button states: " + str(button_states) + str(e))
if DEBUG_AXES:
print("ERR Axis states: " + str(axis_states) + str(e))
def calcRoverSpeed(speed):
global fullSpeed
spd = speed
if boost or lunge_back_time > 0 or fullSpeed:
# spd = int(speed * topSpeed * 2)
# if spd > 300:
if speed > 0:
spd = 300
elif speed < 0:
spd = -300
else:
spd = 0
else:
spd = int(speed * topSpeed)
if spd > 300:
spd = 300
elif spd < -300:
spd = -300
return spd
def calculateExpo(v, expoPercentage):
if v >= 0:
return v * v * expoPercentage + v * (1.0 - expoPercentage)
else:
return - v * v * expoPercentage + v * (1.0 - expoPercentage)
def calcRoverDistance(distance):
if distance >= 0:
distance = abs(distance)
distance = 1.0 - distance
distance += 0.2
distance *= 500
else:
distance = abs(distance)
distance = 1.0 - distance
distance += 0.2
distance = - distance * 500
return int(distance)
def processJoysticks():
global kick, dividerR, dividerL, lastDividerR, lastDividerL, boost, lunge_back_time, alreadyStopped, orbitDistance, directionLock, target_angle, wobble, wobble_alpha
lx = float(axis_states["x"])
ly = float(axis_states["y"])
rx = float(axis_states["rx"])
ry = float(axis_states["ry"])
if wobble:
print("wobble")
rx = float(math.sin(wobble_alpha * 0.9))
if ry < 0.1 and ry > -0.1 and rx < 0.1 and rx > -0.1:
if boost:
lunge_back_time += 1
if lunge_back_time > 6:
lunge_back_time = 6
ry = -1
lx = 0
ly = 0
else:
if lunge_back_time > 0:
lunge_back_time -= 1
ry = 1
lx = 0
ly = 0
else:
if not ry > -0:
lunge_back_time = 0
ld = math.sqrt(lx * lx + ly * ly)
rd = math.sqrt(rx * rx + ry * ry)
ra = math.atan2(rx, -ry) * 180 / math.pi
if not directionLock:
if ld < 0.1 < rd:
distance = rd
distance = calculateExpo(distance, EXPO)
roverSpeed = calcRoverSpeed(distance)
pyros.publish("move/drive", str(round(ra, 1)) + " " + str(int(roverSpeed / dividerR)))
if DEBUG_JOYSTICK:
print("Driving a:" + str(round(ra, 1)) + " s:" + str(roverSpeed) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
elif ld > 0.1 and rd > 0.1:
ory = ry
olx = lx
ry = calculateExpo(ry, EXPO)
lx = calculateExpo(lx, EXPO)
roverSpeed = -calcRoverSpeed(ry) * 1.3
roverTurningDistance = calcRoverDistance(lx)
pyros.publish("move/steer", str(roverTurningDistance) + " " + str(int(roverSpeed / dividerR)))
if DEBUG_JOYSTICK:
print("Steering d:" + str(roverTurningDistance) + " s:" + str(roverSpeed) + " ry: " + str(ory) + " lx:" + str(olx) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
elif ld > 0.1:
if doOrbit:
orbitDistance = sensorDistance + (ly * 5)
roverSpeed = calcRoverSpeed(lx) / 1
# print("speed: " + str(roverSpeed))
# print(str(int(orbitDistance + 70)) + " " + str(int(roverSpeed)))
pyros.publish("move/orbit", str(int(orbitDistance + 70)) + " " + str(int(roverSpeed)))
if DEBUG_JOYSTICK:
print("Orbit sen:" + str(int(orbitDistance + 70)) + " s:" + str(roverSpeed) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
else:
olx = lx
lx = calculateExpo(lx, EXPO) / 2
roverSpeed = calcRoverSpeed(lx)
pyros.publish("move/rotate", int(roverSpeed / dividerL))
if DEBUG_JOYSTICK:
print("Rotate s:" + str(roverSpeed) + " lx:" + str(olx) + " ld:" + str(ld) + " rd:" + str(rd))
alreadyStopped = 0
# elif kick > 0:
# if DEBUG_JOYSTICK:
# print("Kick stop: ld:" + str(ld) + " rd:" + str(rd))
# pass
#
# alreadyStopped = 0
else:
# pyros.publish("move/drive", str(ra) + " 0")
# if ra != 0:
# print("-> move/drive " + str(ra))
roverSpeed = 0
if alreadyStopped < MAX_STOPPING:
pyros.publish("move/stop", "0")
alreadyStopped += 1
if DEBUG_JOYSTICK:
print("Rotate stop: ld:" + str(ld) + " rd:" + str(rd))
else:
target_angle += lx * 4
def handleDistance(topic, message, groups):
global sensorDistance, prepareToOrbit, orbitDistance
# print("** distance = " + message)
if "," in message:
pass
else:
|
# Main event loop
def loop():
global wobble_alpha
wobble_alpha += 1
processButtons()
if haveJoystickEvent:
processJoysticks()
if __name__ == "__main__":
try:
print("Starting jcontroller service...")
pyros.subscribe("sensor/gyro", handleGyroData)
pyros.subscribe("sensor/distance", handleDistance)
pyros.init("jcontroller-service")
print("Started jcontroller service.")
pyros.publish("servo/9", "175")
pyros.forever(0.1, loop)
except Exception as ex:
print("ERROR: " + str(ex) + "\n" + ''.join(traceback.format_tb(ex.__traceback__)))
| split = message.split(":")
d = float(split[1])
print("d: " + str(d))
if d >= 0:
sensorDistance = d
orbitDistance = sensorDistance
if prepareToOrbit:
prepareToOrbit = False | conditional_block |
lib.rs | use tokio::time::delay_for;
use core::time::Duration;
use anyhow::{anyhow, Result};
use channels::AGENT_CHANNEL;
use comm::{AgentEvent, Hub};
use config::Config;
use tokio::sync::RwLock;
use tokio::sync::RwLockReadGuard;
use tokio::sync::RwLockWriteGuard;
use crossbeam_channel::{Receiver, Sender};
use log::{debug, info};
use std::{collections::HashMap, fmt::Debug};
use std::{sync::Arc, time::Instant};
use threads::AsyncRunner;
use tokio::runtime::Builder;
use lazy_static::*;
use tokio::runtime::Runtime;
use work::{Workload, WorkloadHandle, WorkloadStatus};
use async_trait::async_trait;
pub mod cfg;
pub mod comm;
pub mod plugins;
pub mod prom;
pub mod threads;
pub mod work;
pub mod channels {
pub const AGENT_CHANNEL: &'static str = "Agent";
}
// Configuration
lazy_static! {
static ref SETTINGS: RwLock<Config> = RwLock::new(Config::default());
}
impl<'a> Default for Agent {
fn default() -> Agent {
let agt = Agent {
name: "Unnamed Agent".to_string(),
features: Vec::default(),
runtime: Runtime::new().unwrap(),
state: AgentState::Stopped,
work_handles: Vec::default(),
hub: Hub::new(),
};
agt
}
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum AgentState {
Ok,
Stopped,
Error,
}
#[derive(Clone, Copy, Debug)]
pub struct Schedule {}
#[derive(Clone, Debug)]
pub enum AgentCommand {
Start,
Schedule(Schedule, Workload),
Stop,
}
#[derive(Debug, Clone)]
pub struct FeatureConfig {
pub bind_address: [u8; 4],
pub bind_port: u16,
pub settings: HashMap<String, String>,
}
#[derive(Clone)]
pub struct AgentController {
pub agent: Arc<RwLock<Agent>>,
pub signal: Option<Sender<()>>,
}
impl Debug for AgentController {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("Handle for Agent"))
}
}
impl AgentController {
pub fn new(name: &str) -> AgentController {
let runtime = Builder::new()
.threaded_scheduler()
.enable_all()
.build()
.unwrap();
let agent = Agent::with_runtime(name, runtime);
AgentController {
agent: Arc::new(RwLock::new(agent)),
signal: None,
}
}
pub async fn with_read<F>(&self, closure: F)
where
F: FnOnce(&RwLockReadGuard<Agent>) + Sync + Send + 'static,
{
let handle = self.agent.clone();
let agent = handle.read().await;
closure(&agent);
}
pub async fn with_write<F>(&self, closure: F)
where
F: FnOnce(&mut RwLockWriteGuard<Agent>) + Sync + Send + 'static,
{
let handle = self.agent.clone();
let mut agent = handle.write().await;
closure(&mut agent);
drop(agent);
}
pub async fn send(&self, event: AgentEvent) -> Result<()> {
let agent = self.agent.read().await;
let mut hub = agent.hub.write().await;
let channel = hub.get_or_create(AGENT_CHANNEL);
channel
.sender
.send(event)
.map_err(|err| anyhow!("{}", err))?;
Ok(())
}
pub async fn get_channel(&self, name: &str) -> (Receiver<AgentEvent>, Sender<AgentEvent>) {
let agent = self.agent.read().await;
let chan = agent.hub.clone().write().await.get_or_create(name);
drop(agent);
(chan.receiver, chan.sender)
}
pub fn with_runtime(name: &str, runtime: Runtime) -> AgentController {
let agent = Agent::with_runtime(name, runtime);
AgentController {
agent: Arc::new(RwLock::new(agent)),
signal: None,
}
}
pub async fn add_feature(&mut self, handle: FeatureHandle) -> &mut AgentController {
let mut agent = self.agent.write().await;
agent.features.push(handle);
drop(agent);
self
}
pub async fn start(&mut self) -> Receiver<()> {
let agent = self.agent.read().await;
info!("Feature count: {}", agent.features.len());
for f in &agent.features {
let feature_name = f.read().await.name();
let (rx, tx) = self.get_channel(AGENT_CHANNEL).await;
f.write().await.init(self.clone()).await;
let feature_handle = f.clone();
agent.runtime.spawn(async move {
debug!("Spawning feature communication channel.");
loop {
let fh = feature_handle.clone();
let message = rx
.recv()
.map_err(|err| anyhow!("Error receiving message: {}", err));
if let Ok(message) = message {
info!("Got AgentEvent {}", message);
let mut feature = fh.write().await;
feature.on_event(message.clone()).await;
debug!("Done writing event to feature");
}
}
});
let _ = tx
.send(AgentEvent::Started)
.map_err(|err| {
anyhow!(
"Error sending Agent Start event to {}: {}",
feature_name,
err
)
})
.unwrap();
}
drop(agent);
self.agent.write().await.state = AgentState::Ok;
let (rx, tx) = crossbeam_channel::bounded::<()>(1);
self.signal = Some(rx);
tx
}
pub async fn schedule<T>(&self, interval : Duration, func : T) where T : Fn() -> () + Clone + Send + 'static {
let handle = {
let agent = self.agent.read().await;
agent.runtime.handle().clone()
};
let f = func.clone();
handle.spawn(async move {
info!("Starting scheduled function");
loop {
f.clone()();
delay_for(interval).await;
}
});
}
}
/*
impl FeatureHandle {
pub fn new<T>(feature: T) -> FeatureHandle
where
T: Feature + 'static,
{
FeatureHandle {
handle: Arc::new(tokio::sync::RwLock::new(feature)),
}
}
}
impl<'a> FeatureHandle {
pub async fn with_write<F>(&self, callback: F)
where
F: FnOnce(&mut RwLockWriteGuard<dyn Feature>) + Sync + Send + 'static,
{
callback(&mut self.handle.write().await);
}
pub async fn with_read<F>(&self, callback: F)
where
F: FnOnce(&RwLockReadGuard<dyn Feature>) + Sync + Send + 'static,
{
callback(&self.handle.read().await);
}
}*/
type HubHandle = Arc<RwLock<Hub<AgentEvent>>>;
pub struct Agent {
pub name: String,
pub features: Vec<FeatureHandle>,
pub state: AgentState,
pub runtime: Runtime,
pub hub: HubHandle,
pub work_handles: Vec<Arc<tokio::sync::RwLock<WorkloadHandle>>>,
}
impl Agent {
pub fn new(name: &str) -> Agent {
Agent {
name: name.to_string(),
..Default::default()
}
}
pub fn with_runtime(name: &str, runtime: Runtime) -> Agent {
Agent {
name: name.to_string(),
features: Vec::default(),
runtime: runtime,
state: AgentState::Stopped,
work_handles: Vec::default(),
hub: Hub::new(),
}
}
pub fn status(&self) -> AgentState {
self.state
}
/// Run a command on the agent.
pub async fn command(&mut self, cmd: AgentCommand) {
let channel = self.hub.write().await.get_or_create(AGENT_CHANNEL);
match cmd {
AgentCommand::Start => {
self.state = AgentState::Ok;
let work_handles = self.work_handles.clone();
println!("Workload handles: {}", work_handles.len());
// rerun defered workload handles
for wl in work_handles {
let wl2 = wl.clone();
let (status, workload) = AsyncRunner::block_on(async move {
let wl = wl.read().await;
(wl.status.clone(), wl.workload.as_ref().unwrap().clone())
});
if status == WorkloadStatus::None {
self.run(workload);
}
AsyncRunner::block_on(async move {
let mut handle = wl2.write().await;
handle.status = WorkloadStatus::Complete;
});
}
channel.sender.send(AgentEvent::Started).unwrap();
}
AgentCommand::Schedule(_, _) => {}
AgentCommand::Stop => {
self.state = AgentState::Stopped;
channel.sender.send(AgentEvent::Stopped).unwrap();
}
}
}
/// Run a workload in the agent
/// This will capture the statistics of the workload run and store it in
/// the agent.
pub fn run(&mut self, workload: Workload) -> Arc<tokio::sync::RwLock<WorkloadHandle>> |
}
pub type FeatureHandle = Arc<tokio::sync::RwLock<dyn Feature>>;
#[async_trait]
pub trait Feature: Send + Sync {
async fn init(&mut self, agent: AgentController);
async fn on_event(&mut self, event: AgentEvent);
fn name(&self) -> String;
}
| {
if self.state == AgentState::Stopped {
info!("Agent stopped, Not running workload {}. Work will be deferred until the agent starts.", workload.id);
let work_handle = Arc::new(tokio::sync::RwLock::new(WorkloadHandle {
id: workload.id,
join_handle: None,
status: WorkloadStatus::None,
workload: Some(workload),
..Default::default()
}));
self.work_handles.push(work_handle.clone());
return work_handle;
}
let id = workload.id;
prom::WORKLOAD_START.inc();
let jh = self.runtime.spawn(async move {
info!("[Workload {}] Running.", id);
let start = Instant::now();
let result = workload.run().await;
let mills = start.elapsed().as_millis();
info!("[Workload {}] Duration: {}ms", id, mills as f64);
prom::WORKLOAD_TOTAL_TIME.inc_by(mills as i64);
crate::prom::WORKLOAD_TIME_COLLECTOR
.with_label_values(&["processing_time"])
.observe(mills as f64 / 1000.);
match result {
Ok(wl) => {
prom::WORKLOAD_COMPLETE.inc();
Ok(wl)
}
Err(_) => {
prom::WORKLOAD_ERROR.inc();
Err(anyhow!("Workload run failed."))
}
}
});
let work_handle = Arc::new(tokio::sync::RwLock::new(WorkloadHandle {
id: id,
join_handle: Some(jh),
status: work::WorkloadStatus::Running,
..Default::default()
}));
self.work_handles.push(work_handle.clone());
work_handle
} | identifier_body |
lib.rs | use tokio::time::delay_for;
use core::time::Duration;
use anyhow::{anyhow, Result};
use channels::AGENT_CHANNEL;
use comm::{AgentEvent, Hub};
use config::Config;
use tokio::sync::RwLock;
use tokio::sync::RwLockReadGuard;
use tokio::sync::RwLockWriteGuard;
use crossbeam_channel::{Receiver, Sender};
use log::{debug, info};
use std::{collections::HashMap, fmt::Debug};
use std::{sync::Arc, time::Instant};
use threads::AsyncRunner;
use tokio::runtime::Builder;
use lazy_static::*;
use tokio::runtime::Runtime;
use work::{Workload, WorkloadHandle, WorkloadStatus};
use async_trait::async_trait;
pub mod cfg;
pub mod comm;
pub mod plugins;
pub mod prom;
pub mod threads;
pub mod work;
pub mod channels {
pub const AGENT_CHANNEL: &'static str = "Agent";
}
// Configuration
lazy_static! {
static ref SETTINGS: RwLock<Config> = RwLock::new(Config::default());
}
impl<'a> Default for Agent {
fn default() -> Agent {
let agt = Agent {
name: "Unnamed Agent".to_string(),
features: Vec::default(),
runtime: Runtime::new().unwrap(),
state: AgentState::Stopped,
work_handles: Vec::default(),
hub: Hub::new(),
};
agt
}
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum AgentState {
Ok,
Stopped,
Error,
}
#[derive(Clone, Copy, Debug)]
pub struct Schedule {}
#[derive(Clone, Debug)]
pub enum AgentCommand {
Start,
Schedule(Schedule, Workload),
Stop,
}
#[derive(Debug, Clone)]
pub struct FeatureConfig {
pub bind_address: [u8; 4],
pub bind_port: u16,
pub settings: HashMap<String, String>,
}
#[derive(Clone)]
pub struct AgentController {
pub agent: Arc<RwLock<Agent>>,
pub signal: Option<Sender<()>>,
}
impl Debug for AgentController {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_fmt(format_args!("Handle for Agent"))
}
}
/// Cloneable handle to a shared [`Agent`]; all access goes through an async `RwLock`.
impl AgentController {
    /// Create a controller that owns a fresh multi-threaded tokio runtime.
    ///
    /// Panics if the runtime cannot be built (`unwrap` on `Builder::build`).
    pub fn new(name: &str) -> AgentController {
        let runtime = Builder::new()
            .threaded_scheduler()
            .enable_all()
            .build()
            .unwrap();
        let agent = Agent::with_runtime(name, runtime);
        AgentController {
            agent: Arc::new(RwLock::new(agent)),
            signal: None,
        }
    }

    /// Run `closure` with a shared read guard on the agent.
    pub async fn with_read<F>(&self, closure: F)
    where
        F: FnOnce(&RwLockReadGuard<Agent>) + Sync + Send + 'static,
    {
        let handle = self.agent.clone();
        let agent = handle.read().await;
        closure(&agent);
    }

    /// Run `closure` with an exclusive write guard on the agent.
    pub async fn with_write<F>(&self, closure: F)
    where
        F: FnOnce(&mut RwLockWriteGuard<Agent>) + Sync + Send + 'static,
    {
        let handle = self.agent.clone();
        let mut agent = handle.write().await;
        closure(&mut agent);
        drop(agent); // release the write lock explicitly
    }

    /// Publish `event` on the agent's main hub channel.
    ///
    /// Errors if the underlying channel sender fails (e.g. disconnected).
    pub async fn send(&self, event: AgentEvent) -> Result<()> {
        let agent = self.agent.read().await;
        let mut hub = agent.hub.write().await;
        let channel = hub.get_or_create(AGENT_CHANNEL);
        channel
            .sender
            .send(event)
            .map_err(|err| anyhow!("{}", err))?;
        Ok(())
    }

    /// Fetch (creating on demand) the named hub channel and return its endpoints.
    pub async fn get_channel(&self, name: &str) -> (Receiver<AgentEvent>, Sender<AgentEvent>) {
        let agent = self.agent.read().await;
        let chan = agent.hub.clone().write().await.get_or_create(name);
        drop(agent); // release the agent read lock before handing out endpoints
        (chan.receiver, chan.sender)
    }

    /// Build a controller around a caller-supplied runtime.
    pub fn with_runtime(name: &str, runtime: Runtime) -> AgentController {
        let agent = Agent::with_runtime(name, runtime);
        AgentController {
            agent: Arc::new(RwLock::new(agent)),
            signal: None,
        }
    }

    /// Register a feature on the agent; returns `&mut self` for chaining.
    pub async fn add_feature(&mut self, handle: FeatureHandle) -> &mut AgentController {
        let mut agent = self.agent.write().await;
        agent.features.push(handle);
        drop(agent);
        self
    }

    /// Start the agent: init each feature, spawn one event-forwarding task per
    /// feature, send each feature `AgentEvent::Started`, flip the state to
    /// `Ok`, and return the receiving end of a shutdown-signal channel.
    pub async fn start(&mut self) -> Receiver<()> {
        let agent = self.agent.read().await;
        info!("Feature count: {}", agent.features.len());
        for f in &agent.features {
            let feature_name = f.read().await.name();
            let (rx, tx) = self.get_channel(AGENT_CHANNEL).await;
            f.write().await.init(self.clone()).await;
            let feature_handle = f.clone();
            agent.runtime.spawn(async move {
                debug!("Spawning feature communication channel.");
                loop {
                    let fh = feature_handle.clone();
                    // NOTE(review): crossbeam `recv()` blocks the thread; inside an
                    // async task this can stall an executor thread — confirm intended.
                    let message = rx
                        .recv()
                        .map_err(|err| anyhow!("Error receiving message: {}", err));
                    if let Ok(message) = message {
                        info!("Got AgentEvent {}", message);
                        let mut feature = fh.write().await;
                        feature.on_event(message.clone()).await;
                        debug!("Done writing event to feature");
                    }
                }
            });
            let _ = tx
                .send(AgentEvent::Started)
                .map_err(|err| {
                    anyhow!(
                        "Error sending Agent Start event to {}: {}",
                        feature_name,
                        err
                    )
                })
                .unwrap();
        }
        drop(agent);
        self.agent.write().await.state = AgentState::Ok;
        // crossbeam_channel::bounded returns (Sender, Receiver), so despite the
        // names, `rx` is the Sender (stored in self.signal) and `tx` the Receiver.
        let (rx, tx) = crossbeam_channel::bounded::<()>(1);
        self.signal = Some(rx);
        tx
    }

    /// Run `func` every `interval` on the agent's runtime, forever.
    pub async fn schedule<T>(&self, interval : Duration, func : T) where T : Fn() -> () + Clone + Send + 'static {
        // Clone the runtime handle inside a block so the read lock is dropped
        // before spawning.
        let handle = {
            let agent = self.agent.read().await;
            agent.runtime.handle().clone()
        };
        let f = func.clone();
        handle.spawn(async move {
            info!("Starting scheduled function");
            loop {
                f.clone()();
                delay_for(interval).await;
            }
        });
    }
}
/*
impl FeatureHandle {
pub fn new<T>(feature: T) -> FeatureHandle
where
T: Feature + 'static,
{
FeatureHandle {
handle: Arc::new(tokio::sync::RwLock::new(feature)),
}
}
}
impl<'a> FeatureHandle {
pub async fn with_write<F>(&self, callback: F)
where
F: FnOnce(&mut RwLockWriteGuard<dyn Feature>) + Sync + Send + 'static,
{
callback(&mut self.handle.write().await);
}
pub async fn with_read<F>(&self, callback: F)
where
F: FnOnce(&RwLockReadGuard<dyn Feature>) + Sync + Send + 'static,
{
callback(&self.handle.read().await);
}
}*/
/// Shared, async-locked event hub carrying `AgentEvent`s.
type HubHandle = Arc<RwLock<Hub<AgentEvent>>>;
/// Core agent state: features, lifecycle, runtime, event hub and workload handles.
pub struct Agent {
    /// Human-readable agent name.
    pub name: String,
    /// Registered features that receive agent events.
    pub features: Vec<FeatureHandle>,
    /// Current lifecycle state.
    pub state: AgentState,
    /// Tokio runtime on which workloads and feature tasks are spawned.
    pub runtime: Runtime,
    /// Shared pub/sub hub for `AgentEvent`s.
    pub hub: HubHandle,
    /// Handles to running or deferred workloads.
    pub work_handles: Vec<Arc<tokio::sync::RwLock<WorkloadHandle>>>,
}
impl Agent {
pub fn new(name: &str) -> Agent {
Agent {
name: name.to_string(),
..Default::default()
}
}
pub fn with_runtime(name: &str, runtime: Runtime) -> Agent {
Agent {
name: name.to_string(),
features: Vec::default(),
runtime: runtime,
state: AgentState::Stopped,
work_handles: Vec::default(),
hub: Hub::new(),
}
}
pub fn status(&self) -> AgentState {
self.state
}
/// Run a command on the agent.
pub async fn command(&mut self, cmd: AgentCommand) {
let channel = self.hub.write().await.get_or_create(AGENT_CHANNEL);
match cmd {
AgentCommand::Start => {
self.state = AgentState::Ok;
let work_handles = self.work_handles.clone();
println!("Workload handles: {}", work_handles.len());
// rerun defered workload handles
for wl in work_handles {
let wl2 = wl.clone();
let (status, workload) = AsyncRunner::block_on(async move {
let wl = wl.read().await;
(wl.status.clone(), wl.workload.as_ref().unwrap().clone())
});
if status == WorkloadStatus::None {
self.run(workload);
}
AsyncRunner::block_on(async move {
let mut handle = wl2.write().await;
handle.status = WorkloadStatus::Complete;
});
}
channel.sender.send(AgentEvent::Started).unwrap();
}
AgentCommand::Schedule(_, _) => |
AgentCommand::Stop => {
self.state = AgentState::Stopped;
channel.sender.send(AgentEvent::Stopped).unwrap();
}
}
}
/// Run a workload in the agent
/// This will capture the statistics of the workload run and store it in
/// the agent.
pub fn run(&mut self, workload: Workload) -> Arc<tokio::sync::RwLock<WorkloadHandle>> {
if self.state == AgentState::Stopped {
info!("Agent stopped, Not running workload {}. Work will be deferred until the agent starts.", workload.id);
let work_handle = Arc::new(tokio::sync::RwLock::new(WorkloadHandle {
id: workload.id,
join_handle: None,
status: WorkloadStatus::None,
workload: Some(workload),
..Default::default()
}));
self.work_handles.push(work_handle.clone());
return work_handle;
}
let id = workload.id;
prom::WORKLOAD_START.inc();
let jh = self.runtime.spawn(async move {
info!("[Workload {}] Running.", id);
let start = Instant::now();
let result = workload.run().await;
let mills = start.elapsed().as_millis();
info!("[Workload {}] Duration: {}ms", id, mills as f64);
prom::WORKLOAD_TOTAL_TIME.inc_by(mills as i64);
crate::prom::WORKLOAD_TIME_COLLECTOR
.with_label_values(&["processing_time"])
.observe(mills as f64 / 1000.);
match result {
Ok(wl) => {
prom::WORKLOAD_COMPLETE.inc();
Ok(wl)
}
Err(_) => {
prom::WORKLOAD_ERROR.inc();
Err(anyhow!("Workload run failed."))
}
}
});
let work_handle = Arc::new(tokio::sync::RwLock::new(WorkloadHandle {
id: id,
join_handle: Some(jh),
status: work::WorkloadStatus::Running,
..Default::default()
}));
self.work_handles.push(work_handle.clone());
work_handle
}
}
/// Shared, async-locked trait object for a pluggable feature.
pub type FeatureHandle = Arc<tokio::sync::RwLock<dyn Feature>>;
#[async_trait]
/// A pluggable agent capability driven by `AgentEvent`s.
pub trait Feature: Send + Sync {
    /// One-time setup; receives a controller handle back to the agent.
    async fn init(&mut self, agent: AgentController);
    /// React to a single event from the agent channel.
    async fn on_event(&mut self, event: AgentEvent);
    /// Display name used in logs.
    fn name(&self) -> String;
}
| {} | conditional_block |
lib.rs | use tokio::time::delay_for;
use core::time::Duration;
use anyhow::{anyhow, Result};
use channels::AGENT_CHANNEL;
use comm::{AgentEvent, Hub};
use config::Config;
use tokio::sync::RwLock;
use tokio::sync::RwLockReadGuard;
use tokio::sync::RwLockWriteGuard;
use crossbeam_channel::{Receiver, Sender};
use log::{debug, info};
use std::{collections::HashMap, fmt::Debug};
use std::{sync::Arc, time::Instant};
use threads::AsyncRunner;
use tokio::runtime::Builder;
use lazy_static::*; | use work::{Workload, WorkloadHandle, WorkloadStatus};
use async_trait::async_trait;
pub mod cfg;
pub mod comm;
pub mod plugins;
pub mod prom;
pub mod threads;
pub mod work;
/// Well-known channel names used on the agent's event hub.
pub mod channels {
    /// Name of the agent's primary broadcast channel.
    // `'static` is implied on const references (clippy::redundant_static_lifetimes).
    pub const AGENT_CHANNEL: &str = "Agent";
}
// Configuration
// Process-wide settings store; async RwLock so tasks can read/update it.
lazy_static! {
    static ref SETTINGS: RwLock<Config> = RwLock::new(Config::default());
}
/// Default agent: unnamed, stopped, fresh tokio runtime, empty hub.
impl Default for Agent {
    fn default() -> Agent {
        // The `'a` lifetime parameter was removed: it was unused, and rustc
        // rejects unconstrained lifetime parameters on impls (E0207).
        // Panics if a runtime cannot be created (`unwrap`).
        Agent {
            name: "Unnamed Agent".to_string(),
            features: Vec::default(),
            runtime: Runtime::new().unwrap(),
            state: AgentState::Stopped,
            work_handles: Vec::default(),
            hub: Hub::new(),
        }
    }
}
/// Lifecycle state of an [`Agent`].
// `Eq` added alongside `PartialEq`: equality on this fieldless enum is total,
// and deriving `Eq` is backward compatible for all callers.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AgentState {
    /// Running normally.
    Ok,
    /// Not running; new work is deferred.
    Stopped,
    /// Entered an error state.
    Error,
}
/// Placeholder schedule descriptor (no fields yet).
#[derive(Clone, Copy, Debug)]
pub struct Schedule {}
/// Commands accepted by `Agent::command`.
#[derive(Clone, Debug)]
pub enum AgentCommand {
    /// Start the agent and re-run deferred workloads.
    Start,
    /// Schedule a workload (currently a no-op in `command`).
    Schedule(Schedule, Workload),
    /// Stop the agent.
    Stop,
}
/// Network binding and free-form settings for a feature.
#[derive(Debug, Clone)]
pub struct FeatureConfig {
    /// IPv4 address octets to bind on.
    pub bind_address: [u8; 4],
    /// TCP port to bind on.
    pub bind_port: u16,
    /// Arbitrary key/value feature settings.
    pub settings: HashMap<String, String>,
}
/// Cloneable handle used to drive an `Agent` from multiple tasks.
#[derive(Clone)]
pub struct AgentController {
    /// Shared agent behind an async read/write lock.
    pub agent: Arc<RwLock<Agent>>,
    /// Sender half of the shutdown-signal channel, set by `start`.
    pub signal: Option<Sender<()>>,
}
impl Debug for AgentController {
    /// Render a fixed placeholder string; the controller carries no
    /// printable state of its own.
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        formatter.write_str("Handle for Agent")
    }
}
/// Cloneable handle to a shared [`Agent`]; all access goes through an async `RwLock`.
impl AgentController {
    /// Create a controller that owns a fresh multi-threaded tokio runtime.
    ///
    /// Panics if the runtime cannot be built (`unwrap` on `Builder::build`).
    pub fn new(name: &str) -> AgentController {
        let runtime = Builder::new()
            .threaded_scheduler()
            .enable_all()
            .build()
            .unwrap();
        let agent = Agent::with_runtime(name, runtime);
        AgentController {
            agent: Arc::new(RwLock::new(agent)),
            signal: None,
        }
    }

    /// Run `closure` with a shared read guard on the agent.
    pub async fn with_read<F>(&self, closure: F)
    where
        F: FnOnce(&RwLockReadGuard<Agent>) + Sync + Send + 'static,
    {
        let handle = self.agent.clone();
        let agent = handle.read().await;
        closure(&agent);
    }

    /// Run `closure` with an exclusive write guard on the agent.
    pub async fn with_write<F>(&self, closure: F)
    where
        F: FnOnce(&mut RwLockWriteGuard<Agent>) + Sync + Send + 'static,
    {
        let handle = self.agent.clone();
        let mut agent = handle.write().await;
        closure(&mut agent);
        drop(agent); // release the write lock explicitly
    }

    /// Publish `event` on the agent's main hub channel.
    ///
    /// Errors if the underlying channel sender fails (e.g. disconnected).
    pub async fn send(&self, event: AgentEvent) -> Result<()> {
        let agent = self.agent.read().await;
        let mut hub = agent.hub.write().await;
        let channel = hub.get_or_create(AGENT_CHANNEL);
        channel
            .sender
            .send(event)
            .map_err(|err| anyhow!("{}", err))?;
        Ok(())
    }

    /// Fetch (creating on demand) the named hub channel and return its endpoints.
    pub async fn get_channel(&self, name: &str) -> (Receiver<AgentEvent>, Sender<AgentEvent>) {
        let agent = self.agent.read().await;
        let chan = agent.hub.clone().write().await.get_or_create(name);
        drop(agent); // release the agent read lock before handing out endpoints
        (chan.receiver, chan.sender)
    }

    /// Build a controller around a caller-supplied runtime.
    pub fn with_runtime(name: &str, runtime: Runtime) -> AgentController {
        let agent = Agent::with_runtime(name, runtime);
        AgentController {
            agent: Arc::new(RwLock::new(agent)),
            signal: None,
        }
    }

    /// Register a feature on the agent; returns `&mut self` for chaining.
    pub async fn add_feature(&mut self, handle: FeatureHandle) -> &mut AgentController {
        let mut agent = self.agent.write().await;
        agent.features.push(handle);
        drop(agent);
        self
    }

    /// Start the agent: init each feature, spawn one event-forwarding task per
    /// feature, send each feature `AgentEvent::Started`, flip the state to
    /// `Ok`, and return the receiving end of a shutdown-signal channel.
    pub async fn start(&mut self) -> Receiver<()> {
        let agent = self.agent.read().await;
        info!("Feature count: {}", agent.features.len());
        for f in &agent.features {
            let feature_name = f.read().await.name();
            let (rx, tx) = self.get_channel(AGENT_CHANNEL).await;
            f.write().await.init(self.clone()).await;
            let feature_handle = f.clone();
            agent.runtime.spawn(async move {
                debug!("Spawning feature communication channel.");
                loop {
                    let fh = feature_handle.clone();
                    // NOTE(review): crossbeam `recv()` blocks the thread; inside an
                    // async task this can stall an executor thread — confirm intended.
                    let message = rx
                        .recv()
                        .map_err(|err| anyhow!("Error receiving message: {}", err));
                    if let Ok(message) = message {
                        info!("Got AgentEvent {}", message);
                        let mut feature = fh.write().await;
                        feature.on_event(message.clone()).await;
                        debug!("Done writing event to feature");
                    }
                }
            });
            let _ = tx
                .send(AgentEvent::Started)
                .map_err(|err| {
                    anyhow!(
                        "Error sending Agent Start event to {}: {}",
                        feature_name,
                        err
                    )
                })
                .unwrap();
        }
        drop(agent);
        self.agent.write().await.state = AgentState::Ok;
        // crossbeam_channel::bounded returns (Sender, Receiver), so despite the
        // names, `rx` is the Sender (stored in self.signal) and `tx` the Receiver.
        let (rx, tx) = crossbeam_channel::bounded::<()>(1);
        self.signal = Some(rx);
        tx
    }

    /// Run `func` every `interval` on the agent's runtime, forever.
    pub async fn schedule<T>(&self, interval : Duration, func : T) where T : Fn() -> () + Clone + Send + 'static {
        // Clone the runtime handle inside a block so the read lock is dropped
        // before spawning.
        let handle = {
            let agent = self.agent.read().await;
            agent.runtime.handle().clone()
        };
        let f = func.clone();
        handle.spawn(async move {
            info!("Starting scheduled function");
            loop {
                f.clone()();
                delay_for(interval).await;
            }
        });
    }
}
/*
impl FeatureHandle {
pub fn new<T>(feature: T) -> FeatureHandle
where
T: Feature + 'static,
{
FeatureHandle {
handle: Arc::new(tokio::sync::RwLock::new(feature)),
}
}
}
impl<'a> FeatureHandle {
pub async fn with_write<F>(&self, callback: F)
where
F: FnOnce(&mut RwLockWriteGuard<dyn Feature>) + Sync + Send + 'static,
{
callback(&mut self.handle.write().await);
}
pub async fn with_read<F>(&self, callback: F)
where
F: FnOnce(&RwLockReadGuard<dyn Feature>) + Sync + Send + 'static,
{
callback(&self.handle.read().await);
}
}*/
/// Shared, async-locked event hub carrying `AgentEvent`s.
type HubHandle = Arc<RwLock<Hub<AgentEvent>>>;
/// Core agent state: features, lifecycle, runtime, event hub and workload handles.
pub struct Agent {
    /// Human-readable agent name.
    pub name: String,
    /// Registered features that receive agent events.
    pub features: Vec<FeatureHandle>,
    /// Current lifecycle state.
    pub state: AgentState,
    /// Tokio runtime on which workloads and feature tasks are spawned.
    pub runtime: Runtime,
    /// Shared pub/sub hub for `AgentEvent`s.
    pub hub: HubHandle,
    /// Handles to running or deferred workloads.
    pub work_handles: Vec<Arc<tokio::sync::RwLock<WorkloadHandle>>>,
}
impl Agent {
    /// Create a named agent with default settings (see the `Default` impl).
    pub fn new(name: &str) -> Agent {
        Agent {
            name: name.to_string(),
            ..Default::default()
        }
    }

    /// Create a named, stopped agent that runs work on the supplied runtime.
    pub fn with_runtime(name: &str, runtime: Runtime) -> Agent {
        Agent {
            name: name.to_string(),
            features: Vec::default(),
            runtime: runtime,
            state: AgentState::Stopped,
            work_handles: Vec::default(),
            hub: Hub::new(),
        }
    }

    /// Current lifecycle state of the agent.
    pub fn status(&self) -> AgentState {
        self.state
    }

    /// Run a command on the agent.
    pub async fn command(&mut self, cmd: AgentCommand) {
        let channel = self.hub.write().await.get_or_create(AGENT_CHANNEL);
        match cmd {
            AgentCommand::Start => {
                self.state = AgentState::Ok;
                let work_handles = self.work_handles.clone();
                println!("Workload handles: {}", work_handles.len());
                // rerun defered workload handles
                for wl in work_handles {
                    let wl2 = wl.clone();
                    let (status, workload) = AsyncRunner::block_on(async move {
                        let wl = wl.read().await;
                        // NOTE(review): `unwrap()` assumes every stored handle still
                        // carries its workload — confirm this invariant holds.
                        (wl.status.clone(), wl.workload.as_ref().unwrap().clone())
                    });
                    if status == WorkloadStatus::None {
                        self.run(workload);
                    }
                    // Mark the handle complete regardless of whether it was re-run.
                    AsyncRunner::block_on(async move {
                        let mut handle = wl2.write().await;
                        handle.status = WorkloadStatus::Complete;
                    });
                }
                channel.sender.send(AgentEvent::Started).unwrap();
            }
            AgentCommand::Schedule(_, _) => {}
            AgentCommand::Stop => {
                self.state = AgentState::Stopped;
                channel.sender.send(AgentEvent::Stopped).unwrap();
            }
        }
    }

    /// Run a workload in the agent
    /// This will capture the statistics of the workload run and store it in
    /// the agent.
    pub fn run(&mut self, workload: Workload) -> Arc<tokio::sync::RwLock<WorkloadHandle>> {
        // While stopped, park the workload in a deferred handle instead of running it.
        if self.state == AgentState::Stopped {
            info!("Agent stopped, Not running workload {}. Work will be deferred until the agent starts.", workload.id);
            let work_handle = Arc::new(tokio::sync::RwLock::new(WorkloadHandle {
                id: workload.id,
                join_handle: None,
                status: WorkloadStatus::None,
                workload: Some(workload),
                ..Default::default()
            }));
            self.work_handles.push(work_handle.clone());
            return work_handle;
        }
        let id = workload.id;
        prom::WORKLOAD_START.inc();
        // Spawn the workload on the runtime, recording duration and outcome metrics.
        let jh = self.runtime.spawn(async move {
            info!("[Workload {}] Running.", id);
            let start = Instant::now();
            let result = workload.run().await;
            let mills = start.elapsed().as_millis();
            info!("[Workload {}] Duration: {}ms", id, mills as f64);
            prom::WORKLOAD_TOTAL_TIME.inc_by(mills as i64);
            crate::prom::WORKLOAD_TIME_COLLECTOR
                .with_label_values(&["processing_time"])
                .observe(mills as f64 / 1000.);
            match result {
                Ok(wl) => {
                    prom::WORKLOAD_COMPLETE.inc();
                    Ok(wl)
                }
                Err(_) => {
                    prom::WORKLOAD_ERROR.inc();
                    Err(anyhow!("Workload run failed."))
                }
            }
        });
        let work_handle = Arc::new(tokio::sync::RwLock::new(WorkloadHandle {
            id: id,
            join_handle: Some(jh),
            status: work::WorkloadStatus::Running,
            ..Default::default()
        }));
        self.work_handles.push(work_handle.clone());
        work_handle
    }
}
/// Shared, async-locked trait object for a pluggable feature.
pub type FeatureHandle = Arc<tokio::sync::RwLock<dyn Feature>>;
#[async_trait]
pub trait Feature: Send + Sync {
async fn init(&mut self, agent: AgentController);
async fn on_event(&mut self, event: AgentEvent);
fn name(&self) -> String;
} |
use tokio::runtime::Runtime;
| random_line_split |
lib.rs | use tokio::time::delay_for;
use core::time::Duration;
use anyhow::{anyhow, Result};
use channels::AGENT_CHANNEL;
use comm::{AgentEvent, Hub};
use config::Config;
use tokio::sync::RwLock;
use tokio::sync::RwLockReadGuard;
use tokio::sync::RwLockWriteGuard;
use crossbeam_channel::{Receiver, Sender};
use log::{debug, info};
use std::{collections::HashMap, fmt::Debug};
use std::{sync::Arc, time::Instant};
use threads::AsyncRunner;
use tokio::runtime::Builder;
use lazy_static::*;
use tokio::runtime::Runtime;
use work::{Workload, WorkloadHandle, WorkloadStatus};
use async_trait::async_trait;
pub mod cfg;
pub mod comm;
pub mod plugins;
pub mod prom;
pub mod threads;
pub mod work;
/// Well-known channel names used on the agent's event hub.
pub mod channels {
    /// Name of the agent's primary broadcast channel.
    // `'static` is implied on const references (clippy::redundant_static_lifetimes).
    pub const AGENT_CHANNEL: &str = "Agent";
}
// Configuration
// Process-wide settings store; async RwLock so tasks can read/update it.
lazy_static! {
    static ref SETTINGS: RwLock<Config> = RwLock::new(Config::default());
}
/// Default agent: unnamed, stopped, fresh tokio runtime, empty hub.
impl Default for Agent {
    fn default() -> Agent {
        // The `'a` lifetime parameter was removed: it was unused, and rustc
        // rejects unconstrained lifetime parameters on impls (E0207).
        // Panics if a runtime cannot be created (`unwrap`).
        Agent {
            name: "Unnamed Agent".to_string(),
            features: Vec::default(),
            runtime: Runtime::new().unwrap(),
            state: AgentState::Stopped,
            work_handles: Vec::default(),
            hub: Hub::new(),
        }
    }
}
/// Lifecycle state of an [`Agent`].
// `Eq` added alongside `PartialEq`: equality on this fieldless enum is total,
// and deriving `Eq` is backward compatible for all callers.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AgentState {
    /// Running normally.
    Ok,
    /// Not running; new work is deferred.
    Stopped,
    /// Entered an error state.
    Error,
}
/// Placeholder schedule descriptor (no fields yet).
#[derive(Clone, Copy, Debug)]
pub struct Schedule {}
#[derive(Clone, Debug)]
pub enum | {
Start,
Schedule(Schedule, Workload),
Stop,
}
/// Network binding and free-form settings for a feature.
#[derive(Debug, Clone)]
pub struct FeatureConfig {
    /// IPv4 address octets to bind on.
    pub bind_address: [u8; 4],
    /// TCP port to bind on.
    pub bind_port: u16,
    /// Arbitrary key/value feature settings.
    pub settings: HashMap<String, String>,
}
/// Cloneable handle used to drive an `Agent` from multiple tasks.
#[derive(Clone)]
pub struct AgentController {
    /// Shared agent behind an async read/write lock.
    pub agent: Arc<RwLock<Agent>>,
    /// Sender half of the shutdown-signal channel, set by `start`.
    pub signal: Option<Sender<()>>,
}
impl Debug for AgentController {
    /// Render a fixed placeholder string; the controller carries no
    /// printable state of its own.
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        formatter.write_str("Handle for Agent")
    }
}
/// Cloneable handle to a shared [`Agent`]; all access goes through an async `RwLock`.
impl AgentController {
    /// Create a controller that owns a fresh multi-threaded tokio runtime.
    ///
    /// Panics if the runtime cannot be built (`unwrap` on `Builder::build`).
    pub fn new(name: &str) -> AgentController {
        let runtime = Builder::new()
            .threaded_scheduler()
            .enable_all()
            .build()
            .unwrap();
        let agent = Agent::with_runtime(name, runtime);
        AgentController {
            agent: Arc::new(RwLock::new(agent)),
            signal: None,
        }
    }

    /// Run `closure` with a shared read guard on the agent.
    pub async fn with_read<F>(&self, closure: F)
    where
        F: FnOnce(&RwLockReadGuard<Agent>) + Sync + Send + 'static,
    {
        let handle = self.agent.clone();
        let agent = handle.read().await;
        closure(&agent);
    }

    /// Run `closure` with an exclusive write guard on the agent.
    pub async fn with_write<F>(&self, closure: F)
    where
        F: FnOnce(&mut RwLockWriteGuard<Agent>) + Sync + Send + 'static,
    {
        let handle = self.agent.clone();
        let mut agent = handle.write().await;
        closure(&mut agent);
        drop(agent); // release the write lock explicitly
    }

    /// Publish `event` on the agent's main hub channel.
    ///
    /// Errors if the underlying channel sender fails (e.g. disconnected).
    pub async fn send(&self, event: AgentEvent) -> Result<()> {
        let agent = self.agent.read().await;
        let mut hub = agent.hub.write().await;
        let channel = hub.get_or_create(AGENT_CHANNEL);
        channel
            .sender
            .send(event)
            .map_err(|err| anyhow!("{}", err))?;
        Ok(())
    }

    /// Fetch (creating on demand) the named hub channel and return its endpoints.
    pub async fn get_channel(&self, name: &str) -> (Receiver<AgentEvent>, Sender<AgentEvent>) {
        let agent = self.agent.read().await;
        let chan = agent.hub.clone().write().await.get_or_create(name);
        drop(agent); // release the agent read lock before handing out endpoints
        (chan.receiver, chan.sender)
    }

    /// Build a controller around a caller-supplied runtime.
    pub fn with_runtime(name: &str, runtime: Runtime) -> AgentController {
        let agent = Agent::with_runtime(name, runtime);
        AgentController {
            agent: Arc::new(RwLock::new(agent)),
            signal: None,
        }
    }

    /// Register a feature on the agent; returns `&mut self` for chaining.
    pub async fn add_feature(&mut self, handle: FeatureHandle) -> &mut AgentController {
        let mut agent = self.agent.write().await;
        agent.features.push(handle);
        drop(agent);
        self
    }

    /// Start the agent: init each feature, spawn one event-forwarding task per
    /// feature, send each feature `AgentEvent::Started`, flip the state to
    /// `Ok`, and return the receiving end of a shutdown-signal channel.
    pub async fn start(&mut self) -> Receiver<()> {
        let agent = self.agent.read().await;
        info!("Feature count: {}", agent.features.len());
        for f in &agent.features {
            let feature_name = f.read().await.name();
            let (rx, tx) = self.get_channel(AGENT_CHANNEL).await;
            f.write().await.init(self.clone()).await;
            let feature_handle = f.clone();
            agent.runtime.spawn(async move {
                debug!("Spawning feature communication channel.");
                loop {
                    let fh = feature_handle.clone();
                    // NOTE(review): crossbeam `recv()` blocks the thread; inside an
                    // async task this can stall an executor thread — confirm intended.
                    let message = rx
                        .recv()
                        .map_err(|err| anyhow!("Error receiving message: {}", err));
                    if let Ok(message) = message {
                        info!("Got AgentEvent {}", message);
                        let mut feature = fh.write().await;
                        feature.on_event(message.clone()).await;
                        debug!("Done writing event to feature");
                    }
                }
            });
            let _ = tx
                .send(AgentEvent::Started)
                .map_err(|err| {
                    anyhow!(
                        "Error sending Agent Start event to {}: {}",
                        feature_name,
                        err
                    )
                })
                .unwrap();
        }
        drop(agent);
        self.agent.write().await.state = AgentState::Ok;
        // crossbeam_channel::bounded returns (Sender, Receiver), so despite the
        // names, `rx` is the Sender (stored in self.signal) and `tx` the Receiver.
        let (rx, tx) = crossbeam_channel::bounded::<()>(1);
        self.signal = Some(rx);
        tx
    }

    /// Run `func` every `interval` on the agent's runtime, forever.
    pub async fn schedule<T>(&self, interval : Duration, func : T) where T : Fn() -> () + Clone + Send + 'static {
        // Clone the runtime handle inside a block so the read lock is dropped
        // before spawning.
        let handle = {
            let agent = self.agent.read().await;
            agent.runtime.handle().clone()
        };
        let f = func.clone();
        handle.spawn(async move {
            info!("Starting scheduled function");
            loop {
                f.clone()();
                delay_for(interval).await;
            }
        });
    }
}
/*
impl FeatureHandle {
pub fn new<T>(feature: T) -> FeatureHandle
where
T: Feature + 'static,
{
FeatureHandle {
handle: Arc::new(tokio::sync::RwLock::new(feature)),
}
}
}
impl<'a> FeatureHandle {
pub async fn with_write<F>(&self, callback: F)
where
F: FnOnce(&mut RwLockWriteGuard<dyn Feature>) + Sync + Send + 'static,
{
callback(&mut self.handle.write().await);
}
pub async fn with_read<F>(&self, callback: F)
where
F: FnOnce(&RwLockReadGuard<dyn Feature>) + Sync + Send + 'static,
{
callback(&self.handle.read().await);
}
}*/
/// Shared, async-locked event hub carrying `AgentEvent`s.
type HubHandle = Arc<RwLock<Hub<AgentEvent>>>;
/// Core agent state: features, lifecycle, runtime, event hub and workload handles.
pub struct Agent {
    /// Human-readable agent name.
    pub name: String,
    /// Registered features that receive agent events.
    pub features: Vec<FeatureHandle>,
    /// Current lifecycle state.
    pub state: AgentState,
    /// Tokio runtime on which workloads and feature tasks are spawned.
    pub runtime: Runtime,
    /// Shared pub/sub hub for `AgentEvent`s.
    pub hub: HubHandle,
    /// Handles to running or deferred workloads.
    pub work_handles: Vec<Arc<tokio::sync::RwLock<WorkloadHandle>>>,
}
impl Agent {
    /// Create a named agent with default settings (see the `Default` impl).
    pub fn new(name: &str) -> Agent {
        Agent {
            name: name.to_string(),
            ..Default::default()
        }
    }

    /// Create a named, stopped agent that runs work on the supplied runtime.
    pub fn with_runtime(name: &str, runtime: Runtime) -> Agent {
        Agent {
            name: name.to_string(),
            features: Vec::default(),
            runtime: runtime,
            state: AgentState::Stopped,
            work_handles: Vec::default(),
            hub: Hub::new(),
        }
    }

    /// Current lifecycle state of the agent.
    pub fn status(&self) -> AgentState {
        self.state
    }

    /// Run a command on the agent.
    pub async fn command(&mut self, cmd: AgentCommand) {
        let channel = self.hub.write().await.get_or_create(AGENT_CHANNEL);
        match cmd {
            AgentCommand::Start => {
                self.state = AgentState::Ok;
                let work_handles = self.work_handles.clone();
                println!("Workload handles: {}", work_handles.len());
                // rerun defered workload handles
                for wl in work_handles {
                    let wl2 = wl.clone();
                    let (status, workload) = AsyncRunner::block_on(async move {
                        let wl = wl.read().await;
                        // NOTE(review): `unwrap()` assumes every stored handle still
                        // carries its workload — confirm this invariant holds.
                        (wl.status.clone(), wl.workload.as_ref().unwrap().clone())
                    });
                    if status == WorkloadStatus::None {
                        self.run(workload);
                    }
                    // Mark the handle complete regardless of whether it was re-run.
                    AsyncRunner::block_on(async move {
                        let mut handle = wl2.write().await;
                        handle.status = WorkloadStatus::Complete;
                    });
                }
                channel.sender.send(AgentEvent::Started).unwrap();
            }
            AgentCommand::Schedule(_, _) => {}
            AgentCommand::Stop => {
                self.state = AgentState::Stopped;
                channel.sender.send(AgentEvent::Stopped).unwrap();
            }
        }
    }

    /// Run a workload in the agent
    /// This will capture the statistics of the workload run and store it in
    /// the agent.
    pub fn run(&mut self, workload: Workload) -> Arc<tokio::sync::RwLock<WorkloadHandle>> {
        // While stopped, park the workload in a deferred handle instead of running it.
        if self.state == AgentState::Stopped {
            info!("Agent stopped, Not running workload {}. Work will be deferred until the agent starts.", workload.id);
            let work_handle = Arc::new(tokio::sync::RwLock::new(WorkloadHandle {
                id: workload.id,
                join_handle: None,
                status: WorkloadStatus::None,
                workload: Some(workload),
                ..Default::default()
            }));
            self.work_handles.push(work_handle.clone());
            return work_handle;
        }
        let id = workload.id;
        prom::WORKLOAD_START.inc();
        // Spawn the workload on the runtime, recording duration and outcome metrics.
        let jh = self.runtime.spawn(async move {
            info!("[Workload {}] Running.", id);
            let start = Instant::now();
            let result = workload.run().await;
            let mills = start.elapsed().as_millis();
            info!("[Workload {}] Duration: {}ms", id, mills as f64);
            prom::WORKLOAD_TOTAL_TIME.inc_by(mills as i64);
            crate::prom::WORKLOAD_TIME_COLLECTOR
                .with_label_values(&["processing_time"])
                .observe(mills as f64 / 1000.);
            match result {
                Ok(wl) => {
                    prom::WORKLOAD_COMPLETE.inc();
                    Ok(wl)
                }
                Err(_) => {
                    prom::WORKLOAD_ERROR.inc();
                    Err(anyhow!("Workload run failed."))
                }
            }
        });
        let work_handle = Arc::new(tokio::sync::RwLock::new(WorkloadHandle {
            id: id,
            join_handle: Some(jh),
            status: work::WorkloadStatus::Running,
            ..Default::default()
        }));
        self.work_handles.push(work_handle.clone());
        work_handle
    }
}
/// Shared, async-locked trait object for a pluggable feature.
pub type FeatureHandle = Arc<tokio::sync::RwLock<dyn Feature>>;
#[async_trait]
/// A pluggable agent capability driven by `AgentEvent`s.
pub trait Feature: Send + Sync {
    /// One-time setup; receives a controller handle back to the agent.
    async fn init(&mut self, agent: AgentController);
    /// React to a single event from the agent channel.
    async fn on_event(&mut self, event: AgentEvent);
    /// Display name used in logs.
    fn name(&self) -> String;
}
| AgentCommand | identifier_name |
learn.py | import numpy as np
import time
from RLC.real_chess.tree import Node
import math
import gc
def softmax(x, temperature=1):
    """Temperature-scaled softmax over ``x``.

    The maximum is subtracted before exponentiating so large scores do not
    overflow ``np.exp``; the result is mathematically unchanged.

    Args:
        x: array-like of scores.
        temperature: softmax temperature (> 0); lower values are peakier.

    Returns:
        np.ndarray of the same shape as ``x`` whose entries sum to 1.
    """
    z = np.asarray(x, dtype=float) / temperature
    z = z - np.max(z)
    e = np.exp(z)
    return e / np.sum(e)
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    Branches on the sign of ``x`` so ``math.exp`` is only called with a
    non-positive argument, avoiding OverflowError for large negative inputs.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)
    return z / (1 + z)
class TD_search(object):
def __init__(self, env, agent, gamma=0.9, search_time=1, memsize=2000, batch_size=256, temperature=1):
    """
    Chess algorithm that combines bootstrapped monte carlo tree search with Q Learning
    Args:
        env: RLC chess environment
        agent: RLC chess agent
        gamma: discount factor
        search_time: maximum time spent doing tree search
        memsize: Amount of training samples to keep in-memory
        batch_size: Size of the training batches
        temperature: softmax temperature for mcts
    """
    self.env = env
    self.agent = agent
    self.tree = Node(self.env)
    self.gamma = gamma
    self.memsize = memsize
    self.batch_size = batch_size
    self.temperature = temperature
    self.reward_trace = []  # Keeps track of the rewards
    self.piece_balance_trace = []  # Keep track of the material value on the board
    self.ready = False  # Whether to start training
    self.search_time = search_time
    self.min_sim_count = 10  # minimum number of MCTS simulations per move
    # Experience replay memory, row-aligned across arrays: board encodings,
    # successor encodings, rewards, TD errors (sampling priorities), and
    # a flag marking whether the episode was still active at that step.
    self.mem_state = np.zeros(shape=(1, 8, 8, 8))
    self.mem_sucstate = np.zeros(shape=(1, 8, 8, 8))
    self.mem_reward = np.zeros(shape=(1))
    self.mem_error = np.zeros(shape=(1))
    self.mem_episode_active = np.ones(shape=(1))
def learn(self, iters=40, c=5, timelimit_seconds=3600, maxiter=80):
    """
    Start Reinforcement Learning Algorithm
    Args:
        iters: maximum amount of iterations to train
        c: model update rate (once every C games)
        timelimit_seconds: maximum training time
        maxiter: Maximum duration of a game, in halfmoves
    Returns:
        the board of the final game played
    """
    starttime = time.time()
    for k in range(iters):
        self.env.reset()
        if k % c == 0:
            # Refresh the agent's fixed model (used by the MCTS simulations).
            self.agent.fix_model()
            print("iter", k)
        if k > c:
            # Only start TD updates once enough games have filled the memory.
            self.ready = True
        self.play_game(k, maxiter=maxiter)
        if starttime + timelimit_seconds < time.time():
            break
    return self.env.board
def play_game(self, k, maxiter=80):
    """
    Play a chess game and learn from it

    Args:
        k: the play iteration number
        maxiter: maximum duration of the game (halfmoves)

    Returns:
        board: Chess environment on terminal state
    """
    episode_end = False
    turncount = 0
    tree = Node(self.env.board, gamma=self.gamma)  # Initialize the game tree
    # Play a game of chess
    while not episode_end:
        state = np.expand_dims(self.env.layer_board.copy(), axis=0)
        state_value = self.agent.predict(state)
        # White's turn involves tree-search
        if self.env.board.turn:
            # Do a Monte Carlo Tree Search after game iteration k
            start_mcts_after = -1
            if k > start_mcts_after:
                tree = self.mcts(tree)
                # Step the best move
                max_move = None
                max_value = -np.inf  # np.NINF was removed in NumPy 2.0
                for move, child in tree.children.items():
                    sampled_value = np.mean(child.values)
                    if sampled_value > max_value:
                        max_value = sampled_value
                        max_move = move
            else:
                max_move = np.random.choice([move for move in self.env.board.generate_legal_moves()])
        # Black's turn is myopic
        else:
            max_move = None
            max_value = -np.inf
            for move in self.env.board.generate_legal_moves():
                self.env.step(move)
                if self.env.board.result() == "0-1":
                    # Immediate win for black available: take it.
                    max_move = move
                    self.env.board.pop()
                    self.env.init_layer_board()
                    break
                successor_state_value_opponent = self.env.opposing_agent.predict(
                    np.expand_dims(self.env.layer_board, axis=0))
                if successor_state_value_opponent > max_value:
                    max_move = move
                    max_value = successor_state_value_opponent
                self.env.board.pop()
                self.env.init_layer_board()
        if not (self.env.board.turn and max_move not in tree.children.keys()) or not k > start_mcts_after:
            tree.children[max_move] = Node(gamma=0.9, parent=tree)
        episode_end, reward = self.env.step(max_move)
        # Descend into the played move's subtree and detach it from its parent.
        tree = tree.children[max_move]
        tree.parent = None
        gc.collect()
        sucstate = np.expand_dims(self.env.layer_board, axis=0)
        new_state_value = self.agent.predict(sucstate)
        # One-step TD error: r + gamma * V(s') - V(s).
        # np.float was removed in NumPy 1.24; the builtin float is the
        # documented replacement and behaves identically here.
        error = reward + self.gamma * new_state_value - state_value
        error = float(np.squeeze(error))
        turncount += 1
        if turncount > maxiter and not episode_end:
            episode_end = True
        episode_active = 0 if episode_end else 1
        # construct training sample state, prediction, error
        self.mem_state = np.append(self.mem_state, state, axis=0)
        self.mem_reward = np.append(self.mem_reward, reward)
        self.mem_sucstate = np.append(self.mem_sucstate, sucstate, axis=0)
        self.mem_error = np.append(self.mem_error, error)
        self.reward_trace = np.append(self.reward_trace, reward)
        self.mem_episode_active = np.append(self.mem_episode_active, episode_active)
        # Drop the oldest experience rows once the memory is full.
        if self.mem_state.shape[0] > self.memsize:
            self.mem_state = self.mem_state[1:]
            self.mem_reward = self.mem_reward[1:]
            self.mem_sucstate = self.mem_sucstate[1:]
            self.mem_error = self.mem_error[1:]
            self.mem_episode_active = self.mem_episode_active[1:]
        gc.collect()
        if turncount % 10 == 0:
            self.update_agent()
    piece_balance = self.env.get_material_value()
    self.piece_balance_trace.append(piece_balance)
    print("game ended with result", reward, "and material balance", piece_balance, "in", turncount, "halfmoves")
    return self.env.board
def update_agent(self):
    """
    Update the Agent with TD learning

    Returns:
        None
    """
    if self.ready:
        # Sample a prioritized minibatch, do one TD update, and refresh the
        # stored errors (sampling priorities) for the sampled rows.
        choice_indices, states, rewards, sucstates, episode_active = self.get_minibatch()
        td_errors = self.agent.TD_update(states, rewards, sucstates, episode_active, gamma=self.gamma)
        self.mem_error[choice_indices.tolist()] = td_errors
def get_minibatch(self, prioritized=True):
    """Sample a batch of experience rows for a TD update.

    Args:
        prioritized: when True, rows are drawn with probability proportional
            to their stored absolute TD error (plus a small epsilon);
            otherwise rows are drawn uniformly.

    Returns:
        Tuple of (chosen row indices, states, rewards, successor states,
        episode-active flags), all aligned on the first axis.
    """
    if prioritized:
        priorities = np.abs(self.mem_error) + 1e-9
    else:
        priorities = np.ones(shape=self.mem_error.shape)
    probabilities = priorities / np.sum(priorities)
    n_rows = self.mem_state.shape[0]
    candidates = [i for i in range(n_rows)]
    choice_indices = np.random.choice(
        candidates,
        min(n_rows, self.batch_size),
        p=np.squeeze(probabilities),
        replace=False,
    )
    return (
        choice_indices,
        self.mem_state[choice_indices],
        self.mem_reward[choice_indices],
        self.mem_sucstate[choice_indices],
        self.mem_episode_active[choice_indices],
    )
def mcts(self, node):
"""
Run Monte Carlo Tree Search
Args:
node: A game state node object
Returns:
the node with playout sims
"""
starttime = time.time()
sim_count = 0
board_in = self.env.board.fen()
# First make a prediction for each child state
for move in self.env.board.generate_legal_moves():
if move not in node.children.keys():
|
episode_end, reward = self.env.step(move)
if episode_end:
successor_state_value = 0
else:
successor_state_value = np.squeeze(
self.agent.model.predict(np.expand_dims(self.env.layer_board, axis=0))
)
child_value = reward + self.gamma * successor_state_value
node.update_child(move, child_value)
self.env.board.pop()
self.env.init_layer_board()
if not node.values:
node.values = [0]
while starttime + self.search_time > time.time() or sim_count < self.min_sim_count:
depth = 0
color = 1
node_rewards = []
# Select the best node from where to start MCTS
while node.children:
node, move = node.select(color=color)
if not move:
# No move means that the node selects itself, not a child node.
break
else:
depth += 1
color = color * -1 # switch color
episode_end, reward = self.env.step(move) # Update the environment to reflect the node
node_rewards.append(reward)
# Check best node is terminal
if self.env.board.result() == "1-0" and depth == 1: # -> Direct win for white, no need for mcts.
self.env.board.pop()
self.env.init_layer_board()
node.update(1)
node = node.parent
return node
elif episode_end: # -> if the explored tree leads to a terminal state, simulate from root.
while node.parent:
self.env.board.pop()
self.env.init_layer_board()
node = node.parent
break
else:
continue
# Expand the game tree with a simulation
Returns, move = node.simulate(self.agent.fixed_model,
self.env,
temperature=self.temperature,
depth=0)
self.env.init_layer_board()
if move not in node.children.keys():
node.children[move] = Node(self.env.board, parent=node)
node.update_child(move, Returns)
# Return to root node and backpropagate Returns
while node.parent:
latest_reward = node_rewards.pop(-1)
Returns = latest_reward + self.gamma * Returns
node.update(Returns)
node = node.parent
self.env.board.pop()
self.env.init_layer_board()
sim_count += 1
board_out = self.env.board.fen()
assert board_in == board_out
return node
| node.children[move] = Node(self.env.board, parent=node) | conditional_block |
learn.py | import numpy as np
import time
from RLC.real_chess.tree import Node
import math
import gc
def softmax(x, temperature=1):
|
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    Branches on the sign of ``x`` so ``math.exp`` is only called with a
    non-positive argument, avoiding OverflowError for large negative inputs.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)
    return z / (1 + z)
class TD_search(object):
def __init__(self, env, agent, gamma=0.9, search_time=1, memsize=2000, batch_size=256, temperature=1):
    """
    Chess algorithm that combines bootstrapped monte carlo tree search with Q Learning
    Args:
        env: RLC chess environment
        agent: RLC chess agent
        gamma: discount factor
        search_time: maximum time spent doing tree search
        memsize: Amount of training samples to keep in-memory
        batch_size: Size of the training batches
        temperature: softmax temperature for mcts
    """
    self.env = env
    self.agent = agent
    self.tree = Node(self.env)
    self.gamma = gamma
    self.memsize = memsize
    self.batch_size = batch_size
    self.temperature = temperature
    self.reward_trace = []  # Keeps track of the rewards
    self.piece_balance_trace = []  # Keep track of the material value on the board
    self.ready = False  # Whether to start training
    self.search_time = search_time
    self.min_sim_count = 10  # minimum number of MCTS simulations per move
    # Experience replay memory, row-aligned across arrays: board encodings,
    # successor encodings, rewards, TD errors (sampling priorities), and
    # a flag marking whether the episode was still active at that step.
    self.mem_state = np.zeros(shape=(1, 8, 8, 8))
    self.mem_sucstate = np.zeros(shape=(1, 8, 8, 8))
    self.mem_reward = np.zeros(shape=(1))
    self.mem_error = np.zeros(shape=(1))
    self.mem_episode_active = np.ones(shape=(1))
def learn(self, iters=40, c=5, timelimit_seconds=3600, maxiter=80):
    """
    Start Reinforcement Learning Algorithm
    Args:
        iters: maximum amount of iterations to train
        c: model update rate (once every C games)
        timelimit_seconds: maximum training time
        maxiter: Maximum duration of a game, in halfmoves
    Returns:
        the board of the final game played
    """
    starttime = time.time()
    for k in range(iters):
        self.env.reset()
        if k % c == 0:
            # Refresh the agent's fixed model (used by the MCTS simulations).
            self.agent.fix_model()
            print("iter", k)
        if k > c:
            # Only start TD updates once enough games have filled the memory.
            self.ready = True
        self.play_game(k, maxiter=maxiter)
        if starttime + timelimit_seconds < time.time():
            break
    return self.env.board
def play_game(self, k, maxiter=80):
"""
Play a chess game and learn from it
Args:
k: the play iteration number
maxiter: maximum duration of the game (halfmoves)
Returns:
board: Chess environment on terminal state
"""
episode_end = False
turncount = 0
tree = Node(self.env.board, gamma=self.gamma) # Initialize the game tree
# Play a game of chess
while not episode_end:
state = np.expand_dims(self.env.layer_board.copy(), axis=0)
state_value = self.agent.predict(state)
# White's turn involves tree-search
if self.env.board.turn:
# Do a Monte Carlo Tree Search after game iteration k
start_mcts_after = -1
if k > start_mcts_after:
tree = self.mcts(tree)
# Step the best move
max_move = None
max_value = np.NINF
for move, child in tree.children.items():
sampled_value = np.mean(child.values)
if sampled_value > max_value:
max_value = sampled_value
max_move = move
else:
max_move = np.random.choice([move for move in self.env.board.generate_legal_moves()])
# Black's turn is myopic
else:
max_move = None
max_value = np.NINF
for move in self.env.board.generate_legal_moves():
self.env.step(move)
if self.env.board.result() == "0-1":
max_move = move
self.env.board.pop()
self.env.init_layer_board()
break
successor_state_value_opponent = self.env.opposing_agent.predict(
np.expand_dims(self.env.layer_board, axis=0))
if successor_state_value_opponent > max_value:
max_move = move
max_value = successor_state_value_opponent
self.env.board.pop()
self.env.init_layer_board()
if not (self.env.board.turn and max_move not in tree.children.keys()) or not k > start_mcts_after:
tree.children[max_move] = Node(gamma=0.9, parent=tree)
episode_end, reward = self.env.step(max_move)
tree = tree.children[max_move]
tree.parent = None
gc.collect()
sucstate = np.expand_dims(self.env.layer_board, axis=0)
new_state_value = self.agent.predict(sucstate)
error = reward + self.gamma * new_state_value - state_value
error = np.float(np.squeeze(error))
turncount += 1
if turncount > maxiter and not episode_end:
episode_end = True
episode_active = 0 if episode_end else 1
# construct training sample state, prediction, error
self.mem_state = np.append(self.mem_state, state, axis=0)
self.mem_reward = np.append(self.mem_reward, reward)
self.mem_sucstate = np.append(self.mem_sucstate, sucstate, axis=0)
self.mem_error = np.append(self.mem_error, error)
self.reward_trace = np.append(self.reward_trace, reward)
self.mem_episode_active = np.append(self.mem_episode_active, episode_active)
if self.mem_state.shape[0] > self.memsize:
self.mem_state = self.mem_state[1:]
self.mem_reward = self.mem_reward[1:]
self.mem_sucstate = self.mem_sucstate[1:]
self.mem_error = self.mem_error[1:]
self.mem_episode_active = self.mem_episode_active[1:]
gc.collect()
if turncount % 10 == 0:
self.update_agent()
piece_balance = self.env.get_material_value()
self.piece_balance_trace.append(piece_balance)
print("game ended with result", reward, "and material balance", piece_balance, "in", turncount, "halfmoves")
return self.env.board
def update_agent(self):
"""
Update the Agent with TD learning
Returns:
None
"""
if self.ready:
choice_indices, states, rewards, sucstates, episode_active = self.get_minibatch()
td_errors = self.agent.TD_update(states, rewards, sucstates, episode_active, gamma=self.gamma)
self.mem_error[choice_indices.tolist()] = td_errors
def get_minibatch(self, prioritized=True):
"""
Get a mini batch of experience
Args:
prioritized:
Returns:
"""
if prioritized:
sampling_priorities = np.abs(self.mem_error) + 1e-9
else:
sampling_priorities = np.ones(shape=self.mem_error.shape)
sampling_probs = sampling_priorities / np.sum(sampling_priorities)
sample_indices = [x for x in range(self.mem_state.shape[0])]
choice_indices = np.random.choice(sample_indices,
min(self.mem_state.shape[0],
self.batch_size),
p=np.squeeze(sampling_probs),
replace=False
)
states = self.mem_state[choice_indices]
rewards = self.mem_reward[choice_indices]
sucstates = self.mem_sucstate[choice_indices]
episode_active = self.mem_episode_active[choice_indices]
return choice_indices, states, rewards, sucstates, episode_active
def mcts(self, node):
"""
Run Monte Carlo Tree Search
Args:
node: A game state node object
Returns:
the node with playout sims
"""
starttime = time.time()
sim_count = 0
board_in = self.env.board.fen()
# First make a prediction for each child state
for move in self.env.board.generate_legal_moves():
if move not in node.children.keys():
node.children[move] = Node(self.env.board, parent=node)
episode_end, reward = self.env.step(move)
if episode_end:
successor_state_value = 0
else:
successor_state_value = np.squeeze(
self.agent.model.predict(np.expand_dims(self.env.layer_board, axis=0))
)
child_value = reward + self.gamma * successor_state_value
node.update_child(move, child_value)
self.env.board.pop()
self.env.init_layer_board()
if not node.values:
node.values = [0]
while starttime + self.search_time > time.time() or sim_count < self.min_sim_count:
depth = 0
color = 1
node_rewards = []
# Select the best node from where to start MCTS
while node.children:
node, move = node.select(color=color)
if not move:
# No move means that the node selects itself, not a child node.
break
else:
depth += 1
color = color * -1 # switch color
episode_end, reward = self.env.step(move) # Update the environment to reflect the node
node_rewards.append(reward)
# Check best node is terminal
if self.env.board.result() == "1-0" and depth == 1: # -> Direct win for white, no need for mcts.
self.env.board.pop()
self.env.init_layer_board()
node.update(1)
node = node.parent
return node
elif episode_end: # -> if the explored tree leads to a terminal state, simulate from root.
while node.parent:
self.env.board.pop()
self.env.init_layer_board()
node = node.parent
break
else:
continue
# Expand the game tree with a simulation
Returns, move = node.simulate(self.agent.fixed_model,
self.env,
temperature=self.temperature,
depth=0)
self.env.init_layer_board()
if move not in node.children.keys():
node.children[move] = Node(self.env.board, parent=node)
node.update_child(move, Returns)
# Return to root node and backpropagate Returns
while node.parent:
latest_reward = node_rewards.pop(-1)
Returns = latest_reward + self.gamma * Returns
node.update(Returns)
node = node.parent
self.env.board.pop()
self.env.init_layer_board()
sim_count += 1
board_out = self.env.board.fen()
assert board_in == board_out
return node
| return np.exp(x / temperature) / np.sum(np.exp(x / temperature)) | identifier_body |
learn.py | import numpy as np
import time
from RLC.real_chess.tree import Node
import math
import gc
def softmax(x, temperature=1):
return np.exp(x / temperature) / np.sum(np.exp(x / temperature))
def sigmoid(x):
return 1 / (1 + math.exp(-x))
class TD_search(object):
def __init__(self, env, agent, gamma=0.9, search_time=1, memsize=2000, batch_size=256, temperature=1):
"""
Chess algorithm that combines bootstrapped monte carlo tree search with Q Learning
Args:
env: RLC chess environment
agent: RLC chess agent
gamma: discount factor
search_time: maximum time spent doing tree search
memsize: Amount of training samples to keep in-memory
batch_size: Size of the training batches
temperature: softmax temperature for mcts
"""
self.env = env
self.agent = agent
self.tree = Node(self.env)
self.gamma = gamma
self.memsize = memsize
self.batch_size = batch_size
self.temperature = temperature
self.reward_trace = [] # Keeps track of the rewards
self.piece_balance_trace = [] # Keep track of the material value on the board
self.ready = False # Whether to start training
self.search_time = search_time
self.min_sim_count = 10
self.mem_state = np.zeros(shape=(1, 8, 8, 8))
self.mem_sucstate = np.zeros(shape=(1, 8, 8, 8))
self.mem_reward = np.zeros(shape=(1))
self.mem_error = np.zeros(shape=(1))
self.mem_episode_active = np.ones(shape=(1))
def learn(self, iters=40, c=5, timelimit_seconds=3600, maxiter=80):
"""
Start Reinforcement Learning Algorithm
Args:
iters: maximum amount of iterations to train
c: model update rate (once every C games)
timelimit_seconds: maximum training time
maxiter: Maximum duration of a game, in halfmoves
Returns:
"""
starttime = time.time()
for k in range(iters):
self.env.reset()
if k % c == 0:
self.agent.fix_model()
print("iter", k)
if k > c:
self.ready = True
self.play_game(k, maxiter=maxiter)
if starttime + timelimit_seconds < time.time():
break
return self.env.board
def play_game(self, k, maxiter=80):
"""
Play a chess game and learn from it
Args:
k: the play iteration number
maxiter: maximum duration of the game (halfmoves)
Returns:
board: Chess environment on terminal state
"""
episode_end = False
turncount = 0
tree = Node(self.env.board, gamma=self.gamma) # Initialize the game tree
# Play a game of chess
while not episode_end:
state = np.expand_dims(self.env.layer_board.copy(), axis=0)
state_value = self.agent.predict(state)
# White's turn involves tree-search
if self.env.board.turn:
# Do a Monte Carlo Tree Search after game iteration k
start_mcts_after = -1
if k > start_mcts_after:
tree = self.mcts(tree)
# Step the best move
max_move = None
max_value = np.NINF
for move, child in tree.children.items():
sampled_value = np.mean(child.values)
if sampled_value > max_value:
max_value = sampled_value
max_move = move
else:
max_move = np.random.choice([move for move in self.env.board.generate_legal_moves()])
# Black's turn is myopic
else:
max_move = None
max_value = np.NINF
for move in self.env.board.generate_legal_moves():
self.env.step(move)
if self.env.board.result() == "0-1":
max_move = move
self.env.board.pop()
self.env.init_layer_board()
break
successor_state_value_opponent = self.env.opposing_agent.predict(
np.expand_dims(self.env.layer_board, axis=0))
if successor_state_value_opponent > max_value:
max_move = move
max_value = successor_state_value_opponent
self.env.board.pop()
self.env.init_layer_board()
if not (self.env.board.turn and max_move not in tree.children.keys()) or not k > start_mcts_after:
tree.children[max_move] = Node(gamma=0.9, parent=tree)
episode_end, reward = self.env.step(max_move)
tree = tree.children[max_move]
tree.parent = None
gc.collect()
sucstate = np.expand_dims(self.env.layer_board, axis=0)
new_state_value = self.agent.predict(sucstate)
error = reward + self.gamma * new_state_value - state_value
error = np.float(np.squeeze(error))
turncount += 1
if turncount > maxiter and not episode_end:
episode_end = True
episode_active = 0 if episode_end else 1
# construct training sample state, prediction, error
self.mem_state = np.append(self.mem_state, state, axis=0)
self.mem_reward = np.append(self.mem_reward, reward)
self.mem_sucstate = np.append(self.mem_sucstate, sucstate, axis=0)
self.mem_error = np.append(self.mem_error, error)
self.reward_trace = np.append(self.reward_trace, reward)
self.mem_episode_active = np.append(self.mem_episode_active, episode_active)
if self.mem_state.shape[0] > self.memsize:
self.mem_state = self.mem_state[1:]
self.mem_reward = self.mem_reward[1:]
self.mem_sucstate = self.mem_sucstate[1:]
self.mem_error = self.mem_error[1:]
self.mem_episode_active = self.mem_episode_active[1:]
gc.collect()
if turncount % 10 == 0:
self.update_agent()
piece_balance = self.env.get_material_value()
self.piece_balance_trace.append(piece_balance)
print("game ended with result", reward, "and material balance", piece_balance, "in", turncount, "halfmoves")
return self.env.board
def update_agent(self):
"""
Update the Agent with TD learning
Returns:
None
"""
if self.ready:
choice_indices, states, rewards, sucstates, episode_active = self.get_minibatch()
td_errors = self.agent.TD_update(states, rewards, sucstates, episode_active, gamma=self.gamma)
self.mem_error[choice_indices.tolist()] = td_errors | Args:
prioritized:
Returns:
"""
if prioritized:
sampling_priorities = np.abs(self.mem_error) + 1e-9
else:
sampling_priorities = np.ones(shape=self.mem_error.shape)
sampling_probs = sampling_priorities / np.sum(sampling_priorities)
sample_indices = [x for x in range(self.mem_state.shape[0])]
choice_indices = np.random.choice(sample_indices,
min(self.mem_state.shape[0],
self.batch_size),
p=np.squeeze(sampling_probs),
replace=False
)
states = self.mem_state[choice_indices]
rewards = self.mem_reward[choice_indices]
sucstates = self.mem_sucstate[choice_indices]
episode_active = self.mem_episode_active[choice_indices]
return choice_indices, states, rewards, sucstates, episode_active
def mcts(self, node):
"""
Run Monte Carlo Tree Search
Args:
node: A game state node object
Returns:
the node with playout sims
"""
starttime = time.time()
sim_count = 0
board_in = self.env.board.fen()
# First make a prediction for each child state
for move in self.env.board.generate_legal_moves():
if move not in node.children.keys():
node.children[move] = Node(self.env.board, parent=node)
episode_end, reward = self.env.step(move)
if episode_end:
successor_state_value = 0
else:
successor_state_value = np.squeeze(
self.agent.model.predict(np.expand_dims(self.env.layer_board, axis=0))
)
child_value = reward + self.gamma * successor_state_value
node.update_child(move, child_value)
self.env.board.pop()
self.env.init_layer_board()
if not node.values:
node.values = [0]
while starttime + self.search_time > time.time() or sim_count < self.min_sim_count:
depth = 0
color = 1
node_rewards = []
# Select the best node from where to start MCTS
while node.children:
node, move = node.select(color=color)
if not move:
# No move means that the node selects itself, not a child node.
break
else:
depth += 1
color = color * -1 # switch color
episode_end, reward = self.env.step(move) # Update the environment to reflect the node
node_rewards.append(reward)
# Check best node is terminal
if self.env.board.result() == "1-0" and depth == 1: # -> Direct win for white, no need for mcts.
self.env.board.pop()
self.env.init_layer_board()
node.update(1)
node = node.parent
return node
elif episode_end: # -> if the explored tree leads to a terminal state, simulate from root.
while node.parent:
self.env.board.pop()
self.env.init_layer_board()
node = node.parent
break
else:
continue
# Expand the game tree with a simulation
Returns, move = node.simulate(self.agent.fixed_model,
self.env,
temperature=self.temperature,
depth=0)
self.env.init_layer_board()
if move not in node.children.keys():
node.children[move] = Node(self.env.board, parent=node)
node.update_child(move, Returns)
# Return to root node and backpropagate Returns
while node.parent:
latest_reward = node_rewards.pop(-1)
Returns = latest_reward + self.gamma * Returns
node.update(Returns)
node = node.parent
self.env.board.pop()
self.env.init_layer_board()
sim_count += 1
board_out = self.env.board.fen()
assert board_in == board_out
return node |
def get_minibatch(self, prioritized=True):
"""
Get a mini batch of experience | random_line_split |
learn.py | import numpy as np
import time
from RLC.real_chess.tree import Node
import math
import gc
def softmax(x, temperature=1):
return np.exp(x / temperature) / np.sum(np.exp(x / temperature))
def | (x):
return 1 / (1 + math.exp(-x))
class TD_search(object):
def __init__(self, env, agent, gamma=0.9, search_time=1, memsize=2000, batch_size=256, temperature=1):
"""
Chess algorithm that combines bootstrapped monte carlo tree search with Q Learning
Args:
env: RLC chess environment
agent: RLC chess agent
gamma: discount factor
search_time: maximum time spent doing tree search
memsize: Amount of training samples to keep in-memory
batch_size: Size of the training batches
temperature: softmax temperature for mcts
"""
self.env = env
self.agent = agent
self.tree = Node(self.env)
self.gamma = gamma
self.memsize = memsize
self.batch_size = batch_size
self.temperature = temperature
self.reward_trace = [] # Keeps track of the rewards
self.piece_balance_trace = [] # Keep track of the material value on the board
self.ready = False # Whether to start training
self.search_time = search_time
self.min_sim_count = 10
self.mem_state = np.zeros(shape=(1, 8, 8, 8))
self.mem_sucstate = np.zeros(shape=(1, 8, 8, 8))
self.mem_reward = np.zeros(shape=(1))
self.mem_error = np.zeros(shape=(1))
self.mem_episode_active = np.ones(shape=(1))
def learn(self, iters=40, c=5, timelimit_seconds=3600, maxiter=80):
"""
Start Reinforcement Learning Algorithm
Args:
iters: maximum amount of iterations to train
c: model update rate (once every C games)
timelimit_seconds: maximum training time
maxiter: Maximum duration of a game, in halfmoves
Returns:
"""
starttime = time.time()
for k in range(iters):
self.env.reset()
if k % c == 0:
self.agent.fix_model()
print("iter", k)
if k > c:
self.ready = True
self.play_game(k, maxiter=maxiter)
if starttime + timelimit_seconds < time.time():
break
return self.env.board
def play_game(self, k, maxiter=80):
"""
Play a chess game and learn from it
Args:
k: the play iteration number
maxiter: maximum duration of the game (halfmoves)
Returns:
board: Chess environment on terminal state
"""
episode_end = False
turncount = 0
tree = Node(self.env.board, gamma=self.gamma) # Initialize the game tree
# Play a game of chess
while not episode_end:
state = np.expand_dims(self.env.layer_board.copy(), axis=0)
state_value = self.agent.predict(state)
# White's turn involves tree-search
if self.env.board.turn:
# Do a Monte Carlo Tree Search after game iteration k
start_mcts_after = -1
if k > start_mcts_after:
tree = self.mcts(tree)
# Step the best move
max_move = None
max_value = np.NINF
for move, child in tree.children.items():
sampled_value = np.mean(child.values)
if sampled_value > max_value:
max_value = sampled_value
max_move = move
else:
max_move = np.random.choice([move for move in self.env.board.generate_legal_moves()])
# Black's turn is myopic
else:
max_move = None
max_value = np.NINF
for move in self.env.board.generate_legal_moves():
self.env.step(move)
if self.env.board.result() == "0-1":
max_move = move
self.env.board.pop()
self.env.init_layer_board()
break
successor_state_value_opponent = self.env.opposing_agent.predict(
np.expand_dims(self.env.layer_board, axis=0))
if successor_state_value_opponent > max_value:
max_move = move
max_value = successor_state_value_opponent
self.env.board.pop()
self.env.init_layer_board()
if not (self.env.board.turn and max_move not in tree.children.keys()) or not k > start_mcts_after:
tree.children[max_move] = Node(gamma=0.9, parent=tree)
episode_end, reward = self.env.step(max_move)
tree = tree.children[max_move]
tree.parent = None
gc.collect()
sucstate = np.expand_dims(self.env.layer_board, axis=0)
new_state_value = self.agent.predict(sucstate)
error = reward + self.gamma * new_state_value - state_value
error = np.float(np.squeeze(error))
turncount += 1
if turncount > maxiter and not episode_end:
episode_end = True
episode_active = 0 if episode_end else 1
# construct training sample state, prediction, error
self.mem_state = np.append(self.mem_state, state, axis=0)
self.mem_reward = np.append(self.mem_reward, reward)
self.mem_sucstate = np.append(self.mem_sucstate, sucstate, axis=0)
self.mem_error = np.append(self.mem_error, error)
self.reward_trace = np.append(self.reward_trace, reward)
self.mem_episode_active = np.append(self.mem_episode_active, episode_active)
if self.mem_state.shape[0] > self.memsize:
self.mem_state = self.mem_state[1:]
self.mem_reward = self.mem_reward[1:]
self.mem_sucstate = self.mem_sucstate[1:]
self.mem_error = self.mem_error[1:]
self.mem_episode_active = self.mem_episode_active[1:]
gc.collect()
if turncount % 10 == 0:
self.update_agent()
piece_balance = self.env.get_material_value()
self.piece_balance_trace.append(piece_balance)
print("game ended with result", reward, "and material balance", piece_balance, "in", turncount, "halfmoves")
return self.env.board
def update_agent(self):
"""
Update the Agent with TD learning
Returns:
None
"""
if self.ready:
choice_indices, states, rewards, sucstates, episode_active = self.get_minibatch()
td_errors = self.agent.TD_update(states, rewards, sucstates, episode_active, gamma=self.gamma)
self.mem_error[choice_indices.tolist()] = td_errors
def get_minibatch(self, prioritized=True):
"""
Get a mini batch of experience
Args:
prioritized:
Returns:
"""
if prioritized:
sampling_priorities = np.abs(self.mem_error) + 1e-9
else:
sampling_priorities = np.ones(shape=self.mem_error.shape)
sampling_probs = sampling_priorities / np.sum(sampling_priorities)
sample_indices = [x for x in range(self.mem_state.shape[0])]
choice_indices = np.random.choice(sample_indices,
min(self.mem_state.shape[0],
self.batch_size),
p=np.squeeze(sampling_probs),
replace=False
)
states = self.mem_state[choice_indices]
rewards = self.mem_reward[choice_indices]
sucstates = self.mem_sucstate[choice_indices]
episode_active = self.mem_episode_active[choice_indices]
return choice_indices, states, rewards, sucstates, episode_active
def mcts(self, node):
"""
Run Monte Carlo Tree Search
Args:
node: A game state node object
Returns:
the node with playout sims
"""
starttime = time.time()
sim_count = 0
board_in = self.env.board.fen()
# First make a prediction for each child state
for move in self.env.board.generate_legal_moves():
if move not in node.children.keys():
node.children[move] = Node(self.env.board, parent=node)
episode_end, reward = self.env.step(move)
if episode_end:
successor_state_value = 0
else:
successor_state_value = np.squeeze(
self.agent.model.predict(np.expand_dims(self.env.layer_board, axis=0))
)
child_value = reward + self.gamma * successor_state_value
node.update_child(move, child_value)
self.env.board.pop()
self.env.init_layer_board()
if not node.values:
node.values = [0]
while starttime + self.search_time > time.time() or sim_count < self.min_sim_count:
depth = 0
color = 1
node_rewards = []
# Select the best node from where to start MCTS
while node.children:
node, move = node.select(color=color)
if not move:
# No move means that the node selects itself, not a child node.
break
else:
depth += 1
color = color * -1 # switch color
episode_end, reward = self.env.step(move) # Update the environment to reflect the node
node_rewards.append(reward)
# Check best node is terminal
if self.env.board.result() == "1-0" and depth == 1: # -> Direct win for white, no need for mcts.
self.env.board.pop()
self.env.init_layer_board()
node.update(1)
node = node.parent
return node
elif episode_end: # -> if the explored tree leads to a terminal state, simulate from root.
while node.parent:
self.env.board.pop()
self.env.init_layer_board()
node = node.parent
break
else:
continue
# Expand the game tree with a simulation
Returns, move = node.simulate(self.agent.fixed_model,
self.env,
temperature=self.temperature,
depth=0)
self.env.init_layer_board()
if move not in node.children.keys():
node.children[move] = Node(self.env.board, parent=node)
node.update_child(move, Returns)
# Return to root node and backpropagate Returns
while node.parent:
latest_reward = node_rewards.pop(-1)
Returns = latest_reward + self.gamma * Returns
node.update(Returns)
node = node.parent
self.env.board.pop()
self.env.init_layer_board()
sim_count += 1
board_out = self.env.board.fen()
assert board_in == board_out
return node
| sigmoid | identifier_name |
main.py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
from StringIO import StringIO
from odoo import http, _
from odoo.http import content_disposition, request
from odoo.exceptions import UserError, AccessError
from odoo.addons.web_studio.controllers import export
class WebStudioController(http.Controller):
@http.route('/web_studio/init', type='json', auth='user')
def studio_init(self):
return {
'dbuuid': request.env['ir.config_parameter'].get_param('database.uuid'),
'multi_lang': bool(request.env['res.lang'].search_count([('code', '!=', 'en_US')])),
}
@http.route('/web_studio/chatter_allowed', type='json', auth='user')
def is_chatter_allowed(self, model):
""" Returns True iff a chatter can be activated on the model's form views, i.e. if
- it is a custom model (since we can make it inherit from mail.thread), or
- it already inherits from mail.thread.
"""
Model = request.env[model]
return Model._custom or isinstance(Model, type(request.env['mail.thread']))
@http.route('/web_studio/get_studio_action', type='json', auth='user')
def get_studio_action(self, action_name, model, view_id=None, view_type=None):
view_type = 'tree' if view_type == 'list' else view_type # list is stored as tree in db
model = request.env['ir.model'].search([('model', '=', model)], limit=1)
action = None
if hasattr(self, '_get_studio_action_' + action_name):
action = getattr(self, '_get_studio_action_' + action_name)(model, view_id=view_id, view_type=view_type)
return action
def _get_studio_action_acl(self, model, **kwargs):
return {
'name': _('Access Control Lists'),
'type': 'ir.actions.act_window',
'res_model': 'ir.model.access',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.id},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new access control list.
</p>
""",
}
def _get_studio_action_automations(self, model, **kwargs):
return {
'name': _('Automated Actions'),
'type': 'ir.actions.act_window',
'res_model': 'base.action.rule',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.id},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new automated action.
</p>
""",
}
def | (self, model, **kwargs):
return {
'name': _('Search Filters'),
'type': 'ir.actions.act_window',
'res_model': 'ir.filters',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.model}, # model_id is a Selection on ir.filters
'help': """ <p class="oe_view_nocontent_create">
Click to add a new filter.
</p>
""",
}
def _get_studio_action_reports(self, model, **kwargs):
return {
'name': _('Reports'),
'type': 'ir.actions.act_window',
'res_model': 'ir.actions.report.xml',
'views': [[False, 'kanban'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model': model.model},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new report.
</p>
""",
}
def _get_studio_action_translations(self, model, **kwargs):
""" Open a view for translating the field(s) of the record (model, id). """
domain = ['|', ('name', '=', model.model), ('name', 'ilike', model.model + ',')]
# search view + its inheritancies
views = request.env['ir.ui.view'].search([('model', '=', model.model)])
domain = ['|', '&', ('name', '=', 'ir.ui.view,arch_db'), ('res_id', 'in', views.ids)] + domain
def make_domain(fld, rec):
name = "%s,%s" % (fld.model_name, fld.name)
return ['&', ('res_id', '=', rec.id), ('name', '=', name)]
def insert_missing(fld, rec):
if not fld.translate:
return []
if fld.related:
try:
# traverse related fields up to their data source
while fld.related:
rec, fld = fld.traverse_related(rec)
if rec:
return ['|'] + domain + make_domain(fld, rec)
except AccessError:
return []
assert fld.translate and rec._name == fld.model_name
request.env['ir.translation'].insert_missing(fld, rec)
return []
# insert missing translations of views
for view in views:
for name, fld in view._fields.items():
domain += insert_missing(fld, view)
# insert missing translations of model, and extend domain for related fields
record = request.env[model.model].search([], limit=1)
if record:
for name, fld in record._fields.items():
domain += insert_missing(fld, record)
action = {
'name': _('Translate view'),
'type': 'ir.actions.act_window',
'res_model': 'ir.translation',
'view_mode': 'tree',
'views': [[request.env.ref('base.view_translation_dialog_tree').id, 'list']],
'target': 'current',
'domain': domain,
}
return action
@http.route('/web_studio/create_new_menu', type='json', auth='user')
def create_new_menu(self, name, model_id, is_app=False, parent_id=None, icon=None):
""" Create a new menu @name, linked to a new action associated to the model_id
@param is_app: if True, create an extra menu (app, without parent)
@param parent_id: the parent of the new menu.
To be set if is_app is False.
@param icon: the icon of the new app, like [icon, icon_color, background_color].
To be set if is_app is True.
"""
# create the action
model = request.env['ir.model'].browse(model_id)
new_action = request.env['ir.actions.act_window'].create({
'name': name,
'res_model': model.model,
'help': """
<p>
This is your new action ; by default, it contains a list view and a form view.
</p>
<p>
You can start customizing these screens by clicking on the Studio icon on the
top right corner (you can also customize this help message there).
</p>
""",
})
action_ref = 'ir.actions.act_window,' + str(new_action.id)
if is_app:
# create the menus (app menu + first submenu)
new_context = dict(request.context)
new_context.update({'ir.ui.menu.full_list': True}) # allows to create a menu without action
new_menu = request.env['ir.ui.menu'].with_context(new_context).create({
'name': name,
'web_icon': ','.join(icon),
'child_id': [(0, 0, {
'name': name,
'action': action_ref,
})]
})
else:
# create the submenu
new_menu = request.env['ir.ui.menu'].create({
'name': name,
'action': action_ref,
'parent_id': parent_id,
})
return {
'menu_id': new_menu.id,
'action_id': new_action.id,
}
@http.route('/web_studio/edit_action', type='json', auth='user')
def edit_action(self, action_type, action_id, args):
action_id = request.env[action_type].browse(action_id)
if action_id:
if 'groups_id' in args:
args['groups_id'] = [(6, 0, args['groups_id'])]
if 'view_mode' in args:
args['view_mode'] = args['view_mode'].replace('list', 'tree') # list is stored as tree in db
# Check that each views in view_mode exists or try to get default
view_ids = request.env['ir.ui.view'].search([('model', '=', action_id.res_model)])
view_types = [view_id.type for view_id in view_ids]
for view_type in args['view_mode'].split(','):
if view_type not in view_types:
try:
request.env[action_id.res_model].fields_view_get(view_type=view_type)
except UserError as e:
return e.name
# As view_ids has precedence on view_mode, we need to use them and resequence them
view_modes = args['view_mode'].split(',')
if action_id.view_ids:
missing_view_modes = [x for x in view_modes if x not in [y.view_mode for y in action_id.view_ids]]
for view_mode in missing_view_modes:
request.env['ir.actions.act_window.view'].create({'view_mode': view_mode, 'act_window_id': action_id.id})
for view_id in action_id.view_ids:
if view_id.view_mode in view_modes:
view_id.sequence = view_modes.index(view_id.view_mode)
view_xml_id = request.env['ir.model.data'].search([('model', '=', 'ir.actions.act_window.view'), ('res_id', '=', view_id.id)])
else:
view_id.unlink()
action_id.write(args)
return True
@http.route('/web_studio/set_another_view', type='json', auth='user')
def set_another_view(self, action_id, view_mode, view_id):
action_id = request.env['ir.actions.act_window'].browse(action_id)
window_view = request.env['ir.actions.act_window.view'].search([('view_mode', '=', view_mode), ('act_window_id', '=', action_id.id)])
if not window_view:
window_view = request.env['ir.actions.act_window.view'].create({'view_mode': view_mode, 'act_window_id': action_id.id})
window_view.view_id = view_id
return True
def _get_studio_view(self, view):
return request.env['ir.ui.view'].search([('inherit_id', '=', view.id), ('name', 'ilike', '%studio%customization%')], limit=1)
@http.route('/web_studio/get_studio_view_arch', type='json', auth='user')
def get_studio_view_arch(self, model, view_type, view_id=False):
view_type = 'tree' if view_type == 'list' else view_type # list is stored as tree in db
if not view_id:
# TOFIX: it's possibly not the used view ; see fields_get_view
# try to find the lowest priority matching ir.ui.view
view_id = request.env['ir.ui.view'].default_view(request.env[model]._name, view_type)
# We have to create a view with the default view if we want to customize it.
view = self._get_or_create_default_view(model, view_type, view_id)
studio_view = self._get_studio_view(view)
return {
'studio_view_id': studio_view and studio_view.id or False,
'studio_view_arch': studio_view and studio_view.arch_db or "<data/>",
}
@http.route('/web_studio/edit_view', type='json', auth='user')
def edit_view(self, view_id, studio_view_arch, operations=None):
view = request.env['ir.ui.view'].browse(view_id)
studio_view = self._get_studio_view(view)
parser = etree.XMLParser(remove_blank_text=True)
arch = etree.parse(StringIO(studio_view_arch), parser).getroot()
for op in operations:
# Call the right operation handler
if 'node' in op:
op['node'] = self._preprocess_attrs(op['node'])
getattr(self, '_operation_%s' % (op['type']))(arch, op, view.model)
# Save or create changes into studio view, identifiable by xmlid
# Example for view id 42 of model crm.lead: web-studio_crm.lead-42
# TODO: if len(arch) == 0, delete the view
new_arch = etree.tostring(arch, encoding='utf-8', pretty_print=True)
if studio_view:
studio_view.arch_db = new_arch
else:
# We have to play with priorities. Consider the following:
# View Base: <field name="x"/><field name="y"/>
# View Standard inherits Base: <field name="x" position="after"><field name="z"/></field>
# View Custo inherits Base: <field name="x" position="after"><field name="x2"/></field>
# We want x,x2,z,y, because that's what we did in studio, but the order of xpath
# resolution is sequence,name, not sequence,id. Because "Custo" < "Standard", it
# would first resolve in x,x2,y, then resolve "Standard" with x,z,x2,y as result.
studio_view = request.env['ir.ui.view'].create({
'type': view.type,
'model': view.model,
'inherit_id': view.id,
'mode': 'extension',
'priority': 99,
'arch': new_arch,
'name': "Odoo Studio: %s customization" % (view.name),
})
fields_view = request.env[view.model].with_context({'studio': True}).fields_view_get(view.id, view.type)
return fields_view
@http.route('/web_studio/edit_view_arch', type='json', auth='user')
def edit_view_arch(self, view_id, view_arch):
view = request.env['ir.ui.view'].browse(view_id)
if view:
view.write({'arch': view_arch})
if view.model:
try:
fields_view = request.env[view.model].with_context({'studio': True}).fields_view_get(view.id, view.type)
return fields_view
except Exception:
return False
@http.route('/web_studio/export', type='http', auth='user')
def export(self, token):
""" Exports a zip file containing the 'studio_customization' module
gathering all customizations done with Studio (customizations of
existing apps and freshly created apps).
"""
studio_module = request.env['ir.module.module'].get_studio_module()
data = request.env['ir.model.data'].search([('studio', '=', True)])
content = export.generate_archive(studio_module, data)
return request.make_response(content, headers=[
('Content-Disposition', content_disposition('customizations.zip')),
('Content-Type', 'application/zip'),
('Content-Length', len(content)),
], cookies={'fileToken': token})
def _preprocess_attrs(self, node):
# The js can't give us the field name, it only has the field id
if node['tag'] == 'field' and 'id' in node['attrs']:
node['attrs']['name'] = request.env['ir.model.fields'].browse(node['attrs'].pop('id')).name
return node
def _get_or_create_default_view(self, model, view_type, view_id=False):
View = request.env['ir.ui.view']
# If we have no view_id to inherit from, it's because we are adding
# fields to the default view of a new model. We will materialize the
# default view as a true view so we can keep using our xpath mechanism.
if view_id:
view = View.browse(view_id)
else:
arch = request.env[model].fields_view_get(view_id, view_type)['arch']
view = View.create({
'type': view_type,
'model': model,
'arch': arch,
'name': "Default %s view for %s" % (view_type, model),
})
return view
def _node_to_expr(self, node):
if not node.get('attrs') and node.get('xpath_info'):
# Format of expr is /form/tag1[]/tag2[]/[...]/tag[]
expr = ''.join(['/%s[%s]' % (parent['tag'], parent['indice']) for parent in node.get('xpath_info')])
else:
# Format of expr is //tag[@attr1_name=attr1_value][@attr2_name=attr2_value][...]
expr = '//' + node['tag'] + ''.join(['[@%s=\'%s\']' % (k, v) for k, v in node.get('attrs', {}).items()])
# Special case when we have <label/><div/> instead of <field>
# TODO: This is very naive, couldn't the js detect such a situation and
# tell us to anchor the xpath on another element ?
if node['tag'] == 'label':
expr = expr + '/following-sibling::div'
return expr
# If we already have an xpath on this element, use it, otherwise, create a new one.
def _get_xpath_node(self, arch, operation):
expr = self._node_to_expr(operation['target'])
position = operation['position']
xpath_node = arch.find('xpath[@expr="%s"][@position="%s"]' % (expr, position))
if xpath_node is None: # bool(node) == False if node has no children
xpath_node = etree.SubElement(arch, 'xpath', {
'expr': expr,
'position': position
})
return xpath_node
def _operation_remove(self, arch, operation, model=None):
expr = self._node_to_expr(operation['target'])
# We have to create a brand new xpath to remove this field from the view.
# TODO: Sometimes, we have to delete more stuff than just a single tag !
etree.SubElement(arch, 'xpath', {
'expr': expr,
'position': 'replace'
})
def _operation_add(self, arch, operation, model):
node = operation['node']
xpath_node = self._get_xpath_node(arch, operation)
# Create the actual node inside the xpath. It needs to be the first
# child of the xpath to respect the order in which they were added.
xml_node = etree.Element(node['tag'], node.get('attrs'))
if node['tag'] == 'notebook':
# FIXME take the same randomString as parent
name = 'studio_page_' + node['attrs']['name'].split('_')[2]
xml_node_page = etree.Element('page', {'string': 'New Page', 'name': name})
xml_node.insert(0, xml_node_page)
elif node['tag'] == 'group':
xml_node_page_right = etree.Element('group', {'string': 'Right Title', 'name': node['attrs']['name'] + '_right'})
xml_node_page_left = etree.Element('group', {'string': 'Left Title', 'name': node['attrs']['name'] + '_left'})
xml_node.insert(0, xml_node_page_right)
xml_node.insert(0, xml_node_page_left)
elif node['tag'] == 'button':
# To create a stat button, we need
# - a many2one field (1) that points to this model
# - a field (2) that counts the number of records associated with the current record
# - an action to jump in (3) with the many2one field (1) as domain/context
#
# (1) [button_field] the many2one field
# (2) [button_count_field] is a non-stored computed field (to always have the good value in the stat button, if access rights)
# (3) [button_action] an act_window action to jump in the related model
button_field = request.env['ir.model.fields'].browse(node['field'])
button_count_field, button_action = self._get_or_create_fields_for_button(model, button_field, node['string'])
# the XML looks like <button> <field/> </button : a element `field` needs to be inserted inside the button
xml_node_field = etree.Element('field', {'widget': 'statinfo', 'name': button_count_field.name, 'string': node['string'] or button_count_field.field_description})
xml_node.insert(0, xml_node_field)
xml_node.attrib['type'] = 'action'
xml_node.attrib['name'] = str(button_action.id)
else:
xml_node.text = node.get('text')
xpath_node.insert(0, xml_node)
def _get_or_create_fields_for_button(self, model, field, button_name):
""" Returns the button_count_field and the button_action link to a stat button.
@param field: a many2one field
"""
if field.ttype != 'many2one' or field.relation != model:
raise UserError(_('The related field of a button has to be a many2one to %s.' % model))
model = request.env['ir.model'].search([('model', '=', model)], limit=1)
# There is a counter on the button ; as the related field is a many2one, we need
# to create a new computed field that counts the number of records in the one2many
button_count_field_name = 'x_%s_count' % field.name
button_count_field = request.env['ir.model.fields'].search([('name', '=', button_count_field_name), ('model_id', '=', model.id)])
if not button_count_field:
compute_function = """
results = self.env['%(model)s'].read_group([('%(field)s', 'in', self.ids)], '%(field)s', '%(field)s')
dic = {}
for x in results: dic[x['%(field)s'][0]] = x['%(field)s_count']
for record in self: record['%(count_field)s'] = dic.get(record.id, 0)
""" % {
'model': field.model,
'field': field.name,
'count_field': button_count_field_name,
}
button_count_field = request.env['ir.model.fields'].create({
'name': button_count_field_name,
'field_description': '%s count' % field.field_description,
'model': model.model,
'model_id': model.id,
'ttype': 'integer',
'store': False,
'compute': compute_function.replace(' ', ''), # remove indentation for safe_eval
})
# The action could already exist but we don't want to recreate one each time
button_action_domain = "[('%s', '=', active_id)]" % (field.name)
button_action_context = "{'search_default_%s': active_id,'default_%s': active_id}" % (field.name, field.name)
button_action = request.env['ir.actions.act_window'].search([
('name', '=', button_name), ('res_model', '=', field.model),
('domain', '=', button_action_domain), ('context', '=', button_action_context),
])
if not button_action:
# Link the button with an associated act_window
button_action = request.env['ir.actions.act_window'].create({
'name': button_name,
'res_model': field.model,
'view_mode': 'tree,form',
'view_type': 'form',
'domain': button_action_domain,
'context': button_action_context,
})
return button_count_field, button_action
def _operation_move(self, arch, operation, model=None):
self._operation_remove(arch, dict(operation, target=operation['node']))
self._operation_add(arch, operation)
# Create or update node for each attribute
def _operation_attributes(self, arch, operation, model=None):
ir_model_data = request.env['ir.model.data']
new_attrs = operation['new_attrs']
if (new_attrs.get('groups')):
eval_attr = []
for many2many_value in new_attrs['groups']:
group_xmlid = ir_model_data.search([
('model', '=', 'res.groups'),
('res_id', '=', many2many_value)])
eval_attr.append(group_xmlid.complete_name)
eval_attr = ",".join(eval_attr)
new_attrs['groups'] = eval_attr
else:
# TOFIX
new_attrs['groups'] = ''
xpath_node = self._get_xpath_node(arch, operation)
for key, new_attr in new_attrs.iteritems():
xml_node = xpath_node.find('attribute[@name="%s"]' % (key))
if xml_node is None:
xml_node = etree.Element('attribute', {'name': key})
xml_node.text = new_attr
xpath_node.insert(0, xml_node)
else:
xml_node.text = new_attr
def _operation_buttonbox(self, arch, operation, model=None):
studio_view_arch = arch # The actual arch is the studio view arch
# Get the arch of the form view with inherited views applied
arch = request.env[model].fields_view_get(view_type='form')['arch']
parser = etree.XMLParser(remove_blank_text=True)
arch = etree.parse(StringIO(arch), parser).getroot()
# Create xpath to put the buttonbox as the first child of the sheet
if arch.find('sheet'):
sheet_node = arch.find('sheet')
if list(sheet_node): # Check if children exists
xpath_node = etree.SubElement(studio_view_arch, 'xpath', {
'expr': '//sheet/*[1]',
'position': 'before'
})
else:
xpath_node = etree.SubElement(studio_view_arch, 'xpath', {
'expr': '//sheet',
'position': 'inside'
})
# Create and insert the buttonbox node inside the xpath node
buttonbox_node = etree.Element('div', {'name': 'button_box', 'class': 'oe_button_box'})
xpath_node.append(buttonbox_node)
def _operation_chatter(self, arch, operation, model=None):
def _get_remove_field_op(arch, field_name):
return {
'type': 'remove',
'target': {
'tag': 'field',
'attrs': {
'name': field_name,
},
}
}
if not self.is_chatter_allowed(operation['model']):
# Chatter can only be activated form models that (can) inherit from mail.thread
return
# From this point, the model is either a custom model or inherits from mail.thread
model = request.env['ir.model'].search([('model', '=', operation['model'])])
if model.state == 'manual' and not model.mail_thread:
# Activate mail.thread inheritance on the custom model
model.write({'mail_thread': True})
# Remove message_ids and message_follower_ids if already defined in form view
if operation['remove_message_ids']:
self._operation_remove(arch, _get_remove_field_op(arch, 'message_ids'))
if operation['remove_follower_ids']:
self._operation_remove(arch, _get_remove_field_op(arch, 'message_follower_ids'))
xpath_node = etree.SubElement(arch, 'xpath', {
'expr': '//sheet',
'position': 'after',
})
chatter_node = etree.Element('div', {'class': 'oe_chatter'})
thread_node = etree.Element('field', {'name': 'message_ids', 'widget': 'mail_thread'})
follower_node = etree.Element('field', {'name': 'message_follower_ids', 'widget': 'mail_followers'})
chatter_node.append(follower_node)
chatter_node.append(thread_node)
xpath_node.append(chatter_node)
| _get_studio_action_filters | identifier_name |
main.py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
from StringIO import StringIO
from odoo import http, _
from odoo.http import content_disposition, request
from odoo.exceptions import UserError, AccessError
from odoo.addons.web_studio.controllers import export
class WebStudioController(http.Controller):
@http.route('/web_studio/init', type='json', auth='user')
def studio_init(self):
return {
'dbuuid': request.env['ir.config_parameter'].get_param('database.uuid'),
'multi_lang': bool(request.env['res.lang'].search_count([('code', '!=', 'en_US')])),
}
@http.route('/web_studio/chatter_allowed', type='json', auth='user')
def is_chatter_allowed(self, model):
""" Returns True iff a chatter can be activated on the model's form views, i.e. if
- it is a custom model (since we can make it inherit from mail.thread), or
- it already inherits from mail.thread.
"""
Model = request.env[model]
return Model._custom or isinstance(Model, type(request.env['mail.thread']))
@http.route('/web_studio/get_studio_action', type='json', auth='user')
def get_studio_action(self, action_name, model, view_id=None, view_type=None):
view_type = 'tree' if view_type == 'list' else view_type # list is stored as tree in db
model = request.env['ir.model'].search([('model', '=', model)], limit=1)
action = None
if hasattr(self, '_get_studio_action_' + action_name):
action = getattr(self, '_get_studio_action_' + action_name)(model, view_id=view_id, view_type=view_type)
return action
def _get_studio_action_acl(self, model, **kwargs):
return {
'name': _('Access Control Lists'),
'type': 'ir.actions.act_window',
'res_model': 'ir.model.access',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.id},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new access control list.
</p>
""",
}
def _get_studio_action_automations(self, model, **kwargs):
return {
'name': _('Automated Actions'),
'type': 'ir.actions.act_window',
'res_model': 'base.action.rule',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.id},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new automated action.
</p>
""",
}
def _get_studio_action_filters(self, model, **kwargs):
return {
'name': _('Search Filters'),
'type': 'ir.actions.act_window',
'res_model': 'ir.filters',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.model}, # model_id is a Selection on ir.filters
'help': """ <p class="oe_view_nocontent_create">
Click to add a new filter.
</p>
""",
}
def _get_studio_action_reports(self, model, **kwargs):
return {
'name': _('Reports'),
'type': 'ir.actions.act_window',
'res_model': 'ir.actions.report.xml',
'views': [[False, 'kanban'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model': model.model},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new report.
</p>
""",
}
def _get_studio_action_translations(self, model, **kwargs):
""" Open a view for translating the field(s) of the record (model, id). """
domain = ['|', ('name', '=', model.model), ('name', 'ilike', model.model + ',')]
# search view + its inheritancies
views = request.env['ir.ui.view'].search([('model', '=', model.model)])
domain = ['|', '&', ('name', '=', 'ir.ui.view,arch_db'), ('res_id', 'in', views.ids)] + domain
def make_domain(fld, rec):
name = "%s,%s" % (fld.model_name, fld.name)
return ['&', ('res_id', '=', rec.id), ('name', '=', name)]
def insert_missing(fld, rec):
if not fld.translate:
return []
if fld.related:
try:
# traverse related fields up to their data source
while fld.related:
rec, fld = fld.traverse_related(rec)
if rec:
return ['|'] + domain + make_domain(fld, rec)
except AccessError:
return []
assert fld.translate and rec._name == fld.model_name
request.env['ir.translation'].insert_missing(fld, rec)
return []
# insert missing translations of views
for view in views:
for name, fld in view._fields.items():
domain += insert_missing(fld, view)
# insert missing translations of model, and extend domain for related fields
record = request.env[model.model].search([], limit=1)
if record:
for name, fld in record._fields.items():
domain += insert_missing(fld, record)
action = {
'name': _('Translate view'),
'type': 'ir.actions.act_window',
'res_model': 'ir.translation',
'view_mode': 'tree',
'views': [[request.env.ref('base.view_translation_dialog_tree').id, 'list']],
'target': 'current',
'domain': domain,
}
return action
@http.route('/web_studio/create_new_menu', type='json', auth='user')
def create_new_menu(self, name, model_id, is_app=False, parent_id=None, icon=None):
""" Create a new menu @name, linked to a new action associated to the model_id
@param is_app: if True, create an extra menu (app, without parent)
@param parent_id: the parent of the new menu.
To be set if is_app is False.
@param icon: the icon of the new app, like [icon, icon_color, background_color].
To be set if is_app is True.
"""
# create the action
model = request.env['ir.model'].browse(model_id)
new_action = request.env['ir.actions.act_window'].create({
'name': name,
'res_model': model.model,
'help': """
<p>
This is your new action ; by default, it contains a list view and a form view.
</p>
<p>
You can start customizing these screens by clicking on the Studio icon on the
top right corner (you can also customize this help message there).
</p>
""",
})
action_ref = 'ir.actions.act_window,' + str(new_action.id)
if is_app:
# create the menus (app menu + first submenu)
new_context = dict(request.context)
new_context.update({'ir.ui.menu.full_list': True}) # allows to create a menu without action
new_menu = request.env['ir.ui.menu'].with_context(new_context).create({
'name': name,
'web_icon': ','.join(icon),
'child_id': [(0, 0, {
'name': name,
'action': action_ref,
})]
})
else:
# create the submenu
new_menu = request.env['ir.ui.menu'].create({
'name': name,
'action': action_ref,
'parent_id': parent_id,
})
return {
'menu_id': new_menu.id,
'action_id': new_action.id,
}
@http.route('/web_studio/edit_action', type='json', auth='user')
def edit_action(self, action_type, action_id, args):
action_id = request.env[action_type].browse(action_id)
if action_id:
if 'groups_id' in args:
args['groups_id'] = [(6, 0, args['groups_id'])]
if 'view_mode' in args:
args['view_mode'] = args['view_mode'].replace('list', 'tree') # list is stored as tree in db
# Check that each views in view_mode exists or try to get default
view_ids = request.env['ir.ui.view'].search([('model', '=', action_id.res_model)])
view_types = [view_id.type for view_id in view_ids]
for view_type in args['view_mode'].split(','):
if view_type not in view_types:
try:
request.env[action_id.res_model].fields_view_get(view_type=view_type)
except UserError as e:
return e.name
# As view_ids has precedence on view_mode, we need to use them and resequence them
view_modes = args['view_mode'].split(',')
if action_id.view_ids:
missing_view_modes = [x for x in view_modes if x not in [y.view_mode for y in action_id.view_ids]]
for view_mode in missing_view_modes:
request.env['ir.actions.act_window.view'].create({'view_mode': view_mode, 'act_window_id': action_id.id})
for view_id in action_id.view_ids:
if view_id.view_mode in view_modes:
view_id.sequence = view_modes.index(view_id.view_mode)
view_xml_id = request.env['ir.model.data'].search([('model', '=', 'ir.actions.act_window.view'), ('res_id', '=', view_id.id)])
else:
view_id.unlink()
action_id.write(args)
return True
@http.route('/web_studio/set_another_view', type='json', auth='user')
def set_another_view(self, action_id, view_mode, view_id):
action_id = request.env['ir.actions.act_window'].browse(action_id)
window_view = request.env['ir.actions.act_window.view'].search([('view_mode', '=', view_mode), ('act_window_id', '=', action_id.id)])
if not window_view:
window_view = request.env['ir.actions.act_window.view'].create({'view_mode': view_mode, 'act_window_id': action_id.id})
window_view.view_id = view_id
return True
def _get_studio_view(self, view):
return request.env['ir.ui.view'].search([('inherit_id', '=', view.id), ('name', 'ilike', '%studio%customization%')], limit=1)
@http.route('/web_studio/get_studio_view_arch', type='json', auth='user')
def get_studio_view_arch(self, model, view_type, view_id=False):
view_type = 'tree' if view_type == 'list' else view_type # list is stored as tree in db
if not view_id:
# TOFIX: it's possibly not the used view ; see fields_get_view
# try to find the lowest priority matching ir.ui.view
view_id = request.env['ir.ui.view'].default_view(request.env[model]._name, view_type)
# We have to create a view with the default view if we want to customize it.
view = self._get_or_create_default_view(model, view_type, view_id)
studio_view = self._get_studio_view(view)
return {
'studio_view_id': studio_view and studio_view.id or False,
'studio_view_arch': studio_view and studio_view.arch_db or "<data/>",
}
@http.route('/web_studio/edit_view', type='json', auth='user')
def edit_view(self, view_id, studio_view_arch, operations=None):
view = request.env['ir.ui.view'].browse(view_id)
studio_view = self._get_studio_view(view)
parser = etree.XMLParser(remove_blank_text=True)
arch = etree.parse(StringIO(studio_view_arch), parser).getroot()
for op in operations:
# Call the right operation handler
if 'node' in op:
op['node'] = self._preprocess_attrs(op['node'])
getattr(self, '_operation_%s' % (op['type']))(arch, op, view.model)
# Save or create changes into studio view, identifiable by xmlid
# Example for view id 42 of model crm.lead: web-studio_crm.lead-42
# TODO: if len(arch) == 0, delete the view
new_arch = etree.tostring(arch, encoding='utf-8', pretty_print=True)
if studio_view:
studio_view.arch_db = new_arch
else:
# We have to play with priorities. Consider the following:
# View Base: <field name="x"/><field name="y"/>
# View Standard inherits Base: <field name="x" position="after"><field name="z"/></field>
# View Custo inherits Base: <field name="x" position="after"><field name="x2"/></field>
# We want x,x2,z,y, because that's what we did in studio, but the order of xpath
# resolution is sequence,name, not sequence,id. Because "Custo" < "Standard", it | 'inherit_id': view.id,
'mode': 'extension',
'priority': 99,
'arch': new_arch,
'name': "Odoo Studio: %s customization" % (view.name),
})
fields_view = request.env[view.model].with_context({'studio': True}).fields_view_get(view.id, view.type)
return fields_view
@http.route('/web_studio/edit_view_arch', type='json', auth='user')
def edit_view_arch(self, view_id, view_arch):
view = request.env['ir.ui.view'].browse(view_id)
if view:
view.write({'arch': view_arch})
if view.model:
try:
fields_view = request.env[view.model].with_context({'studio': True}).fields_view_get(view.id, view.type)
return fields_view
except Exception:
return False
@http.route('/web_studio/export', type='http', auth='user')
def export(self, token):
""" Exports a zip file containing the 'studio_customization' module
gathering all customizations done with Studio (customizations of
existing apps and freshly created apps).
"""
studio_module = request.env['ir.module.module'].get_studio_module()
data = request.env['ir.model.data'].search([('studio', '=', True)])
content = export.generate_archive(studio_module, data)
return request.make_response(content, headers=[
('Content-Disposition', content_disposition('customizations.zip')),
('Content-Type', 'application/zip'),
('Content-Length', len(content)),
], cookies={'fileToken': token})
def _preprocess_attrs(self, node):
# The js can't give us the field name, it only has the field id
if node['tag'] == 'field' and 'id' in node['attrs']:
node['attrs']['name'] = request.env['ir.model.fields'].browse(node['attrs'].pop('id')).name
return node
def _get_or_create_default_view(self, model, view_type, view_id=False):
View = request.env['ir.ui.view']
# If we have no view_id to inherit from, it's because we are adding
# fields to the default view of a new model. We will materialize the
# default view as a true view so we can keep using our xpath mechanism.
if view_id:
view = View.browse(view_id)
else:
arch = request.env[model].fields_view_get(view_id, view_type)['arch']
view = View.create({
'type': view_type,
'model': model,
'arch': arch,
'name': "Default %s view for %s" % (view_type, model),
})
return view
def _node_to_expr(self, node):
if not node.get('attrs') and node.get('xpath_info'):
# Format of expr is /form/tag1[]/tag2[]/[...]/tag[]
expr = ''.join(['/%s[%s]' % (parent['tag'], parent['indice']) for parent in node.get('xpath_info')])
else:
# Format of expr is //tag[@attr1_name=attr1_value][@attr2_name=attr2_value][...]
expr = '//' + node['tag'] + ''.join(['[@%s=\'%s\']' % (k, v) for k, v in node.get('attrs', {}).items()])
# Special case when we have <label/><div/> instead of <field>
# TODO: This is very naive, couldn't the js detect such a situation and
# tell us to anchor the xpath on another element ?
if node['tag'] == 'label':
expr = expr + '/following-sibling::div'
return expr
# If we already have an xpath on this element, use it, otherwise, create a new one.
def _get_xpath_node(self, arch, operation):
expr = self._node_to_expr(operation['target'])
position = operation['position']
xpath_node = arch.find('xpath[@expr="%s"][@position="%s"]' % (expr, position))
if xpath_node is None: # bool(node) == False if node has no children
xpath_node = etree.SubElement(arch, 'xpath', {
'expr': expr,
'position': position
})
return xpath_node
def _operation_remove(self, arch, operation, model=None):
expr = self._node_to_expr(operation['target'])
# We have to create a brand new xpath to remove this field from the view.
# TODO: Sometimes, we have to delete more stuff than just a single tag !
etree.SubElement(arch, 'xpath', {
'expr': expr,
'position': 'replace'
})
def _operation_add(self, arch, operation, model):
node = operation['node']
xpath_node = self._get_xpath_node(arch, operation)
# Create the actual node inside the xpath. It needs to be the first
# child of the xpath to respect the order in which they were added.
xml_node = etree.Element(node['tag'], node.get('attrs'))
if node['tag'] == 'notebook':
# FIXME take the same randomString as parent
name = 'studio_page_' + node['attrs']['name'].split('_')[2]
xml_node_page = etree.Element('page', {'string': 'New Page', 'name': name})
xml_node.insert(0, xml_node_page)
elif node['tag'] == 'group':
xml_node_page_right = etree.Element('group', {'string': 'Right Title', 'name': node['attrs']['name'] + '_right'})
xml_node_page_left = etree.Element('group', {'string': 'Left Title', 'name': node['attrs']['name'] + '_left'})
xml_node.insert(0, xml_node_page_right)
xml_node.insert(0, xml_node_page_left)
elif node['tag'] == 'button':
# To create a stat button, we need
# - a many2one field (1) that points to this model
# - a field (2) that counts the number of records associated with the current record
# - an action to jump in (3) with the many2one field (1) as domain/context
#
# (1) [button_field] the many2one field
# (2) [button_count_field] is a non-stored computed field (to always have the good value in the stat button, if access rights)
# (3) [button_action] an act_window action to jump in the related model
button_field = request.env['ir.model.fields'].browse(node['field'])
button_count_field, button_action = self._get_or_create_fields_for_button(model, button_field, node['string'])
# the XML looks like <button> <field/> </button : a element `field` needs to be inserted inside the button
xml_node_field = etree.Element('field', {'widget': 'statinfo', 'name': button_count_field.name, 'string': node['string'] or button_count_field.field_description})
xml_node.insert(0, xml_node_field)
xml_node.attrib['type'] = 'action'
xml_node.attrib['name'] = str(button_action.id)
else:
xml_node.text = node.get('text')
xpath_node.insert(0, xml_node)
def _get_or_create_fields_for_button(self, model, field, button_name):
""" Returns the button_count_field and the button_action link to a stat button.
@param field: a many2one field
"""
if field.ttype != 'many2one' or field.relation != model:
raise UserError(_('The related field of a button has to be a many2one to %s.' % model))
model = request.env['ir.model'].search([('model', '=', model)], limit=1)
# There is a counter on the button ; as the related field is a many2one, we need
# to create a new computed field that counts the number of records in the one2many
button_count_field_name = 'x_%s_count' % field.name
button_count_field = request.env['ir.model.fields'].search([('name', '=', button_count_field_name), ('model_id', '=', model.id)])
if not button_count_field:
compute_function = """
results = self.env['%(model)s'].read_group([('%(field)s', 'in', self.ids)], '%(field)s', '%(field)s')
dic = {}
for x in results: dic[x['%(field)s'][0]] = x['%(field)s_count']
for record in self: record['%(count_field)s'] = dic.get(record.id, 0)
""" % {
'model': field.model,
'field': field.name,
'count_field': button_count_field_name,
}
button_count_field = request.env['ir.model.fields'].create({
'name': button_count_field_name,
'field_description': '%s count' % field.field_description,
'model': model.model,
'model_id': model.id,
'ttype': 'integer',
'store': False,
'compute': compute_function.replace(' ', ''), # remove indentation for safe_eval
})
# The action could already exist but we don't want to recreate one each time
button_action_domain = "[('%s', '=', active_id)]" % (field.name)
button_action_context = "{'search_default_%s': active_id,'default_%s': active_id}" % (field.name, field.name)
button_action = request.env['ir.actions.act_window'].search([
('name', '=', button_name), ('res_model', '=', field.model),
('domain', '=', button_action_domain), ('context', '=', button_action_context),
])
if not button_action:
# Link the button with an associated act_window
button_action = request.env['ir.actions.act_window'].create({
'name': button_name,
'res_model': field.model,
'view_mode': 'tree,form',
'view_type': 'form',
'domain': button_action_domain,
'context': button_action_context,
})
return button_count_field, button_action
def _operation_move(self, arch, operation, model=None):
self._operation_remove(arch, dict(operation, target=operation['node']))
self._operation_add(arch, operation)
# Create or update node for each attribute
def _operation_attributes(self, arch, operation, model=None):
ir_model_data = request.env['ir.model.data']
new_attrs = operation['new_attrs']
if (new_attrs.get('groups')):
eval_attr = []
for many2many_value in new_attrs['groups']:
group_xmlid = ir_model_data.search([
('model', '=', 'res.groups'),
('res_id', '=', many2many_value)])
eval_attr.append(group_xmlid.complete_name)
eval_attr = ",".join(eval_attr)
new_attrs['groups'] = eval_attr
else:
# TOFIX
new_attrs['groups'] = ''
xpath_node = self._get_xpath_node(arch, operation)
for key, new_attr in new_attrs.iteritems():
xml_node = xpath_node.find('attribute[@name="%s"]' % (key))
if xml_node is None:
xml_node = etree.Element('attribute', {'name': key})
xml_node.text = new_attr
xpath_node.insert(0, xml_node)
else:
xml_node.text = new_attr
def _operation_buttonbox(self, arch, operation, model=None):
studio_view_arch = arch # The actual arch is the studio view arch
# Get the arch of the form view with inherited views applied
arch = request.env[model].fields_view_get(view_type='form')['arch']
parser = etree.XMLParser(remove_blank_text=True)
arch = etree.parse(StringIO(arch), parser).getroot()
# Create xpath to put the buttonbox as the first child of the sheet
if arch.find('sheet'):
sheet_node = arch.find('sheet')
if list(sheet_node): # Check if children exists
xpath_node = etree.SubElement(studio_view_arch, 'xpath', {
'expr': '//sheet/*[1]',
'position': 'before'
})
else:
xpath_node = etree.SubElement(studio_view_arch, 'xpath', {
'expr': '//sheet',
'position': 'inside'
})
# Create and insert the buttonbox node inside the xpath node
buttonbox_node = etree.Element('div', {'name': 'button_box', 'class': 'oe_button_box'})
xpath_node.append(buttonbox_node)
def _operation_chatter(self, arch, operation, model=None):
def _get_remove_field_op(arch, field_name):
return {
'type': 'remove',
'target': {
'tag': 'field',
'attrs': {
'name': field_name,
},
}
}
if not self.is_chatter_allowed(operation['model']):
# Chatter can only be activated form models that (can) inherit from mail.thread
return
# From this point, the model is either a custom model or inherits from mail.thread
model = request.env['ir.model'].search([('model', '=', operation['model'])])
if model.state == 'manual' and not model.mail_thread:
# Activate mail.thread inheritance on the custom model
model.write({'mail_thread': True})
# Remove message_ids and message_follower_ids if already defined in form view
if operation['remove_message_ids']:
self._operation_remove(arch, _get_remove_field_op(arch, 'message_ids'))
if operation['remove_follower_ids']:
self._operation_remove(arch, _get_remove_field_op(arch, 'message_follower_ids'))
xpath_node = etree.SubElement(arch, 'xpath', {
'expr': '//sheet',
'position': 'after',
})
chatter_node = etree.Element('div', {'class': 'oe_chatter'})
thread_node = etree.Element('field', {'name': 'message_ids', 'widget': 'mail_thread'})
follower_node = etree.Element('field', {'name': 'message_follower_ids', 'widget': 'mail_followers'})
chatter_node.append(follower_node)
chatter_node.append(thread_node)
xpath_node.append(chatter_node) | # would first resolve in x,x2,y, then resolve "Standard" with x,z,x2,y as result.
studio_view = request.env['ir.ui.view'].create({
'type': view.type,
'model': view.model, | random_line_split |
main.py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
from StringIO import StringIO
from odoo import http, _
from odoo.http import content_disposition, request
from odoo.exceptions import UserError, AccessError
from odoo.addons.web_studio.controllers import export
class WebStudioController(http.Controller):
@http.route('/web_studio/init', type='json', auth='user')
def studio_init(self):
return {
'dbuuid': request.env['ir.config_parameter'].get_param('database.uuid'),
'multi_lang': bool(request.env['res.lang'].search_count([('code', '!=', 'en_US')])),
}
@http.route('/web_studio/chatter_allowed', type='json', auth='user')
def is_chatter_allowed(self, model):
""" Returns True iff a chatter can be activated on the model's form views, i.e. if
- it is a custom model (since we can make it inherit from mail.thread), or
- it already inherits from mail.thread.
"""
Model = request.env[model]
return Model._custom or isinstance(Model, type(request.env['mail.thread']))
@http.route('/web_studio/get_studio_action', type='json', auth='user')
def get_studio_action(self, action_name, model, view_id=None, view_type=None):
view_type = 'tree' if view_type == 'list' else view_type # list is stored as tree in db
model = request.env['ir.model'].search([('model', '=', model)], limit=1)
action = None
if hasattr(self, '_get_studio_action_' + action_name):
action = getattr(self, '_get_studio_action_' + action_name)(model, view_id=view_id, view_type=view_type)
return action
def _get_studio_action_acl(self, model, **kwargs):
return {
'name': _('Access Control Lists'),
'type': 'ir.actions.act_window',
'res_model': 'ir.model.access',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.id},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new access control list.
</p>
""",
}
def _get_studio_action_automations(self, model, **kwargs):
return {
'name': _('Automated Actions'),
'type': 'ir.actions.act_window',
'res_model': 'base.action.rule',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.id},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new automated action.
</p>
""",
}
def _get_studio_action_filters(self, model, **kwargs):
return {
'name': _('Search Filters'),
'type': 'ir.actions.act_window',
'res_model': 'ir.filters',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.model}, # model_id is a Selection on ir.filters
'help': """ <p class="oe_view_nocontent_create">
Click to add a new filter.
</p>
""",
}
def _get_studio_action_reports(self, model, **kwargs):
return {
'name': _('Reports'),
'type': 'ir.actions.act_window',
'res_model': 'ir.actions.report.xml',
'views': [[False, 'kanban'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model': model.model},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new report.
</p>
""",
}
def _get_studio_action_translations(self, model, **kwargs):
""" Open a view for translating the field(s) of the record (model, id). """
domain = ['|', ('name', '=', model.model), ('name', 'ilike', model.model + ',')]
# search view + its inheritancies
views = request.env['ir.ui.view'].search([('model', '=', model.model)])
domain = ['|', '&', ('name', '=', 'ir.ui.view,arch_db'), ('res_id', 'in', views.ids)] + domain
def make_domain(fld, rec):
name = "%s,%s" % (fld.model_name, fld.name)
return ['&', ('res_id', '=', rec.id), ('name', '=', name)]
def insert_missing(fld, rec):
if not fld.translate:
return []
if fld.related:
try:
# traverse related fields up to their data source
while fld.related:
rec, fld = fld.traverse_related(rec)
if rec:
return ['|'] + domain + make_domain(fld, rec)
except AccessError:
return []
assert fld.translate and rec._name == fld.model_name
request.env['ir.translation'].insert_missing(fld, rec)
return []
# insert missing translations of views
for view in views:
for name, fld in view._fields.items():
domain += insert_missing(fld, view)
# insert missing translations of model, and extend domain for related fields
record = request.env[model.model].search([], limit=1)
if record:
for name, fld in record._fields.items():
domain += insert_missing(fld, record)
action = {
'name': _('Translate view'),
'type': 'ir.actions.act_window',
'res_model': 'ir.translation',
'view_mode': 'tree',
'views': [[request.env.ref('base.view_translation_dialog_tree').id, 'list']],
'target': 'current',
'domain': domain,
}
return action
@http.route('/web_studio/create_new_menu', type='json', auth='user')
def create_new_menu(self, name, model_id, is_app=False, parent_id=None, icon=None):
""" Create a new menu @name, linked to a new action associated to the model_id
@param is_app: if True, create an extra menu (app, without parent)
@param parent_id: the parent of the new menu.
To be set if is_app is False.
@param icon: the icon of the new app, like [icon, icon_color, background_color].
To be set if is_app is True.
"""
# create the action
model = request.env['ir.model'].browse(model_id)
new_action = request.env['ir.actions.act_window'].create({
'name': name,
'res_model': model.model,
'help': """
<p>
This is your new action ; by default, it contains a list view and a form view.
</p>
<p>
You can start customizing these screens by clicking on the Studio icon on the
top right corner (you can also customize this help message there).
</p>
""",
})
action_ref = 'ir.actions.act_window,' + str(new_action.id)
if is_app:
# create the menus (app menu + first submenu)
new_context = dict(request.context)
new_context.update({'ir.ui.menu.full_list': True}) # allows to create a menu without action
new_menu = request.env['ir.ui.menu'].with_context(new_context).create({
'name': name,
'web_icon': ','.join(icon),
'child_id': [(0, 0, {
'name': name,
'action': action_ref,
})]
})
else:
# create the submenu
new_menu = request.env['ir.ui.menu'].create({
'name': name,
'action': action_ref,
'parent_id': parent_id,
})
return {
'menu_id': new_menu.id,
'action_id': new_action.id,
}
@http.route('/web_studio/edit_action', type='json', auth='user')
def edit_action(self, action_type, action_id, args):
action_id = request.env[action_type].browse(action_id)
if action_id:
if 'groups_id' in args:
args['groups_id'] = [(6, 0, args['groups_id'])]
if 'view_mode' in args:
args['view_mode'] = args['view_mode'].replace('list', 'tree') # list is stored as tree in db
# Check that each views in view_mode exists or try to get default
view_ids = request.env['ir.ui.view'].search([('model', '=', action_id.res_model)])
view_types = [view_id.type for view_id in view_ids]
for view_type in args['view_mode'].split(','):
if view_type not in view_types:
try:
request.env[action_id.res_model].fields_view_get(view_type=view_type)
except UserError as e:
return e.name
# As view_ids has precedence on view_mode, we need to use them and resequence them
view_modes = args['view_mode'].split(',')
if action_id.view_ids:
missing_view_modes = [x for x in view_modes if x not in [y.view_mode for y in action_id.view_ids]]
for view_mode in missing_view_modes:
request.env['ir.actions.act_window.view'].create({'view_mode': view_mode, 'act_window_id': action_id.id})
for view_id in action_id.view_ids:
if view_id.view_mode in view_modes:
view_id.sequence = view_modes.index(view_id.view_mode)
view_xml_id = request.env['ir.model.data'].search([('model', '=', 'ir.actions.act_window.view'), ('res_id', '=', view_id.id)])
else:
view_id.unlink()
action_id.write(args)
return True
@http.route('/web_studio/set_another_view', type='json', auth='user')
def set_another_view(self, action_id, view_mode, view_id):
action_id = request.env['ir.actions.act_window'].browse(action_id)
window_view = request.env['ir.actions.act_window.view'].search([('view_mode', '=', view_mode), ('act_window_id', '=', action_id.id)])
if not window_view:
window_view = request.env['ir.actions.act_window.view'].create({'view_mode': view_mode, 'act_window_id': action_id.id})
window_view.view_id = view_id
return True
def _get_studio_view(self, view):
return request.env['ir.ui.view'].search([('inherit_id', '=', view.id), ('name', 'ilike', '%studio%customization%')], limit=1)
@http.route('/web_studio/get_studio_view_arch', type='json', auth='user')
def get_studio_view_arch(self, model, view_type, view_id=False):
view_type = 'tree' if view_type == 'list' else view_type # list is stored as tree in db
if not view_id:
# TOFIX: it's possibly not the used view ; see fields_get_view
# try to find the lowest priority matching ir.ui.view
view_id = request.env['ir.ui.view'].default_view(request.env[model]._name, view_type)
# We have to create a view with the default view if we want to customize it.
view = self._get_or_create_default_view(model, view_type, view_id)
studio_view = self._get_studio_view(view)
return {
'studio_view_id': studio_view and studio_view.id or False,
'studio_view_arch': studio_view and studio_view.arch_db or "<data/>",
}
@http.route('/web_studio/edit_view', type='json', auth='user')
def edit_view(self, view_id, studio_view_arch, operations=None):
view = request.env['ir.ui.view'].browse(view_id)
studio_view = self._get_studio_view(view)
parser = etree.XMLParser(remove_blank_text=True)
arch = etree.parse(StringIO(studio_view_arch), parser).getroot()
for op in operations:
# Call the right operation handler
if 'node' in op:
op['node'] = self._preprocess_attrs(op['node'])
getattr(self, '_operation_%s' % (op['type']))(arch, op, view.model)
# Save or create changes into studio view, identifiable by xmlid
# Example for view id 42 of model crm.lead: web-studio_crm.lead-42
# TODO: if len(arch) == 0, delete the view
new_arch = etree.tostring(arch, encoding='utf-8', pretty_print=True)
if studio_view:
studio_view.arch_db = new_arch
else:
# We have to play with priorities. Consider the following:
# View Base: <field name="x"/><field name="y"/>
# View Standard inherits Base: <field name="x" position="after"><field name="z"/></field>
# View Custo inherits Base: <field name="x" position="after"><field name="x2"/></field>
# We want x,x2,z,y, because that's what we did in studio, but the order of xpath
# resolution is sequence,name, not sequence,id. Because "Custo" < "Standard", it
# would first resolve in x,x2,y, then resolve "Standard" with x,z,x2,y as result.
studio_view = request.env['ir.ui.view'].create({
'type': view.type,
'model': view.model,
'inherit_id': view.id,
'mode': 'extension',
'priority': 99,
'arch': new_arch,
'name': "Odoo Studio: %s customization" % (view.name),
})
fields_view = request.env[view.model].with_context({'studio': True}).fields_view_get(view.id, view.type)
return fields_view
@http.route('/web_studio/edit_view_arch', type='json', auth='user')
def edit_view_arch(self, view_id, view_arch):
view = request.env['ir.ui.view'].browse(view_id)
if view:
view.write({'arch': view_arch})
if view.model:
try:
fields_view = request.env[view.model].with_context({'studio': True}).fields_view_get(view.id, view.type)
return fields_view
except Exception:
return False
@http.route('/web_studio/export', type='http', auth='user')
def export(self, token):
""" Exports a zip file containing the 'studio_customization' module
gathering all customizations done with Studio (customizations of
existing apps and freshly created apps).
"""
studio_module = request.env['ir.module.module'].get_studio_module()
data = request.env['ir.model.data'].search([('studio', '=', True)])
content = export.generate_archive(studio_module, data)
return request.make_response(content, headers=[
('Content-Disposition', content_disposition('customizations.zip')),
('Content-Type', 'application/zip'),
('Content-Length', len(content)),
], cookies={'fileToken': token})
def _preprocess_attrs(self, node):
# The js can't give us the field name, it only has the field id
if node['tag'] == 'field' and 'id' in node['attrs']:
node['attrs']['name'] = request.env['ir.model.fields'].browse(node['attrs'].pop('id')).name
return node
def _get_or_create_default_view(self, model, view_type, view_id=False):
View = request.env['ir.ui.view']
# If we have no view_id to inherit from, it's because we are adding
# fields to the default view of a new model. We will materialize the
# default view as a true view so we can keep using our xpath mechanism.
if view_id:
view = View.browse(view_id)
else:
arch = request.env[model].fields_view_get(view_id, view_type)['arch']
view = View.create({
'type': view_type,
'model': model,
'arch': arch,
'name': "Default %s view for %s" % (view_type, model),
})
return view
def _node_to_expr(self, node):
if not node.get('attrs') and node.get('xpath_info'):
# Format of expr is /form/tag1[]/tag2[]/[...]/tag[]
expr = ''.join(['/%s[%s]' % (parent['tag'], parent['indice']) for parent in node.get('xpath_info')])
else:
# Format of expr is //tag[@attr1_name=attr1_value][@attr2_name=attr2_value][...]
expr = '//' + node['tag'] + ''.join(['[@%s=\'%s\']' % (k, v) for k, v in node.get('attrs', {}).items()])
# Special case when we have <label/><div/> instead of <field>
# TODO: This is very naive, couldn't the js detect such a situation and
# tell us to anchor the xpath on another element ?
if node['tag'] == 'label':
expr = expr + '/following-sibling::div'
return expr
# If we already have an xpath on this element, use it, otherwise, create a new one.
def _get_xpath_node(self, arch, operation):
expr = self._node_to_expr(operation['target'])
position = operation['position']
xpath_node = arch.find('xpath[@expr="%s"][@position="%s"]' % (expr, position))
if xpath_node is None: # bool(node) == False if node has no children
xpath_node = etree.SubElement(arch, 'xpath', {
'expr': expr,
'position': position
})
return xpath_node
def _operation_remove(self, arch, operation, model=None):
expr = self._node_to_expr(operation['target'])
# We have to create a brand new xpath to remove this field from the view.
# TODO: Sometimes, we have to delete more stuff than just a single tag !
etree.SubElement(arch, 'xpath', {
'expr': expr,
'position': 'replace'
})
def _operation_add(self, arch, operation, model):
node = operation['node']
xpath_node = self._get_xpath_node(arch, operation)
# Create the actual node inside the xpath. It needs to be the first
# child of the xpath to respect the order in which they were added.
xml_node = etree.Element(node['tag'], node.get('attrs'))
if node['tag'] == 'notebook':
# FIXME take the same randomString as parent
name = 'studio_page_' + node['attrs']['name'].split('_')[2]
xml_node_page = etree.Element('page', {'string': 'New Page', 'name': name})
xml_node.insert(0, xml_node_page)
elif node['tag'] == 'group':
xml_node_page_right = etree.Element('group', {'string': 'Right Title', 'name': node['attrs']['name'] + '_right'})
xml_node_page_left = etree.Element('group', {'string': 'Left Title', 'name': node['attrs']['name'] + '_left'})
xml_node.insert(0, xml_node_page_right)
xml_node.insert(0, xml_node_page_left)
elif node['tag'] == 'button':
# To create a stat button, we need
# - a many2one field (1) that points to this model
# - a field (2) that counts the number of records associated with the current record
# - an action to jump in (3) with the many2one field (1) as domain/context
#
# (1) [button_field] the many2one field
# (2) [button_count_field] is a non-stored computed field (to always have the good value in the stat button, if access rights)
# (3) [button_action] an act_window action to jump in the related model
button_field = request.env['ir.model.fields'].browse(node['field'])
button_count_field, button_action = self._get_or_create_fields_for_button(model, button_field, node['string'])
# the XML looks like <button> <field/> </button : a element `field` needs to be inserted inside the button
xml_node_field = etree.Element('field', {'widget': 'statinfo', 'name': button_count_field.name, 'string': node['string'] or button_count_field.field_description})
xml_node.insert(0, xml_node_field)
xml_node.attrib['type'] = 'action'
xml_node.attrib['name'] = str(button_action.id)
else:
xml_node.text = node.get('text')
xpath_node.insert(0, xml_node)
def _get_or_create_fields_for_button(self, model, field, button_name):
""" Returns the button_count_field and the button_action link to a stat button.
@param field: a many2one field
"""
if field.ttype != 'many2one' or field.relation != model:
raise UserError(_('The related field of a button has to be a many2one to %s.' % model))
model = request.env['ir.model'].search([('model', '=', model)], limit=1)
# There is a counter on the button ; as the related field is a many2one, we need
# to create a new computed field that counts the number of records in the one2many
button_count_field_name = 'x_%s_count' % field.name
button_count_field = request.env['ir.model.fields'].search([('name', '=', button_count_field_name), ('model_id', '=', model.id)])
if not button_count_field:
compute_function = """
results = self.env['%(model)s'].read_group([('%(field)s', 'in', self.ids)], '%(field)s', '%(field)s')
dic = {}
for x in results: dic[x['%(field)s'][0]] = x['%(field)s_count']
for record in self: record['%(count_field)s'] = dic.get(record.id, 0)
""" % {
'model': field.model,
'field': field.name,
'count_field': button_count_field_name,
}
button_count_field = request.env['ir.model.fields'].create({
'name': button_count_field_name,
'field_description': '%s count' % field.field_description,
'model': model.model,
'model_id': model.id,
'ttype': 'integer',
'store': False,
'compute': compute_function.replace(' ', ''), # remove indentation for safe_eval
})
# The action could already exist but we don't want to recreate one each time
button_action_domain = "[('%s', '=', active_id)]" % (field.name)
button_action_context = "{'search_default_%s': active_id,'default_%s': active_id}" % (field.name, field.name)
button_action = request.env['ir.actions.act_window'].search([
('name', '=', button_name), ('res_model', '=', field.model),
('domain', '=', button_action_domain), ('context', '=', button_action_context),
])
if not button_action:
# Link the button with an associated act_window
button_action = request.env['ir.actions.act_window'].create({
'name': button_name,
'res_model': field.model,
'view_mode': 'tree,form',
'view_type': 'form',
'domain': button_action_domain,
'context': button_action_context,
})
return button_count_field, button_action
def _operation_move(self, arch, operation, model=None):
self._operation_remove(arch, dict(operation, target=operation['node']))
self._operation_add(arch, operation)
# Create or update node for each attribute
def _operation_attributes(self, arch, operation, model=None):
ir_model_data = request.env['ir.model.data']
new_attrs = operation['new_attrs']
if (new_attrs.get('groups')):
eval_attr = []
for many2many_value in new_attrs['groups']:
group_xmlid = ir_model_data.search([
('model', '=', 'res.groups'),
('res_id', '=', many2many_value)])
eval_attr.append(group_xmlid.complete_name)
eval_attr = ",".join(eval_attr)
new_attrs['groups'] = eval_attr
else:
# TOFIX
new_attrs['groups'] = ''
xpath_node = self._get_xpath_node(arch, operation)
for key, new_attr in new_attrs.iteritems():
xml_node = xpath_node.find('attribute[@name="%s"]' % (key))
if xml_node is None:
|
else:
xml_node.text = new_attr
def _operation_buttonbox(self, arch, operation, model=None):
studio_view_arch = arch # The actual arch is the studio view arch
# Get the arch of the form view with inherited views applied
arch = request.env[model].fields_view_get(view_type='form')['arch']
parser = etree.XMLParser(remove_blank_text=True)
arch = etree.parse(StringIO(arch), parser).getroot()
# Create xpath to put the buttonbox as the first child of the sheet
if arch.find('sheet'):
sheet_node = arch.find('sheet')
if list(sheet_node): # Check if children exists
xpath_node = etree.SubElement(studio_view_arch, 'xpath', {
'expr': '//sheet/*[1]',
'position': 'before'
})
else:
xpath_node = etree.SubElement(studio_view_arch, 'xpath', {
'expr': '//sheet',
'position': 'inside'
})
# Create and insert the buttonbox node inside the xpath node
buttonbox_node = etree.Element('div', {'name': 'button_box', 'class': 'oe_button_box'})
xpath_node.append(buttonbox_node)
def _operation_chatter(self, arch, operation, model=None):
def _get_remove_field_op(arch, field_name):
return {
'type': 'remove',
'target': {
'tag': 'field',
'attrs': {
'name': field_name,
},
}
}
if not self.is_chatter_allowed(operation['model']):
# Chatter can only be activated form models that (can) inherit from mail.thread
return
# From this point, the model is either a custom model or inherits from mail.thread
model = request.env['ir.model'].search([('model', '=', operation['model'])])
if model.state == 'manual' and not model.mail_thread:
# Activate mail.thread inheritance on the custom model
model.write({'mail_thread': True})
# Remove message_ids and message_follower_ids if already defined in form view
if operation['remove_message_ids']:
self._operation_remove(arch, _get_remove_field_op(arch, 'message_ids'))
if operation['remove_follower_ids']:
self._operation_remove(arch, _get_remove_field_op(arch, 'message_follower_ids'))
xpath_node = etree.SubElement(arch, 'xpath', {
'expr': '//sheet',
'position': 'after',
})
chatter_node = etree.Element('div', {'class': 'oe_chatter'})
thread_node = etree.Element('field', {'name': 'message_ids', 'widget': 'mail_thread'})
follower_node = etree.Element('field', {'name': 'message_follower_ids', 'widget': 'mail_followers'})
chatter_node.append(follower_node)
chatter_node.append(thread_node)
xpath_node.append(chatter_node)
| xml_node = etree.Element('attribute', {'name': key})
xml_node.text = new_attr
xpath_node.insert(0, xml_node) | conditional_block |
main.py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
from StringIO import StringIO
from odoo import http, _
from odoo.http import content_disposition, request
from odoo.exceptions import UserError, AccessError
from odoo.addons.web_studio.controllers import export
class WebStudioController(http.Controller):
@http.route('/web_studio/init', type='json', auth='user')
def studio_init(self):
return {
'dbuuid': request.env['ir.config_parameter'].get_param('database.uuid'),
'multi_lang': bool(request.env['res.lang'].search_count([('code', '!=', 'en_US')])),
}
@http.route('/web_studio/chatter_allowed', type='json', auth='user')
def is_chatter_allowed(self, model):
""" Returns True iff a chatter can be activated on the model's form views, i.e. if
- it is a custom model (since we can make it inherit from mail.thread), or
- it already inherits from mail.thread.
"""
Model = request.env[model]
return Model._custom or isinstance(Model, type(request.env['mail.thread']))
@http.route('/web_studio/get_studio_action', type='json', auth='user')
def get_studio_action(self, action_name, model, view_id=None, view_type=None):
view_type = 'tree' if view_type == 'list' else view_type # list is stored as tree in db
model = request.env['ir.model'].search([('model', '=', model)], limit=1)
action = None
if hasattr(self, '_get_studio_action_' + action_name):
action = getattr(self, '_get_studio_action_' + action_name)(model, view_id=view_id, view_type=view_type)
return action
def _get_studio_action_acl(self, model, **kwargs):
return {
'name': _('Access Control Lists'),
'type': 'ir.actions.act_window',
'res_model': 'ir.model.access',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.id},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new access control list.
</p>
""",
}
def _get_studio_action_automations(self, model, **kwargs):
return {
'name': _('Automated Actions'),
'type': 'ir.actions.act_window',
'res_model': 'base.action.rule',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.id},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new automated action.
</p>
""",
}
def _get_studio_action_filters(self, model, **kwargs):
return {
'name': _('Search Filters'),
'type': 'ir.actions.act_window',
'res_model': 'ir.filters',
'views': [[False, 'list'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model_id': model.model}, # model_id is a Selection on ir.filters
'help': """ <p class="oe_view_nocontent_create">
Click to add a new filter.
</p>
""",
}
def _get_studio_action_reports(self, model, **kwargs):
return {
'name': _('Reports'),
'type': 'ir.actions.act_window',
'res_model': 'ir.actions.report.xml',
'views': [[False, 'kanban'], [False, 'form']],
'target': 'current',
'domain': [],
'context': {'search_default_model': model.model},
'help': """ <p class="oe_view_nocontent_create">
Click to add a new report.
</p>
""",
}
def _get_studio_action_translations(self, model, **kwargs):
""" Open a view for translating the field(s) of the record (model, id). """
domain = ['|', ('name', '=', model.model), ('name', 'ilike', model.model + ',')]
# search view + its inheritancies
views = request.env['ir.ui.view'].search([('model', '=', model.model)])
domain = ['|', '&', ('name', '=', 'ir.ui.view,arch_db'), ('res_id', 'in', views.ids)] + domain
def make_domain(fld, rec):
name = "%s,%s" % (fld.model_name, fld.name)
return ['&', ('res_id', '=', rec.id), ('name', '=', name)]
def insert_missing(fld, rec):
if not fld.translate:
return []
if fld.related:
try:
# traverse related fields up to their data source
while fld.related:
rec, fld = fld.traverse_related(rec)
if rec:
return ['|'] + domain + make_domain(fld, rec)
except AccessError:
return []
assert fld.translate and rec._name == fld.model_name
request.env['ir.translation'].insert_missing(fld, rec)
return []
# insert missing translations of views
for view in views:
for name, fld in view._fields.items():
domain += insert_missing(fld, view)
# insert missing translations of model, and extend domain for related fields
record = request.env[model.model].search([], limit=1)
if record:
for name, fld in record._fields.items():
domain += insert_missing(fld, record)
action = {
'name': _('Translate view'),
'type': 'ir.actions.act_window',
'res_model': 'ir.translation',
'view_mode': 'tree',
'views': [[request.env.ref('base.view_translation_dialog_tree').id, 'list']],
'target': 'current',
'domain': domain,
}
return action
@http.route('/web_studio/create_new_menu', type='json', auth='user')
def create_new_menu(self, name, model_id, is_app=False, parent_id=None, icon=None):
""" Create a new menu @name, linked to a new action associated to the model_id
@param is_app: if True, create an extra menu (app, without parent)
@param parent_id: the parent of the new menu.
To be set if is_app is False.
@param icon: the icon of the new app, like [icon, icon_color, background_color].
To be set if is_app is True.
"""
# create the action
model = request.env['ir.model'].browse(model_id)
new_action = request.env['ir.actions.act_window'].create({
'name': name,
'res_model': model.model,
'help': """
<p>
This is your new action ; by default, it contains a list view and a form view.
</p>
<p>
You can start customizing these screens by clicking on the Studio icon on the
top right corner (you can also customize this help message there).
</p>
""",
})
action_ref = 'ir.actions.act_window,' + str(new_action.id)
if is_app:
# create the menus (app menu + first submenu)
new_context = dict(request.context)
new_context.update({'ir.ui.menu.full_list': True}) # allows to create a menu without action
new_menu = request.env['ir.ui.menu'].with_context(new_context).create({
'name': name,
'web_icon': ','.join(icon),
'child_id': [(0, 0, {
'name': name,
'action': action_ref,
})]
})
else:
# create the submenu
new_menu = request.env['ir.ui.menu'].create({
'name': name,
'action': action_ref,
'parent_id': parent_id,
})
return {
'menu_id': new_menu.id,
'action_id': new_action.id,
}
@http.route('/web_studio/edit_action', type='json', auth='user')
def edit_action(self, action_type, action_id, args):
action_id = request.env[action_type].browse(action_id)
if action_id:
if 'groups_id' in args:
args['groups_id'] = [(6, 0, args['groups_id'])]
if 'view_mode' in args:
args['view_mode'] = args['view_mode'].replace('list', 'tree') # list is stored as tree in db
# Check that each views in view_mode exists or try to get default
view_ids = request.env['ir.ui.view'].search([('model', '=', action_id.res_model)])
view_types = [view_id.type for view_id in view_ids]
for view_type in args['view_mode'].split(','):
if view_type not in view_types:
try:
request.env[action_id.res_model].fields_view_get(view_type=view_type)
except UserError as e:
return e.name
# As view_ids has precedence on view_mode, we need to use them and resequence them
view_modes = args['view_mode'].split(',')
if action_id.view_ids:
missing_view_modes = [x for x in view_modes if x not in [y.view_mode for y in action_id.view_ids]]
for view_mode in missing_view_modes:
request.env['ir.actions.act_window.view'].create({'view_mode': view_mode, 'act_window_id': action_id.id})
for view_id in action_id.view_ids:
if view_id.view_mode in view_modes:
view_id.sequence = view_modes.index(view_id.view_mode)
view_xml_id = request.env['ir.model.data'].search([('model', '=', 'ir.actions.act_window.view'), ('res_id', '=', view_id.id)])
else:
view_id.unlink()
action_id.write(args)
return True
@http.route('/web_studio/set_another_view', type='json', auth='user')
def set_another_view(self, action_id, view_mode, view_id):
action_id = request.env['ir.actions.act_window'].browse(action_id)
window_view = request.env['ir.actions.act_window.view'].search([('view_mode', '=', view_mode), ('act_window_id', '=', action_id.id)])
if not window_view:
window_view = request.env['ir.actions.act_window.view'].create({'view_mode': view_mode, 'act_window_id': action_id.id})
window_view.view_id = view_id
return True
def _get_studio_view(self, view):
return request.env['ir.ui.view'].search([('inherit_id', '=', view.id), ('name', 'ilike', '%studio%customization%')], limit=1)
@http.route('/web_studio/get_studio_view_arch', type='json', auth='user')
def get_studio_view_arch(self, model, view_type, view_id=False):
view_type = 'tree' if view_type == 'list' else view_type # list is stored as tree in db
if not view_id:
# TOFIX: it's possibly not the used view ; see fields_get_view
# try to find the lowest priority matching ir.ui.view
view_id = request.env['ir.ui.view'].default_view(request.env[model]._name, view_type)
# We have to create a view with the default view if we want to customize it.
view = self._get_or_create_default_view(model, view_type, view_id)
studio_view = self._get_studio_view(view)
return {
'studio_view_id': studio_view and studio_view.id or False,
'studio_view_arch': studio_view and studio_view.arch_db or "<data/>",
}
@http.route('/web_studio/edit_view', type='json', auth='user')
def edit_view(self, view_id, studio_view_arch, operations=None):
|
@http.route('/web_studio/edit_view_arch', type='json', auth='user')
def edit_view_arch(self, view_id, view_arch):
view = request.env['ir.ui.view'].browse(view_id)
if view:
view.write({'arch': view_arch})
if view.model:
try:
fields_view = request.env[view.model].with_context({'studio': True}).fields_view_get(view.id, view.type)
return fields_view
except Exception:
return False
@http.route('/web_studio/export', type='http', auth='user')
def export(self, token):
""" Exports a zip file containing the 'studio_customization' module
gathering all customizations done with Studio (customizations of
existing apps and freshly created apps).
"""
studio_module = request.env['ir.module.module'].get_studio_module()
data = request.env['ir.model.data'].search([('studio', '=', True)])
content = export.generate_archive(studio_module, data)
return request.make_response(content, headers=[
('Content-Disposition', content_disposition('customizations.zip')),
('Content-Type', 'application/zip'),
('Content-Length', len(content)),
], cookies={'fileToken': token})
def _preprocess_attrs(self, node):
# The js can't give us the field name, it only has the field id
if node['tag'] == 'field' and 'id' in node['attrs']:
node['attrs']['name'] = request.env['ir.model.fields'].browse(node['attrs'].pop('id')).name
return node
def _get_or_create_default_view(self, model, view_type, view_id=False):
View = request.env['ir.ui.view']
# If we have no view_id to inherit from, it's because we are adding
# fields to the default view of a new model. We will materialize the
# default view as a true view so we can keep using our xpath mechanism.
if view_id:
view = View.browse(view_id)
else:
arch = request.env[model].fields_view_get(view_id, view_type)['arch']
view = View.create({
'type': view_type,
'model': model,
'arch': arch,
'name': "Default %s view for %s" % (view_type, model),
})
return view
def _node_to_expr(self, node):
if not node.get('attrs') and node.get('xpath_info'):
# Format of expr is /form/tag1[]/tag2[]/[...]/tag[]
expr = ''.join(['/%s[%s]' % (parent['tag'], parent['indice']) for parent in node.get('xpath_info')])
else:
# Format of expr is //tag[@attr1_name=attr1_value][@attr2_name=attr2_value][...]
expr = '//' + node['tag'] + ''.join(['[@%s=\'%s\']' % (k, v) for k, v in node.get('attrs', {}).items()])
# Special case when we have <label/><div/> instead of <field>
# TODO: This is very naive, couldn't the js detect such a situation and
# tell us to anchor the xpath on another element ?
if node['tag'] == 'label':
expr = expr + '/following-sibling::div'
return expr
# If we already have an xpath on this element, use it, otherwise, create a new one.
def _get_xpath_node(self, arch, operation):
expr = self._node_to_expr(operation['target'])
position = operation['position']
xpath_node = arch.find('xpath[@expr="%s"][@position="%s"]' % (expr, position))
if xpath_node is None: # bool(node) == False if node has no children
xpath_node = etree.SubElement(arch, 'xpath', {
'expr': expr,
'position': position
})
return xpath_node
def _operation_remove(self, arch, operation, model=None):
expr = self._node_to_expr(operation['target'])
# We have to create a brand new xpath to remove this field from the view.
# TODO: Sometimes, we have to delete more stuff than just a single tag !
etree.SubElement(arch, 'xpath', {
'expr': expr,
'position': 'replace'
})
def _operation_add(self, arch, operation, model):
node = operation['node']
xpath_node = self._get_xpath_node(arch, operation)
# Create the actual node inside the xpath. It needs to be the first
# child of the xpath to respect the order in which they were added.
xml_node = etree.Element(node['tag'], node.get('attrs'))
if node['tag'] == 'notebook':
# FIXME take the same randomString as parent
name = 'studio_page_' + node['attrs']['name'].split('_')[2]
xml_node_page = etree.Element('page', {'string': 'New Page', 'name': name})
xml_node.insert(0, xml_node_page)
elif node['tag'] == 'group':
xml_node_page_right = etree.Element('group', {'string': 'Right Title', 'name': node['attrs']['name'] + '_right'})
xml_node_page_left = etree.Element('group', {'string': 'Left Title', 'name': node['attrs']['name'] + '_left'})
xml_node.insert(0, xml_node_page_right)
xml_node.insert(0, xml_node_page_left)
elif node['tag'] == 'button':
# To create a stat button, we need
# - a many2one field (1) that points to this model
# - a field (2) that counts the number of records associated with the current record
# - an action to jump in (3) with the many2one field (1) as domain/context
#
# (1) [button_field] the many2one field
# (2) [button_count_field] is a non-stored computed field (to always have the good value in the stat button, if access rights)
# (3) [button_action] an act_window action to jump in the related model
button_field = request.env['ir.model.fields'].browse(node['field'])
button_count_field, button_action = self._get_or_create_fields_for_button(model, button_field, node['string'])
# the XML looks like <button> <field/> </button : a element `field` needs to be inserted inside the button
xml_node_field = etree.Element('field', {'widget': 'statinfo', 'name': button_count_field.name, 'string': node['string'] or button_count_field.field_description})
xml_node.insert(0, xml_node_field)
xml_node.attrib['type'] = 'action'
xml_node.attrib['name'] = str(button_action.id)
else:
xml_node.text = node.get('text')
xpath_node.insert(0, xml_node)
def _get_or_create_fields_for_button(self, model, field, button_name):
""" Returns the button_count_field and the button_action link to a stat button.
@param field: a many2one field
"""
if field.ttype != 'many2one' or field.relation != model:
raise UserError(_('The related field of a button has to be a many2one to %s.' % model))
model = request.env['ir.model'].search([('model', '=', model)], limit=1)
# There is a counter on the button ; as the related field is a many2one, we need
# to create a new computed field that counts the number of records in the one2many
button_count_field_name = 'x_%s_count' % field.name
button_count_field = request.env['ir.model.fields'].search([('name', '=', button_count_field_name), ('model_id', '=', model.id)])
if not button_count_field:
compute_function = """
results = self.env['%(model)s'].read_group([('%(field)s', 'in', self.ids)], '%(field)s', '%(field)s')
dic = {}
for x in results: dic[x['%(field)s'][0]] = x['%(field)s_count']
for record in self: record['%(count_field)s'] = dic.get(record.id, 0)
""" % {
'model': field.model,
'field': field.name,
'count_field': button_count_field_name,
}
button_count_field = request.env['ir.model.fields'].create({
'name': button_count_field_name,
'field_description': '%s count' % field.field_description,
'model': model.model,
'model_id': model.id,
'ttype': 'integer',
'store': False,
'compute': compute_function.replace(' ', ''), # remove indentation for safe_eval
})
# The action could already exist but we don't want to recreate one each time
button_action_domain = "[('%s', '=', active_id)]" % (field.name)
button_action_context = "{'search_default_%s': active_id,'default_%s': active_id}" % (field.name, field.name)
button_action = request.env['ir.actions.act_window'].search([
('name', '=', button_name), ('res_model', '=', field.model),
('domain', '=', button_action_domain), ('context', '=', button_action_context),
])
if not button_action:
# Link the button with an associated act_window
button_action = request.env['ir.actions.act_window'].create({
'name': button_name,
'res_model': field.model,
'view_mode': 'tree,form',
'view_type': 'form',
'domain': button_action_domain,
'context': button_action_context,
})
return button_count_field, button_action
def _operation_move(self, arch, operation, model=None):
self._operation_remove(arch, dict(operation, target=operation['node']))
self._operation_add(arch, operation)
# Create or update node for each attribute
def _operation_attributes(self, arch, operation, model=None):
ir_model_data = request.env['ir.model.data']
new_attrs = operation['new_attrs']
if (new_attrs.get('groups')):
eval_attr = []
for many2many_value in new_attrs['groups']:
group_xmlid = ir_model_data.search([
('model', '=', 'res.groups'),
('res_id', '=', many2many_value)])
eval_attr.append(group_xmlid.complete_name)
eval_attr = ",".join(eval_attr)
new_attrs['groups'] = eval_attr
else:
# TOFIX
new_attrs['groups'] = ''
xpath_node = self._get_xpath_node(arch, operation)
for key, new_attr in new_attrs.iteritems():
xml_node = xpath_node.find('attribute[@name="%s"]' % (key))
if xml_node is None:
xml_node = etree.Element('attribute', {'name': key})
xml_node.text = new_attr
xpath_node.insert(0, xml_node)
else:
xml_node.text = new_attr
def _operation_buttonbox(self, arch, operation, model=None):
studio_view_arch = arch # The actual arch is the studio view arch
# Get the arch of the form view with inherited views applied
arch = request.env[model].fields_view_get(view_type='form')['arch']
parser = etree.XMLParser(remove_blank_text=True)
arch = etree.parse(StringIO(arch), parser).getroot()
# Create xpath to put the buttonbox as the first child of the sheet
if arch.find('sheet'):
sheet_node = arch.find('sheet')
if list(sheet_node): # Check if children exists
xpath_node = etree.SubElement(studio_view_arch, 'xpath', {
'expr': '//sheet/*[1]',
'position': 'before'
})
else:
xpath_node = etree.SubElement(studio_view_arch, 'xpath', {
'expr': '//sheet',
'position': 'inside'
})
# Create and insert the buttonbox node inside the xpath node
buttonbox_node = etree.Element('div', {'name': 'button_box', 'class': 'oe_button_box'})
xpath_node.append(buttonbox_node)
def _operation_chatter(self, arch, operation, model=None):
def _get_remove_field_op(arch, field_name):
return {
'type': 'remove',
'target': {
'tag': 'field',
'attrs': {
'name': field_name,
},
}
}
if not self.is_chatter_allowed(operation['model']):
# Chatter can only be activated form models that (can) inherit from mail.thread
return
# From this point, the model is either a custom model or inherits from mail.thread
model = request.env['ir.model'].search([('model', '=', operation['model'])])
if model.state == 'manual' and not model.mail_thread:
# Activate mail.thread inheritance on the custom model
model.write({'mail_thread': True})
# Remove message_ids and message_follower_ids if already defined in form view
if operation['remove_message_ids']:
self._operation_remove(arch, _get_remove_field_op(arch, 'message_ids'))
if operation['remove_follower_ids']:
self._operation_remove(arch, _get_remove_field_op(arch, 'message_follower_ids'))
xpath_node = etree.SubElement(arch, 'xpath', {
'expr': '//sheet',
'position': 'after',
})
chatter_node = etree.Element('div', {'class': 'oe_chatter'})
thread_node = etree.Element('field', {'name': 'message_ids', 'widget': 'mail_thread'})
follower_node = etree.Element('field', {'name': 'message_follower_ids', 'widget': 'mail_followers'})
chatter_node.append(follower_node)
chatter_node.append(thread_node)
xpath_node.append(chatter_node)
| view = request.env['ir.ui.view'].browse(view_id)
studio_view = self._get_studio_view(view)
parser = etree.XMLParser(remove_blank_text=True)
arch = etree.parse(StringIO(studio_view_arch), parser).getroot()
for op in operations:
# Call the right operation handler
if 'node' in op:
op['node'] = self._preprocess_attrs(op['node'])
getattr(self, '_operation_%s' % (op['type']))(arch, op, view.model)
# Save or create changes into studio view, identifiable by xmlid
# Example for view id 42 of model crm.lead: web-studio_crm.lead-42
# TODO: if len(arch) == 0, delete the view
new_arch = etree.tostring(arch, encoding='utf-8', pretty_print=True)
if studio_view:
studio_view.arch_db = new_arch
else:
# We have to play with priorities. Consider the following:
# View Base: <field name="x"/><field name="y"/>
# View Standard inherits Base: <field name="x" position="after"><field name="z"/></field>
# View Custo inherits Base: <field name="x" position="after"><field name="x2"/></field>
# We want x,x2,z,y, because that's what we did in studio, but the order of xpath
# resolution is sequence,name, not sequence,id. Because "Custo" < "Standard", it
# would first resolve in x,x2,y, then resolve "Standard" with x,z,x2,y as result.
studio_view = request.env['ir.ui.view'].create({
'type': view.type,
'model': view.model,
'inherit_id': view.id,
'mode': 'extension',
'priority': 99,
'arch': new_arch,
'name': "Odoo Studio: %s customization" % (view.name),
})
fields_view = request.env[view.model].with_context({'studio': True}).fields_view_get(view.id, view.type)
return fields_view | identifier_body |
mod.rs | //! Tiles organised into chunks for efficiency and performance.
//!
//! Mostly everything in this module is private API and not intended to be used
//! outside of this crate as a lot goes on under the hood that can cause issues.
//! With that being said, everything that can be used with helping a chunk get
//! created does live in here.
//!
//! These below examples have nothing to do with this library as all should be
//! done through the [`Tilemap`]. These are just more specific examples which
//! use the private API of this library.
//!
//! [`Tilemap`]: crate::tilemap::Tilemap
//!
//! # Simple chunk creation
//! ```
//! use bevy_asset::{prelude::*, HandleId};
//! use bevy_sprite::prelude::*;
//! use bevy_tilemap::prelude::*;
//!
//! // This must be set in Asset<TextureAtlas>.
//! let texture_atlas_handle = Handle::weak(HandleId::random::<TextureAtlas>());
//!
//! let mut tilemap = Tilemap::new(texture_atlas_handle, 32, 32);
//!
//! // There are two ways to create a new chunk. Either directly...
//!
//! tilemap.insert_chunk((0, 0));
//!
//! // Or indirectly...
//!
//! let point = (0, 0);
//! let sprite_index = 0;
//! let tile = Tile { point, sprite_index, ..Default::default() };
//! tilemap.insert_tile(tile);
//!
//! ```
//!
//! # Specifying what kind of chunk
//! ```
//! use bevy_asset::{prelude::*, HandleId};
//! use bevy_sprite::prelude::*;
//! use bevy_tilemap::prelude::*;
//!
//! // This must be set in Asset<TextureAtlas>.
//! let texture_atlas_handle = Handle::weak(HandleId::random::<TextureAtlas>());
//!
//! let mut tilemap = Tilemap::new(texture_atlas_handle, 32, 32);
//!
//! tilemap.insert_chunk((0, 0));
//!
//! let z_order = 0;
//! tilemap.add_layer(TilemapLayer { kind: LayerKind::Dense, ..Default::default() }, 1);
//!
//! let z_order = 1;
//! tilemap.add_layer(TilemapLayer { kind: LayerKind::Dense, ..Default::default() }, 1);
//! ```
/// Chunk entity.
pub(crate) mod entity;
/// Sparse and dense chunk layers.
mod layer;
/// Meshes for rendering to vertices.
pub(crate) mod mesh;
/// Raw tile that is stored in the chunks.
pub mod raw_tile;
/// Files and helpers for rendering.
pub(crate) mod render;
/// Systems for chunks.
pub(crate) mod system;
use crate::{lib::*, tile::Tile};
pub use layer::LayerKind;
use layer::{DenseLayer, LayerKindInner, SparseLayer, SpriteLayer};
pub use raw_tile::RawTile;
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, PartialEq, Debug)]
#[doc(hidden)]
pub(crate) struct Chunk {
/// The point coordinate of the chunk.
point: Point2,
/// The sprite layers of the chunk.
sprite_layers: Vec<Option<SpriteLayer>>,
/// Ephemeral user data that can be used for flags or other purposes.
user_data: u128,
/// Contains a map of all collision entities.
#[cfg(feature = "bevy_rapier2d")]
pub collision_entities: HashMap<usize, Entity>,
}
impl Chunk {
/// A newly constructed chunk from a point and the maximum number of layers.
pub(crate) fn new(
point: Point2,
layers: &[Option<LayerKind>],
dimensions: Dimension2,
) -> Chunk {
let mut chunk = Chunk {
point,
sprite_layers: vec![None; layers.len()],
user_data: 0,
#[cfg(feature = "bevy_rapier2d")]
collision_entities: HashMap::default(),
};
for (z_order, kind) in layers.iter().enumerate() {
if let Some(kind) = kind {
chunk.add_layer(kind, z_order, dimensions)
}
}
chunk
}
/// Adds a layer from a layer kind, the z layer, and dimensions of the
/// chunk.
pub(crate) fn add_layer(&mut self, kind: &LayerKind, z_order: usize, dimensions: Dimension2) {
match kind {
LayerKind::Dense => {
let tiles = vec![
RawTile {
index: 0,
color: Color::rgba(0.0, 0.0, 0.0, 0.0)
};
dimensions.area() as usize
];
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
*layer = Some(SpriteLayer {
inner: LayerKindInner::Dense(DenseLayer::new(tiles)),
entity: None,
});
} else {
error!("sprite layer {} is out of bounds", z_order);
}
}
LayerKind::Sparse => {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
*layer = Some(SpriteLayer {
inner: LayerKindInner::Sparse(SparseLayer::new(HashMap::default())),
entity: None,
});
} else {
error!("sprite layer {} is out of bounds", z_order);
}
}
}
}
/// Returns the point of the location of the chunk.
pub(crate) fn point(&self) -> Point2 {
self.point
}
// /// Returns a copy of the user data.
// pub(crate) fn user_data(&self) -> u128 {
// self.user_data
// }
//
// /// Returns a mutable reference to the user data.
// pub(crate) fn user_data_mut(&mut self) -> &mut u128 {
// &mut self.user_data
// }
/// Moves a layer from a z layer to another.
pub(crate) fn move_layer(&mut self, from_z: usize, to_z: usize) {
// TODO: rename to swap and include it in the greater api
if self.sprite_layers.get(to_z).is_some() {
error!(
"sprite layer {} unexpectedly exists and can not be moved",
to_z
);
return;
}
self.sprite_layers.swap(from_z, to_z);
}
/// Removes a layer from the specified layer.
pub(crate) fn remove_layer(&mut self, z_order: usize) {
self.sprite_layers.get_mut(z_order).take();
}
/// Sets the mesh for the chunk layer to use.
pub(crate) fn set_mesh(&mut self, z_order: usize, mesh: Handle<Mesh>) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.inner.as_mut().set_mesh(mesh)
} else {
error!("can not set mesh to sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Sets a single raw tile to be added to a z layer and index.
pub(crate) fn set_tile<P: Into<Point2>>(&mut self, index: usize, tile: Tile<P>) {
if let Some(layer) = self.sprite_layers.get_mut(tile.z_order) {
if let Some(layer) = layer.as_mut() | else {
error!("can not set tile to sprite layer {}", tile.z_order);
}
} else {
error!("sprite layer {} does not exist", tile.z_order);
}
}
/// Removes a tile from a sprite layer with a given index and z order.
pub(crate) fn remove_tile(&mut self, index: usize, z_order: usize) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.inner.as_mut().remove_tile(index);
} else {
error!("can not remove tile on sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Adds an entity to a z layer, always when it is spawned.
pub(crate) fn add_entity(&mut self, z_order: usize, entity: Entity) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.entity = Some(entity);
} else {
error!("can not add entity to sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Adds an entity to a tile index in a layer.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn insert_collision_entity(
&mut self,
index: usize,
entity: Entity,
) -> Option<Entity> {
self.collision_entities.insert(index, entity)
}
/// Gets the layers entity, if any. Useful for despawning.
pub(crate) fn get_entity(&self, z_order: usize) -> Option<Entity> {
self.sprite_layers
.get(z_order)
.and_then(|o| o.as_ref().and_then(|layer| layer.entity))
}
/// Gets the collision entity if any.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn get_collision_entity(&self, index: usize) -> Option<Entity> {
self.collision_entities.get(&index).cloned()
}
/// Remove all the layers and collision entities and return them for use with bulk despawning.
pub(crate) fn remove_entities(&mut self) -> Vec<Entity> {
let mut entities = Vec::new();
for sprite_layer in &mut self.sprite_layers {
if let Some(layer) = sprite_layer {
if let Some(entity) = layer.entity.take() {
entities.push(entity);
}
}
}
#[cfg(feature = "bevy_rapier2d")]
for (_, entity) in self.collision_entities.drain() {
entities.push(entity)
}
entities
}
/// Gets a reference to a tile from a provided z order and index.
pub(crate) fn get_tile(&self, z_order: usize, index: usize) -> Option<&RawTile> {
self.sprite_layers.get(z_order).and_then(|layer| {
layer
.as_ref()
.and_then(|layer| layer.inner.as_ref().get_tile(index))
})
}
/// Gets a mutable reference to a tile from a provided z order and index.
pub(crate) fn get_tile_mut(&mut self, z_order: usize, index: usize) -> Option<&mut RawTile> {
self.sprite_layers.get_mut(z_order).and_then(|layer| {
layer
.as_mut()
.and_then(|layer| layer.inner.as_mut().get_tile_mut(index))
})
}
/// Gets a vec of all the tiles in the layer, if any.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn get_tile_indices(&self, z_order: usize) -> Option<Vec<usize>> {
self.sprite_layers.get(z_order).and_then(|layer| {
layer
.as_ref()
.map(|layer| layer.inner.as_ref().get_tile_indices())
})
}
/// At the given z layer, changes the tiles into attributes for use with
/// the renderer using the given dimensions.
///
/// Easier to pass in the dimensions opposed to storing it everywhere.
pub(crate) fn tiles_to_renderer_parts(
&self,
z: usize,
dimensions: Dimension2,
) -> Option<(Vec<f32>, Vec<[f32; 4]>)> {
let area = dimensions.area() as usize;
self.sprite_layers.get(z).and_then(|o| {
o.as_ref()
.map(|layer| layer.inner.as_ref().tiles_to_attributes(area))
})
}
}
| {
let raw_tile = RawTile {
index: tile.sprite_index,
color: tile.tint,
};
layer.inner.as_mut().set_tile(index, raw_tile);
} | conditional_block |
mod.rs | //! Tiles organised into chunks for efficiency and performance.
//!
//! Mostly everything in this module is private API and not intended to be used
//! outside of this crate as a lot goes on under the hood that can cause issues.
//! With that being said, everything that can be used with helping a chunk get
//! created does live in here.
//!
//! These below examples have nothing to do with this library as all should be
//! done through the [`Tilemap`]. These are just more specific examples which
//! use the private API of this library.
//!
//! [`Tilemap`]: crate::tilemap::Tilemap
//!
//! # Simple chunk creation
//! ```
//! use bevy_asset::{prelude::*, HandleId};
//! use bevy_sprite::prelude::*;
//! use bevy_tilemap::prelude::*;
//!
//! // This must be set in Asset<TextureAtlas>.
//! let texture_atlas_handle = Handle::weak(HandleId::random::<TextureAtlas>());
//!
//! let mut tilemap = Tilemap::new(texture_atlas_handle, 32, 32);
//!
//! // There are two ways to create a new chunk. Either directly...
//!
//! tilemap.insert_chunk((0, 0));
//!
//! // Or indirectly...
//!
//! let point = (0, 0);
//! let sprite_index = 0;
//! let tile = Tile { point, sprite_index, ..Default::default() };
//! tilemap.insert_tile(tile);
//!
//! ```
//!
//! # Specifying what kind of chunk
//! ```
//! use bevy_asset::{prelude::*, HandleId};
//! use bevy_sprite::prelude::*;
//! use bevy_tilemap::prelude::*;
//!
//! // This must be set in Asset<TextureAtlas>.
//! let texture_atlas_handle = Handle::weak(HandleId::random::<TextureAtlas>());
//!
//! let mut tilemap = Tilemap::new(texture_atlas_handle, 32, 32);
//!
//! tilemap.insert_chunk((0, 0));
//!
//! let z_order = 0;
//! tilemap.add_layer(TilemapLayer { kind: LayerKind::Dense, ..Default::default() }, 1);
//!
//! let z_order = 1;
//! tilemap.add_layer(TilemapLayer { kind: LayerKind::Dense, ..Default::default() }, 1);
//! ```
/// Chunk entity.
pub(crate) mod entity;
/// Sparse and dense chunk layers.
mod layer;
/// Meshes for rendering to vertices.
pub(crate) mod mesh;
/// Raw tile that is stored in the chunks.
pub mod raw_tile;
/// Files and helpers for rendering.
pub(crate) mod render;
/// Systems for chunks.
pub(crate) mod system;
use crate::{lib::*, tile::Tile};
pub use layer::LayerKind;
use layer::{DenseLayer, LayerKindInner, SparseLayer, SpriteLayer};
pub use raw_tile::RawTile;
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, PartialEq, Debug)]
#[doc(hidden)]
pub(crate) struct Chunk {
/// The point coordinate of the chunk.
point: Point2,
/// The sprite layers of the chunk.
sprite_layers: Vec<Option<SpriteLayer>>,
/// Ephemeral user data that can be used for flags or other purposes.
user_data: u128,
/// Contains a map of all collision entities.
#[cfg(feature = "bevy_rapier2d")]
pub collision_entities: HashMap<usize, Entity>,
}
impl Chunk {
/// A newly constructed chunk from a point and the maximum number of layers.
pub(crate) fn new(
point: Point2,
layers: &[Option<LayerKind>],
dimensions: Dimension2,
) -> Chunk {
let mut chunk = Chunk {
point,
sprite_layers: vec![None; layers.len()],
user_data: 0,
#[cfg(feature = "bevy_rapier2d")]
collision_entities: HashMap::default(),
};
for (z_order, kind) in layers.iter().enumerate() {
if let Some(kind) = kind {
chunk.add_layer(kind, z_order, dimensions)
}
}
chunk
}
/// Adds a layer from a layer kind, the z layer, and dimensions of the
/// chunk.
pub(crate) fn add_layer(&mut self, kind: &LayerKind, z_order: usize, dimensions: Dimension2) {
match kind {
LayerKind::Dense => {
let tiles = vec![
RawTile {
index: 0,
color: Color::rgba(0.0, 0.0, 0.0, 0.0)
};
dimensions.area() as usize
];
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
*layer = Some(SpriteLayer {
inner: LayerKindInner::Dense(DenseLayer::new(tiles)),
entity: None,
});
} else {
error!("sprite layer {} is out of bounds", z_order);
}
}
LayerKind::Sparse => {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
*layer = Some(SpriteLayer {
inner: LayerKindInner::Sparse(SparseLayer::new(HashMap::default())),
entity: None,
});
} else {
error!("sprite layer {} is out of bounds", z_order);
}
}
}
}
/// Returns the point of the location of the chunk.
pub(crate) fn point(&self) -> Point2 {
self.point
}
// /// Returns a copy of the user data.
// pub(crate) fn user_data(&self) -> u128 {
// self.user_data
// }
//
// /// Returns a mutable reference to the user data.
// pub(crate) fn user_data_mut(&mut self) -> &mut u128 {
// &mut self.user_data
// }
/// Moves a layer from a z layer to another.
pub(crate) fn move_layer(&mut self, from_z: usize, to_z: usize) |
/// Removes a layer from the specified layer.
pub(crate) fn remove_layer(&mut self, z_order: usize) {
self.sprite_layers.get_mut(z_order).take();
}
/// Sets the mesh for the chunk layer to use.
pub(crate) fn set_mesh(&mut self, z_order: usize, mesh: Handle<Mesh>) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.inner.as_mut().set_mesh(mesh)
} else {
error!("can not set mesh to sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Sets a single raw tile to be added to a z layer and index.
pub(crate) fn set_tile<P: Into<Point2>>(&mut self, index: usize, tile: Tile<P>) {
if let Some(layer) = self.sprite_layers.get_mut(tile.z_order) {
if let Some(layer) = layer.as_mut() {
let raw_tile = RawTile {
index: tile.sprite_index,
color: tile.tint,
};
layer.inner.as_mut().set_tile(index, raw_tile);
} else {
error!("can not set tile to sprite layer {}", tile.z_order);
}
} else {
error!("sprite layer {} does not exist", tile.z_order);
}
}
/// Removes a tile from a sprite layer with a given index and z order.
pub(crate) fn remove_tile(&mut self, index: usize, z_order: usize) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.inner.as_mut().remove_tile(index);
} else {
error!("can not remove tile on sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Adds an entity to a z layer, always when it is spawned.
pub(crate) fn add_entity(&mut self, z_order: usize, entity: Entity) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.entity = Some(entity);
} else {
error!("can not add entity to sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Adds an entity to a tile index in a layer.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn insert_collision_entity(
&mut self,
index: usize,
entity: Entity,
) -> Option<Entity> {
self.collision_entities.insert(index, entity)
}
/// Gets the layers entity, if any. Useful for despawning.
pub(crate) fn get_entity(&self, z_order: usize) -> Option<Entity> {
self.sprite_layers
.get(z_order)
.and_then(|o| o.as_ref().and_then(|layer| layer.entity))
}
/// Gets the collision entity if any.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn get_collision_entity(&self, index: usize) -> Option<Entity> {
self.collision_entities.get(&index).cloned()
}
/// Remove all the layers and collision entities and return them for use with bulk despawning.
pub(crate) fn remove_entities(&mut self) -> Vec<Entity> {
let mut entities = Vec::new();
for sprite_layer in &mut self.sprite_layers {
if let Some(layer) = sprite_layer {
if let Some(entity) = layer.entity.take() {
entities.push(entity);
}
}
}
#[cfg(feature = "bevy_rapier2d")]
for (_, entity) in self.collision_entities.drain() {
entities.push(entity)
}
entities
}
/// Gets a reference to a tile from a provided z order and index.
pub(crate) fn get_tile(&self, z_order: usize, index: usize) -> Option<&RawTile> {
self.sprite_layers.get(z_order).and_then(|layer| {
layer
.as_ref()
.and_then(|layer| layer.inner.as_ref().get_tile(index))
})
}
/// Gets a mutable reference to a tile from a provided z order and index.
pub(crate) fn get_tile_mut(&mut self, z_order: usize, index: usize) -> Option<&mut RawTile> {
self.sprite_layers.get_mut(z_order).and_then(|layer| {
layer
.as_mut()
.and_then(|layer| layer.inner.as_mut().get_tile_mut(index))
})
}
/// Gets a vec of all the tiles in the layer, if any.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn get_tile_indices(&self, z_order: usize) -> Option<Vec<usize>> {
self.sprite_layers.get(z_order).and_then(|layer| {
layer
.as_ref()
.map(|layer| layer.inner.as_ref().get_tile_indices())
})
}
/// At the given z layer, changes the tiles into attributes for use with
/// the renderer using the given dimensions.
///
/// Easier to pass in the dimensions opposed to storing it everywhere.
pub(crate) fn tiles_to_renderer_parts(
&self,
z: usize,
dimensions: Dimension2,
) -> Option<(Vec<f32>, Vec<[f32; 4]>)> {
let area = dimensions.area() as usize;
self.sprite_layers.get(z).and_then(|o| {
o.as_ref()
.map(|layer| layer.inner.as_ref().tiles_to_attributes(area))
})
}
}
| {
// TODO: rename to swap and include it in the greater api
if self.sprite_layers.get(to_z).is_some() {
error!(
"sprite layer {} unexpectedly exists and can not be moved",
to_z
);
return;
}
self.sprite_layers.swap(from_z, to_z);
} | identifier_body |
mod.rs | //! Tiles organised into chunks for efficiency and performance.
//!
//! Mostly everything in this module is private API and not intended to be used
//! outside of this crate as a lot goes on under the hood that can cause issues.
//! With that being said, everything that can be used with helping a chunk get
//! created does live in here.
//!
//! These below examples have nothing to do with this library as all should be
//! done through the [`Tilemap`]. These are just more specific examples which
//! use the private API of this library.
//!
//! [`Tilemap`]: crate::tilemap::Tilemap
//!
//! # Simple chunk creation
//! ```
//! use bevy_asset::{prelude::*, HandleId};
//! use bevy_sprite::prelude::*;
//! use bevy_tilemap::prelude::*;
//!
//! // This must be set in Asset<TextureAtlas>.
//! let texture_atlas_handle = Handle::weak(HandleId::random::<TextureAtlas>());
//!
//! let mut tilemap = Tilemap::new(texture_atlas_handle, 32, 32);
//!
//! // There are two ways to create a new chunk. Either directly...
//!
//! tilemap.insert_chunk((0, 0));
//!
//! // Or indirectly...
//!
//! let point = (0, 0);
//! let sprite_index = 0;
//! let tile = Tile { point, sprite_index, ..Default::default() };
//! tilemap.insert_tile(tile);
//!
//! ```
//!
//! # Specifying what kind of chunk
//! ```
//! use bevy_asset::{prelude::*, HandleId};
//! use bevy_sprite::prelude::*;
//! use bevy_tilemap::prelude::*;
//!
//! // This must be set in Asset<TextureAtlas>.
//! let texture_atlas_handle = Handle::weak(HandleId::random::<TextureAtlas>());
//!
//! let mut tilemap = Tilemap::new(texture_atlas_handle, 32, 32);
//!
//! tilemap.insert_chunk((0, 0));
//!
//! let z_order = 0;
//! tilemap.add_layer(TilemapLayer { kind: LayerKind::Dense, ..Default::default() }, 1);
//!
//! let z_order = 1;
//! tilemap.add_layer(TilemapLayer { kind: LayerKind::Dense, ..Default::default() }, 1);
//! ```
/// Chunk entity.
pub(crate) mod entity;
/// Sparse and dense chunk layers.
mod layer;
/// Meshes for rendering to vertices.
pub(crate) mod mesh;
/// Raw tile that is stored in the chunks.
pub mod raw_tile;
/// Files and helpers for rendering.
pub(crate) mod render;
/// Systems for chunks.
pub(crate) mod system;
use crate::{lib::*, tile::Tile};
pub use layer::LayerKind;
use layer::{DenseLayer, LayerKindInner, SparseLayer, SpriteLayer};
pub use raw_tile::RawTile;
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, PartialEq, Debug)]
#[doc(hidden)]
pub(crate) struct Chunk {
/// The point coordinate of the chunk.
point: Point2,
/// The sprite layers of the chunk.
sprite_layers: Vec<Option<SpriteLayer>>,
/// Ephemeral user data that can be used for flags or other purposes.
user_data: u128,
/// Contains a map of all collision entities.
#[cfg(feature = "bevy_rapier2d")]
pub collision_entities: HashMap<usize, Entity>,
}
impl Chunk {
/// A newly constructed chunk from a point and the maximum number of layers.
pub(crate) fn new(
point: Point2,
layers: &[Option<LayerKind>],
dimensions: Dimension2,
) -> Chunk {
let mut chunk = Chunk {
point,
sprite_layers: vec![None; layers.len()],
user_data: 0,
#[cfg(feature = "bevy_rapier2d")]
collision_entities: HashMap::default(),
};
for (z_order, kind) in layers.iter().enumerate() {
if let Some(kind) = kind {
chunk.add_layer(kind, z_order, dimensions)
}
}
chunk
}
/// Adds a layer from a layer kind, the z layer, and dimensions of the
/// chunk.
pub(crate) fn add_layer(&mut self, kind: &LayerKind, z_order: usize, dimensions: Dimension2) {
match kind {
LayerKind::Dense => {
let tiles = vec![
RawTile {
index: 0,
color: Color::rgba(0.0, 0.0, 0.0, 0.0)
};
dimensions.area() as usize
];
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
*layer = Some(SpriteLayer {
inner: LayerKindInner::Dense(DenseLayer::new(tiles)),
entity: None,
});
} else {
error!("sprite layer {} is out of bounds", z_order);
}
}
LayerKind::Sparse => {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
*layer = Some(SpriteLayer {
inner: LayerKindInner::Sparse(SparseLayer::new(HashMap::default())),
entity: None,
});
} else {
error!("sprite layer {} is out of bounds", z_order);
}
}
}
}
/// Returns the point of the location of the chunk.
pub(crate) fn point(&self) -> Point2 {
self.point
}
// /// Returns a copy of the user data.
// pub(crate) fn user_data(&self) -> u128 {
// self.user_data
// }
//
// /// Returns a mutable reference to the user data.
// pub(crate) fn user_data_mut(&mut self) -> &mut u128 {
// &mut self.user_data
// }
/// Moves a layer from a z layer to another.
pub(crate) fn move_layer(&mut self, from_z: usize, to_z: usize) {
// TODO: rename to swap and include it in the greater api
if self.sprite_layers.get(to_z).is_some() {
error!(
"sprite layer {} unexpectedly exists and can not be moved",
to_z
);
return;
}
self.sprite_layers.swap(from_z, to_z);
}
/// Removes a layer from the specified layer.
pub(crate) fn remove_layer(&mut self, z_order: usize) {
self.sprite_layers.get_mut(z_order).take();
}
/// Sets the mesh for the chunk layer to use.
pub(crate) fn set_mesh(&mut self, z_order: usize, mesh: Handle<Mesh>) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.inner.as_mut().set_mesh(mesh)
} else {
error!("can not set mesh to sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Sets a single raw tile to be added to a z layer and index.
pub(crate) fn set_tile<P: Into<Point2>>(&mut self, index: usize, tile: Tile<P>) {
if let Some(layer) = self.sprite_layers.get_mut(tile.z_order) {
if let Some(layer) = layer.as_mut() {
let raw_tile = RawTile {
index: tile.sprite_index,
color: tile.tint,
};
layer.inner.as_mut().set_tile(index, raw_tile);
} else {
error!("can not set tile to sprite layer {}", tile.z_order);
}
} else {
error!("sprite layer {} does not exist", tile.z_order);
}
}
/// Removes a tile from a sprite layer with a given index and z order.
pub(crate) fn remove_tile(&mut self, index: usize, z_order: usize) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.inner.as_mut().remove_tile(index);
} else {
error!("can not remove tile on sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Adds an entity to a z layer, always when it is spawned.
pub(crate) fn add_entity(&mut self, z_order: usize, entity: Entity) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.entity = Some(entity);
} else {
error!("can not add entity to sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Adds an entity to a tile index in a layer.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn insert_collision_entity(
&mut self,
index: usize,
entity: Entity,
) -> Option<Entity> {
self.collision_entities.insert(index, entity)
}
/// Gets the layers entity, if any. Useful for despawning.
pub(crate) fn get_entity(&self, z_order: usize) -> Option<Entity> {
self.sprite_layers
.get(z_order)
.and_then(|o| o.as_ref().and_then(|layer| layer.entity))
}
/// Gets the collision entity if any.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn get_collision_entity(&self, index: usize) -> Option<Entity> {
self.collision_entities.get(&index).cloned()
}
/// Remove all the layers and collision entities and return them for use with bulk despawning.
pub(crate) fn remove_entities(&mut self) -> Vec<Entity> {
let mut entities = Vec::new();
for sprite_layer in &mut self.sprite_layers {
if let Some(layer) = sprite_layer {
if let Some(entity) = layer.entity.take() {
entities.push(entity);
}
}
}
#[cfg(feature = "bevy_rapier2d")]
for (_, entity) in self.collision_entities.drain() {
entities.push(entity)
}
entities
}
/// Gets a reference to a tile from a provided z order and index.
pub(crate) fn get_tile(&self, z_order: usize, index: usize) -> Option<&RawTile> {
self.sprite_layers.get(z_order).and_then(|layer| {
layer
.as_ref()
.and_then(|layer| layer.inner.as_ref().get_tile(index))
})
}
/// Gets a mutable reference to a tile from a provided z order and index.
pub(crate) fn | (&mut self, z_order: usize, index: usize) -> Option<&mut RawTile> {
self.sprite_layers.get_mut(z_order).and_then(|layer| {
layer
.as_mut()
.and_then(|layer| layer.inner.as_mut().get_tile_mut(index))
})
}
/// Gets a vec of all the tiles in the layer, if any.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn get_tile_indices(&self, z_order: usize) -> Option<Vec<usize>> {
self.sprite_layers.get(z_order).and_then(|layer| {
layer
.as_ref()
.map(|layer| layer.inner.as_ref().get_tile_indices())
})
}
/// At the given z layer, changes the tiles into attributes for use with
/// the renderer using the given dimensions.
///
/// Easier to pass in the dimensions opposed to storing it everywhere.
pub(crate) fn tiles_to_renderer_parts(
&self,
z: usize,
dimensions: Dimension2,
) -> Option<(Vec<f32>, Vec<[f32; 4]>)> {
let area = dimensions.area() as usize;
self.sprite_layers.get(z).and_then(|o| {
o.as_ref()
.map(|layer| layer.inner.as_ref().tiles_to_attributes(area))
})
}
}
| get_tile_mut | identifier_name |
mod.rs | //! Tiles organised into chunks for efficiency and performance.
//!
//! Mostly everything in this module is private API and not intended to be used
//! outside of this crate as a lot goes on under the hood that can cause issues.
//! With that being said, everything that can be used with helping a chunk get
//! created does live in here.
//!
//! These below examples have nothing to do with this library as all should be
//! done through the [`Tilemap`]. These are just more specific examples which
//! use the private API of this library.
//!
//! [`Tilemap`]: crate::tilemap::Tilemap
//!
//! # Simple chunk creation
//! ```
//! use bevy_asset::{prelude::*, HandleId};
//! use bevy_sprite::prelude::*;
//! use bevy_tilemap::prelude::*;
//!
//! // This must be set in Asset<TextureAtlas>.
//! let texture_atlas_handle = Handle::weak(HandleId::random::<TextureAtlas>());
//!
//! let mut tilemap = Tilemap::new(texture_atlas_handle, 32, 32);
//!
//! // There are two ways to create a new chunk. Either directly...
//!
//! tilemap.insert_chunk((0, 0));
//!
//! // Or indirectly...
//!
//! let point = (0, 0);
//! let sprite_index = 0;
//! let tile = Tile { point, sprite_index, ..Default::default() };
//! tilemap.insert_tile(tile);
//!
//! ```
//!
//! # Specifying what kind of chunk
//! ```
//! use bevy_asset::{prelude::*, HandleId};
//! use bevy_sprite::prelude::*;
//! use bevy_tilemap::prelude::*;
//!
//! // This must be set in Asset<TextureAtlas>.
//! let texture_atlas_handle = Handle::weak(HandleId::random::<TextureAtlas>());
//!
//! let mut tilemap = Tilemap::new(texture_atlas_handle, 32, 32);
//!
//! tilemap.insert_chunk((0, 0));
//!
//! let z_order = 0;
//! tilemap.add_layer(TilemapLayer { kind: LayerKind::Dense, ..Default::default() }, 1);
//!
//! let z_order = 1;
//! tilemap.add_layer(TilemapLayer { kind: LayerKind::Dense, ..Default::default() }, 1);
//! ```
/// Chunk entity.
pub(crate) mod entity;
/// Sparse and dense chunk layers.
mod layer;
/// Meshes for rendering to vertices.
pub(crate) mod mesh;
/// Raw tile that is stored in the chunks.
pub mod raw_tile;
/// Files and helpers for rendering.
pub(crate) mod render;
/// Systems for chunks.
pub(crate) mod system;
use crate::{lib::*, tile::Tile};
pub use layer::LayerKind;
use layer::{DenseLayer, LayerKindInner, SparseLayer, SpriteLayer};
pub use raw_tile::RawTile;
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, PartialEq, Debug)]
#[doc(hidden)]
pub(crate) struct Chunk {
/// The point coordinate of the chunk.
point: Point2,
/// The sprite layers of the chunk.
sprite_layers: Vec<Option<SpriteLayer>>,
/// Ephemeral user data that can be used for flags or other purposes.
user_data: u128,
/// Contains a map of all collision entities.
#[cfg(feature = "bevy_rapier2d")]
pub collision_entities: HashMap<usize, Entity>,
}
impl Chunk {
/// A newly constructed chunk from a point and the maximum number of layers.
pub(crate) fn new(
point: Point2,
layers: &[Option<LayerKind>],
dimensions: Dimension2,
) -> Chunk {
let mut chunk = Chunk {
point,
sprite_layers: vec![None; layers.len()],
user_data: 0,
#[cfg(feature = "bevy_rapier2d")]
collision_entities: HashMap::default(),
};
for (z_order, kind) in layers.iter().enumerate() {
if let Some(kind) = kind {
chunk.add_layer(kind, z_order, dimensions)
}
}
chunk
}
/// Adds a layer from a layer kind, the z layer, and dimensions of the
/// chunk.
pub(crate) fn add_layer(&mut self, kind: &LayerKind, z_order: usize, dimensions: Dimension2) {
match kind {
LayerKind::Dense => {
let tiles = vec![
RawTile {
index: 0,
color: Color::rgba(0.0, 0.0, 0.0, 0.0)
};
dimensions.area() as usize
];
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
*layer = Some(SpriteLayer {
inner: LayerKindInner::Dense(DenseLayer::new(tiles)),
entity: None,
});
} else {
error!("sprite layer {} is out of bounds", z_order);
}
}
LayerKind::Sparse => {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
*layer = Some(SpriteLayer {
inner: LayerKindInner::Sparse(SparseLayer::new(HashMap::default())),
entity: None,
});
} else {
error!("sprite layer {} is out of bounds", z_order);
}
}
}
}
/// Returns the point of the location of the chunk.
pub(crate) fn point(&self) -> Point2 {
self.point
}
// /// Returns a copy of the user data.
// pub(crate) fn user_data(&self) -> u128 {
// self.user_data
// }
//
// /// Returns a mutable reference to the user data.
// pub(crate) fn user_data_mut(&mut self) -> &mut u128 {
// &mut self.user_data
// }
/// Moves a layer from a z layer to another.
pub(crate) fn move_layer(&mut self, from_z: usize, to_z: usize) {
// TODO: rename to swap and include it in the greater api
if self.sprite_layers.get(to_z).is_some() {
error!(
"sprite layer {} unexpectedly exists and can not be moved",
to_z
);
return;
}
self.sprite_layers.swap(from_z, to_z);
}
/// Removes a layer from the specified layer.
pub(crate) fn remove_layer(&mut self, z_order: usize) {
self.sprite_layers.get_mut(z_order).take();
}
/// Sets the mesh for the chunk layer to use.
pub(crate) fn set_mesh(&mut self, z_order: usize, mesh: Handle<Mesh>) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.inner.as_mut().set_mesh(mesh)
} else {
error!("can not set mesh to sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Sets a single raw tile to be added to a z layer and index.
pub(crate) fn set_tile<P: Into<Point2>>(&mut self, index: usize, tile: Tile<P>) {
if let Some(layer) = self.sprite_layers.get_mut(tile.z_order) {
if let Some(layer) = layer.as_mut() {
let raw_tile = RawTile {
index: tile.sprite_index,
color: tile.tint,
};
layer.inner.as_mut().set_tile(index, raw_tile);
} else {
error!("can not set tile to sprite layer {}", tile.z_order);
}
} else {
error!("sprite layer {} does not exist", tile.z_order);
}
}
/// Removes a tile from a sprite layer with a given index and z order.
pub(crate) fn remove_tile(&mut self, index: usize, z_order: usize) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.inner.as_mut().remove_tile(index);
} else {
error!("can not remove tile on sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Adds an entity to a z layer, always when it is spawned.
pub(crate) fn add_entity(&mut self, z_order: usize, entity: Entity) {
if let Some(layer) = self.sprite_layers.get_mut(z_order) {
if let Some(layer) = layer.as_mut() {
layer.entity = Some(entity);
} else {
error!("can not add entity to sprite layer {}", z_order);
}
} else {
error!("sprite layer {} does not exist", z_order);
}
}
/// Adds an entity to a tile index in a layer.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn insert_collision_entity(
&mut self,
index: usize,
entity: Entity,
) -> Option<Entity> {
self.collision_entities.insert(index, entity)
}
/// Gets the layers entity, if any. Useful for despawning.
pub(crate) fn get_entity(&self, z_order: usize) -> Option<Entity> {
self.sprite_layers
.get(z_order)
.and_then(|o| o.as_ref().and_then(|layer| layer.entity))
}
/// Gets the collision entity if any.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn get_collision_entity(&self, index: usize) -> Option<Entity> {
self.collision_entities.get(&index).cloned()
}
/// Remove all the layers and collision entities and return them for use with bulk despawning.
pub(crate) fn remove_entities(&mut self) -> Vec<Entity> {
let mut entities = Vec::new();
for sprite_layer in &mut self.sprite_layers {
if let Some(layer) = sprite_layer {
if let Some(entity) = layer.entity.take() {
entities.push(entity);
}
}
}
#[cfg(feature = "bevy_rapier2d")]
for (_, entity) in self.collision_entities.drain() {
entities.push(entity)
}
entities |
/// Gets a reference to a tile from a provided z order and index.
pub(crate) fn get_tile(&self, z_order: usize, index: usize) -> Option<&RawTile> {
self.sprite_layers.get(z_order).and_then(|layer| {
layer
.as_ref()
.and_then(|layer| layer.inner.as_ref().get_tile(index))
})
}
/// Gets a mutable reference to a tile from a provided z order and index.
pub(crate) fn get_tile_mut(&mut self, z_order: usize, index: usize) -> Option<&mut RawTile> {
self.sprite_layers.get_mut(z_order).and_then(|layer| {
layer
.as_mut()
.and_then(|layer| layer.inner.as_mut().get_tile_mut(index))
})
}
/// Gets a vec of all the tiles in the layer, if any.
#[cfg(feature = "bevy_rapier2d")]
pub(crate) fn get_tile_indices(&self, z_order: usize) -> Option<Vec<usize>> {
self.sprite_layers.get(z_order).and_then(|layer| {
layer
.as_ref()
.map(|layer| layer.inner.as_ref().get_tile_indices())
})
}
/// At the given z layer, changes the tiles into attributes for use with
/// the renderer using the given dimensions.
///
/// Easier to pass in the dimensions opposed to storing it everywhere.
pub(crate) fn tiles_to_renderer_parts(
&self,
z: usize,
dimensions: Dimension2,
) -> Option<(Vec<f32>, Vec<[f32; 4]>)> {
let area = dimensions.area() as usize;
self.sprite_layers.get(z).and_then(|o| {
o.as_ref()
.map(|layer| layer.inner.as_ref().tiles_to_attributes(area))
})
}
} | } | random_line_split |
process.rs | use crate::{FromInner, HandleTrait, Inner, IntoInner};
use std::convert::TryFrom;
use std::ffi::CString;
use uv::uv_stdio_container_s__bindgen_ty_1 as uv_stdio_container_data;
use uv::{
uv_disable_stdio_inheritance, uv_kill, uv_process_get_pid, uv_process_kill,
uv_process_options_t, uv_process_t, uv_spawn, uv_stdio_container_t,
};
callbacks! {
pub ExitCB(handle: ProcessHandle, exit_status: i64, term_signal: i32);
}
/// Additional data stored on the handle
#[derive(Default)]
pub(crate) struct ProcessDataFields<'a> {
exit_cb: ExitCB<'a>,
}
/// Callback for uv_process_options_t.exit_cb
extern "C" fn uv_exit_cb(
handle: *mut uv_process_t,
exit_status: i64,
term_signal: std::os::raw::c_int,
) {
let dataptr = crate::Handle::get_data(uv_handle!(handle));
if !dataptr.is_null() {
unsafe {
if let super::ProcessData(d) = &mut (*dataptr).addl {
d.exit_cb
.call(handle.into_inner(), exit_status, term_signal as _);
}
}
}
}
bitflags! {
/// Flags specifying how a stdio should be transmitted to the child process.
pub struct StdioFlags: u32 {
/// No file descriptor will be provided (or redirected to `/dev/null` if it is fd 0, 1 or
/// 2).
const IGNORE = uv::uv_stdio_flags_UV_IGNORE as _;
/// Open a new pipe into `data.stream`, per the flags below. The `data.stream` field must
/// point to a PipeHandle object that has been initialized with `new`, but not yet opened
/// or connected.
const CREATE_PIPE = uv::uv_stdio_flags_UV_CREATE_PIPE as _;
/// The child process will be given a duplicate of the parent's file descriptor given by
/// `data.fd`.
const INHERIT_FD = uv::uv_stdio_flags_UV_INHERIT_FD as _;
/// The child process will be given a duplicate of the parent's file descriptor being used
/// by the stream handle given by `data.stream`.
const INHERIT_STREAM = uv::uv_stdio_flags_UV_INHERIT_STREAM as _;
/// When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE determine the
/// direction of flow, from the child process' perspective. Both flags may be specified to
/// create a duplex data stream.
const READABLE_PIPE = uv::uv_stdio_flags_UV_READABLE_PIPE as _;
const WRITABLE_PIPE = uv::uv_stdio_flags_UV_WRITABLE_PIPE as _;
/// Open the child pipe handle in overlapped mode on Windows. On Unix it is silently
/// ignored.
const OVERLAPPED_PIPE = uv::uv_stdio_flags_UV_OVERLAPPED_PIPE as _;
}
}
impl Default for StdioFlags {
fn default() -> Self {
StdioFlags::IGNORE
}
}
bitflags! {
/// Flags to be set on the flags field of ProcessOptions.
pub struct ProcessFlags: u32 {
/// Set the child process' user id.
const SETUID = uv::uv_process_flags_UV_PROCESS_SETUID as _;
/// Set the child process' group id.
const SETGID = uv::uv_process_flags_UV_PROCESS_SETGID as _;
/// Do not wrap any arguments in quotes, or perform any other escaping, when converting the
/// argument list into a command line string. This option is only meaningful on Windows
/// systems. On Unix it is silently ignored.
const WINDOWS_VERBATIM_ARGUMENTS = uv::uv_process_flags_UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS as _;
/// Spawn the child process in a detached state - this will make it a process group leader,
/// and will effectively enable the child to keep running after the parent exits. Note that
/// the child process will still keep the parent's event loop alive unless the parent
/// process calls uv_unref() on the child's process handle.
const DETACHED = uv::uv_process_flags_UV_PROCESS_DETACHED as _;
/// Hide the subprocess window that would normally be created. This option is only
/// meaningful on Windows systems. On Unix it is silently ignored.
const WINDOWS_HIDE = uv::uv_process_flags_UV_PROCESS_WINDOWS_HIDE as _;
/// Hide the subprocess console window that would normally be created. This option is only
/// meaningful on Windows systems. On Unix it is silently ignored.
const WINDOWS_HIDE_CONSOLE = uv::uv_process_flags_UV_PROCESS_WINDOWS_HIDE_CONSOLE as _;
/// Hide the subprocess GUI window that would normally be created. This option is only
/// meaningful on Windows systems. On Unix it is silently ignored.
const WINDOWS_HIDE_GUI = uv::uv_process_flags_UV_PROCESS_WINDOWS_HIDE_GUI as _;
}
}
pub enum StdioType {
Stream(crate::StreamHandle),
Fd(i32),
}
impl Default for StdioType {
fn default() -> Self {
StdioType::Fd(0)
}
}
impl Inner<uv_stdio_container_data> for StdioType {
fn inner(&self) -> uv_stdio_container_data {
match self {
StdioType::Stream(s) => uv_stdio_container_data { stream: s.inner() },
StdioType::Fd(fd) => uv_stdio_container_data { fd: *fd },
}
}
}
/// Container for each stdio handle or fd passed to a child process.
#[derive(Default)]
pub struct StdioContainer {
pub flags: StdioFlags,
pub data: StdioType,
}
/// Options for spawning the process (passed to spawn()).
pub struct ProcessOptions<'a> {
/// Called after the process exits.
pub exit_cb: ExitCB<'static>,
/// Path to program to execute.
pub file: &'a str,
/// Command line arguments. args[0] should be the path to the program. On Windows this uses
/// CreateProcess which concatenates the arguments into a string this can cause some strange
/// errors. See the note at windows_verbatim_arguments.
pub args: &'a [&'a str],
/// This will be set as the environ variable in the subprocess. If this is None then the
/// parents environ will be used.
pub env: Option<&'a [&'a str]>,
/// If Some() this represents a directory the subprocess should execute in. Stands for current
/// working directory.
pub cwd: Option<&'a str>,
/// Various flags that control how spawn() behaves. See the definition of `ProcessFlags`.
pub flags: ProcessFlags,
/// The `stdio` field points to an array of StdioContainer structs that describe the file
/// descriptors that will be made available to the child process. The convention is that
/// stdio[0] points to stdin, fd 1 is used for stdout, and fd 2 is stderr.
///
/// Note that on windows file descriptors greater than 2 are available to the child process
/// only if the child processes uses the MSVCRT runtime.
pub stdio: &'a [StdioContainer],
/// Libuv can change the child process' user/group id. This happens only when the appropriate
/// bits are set in the flags fields. This is not supported on windows; spawn() will fail and
/// set the error to ENOTSUP.
pub uid: crate::Uid,
/// Libuv can change the child process' user/group id. This happens only when the appropriate
/// bits are set in the flags fields. This is not supported on windows; spawn() will fail and
/// set the error to ENOTSUP.
pub gid: crate::Gid,
}
impl<'a> ProcessOptions<'a> {
/// Constructs a new ProcessOptions object. The args slice must have at least one member: the
/// path to the program to execute. Any additional members of the slice will be passed as
/// command line arguments.
pub fn new(args: &'a [&'a str]) -> ProcessOptions {
assert!(
args.len() > 0,
"ProcessOptions args slice must contain at least one str"
);
ProcessOptions {
exit_cb: ().into(),
file: args[0],
args: args,
env: None,
cwd: None,
flags: ProcessFlags::empty(),
stdio: &[],
uid: 0,
gid: 0,
}
}
}
/// Process handles will spawn a new process and allow the user to control it and establish
/// communication channels with it using streams.
#[derive(Clone, Copy)]
pub struct | {
handle: *mut uv_process_t,
}
impl ProcessHandle {
/// Create a new process handle
pub fn new() -> crate::Result<ProcessHandle> {
let layout = std::alloc::Layout::new::<uv_process_t>();
let handle = unsafe { std::alloc::alloc(layout) as *mut uv_process_t };
if handle.is_null() {
return Err(crate::Error::ENOMEM);
}
crate::Handle::initialize_data(uv_handle!(handle), super::ProcessData(Default::default()));
Ok(ProcessHandle { handle })
}
/// Disables inheritance for file descriptors / handles that this process inherited from its
/// parent. The effect is that child processes spawned by this process don’t accidentally
/// inherit these handles.
///
/// It is recommended to call this function as early in your program as possible, before the
/// inherited file descriptors can be closed or duplicated.
///
/// Note: This function works on a best-effort basis: there is no guarantee that libuv can
/// discover all file descriptors that were inherited. In general it does a better job on
/// Windows than it does on Unix.
pub fn disable_stdio_inheritance() {
unsafe { uv_disable_stdio_inheritance() };
}
/// Initializes the process handle and starts the process.
///
/// Possible reasons for failing to spawn would include (but not be limited to) the file to
/// execute not existing, not having permissions to use the setuid or setgid specified, or not
/// having enough memory to allocate for the new process.
pub fn spawn(
&mut self,
r#loop: &crate::Loop,
options: ProcessOptions,
) -> Result<(), Box<dyn std::error::Error>> {
let exit_cb_uv = use_c_callback!(uv_exit_cb, options.exit_cb);
let dataptr = crate::Handle::get_data(uv_handle!(self.handle));
if !dataptr.is_null() {
if let super::ProcessData(d) = unsafe { &mut (*dataptr).addl } {
d.exit_cb = options.exit_cb;
}
}
// CString will ensure we have a terminating null
let file = CString::new(options.file)?;
// For args, libuv-sys is expecting a "*mut *mut c_char". The only way to get a "*mut
// c_char" from a CString is via CString::into_raw() which will "leak" the memory from
// rust. We'll need to make sure to reclaim that memory later so it'll be GC'd. So, first
// we need to convert all of the arguments to CStrings for the null-termination. Then we
// need to grab a *mut pointer to the data using CString::into_raw() which will "leak" the
// CStrings out of rust. Then we need to add a final null pointer to the end (the C code
// requires it so it can find the end of the array) and collect it all into a Vec.
let mut args = options
.args
.iter()
.map(|a| CString::new(*a).map(|s| s.into_raw()))
.chain(std::iter::once(Ok(std::ptr::null_mut())))
.collect::<Result<Vec<*mut std::os::raw::c_char>, std::ffi::NulError>>()?;
// env is similar to args except that it is Option'al.
let mut env = options
.env
.map(|env| {
env.iter()
.map(|e| CString::new(*e).map(|s| s.into_raw()))
.chain(std::iter::once(Ok(std::ptr::null_mut())))
.collect::<Result<Vec<*mut std::os::raw::c_char>, std::ffi::NulError>>()
})
.transpose()?;
// cwd is like file except it's Option'al
let cwd = options.cwd.map(|cwd| CString::new(cwd)).transpose()?;
// stdio is an array of uv_stdio_container_t objects
let mut stdio = options
.stdio
.iter()
.map(|stdio| uv_stdio_container_t {
flags: stdio.flags.bits() as _,
data: stdio.data.inner(),
})
.collect::<Vec<uv_stdio_container_t>>();
let options = uv_process_options_t {
exit_cb: exit_cb_uv,
file: file.as_ptr(),
args: args.as_mut_ptr(),
env: env
.as_mut()
.map_or(std::ptr::null_mut(), |e| e.as_mut_ptr()),
cwd: cwd.map_or(std::ptr::null(), |s| s.as_ptr()),
flags: options.flags.bits(),
stdio_count: options.stdio.len() as _,
stdio: stdio.as_mut_ptr(),
uid: options.uid,
gid: options.gid,
};
let result = crate::uvret(unsafe {
uv_spawn(r#loop.into_inner(), self.handle, &options as *const _)
})
.map_err(|e| Box::new(e) as _);
// reclaim data so it'll be freed - I'm pretty sure it's safe to free options here. Under
// the hood, libuv is calling fork and execvp. The fork should copy the address space into
// the new process, so freeing it here shouldn't affect that. Then execvp is going to
// replace the address space, so we don't need to worry about leaking the copy.
// For args, we don't need the last element because it's a null pointer.
let args: Vec<CString> = args
.iter()
.take(args.len() - 1)
.map(|a| unsafe { CString::from_raw(*a) })
.collect();
std::mem::drop(args);
// env is the same as args except it's Option'al
let env: Option<Vec<CString>> = env.map(|env| {
env.iter()
.take(env.len() - 1)
.map(|e| unsafe { CString::from_raw(*e) })
.collect()
});
std::mem::drop(env);
result
}
/// The PID of the spawned process. It’s set after calling spawn().
pub fn pid(&self) -> i32 {
unsafe { uv_process_get_pid(self.handle) as _ }
}
/// Sends the specified signal to the given process handle. Check the documentation on
/// SignalHandle for signal support, specially on Windows.
pub fn kill(&mut self, signum: i32) -> crate::Result<()> {
crate::uvret(unsafe { uv_process_kill(self.handle, signum) })
}
/// Sends the specified signal to the given PID. Check the documentation on SignalHandle for
/// signal support, specially on Windows.
pub fn kill_pid(pid: i32, signum: i32) -> crate::Result<()> {
crate::uvret(unsafe { uv_kill(pid, signum) })
}
}
impl FromInner<*mut uv_process_t> for ProcessHandle {
fn from_inner(handle: *mut uv_process_t) -> ProcessHandle {
ProcessHandle { handle }
}
}
impl Inner<*mut uv::uv_handle_t> for ProcessHandle {
fn inner(&self) -> *mut uv::uv_handle_t {
uv_handle!(self.handle)
}
}
impl From<ProcessHandle> for crate::Handle {
fn from(process: ProcessHandle) -> crate::Handle {
crate::Handle::from_inner(Inner::<*mut uv::uv_handle_t>::inner(&process))
}
}
impl crate::ToHandle for ProcessHandle {
fn to_handle(&self) -> crate::Handle {
crate::Handle::from_inner(Inner::<*mut uv::uv_handle_t>::inner(self))
}
}
impl TryFrom<crate::Handle> for ProcessHandle {
type Error = crate::ConversionError;
fn try_from(handle: crate::Handle) -> Result<Self, Self::Error> {
let t = handle.get_type();
if t != crate::HandleType::PROCESS {
Err(crate::ConversionError::new(t, crate::HandleType::PROCESS))
} else {
Ok((handle.inner() as *mut uv_process_t).into_inner())
}
}
}
impl HandleTrait for ProcessHandle {}
impl crate::Loop {
/// Create a new process handle and spawn the process
pub fn spawn_process(
&self,
options: ProcessOptions,
) -> Result<ProcessHandle, Box<dyn std::error::Error>> {
let mut process = ProcessHandle::new()?;
process.spawn(self, options)?;
Ok(process)
}
}
| ProcessHandle | identifier_name |
process.rs | use crate::{FromInner, HandleTrait, Inner, IntoInner};
use std::convert::TryFrom;
use std::ffi::CString;
use uv::uv_stdio_container_s__bindgen_ty_1 as uv_stdio_container_data;
use uv::{
uv_disable_stdio_inheritance, uv_kill, uv_process_get_pid, uv_process_kill,
uv_process_options_t, uv_process_t, uv_spawn, uv_stdio_container_t,
};
callbacks! {
pub ExitCB(handle: ProcessHandle, exit_status: i64, term_signal: i32);
}
/// Additional data stored on the handle
#[derive(Default)]
pub(crate) struct ProcessDataFields<'a> {
exit_cb: ExitCB<'a>,
}
/// Callback for uv_process_options_t.exit_cb
extern "C" fn uv_exit_cb(
handle: *mut uv_process_t,
exit_status: i64,
term_signal: std::os::raw::c_int,
) {
let dataptr = crate::Handle::get_data(uv_handle!(handle));
if !dataptr.is_null() {
unsafe {
if let super::ProcessData(d) = &mut (*dataptr).addl {
d.exit_cb
.call(handle.into_inner(), exit_status, term_signal as _);
}
}
}
}
bitflags! {
/// Flags specifying how a stdio should be transmitted to the child process. | /// 2).
const IGNORE = uv::uv_stdio_flags_UV_IGNORE as _;
/// Open a new pipe into `data.stream`, per the flags below. The `data.stream` field must
/// point to a PipeHandle object that has been initialized with `new`, but not yet opened
/// or connected.
const CREATE_PIPE = uv::uv_stdio_flags_UV_CREATE_PIPE as _;
/// The child process will be given a duplicate of the parent's file descriptor given by
/// `data.fd`.
const INHERIT_FD = uv::uv_stdio_flags_UV_INHERIT_FD as _;
/// The child process will be given a duplicate of the parent's file descriptor being used
/// by the stream handle given by `data.stream`.
const INHERIT_STREAM = uv::uv_stdio_flags_UV_INHERIT_STREAM as _;
/// When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE determine the
/// direction of flow, from the child process' perspective. Both flags may be specified to
/// create a duplex data stream.
const READABLE_PIPE = uv::uv_stdio_flags_UV_READABLE_PIPE as _;
const WRITABLE_PIPE = uv::uv_stdio_flags_UV_WRITABLE_PIPE as _;
/// Open the child pipe handle in overlapped mode on Windows. On Unix it is silently
/// ignored.
const OVERLAPPED_PIPE = uv::uv_stdio_flags_UV_OVERLAPPED_PIPE as _;
}
}
impl Default for StdioFlags {
fn default() -> Self {
StdioFlags::IGNORE
}
}
bitflags! {
/// Flags to be set on the flags field of ProcessOptions.
pub struct ProcessFlags: u32 {
/// Set the child process' user id.
const SETUID = uv::uv_process_flags_UV_PROCESS_SETUID as _;
/// Set the child process' group id.
const SETGID = uv::uv_process_flags_UV_PROCESS_SETGID as _;
/// Do not wrap any arguments in quotes, or perform any other escaping, when converting the
/// argument list into a command line string. This option is only meaningful on Windows
/// systems. On Unix it is silently ignored.
const WINDOWS_VERBATIM_ARGUMENTS = uv::uv_process_flags_UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS as _;
/// Spawn the child process in a detached state - this will make it a process group leader,
/// and will effectively enable the child to keep running after the parent exits. Note that
/// the child process will still keep the parent's event loop alive unless the parent
/// process calls uv_unref() on the child's process handle.
const DETACHED = uv::uv_process_flags_UV_PROCESS_DETACHED as _;
/// Hide the subprocess window that would normally be created. This option is only
/// meaningful on Windows systems. On Unix it is silently ignored.
const WINDOWS_HIDE = uv::uv_process_flags_UV_PROCESS_WINDOWS_HIDE as _;
/// Hide the subprocess console window that would normally be created. This option is only
/// meaningful on Windows systems. On Unix it is silently ignored.
const WINDOWS_HIDE_CONSOLE = uv::uv_process_flags_UV_PROCESS_WINDOWS_HIDE_CONSOLE as _;
/// Hide the subprocess GUI window that would normally be created. This option is only
/// meaningful on Windows systems. On Unix it is silently ignored.
const WINDOWS_HIDE_GUI = uv::uv_process_flags_UV_PROCESS_WINDOWS_HIDE_GUI as _;
}
}
pub enum StdioType {
Stream(crate::StreamHandle),
Fd(i32),
}
impl Default for StdioType {
fn default() -> Self {
StdioType::Fd(0)
}
}
impl Inner<uv_stdio_container_data> for StdioType {
fn inner(&self) -> uv_stdio_container_data {
match self {
StdioType::Stream(s) => uv_stdio_container_data { stream: s.inner() },
StdioType::Fd(fd) => uv_stdio_container_data { fd: *fd },
}
}
}
/// Container for each stdio handle or fd passed to a child process.
#[derive(Default)]
pub struct StdioContainer {
pub flags: StdioFlags,
pub data: StdioType,
}
/// Options for spawning the process (passed to spawn()).
pub struct ProcessOptions<'a> {
/// Called after the process exits.
pub exit_cb: ExitCB<'static>,
/// Path to program to execute.
pub file: &'a str,
/// Command line arguments. args[0] should be the path to the program. On Windows this uses
/// CreateProcess which concatenates the arguments into a string this can cause some strange
/// errors. See the note at windows_verbatim_arguments.
pub args: &'a [&'a str],
/// This will be set as the environ variable in the subprocess. If this is None then the
/// parents environ will be used.
pub env: Option<&'a [&'a str]>,
/// If Some() this represents a directory the subprocess should execute in. Stands for current
/// working directory.
pub cwd: Option<&'a str>,
/// Various flags that control how spawn() behaves. See the definition of `ProcessFlags`.
pub flags: ProcessFlags,
/// The `stdio` field points to an array of StdioContainer structs that describe the file
/// descriptors that will be made available to the child process. The convention is that
/// stdio[0] points to stdin, fd 1 is used for stdout, and fd 2 is stderr.
///
/// Note that on windows file descriptors greater than 2 are available to the child process
/// only if the child processes uses the MSVCRT runtime.
pub stdio: &'a [StdioContainer],
/// Libuv can change the child process' user/group id. This happens only when the appropriate
/// bits are set in the flags fields. This is not supported on windows; spawn() will fail and
/// set the error to ENOTSUP.
pub uid: crate::Uid,
/// Libuv can change the child process' user/group id. This happens only when the appropriate
/// bits are set in the flags fields. This is not supported on windows; spawn() will fail and
/// set the error to ENOTSUP.
pub gid: crate::Gid,
}
impl<'a> ProcessOptions<'a> {
/// Constructs a new ProcessOptions object. The args slice must have at least one member: the
/// path to the program to execute. Any additional members of the slice will be passed as
/// command line arguments.
pub fn new(args: &'a [&'a str]) -> ProcessOptions {
assert!(
args.len() > 0,
"ProcessOptions args slice must contain at least one str"
);
ProcessOptions {
exit_cb: ().into(),
file: args[0],
args: args,
env: None,
cwd: None,
flags: ProcessFlags::empty(),
stdio: &[],
uid: 0,
gid: 0,
}
}
}
/// Process handles will spawn a new process and allow the user to control it and establish
/// communication channels with it using streams.
#[derive(Clone, Copy)]
pub struct ProcessHandle {
handle: *mut uv_process_t,
}
impl ProcessHandle {
/// Create a new process handle
pub fn new() -> crate::Result<ProcessHandle> {
let layout = std::alloc::Layout::new::<uv_process_t>();
let handle = unsafe { std::alloc::alloc(layout) as *mut uv_process_t };
if handle.is_null() {
return Err(crate::Error::ENOMEM);
}
crate::Handle::initialize_data(uv_handle!(handle), super::ProcessData(Default::default()));
Ok(ProcessHandle { handle })
}
/// Disables inheritance for file descriptors / handles that this process inherited from its
/// parent. The effect is that child processes spawned by this process don’t accidentally
/// inherit these handles.
///
/// It is recommended to call this function as early in your program as possible, before the
/// inherited file descriptors can be closed or duplicated.
///
/// Note: This function works on a best-effort basis: there is no guarantee that libuv can
/// discover all file descriptors that were inherited. In general it does a better job on
/// Windows than it does on Unix.
pub fn disable_stdio_inheritance() {
unsafe { uv_disable_stdio_inheritance() };
}
/// Initializes the process handle and starts the process.
///
/// Possible reasons for failing to spawn would include (but not be limited to) the file to
/// execute not existing, not having permissions to use the setuid or setgid specified, or not
/// having enough memory to allocate for the new process.
pub fn spawn(
&mut self,
r#loop: &crate::Loop,
options: ProcessOptions,
) -> Result<(), Box<dyn std::error::Error>> {
let exit_cb_uv = use_c_callback!(uv_exit_cb, options.exit_cb);
let dataptr = crate::Handle::get_data(uv_handle!(self.handle));
if !dataptr.is_null() {
if let super::ProcessData(d) = unsafe { &mut (*dataptr).addl } {
d.exit_cb = options.exit_cb;
}
}
// CString will ensure we have a terminating null
let file = CString::new(options.file)?;
// For args, libuv-sys is expecting a "*mut *mut c_char". The only way to get a "*mut
// c_char" from a CString is via CString::into_raw() which will "leak" the memory from
// rust. We'll need to make sure to reclaim that memory later so it'll be GC'd. So, first
// we need to convert all of the arguments to CStrings for the null-termination. Then we
// need to grab a *mut pointer to the data using CString::into_raw() which will "leak" the
// CStrings out of rust. Then we need to add a final null pointer to the end (the C code
// requires it so it can find the end of the array) and collect it all into a Vec.
let mut args = options
.args
.iter()
.map(|a| CString::new(*a).map(|s| s.into_raw()))
.chain(std::iter::once(Ok(std::ptr::null_mut())))
.collect::<Result<Vec<*mut std::os::raw::c_char>, std::ffi::NulError>>()?;
// env is similar to args except that it is Option'al.
let mut env = options
.env
.map(|env| {
env.iter()
.map(|e| CString::new(*e).map(|s| s.into_raw()))
.chain(std::iter::once(Ok(std::ptr::null_mut())))
.collect::<Result<Vec<*mut std::os::raw::c_char>, std::ffi::NulError>>()
})
.transpose()?;
// cwd is like file except it's Option'al
let cwd = options.cwd.map(|cwd| CString::new(cwd)).transpose()?;
// stdio is an array of uv_stdio_container_t objects
let mut stdio = options
.stdio
.iter()
.map(|stdio| uv_stdio_container_t {
flags: stdio.flags.bits() as _,
data: stdio.data.inner(),
})
.collect::<Vec<uv_stdio_container_t>>();
let options = uv_process_options_t {
exit_cb: exit_cb_uv,
file: file.as_ptr(),
args: args.as_mut_ptr(),
env: env
.as_mut()
.map_or(std::ptr::null_mut(), |e| e.as_mut_ptr()),
cwd: cwd.map_or(std::ptr::null(), |s| s.as_ptr()),
flags: options.flags.bits(),
stdio_count: options.stdio.len() as _,
stdio: stdio.as_mut_ptr(),
uid: options.uid,
gid: options.gid,
};
let result = crate::uvret(unsafe {
uv_spawn(r#loop.into_inner(), self.handle, &options as *const _)
})
.map_err(|e| Box::new(e) as _);
// reclaim data so it'll be freed - I'm pretty sure it's safe to free options here. Under
// the hood, libuv is calling fork and execvp. The fork should copy the address space into
// the new process, so freeing it here shouldn't affect that. Then execvp is going to
// replace the address space, so we don't need to worry about leaking the copy.
// For args, we don't need the last element because it's a null pointer.
let args: Vec<CString> = args
.iter()
.take(args.len() - 1)
.map(|a| unsafe { CString::from_raw(*a) })
.collect();
std::mem::drop(args);
// env is the same as args except it's Option'al
let env: Option<Vec<CString>> = env.map(|env| {
env.iter()
.take(env.len() - 1)
.map(|e| unsafe { CString::from_raw(*e) })
.collect()
});
std::mem::drop(env);
result
}
/// The PID of the spawned process. It’s set after calling spawn().
pub fn pid(&self) -> i32 {
unsafe { uv_process_get_pid(self.handle) as _ }
}
/// Sends the specified signal to the given process handle. Check the documentation on
/// SignalHandle for signal support, specially on Windows.
pub fn kill(&mut self, signum: i32) -> crate::Result<()> {
crate::uvret(unsafe { uv_process_kill(self.handle, signum) })
}
/// Sends the specified signal to the given PID. Check the documentation on SignalHandle for
/// signal support, specially on Windows.
pub fn kill_pid(pid: i32, signum: i32) -> crate::Result<()> {
crate::uvret(unsafe { uv_kill(pid, signum) })
}
}
impl FromInner<*mut uv_process_t> for ProcessHandle {
fn from_inner(handle: *mut uv_process_t) -> ProcessHandle {
ProcessHandle { handle }
}
}
impl Inner<*mut uv::uv_handle_t> for ProcessHandle {
fn inner(&self) -> *mut uv::uv_handle_t {
uv_handle!(self.handle)
}
}
impl From<ProcessHandle> for crate::Handle {
fn from(process: ProcessHandle) -> crate::Handle {
crate::Handle::from_inner(Inner::<*mut uv::uv_handle_t>::inner(&process))
}
}
impl crate::ToHandle for ProcessHandle {
fn to_handle(&self) -> crate::Handle {
crate::Handle::from_inner(Inner::<*mut uv::uv_handle_t>::inner(self))
}
}
impl TryFrom<crate::Handle> for ProcessHandle {
type Error = crate::ConversionError;
fn try_from(handle: crate::Handle) -> Result<Self, Self::Error> {
let t = handle.get_type();
if t != crate::HandleType::PROCESS {
Err(crate::ConversionError::new(t, crate::HandleType::PROCESS))
} else {
Ok((handle.inner() as *mut uv_process_t).into_inner())
}
}
}
impl HandleTrait for ProcessHandle {}
impl crate::Loop {
/// Create a new process handle and spawn the process
pub fn spawn_process(
&self,
options: ProcessOptions,
) -> Result<ProcessHandle, Box<dyn std::error::Error>> {
let mut process = ProcessHandle::new()?;
process.spawn(self, options)?;
Ok(process)
}
} | pub struct StdioFlags: u32 {
/// No file descriptor will be provided (or redirected to `/dev/null` if it is fd 0, 1 or | random_line_split |
process.rs | use crate::{FromInner, HandleTrait, Inner, IntoInner};
use std::convert::TryFrom;
use std::ffi::CString;
use uv::uv_stdio_container_s__bindgen_ty_1 as uv_stdio_container_data;
use uv::{
uv_disable_stdio_inheritance, uv_kill, uv_process_get_pid, uv_process_kill,
uv_process_options_t, uv_process_t, uv_spawn, uv_stdio_container_t,
};
callbacks! {
pub ExitCB(handle: ProcessHandle, exit_status: i64, term_signal: i32);
}
/// Additional data stored on the handle
#[derive(Default)]
pub(crate) struct ProcessDataFields<'a> {
exit_cb: ExitCB<'a>,
}
/// Callback for uv_process_options_t.exit_cb
extern "C" fn uv_exit_cb(
handle: *mut uv_process_t,
exit_status: i64,
term_signal: std::os::raw::c_int,
) {
let dataptr = crate::Handle::get_data(uv_handle!(handle));
if !dataptr.is_null() {
unsafe {
if let super::ProcessData(d) = &mut (*dataptr).addl {
d.exit_cb
.call(handle.into_inner(), exit_status, term_signal as _);
}
}
}
}
bitflags! {
/// Flags specifying how a stdio should be transmitted to the child process.
pub struct StdioFlags: u32 {
/// No file descriptor will be provided (or redirected to `/dev/null` if it is fd 0, 1 or
/// 2).
const IGNORE = uv::uv_stdio_flags_UV_IGNORE as _;
/// Open a new pipe into `data.stream`, per the flags below. The `data.stream` field must
/// point to a PipeHandle object that has been initialized with `new`, but not yet opened
/// or connected.
const CREATE_PIPE = uv::uv_stdio_flags_UV_CREATE_PIPE as _;
/// The child process will be given a duplicate of the parent's file descriptor given by
/// `data.fd`.
const INHERIT_FD = uv::uv_stdio_flags_UV_INHERIT_FD as _;
/// The child process will be given a duplicate of the parent's file descriptor being used
/// by the stream handle given by `data.stream`.
const INHERIT_STREAM = uv::uv_stdio_flags_UV_INHERIT_STREAM as _;
/// When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE determine the
/// direction of flow, from the child process' perspective. Both flags may be specified to
/// create a duplex data stream.
const READABLE_PIPE = uv::uv_stdio_flags_UV_READABLE_PIPE as _;
const WRITABLE_PIPE = uv::uv_stdio_flags_UV_WRITABLE_PIPE as _;
/// Open the child pipe handle in overlapped mode on Windows. On Unix it is silently
/// ignored.
const OVERLAPPED_PIPE = uv::uv_stdio_flags_UV_OVERLAPPED_PIPE as _;
}
}
impl Default for StdioFlags {
fn default() -> Self {
StdioFlags::IGNORE
}
}
bitflags! {
/// Flags to be set on the flags field of ProcessOptions.
pub struct ProcessFlags: u32 {
/// Set the child process' user id.
const SETUID = uv::uv_process_flags_UV_PROCESS_SETUID as _;
/// Set the child process' group id.
const SETGID = uv::uv_process_flags_UV_PROCESS_SETGID as _;
/// Do not wrap any arguments in quotes, or perform any other escaping, when converting the
/// argument list into a command line string. This option is only meaningful on Windows
/// systems. On Unix it is silently ignored.
const WINDOWS_VERBATIM_ARGUMENTS = uv::uv_process_flags_UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS as _;
/// Spawn the child process in a detached state - this will make it a process group leader,
/// and will effectively enable the child to keep running after the parent exits. Note that
/// the child process will still keep the parent's event loop alive unless the parent
/// process calls uv_unref() on the child's process handle.
const DETACHED = uv::uv_process_flags_UV_PROCESS_DETACHED as _;
/// Hide the subprocess window that would normally be created. This option is only
/// meaningful on Windows systems. On Unix it is silently ignored.
const WINDOWS_HIDE = uv::uv_process_flags_UV_PROCESS_WINDOWS_HIDE as _;
/// Hide the subprocess console window that would normally be created. This option is only
/// meaningful on Windows systems. On Unix it is silently ignored.
const WINDOWS_HIDE_CONSOLE = uv::uv_process_flags_UV_PROCESS_WINDOWS_HIDE_CONSOLE as _;
/// Hide the subprocess GUI window that would normally be created. This option is only
/// meaningful on Windows systems. On Unix it is silently ignored.
const WINDOWS_HIDE_GUI = uv::uv_process_flags_UV_PROCESS_WINDOWS_HIDE_GUI as _;
}
}
pub enum StdioType {
Stream(crate::StreamHandle),
Fd(i32),
}
impl Default for StdioType {
fn default() -> Self {
StdioType::Fd(0)
}
}
impl Inner<uv_stdio_container_data> for StdioType {
fn inner(&self) -> uv_stdio_container_data {
match self {
StdioType::Stream(s) => uv_stdio_container_data { stream: s.inner() },
StdioType::Fd(fd) => uv_stdio_container_data { fd: *fd },
}
}
}
/// Container for each stdio handle or fd passed to a child process.
#[derive(Default)]
pub struct StdioContainer {
pub flags: StdioFlags,
pub data: StdioType,
}
/// Options for spawning the process (passed to spawn()).
pub struct ProcessOptions<'a> {
/// Called after the process exits.
pub exit_cb: ExitCB<'static>,
/// Path to program to execute.
pub file: &'a str,
/// Command line arguments. args[0] should be the path to the program. On Windows this uses
/// CreateProcess which concatenates the arguments into a string this can cause some strange
/// errors. See the note at windows_verbatim_arguments.
pub args: &'a [&'a str],
/// This will be set as the environ variable in the subprocess. If this is None then the
/// parents environ will be used.
pub env: Option<&'a [&'a str]>,
/// If Some() this represents a directory the subprocess should execute in. Stands for current
/// working directory.
pub cwd: Option<&'a str>,
/// Various flags that control how spawn() behaves. See the definition of `ProcessFlags`.
pub flags: ProcessFlags,
/// The `stdio` field points to an array of StdioContainer structs that describe the file
/// descriptors that will be made available to the child process. The convention is that
/// stdio[0] points to stdin, fd 1 is used for stdout, and fd 2 is stderr.
///
/// Note that on windows file descriptors greater than 2 are available to the child process
/// only if the child processes uses the MSVCRT runtime.
pub stdio: &'a [StdioContainer],
/// Libuv can change the child process' user/group id. This happens only when the appropriate
/// bits are set in the flags fields. This is not supported on windows; spawn() will fail and
/// set the error to ENOTSUP.
pub uid: crate::Uid,
/// Libuv can change the child process' user/group id. This happens only when the appropriate
/// bits are set in the flags fields. This is not supported on windows; spawn() will fail and
/// set the error to ENOTSUP.
pub gid: crate::Gid,
}
impl<'a> ProcessOptions<'a> {
/// Constructs a new ProcessOptions object. The args slice must have at least one member: the
/// path to the program to execute. Any additional members of the slice will be passed as
/// command line arguments.
pub fn new(args: &'a [&'a str]) -> ProcessOptions {
assert!(
args.len() > 0,
"ProcessOptions args slice must contain at least one str"
);
ProcessOptions {
exit_cb: ().into(),
file: args[0],
args: args,
env: None,
cwd: None,
flags: ProcessFlags::empty(),
stdio: &[],
uid: 0,
gid: 0,
}
}
}
/// Process handles will spawn a new process and allow the user to control it and establish
/// communication channels with it using streams.
#[derive(Clone, Copy)]
pub struct ProcessHandle {
handle: *mut uv_process_t,
}
impl ProcessHandle {
/// Create a new process handle
pub fn new() -> crate::Result<ProcessHandle> {
let layout = std::alloc::Layout::new::<uv_process_t>();
let handle = unsafe { std::alloc::alloc(layout) as *mut uv_process_t };
if handle.is_null() {
return Err(crate::Error::ENOMEM);
}
crate::Handle::initialize_data(uv_handle!(handle), super::ProcessData(Default::default()));
Ok(ProcessHandle { handle })
}
/// Disables inheritance for file descriptors / handles that this process inherited from its
/// parent. The effect is that child processes spawned by this process don’t accidentally
/// inherit these handles.
///
/// It is recommended to call this function as early in your program as possible, before the
/// inherited file descriptors can be closed or duplicated.
///
/// Note: This function works on a best-effort basis: there is no guarantee that libuv can
/// discover all file descriptors that were inherited. In general it does a better job on
/// Windows than it does on Unix.
pub fn disable_stdio_inheritance() {
unsafe { uv_disable_stdio_inheritance() };
}
/// Initializes the process handle and starts the process.
///
/// Possible reasons for failing to spawn would include (but not be limited to) the file to
/// execute not existing, not having permissions to use the setuid or setgid specified, or not
/// having enough memory to allocate for the new process.
pub fn spawn(
&mut self,
r#loop: &crate::Loop,
options: ProcessOptions,
) -> Result<(), Box<dyn std::error::Error>> {
let exit_cb_uv = use_c_callback!(uv_exit_cb, options.exit_cb);
let dataptr = crate::Handle::get_data(uv_handle!(self.handle));
if !dataptr.is_null() {
if let super::ProcessData(d) = unsafe { &mut (*dataptr).addl } {
| }
// CString will ensure we have a terminating null
let file = CString::new(options.file)?;
// For args, libuv-sys is expecting a "*mut *mut c_char". The only way to get a "*mut
// c_char" from a CString is via CString::into_raw() which will "leak" the memory from
// rust. We'll need to make sure to reclaim that memory later so it'll be GC'd. So, first
// we need to convert all of the arguments to CStrings for the null-termination. Then we
// need to grab a *mut pointer to the data using CString::into_raw() which will "leak" the
// CStrings out of rust. Then we need to add a final null pointer to the end (the C code
// requires it so it can find the end of the array) and collect it all into a Vec.
let mut args = options
.args
.iter()
.map(|a| CString::new(*a).map(|s| s.into_raw()))
.chain(std::iter::once(Ok(std::ptr::null_mut())))
.collect::<Result<Vec<*mut std::os::raw::c_char>, std::ffi::NulError>>()?;
// env is similar to args except that it is Option'al.
let mut env = options
.env
.map(|env| {
env.iter()
.map(|e| CString::new(*e).map(|s| s.into_raw()))
.chain(std::iter::once(Ok(std::ptr::null_mut())))
.collect::<Result<Vec<*mut std::os::raw::c_char>, std::ffi::NulError>>()
})
.transpose()?;
// cwd is like file except it's Option'al
let cwd = options.cwd.map(|cwd| CString::new(cwd)).transpose()?;
// stdio is an array of uv_stdio_container_t objects
let mut stdio = options
.stdio
.iter()
.map(|stdio| uv_stdio_container_t {
flags: stdio.flags.bits() as _,
data: stdio.data.inner(),
})
.collect::<Vec<uv_stdio_container_t>>();
let options = uv_process_options_t {
exit_cb: exit_cb_uv,
file: file.as_ptr(),
args: args.as_mut_ptr(),
env: env
.as_mut()
.map_or(std::ptr::null_mut(), |e| e.as_mut_ptr()),
cwd: cwd.map_or(std::ptr::null(), |s| s.as_ptr()),
flags: options.flags.bits(),
stdio_count: options.stdio.len() as _,
stdio: stdio.as_mut_ptr(),
uid: options.uid,
gid: options.gid,
};
let result = crate::uvret(unsafe {
uv_spawn(r#loop.into_inner(), self.handle, &options as *const _)
})
.map_err(|e| Box::new(e) as _);
// reclaim data so it'll be freed - I'm pretty sure it's safe to free options here. Under
// the hood, libuv is calling fork and execvp. The fork should copy the address space into
// the new process, so freeing it here shouldn't affect that. Then execvp is going to
// replace the address space, so we don't need to worry about leaking the copy.
// For args, we don't need the last element because it's a null pointer.
let args: Vec<CString> = args
.iter()
.take(args.len() - 1)
.map(|a| unsafe { CString::from_raw(*a) })
.collect();
std::mem::drop(args);
// env is the same as args except it's Option'al
let env: Option<Vec<CString>> = env.map(|env| {
env.iter()
.take(env.len() - 1)
.map(|e| unsafe { CString::from_raw(*e) })
.collect()
});
std::mem::drop(env);
result
}
/// The PID of the spawned process. It’s set after calling spawn().
pub fn pid(&self) -> i32 {
unsafe { uv_process_get_pid(self.handle) as _ }
}
/// Sends the specified signal to the given process handle. Check the documentation on
/// SignalHandle for signal support, specially on Windows.
pub fn kill(&mut self, signum: i32) -> crate::Result<()> {
crate::uvret(unsafe { uv_process_kill(self.handle, signum) })
}
/// Sends the specified signal to the given PID. Check the documentation on SignalHandle for
/// signal support, specially on Windows.
pub fn kill_pid(pid: i32, signum: i32) -> crate::Result<()> {
crate::uvret(unsafe { uv_kill(pid, signum) })
}
}
impl FromInner<*mut uv_process_t> for ProcessHandle {
fn from_inner(handle: *mut uv_process_t) -> ProcessHandle {
ProcessHandle { handle }
}
}
impl Inner<*mut uv::uv_handle_t> for ProcessHandle {
fn inner(&self) -> *mut uv::uv_handle_t {
uv_handle!(self.handle)
}
}
impl From<ProcessHandle> for crate::Handle {
fn from(process: ProcessHandle) -> crate::Handle {
crate::Handle::from_inner(Inner::<*mut uv::uv_handle_t>::inner(&process))
}
}
impl crate::ToHandle for ProcessHandle {
fn to_handle(&self) -> crate::Handle {
crate::Handle::from_inner(Inner::<*mut uv::uv_handle_t>::inner(self))
}
}
impl TryFrom<crate::Handle> for ProcessHandle {
type Error = crate::ConversionError;
fn try_from(handle: crate::Handle) -> Result<Self, Self::Error> {
let t = handle.get_type();
if t != crate::HandleType::PROCESS {
Err(crate::ConversionError::new(t, crate::HandleType::PROCESS))
} else {
Ok((handle.inner() as *mut uv_process_t).into_inner())
}
}
}
impl HandleTrait for ProcessHandle {}
impl crate::Loop {
/// Create a new process handle and spawn the process
pub fn spawn_process(
&self,
options: ProcessOptions,
) -> Result<ProcessHandle, Box<dyn std::error::Error>> {
let mut process = ProcessHandle::new()?;
process.spawn(self, options)?;
Ok(process)
}
}
| d.exit_cb = options.exit_cb;
}
| conditional_block |
_mkl.py | from ._base import Matrix, MatrixError, BackendNotAvailable
from .. import numeric, _util as util, warnings
from contextlib import contextmanager
from ctypes import c_int, byref, CDLL
import treelog as log
import os
import numpy
libmkl_path = os.environ.get('NUTILS_MATRIX_MKL_LIB', None)
if libmkl_path:
libmkl = CDLL(libmkl_path)
else:
for v in '.2', '.1', '':
libmkl = util.loadlib(linux=f'libmkl_rt.so{v}', darwin=f'libmkl_rt{v}.dylib', win32=f'mkl_rt{v}.dll')
if libmkl:
break
else:
raise BackendNotAvailable('the Intel MKL matrix backend requires libmkl to be installed (try: pip install mkl)')
def assemble(data, index, shape):
# In the increments below the output dtype is set to int32 not only to avoid
# an additional allocation, but crucially also to avoid truncation in case
# the incremented index overflows the original type.
return MKLMatrix(data, ncols=shape[1],
rowptr=numpy.add(index[0].searchsorted(numpy.arange(shape[0]+1)), 1, dtype=numpy.int32),
colidx=numpy.add(index[1], 1, dtype=numpy.int32))
class Pardiso:
'''Wrapper for libmkl.pardiso.
https://www.intel.com/content/www/us/en/develop/documentation/onemkl-developer-reference-c/top/
sparse-solver-routines/onemkl-pardiso-parallel-direct-sparse-solver-iface.html
'''
_errorcodes = {
-1: 'input inconsistent',
-2: 'not enough memory',
-3: 'reordering problem',
-4: 'zero pivot, numerical factorization or iterative refinement problem',
-5: 'unclassified (internal) error',
-6: 'reordering failed (matrix types 11 and 13 only)',
-7: 'diagonal matrix is singular',
-8: '32-bit integer overflow problem',
-9: 'not enough memory for OOC',
-10: 'error opening OOC files',
-11: 'read/write error with OOC files',
-12: 'pardiso_64 called from 32-bit library',
}
def __init__(self, mtype, a, ia, ja, verbose=False, iparm={}): | self.mtype = c_int(mtype)
self.n = c_int(len(ia)-1)
self.a = a.ctypes
self.ia = ia.ctypes
self.ja = ja.ctypes
self.perm = None
self.iparm = numpy.zeros(64, dtype=numpy.int32) # https://software.intel.com/en-us/mkl-developer-reference-c-pardiso-iparm-parameter
self.msglvl = c_int(verbose)
libmkl.pardisoinit(self.pt.ctypes, byref(self.mtype), self.iparm.ctypes) # initialize iparm based on mtype
if self.iparm[0] != 1:
raise MatrixError('pardiso init failed')
for n, v in iparm.items():
self.iparm[n] = v
self.iparm[10] = 1 # enable scaling (default for nonsymmetric matrices, recommended for highly indefinite symmetric matrices)
self.iparm[12] = 1 # enable matching (default for nonsymmetric matrices, recommended for highly indefinite symmetric matrices)
self.iparm[27] = 0 # double precision data
self.iparm[34] = 0 # one-based indexing
self.iparm[36] = 0 # csr matrix format
self._phase(12) # analysis, numerical factorization
log.debug('peak memory use {:,d}k'.format(max(self.iparm[14], self.iparm[15]+self.iparm[16])))
def __call__(self, rhs):
rhsflat = numpy.ascontiguousarray(rhs.reshape(rhs.shape[0], -1).T, dtype=self.dtype)
lhsflat = numpy.empty_like(rhsflat)
self._phase(33, rhsflat.shape[0], rhsflat.ctypes, lhsflat.ctypes) # solve, iterative refinement
return lhsflat.T.reshape(rhs.shape)
def _phase(self, phase, nrhs=0, b=None, x=None):
error = c_int(1)
libmkl.pardiso(self.pt.ctypes, byref(self.maxfct), byref(self.mnum), byref(self.mtype),
byref(c_int(phase)), byref(self.n), self.a, self.ia, self.ja, self.perm,
byref(c_int(nrhs)), self.iparm.ctypes, byref(self.msglvl), b, x, byref(error))
if error.value:
raise MatrixError(self._errorcodes.get(error.value, 'unknown error {}'.format(error.value)))
def __del__(self):
self._phase(-1) # release all internal memory for all matrices
if self.pt.any():
warnings.warn('Pardiso failed to release its internal memory')
class MKLMatrix(Matrix):
'''matrix implementation based on sorted coo data'''
def __init__(self, data, rowptr, colidx, ncols):
assert len(data) == len(colidx) == rowptr[-1]-1
self.data = numpy.ascontiguousarray(data, dtype=numpy.complex128 if data.dtype.kind == 'c' else numpy.float64)
self.rowptr = numpy.ascontiguousarray(rowptr, dtype=numpy.int32)
self.colidx = numpy.ascontiguousarray(colidx, dtype=numpy.int32)
super().__init__((len(rowptr)-1, ncols), self.data.dtype)
def mkl_(self, name, *args):
attr = 'mkl_' + dict(f='d', c='z')[self.dtype.kind] + name
return getattr(libmkl, attr)(*args)
def convert(self, mat):
if not isinstance(mat, Matrix):
raise TypeError('cannot convert {} to Matrix'.format(type(mat).__name__))
if self.shape != mat.shape:
raise MatrixError('non-matching shapes')
if isinstance(mat, MKLMatrix) and mat.dtype == self.dtype:
return mat
data, colidx, rowptr = mat.export('csr')
return MKLMatrix(data.astype(self.dtype, copy=False), rowptr+1, colidx+1, self.shape[1])
def __add__(self, other):
other = self.convert(other)
assert self.shape == other.shape and self.dtype == other.dtype
request = c_int(1)
info = c_int()
rowptr = numpy.empty(self.shape[0]+1, dtype=numpy.int32)
one = numpy.array(1, dtype=self.dtype)
args = ["N", byref(request), byref(c_int(0)),
byref(c_int(self.shape[0])), byref(c_int(self.shape[1])),
self.data.ctypes, self.colidx.ctypes, self.rowptr.ctypes, one.ctypes,
other.data.ctypes, other.colidx.ctypes, other.rowptr.ctypes,
None, None, rowptr.ctypes, None, byref(info)]
self.mkl_('csradd', *args)
assert info.value == 0
colidx = numpy.empty(rowptr[-1]-1, dtype=numpy.int32)
data = numpy.empty(rowptr[-1]-1, dtype=self.dtype)
request.value = 2
args[12:14] = data.ctypes, colidx.ctypes
self.mkl_('csradd', *args)
assert info.value == 0
return MKLMatrix(data, rowptr, colidx, self.shape[1])
def __mul__(self, other):
if not numeric.isnumber(other):
raise TypeError
return MKLMatrix(self.data * other, self.rowptr, self.colidx, self.shape[1])
def __matmul__(self, other):
if not isinstance(other, numpy.ndarray):
raise TypeError
if other.shape[0] != self.shape[1]:
raise MatrixError
x = numpy.ascontiguousarray(other.T, dtype=self.dtype)
y = numpy.empty(x.shape[:-1] + self.shape[:1], dtype=self.dtype)
if other.ndim == 1:
self.mkl_('csrgemv', 'N', byref(c_int(self.shape[0])),
self.data.ctypes, self.rowptr.ctypes, self.colidx.ctypes, x.ctypes, y.ctypes)
else:
zero = numpy.array(0, dtype=self.dtype)
one = numpy.array(1, dtype=self.dtype)
self.mkl_('csrmm', 'N', byref(c_int(self.shape[0])),
byref(c_int(other.size//other.shape[0])),
byref(c_int(self.shape[1])), one.ctypes, 'GXXFXX',
self.data.ctypes, self.colidx.ctypes, self.rowptr.ctypes, self.rowptr[1:].ctypes,
x.ctypes, byref(c_int(other.shape[0])), zero.ctypes,
y.ctypes, byref(c_int(other.shape[0])))
return y.T
def __neg__(self):
return MKLMatrix(-self.data, self.rowptr, self.colidx, self.shape[1])
@property
def T(self):
if self.shape[0] != self.shape[1]:
raise NotImplementedError('MKLMatrix does not yet support transpose of non-square matrices')
job = numpy.array([0, 1, 1, 0, 0, 1], numpy.int32)
data = numpy.empty_like(self.data)
rowptr = numpy.empty_like(self.rowptr)
colidx = numpy.empty_like(self.colidx)
info = c_int()
self.mkl_('csrcsc', job.ctypes,
byref(c_int(self.shape[0])), self.data.ctypes,
self.colidx.ctypes, self.rowptr.ctypes, data.ctypes, colidx.ctypes,
rowptr.ctypes, byref(info))
return MKLMatrix(data, rowptr, colidx, self.shape[1])
def _submatrix(self, rows, cols):
keep = rows.repeat(numpy.diff(self.rowptr))
keep &= cols[self.colidx-1]
if keep.all(): # all nonzero entries are kept
rowptr = self.rowptr[numpy.hstack([True, rows])]
keep = slice(None) # avoid array copies
else:
rowptr = numpy.cumsum([1] + [keep[i:j].sum() for i, j in numeric.overlapping(self.rowptr-1)[rows]], dtype=numpy.int32)
data = self.data[keep]
assert rowptr[-1] == len(data)+1
colidx = (self.colidx if cols.all() else cols.cumsum(dtype=numpy.int32)[self.colidx-1])[keep]
return MKLMatrix(data, rowptr, colidx, cols.sum())
def export(self, form):
if form == 'dense':
dense = numpy.zeros(self.shape, self.dtype)
for row, i, j in zip(dense, self.rowptr[:-1]-1, self.rowptr[1:]-1):
row[self.colidx[i:j]-1] = self.data[i:j]
return dense
if form == 'csr':
return self.data, self.colidx-1, self.rowptr-1
if form == 'coo':
return self.data, (numpy.arange(self.shape[0]).repeat(self.rowptr[1:]-self.rowptr[:-1]), self.colidx-1)
raise NotImplementedError('cannot export MKLMatrix to {!r}'.format(form))
def _solver_fgmres(self, rhs, atol, maxiter=0, restart=150, precon=None, ztol=1e-12, preconargs={}, **args):
if self.dtype.kind == 'c':
raise MatrixError("MKL's fgmres does not support complex data")
rci = c_int(0)
n = c_int(len(rhs))
b = numpy.array(rhs, dtype=numpy.float64, copy=False)
x = numpy.zeros_like(b)
N = min(restart, len(rhs))
ipar = numpy.empty(128, dtype=numpy.int32)
dpar = numpy.empty(128, dtype=numpy.float64)
tmp = numpy.empty((2*N+1)*len(rhs)+(N*(N+9))//2+1, dtype=numpy.float64)
dfgmres_args = byref(n), x.ctypes, b.ctypes, byref(rci), ipar.ctypes, dpar.ctypes, tmp.ctypes
itercount = c_int(0)
libmkl.dfgmres_init(*dfgmres_args)
ipar[7] = 0 # do not perform the stopping test for the maximum number of iterations
ipar[8] = 0 # do not perform the residual stopping test
ipar[9] = 1 # perform the user-defined stopping test by setting RCI_request=2
if precon is not None:
ipar[10] = 1 # run the preconditioned version of the FGMRES method
precon = self.getprecon(precon, **args, **preconargs)
ipar[11] = 0 # do not perform the automatic test for zero norm of the currently generated vector
ipar[12] = 0 # update the solution to the vector x according to the computations done by the dfgmres routine
ipar[14] = N # the number of non-restarted FGMRES iterations
libmkl.dfgmres_check(*dfgmres_args)
if rci.value in (-1001, -1010, -1011):
warnings.warn('dgmres ' + ' and '.join(['wrote some warnings to stdout', 'changed some parameters to make them consistent or correct'][1 if rci.value == -1010 else 0:1 if rci.value == -1001 else 2]))
elif rci.value != 0:
raise MatrixError('dgmres check failed with error code {}'.format(rci.value))
with log.context('fgmres {:.0f}%', 0, 0) as format:
while True:
libmkl.dfgmres(*dfgmres_args)
if rci.value == 1: # multiply the matrix
tmp[ipar[22]-1:ipar[22]+n.value-1] = self @ tmp[ipar[21]-1:ipar[21]+n.value-1]
elif rci.value == 2: # perform the stopping test
if dpar[4] < atol:
libmkl.dfgmres_get(*dfgmres_args, byref(itercount))
if numpy.linalg.norm(self @ x - b) < atol:
break
format(100 * numpy.log(dpar[2]/dpar[4]) / numpy.log(dpar[2]/atol))
if ipar[3] > maxiter > 0:
break
elif rci.value == 3: # apply the preconditioner
tmp[ipar[22]-1:ipar[22]+n.value-1] = precon(tmp[ipar[21]-1:ipar[21]+n.value-1])
elif rci.value == 4: # check if the norm of the current orthogonal vector is zero
if dpar[6] < ztol:
libmkl.dfgmres_get(*dfgmres_args, byref(itercount))
if numpy.linalg.norm(self @ x - b) < atol:
break
raise MatrixError('singular matrix')
else:
raise MatrixError('this should not have occurred: rci={}'.format(rci.value))
log.debug('performed {} fgmres iterations, {} restarts'.format(ipar[3], ipar[3]//ipar[14]))
return x
def _precon_direct(self, **args):
return Pardiso(mtype=dict(f=11, c=13)[self.dtype.kind], a=self.data, ia=self.rowptr, ja=self.colidx, **args)
def _precon_sym_direct(self, **args):
upper = numpy.zeros(len(self.data), dtype=bool)
rowptr = numpy.empty_like(self.rowptr)
rowptr[0] = 1
diagdom = True
for irow, (n, m) in enumerate(numeric.overlapping(self.rowptr-1), start=1):
d = n + self.colidx[n:m].searchsorted(irow)
upper[d:m] = True
rowptr[irow] = rowptr[irow-1] + (m-d)
diagdom = diagdom and d < m and self.colidx[d] == irow and abs(self.data[n:m]).sum() < 2 * abs(self.data[d])
if diagdom:
log.debug('matrix is diagonally dominant, solving as SPD')
mtype = dict(f=2, c=4)
else:
mtype = dict(f=-2, c=6)
return Pardiso(mtype=mtype[self.dtype.kind], a=self.data[upper], ia=rowptr, ja=self.colidx[upper], **args)
# vim:sw=4:sts=4:et | self.dtype = a.dtype
self.pt = numpy.zeros(64, numpy.int64) # handle to data structure
self.maxfct = c_int(1)
self.mnum = c_int(1) | random_line_split |
_mkl.py | from ._base import Matrix, MatrixError, BackendNotAvailable
from .. import numeric, _util as util, warnings
from contextlib import contextmanager
from ctypes import c_int, byref, CDLL
import treelog as log
import os
import numpy
libmkl_path = os.environ.get('NUTILS_MATRIX_MKL_LIB', None)
if libmkl_path:
libmkl = CDLL(libmkl_path)
else:
for v in '.2', '.1', '':
libmkl = util.loadlib(linux=f'libmkl_rt.so{v}', darwin=f'libmkl_rt{v}.dylib', win32=f'mkl_rt{v}.dll')
if libmkl:
break
else:
raise BackendNotAvailable('the Intel MKL matrix backend requires libmkl to be installed (try: pip install mkl)')
def assemble(data, index, shape):
# In the increments below the output dtype is set to int32 not only to avoid
# an additional allocation, but crucially also to avoid truncation in case
# the incremented index overflows the original type.
return MKLMatrix(data, ncols=shape[1],
rowptr=numpy.add(index[0].searchsorted(numpy.arange(shape[0]+1)), 1, dtype=numpy.int32),
colidx=numpy.add(index[1], 1, dtype=numpy.int32))
class Pardiso:
'''Wrapper for libmkl.pardiso.
https://www.intel.com/content/www/us/en/develop/documentation/onemkl-developer-reference-c/top/
sparse-solver-routines/onemkl-pardiso-parallel-direct-sparse-solver-iface.html
'''
_errorcodes = {
-1: 'input inconsistent',
-2: 'not enough memory',
-3: 'reordering problem',
-4: 'zero pivot, numerical factorization or iterative refinement problem',
-5: 'unclassified (internal) error',
-6: 'reordering failed (matrix types 11 and 13 only)',
-7: 'diagonal matrix is singular',
-8: '32-bit integer overflow problem',
-9: 'not enough memory for OOC',
-10: 'error opening OOC files',
-11: 'read/write error with OOC files',
-12: 'pardiso_64 called from 32-bit library',
}
def __init__(self, mtype, a, ia, ja, verbose=False, iparm={}):
self.dtype = a.dtype
self.pt = numpy.zeros(64, numpy.int64) # handle to data structure
self.maxfct = c_int(1)
self.mnum = c_int(1)
self.mtype = c_int(mtype)
self.n = c_int(len(ia)-1)
self.a = a.ctypes
self.ia = ia.ctypes
self.ja = ja.ctypes
self.perm = None
self.iparm = numpy.zeros(64, dtype=numpy.int32) # https://software.intel.com/en-us/mkl-developer-reference-c-pardiso-iparm-parameter
self.msglvl = c_int(verbose)
libmkl.pardisoinit(self.pt.ctypes, byref(self.mtype), self.iparm.ctypes) # initialize iparm based on mtype
if self.iparm[0] != 1:
raise MatrixError('pardiso init failed')
for n, v in iparm.items():
self.iparm[n] = v
self.iparm[10] = 1 # enable scaling (default for nonsymmetric matrices, recommended for highly indefinite symmetric matrices)
self.iparm[12] = 1 # enable matching (default for nonsymmetric matrices, recommended for highly indefinite symmetric matrices)
self.iparm[27] = 0 # double precision data
self.iparm[34] = 0 # one-based indexing
self.iparm[36] = 0 # csr matrix format
self._phase(12) # analysis, numerical factorization
log.debug('peak memory use {:,d}k'.format(max(self.iparm[14], self.iparm[15]+self.iparm[16])))
def __call__(self, rhs):
rhsflat = numpy.ascontiguousarray(rhs.reshape(rhs.shape[0], -1).T, dtype=self.dtype)
lhsflat = numpy.empty_like(rhsflat)
self._phase(33, rhsflat.shape[0], rhsflat.ctypes, lhsflat.ctypes) # solve, iterative refinement
return lhsflat.T.reshape(rhs.shape)
def _phase(self, phase, nrhs=0, b=None, x=None):
error = c_int(1)
libmkl.pardiso(self.pt.ctypes, byref(self.maxfct), byref(self.mnum), byref(self.mtype),
byref(c_int(phase)), byref(self.n), self.a, self.ia, self.ja, self.perm,
byref(c_int(nrhs)), self.iparm.ctypes, byref(self.msglvl), b, x, byref(error))
if error.value:
raise MatrixError(self._errorcodes.get(error.value, 'unknown error {}'.format(error.value)))
def __del__(self):
self._phase(-1) # release all internal memory for all matrices
if self.pt.any():
warnings.warn('Pardiso failed to release its internal memory')
class MKLMatrix(Matrix):
'''matrix implementation based on sorted coo data'''
def __init__(self, data, rowptr, colidx, ncols):
assert len(data) == len(colidx) == rowptr[-1]-1
self.data = numpy.ascontiguousarray(data, dtype=numpy.complex128 if data.dtype.kind == 'c' else numpy.float64)
self.rowptr = numpy.ascontiguousarray(rowptr, dtype=numpy.int32)
self.colidx = numpy.ascontiguousarray(colidx, dtype=numpy.int32)
super().__init__((len(rowptr)-1, ncols), self.data.dtype)
def mkl_(self, name, *args):
attr = 'mkl_' + dict(f='d', c='z')[self.dtype.kind] + name
return getattr(libmkl, attr)(*args)
def convert(self, mat):
if not isinstance(mat, Matrix):
raise TypeError('cannot convert {} to Matrix'.format(type(mat).__name__))
if self.shape != mat.shape:
raise MatrixError('non-matching shapes')
if isinstance(mat, MKLMatrix) and mat.dtype == self.dtype:
return mat
data, colidx, rowptr = mat.export('csr')
return MKLMatrix(data.astype(self.dtype, copy=False), rowptr+1, colidx+1, self.shape[1])
def __add__(self, other):
other = self.convert(other)
assert self.shape == other.shape and self.dtype == other.dtype
request = c_int(1)
info = c_int()
rowptr = numpy.empty(self.shape[0]+1, dtype=numpy.int32)
one = numpy.array(1, dtype=self.dtype)
args = ["N", byref(request), byref(c_int(0)),
byref(c_int(self.shape[0])), byref(c_int(self.shape[1])),
self.data.ctypes, self.colidx.ctypes, self.rowptr.ctypes, one.ctypes,
other.data.ctypes, other.colidx.ctypes, other.rowptr.ctypes,
None, None, rowptr.ctypes, None, byref(info)]
self.mkl_('csradd', *args)
assert info.value == 0
colidx = numpy.empty(rowptr[-1]-1, dtype=numpy.int32)
data = numpy.empty(rowptr[-1]-1, dtype=self.dtype)
request.value = 2
args[12:14] = data.ctypes, colidx.ctypes
self.mkl_('csradd', *args)
assert info.value == 0
return MKLMatrix(data, rowptr, colidx, self.shape[1])
def __mul__(self, other):
if not numeric.isnumber(other):
raise TypeError
return MKLMatrix(self.data * other, self.rowptr, self.colidx, self.shape[1])
def __matmul__(self, other):
if not isinstance(other, numpy.ndarray):
raise TypeError
if other.shape[0] != self.shape[1]:
raise MatrixError
x = numpy.ascontiguousarray(other.T, dtype=self.dtype)
y = numpy.empty(x.shape[:-1] + self.shape[:1], dtype=self.dtype)
if other.ndim == 1:
self.mkl_('csrgemv', 'N', byref(c_int(self.shape[0])),
self.data.ctypes, self.rowptr.ctypes, self.colidx.ctypes, x.ctypes, y.ctypes)
else:
zero = numpy.array(0, dtype=self.dtype)
one = numpy.array(1, dtype=self.dtype)
self.mkl_('csrmm', 'N', byref(c_int(self.shape[0])),
byref(c_int(other.size//other.shape[0])),
byref(c_int(self.shape[1])), one.ctypes, 'GXXFXX',
self.data.ctypes, self.colidx.ctypes, self.rowptr.ctypes, self.rowptr[1:].ctypes,
x.ctypes, byref(c_int(other.shape[0])), zero.ctypes,
y.ctypes, byref(c_int(other.shape[0])))
return y.T
def __neg__(self):
return MKLMatrix(-self.data, self.rowptr, self.colidx, self.shape[1])
@property
def T(self):
if self.shape[0] != self.shape[1]:
raise NotImplementedError('MKLMatrix does not yet support transpose of non-square matrices')
job = numpy.array([0, 1, 1, 0, 0, 1], numpy.int32)
data = numpy.empty_like(self.data)
rowptr = numpy.empty_like(self.rowptr)
colidx = numpy.empty_like(self.colidx)
info = c_int()
self.mkl_('csrcsc', job.ctypes,
byref(c_int(self.shape[0])), self.data.ctypes,
self.colidx.ctypes, self.rowptr.ctypes, data.ctypes, colidx.ctypes,
rowptr.ctypes, byref(info))
return MKLMatrix(data, rowptr, colidx, self.shape[1])
def _submatrix(self, rows, cols):
keep = rows.repeat(numpy.diff(self.rowptr))
keep &= cols[self.colidx-1]
if keep.all(): # all nonzero entries are kept
rowptr = self.rowptr[numpy.hstack([True, rows])]
keep = slice(None) # avoid array copies
else:
rowptr = numpy.cumsum([1] + [keep[i:j].sum() for i, j in numeric.overlapping(self.rowptr-1)[rows]], dtype=numpy.int32)
data = self.data[keep]
assert rowptr[-1] == len(data)+1
colidx = (self.colidx if cols.all() else cols.cumsum(dtype=numpy.int32)[self.colidx-1])[keep]
return MKLMatrix(data, rowptr, colidx, cols.sum())
def export(self, form):
if form == 'dense':
dense = numpy.zeros(self.shape, self.dtype)
for row, i, j in zip(dense, self.rowptr[:-1]-1, self.rowptr[1:]-1):
row[self.colidx[i:j]-1] = self.data[i:j]
return dense
if form == 'csr':
return self.data, self.colidx-1, self.rowptr-1
if form == 'coo':
return self.data, (numpy.arange(self.shape[0]).repeat(self.rowptr[1:]-self.rowptr[:-1]), self.colidx-1)
raise NotImplementedError('cannot export MKLMatrix to {!r}'.format(form))
def _solver_fgmres(self, rhs, atol, maxiter=0, restart=150, precon=None, ztol=1e-12, preconargs={}, **args):
if self.dtype.kind == 'c':
raise MatrixError("MKL's fgmres does not support complex data")
rci = c_int(0)
n = c_int(len(rhs))
b = numpy.array(rhs, dtype=numpy.float64, copy=False)
x = numpy.zeros_like(b)
N = min(restart, len(rhs))
ipar = numpy.empty(128, dtype=numpy.int32)
dpar = numpy.empty(128, dtype=numpy.float64)
tmp = numpy.empty((2*N+1)*len(rhs)+(N*(N+9))//2+1, dtype=numpy.float64)
dfgmres_args = byref(n), x.ctypes, b.ctypes, byref(rci), ipar.ctypes, dpar.ctypes, tmp.ctypes
itercount = c_int(0)
libmkl.dfgmres_init(*dfgmres_args)
ipar[7] = 0 # do not perform the stopping test for the maximum number of iterations
ipar[8] = 0 # do not perform the residual stopping test
ipar[9] = 1 # perform the user-defined stopping test by setting RCI_request=2
if precon is not None:
ipar[10] = 1 # run the preconditioned version of the FGMRES method
precon = self.getprecon(precon, **args, **preconargs)
ipar[11] = 0 # do not perform the automatic test for zero norm of the currently generated vector
ipar[12] = 0 # update the solution to the vector x according to the computations done by the dfgmres routine
ipar[14] = N # the number of non-restarted FGMRES iterations
libmkl.dfgmres_check(*dfgmres_args)
if rci.value in (-1001, -1010, -1011):
warnings.warn('dgmres ' + ' and '.join(['wrote some warnings to stdout', 'changed some parameters to make them consistent or correct'][1 if rci.value == -1010 else 0:1 if rci.value == -1001 else 2]))
elif rci.value != 0:
raise MatrixError('dgmres check failed with error code {}'.format(rci.value))
with log.context('fgmres {:.0f}%', 0, 0) as format:
while True:
libmkl.dfgmres(*dfgmres_args)
if rci.value == 1: # multiply the matrix
tmp[ipar[22]-1:ipar[22]+n.value-1] = self @ tmp[ipar[21]-1:ipar[21]+n.value-1]
elif rci.value == 2: # perform the stopping test
if dpar[4] < atol:
libmkl.dfgmres_get(*dfgmres_args, byref(itercount))
if numpy.linalg.norm(self @ x - b) < atol:
break
format(100 * numpy.log(dpar[2]/dpar[4]) / numpy.log(dpar[2]/atol))
if ipar[3] > maxiter > 0:
break
elif rci.value == 3: # apply the preconditioner
tmp[ipar[22]-1:ipar[22]+n.value-1] = precon(tmp[ipar[21]-1:ipar[21]+n.value-1])
elif rci.value == 4: # check if the norm of the current orthogonal vector is zero
if dpar[6] < ztol:
libmkl.dfgmres_get(*dfgmres_args, byref(itercount))
if numpy.linalg.norm(self @ x - b) < atol:
break
raise MatrixError('singular matrix')
else:
raise MatrixError('this should not have occurred: rci={}'.format(rci.value))
log.debug('performed {} fgmres iterations, {} restarts'.format(ipar[3], ipar[3]//ipar[14]))
return x
def _precon_direct(self, **args):
return Pardiso(mtype=dict(f=11, c=13)[self.dtype.kind], a=self.data, ia=self.rowptr, ja=self.colidx, **args)
def _precon_sym_direct(self, **args):
upper = numpy.zeros(len(self.data), dtype=bool)
rowptr = numpy.empty_like(self.rowptr)
rowptr[0] = 1
diagdom = True
for irow, (n, m) in enumerate(numeric.overlapping(self.rowptr-1), start=1):
d = n + self.colidx[n:m].searchsorted(irow)
upper[d:m] = True
rowptr[irow] = rowptr[irow-1] + (m-d)
diagdom = diagdom and d < m and self.colidx[d] == irow and abs(self.data[n:m]).sum() < 2 * abs(self.data[d])
if diagdom:
log.debug('matrix is diagonally dominant, solving as SPD')
mtype = dict(f=2, c=4)
else:
|
return Pardiso(mtype=mtype[self.dtype.kind], a=self.data[upper], ia=rowptr, ja=self.colidx[upper], **args)
# vim:sw=4:sts=4:et
| mtype = dict(f=-2, c=6) | conditional_block |
_mkl.py | from ._base import Matrix, MatrixError, BackendNotAvailable
from .. import numeric, _util as util, warnings
from contextlib import contextmanager
from ctypes import c_int, byref, CDLL
import treelog as log
import os
import numpy
libmkl_path = os.environ.get('NUTILS_MATRIX_MKL_LIB', None)
if libmkl_path:
libmkl = CDLL(libmkl_path)
else:
for v in '.2', '.1', '':
libmkl = util.loadlib(linux=f'libmkl_rt.so{v}', darwin=f'libmkl_rt{v}.dylib', win32=f'mkl_rt{v}.dll')
if libmkl:
break
else:
raise BackendNotAvailable('the Intel MKL matrix backend requires libmkl to be installed (try: pip install mkl)')
def assemble(data, index, shape):
# In the increments below the output dtype is set to int32 not only to avoid
# an additional allocation, but crucially also to avoid truncation in case
# the incremented index overflows the original type.
return MKLMatrix(data, ncols=shape[1],
rowptr=numpy.add(index[0].searchsorted(numpy.arange(shape[0]+1)), 1, dtype=numpy.int32),
colidx=numpy.add(index[1], 1, dtype=numpy.int32))
class | :
'''Wrapper for libmkl.pardiso.
https://www.intel.com/content/www/us/en/develop/documentation/onemkl-developer-reference-c/top/
sparse-solver-routines/onemkl-pardiso-parallel-direct-sparse-solver-iface.html
'''
_errorcodes = {
-1: 'input inconsistent',
-2: 'not enough memory',
-3: 'reordering problem',
-4: 'zero pivot, numerical factorization or iterative refinement problem',
-5: 'unclassified (internal) error',
-6: 'reordering failed (matrix types 11 and 13 only)',
-7: 'diagonal matrix is singular',
-8: '32-bit integer overflow problem',
-9: 'not enough memory for OOC',
-10: 'error opening OOC files',
-11: 'read/write error with OOC files',
-12: 'pardiso_64 called from 32-bit library',
}
def __init__(self, mtype, a, ia, ja, verbose=False, iparm={}):
self.dtype = a.dtype
self.pt = numpy.zeros(64, numpy.int64) # handle to data structure
self.maxfct = c_int(1)
self.mnum = c_int(1)
self.mtype = c_int(mtype)
self.n = c_int(len(ia)-1)
self.a = a.ctypes
self.ia = ia.ctypes
self.ja = ja.ctypes
self.perm = None
self.iparm = numpy.zeros(64, dtype=numpy.int32) # https://software.intel.com/en-us/mkl-developer-reference-c-pardiso-iparm-parameter
self.msglvl = c_int(verbose)
libmkl.pardisoinit(self.pt.ctypes, byref(self.mtype), self.iparm.ctypes) # initialize iparm based on mtype
if self.iparm[0] != 1:
raise MatrixError('pardiso init failed')
for n, v in iparm.items():
self.iparm[n] = v
self.iparm[10] = 1 # enable scaling (default for nonsymmetric matrices, recommended for highly indefinite symmetric matrices)
self.iparm[12] = 1 # enable matching (default for nonsymmetric matrices, recommended for highly indefinite symmetric matrices)
self.iparm[27] = 0 # double precision data
self.iparm[34] = 0 # one-based indexing
self.iparm[36] = 0 # csr matrix format
self._phase(12) # analysis, numerical factorization
log.debug('peak memory use {:,d}k'.format(max(self.iparm[14], self.iparm[15]+self.iparm[16])))
def __call__(self, rhs):
rhsflat = numpy.ascontiguousarray(rhs.reshape(rhs.shape[0], -1).T, dtype=self.dtype)
lhsflat = numpy.empty_like(rhsflat)
self._phase(33, rhsflat.shape[0], rhsflat.ctypes, lhsflat.ctypes) # solve, iterative refinement
return lhsflat.T.reshape(rhs.shape)
def _phase(self, phase, nrhs=0, b=None, x=None):
error = c_int(1)
libmkl.pardiso(self.pt.ctypes, byref(self.maxfct), byref(self.mnum), byref(self.mtype),
byref(c_int(phase)), byref(self.n), self.a, self.ia, self.ja, self.perm,
byref(c_int(nrhs)), self.iparm.ctypes, byref(self.msglvl), b, x, byref(error))
if error.value:
raise MatrixError(self._errorcodes.get(error.value, 'unknown error {}'.format(error.value)))
def __del__(self):
self._phase(-1) # release all internal memory for all matrices
if self.pt.any():
warnings.warn('Pardiso failed to release its internal memory')
class MKLMatrix(Matrix):
'''matrix implementation based on sorted coo data'''
def __init__(self, data, rowptr, colidx, ncols):
assert len(data) == len(colidx) == rowptr[-1]-1
self.data = numpy.ascontiguousarray(data, dtype=numpy.complex128 if data.dtype.kind == 'c' else numpy.float64)
self.rowptr = numpy.ascontiguousarray(rowptr, dtype=numpy.int32)
self.colidx = numpy.ascontiguousarray(colidx, dtype=numpy.int32)
super().__init__((len(rowptr)-1, ncols), self.data.dtype)
def mkl_(self, name, *args):
attr = 'mkl_' + dict(f='d', c='z')[self.dtype.kind] + name
return getattr(libmkl, attr)(*args)
def convert(self, mat):
if not isinstance(mat, Matrix):
raise TypeError('cannot convert {} to Matrix'.format(type(mat).__name__))
if self.shape != mat.shape:
raise MatrixError('non-matching shapes')
if isinstance(mat, MKLMatrix) and mat.dtype == self.dtype:
return mat
data, colidx, rowptr = mat.export('csr')
return MKLMatrix(data.astype(self.dtype, copy=False), rowptr+1, colidx+1, self.shape[1])
def __add__(self, other):
other = self.convert(other)
assert self.shape == other.shape and self.dtype == other.dtype
request = c_int(1)
info = c_int()
rowptr = numpy.empty(self.shape[0]+1, dtype=numpy.int32)
one = numpy.array(1, dtype=self.dtype)
args = ["N", byref(request), byref(c_int(0)),
byref(c_int(self.shape[0])), byref(c_int(self.shape[1])),
self.data.ctypes, self.colidx.ctypes, self.rowptr.ctypes, one.ctypes,
other.data.ctypes, other.colidx.ctypes, other.rowptr.ctypes,
None, None, rowptr.ctypes, None, byref(info)]
self.mkl_('csradd', *args)
assert info.value == 0
colidx = numpy.empty(rowptr[-1]-1, dtype=numpy.int32)
data = numpy.empty(rowptr[-1]-1, dtype=self.dtype)
request.value = 2
args[12:14] = data.ctypes, colidx.ctypes
self.mkl_('csradd', *args)
assert info.value == 0
return MKLMatrix(data, rowptr, colidx, self.shape[1])
def __mul__(self, other):
if not numeric.isnumber(other):
raise TypeError
return MKLMatrix(self.data * other, self.rowptr, self.colidx, self.shape[1])
def __matmul__(self, other):
if not isinstance(other, numpy.ndarray):
raise TypeError
if other.shape[0] != self.shape[1]:
raise MatrixError
x = numpy.ascontiguousarray(other.T, dtype=self.dtype)
y = numpy.empty(x.shape[:-1] + self.shape[:1], dtype=self.dtype)
if other.ndim == 1:
self.mkl_('csrgemv', 'N', byref(c_int(self.shape[0])),
self.data.ctypes, self.rowptr.ctypes, self.colidx.ctypes, x.ctypes, y.ctypes)
else:
zero = numpy.array(0, dtype=self.dtype)
one = numpy.array(1, dtype=self.dtype)
self.mkl_('csrmm', 'N', byref(c_int(self.shape[0])),
byref(c_int(other.size//other.shape[0])),
byref(c_int(self.shape[1])), one.ctypes, 'GXXFXX',
self.data.ctypes, self.colidx.ctypes, self.rowptr.ctypes, self.rowptr[1:].ctypes,
x.ctypes, byref(c_int(other.shape[0])), zero.ctypes,
y.ctypes, byref(c_int(other.shape[0])))
return y.T
def __neg__(self):
return MKLMatrix(-self.data, self.rowptr, self.colidx, self.shape[1])
@property
def T(self):
if self.shape[0] != self.shape[1]:
raise NotImplementedError('MKLMatrix does not yet support transpose of non-square matrices')
job = numpy.array([0, 1, 1, 0, 0, 1], numpy.int32)
data = numpy.empty_like(self.data)
rowptr = numpy.empty_like(self.rowptr)
colidx = numpy.empty_like(self.colidx)
info = c_int()
self.mkl_('csrcsc', job.ctypes,
byref(c_int(self.shape[0])), self.data.ctypes,
self.colidx.ctypes, self.rowptr.ctypes, data.ctypes, colidx.ctypes,
rowptr.ctypes, byref(info))
return MKLMatrix(data, rowptr, colidx, self.shape[1])
def _submatrix(self, rows, cols):
keep = rows.repeat(numpy.diff(self.rowptr))
keep &= cols[self.colidx-1]
if keep.all(): # all nonzero entries are kept
rowptr = self.rowptr[numpy.hstack([True, rows])]
keep = slice(None) # avoid array copies
else:
rowptr = numpy.cumsum([1] + [keep[i:j].sum() for i, j in numeric.overlapping(self.rowptr-1)[rows]], dtype=numpy.int32)
data = self.data[keep]
assert rowptr[-1] == len(data)+1
colidx = (self.colidx if cols.all() else cols.cumsum(dtype=numpy.int32)[self.colidx-1])[keep]
return MKLMatrix(data, rowptr, colidx, cols.sum())
def export(self, form):
if form == 'dense':
dense = numpy.zeros(self.shape, self.dtype)
for row, i, j in zip(dense, self.rowptr[:-1]-1, self.rowptr[1:]-1):
row[self.colidx[i:j]-1] = self.data[i:j]
return dense
if form == 'csr':
return self.data, self.colidx-1, self.rowptr-1
if form == 'coo':
return self.data, (numpy.arange(self.shape[0]).repeat(self.rowptr[1:]-self.rowptr[:-1]), self.colidx-1)
raise NotImplementedError('cannot export MKLMatrix to {!r}'.format(form))
def _solver_fgmres(self, rhs, atol, maxiter=0, restart=150, precon=None, ztol=1e-12, preconargs={}, **args):
if self.dtype.kind == 'c':
raise MatrixError("MKL's fgmres does not support complex data")
rci = c_int(0)
n = c_int(len(rhs))
b = numpy.array(rhs, dtype=numpy.float64, copy=False)
x = numpy.zeros_like(b)
N = min(restart, len(rhs))
ipar = numpy.empty(128, dtype=numpy.int32)
dpar = numpy.empty(128, dtype=numpy.float64)
tmp = numpy.empty((2*N+1)*len(rhs)+(N*(N+9))//2+1, dtype=numpy.float64)
dfgmres_args = byref(n), x.ctypes, b.ctypes, byref(rci), ipar.ctypes, dpar.ctypes, tmp.ctypes
itercount = c_int(0)
libmkl.dfgmres_init(*dfgmres_args)
ipar[7] = 0 # do not perform the stopping test for the maximum number of iterations
ipar[8] = 0 # do not perform the residual stopping test
ipar[9] = 1 # perform the user-defined stopping test by setting RCI_request=2
if precon is not None:
ipar[10] = 1 # run the preconditioned version of the FGMRES method
precon = self.getprecon(precon, **args, **preconargs)
ipar[11] = 0 # do not perform the automatic test for zero norm of the currently generated vector
ipar[12] = 0 # update the solution to the vector x according to the computations done by the dfgmres routine
ipar[14] = N # the number of non-restarted FGMRES iterations
libmkl.dfgmres_check(*dfgmres_args)
if rci.value in (-1001, -1010, -1011):
warnings.warn('dgmres ' + ' and '.join(['wrote some warnings to stdout', 'changed some parameters to make them consistent or correct'][1 if rci.value == -1010 else 0:1 if rci.value == -1001 else 2]))
elif rci.value != 0:
raise MatrixError('dgmres check failed with error code {}'.format(rci.value))
with log.context('fgmres {:.0f}%', 0, 0) as format:
while True:
libmkl.dfgmres(*dfgmres_args)
if rci.value == 1: # multiply the matrix
tmp[ipar[22]-1:ipar[22]+n.value-1] = self @ tmp[ipar[21]-1:ipar[21]+n.value-1]
elif rci.value == 2: # perform the stopping test
if dpar[4] < atol:
libmkl.dfgmres_get(*dfgmres_args, byref(itercount))
if numpy.linalg.norm(self @ x - b) < atol:
break
format(100 * numpy.log(dpar[2]/dpar[4]) / numpy.log(dpar[2]/atol))
if ipar[3] > maxiter > 0:
break
elif rci.value == 3: # apply the preconditioner
tmp[ipar[22]-1:ipar[22]+n.value-1] = precon(tmp[ipar[21]-1:ipar[21]+n.value-1])
elif rci.value == 4: # check if the norm of the current orthogonal vector is zero
if dpar[6] < ztol:
libmkl.dfgmres_get(*dfgmres_args, byref(itercount))
if numpy.linalg.norm(self @ x - b) < atol:
break
raise MatrixError('singular matrix')
else:
raise MatrixError('this should not have occurred: rci={}'.format(rci.value))
log.debug('performed {} fgmres iterations, {} restarts'.format(ipar[3], ipar[3]//ipar[14]))
return x
def _precon_direct(self, **args):
return Pardiso(mtype=dict(f=11, c=13)[self.dtype.kind], a=self.data, ia=self.rowptr, ja=self.colidx, **args)
def _precon_sym_direct(self, **args):
upper = numpy.zeros(len(self.data), dtype=bool)
rowptr = numpy.empty_like(self.rowptr)
rowptr[0] = 1
diagdom = True
for irow, (n, m) in enumerate(numeric.overlapping(self.rowptr-1), start=1):
d = n + self.colidx[n:m].searchsorted(irow)
upper[d:m] = True
rowptr[irow] = rowptr[irow-1] + (m-d)
diagdom = diagdom and d < m and self.colidx[d] == irow and abs(self.data[n:m]).sum() < 2 * abs(self.data[d])
if diagdom:
log.debug('matrix is diagonally dominant, solving as SPD')
mtype = dict(f=2, c=4)
else:
mtype = dict(f=-2, c=6)
return Pardiso(mtype=mtype[self.dtype.kind], a=self.data[upper], ia=rowptr, ja=self.colidx[upper], **args)
# vim:sw=4:sts=4:et
| Pardiso | identifier_name |
_mkl.py | from ._base import Matrix, MatrixError, BackendNotAvailable
from .. import numeric, _util as util, warnings
from contextlib import contextmanager
from ctypes import c_int, byref, CDLL
import treelog as log
import os
import numpy
# Locate the MKL runtime: an explicit path via the NUTILS_MATRIX_MKL_LIB
# environment variable takes precedence; otherwise probe versioned and
# unversioned shared-library names for the current platform.
libmkl_path = os.environ.get('NUTILS_MATRIX_MKL_LIB', None)
if libmkl_path:
    libmkl = CDLL(libmkl_path)
else:
    for v in '.2', '.1', '':
        libmkl = util.loadlib(linux=f'libmkl_rt.so{v}', darwin=f'libmkl_rt{v}.dylib', win32=f'mkl_rt{v}.dll')
        if libmkl:
            break
    else:
        # for/else: reached only when no candidate library could be loaded
        raise BackendNotAvailable('the Intel MKL matrix backend requires libmkl to be installed (try: pip install mkl)')
def assemble(data, index, shape):
    '''Build an :class:`MKLMatrix` from sorted COO data.

    ``index[0]`` (row indices) must be sorted so that ``searchsorted`` yields
    the CSR row pointer; both index arrays are shifted to MKL's one-based
    convention.
    '''
    nrows, ncols = shape
    rows, cols = index
    # The increments below fix the output dtype to int32 not only to avoid an
    # additional allocation, but crucially also to avoid truncation in case
    # the incremented index overflows the original type.
    rowptr = numpy.add(rows.searchsorted(numpy.arange(nrows+1)), 1, dtype=numpy.int32)
    colidx = numpy.add(cols, 1, dtype=numpy.int32)
    return MKLMatrix(data, rowptr=rowptr, colidx=colidx, ncols=ncols)
class Pardiso:
    '''Wrapper for libmkl.pardiso.

    Performs symbolic analysis and numerical factorization of the one-based
    CSR matrix (a, ia, ja) at construction time; calling the resulting object
    with a right hand side then solves the corresponding linear system.

    https://www.intel.com/content/www/us/en/develop/documentation/onemkl-developer-reference-c/top/
    sparse-solver-routines/onemkl-pardiso-parallel-direct-sparse-solver-iface.html
    '''

    # human readable messages for pardiso's negative return codes
    _errorcodes = {
        -1: 'input inconsistent',
        -2: 'not enough memory',
        -3: 'reordering problem',
        -4: 'zero pivot, numerical factorization or iterative refinement problem',
        -5: 'unclassified (internal) error',
        -6: 'reordering failed (matrix types 11 and 13 only)',
        -7: 'diagonal matrix is singular',
        -8: '32-bit integer overflow problem',
        -9: 'not enough memory for OOC',
        -10: 'error opening OOC files',
        -11: 'read/write error with OOC files',
        -12: 'pardiso_64 called from 32-bit library',
    }

    def __init__(self, mtype, a, ia, ja, verbose=False, iparm={}):
        # NOTE: the mutable default `iparm={}` is safe here because it is only
        # read (iterated via .items()), never mutated.
        self.dtype = a.dtype
        self.pt = numpy.zeros(64, numpy.int64) # handle to data structure
        self.maxfct = c_int(1)
        self.mnum = c_int(1)
        self.mtype = c_int(mtype)
        self.n = c_int(len(ia)-1)
        # ctypes views of the one-based CSR arrays
        self.a = a.ctypes
        self.ia = ia.ctypes
        self.ja = ja.ctypes
        self.perm = None
        self.iparm = numpy.zeros(64, dtype=numpy.int32) # https://software.intel.com/en-us/mkl-developer-reference-c-pardiso-iparm-parameter
        self.msglvl = c_int(verbose)
        libmkl.pardisoinit(self.pt.ctypes, byref(self.mtype), self.iparm.ctypes) # initialize iparm based on mtype
        if self.iparm[0] != 1:
            raise MatrixError('pardiso init failed')
        # apply caller-supplied overrides before forcing the settings below
        for n, v in iparm.items():
            self.iparm[n] = v
        self.iparm[10] = 1 # enable scaling (default for nonsymmetric matrices, recommended for highly indefinite symmetric matrices)
        self.iparm[12] = 1 # enable matching (default for nonsymmetric matrices, recommended for highly indefinite symmetric matrices)
        self.iparm[27] = 0 # double precision data
        self.iparm[34] = 0 # one-based indexing
        self.iparm[36] = 0 # csr matrix format
        self._phase(12) # analysis, numerical factorization
        log.debug('peak memory use {:,d}k'.format(max(self.iparm[14], self.iparm[15]+self.iparm[16])))

    def __call__(self, rhs):
        # Solve A x = rhs for one or more right hand sides.  MKL expects each
        # right hand side contiguous along the matrix dimension, hence the
        # transpose/reshape dance; rhsflat.shape[0] is the number of rhs.
        rhsflat = numpy.ascontiguousarray(rhs.reshape(rhs.shape[0], -1).T, dtype=self.dtype)
        lhsflat = numpy.empty_like(rhsflat)
        self._phase(33, rhsflat.shape[0], rhsflat.ctypes, lhsflat.ctypes) # solve, iterative refinement
        return lhsflat.T.reshape(rhs.shape)

    def _phase(self, phase, nrhs=0, b=None, x=None):
        # Single entry point for all pardiso phases (12: analysis +
        # factorization, 33: solve, -1: release); raises MatrixError on a
        # nonzero error code.
        error = c_int(1)
        libmkl.pardiso(self.pt.ctypes, byref(self.maxfct), byref(self.mnum), byref(self.mtype),
            byref(c_int(phase)), byref(self.n), self.a, self.ia, self.ja, self.perm,
            byref(c_int(nrhs)), self.iparm.ctypes, byref(self.msglvl), b, x, byref(error))
        if error.value:
            raise MatrixError(self._errorcodes.get(error.value, 'unknown error {}'.format(error.value)))

    def __del__(self):
        self._phase(-1) # release all internal memory for all matrices
        # pardiso zeroes the handle on successful release
        if self.pt.any():
            warnings.warn('Pardiso failed to release its internal memory')
class MKLMatrix(Matrix):
'''matrix implementation based on sorted coo data'''
def __init__(self, data, rowptr, colidx, ncols):
|
def mkl_(self, name, *args):
    '''Call the libmkl routine matching this matrix's scalar kind.

    Prefixes ``name`` with ``mkl_d`` (real) or ``mkl_z`` (complex) and
    invokes it with ``args``.
    '''
    kind_prefix = {'f': 'd', 'c': 'z'}[self.dtype.kind]
    routine = getattr(libmkl, 'mkl_' + kind_prefix + name)
    return routine(*args)
def convert(self, mat):
    '''Coerce another Matrix of the same shape to an MKLMatrix with this
    matrix's dtype, returning it unchanged if it already is one.'''
    if not isinstance(mat, Matrix):
        raise TypeError('cannot convert {} to Matrix'.format(type(mat).__name__))
    if self.shape != mat.shape:
        raise MatrixError('non-matching shapes')
    if isinstance(mat, MKLMatrix) and mat.dtype == self.dtype:
        return mat
    # go through the generic zero-based CSR export and shift to one-based
    data, colidx, rowptr = mat.export('csr')
    return MKLMatrix(data.astype(self.dtype, copy=False), rowptr+1, colidx+1, self.shape[1])
def __add__(self, other):
    # Sparse-sparse addition via mkl_?csradd, using its two-stage protocol:
    # request=1 computes only the row pointer of the result so that the
    # data/colidx arrays can be sized; request=2 then fills them in.
    other = self.convert(other)
    assert self.shape == other.shape and self.dtype == other.dtype
    request = c_int(1)
    info = c_int()
    rowptr = numpy.empty(self.shape[0]+1, dtype=numpy.int32)
    one = numpy.array(1, dtype=self.dtype)  # scalar multiplier for `other`
    args = ["N", byref(request), byref(c_int(0)),
        byref(c_int(self.shape[0])), byref(c_int(self.shape[1])),
        self.data.ctypes, self.colidx.ctypes, self.rowptr.ctypes, one.ctypes,
        other.data.ctypes, other.colidx.ctypes, other.rowptr.ctypes,
        None, None, rowptr.ctypes, None, byref(info)]
    self.mkl_('csradd', *args)
    assert info.value == 0
    # rowptr is one-based, so its final entry exceeds nnz by one
    colidx = numpy.empty(rowptr[-1]-1, dtype=numpy.int32)
    data = numpy.empty(rowptr[-1]-1, dtype=self.dtype)
    request.value = 2
    args[12:14] = data.ctypes, colidx.ctypes  # plug the output arrays into the call
    self.mkl_('csradd', *args)
    assert info.value == 0
    return MKLMatrix(data, rowptr, colidx, self.shape[1])
def __mul__(self, other):
    '''Scalar multiplication; the sparsity structure is shared, only the
    data array is scaled.'''
    if not numeric.isnumber(other):
        raise TypeError
    scaled = self.data * other
    return MKLMatrix(scaled, self.rowptr, self.colidx, self.shape[1])
def __matmul__(self, other):
    # Matrix-vector (csrgemv) or matrix-matrix (csrmm) product with a dense
    # numpy operand; returns a dense array of matching trailing shape.
    if not isinstance(other, numpy.ndarray):
        raise TypeError
    if other.shape[0] != self.shape[1]:
        raise MatrixError
    # make the matrix axis contiguous for MKL; transposed back on return
    x = numpy.ascontiguousarray(other.T, dtype=self.dtype)
    y = numpy.empty(x.shape[:-1] + self.shape[:1], dtype=self.dtype)
    if other.ndim == 1:
        self.mkl_('csrgemv', 'N', byref(c_int(self.shape[0])),
            self.data.ctypes, self.rowptr.ctypes, self.colidx.ctypes, x.ctypes, y.ctypes)
    else:
        zero = numpy.array(0, dtype=self.dtype)
        one = numpy.array(1, dtype=self.dtype)
        # matdescra 'GXXFXX': general matrix, one-based (Fortran) indexing;
        # rowptr/rowptr[1:] serve as the pointerB/pointerE pair
        self.mkl_('csrmm', 'N', byref(c_int(self.shape[0])),
            byref(c_int(other.size//other.shape[0])),
            byref(c_int(self.shape[1])), one.ctypes, 'GXXFXX',
            self.data.ctypes, self.colidx.ctypes, self.rowptr.ctypes, self.rowptr[1:].ctypes,
            x.ctypes, byref(c_int(other.shape[0])), zero.ctypes,
            y.ctypes, byref(c_int(other.shape[0])))
    return y.T
def __neg__(self):
    '''Return the negated matrix; the sparsity structure is shared.'''
    negated = -self.data
    return MKLMatrix(negated, self.rowptr, self.colidx, self.shape[1])
@property
def T(self):
    # Transpose via mkl_?csrcsc: the CSC form of a matrix, reinterpreted as
    # CSR, is its transpose.  The routine takes a single dimension argument,
    # hence the square-only restriction below.
    if self.shape[0] != self.shape[1]:
        raise NotImplementedError('MKLMatrix does not yet support transpose of non-square matrices')
    # job array per MKL docs: CSR->CSC, one-based input and output, fill all
    # output arrays -- TODO confirm against the mkl_?csrcsc reference
    job = numpy.array([0, 1, 1, 0, 0, 1], numpy.int32)
    data = numpy.empty_like(self.data)
    rowptr = numpy.empty_like(self.rowptr)
    colidx = numpy.empty_like(self.colidx)
    info = c_int()
    self.mkl_('csrcsc', job.ctypes,
        byref(c_int(self.shape[0])), self.data.ctypes,
        self.colidx.ctypes, self.rowptr.ctypes, data.ctypes, colidx.ctypes,
        rowptr.ctypes, byref(info))
    return MKLMatrix(data, rowptr, colidx, self.shape[1])
def _submatrix(self, rows, cols):
    # Extract the submatrix selected by boolean masks over rows and columns.
    # an entry survives iff both its row and its column are retained
    keep = rows.repeat(numpy.diff(self.rowptr))
    keep &= cols[self.colidx-1]
    if keep.all(): # all nonzero entries are kept
        rowptr = self.rowptr[numpy.hstack([True, rows])]
        keep = slice(None) # avoid array copies
    else:
        # recount entries per retained row into a fresh one-based row pointer
        rowptr = numpy.cumsum([1] + [keep[i:j].sum() for i, j in numeric.overlapping(self.rowptr-1)[rows]], dtype=numpy.int32)
    data = self.data[keep]
    assert rowptr[-1] == len(data)+1
    # renumber column indices into the reduced (still one-based) column space
    colidx = (self.colidx if cols.all() else cols.cumsum(dtype=numpy.int32)[self.colidx-1])[keep]
    return MKLMatrix(data, rowptr, colidx, cols.sum())
def export(self, form):
    '''Export matrix data in any of the supported forms: 'dense' (a dense
    numpy array), 'csr' (zero-based data/colidx/rowptr triple) or 'coo'
    (data plus a (rows, cols) index tuple).'''
    if form == 'dense':
        dense = numpy.zeros(self.shape, self.dtype)
        # scatter each row's entries; the -1 undoes the one-based indexing
        for row, i, j in zip(dense, self.rowptr[:-1]-1, self.rowptr[1:]-1):
            row[self.colidx[i:j]-1] = self.data[i:j]
        return dense
    if form == 'csr':
        return self.data, self.colidx-1, self.rowptr-1
    if form == 'coo':
        # expand the row pointer into explicit per-entry row indices
        return self.data, (numpy.arange(self.shape[0]).repeat(self.rowptr[1:]-self.rowptr[:-1]), self.colidx-1)
    raise NotImplementedError('cannot export MKLMatrix to {!r}'.format(form))
def _solver_fgmres(self, rhs, atol, maxiter=0, restart=150, precon=None, ztol=1e-12, preconargs={}, **args):
    '''Solve the linear system with MKL's flexible GMRES via the reverse
    communication interface (RCI): dfgmres returns a request code in `rci`
    and this loop performs the requested operation (matrix product,
    convergence test, preconditioner application) until convergence.

    Parameters: `atol` is the absolute tolerance on the true residual norm,
    `maxiter` limits iterations (0 = unlimited), `restart` caps the
    non-restarted iteration count, `ztol` is the threshold below which the
    current orthogonal vector is considered zero (singular matrix).
    '''
    if self.dtype.kind == 'c':
        raise MatrixError("MKL's fgmres does not support complex data")
    rci = c_int(0)  # RCI request code, written by dfgmres
    n = c_int(len(rhs))
    # NOTE(review): numpy>=2 raises when copy=False would require a copy;
    # verify numpy compatibility if this line ever fails.
    b = numpy.array(rhs, dtype=numpy.float64, copy=False)
    x = numpy.zeros_like(b)
    N = min(restart, len(rhs))
    # ipar/dpar are MKL's integer/float parameter arrays; indices here are
    # zero-based, i.e. one less than in the MKL documentation
    ipar = numpy.empty(128, dtype=numpy.int32)
    dpar = numpy.empty(128, dtype=numpy.float64)
    tmp = numpy.empty((2*N+1)*len(rhs)+(N*(N+9))//2+1, dtype=numpy.float64)  # work array, sized per MKL docs
    dfgmres_args = byref(n), x.ctypes, b.ctypes, byref(rci), ipar.ctypes, dpar.ctypes, tmp.ctypes
    itercount = c_int(0)
    libmkl.dfgmres_init(*dfgmres_args)
    ipar[7] = 0 # do not perform the stopping test for the maximum number of iterations
    ipar[8] = 0 # do not perform the residual stopping test
    ipar[9] = 1 # perform the user-defined stopping test by setting RCI_request=2
    if precon is not None:
        ipar[10] = 1 # run the preconditioned version of the FGMRES method
        precon = self.getprecon(precon, **args, **preconargs)
    ipar[11] = 0 # do not perform the automatic test for zero norm of the currently generated vector
    ipar[12] = 0 # update the solution to the vector x according to the computations done by the dfgmres routine
    ipar[14] = N # the number of non-restarted FGMRES iterations
    libmkl.dfgmres_check(*dfgmres_args)
    if rci.value in (-1001, -1010, -1011):
        warnings.warn('dgmres ' + ' and '.join(['wrote some warnings to stdout', 'changed some parameters to make them consistent or correct'][1 if rci.value == -1010 else 0:1 if rci.value == -1001 else 2]))
    elif rci.value != 0:
        raise MatrixError('dgmres check failed with error code {}'.format(rci.value))
    with log.context('fgmres {:.0f}%', 0, 0) as format:
        while True:
            libmkl.dfgmres(*dfgmres_args)
            if rci.value == 1: # multiply the matrix
                # ipar[21]/ipar[22] hold one-based offsets into tmp for the
                # input and output vectors of the requested product
                tmp[ipar[22]-1:ipar[22]+n.value-1] = self @ tmp[ipar[21]-1:ipar[21]+n.value-1]
            elif rci.value == 2: # perform the stopping test
                if dpar[4] < atol:  # dpar[4]: current (estimated) residual norm
                    libmkl.dfgmres_get(*dfgmres_args, byref(itercount))
                    # double-check against the true residual before accepting
                    if numpy.linalg.norm(self @ x - b) < atol:
                        break
                # report progress on a log scale from initial to target residual
                format(100 * numpy.log(dpar[2]/dpar[4]) / numpy.log(dpar[2]/atol))
                if ipar[3] > maxiter > 0:  # ipar[3]: current iteration count
                    break
            elif rci.value == 3: # apply the preconditioner
                tmp[ipar[22]-1:ipar[22]+n.value-1] = precon(tmp[ipar[21]-1:ipar[21]+n.value-1])
            elif rci.value == 4: # check if the norm of the current orthogonal vector is zero
                if dpar[6] < ztol:
                    libmkl.dfgmres_get(*dfgmres_args, byref(itercount))
                    if numpy.linalg.norm(self @ x - b) < atol:
                        break
                    raise MatrixError('singular matrix')
            else:
                raise MatrixError('this should not have occurred: rci={}'.format(rci.value))
    log.debug('performed {} fgmres iterations, {} restarts'.format(ipar[3], ipar[3]//ipar[14]))
    return x
def _precon_direct(self, **args):
    # Direct (LU) preconditioner: factorize the full matrix with Pardiso.
    # mtype 11 = real nonsymmetric, 13 = complex nonsymmetric.
    return Pardiso(mtype=dict(f=11, c=13)[self.dtype.kind], a=self.data, ia=self.rowptr, ja=self.colidx, **args)
def _precon_sym_direct(self, **args):
    # Symmetric direct preconditioner: Pardiso's symmetric matrix types take
    # only the upper triangular part, so extract it from the full CSR data.
    upper = numpy.zeros(len(self.data), dtype=bool)  # mask of upper-triangular (incl. diagonal) entries
    rowptr = numpy.empty_like(self.rowptr)  # one-based row pointer of the triangular submatrix
    rowptr[0] = 1
    diagdom = True
    # n, m delimit row irow's entries in the (one-based) CSR arrays
    for irow, (n, m) in enumerate(numeric.overlapping(self.rowptr-1), start=1):
        # position of the first column index >= irow, i.e. the diagonal if present
        d = n + self.colidx[n:m].searchsorted(irow)
        upper[d:m] = True
        rowptr[irow] = rowptr[irow-1] + (m-d)
        # row is diagonally dominant iff the diagonal exists and |diag| exceeds
        # the sum of off-diagonal magnitudes (full-row sum < 2*|diag| is equivalent)
        diagdom = diagdom and d < m and self.colidx[d] == irow and abs(self.data[n:m]).sum() < 2 * abs(self.data[d])
    if diagdom:
        log.debug('matrix is diagonally dominant, solving as SPD')
        mtype = dict(f=2, c=4)  # 2: real SPD, 4: complex hermitian positive definite
    else:
        mtype = dict(f=-2, c=6)  # -2: real symmetric indefinite, 6: complex symmetric
    return Pardiso(mtype=mtype[self.dtype.kind], a=self.data[upper], ia=rowptr, ja=self.colidx[upper], **args)
# vim:sw=4:sts=4:et
| assert len(data) == len(colidx) == rowptr[-1]-1
self.data = numpy.ascontiguousarray(data, dtype=numpy.complex128 if data.dtype.kind == 'c' else numpy.float64)
self.rowptr = numpy.ascontiguousarray(rowptr, dtype=numpy.int32)
self.colidx = numpy.ascontiguousarray(colidx, dtype=numpy.int32)
super().__init__((len(rowptr)-1, ncols), self.data.dtype) | identifier_body |
transcription.py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS (Hervé BREDIN - http://herve.niderb.fr)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
import networkx as nx
from networkx.readwrite.json_graph import node_link_data, node_link_graph
from time import T, TStart, TEnd
from segment import Segment
from json import PYANNOTE_JSON_TRANSCRIPTION
import itertools
class Transcription(nx.MultiDiGraph):
"""Transcription stored as annotation graph"""
def __init__(self, graph=None, **attrs):
    """Initialize from an optional existing graph; keyword arguments are
    stored as graph-level attributes."""
    super(Transcription, self).__init__(data=graph)
    for name, value in attrs.items():
        self.graph[name] = value
def drifting(self):
    """Return the list of nodes whose time is still drifting (not anchored)."""
    result = []
    for node in self:
        if node.drifting:
            result.append(node)
    return result
def anchored(self):
    """Return the list of nodes whose time is anchored."""
    result = []
    for node in self:
        if node.anchored:
            result.append(node)
    return result
def add_edge(self, t1, t2, key=None, attr_dict=None, **attrs):
    """Add annotation to the graph between times t1 and t2

    Parameters
    ----------
    t1, t2: float, str or None
        Start and end times, coerced through ``T``.
    key : optional
        Edge key, forwarded to ``MultiDiGraph.add_edge``.
    attr_dict : dict, optional
        {annotation_type: annotation_value} dictionary
    **attrs
        Additional annotation attributes.

    Example
    -------
    >>> G = Transcription()
    >>> G.add_edge(T(1.), T(), speaker='John', speech='Hello world!')
    """
    t1 = T(t1)
    t2 = T(t2)
    # make sure Ts are connected in correct chronological order
    # (only checkable when both ends are anchored)
    if t1.anchored and t2.anchored:
        assert t1 <= t2
    super(Transcription, self).add_edge(t1, t2, key=key, attr_dict=attr_dict, **attrs)
def relabel_drifting_nodes(self, mapping=None):
    """Relabel drifting nodes

    Parameters
    ----------
    mapping : dict, optional
        A dictionary with the old labels as keys and new labels as values.
        When omitted, every drifting node is mapped to a fresh drifting time.

    Returns
    -------
    g : Transcription
        New annotation graph
    mapping : dict
        A dictionary with the new labels as keys and old labels as values.
        Can be used to get back to the version before relabelling.
    """
    if mapping is None:
        old2new = dict((node, T()) for node in self.drifting())
    else:
        old2new = dict(mapping)
    # invert the mapping so the caller can undo the relabelling
    new2old = dict((new, old) for old, new in old2new.iteritems())
    return nx.relabel_nodes(self, old2new, copy=True), new2old
def crop(self, source, target=None):
    """Get subgraph between source and target

    Parameters
    ----------
    source : Segment, float or str
        Start time, or a Segment providing both start and end.
    target : float or str, optional
        End time (derived from `source` when it is a Segment).

    Returns
    -------
    g : Transcription
        Sub-graph between source and target
    """
    if isinstance(source, Segment):
        source, target = source.start, source.end
    source = T(source)
    target = T(target)
    # snap source back to the latest anchored time <= source, if any
    if source.anchored:
        before = [n for n in self.anchored() if n <= source]
        if before:
            source = sorted(before)[-1]
    # snap target forward to the earliest anchored time >= target, if any
    if target.anchored:
        after = [n for n in self.anchored() if n >= target]
        if after:
            target = sorted(after)[0]
    # keep only nodes lying on a path from source to target
    from_source = nx.algorithms.descendants(self, source)
    to_target = nx.algorithms.ancestors(self, target)
    nbunch = {source, target} | (from_source & to_target)
    return self.subgraph(nbunch)
# =========================================================================
def _merge(self, drifting_t, another_t):
    """Helper function to merge `drifting_t` with `another_t`

    Assumes that both `drifting_t` and `another_t` exists.
    Also assumes that `drifting_t` is an instance of `TFloating`
    (otherwise, this might lead to weird graph configuration)

    Parameters
    ----------
    drifting_t :
        Existing drifting time in graph
    another_t :
        Existing time in graph
    """
    # drifting_t and another_t must exist in graph

    # add a (t --> another_t) edge for each (t --> drifting_t) edge,
    # preserving edge keys and attribute dicts
    for t, _, key, data in self.in_edges_iter(
        nbunch=[drifting_t], data=True, keys=True
    ):
        self.add_edge(t, another_t, key=key, attr_dict=data)
    # add a (another_t --> t) edge for each (drifting_t --> t) edge
    for _, t, key, data in self.edges_iter(
        nbunch=[drifting_t], data=True, keys=True
    ):
        self.add_edge(another_t, t, key=key, attr_dict=data)
    # remove drifting_t node (as it was replaced by another_t);
    # this also drops the original edges rerouted above
    self.remove_node(drifting_t)
def anchor(self, drifting_t, anchored_t):
    """
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    o -- [ D ] -- o  ==>  o -- [ A ] -- o
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Anchor `drifting_t` at `anchored_t`

    Parameters
    ----------
    drifting_t :
        Drifting time to anchor
    anchored_t :
        When to anchor `drifting_t`
    """
    drifting_t = T(drifting_t)
    anchored_t = T(anchored_t)
    # the drifting time must already be part of the graph ...
    assert (drifting_t in self) and (drifting_t.drifting)
    # ... and the target must actually be anchored
    assert anchored_t.anchored
    if anchored_t not in self:
        self.add_node(anchored_t)
    # reroute all of drifting_t's edges onto anchored_t
    self._merge(drifting_t, anchored_t)
def align(self, one_t, another_t):
    """Align two (potentially drifting) times.

    `one_t` and `another_t` cannot both be anchored at the same time.
    In case `another_t` is anchored, this is similar to the `anchor` method.

    Parameters
    ----------
    one_t, another_t
        Two times to be aligned.
    """
    one_t = T(one_t)
    another_t = T(another_t)
    # both times must already exist in the graph
    assert one_t in self
    assert another_t in self
    if one_t.drifting:
        # first time is drifting: merge it into the second
        self._merge(one_t, another_t)
    elif another_t.drifting:
        # second time is drifting: merge it into the first
        self._merge(another_t, one_t)
    else:
        # both anchored: nothing can be merged
        raise ValueError('Cannot align two anchored times')
# =========================================================================
def pre_align(self, t1, t2, t):
    """Insert `t` as the common predecessor of `t1` and `t2`.

    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    p -- [ t1 ]        p     [ t1 ]
                         .  /
                  ==>  [ t ]
                         '  \\
    p' -- [ t2 ]       p'    [ t2 ]
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    All edges previously pointing into `t1` or `t2` are rerouted to point
    into `t` instead; they must carry no annotation data because the
    rerouting drops their attribute dictionaries.
    """
    t1 = T(t1)
    t2 = T(t2)
    t = T(t)
    # make sure --[t1] incoming edges are empty
    # because they're going to be removed afterwards,
    # and we don't want to loose data
    pred1 = self.predecessors(t1)
    for p in pred1:
        for key, data in self[p][t1].iteritems():
            assert not data
    # make sure --[t2] incoming edges are empty
    # (for the same reason...)
    pred2 = self.predecessors(t2)
    for p in pred2:
        for key, data in self[p][t2].iteritems():
            assert not data
    # let's get started (remove all incoming edges)
    for p in pred1:
        for key in list(self[p][t1]):
            self.remove_edge(p, t1, key=key)
    for p in pred2:
        for key in list(self[p][t2]):
            self.remove_edge(p, t2, key=key)
    # route every former predecessor through t
    for p in set(pred1) | set(pred2):
        self.add_edge(p, t)
    self.add_edge(t, t1)
    self.add_edge(t, t2)
def post_align(self, t1, t2, t):
    """Insert `t` as the common successor of `t1` and `t2`.

    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    [ t1 ] -- s        [ t1 ]     s
                            \\  .
                  ==>      [ t ]
                            /  '
    [ t2 ] -- s'       [ t2 ]     s'
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    All edges previously leaving `t1` or `t2` are rerouted to leave from
    `t` instead; they must carry no annotation data because the rerouting
    drops their attribute dictionaries.
    """
    t1 = T(t1)
    t2 = T(t2)
    t = T(t)
    # make sure [t1]-- outgoing edges are empty
    # because they're going to be removed afterwards,
    # and we don't want to loose data
    succ1 = self.successors(t1)
    for s in succ1:
        for key, data in self[t1][s].iteritems():
            assert not data
    # make sure [t2]-- outgoing edges are empty
    # (for the same reason...)
    succ2 = self.successors(t2)
    for s in succ2:
        for key, data in self[t2][s].iteritems():
            assert not data
    # let's get started (remove all outgoing edges)
    for s in succ1:
        for key in list(self[t1][s]):
            self.remove_edge(t1, s, key=key)
    for s in succ2:
        for key in list(self[t2][s]):
            self.remove_edge(t2, s, key=key)
    # route every former successor through t
    for s in set(succ1) | set(succ2):
        self.add_edge(t, s)
    self.add_edge(t1, t)
    self.add_edge(t2, t)
# =========================================================================
def ordering_graph(self):
    """Ordering graph

    t1 --> t2 in the ordering graph indicates that t1 happens before t2.
    A missing edge simply means that it is not clear yet.
    """
    g = nx.DiGraph()
    # add times
    for t in self.nodes_iter():
        g.add_node(t)
    # add existing edges
    for t1, t2 in self.edges_iter():
        g.add_edge(t1, t2)
    # connect every pair of anchored times
    anchored = sorted(self.anchored())
    for t1, t2 in itertools.combinations(anchored, 2):
        g.add_edge(t1, t2)
    # transitive closure: connect every time with all its successors
    _g = g.copy()
    for t1 in _g:
        for t2 in set([target for (_, target) in nx.bfs_edges(_g, t1)]):
            g.add_edge(t1, t2)
    return g
def ordered_edges_iter(self, data=False, keys=False):
    """Return an iterator over the edges in temporal/topological order.

    Ordered edges are returned as tuples with optional data and keys
    in the order (t1, t2, key, data).

    Parameters
    ----------
    data : bool, optional (default=False)
        If True, return edge attribute dict with each edge.
    keys : bool, optional (default=False)
        If True, return edge keys with each edge.

    Returns
    -------
    edge_iter : iterator
        An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
    """
    # sort nodes in temporal+topological order first, then emit edges
    # grouped by that node ordering
    ordering = self.ordering_graph()
    sorted_nodes = nx.topological_sort(ordering)
    for edge in self.edges_iter(nbunch=sorted_nodes, data=data, keys=keys):
        yield edge
# =========================================================================
def _anchored_successors(self, n):
    """Yield, for each outgoing path, the first anchored time reached."""
    for successor in self.successors(n):
        if successor.anchored:
            # anchored: anything beyond is necessarily later, stop here
            yield successor
        else:
            # still drifting: recurse one level deeper
            for anchored in self._anchored_successors(successor):
                yield anchored
def _anchored_predecessors(self, n):
    """Yield, for each incoming path, the first anchored time reached."""
    for predecessor in self.predecessors(n):
        if predecessor.anchored:
            # anchored: anything before is necessarily earlier, stop here
            yield predecessor
        else:
            # still drifting: recurse one level deeper
            for anchored in self._anchored_predecessors(predecessor):
                yield anchored
def timerange(self, t):
    """Infer smallest possible timerange from graph structure

    Parameters
    ----------
    t : float, str or T
        Time (anchored or drifting) whose range is requested.

    Returns
    -------
    (left, right) tuple
        left == None or right == None indicates that the current state of
        the annotation graph does not allow to decide the boundary.
    """
    t = T(t)
    # an anchored time is its own (degenerate) range
    if t.anchored:
        return (t.T, t.T)
    # the latest anchored predecessor bounds the range on the left,
    # the earliest anchored successor bounds it on the right
    successors = [n for n in self._anchored_successors(t)]
    predecessors = [n for n in self._anchored_predecessors(t)]
    earlier_successor = None
    if successors:
        earlier_successor = min(successors)
    later_predecessor = None
    if predecessors:
        later_predecessor = max(predecessors)
    # bug fix: the original unconditionally dereferenced `.T`, raising
    # AttributeError whenever either bound was undecidable (None) --
    # contradicting the documented contract above.
    return (
        later_predecessor.T if later_predecessor is not None else None,
        earlier_successor.T if earlier_successor is not None else None,
    )
# =========================================================================
def for_json(self):
    """Serialize the graph as a JSON-ready node-link dict."""
    serialized = node_link_data(self)
    return {PYANNOTE_JSON_TRANSCRIPTION: serialized}
@classmethod
def from_json(cls, data):
    """Rebuild a Transcription from the output of `for_json`."""
    graph = node_link_graph(data[PYANNOTE_JSON_TRANSCRIPTION])
    # restore T node instances, which the JSON round-trip does not preserve
    mapping = dict((node, T(node)) for node in graph)
    graph = nx.relabel_nodes(graph, mapping)
    return cls(graph=graph, **graph.graph)
# === IPython Notebook displays ===========================================
def _repr_svg_(self):
    """IPython notebook hook: render the annotation graph as SVG."""
    # imported lazily so notebook-only dependencies stay optional
    from notebook import repr_transcription
    return repr_transcription(self)
| add existing | conditional_block |
transcription.py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS (Hervé BREDIN - http://herve.niderb.fr)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
import networkx as nx
from networkx.readwrite.json_graph import node_link_data, node_link_graph
from time import T, TStart, TEnd
from segment import Segment
from json import PYANNOTE_JSON_TRANSCRIPTION
import itertools
class Transcription(nx.MultiDiGraph):
"""Transcription stored as annotation graph"""
def __init__(self, graph=None, **attrs):
super(Transcription, self).__init__(data=graph)
self.graph.update(attrs)
def drifting(self):
"""Get list of drifting times"""
return [n for n in self if n.drifting]
def anchored(self):
"""Get list of anchored times"""
return [n for n in self if n.anchored]
def add_edge(self, t1, t2, key=None, attr_dict=None, **attrs):
"""Add annotation to the graph between times t1 and t2
Parameters
----------
t1, t2: float, str or None
data : dict, optional
{annotation_type: annotation_value} dictionary
Example
-------
>>> G = Transcription()
>>> G.add_edge(T(1.), T(), speaker='John', 'speech'='Hello world!')
"""
t1 = T(t1)
t2 = T(t2)
# make sure Ts are connected in correct chronological order
if t1.anchored and t2.anchored:
assert t1 <= t2
super(Transcription, self).add_edge(t1, t2, key=key, attr_dict=attr_dict, **attrs)
def relabel_drifting_nodes(self, mapping=None):
"""Relabel drifting nodes
Parameters
----------
mapping : dict, optional
A dictionary with the old labels as keys and new labels as values.
Returns
-------
g : Transcription
New annotation graph
mapping : dict
A dictionary with the new labels as keys and old labels as values.
Can be used to get back to the version before relabelling.
"""
if mapping is None:
old2new = {n: T() for n in self.drifting()}
else:
old2new = dict(mapping)
new2old = {new: old for old, new in old2new.iteritems()}
return nx.relabel_nodes(self, old2new, copy=True), new2old
def crop(self, source, target=None):
"""Get subgraph between source and target
Parameters
----------
source : Segment,
target : float or str, optional
Returns
-------
g : Transcription
Sub-graph between source and target
"""
if isinstance(source, Segment):
source, target = source.start, source.end
source = T(source)
target = T(target)
if source.anchored:
before = [n for n in self.anchored() if n <= source]
if before:
source = sorted(before)[-1]
if target.anchored:
after = [n for n in self.anchored() if n >= target]
if after:
target = sorted(after)[0]
from_source = nx.algorithms.descendants(self, source)
to_target = nx.algorithms.ancestors(self, target)
nbunch = {source, target} | (from_source & to_target)
return self.subgraph(nbunch)
# =========================================================================
def _merge(self, drifting_t, another_t):
"""Helper function to merge `drifting_t` with `another_t`
Assumes that both `drifting_t` and `another_t` exists.
Also assumes that `drifting_t` is an instance of `TFloating`
(otherwise, this might lead to weird graph configuration)
Parameters
----------
drifting_t :
Existing drifting time in graph
another_t :
Existing time in graph
"""
# drifting_t and another_t must exist in graph
# add a (t --> another_t) edge for each (t --> drifting_t) edge
for t, _, key, data in self.in_edges_iter(
nbunch=[drifting_t], data=True, keys=True
):
self.add_edge(t, another_t, key=key, attr_dict=data)
# add a (another_t --> t) edge for each (drifting_t --> t) edge
for _, t, key, data in self.edges_iter(
nbunch=[drifting_t], data=True, keys=True
):
self.add_edge(another_t, t, key=key, attr_dict=data)
# remove drifting_t node (as it was replaced by another_t)
self.remove_node(drifting_t)
def anchor(self, drifting_t, anchored_t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ D ] -- o ==> o -- [ A ] -- o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Anchor `drifting_t` at `anchored_t`
Parameters
----------
drifting_t :
Drifting time to anchor
anchored_t :
When to anchor `drifting_t`
"""
drifting_t = T(drifting_t)
anchored_t = T(anchored_t)
assert (drifting_t in self) and (drifting_t.drifting)
assert anchored_t.anchored
if anchored_t not in self:
self.add_node(anchored_t)
self._merge(drifting_t, anchored_t)
def align(self, one_t, another_t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ F ] -- o o o
⟍ ⟋
==> [ F ]
⟋ ⟍
o -- [ f ] -- o o o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Align two (potentially drifting) times
`one_t` and `another_t` cannot both be anchored at the same time
In case `another_t` is anchored, this is similar to `anchor` method
Parameters
----------
one_t, another_t
Two times to be aligned.
"""
one_t = T(one_t)
another_t = T(another_t)
assert one_t in self
assert another_t in self
# first time is drifting
if one_t.drifting:
self._merge(one_t, another_t)
# second time is drifting
elif another_t.drifting:
self._merge(another_t, one_t)
# both times are anchored --> FAIL
else:
raise ValueError(
'Cannot align two anchored times')
# =========================================================================
def pre_align(self, t1, t2, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
p -- [ t1 ] p [ t1 ]
⟍ ⟋
==> [ t ]
⟋ ⟍
p' -- [ t2 ] p' [ t2 ]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
t1 = T(t1)
t2 = T(t2)
t = T(t)
# make sure --[t1] incoming edges are empty
# because they're going to be removed afterwards,
# and we don't want to loose data
pred1 = self.predecessors(t1)
for p in pred1:
for key, data in self[p][t1].iteritems():
assert not data
# make sure --[t2] incoming edges are empty
# (for the same reason...)
pred2 = self.predecessors(t2)
for p in pred2:
for key, data in self[p][t2].iteritems():
assert not data
# let's get started (remove all incoming edges)
for p in pred1:
for key in list(self[p][t1]):
self.remove_edge(p, t1, key=key)
for p in pred2:
for key in list(self[p][t2]):
self.remove_edge(p, t2, key=key)
for p in set(pred1) | set(pred2):
self.add_edge(p, t)
self.add_edge(t, t1)
self.add_edge(t, t2)
def post_align(self, t1, t2, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[ t1 ] -- s [ t1 ] s
⟍ ⟋
==> [ t ]
⟋ ⟍
[ t2 ] -- s' [ t2 ] s'
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
t1 = T(t1)
t2 = T(t2)
t = T(t)
# make sure [t1]-- outgoing edges are empty
# because they're going to be removed afterwards,
# and we don't want to loose data
succ1 = self.successors(t1)
for s in succ1:
for key, data in self[t1][s].iteritems():
assert not data
# make sure --[t2] outgoing edges are empty
# (for the same reason...)
succ2 = self.successors(t2)
for s in succ2:
for key, data in self[t2][s].iteritems():
assert not data
# let's get started (remove all outgoing edges)
for s in succ1:
for key in list(self[t1][s]):
self.remove_edge(t1, s, key=key)
for s in succ2:
for key in list(self[t2][s]):
self.remove_edge(t2, s, key=key)
for s in set(succ1) | set(succ2):
self.add_edge(t, s)
self.add_edge(t1, t)
self.add_edge(t2, t)
# =========================================================================
def ordering_graph(self):
"""Ordering graph
t1 --> t2 in the ordering graph indicates that t1 happens before t2.
A missing edge simply means that it is not clear yet.
"""
g = nx.DiGraph()
# add times
for t in self.nodes_iter():
g.add_node(t)
# add existing edges
for t1, t2 in self.edges_iter():
g.add_edge(t1, t2)
# connect every pair of anchored times
anchored = sorted(self.anchored())
for t1, t2 in itertools.combinations(anchored, 2):
g.add_edge(t1, t2)
# connect every time with its sucessors
_g = g.copy()
for t1 in _g:
for t2 in set([target for (_, target) in nx.bfs_edges(_g, t1)]):
g.add_edge(t1, t2)
return g
def ordered_edges_iter(self, data=False, keys=False):
"""Return an iterator over the edges in temporal/topological order.
Ordered edges are returned as tuples with optional data and keys
in the order (t1, t2, key, data).
Parameters
----------
data : bool, optional (default=False)
If True, return edge attribute dict with each edge.
keys : bool, optional (default=False)
If True, return edge keys with each edge.
Returns
-------
edge_iter : iterator
An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
"""
# start by sorting nodes in temporal+topological order
o = self.ordering_graph()
nodes = nx.topological_sort(o)
# iterate over edges using this very order
for _ in self.edges_iter(nbunch=nodes, data=data, keys=keys):
yield _
# =========================================================================
def _anchored_successors(self, n):
"""Get all first anchored successors"""
# loop on all outgoing edges
for t in self.successors(n):
# if neighbor is anchored
# stop looking for (necessarily later) successors
if t.anchored:
yield t
continue
# if neighbor is not anchored
# look one level deeper
for tt in self._anchored_successors(t):
yield tt
def _anchored_predecessors(self, n):
"""Get all first anchored predecessors"""
# loop on all incoming edges
for t in self.predecessors(n):
# if predecessor is anchored
# stop looking for (necessarily earlier) predecessors
if t.anchored:
yield t
continue
# if neighbor is not anchored
# look one level deeper
for tt in self._anchored_predecessors(t):
yield tt
def timerange(self, t):
"""Infer smallest possible timerange from graph structure
Returns
-------
(left, right) tuple
left == None or right == None indicates that the current state of
the annotation graph does not allow to decide the boundary.
"""
t = T(t)
if t.anchored:
return (t.T, t.T)
successors = [n for n in self._anchored_successors(t)]
predecessors = [n for n in self._anchored_predecessors(t)]
earlier_successor = None
if successors:
earlier_successor = min(successors)
later_predecessor = None
if predecessors:
later_predecessor = max(predecessors)
return (later_predecessor.T, earlier_successor.T)
# =========================================================================
def for_json(self):
return {PYANNOTE_JSON_TRANSCRIPTION: node_link_data(self)}
@classmethod
def from_json(cls, data):
graph = node_link_graph(d | ook displays ===========================================
def _repr_svg_(self):
from notebook import repr_transcription
return repr_transcription(self)
| ata[PYANNOTE_JSON_TRANSCRIPTION])
mapping = {node: T(node) for node in graph}
graph = nx.relabel_nodes(graph, mapping)
return cls(graph=graph, **graph.graph)
# === IPython Noteb | identifier_body |
transcription.py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS (Hervé BREDIN - http://herve.niderb.fr)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
import networkx as nx
from networkx.readwrite.json_graph import node_link_data, node_link_graph
from time import T, TStart, TEnd
from segment import Segment
from json import PYANNOTE_JSON_TRANSCRIPTION
import itertools
class Transcription(nx.MultiDiGraph):
"""Transcription stored as annotation graph"""
def __init__(self, graph=None, **attrs):
super(Transcription, self).__init__(data=graph)
self.graph.update(attrs)
def drifting(self):
"""Get list of drifting times"""
return [n for n in self if n.drifting]
def anchored(self):
"""Get list of anchored times"""
return [n for n in self if n.anchored]
def add_edge(self, t1, t2, key=None, attr_dict=None, **attrs):
"""Add annotation to the graph between times t1 and t2
Parameters
----------
t1, t2: float, str or None
data : dict, optional
{annotation_type: annotation_value} dictionary
Example
-------
>>> G = Transcription()
>>> G.add_edge(T(1.), T(), speaker='John', 'speech'='Hello world!')
"""
t1 = T(t1)
t2 = T(t2)
# make sure Ts are connected in correct chronological order
if t1.anchored and t2.anchored:
assert t1 <= t2
super(Transcription, self).add_edge(t1, t2, key=key, attr_dict=attr_dict, **attrs)
def relabel_drifting_nodes(self, mapping=None):
"""Relabel drifting nodes
Parameters
----------
mapping : dict, optional
A dictionary with the old labels as keys and new labels as values.
Returns
-------
g : Transcription
New annotation graph
mapping : dict
A dictionary with the new labels as keys and old labels as values.
Can be used to get back to the version before relabelling.
"""
if mapping is None:
old2new = {n: T() for n in self.drifting()}
else:
old2new = dict(mapping)
new2old = {new: old for old, new in old2new.iteritems()}
return nx.relabel_nodes(self, old2new, copy=True), new2old
def crop(self, source, target=None):
"""Get subgraph between source and target
Parameters
----------
source : Segment,
target : float or str, optional
Returns
-------
g : Transcription
Sub-graph between source and target
"""
if isinstance(source, Segment):
source, target = source.start, source.end
source = T(source)
target = T(target)
if source.anchored:
before = [n for n in self.anchored() if n <= source]
if before:
source = sorted(before)[-1]
if target.anchored:
after = [n for n in self.anchored() if n >= target]
if after:
target = sorted(after)[0]
from_source = nx.algorithms.descendants(self, source)
to_target = nx.algorithms.ancestors(self, target)
nbunch = {source, target} | (from_source & to_target)
return self.subgraph(nbunch)
# =========================================================================
def _merge(self, drifting_t, another_t):
"""Helper function to merge `drifting_t` with `another_t`
Assumes that both `drifting_t` and `another_t` exists.
Also assumes that `drifting_t` is an instance of `TFloating`
(otherwise, this might lead to weird graph configuration)
Parameters
----------
drifting_t :
Existing drifting time in graph
another_t :
Existing time in graph
"""
# drifting_t and another_t must exist in graph
# add a (t --> another_t) edge for each (t --> drifting_t) edge
for t, _, key, data in self.in_edges_iter(
nbunch=[drifting_t], data=True, keys=True
):
self.add_edge(t, another_t, key=key, attr_dict=data)
# add a (another_t --> t) edge for each (drifting_t --> t) edge
for _, t, key, data in self.edges_iter(
nbunch=[drifting_t], data=True, keys=True
):
self.add_edge(another_t, t, key=key, attr_dict=data)
# remove drifting_t node (as it was replaced by another_t)
self.remove_node(drifting_t)
def anchor(self, drifting_t, anchored_t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ D ] -- o ==> o -- [ A ] -- o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Anchor `drifting_t` at `anchored_t`
Parameters
----------
drifting_t :
Drifting time to anchor
anchored_t :
When to anchor `drifting_t`
"""
drifting_t = T(drifting_t)
anchored_t = T(anchored_t)
assert (drifting_t in self) and (drifting_t.drifting)
assert anchored_t.anchored
if anchored_t not in self:
self.add_node(anchored_t)
self._merge(drifting_t, anchored_t)
def align(self, one_t, another_t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ F ] -- o o o
⟍ ⟋
==> [ F ]
⟋ ⟍
o -- [ f ] -- o o o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Align two (potentially drifting) times
`one_t` and `another_t` cannot both be anchored at the same time
In case `another_t` is anchored, this is similar to `anchor` method
Parameters
----------
one_t, another_t
Two times to be aligned.
"""
one_t = T(one_t)
another_t = T(another_t)
assert one_t in self
assert another_t in self
# first time is drifting
if one_t.drifting:
self._merge(one_t, another_t)
# second time is drifting
elif another_t.drifting:
self._merge(another_t, one_t)
# both times are anchored --> FAIL
else:
raise ValueError(
'Cannot align two anchored times')
# =========================================================================
def pre_align(self, t1, t2, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
p -- [ t1 ] p [ t1 ]
⟍ ⟋
==> [ t ]
⟋ ⟍
p' -- [ t2 ] p' [ t2 ]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
t1 = T(t1)
t2 = T(t2)
t = T(t)
# make sure --[t1] incoming edges are empty
# because they're going to be removed afterwards,
# and we don't want to loose data
pred1 = self.predecessors(t1)
for p in pred1:
for key, data in self[p][t1].iteritems():
assert not data
# make sure --[t2] incoming edges are empty
# (for the same reason...)
pred2 = self.predecessors(t2)
for p in pred2:
for key, data in self[p][t2].iteritems():
assert not data
# let's get started (remove all incoming edges)
for p in pred1:
for key in list(self[p][t1]):
self.remove_edge(p, t1, key=key)
for p in pred2:
for key in list(self[p][t2]):
self.remove_edge(p, t2, key=key)
for p in set(pred1) | set(pred2):
self.add_edge(p, t)
self.add_edge(t, t1)
self.add_edge(t, t2)
def post_align(self, t1, t2, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[ t1 ] -- s [ t1 ] s
⟍ ⟋
==> [ t ]
⟋ ⟍
[ t2 ] -- s' [ t2 ] s'
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
t1 = T(t1)
t2 = T(t2)
t = T(t)
# make sure [t1]-- outgoing edges are empty
# because they're going to be removed afterwards,
# and we don't want to loose data
succ1 = self.successors(t1)
for s in succ1:
for key, data in self[t1][s].iteritems():
assert not data
# make sure --[t2] outgoing edges are empty
# (for the same reason...)
succ2 = self.successors(t2)
for s in succ2:
for key, data in self[t2][s].iteritems():
assert not data
# let's get started (remove all outgoing edges)
for s in succ1:
for key in list(self[t1][s]):
self.remove_edge(t1, s, key=key)
for s in succ2:
for key in list(self[t2][s]):
self.remove_edge(t2, s, key=key)
for s in set(succ1) | set(succ2):
self.add_edge(t, s)
self.add_edge(t1, t)
self.add_edge(t2, t)
# =========================================================================
def ordering_graph(self):
"""Ordering graph
t1 --> t2 in the ordering graph indicates that t1 happens before t2.
A missing edge simply means that it is not clear yet.
"""
g = nx.DiGraph()
# add times | for t in self.nodes_iter():
g.add_node(t)
# add existing edges
for t1, t2 in self.edges_iter():
g.add_edge(t1, t2)
# connect every pair of anchored times
anchored = sorted(self.anchored())
for t1, t2 in itertools.combinations(anchored, 2):
g.add_edge(t1, t2)
# connect every time with its sucessors
_g = g.copy()
for t1 in _g:
for t2 in set([target for (_, target) in nx.bfs_edges(_g, t1)]):
g.add_edge(t1, t2)
return g
def ordered_edges_iter(self, data=False, keys=False):
"""Return an iterator over the edges in temporal/topological order.
Ordered edges are returned as tuples with optional data and keys
in the order (t1, t2, key, data).
Parameters
----------
data : bool, optional (default=False)
If True, return edge attribute dict with each edge.
keys : bool, optional (default=False)
If True, return edge keys with each edge.
Returns
-------
edge_iter : iterator
An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
"""
# start by sorting nodes in temporal+topological order
o = self.ordering_graph()
nodes = nx.topological_sort(o)
# iterate over edges using this very order
for _ in self.edges_iter(nbunch=nodes, data=data, keys=keys):
yield _
# =========================================================================
def _anchored_successors(self, n):
"""Get all first anchored successors"""
# loop on all outgoing edges
for t in self.successors(n):
# if neighbor is anchored
# stop looking for (necessarily later) successors
if t.anchored:
yield t
continue
# if neighbor is not anchored
# look one level deeper
for tt in self._anchored_successors(t):
yield tt
def _anchored_predecessors(self, n):
"""Get all first anchored predecessors"""
# loop on all incoming edges
for t in self.predecessors(n):
# if predecessor is anchored
# stop looking for (necessarily earlier) predecessors
if t.anchored:
yield t
continue
# if neighbor is not anchored
# look one level deeper
for tt in self._anchored_predecessors(t):
yield tt
def timerange(self, t):
"""Infer smallest possible timerange from graph structure
Returns
-------
(left, right) tuple
left == None or right == None indicates that the current state of
the annotation graph does not allow to decide the boundary.
"""
t = T(t)
if t.anchored:
return (t.T, t.T)
successors = [n for n in self._anchored_successors(t)]
predecessors = [n for n in self._anchored_predecessors(t)]
earlier_successor = None
if successors:
earlier_successor = min(successors)
later_predecessor = None
if predecessors:
later_predecessor = max(predecessors)
return (later_predecessor.T, earlier_successor.T)
# =========================================================================
def for_json(self):
return {PYANNOTE_JSON_TRANSCRIPTION: node_link_data(self)}
@classmethod
def from_json(cls, data):
graph = node_link_graph(data[PYANNOTE_JSON_TRANSCRIPTION])
mapping = {node: T(node) for node in graph}
graph = nx.relabel_nodes(graph, mapping)
return cls(graph=graph, **graph.graph)
# === IPython Notebook displays ===========================================
def _repr_svg_(self):
from notebook import repr_transcription
return repr_transcription(self) | random_line_split | |
transcription.py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS (Hervé BREDIN - http://herve.niderb.fr)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
import networkx as nx
from networkx.readwrite.json_graph import node_link_data, node_link_graph
from time import T, TStart, TEnd
from segment import Segment
from json import PYANNOTE_JSON_TRANSCRIPTION
import itertools
class Transcription(nx.MultiDiGraph):
"""Transcription stored as annotation graph"""
def __init__(self, graph=None, **attrs):
super(Transcription, self).__init__(data=graph)
self.graph.update(attrs)
def drifting(self):
"""Get list of drifting times"""
return [n for n in self if n.drifting]
def anchored(self):
"""Get list of anchored times"""
return [n for n in self if n.anchored]
def add_edge(self, t1, t2, key=None, attr_dict=None, **attrs):
"""Add annotation to the graph between times t1 and t2
Parameters
----------
t1, t2: float, str or None
data : dict, optional
{annotation_type: annotation_value} dictionary
Example
-------
>>> G = Transcription()
>>> G.add_edge(T(1.), T(), speaker='John', 'speech'='Hello world!')
"""
t1 = T(t1)
t2 = T(t2)
# make sure Ts are connected in correct chronological order
if t1.anchored and t2.anchored:
assert t1 <= t2
super(Transcription, self).add_edge(t1, t2, key=key, attr_dict=attr_dict, **attrs)
def relabel_drifting_nodes(self, mapping=None):
"""Relabel drifting nodes
Parameters
----------
mapping : dict, optional
A dictionary with the old labels as keys and new labels as values.
Returns
-------
g : Transcription
New annotation graph
mapping : dict
A dictionary with the new labels as keys and old labels as values.
Can be used to get back to the version before relabelling.
"""
if mapping is None:
old2new = {n: T() for n in self.drifting()}
else:
old2new = dict(mapping)
new2old = {new: old for old, new in old2new.iteritems()}
return nx.relabel_nodes(self, old2new, copy=True), new2old
def crop(self, source, target=None):
"""Get subgraph between source and target
Parameters
----------
source : Segment,
target : float or str, optional
Returns
-------
g : Transcription
Sub-graph between source and target
"""
if isinstance(source, Segment):
source, target = source.start, source.end
source = T(source)
target = T(target)
if source.anchored:
before = [n for n in self.anchored() if n <= source]
if before:
source = sorted(before)[-1]
if target.anchored:
after = [n for n in self.anchored() if n >= target]
if after:
target = sorted(after)[0]
from_source = nx.algorithms.descendants(self, source)
to_target = nx.algorithms.ancestors(self, target)
nbunch = {source, target} | (from_source & to_target)
return self.subgraph(nbunch)
# =========================================================================
def _merge(self, drifting_t, another_t):
"""Helper function to merge `drifting_t` with `another_t`
Assumes that both `drifting_t` and `another_t` exists.
Also assumes that `drifting_t` is an instance of `TFloating`
(otherwise, this might lead to weird graph configuration)
Parameters
----------
drifting_t :
Existing drifting time in graph
another_t :
Existing time in graph
"""
# drifting_t and another_t must exist in graph
# add a (t --> another_t) edge for each (t --> drifting_t) edge
for t, _, key, data in self.in_edges_iter(
nbunch=[drifting_t], data=True, keys=True
):
self.add_edge(t, another_t, key=key, attr_dict=data)
# add a (another_t --> t) edge for each (drifting_t --> t) edge
for _, t, key, data in self.edges_iter(
nbunch=[drifting_t], data=True, keys=True
):
self.add_edge(another_t, t, key=key, attr_dict=data)
# remove drifting_t node (as it was replaced by another_t)
self.remove_node(drifting_t)
def anchor(self, drifting_t, anchored_t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ D ] -- o ==> o -- [ A ] -- o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Anchor `drifting_t` at `anchored_t`
Parameters
----------
drifting_t :
Drifting time to anchor
anchored_t :
When to anchor `drifting_t`
"""
drifting_t = T(drifting_t)
anchored_t = T(anchored_t)
assert (drifting_t in self) and (drifting_t.drifting)
assert anchored_t.anchored
if anchored_t not in self:
self.add_node(anchored_t)
self._merge(drifting_t, anchored_t)
def align(self, one_t, another_t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ F ] -- o o o
⟍ ⟋
==> [ F ]
⟋ ⟍
o -- [ f ] -- o o o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Align two (potentially drifting) times
`one_t` and `another_t` cannot both be anchored at the same time
In case `another_t` is anchored, this is similar to `anchor` method
Parameters
----------
one_t, another_t
Two times to be aligned.
"""
one_t = T(one_t)
another_t = T(another_t)
assert one_t in self
assert another_t in self
# first time is drifting
if one_t.drifting:
self._merge(one_t, another_t)
# second time is drifting
elif another_t.drifting:
self._merge(another_t, one_t)
# both times are anchored --> FAIL
else:
raise ValueError(
'Cannot align two anchored times')
# =========================================================================
def pre_align(self, t1, t2, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
p -- [ t1 ] p [ t1 ]
⟍ ⟋
==> [ t ]
⟋ ⟍
p' -- [ t2 ] p' [ t2 ]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
t1 = T(t1)
t2 = T(t2)
t = T(t)
# make sure --[t1] incoming edges are empty
# because they're going to be removed afterwards,
# and we don't want to loose data
pred1 = self.predecessors(t1)
for p in pred1:
for key, data in self[p][t1].iteritems():
assert not data
# make sure --[t2] incoming edges are empty
# (for the same reason...)
pred2 = self.predecessors(t2)
for p in pred2:
for key, data in self[p][t2].iteritems():
assert not data
# let's get started (remove all incoming edges)
for p in pred1:
for key in list(self[p][t1]):
self.remove_edge(p, t1, key=key)
for p in pred2:
for key in list(self[p][t2]):
self.remove_edge(p, t2, key=key)
for p in set(pred1) | set(pred2):
self.add_edge(p, t)
self.add_edge(t, t1)
self.add_edge(t, t2)
def post_align(self, t1, t2, t):
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[ t1 ] -- s [ t1 ] s
⟍ ⟋
==> [ t ]
⟋ ⟍
[ t2 ] -- s' [ t2 ] s'
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
t1 = T(t1)
t2 = T(t2)
t = T(t)
# make sure [t1]-- outgoing edges are empty
# because they're going to be removed afterwards,
# and we don't want to loose data
succ1 = self.successors(t1)
for s in succ1:
for key, data in self[t1][s].iteritems():
assert not data
# make sure --[t2] outgoing edges are empty
# (for the same reason...)
succ2 = self.successors(t2)
for s in succ2:
for key, data in self[t2][s].iteritems():
assert not data
# let's get started (remove all outgoing edges)
for s in succ1:
for key in list(self[t1][s]):
self.remove_edge(t1, s, key=key)
for s in succ2:
for key in list(self[t2][s]):
self.remove_edge(t2, s, key=key)
for s in set(succ1) | set(succ2):
self.add_edge(t, s)
self.add_edge(t1, t)
self.add_edge(t2, t)
# =========================================================================
def ordering_graph(self):
"""Ordering graph
t1 --> t2 in the ordering graph indicates that t1 happens before t2.
A missing edge simply means that it is not clear yet.
"""
g = nx.DiGraph()
# add times
for t in self.nodes_iter():
g.add_node(t)
# add existing edges
for t1, t2 in self.edges_iter():
g.add_edge(t1, t2)
# connect every pair of anchored times
anchored = sorted(self.anchored())
for t1, t2 in itertools.combinations(anchored, 2):
g.add_edge(t1, t2)
# connect every time with its sucessors
_g = g.copy()
for t1 in _g:
for t2 in set([target for (_, target) in nx.bfs_edges(_g, t1)]):
g.add_edge(t1, t2)
return g
def ordered_edges_iter(self, data=False, keys=False):
"""Return an iterator over the edges in temporal/topological order.
Ordered edges are returned as tuples with optional data and keys
in the order (t1, t2, key, data).
Parameters
----------
data : bool, optional (default=False)
If True, return edge attribute dict with each edge.
keys : bool, optional (default=False)
If True, return edge keys with each edge.
Returns
-------
edge_iter : iterator
An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
"""
# start by sorting nodes in temporal+topological order
o = self.ordering_graph()
nodes = nx.topological_sort(o)
# iterate over edges using this very order
for _ in self.edges_iter(nbunch=nodes, data=data, keys=keys):
yield _
# =========================================================================
def _anchored_successors(self, n):
"""Get all first anchored successors"""
# loop on all outgoing edges
for t in self.successors(n):
# if neighbor is anchored
# stop looking for (necessarily later) successors
if t.anchored:
yield t
continue
# if neighbor is not anchored
# look one level deeper
for tt in self._anchored_successors(t):
yield tt
def _anchored_predecessors(self, n):
"""Get all first anchored predecessors"""
# loop on all incoming edges
for t in self.predecessors(n):
# if predecessor is anchored
# stop looking for (necessarily earlier) predecessors
if t.anchored:
yield t
continue
# if neighbor is not anchored
# look one level deeper
for tt in self._anchored_predecessors(t):
yield tt
def timerange(self, t):
"""Infer smallest possible timerange from graph structure
Returns
-------
(left, right) tuple
left == None or right == None indicates that the current state of
the annotation graph does not allow to decide the boundary.
"""
t = T(t)
if t.anchored:
return (t.T, t.T)
successors = [n for n in self._anchored_successors(t)]
predecessors = [n for n in self._anchored_predecessors(t)]
earlier_successor = None
if successors:
earlier_successor = min(successors)
later_predecessor = None
if predecessors:
later_predecessor = max(predecessors)
return (later_predecessor.T, earlier_successor.T)
# =========================================================================
def for_json(self):
return {PYANNOTE_JSON_TRANSCRIPTION: node_link_data(self)}
@classmethod
def from_json(cls, data):
graph = node_link_graph(data[PYANNOTE_JSON_TRANSCRIPTION])
mapping = {node: T(node) for node in graph}
graph = nx.relabel_nodes(graph, mapping)
return cls(graph=graph, **graph.graph)
# === IPython Notebook displays ===========================================
def _repr_svg_(self):
| book import repr_transcription
return repr_transcription(self)
| from note | identifier_name |
ccm.rs | // Implementation of the AEAD CCM cipher as described in:
// https://datatracker.ietf.org/doc/html/rfc3610
//
// NOTE: Currently only fixed block sizes are supported.
use core::result::Result;
use common::ceil_div;
use crate::constant_eq;
use crate::utils::xor_inplace;
/// Number of bytes in an AES-128 key.
pub const KEY_SIZE: usize = 16;
const BLOCK_SIZE: usize = 16; // 128-bit blocks
/// A buffer for encrypting blocks with some cipher.
///
/// This object should consist of a 'plaintext' block buffer to be filled by the
/// user with the unencrypted block and an indepedent 'ciphertext' block buffer
/// that will continue the encrypted form of the 'plaintext' buffer upon
/// request.
pub trait BlockCipherBuffer {
fn plaintext(&self) -> &[u8; BLOCK_SIZE];
fn plaintext_mut(&mut self) -> &mut [u8; BLOCK_SIZE];
fn ciphertext(&self) -> &[u8; BLOCK_SIZE];
fn plaintext_mut_ciphertext(&mut self) -> (&mut [u8; BLOCK_SIZE], &[u8; BLOCK_SIZE]);
/// Should encrypt the plaintext buffer writing the result into the internal
/// ciphertext buffer.
///
/// MUST NOT modify the plaintext buffer.
fn encrypt(&mut self);
}
pub struct | <Block, Nonce> {
block: Block,
tag_size: usize,
length_size: usize,
nonce: Nonce,
}
impl<Block: BlockCipherBuffer, Nonce: AsRef<[u8]>> CCM<Block, Nonce> {
///
/// Arguments:
/// - block:
/// - tag_size: Number of bytes used to store the message authentication
/// tag.
/// - length_size: Number of bytes used to represent the message length.
/// - nonce:
pub fn new(block: Block, tag_size: usize, length_size: usize, nonce: Nonce) -> Self {
let nonce_size = 15 - length_size;
assert_eq!(nonce_size, nonce.as_ref().len());
Self {
block,
tag_size,
length_size,
nonce,
}
}
/// Performs in-place encryption of the given message.
///
/// Arguments:
/// - message: A buffer of size plaintext_length + tag_size. The final
/// tag_size bytes can be initially to any value.
/// - additional_data: Additional data which will be considered while making
/// the CBC-MAC.
pub fn encrypt_inplace(&mut self, message: &mut [u8], additional_data: &[u8]) {
assert!(message.len() >= self.tag_size);
let (plaintext, tag) = message.split_at_mut(message.len() - self.tag_size);
self.compute_cbc_mac(plaintext, additional_data, tag);
self.apply_ctr_mode(plaintext, tag);
}
/// Performs in-place decryption of the given message.
///
/// Arguments:
/// - message: The buffer consisting of an encrypted plaintext + a tag. This
/// is also the data generated by encrypt_inplace.
///
/// Returns: If the MIC is valid, returns a pointer to the plaintext part of
/// the buffer else returns an error.
pub fn decrypt_inplace<'b>(
&mut self,
message: &'b mut [u8],
additional_data: &[u8],
) -> Result<&'b [u8], ()> {
if message.len() < self.tag_size {
return Err(());
}
let (ciphertext, tag) = message.split_at_mut(message.len() - self.tag_size);
self.apply_ctr_mode(ciphertext, tag);
// Buffer to store the computed tag (the tag can be at most BLOCK_SIZE bytes in
// length).
let mut expected_tag_buf = [0u8; BLOCK_SIZE];
let expected_tag = &mut expected_tag_buf[0..self.tag_size];
self.compute_cbc_mac(ciphertext, additional_data, expected_tag);
// TODO: Use a constant time comparison function
if expected_tag != tag {
return Err(());
}
Ok(ciphertext)
}
fn compute_cbc_mac(&mut self, plaintext: &[u8], mut additional_data: &[u8], tag: &mut [u8]) {
// Set up B_0 for CBC-MAC
self.copy_nonce_to_block();
self.setup_for_cbc_mac(!additional_data.is_empty(), plaintext.len());
// Generate X_1 into self.block.ciphertext
self.block.encrypt();
if !additional_data.is_empty() {
// Construct B_i containing the length of the block.
{
let mut remaining = &mut self.block.plaintext_mut()[..];
if additional_data.len() < ((1 << 16) - (1 << 8)) {
*array_mut_ref![remaining, 0, 2] = (additional_data.len() as u16).to_be_bytes();
remaining = &mut remaining[2..];
} else if additional_data.len() <= (core::u32::MAX as usize) {
remaining[0] = 0xFF;
remaining[1] = 0xFE;
*array_mut_ref![remaining, 2, 4] = (additional_data.len() as u32).to_be_bytes();
remaining = &mut remaining[6..];
} else {
remaining[0] = 0xFF;
remaining[1] = 0xFF;
*array_mut_ref![remaining, 2, 8] = (additional_data.len() as u64).to_be_bytes();
remaining = &mut remaining[10..];
}
let n = core::cmp::min(remaining.len(), additional_data.len());
remaining[0..n].copy_from_slice(&additional_data[0..n]);
additional_data = &additional_data[n..];
for i in n..remaining.len() {
remaining[i] = 0;
}
}
// Store 'X_i XOR B_i' into self.block.plaintext.
{
let (p, c) = self.block.plaintext_mut_ciphertext();
xor16_inplace(c, p);
}
// Encrypt 'X_i XOR B_i' to get X_i+1
self.block.encrypt();
// Construct remaining additional data blocks.
self.append_cbc_mac_data_blocks(additional_data);
}
self.append_cbc_mac_data_blocks(plaintext);
// Get raw unencrypted tag by taking first tag_size bytes.
tag.copy_from_slice(&self.block.ciphertext()[0..tag.len()]);
}
fn append_cbc_mac_data_blocks(&mut self, mut input: &[u8]) {
while input.len() > 0 {
let (plaintext, ciphertext) = self.block.plaintext_mut_ciphertext();
// Set the data.plaintext to B_i (the next input block padded with zeros).
let n = core::cmp::min(input.len(), BLOCK_SIZE);
plaintext[0..n].copy_from_slice(&input[0..n]);
input = &input[n..];
for i in n..BLOCK_SIZE {
plaintext[i] = 0;
}
// TODO: Deduplicate these two lines.
// Perform 'X_i XOR B_i'
xor16_inplace(ciphertext, plaintext);
// Encrypt 'X_i XOR B_i' to get X_i+1
self.block.encrypt();
}
}
/// Applies CTR encryption to the given plaintext and tag encrpyting them
/// in-place.
fn apply_ctr_mode(&mut self, plaintext: &mut [u8], tag: &mut [u8]) {
// Setup A_0
self.copy_nonce_to_block();
self.setup_for_ctr_enc(0);
// Generate S_0
self.block.encrypt();
// Encrypt tag as 'T XOR first-M-bytes( S_0 )' and write to the end of the
// output buffer.
xor_inplace(&self.block.ciphertext()[0..tag.len()], tag);
for i in 0..ceil_div(plaintext.len(), BLOCK_SIZE) {
let counter = i + 1;
// Setup A_(i + 1)
self.setup_for_ctr_enc(counter);
// Generate S_(i + 1)
self.block.encrypt();
let start_i = i * BLOCK_SIZE;
let end_i = core::cmp::min(plaintext.len(), start_i + BLOCK_SIZE);
xor_inplace(
&self.block.ciphertext()[0..(end_i - start_i)],
&mut plaintext[start_i..end_i],
);
}
}
fn copy_nonce_to_block(&mut self) {
self.block.plaintext_mut()[1..(1 + self.nonce.as_ref().len())]
.copy_from_slice(&self.nonce.as_ref()[..]);
}
fn setup_for_cbc_mac(&mut self, has_additional_data: bool, length: usize) {
let block = self.block.plaintext_mut();
block[0] = (if has_additional_data { 1 } else { 0 }) << 6
| ((self.tag_size as u8 - 2) / 2) << 3
| (self.length_size as u8 - 1);
if self.length_size == 2 {
*array_mut_ref![block, block.len() - 2, 2] = (length as u16).to_be_bytes();
} else {
todo!();
}
}
fn setup_for_ctr_enc(&mut self, counter: usize) {
let block = self.block.plaintext_mut();
block[0] = (self.length_size as u8 - 1);
if self.length_size == 2 {
*array_mut_ref![block, block.len() - 2, 2] = (counter as u16).to_be_bytes();
} else {
todo!();
}
}
}
fn xor16_inplace(a: &[u8; BLOCK_SIZE], b: &mut [u8; BLOCK_SIZE]) {
for i in 0..(BLOCK_SIZE / 4) {
let a_ref = array_ref![a, 4 * i, 4];
let b_ref = array_mut_ref![b, 4 * i, 4];
*b_ref = (u32::from_ne_bytes(*a_ref) ^ u32::from_ne_bytes(*b_ref)).to_ne_bytes();
}
}
#[cfg(feature = "std")]
pub mod aes {
use super::*;
use crate::{aes::AESBlockCipher, cipher::BlockCipher};
pub struct AES128BlockEncryptor {
cipher: AESBlockCipher,
plaintext: [u8; BLOCK_SIZE],
ciphertext: [u8; BLOCK_SIZE],
}
impl AES128BlockEncryptor {
pub fn new(key: &[u8]) -> Self {
assert_eq!(key.len(), KEY_SIZE);
Self {
cipher: AESBlockCipher::create(key).unwrap(),
plaintext: [0u8; BLOCK_SIZE],
ciphertext: [0u8; BLOCK_SIZE],
}
}
}
impl BlockCipherBuffer for AES128BlockEncryptor {
fn plaintext(&self) -> &[u8; BLOCK_SIZE] {
&self.plaintext
}
fn plaintext_mut(&mut self) -> &mut [u8; BLOCK_SIZE] {
&mut self.plaintext
}
fn plaintext_mut_ciphertext(&mut self) -> (&mut [u8; BLOCK_SIZE], &[u8; BLOCK_SIZE]) {
(&mut self.plaintext, &self.ciphertext)
}
fn encrypt(&mut self) {
self.cipher
.encrypt_block(&self.plaintext, &mut self.ciphertext);
}
fn ciphertext(&self) -> &[u8; BLOCK_SIZE] {
&self.ciphertext
}
}
}
#[cfg(test)]
mod tests {
use super::aes::*;
use super::*;
// Test vectors from the RFC
#[test]
fn works() {
let key = hex!("C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF");
let nonce = hex!("00000003020100A0A1A2A3A4A5");
let mut data = hex!("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E");
let tag_size = 8;
let length_size = 2;
let expected_ciphertext =
hex!("588C979A61C663D2F066D0C2C0F989806D5F6B61DAC38417E8D12CFDF926E0");
let aad = &data[0..8];
let mut plaintext = data[8..].to_vec();
plaintext.resize(plaintext.len() + tag_size, 0);
let mut ccm = CCM::new(
AES128BlockEncryptor::new(&key),
tag_size,
length_size,
array_ref![&nonce, 0, 13],
);
ccm.encrypt_inplace(&mut plaintext, &aad);
assert_eq!(&plaintext, &expected_ciphertext);
println!("{:02x?}", &plaintext);
}
}
| CCM | identifier_name |
ccm.rs | // Implementation of the AEAD CCM cipher as described in:
// https://datatracker.ietf.org/doc/html/rfc3610
//
// NOTE: Currently only fixed block sizes are supported.
use core::result::Result;
use common::ceil_div;
use crate::constant_eq;
use crate::utils::xor_inplace;
/// Number of bytes in an AES-128 key.
pub const KEY_SIZE: usize = 16;
const BLOCK_SIZE: usize = 16; // 128-bit blocks
/// A buffer for encrypting blocks with some cipher.
///
/// This object should consist of a 'plaintext' block buffer to be filled by the
/// user with the unencrypted block and an indepedent 'ciphertext' block buffer
/// that will continue the encrypted form of the 'plaintext' buffer upon
/// request.
pub trait BlockCipherBuffer {
fn plaintext(&self) -> &[u8; BLOCK_SIZE];
fn plaintext_mut(&mut self) -> &mut [u8; BLOCK_SIZE];
fn ciphertext(&self) -> &[u8; BLOCK_SIZE];
fn plaintext_mut_ciphertext(&mut self) -> (&mut [u8; BLOCK_SIZE], &[u8; BLOCK_SIZE]);
/// Should encrypt the plaintext buffer writing the result into the internal
/// ciphertext buffer.
///
/// MUST NOT modify the plaintext buffer.
fn encrypt(&mut self);
}
pub struct CCM<Block, Nonce> {
block: Block,
tag_size: usize,
length_size: usize,
nonce: Nonce,
}
impl<Block: BlockCipherBuffer, Nonce: AsRef<[u8]>> CCM<Block, Nonce> {
///
/// Arguments:
/// - block:
/// - tag_size: Number of bytes used to store the message authentication
/// tag.
/// - length_size: Number of bytes used to represent the message length.
/// - nonce:
pub fn new(block: Block, tag_size: usize, length_size: usize, nonce: Nonce) -> Self {
let nonce_size = 15 - length_size;
assert_eq!(nonce_size, nonce.as_ref().len());
Self {
block,
tag_size,
length_size,
nonce,
}
}
/// Performs in-place encryption of the given message.
///
/// Arguments:
/// - message: A buffer of size plaintext_length + tag_size. The final
/// tag_size bytes can be initially to any value.
/// - additional_data: Additional data which will be considered while making
/// the CBC-MAC.
pub fn encrypt_inplace(&mut self, message: &mut [u8], additional_data: &[u8]) {
assert!(message.len() >= self.tag_size);
let (plaintext, tag) = message.split_at_mut(message.len() - self.tag_size);
self.compute_cbc_mac(plaintext, additional_data, tag);
self.apply_ctr_mode(plaintext, tag);
}
/// Performs in-place decryption of the given message.
///
/// Arguments:
/// - message: The buffer consisting of an encrypted plaintext + a tag. This
/// is also the data generated by encrypt_inplace.
///
/// Returns: If the MIC is valid, returns a pointer to the plaintext part of
/// the buffer else returns an error.
pub fn decrypt_inplace<'b>(
&mut self,
message: &'b mut [u8],
additional_data: &[u8],
) -> Result<&'b [u8], ()> {
if message.len() < self.tag_size {
return Err(());
}
let (ciphertext, tag) = message.split_at_mut(message.len() - self.tag_size);
self.apply_ctr_mode(ciphertext, tag);
// Buffer to store the computed tag (the tag can be at most BLOCK_SIZE bytes in
// length).
let mut expected_tag_buf = [0u8; BLOCK_SIZE];
let expected_tag = &mut expected_tag_buf[0..self.tag_size];
self.compute_cbc_mac(ciphertext, additional_data, expected_tag);
// TODO: Use a constant time comparison function
if expected_tag != tag {
return Err(());
}
Ok(ciphertext)
}
fn compute_cbc_mac(&mut self, plaintext: &[u8], mut additional_data: &[u8], tag: &mut [u8]) {
// Set up B_0 for CBC-MAC
self.copy_nonce_to_block();
self.setup_for_cbc_mac(!additional_data.is_empty(), plaintext.len());
// Generate X_1 into self.block.ciphertext
self.block.encrypt();
if !additional_data.is_empty() {
// Construct B_i containing the length of the block.
{
let mut remaining = &mut self.block.plaintext_mut()[..];
if additional_data.len() < ((1 << 16) - (1 << 8)) {
*array_mut_ref![remaining, 0, 2] = (additional_data.len() as u16).to_be_bytes();
remaining = &mut remaining[2..];
} else if additional_data.len() <= (core::u32::MAX as usize) {
remaining[0] = 0xFF;
remaining[1] = 0xFE;
*array_mut_ref![remaining, 2, 4] = (additional_data.len() as u32).to_be_bytes();
remaining = &mut remaining[6..];
} else {
remaining[0] = 0xFF;
remaining[1] = 0xFF;
*array_mut_ref![remaining, 2, 8] = (additional_data.len() as u64).to_be_bytes();
remaining = &mut remaining[10..];
}
let n = core::cmp::min(remaining.len(), additional_data.len());
remaining[0..n].copy_from_slice(&additional_data[0..n]);
additional_data = &additional_data[n..];
for i in n..remaining.len() {
remaining[i] = 0;
}
}
// Store 'X_i XOR B_i' into self.block.plaintext.
{
let (p, c) = self.block.plaintext_mut_ciphertext();
xor16_inplace(c, p);
}
// Encrypt 'X_i XOR B_i' to get X_i+1
self.block.encrypt();
// Construct remaining additional data blocks.
self.append_cbc_mac_data_blocks(additional_data);
}
self.append_cbc_mac_data_blocks(plaintext);
// Get raw unencrypted tag by taking first tag_size bytes.
tag.copy_from_slice(&self.block.ciphertext()[0..tag.len()]);
}
fn append_cbc_mac_data_blocks(&mut self, mut input: &[u8]) {
while input.len() > 0 {
let (plaintext, ciphertext) = self.block.plaintext_mut_ciphertext();
// Set the data.plaintext to B_i (the next input block padded with zeros).
let n = core::cmp::min(input.len(), BLOCK_SIZE);
plaintext[0..n].copy_from_slice(&input[0..n]);
input = &input[n..];
for i in n..BLOCK_SIZE {
plaintext[i] = 0;
}
// TODO: Deduplicate these two lines.
// Perform 'X_i XOR B_i'
xor16_inplace(ciphertext, plaintext);
// Encrypt 'X_i XOR B_i' to get X_i+1
self.block.encrypt();
}
}
/// Applies CTR encryption to the given plaintext and tag encrpyting them
/// in-place.
fn apply_ctr_mode(&mut self, plaintext: &mut [u8], tag: &mut [u8]) {
// Setup A_0
self.copy_nonce_to_block();
self.setup_for_ctr_enc(0);
// Generate S_0
self.block.encrypt();
// Encrypt tag as 'T XOR first-M-bytes( S_0 )' and write to the end of the
// output buffer.
xor_inplace(&self.block.ciphertext()[0..tag.len()], tag);
for i in 0..ceil_div(plaintext.len(), BLOCK_SIZE) {
let counter = i + 1;
// Setup A_(i + 1)
self.setup_for_ctr_enc(counter);
// Generate S_(i + 1)
self.block.encrypt();
let start_i = i * BLOCK_SIZE;
let end_i = core::cmp::min(plaintext.len(), start_i + BLOCK_SIZE);
xor_inplace(
&self.block.ciphertext()[0..(end_i - start_i)],
&mut plaintext[start_i..end_i],
);
}
}
fn copy_nonce_to_block(&mut self) {
self.block.plaintext_mut()[1..(1 + self.nonce.as_ref().len())]
.copy_from_slice(&self.nonce.as_ref()[..]);
}
fn setup_for_cbc_mac(&mut self, has_additional_data: bool, length: usize) {
let block = self.block.plaintext_mut();
block[0] = (if has_additional_data | else { 0 }) << 6
| ((self.tag_size as u8 - 2) / 2) << 3
| (self.length_size as u8 - 1);
if self.length_size == 2 {
*array_mut_ref![block, block.len() - 2, 2] = (length as u16).to_be_bytes();
} else {
todo!();
}
}
fn setup_for_ctr_enc(&mut self, counter: usize) {
let block = self.block.plaintext_mut();
block[0] = (self.length_size as u8 - 1);
if self.length_size == 2 {
*array_mut_ref![block, block.len() - 2, 2] = (counter as u16).to_be_bytes();
} else {
todo!();
}
}
}
fn xor16_inplace(a: &[u8; BLOCK_SIZE], b: &mut [u8; BLOCK_SIZE]) {
for i in 0..(BLOCK_SIZE / 4) {
let a_ref = array_ref![a, 4 * i, 4];
let b_ref = array_mut_ref![b, 4 * i, 4];
*b_ref = (u32::from_ne_bytes(*a_ref) ^ u32::from_ne_bytes(*b_ref)).to_ne_bytes();
}
}
#[cfg(feature = "std")]
pub mod aes {
use super::*;
use crate::{aes::AESBlockCipher, cipher::BlockCipher};
pub struct AES128BlockEncryptor {
cipher: AESBlockCipher,
plaintext: [u8; BLOCK_SIZE],
ciphertext: [u8; BLOCK_SIZE],
}
impl AES128BlockEncryptor {
pub fn new(key: &[u8]) -> Self {
assert_eq!(key.len(), KEY_SIZE);
Self {
cipher: AESBlockCipher::create(key).unwrap(),
plaintext: [0u8; BLOCK_SIZE],
ciphertext: [0u8; BLOCK_SIZE],
}
}
}
impl BlockCipherBuffer for AES128BlockEncryptor {
fn plaintext(&self) -> &[u8; BLOCK_SIZE] {
&self.plaintext
}
fn plaintext_mut(&mut self) -> &mut [u8; BLOCK_SIZE] {
&mut self.plaintext
}
fn plaintext_mut_ciphertext(&mut self) -> (&mut [u8; BLOCK_SIZE], &[u8; BLOCK_SIZE]) {
(&mut self.plaintext, &self.ciphertext)
}
fn encrypt(&mut self) {
self.cipher
.encrypt_block(&self.plaintext, &mut self.ciphertext);
}
fn ciphertext(&self) -> &[u8; BLOCK_SIZE] {
&self.ciphertext
}
}
}
#[cfg(test)]
mod tests {
use super::aes::*;
use super::*;
// Test vectors from the RFC
#[test]
fn works() {
let key = hex!("C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF");
let nonce = hex!("00000003020100A0A1A2A3A4A5");
let mut data = hex!("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E");
let tag_size = 8;
let length_size = 2;
let expected_ciphertext =
hex!("588C979A61C663D2F066D0C2C0F989806D5F6B61DAC38417E8D12CFDF926E0");
let aad = &data[0..8];
let mut plaintext = data[8..].to_vec();
plaintext.resize(plaintext.len() + tag_size, 0);
let mut ccm = CCM::new(
AES128BlockEncryptor::new(&key),
tag_size,
length_size,
array_ref![&nonce, 0, 13],
);
ccm.encrypt_inplace(&mut plaintext, &aad);
assert_eq!(&plaintext, &expected_ciphertext);
println!("{:02x?}", &plaintext);
}
}
| { 1 } | conditional_block |
ccm.rs | // Implementation of the AEAD CCM cipher as described in:
// https://datatracker.ietf.org/doc/html/rfc3610
//
// NOTE: Currently only fixed block sizes are supported.
use core::result::Result;
use common::ceil_div;
use crate::constant_eq;
use crate::utils::xor_inplace;
/// Number of bytes in an AES-128 key.
pub const KEY_SIZE: usize = 16;
const BLOCK_SIZE: usize = 16; // 128-bit blocks
/// A buffer for encrypting blocks with some cipher.
///
/// This object should consist of a 'plaintext' block buffer to be filled by the
/// user with the unencrypted block and an indepedent 'ciphertext' block buffer
/// that will continue the encrypted form of the 'plaintext' buffer upon
/// request.
pub trait BlockCipherBuffer {
fn plaintext(&self) -> &[u8; BLOCK_SIZE];
fn plaintext_mut(&mut self) -> &mut [u8; BLOCK_SIZE];
fn ciphertext(&self) -> &[u8; BLOCK_SIZE];
fn plaintext_mut_ciphertext(&mut self) -> (&mut [u8; BLOCK_SIZE], &[u8; BLOCK_SIZE]);
/// Should encrypt the plaintext buffer writing the result into the internal
/// ciphertext buffer.
///
/// MUST NOT modify the plaintext buffer.
fn encrypt(&mut self);
}
pub struct CCM<Block, Nonce> {
block: Block,
tag_size: usize,
length_size: usize,
nonce: Nonce,
}
impl<Block: BlockCipherBuffer, Nonce: AsRef<[u8]>> CCM<Block, Nonce> {
///
/// Arguments:
/// - block:
/// - tag_size: Number of bytes used to store the message authentication
/// tag.
/// - length_size: Number of bytes used to represent the message length.
/// - nonce:
pub fn new(block: Block, tag_size: usize, length_size: usize, nonce: Nonce) -> Self {
let nonce_size = 15 - length_size;
assert_eq!(nonce_size, nonce.as_ref().len());
Self {
block,
tag_size,
length_size,
nonce,
}
}
/// Performs in-place encryption of the given message.
///
/// Arguments:
/// - message: A buffer of size plaintext_length + tag_size. The final
/// tag_size bytes can be initially to any value.
/// - additional_data: Additional data which will be considered while making
/// the CBC-MAC.
pub fn encrypt_inplace(&mut self, message: &mut [u8], additional_data: &[u8]) {
assert!(message.len() >= self.tag_size);
let (plaintext, tag) = message.split_at_mut(message.len() - self.tag_size);
self.compute_cbc_mac(plaintext, additional_data, tag);
self.apply_ctr_mode(plaintext, tag);
}
/// Performs in-place decryption of the given message.
///
/// Arguments:
/// - message: The buffer consisting of an encrypted plaintext + a tag. This
/// is also the data generated by encrypt_inplace.
///
/// Returns: If the MIC is valid, returns a pointer to the plaintext part of
/// the buffer else returns an error.
pub fn decrypt_inplace<'b>(
&mut self,
message: &'b mut [u8],
additional_data: &[u8],
) -> Result<&'b [u8], ()> {
if message.len() < self.tag_size {
return Err(());
}
let (ciphertext, tag) = message.split_at_mut(message.len() - self.tag_size);
self.apply_ctr_mode(ciphertext, tag);
// Buffer to store the computed tag (the tag can be at most BLOCK_SIZE bytes in
// length).
let mut expected_tag_buf = [0u8; BLOCK_SIZE];
let expected_tag = &mut expected_tag_buf[0..self.tag_size];
self.compute_cbc_mac(ciphertext, additional_data, expected_tag);
// TODO: Use a constant time comparison function
if expected_tag != tag {
return Err(());
}
Ok(ciphertext)
}
fn compute_cbc_mac(&mut self, plaintext: &[u8], mut additional_data: &[u8], tag: &mut [u8]) {
// Set up B_0 for CBC-MAC
self.copy_nonce_to_block();
self.setup_for_cbc_mac(!additional_data.is_empty(), plaintext.len());
// Generate X_1 into self.block.ciphertext
self.block.encrypt();
if !additional_data.is_empty() {
// Construct B_i containing the length of the block.
{
let mut remaining = &mut self.block.plaintext_mut()[..];
if additional_data.len() < ((1 << 16) - (1 << 8)) {
*array_mut_ref![remaining, 0, 2] = (additional_data.len() as u16).to_be_bytes();
remaining = &mut remaining[2..];
} else if additional_data.len() <= (core::u32::MAX as usize) {
remaining[0] = 0xFF;
remaining[1] = 0xFE;
*array_mut_ref![remaining, 2, 4] = (additional_data.len() as u32).to_be_bytes();
remaining = &mut remaining[6..];
} else {
remaining[0] = 0xFF;
remaining[1] = 0xFF;
*array_mut_ref![remaining, 2, 8] = (additional_data.len() as u64).to_be_bytes();
remaining = &mut remaining[10..];
}
let n = core::cmp::min(remaining.len(), additional_data.len());
remaining[0..n].copy_from_slice(&additional_data[0..n]);
additional_data = &additional_data[n..];
for i in n..remaining.len() {
remaining[i] = 0;
}
}
// Store 'X_i XOR B_i' into self.block.plaintext.
{
let (p, c) = self.block.plaintext_mut_ciphertext();
xor16_inplace(c, p);
}
// Encrypt 'X_i XOR B_i' to get X_i+1
self.block.encrypt();
// Construct remaining additional data blocks.
self.append_cbc_mac_data_blocks(additional_data);
}
self.append_cbc_mac_data_blocks(plaintext);
// Get raw unencrypted tag by taking first tag_size bytes.
tag.copy_from_slice(&self.block.ciphertext()[0..tag.len()]);
}
fn append_cbc_mac_data_blocks(&mut self, mut input: &[u8]) {
while input.len() > 0 {
let (plaintext, ciphertext) = self.block.plaintext_mut_ciphertext();
// Set the data.plaintext to B_i (the next input block padded with zeros).
let n = core::cmp::min(input.len(), BLOCK_SIZE);
plaintext[0..n].copy_from_slice(&input[0..n]);
input = &input[n..];
for i in n..BLOCK_SIZE {
plaintext[i] = 0;
}
// TODO: Deduplicate these two lines.
// Perform 'X_i XOR B_i'
xor16_inplace(ciphertext, plaintext);
// Encrypt 'X_i XOR B_i' to get X_i+1
self.block.encrypt();
}
}
/// Applies CTR encryption to the given plaintext and tag encrpyting them
/// in-place.
fn apply_ctr_mode(&mut self, plaintext: &mut [u8], tag: &mut [u8]) {
// Setup A_0
self.copy_nonce_to_block();
self.setup_for_ctr_enc(0);
// Generate S_0
self.block.encrypt();
// Encrypt tag as 'T XOR first-M-bytes( S_0 )' and write to the end of the
// output buffer.
xor_inplace(&self.block.ciphertext()[0..tag.len()], tag);
for i in 0..ceil_div(plaintext.len(), BLOCK_SIZE) {
let counter = i + 1;
// Setup A_(i + 1)
self.setup_for_ctr_enc(counter);
// Generate S_(i + 1)
self.block.encrypt();
let start_i = i * BLOCK_SIZE;
let end_i = core::cmp::min(plaintext.len(), start_i + BLOCK_SIZE);
xor_inplace(
&self.block.ciphertext()[0..(end_i - start_i)],
&mut plaintext[start_i..end_i],
);
}
}
fn copy_nonce_to_block(&mut self) {
self.block.plaintext_mut()[1..(1 + self.nonce.as_ref().len())]
.copy_from_slice(&self.nonce.as_ref()[..]);
}
fn setup_for_cbc_mac(&mut self, has_additional_data: bool, length: usize) {
let block = self.block.plaintext_mut();
block[0] = (if has_additional_data { 1 } else { 0 }) << 6
| ((self.tag_size as u8 - 2) / 2) << 3
| (self.length_size as u8 - 1);
if self.length_size == 2 {
*array_mut_ref![block, block.len() - 2, 2] = (length as u16).to_be_bytes();
} else {
todo!();
}
}
fn setup_for_ctr_enc(&mut self, counter: usize) {
let block = self.block.plaintext_mut();
block[0] = (self.length_size as u8 - 1);
if self.length_size == 2 {
*array_mut_ref![block, block.len() - 2, 2] = (counter as u16).to_be_bytes();
} else {
todo!();
}
}
}
fn xor16_inplace(a: &[u8; BLOCK_SIZE], b: &mut [u8; BLOCK_SIZE]) {
for i in 0..(BLOCK_SIZE / 4) {
let a_ref = array_ref![a, 4 * i, 4];
let b_ref = array_mut_ref![b, 4 * i, 4];
*b_ref = (u32::from_ne_bytes(*a_ref) ^ u32::from_ne_bytes(*b_ref)).to_ne_bytes();
}
}
#[cfg(feature = "std")]
pub mod aes {
use super::*;
use crate::{aes::AESBlockCipher, cipher::BlockCipher};
pub struct AES128BlockEncryptor {
cipher: AESBlockCipher,
plaintext: [u8; BLOCK_SIZE],
ciphertext: [u8; BLOCK_SIZE],
}
impl AES128BlockEncryptor {
pub fn new(key: &[u8]) -> Self |
}
impl BlockCipherBuffer for AES128BlockEncryptor {
fn plaintext(&self) -> &[u8; BLOCK_SIZE] {
&self.plaintext
}
fn plaintext_mut(&mut self) -> &mut [u8; BLOCK_SIZE] {
&mut self.plaintext
}
fn plaintext_mut_ciphertext(&mut self) -> (&mut [u8; BLOCK_SIZE], &[u8; BLOCK_SIZE]) {
(&mut self.plaintext, &self.ciphertext)
}
fn encrypt(&mut self) {
self.cipher
.encrypt_block(&self.plaintext, &mut self.ciphertext);
}
fn ciphertext(&self) -> &[u8; BLOCK_SIZE] {
&self.ciphertext
}
}
}
#[cfg(test)]
mod tests {
use super::aes::*;
use super::*;
// Test vectors from the RFC
#[test]
fn works() {
let key = hex!("C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF");
let nonce = hex!("00000003020100A0A1A2A3A4A5");
let mut data = hex!("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E");
let tag_size = 8;
let length_size = 2;
let expected_ciphertext =
hex!("588C979A61C663D2F066D0C2C0F989806D5F6B61DAC38417E8D12CFDF926E0");
let aad = &data[0..8];
let mut plaintext = data[8..].to_vec();
plaintext.resize(plaintext.len() + tag_size, 0);
let mut ccm = CCM::new(
AES128BlockEncryptor::new(&key),
tag_size,
length_size,
array_ref![&nonce, 0, 13],
);
ccm.encrypt_inplace(&mut plaintext, &aad);
assert_eq!(&plaintext, &expected_ciphertext);
println!("{:02x?}", &plaintext);
}
}
| {
assert_eq!(key.len(), KEY_SIZE);
Self {
cipher: AESBlockCipher::create(key).unwrap(),
plaintext: [0u8; BLOCK_SIZE],
ciphertext: [0u8; BLOCK_SIZE],
}
} | identifier_body |
ccm.rs | // Implementation of the AEAD CCM cipher as described in:
// https://datatracker.ietf.org/doc/html/rfc3610
//
// NOTE: Currently only fixed block sizes are supported.
use core::result::Result;
use common::ceil_div;
use crate::constant_eq;
use crate::utils::xor_inplace;
/// Number of bytes in an AES-128 key.
pub const KEY_SIZE: usize = 16;
const BLOCK_SIZE: usize = 16; // 128-bit blocks
/// A buffer for encrypting blocks with some cipher.
///
/// This object should consist of a 'plaintext' block buffer to be filled by the
/// user with the unencrypted block and an indepedent 'ciphertext' block buffer
/// that will continue the encrypted form of the 'plaintext' buffer upon
/// request.
pub trait BlockCipherBuffer {
fn plaintext(&self) -> &[u8; BLOCK_SIZE];
fn plaintext_mut(&mut self) -> &mut [u8; BLOCK_SIZE];
fn ciphertext(&self) -> &[u8; BLOCK_SIZE];
fn plaintext_mut_ciphertext(&mut self) -> (&mut [u8; BLOCK_SIZE], &[u8; BLOCK_SIZE]);
/// Should encrypt the plaintext buffer writing the result into the internal
/// ciphertext buffer.
///
/// MUST NOT modify the plaintext buffer.
fn encrypt(&mut self);
}
pub struct CCM<Block, Nonce> {
block: Block,
tag_size: usize,
length_size: usize,
nonce: Nonce,
}
impl<Block: BlockCipherBuffer, Nonce: AsRef<[u8]>> CCM<Block, Nonce> {
/// | /// - tag_size: Number of bytes used to store the message authentication
/// tag.
/// - length_size: Number of bytes used to represent the message length.
/// - nonce:
pub fn new(block: Block, tag_size: usize, length_size: usize, nonce: Nonce) -> Self {
let nonce_size = 15 - length_size;
assert_eq!(nonce_size, nonce.as_ref().len());
Self {
block,
tag_size,
length_size,
nonce,
}
}
/// Performs in-place encryption of the given message.
///
/// Arguments:
/// - message: A buffer of size plaintext_length + tag_size. The final
/// tag_size bytes can be initially to any value.
/// - additional_data: Additional data which will be considered while making
/// the CBC-MAC.
pub fn encrypt_inplace(&mut self, message: &mut [u8], additional_data: &[u8]) {
assert!(message.len() >= self.tag_size);
let (plaintext, tag) = message.split_at_mut(message.len() - self.tag_size);
self.compute_cbc_mac(plaintext, additional_data, tag);
self.apply_ctr_mode(plaintext, tag);
}
/// Performs in-place decryption of the given message.
///
/// Arguments:
/// - message: The buffer consisting of an encrypted plaintext + a tag. This
/// is also the data generated by encrypt_inplace.
///
/// Returns: If the MIC is valid, returns a pointer to the plaintext part of
/// the buffer else returns an error.
pub fn decrypt_inplace<'b>(
&mut self,
message: &'b mut [u8],
additional_data: &[u8],
) -> Result<&'b [u8], ()> {
if message.len() < self.tag_size {
return Err(());
}
let (ciphertext, tag) = message.split_at_mut(message.len() - self.tag_size);
self.apply_ctr_mode(ciphertext, tag);
// Buffer to store the computed tag (the tag can be at most BLOCK_SIZE bytes in
// length).
let mut expected_tag_buf = [0u8; BLOCK_SIZE];
let expected_tag = &mut expected_tag_buf[0..self.tag_size];
self.compute_cbc_mac(ciphertext, additional_data, expected_tag);
// TODO: Use a constant time comparison function
if expected_tag != tag {
return Err(());
}
Ok(ciphertext)
}
fn compute_cbc_mac(&mut self, plaintext: &[u8], mut additional_data: &[u8], tag: &mut [u8]) {
// Set up B_0 for CBC-MAC
self.copy_nonce_to_block();
self.setup_for_cbc_mac(!additional_data.is_empty(), plaintext.len());
// Generate X_1 into self.block.ciphertext
self.block.encrypt();
if !additional_data.is_empty() {
// Construct B_i containing the length of the block.
{
let mut remaining = &mut self.block.plaintext_mut()[..];
if additional_data.len() < ((1 << 16) - (1 << 8)) {
*array_mut_ref![remaining, 0, 2] = (additional_data.len() as u16).to_be_bytes();
remaining = &mut remaining[2..];
} else if additional_data.len() <= (core::u32::MAX as usize) {
remaining[0] = 0xFF;
remaining[1] = 0xFE;
*array_mut_ref![remaining, 2, 4] = (additional_data.len() as u32).to_be_bytes();
remaining = &mut remaining[6..];
} else {
remaining[0] = 0xFF;
remaining[1] = 0xFF;
*array_mut_ref![remaining, 2, 8] = (additional_data.len() as u64).to_be_bytes();
remaining = &mut remaining[10..];
}
let n = core::cmp::min(remaining.len(), additional_data.len());
remaining[0..n].copy_from_slice(&additional_data[0..n]);
additional_data = &additional_data[n..];
for i in n..remaining.len() {
remaining[i] = 0;
}
}
// Store 'X_i XOR B_i' into self.block.plaintext.
{
let (p, c) = self.block.plaintext_mut_ciphertext();
xor16_inplace(c, p);
}
// Encrypt 'X_i XOR B_i' to get X_i+1
self.block.encrypt();
// Construct remaining additional data blocks.
self.append_cbc_mac_data_blocks(additional_data);
}
self.append_cbc_mac_data_blocks(plaintext);
// Get raw unencrypted tag by taking first tag_size bytes.
tag.copy_from_slice(&self.block.ciphertext()[0..tag.len()]);
}
fn append_cbc_mac_data_blocks(&mut self, mut input: &[u8]) {
while input.len() > 0 {
let (plaintext, ciphertext) = self.block.plaintext_mut_ciphertext();
// Set the data.plaintext to B_i (the next input block padded with zeros).
let n = core::cmp::min(input.len(), BLOCK_SIZE);
plaintext[0..n].copy_from_slice(&input[0..n]);
input = &input[n..];
for i in n..BLOCK_SIZE {
plaintext[i] = 0;
}
// TODO: Deduplicate these two lines.
// Perform 'X_i XOR B_i'
xor16_inplace(ciphertext, plaintext);
// Encrypt 'X_i XOR B_i' to get X_i+1
self.block.encrypt();
}
}
/// Applies CTR encryption to the given plaintext and tag encrpyting them
/// in-place.
fn apply_ctr_mode(&mut self, plaintext: &mut [u8], tag: &mut [u8]) {
// Setup A_0
self.copy_nonce_to_block();
self.setup_for_ctr_enc(0);
// Generate S_0
self.block.encrypt();
// Encrypt tag as 'T XOR first-M-bytes( S_0 )' and write to the end of the
// output buffer.
xor_inplace(&self.block.ciphertext()[0..tag.len()], tag);
for i in 0..ceil_div(plaintext.len(), BLOCK_SIZE) {
let counter = i + 1;
// Setup A_(i + 1)
self.setup_for_ctr_enc(counter);
// Generate S_(i + 1)
self.block.encrypt();
let start_i = i * BLOCK_SIZE;
let end_i = core::cmp::min(plaintext.len(), start_i + BLOCK_SIZE);
xor_inplace(
&self.block.ciphertext()[0..(end_i - start_i)],
&mut plaintext[start_i..end_i],
);
}
}
fn copy_nonce_to_block(&mut self) {
self.block.plaintext_mut()[1..(1 + self.nonce.as_ref().len())]
.copy_from_slice(&self.nonce.as_ref()[..]);
}
fn setup_for_cbc_mac(&mut self, has_additional_data: bool, length: usize) {
let block = self.block.plaintext_mut();
block[0] = (if has_additional_data { 1 } else { 0 }) << 6
| ((self.tag_size as u8 - 2) / 2) << 3
| (self.length_size as u8 - 1);
if self.length_size == 2 {
*array_mut_ref![block, block.len() - 2, 2] = (length as u16).to_be_bytes();
} else {
todo!();
}
}
fn setup_for_ctr_enc(&mut self, counter: usize) {
let block = self.block.plaintext_mut();
block[0] = (self.length_size as u8 - 1);
if self.length_size == 2 {
*array_mut_ref![block, block.len() - 2, 2] = (counter as u16).to_be_bytes();
} else {
todo!();
}
}
}
fn xor16_inplace(a: &[u8; BLOCK_SIZE], b: &mut [u8; BLOCK_SIZE]) {
for i in 0..(BLOCK_SIZE / 4) {
let a_ref = array_ref![a, 4 * i, 4];
let b_ref = array_mut_ref![b, 4 * i, 4];
*b_ref = (u32::from_ne_bytes(*a_ref) ^ u32::from_ne_bytes(*b_ref)).to_ne_bytes();
}
}
#[cfg(feature = "std")]
pub mod aes {
use super::*;
use crate::{aes::AESBlockCipher, cipher::BlockCipher};
pub struct AES128BlockEncryptor {
cipher: AESBlockCipher,
plaintext: [u8; BLOCK_SIZE],
ciphertext: [u8; BLOCK_SIZE],
}
impl AES128BlockEncryptor {
pub fn new(key: &[u8]) -> Self {
assert_eq!(key.len(), KEY_SIZE);
Self {
cipher: AESBlockCipher::create(key).unwrap(),
plaintext: [0u8; BLOCK_SIZE],
ciphertext: [0u8; BLOCK_SIZE],
}
}
}
impl BlockCipherBuffer for AES128BlockEncryptor {
fn plaintext(&self) -> &[u8; BLOCK_SIZE] {
&self.plaintext
}
fn plaintext_mut(&mut self) -> &mut [u8; BLOCK_SIZE] {
&mut self.plaintext
}
fn plaintext_mut_ciphertext(&mut self) -> (&mut [u8; BLOCK_SIZE], &[u8; BLOCK_SIZE]) {
(&mut self.plaintext, &self.ciphertext)
}
fn encrypt(&mut self) {
self.cipher
.encrypt_block(&self.plaintext, &mut self.ciphertext);
}
fn ciphertext(&self) -> &[u8; BLOCK_SIZE] {
&self.ciphertext
}
}
}
#[cfg(test)]
mod tests {
use super::aes::*;
use super::*;
// Test vectors from the RFC
#[test]
fn works() {
let key = hex!("C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF");
let nonce = hex!("00000003020100A0A1A2A3A4A5");
let mut data = hex!("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E");
let tag_size = 8;
let length_size = 2;
let expected_ciphertext =
hex!("588C979A61C663D2F066D0C2C0F989806D5F6B61DAC38417E8D12CFDF926E0");
let aad = &data[0..8];
let mut plaintext = data[8..].to_vec();
plaintext.resize(plaintext.len() + tag_size, 0);
let mut ccm = CCM::new(
AES128BlockEncryptor::new(&key),
tag_size,
length_size,
array_ref![&nonce, 0, 13],
);
ccm.encrypt_inplace(&mut plaintext, &aad);
assert_eq!(&plaintext, &expected_ciphertext);
println!("{:02x?}", &plaintext);
}
} | /// Arguments:
/// - block: | random_line_split |
protocol.rs | // Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use bytes::BytesMut;
use crate::structs_proto;
use futures::{future::{self, FutureResult}, Async, AsyncSink, Future, Poll, Sink, Stream};
use futures::try_ready;
use libp2p_core::{
Multiaddr, PublicKey,
upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo, Negotiated}
};
use log::{debug, trace};
use protobuf::Message as ProtobufMessage;
use protobuf::parse_from_bytes as protobuf_parse_from_bytes;
use protobuf::RepeatedField;
use std::convert::TryFrom;
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
use std::iter;
use tokio_codec::Framed;
use tokio_io::{AsyncRead, AsyncWrite};
use unsigned_varint::codec;
/// Configuration for an upgrade to the identity protocol.
#[derive(Debug, Clone)]
pub struct IdentifyProtocolConfig;
#[derive(Debug, Clone)]
pub struct RemoteInfo {
/// Information about the remote.
pub info: IdentifyInfo,
/// Address the remote sees for us.
pub observed_addr: Multiaddr,
_priv: ()
}
/// Object used to send back information to the client.
pub struct IdentifySender<T> {
inner: Framed<T, codec::UviBytes<Vec<u8>>>,
}
impl<T> IdentifySender<T> where T: AsyncWrite {
/// Sends back information to the client. Returns a future that is signalled whenever the
/// info have been sent.
pub fn send(self, info: IdentifyInfo, observed_addr: &Multiaddr) -> IdentifySenderFuture<T> {
debug!("Sending identify info to client");
trace!("Sending: {:?}", info);
let listen_addrs = info.listen_addrs
.into_iter()
.map(|addr| addr.to_vec())
.collect();
let pubkey_bytes = info.public_key.into_protobuf_encoding();
let mut message = structs_proto::Identify::new();
message.set_agentVersion(info.agent_version);
message.set_protocolVersion(info.protocol_version);
message.set_publicKey(pubkey_bytes);
message.set_listenAddrs(listen_addrs);
message.set_observedAddr(observed_addr.to_vec());
message.set_protocols(RepeatedField::from_vec(info.protocols));
let bytes = message
.write_to_bytes()
.expect("writing protobuf failed; should never happen");
IdentifySenderFuture {
inner: self.inner,
item: Some(bytes),
}
}
}
/// Future returned by `IdentifySender::send()`. Must be processed to the end in order to send
/// the information to the remote.
// Note: we don't use a `futures::sink::Sink` because it requires `T` to implement `Sink`, which
// means that we would require `T: AsyncWrite` in this struct definition. This requirement
// would then propagate everywhere.
#[must_use = "futures do nothing unless polled"]
pub struct IdentifySenderFuture<T> {
/// The Sink where to send the data.
inner: Framed<T, codec::UviBytes<Vec<u8>>>,
/// Bytes to send, or `None` if we've already sent them.
item: Option<Vec<u8>>,
}
impl<T> Future for IdentifySenderFuture<T>
where T: AsyncWrite
{
type Item = ();
type Error = IoError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(item) = self.item.take() {
if let AsyncSink::NotReady(item) = self.inner.start_send(item)? {
self.item = Some(item);
return Ok(Async::NotReady);
}
}
// A call to `close()` implies flushing.
try_ready!(self.inner.close());
Ok(Async::Ready(()))
}
}
/// Information sent from the listener to the dialer.
#[derive(Debug, Clone)]
pub struct IdentifyInfo {
/// Public key of the node.
pub public_key: PublicKey,
/// Version of the "global" protocol, e.g. `ipfs/1.0.0` or `polkadot/1.0.0`.
pub protocol_version: String,
/// Name and version of the client. Can be thought as similar to the `User-Agent` header
/// of HTTP.
pub agent_version: String,
/// Addresses that the node is listening on.
pub listen_addrs: Vec<Multiaddr>,
/// Protocols supported by the node, e.g. `/ipfs/ping/1.0.0`.
pub protocols: Vec<String>,
}
impl UpgradeInfo for IdentifyProtocolConfig {
type Info = &'static [u8];
type InfoIter = iter::Once<Self::Info>;
fn | (&self) -> Self::InfoIter {
iter::once(b"/ipfs/id/1.0.0")
}
}
impl<C> InboundUpgrade<C> for IdentifyProtocolConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = IdentifySender<Negotiated<C>>;
type Error = IoError;
type Future = FutureResult<Self::Output, IoError>;
fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
trace!("Upgrading inbound connection");
let socket = Framed::new(socket, codec::UviBytes::default());
let sender = IdentifySender { inner: socket };
future::ok(sender)
}
}
impl<C> OutboundUpgrade<C> for IdentifyProtocolConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = RemoteInfo;
type Error = IoError;
type Future = IdentifyOutboundFuture<Negotiated<C>>;
fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
IdentifyOutboundFuture {
inner: Framed::new(socket, codec::UviBytes::<BytesMut>::default()),
shutdown: false,
}
}
}
/// Future returned by `OutboundUpgrade::upgrade_outbound`.
pub struct IdentifyOutboundFuture<T> {
inner: Framed<T, codec::UviBytes<BytesMut>>,
/// If true, we have finished shutting down the writing part of `inner`.
shutdown: bool,
}
impl<T> Future for IdentifyOutboundFuture<T>
where T: AsyncRead + AsyncWrite,
{
type Item = RemoteInfo;
type Error = IoError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if !self.shutdown {
try_ready!(self.inner.close());
self.shutdown = true;
}
let msg = match try_ready!(self.inner.poll()) {
Some(i) => i,
None => {
debug!("Identify protocol stream closed before receiving info");
return Err(IoErrorKind::InvalidData.into());
}
};
debug!("Received identify message");
let (info, observed_addr) = match parse_proto_msg(msg) {
Ok(v) => v,
Err(err) => {
debug!("Failed to parse protobuf message; error = {:?}", err);
return Err(err)
}
};
trace!("Remote observes us as {:?}", observed_addr);
trace!("Information received: {:?}", info);
Ok(Async::Ready(RemoteInfo {
info,
observed_addr: observed_addr.clone(),
_priv: ()
}))
}
}
// Turns a protobuf message into an `IdentifyInfo` and an observed address. If something bad
// happens, turn it into an `IoError`.
fn parse_proto_msg(msg: BytesMut) -> Result<(IdentifyInfo, Multiaddr), IoError> {
match protobuf_parse_from_bytes::<structs_proto::Identify>(&msg) {
Ok(mut msg) => {
// Turn a `Vec<u8>` into a `Multiaddr`. If something bad happens, turn it into
// an `IoError`.
fn bytes_to_multiaddr(bytes: Vec<u8>) -> Result<Multiaddr, IoError> {
Multiaddr::try_from(bytes)
.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))
}
let listen_addrs = {
let mut addrs = Vec::new();
for addr in msg.take_listenAddrs().into_iter() {
addrs.push(bytes_to_multiaddr(addr)?);
}
addrs
};
let public_key = PublicKey::from_protobuf_encoding(msg.get_publicKey())
.map_err(|e| IoError::new(IoErrorKind::InvalidData, e))?;
let observed_addr = bytes_to_multiaddr(msg.take_observedAddr())?;
let info = IdentifyInfo {
public_key,
protocol_version: msg.take_protocolVersion(),
agent_version: msg.take_agentVersion(),
listen_addrs,
protocols: msg.take_protocols().into_vec(),
};
Ok((info, observed_addr))
}
Err(err) => Err(IoError::new(IoErrorKind::InvalidData, err)),
}
}
#[cfg(test)]
mod tests {
use crate::protocol::{IdentifyInfo, RemoteInfo, IdentifyProtocolConfig};
use tokio::runtime::current_thread::Runtime;
use libp2p_tcp::TcpConfig;
use futures::{Future, Stream};
use libp2p_core::{
identity,
Transport,
transport::ListenerEvent,
upgrade::{apply_outbound, apply_inbound}
};
use std::{io, sync::mpsc, thread};
#[test]
fn correct_transfer() {
// We open a server and a client, send info from the server to the client, and check that
// they were successfully received.
let send_pubkey = identity::Keypair::generate_ed25519().public();
let recv_pubkey = send_pubkey.clone();
let (tx, rx) = mpsc::channel();
let bg_thread = thread::spawn(move || {
let transport = TcpConfig::new();
let mut listener = transport
.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap())
.unwrap();
let addr = listener.by_ref().wait()
.next()
.expect("some event")
.expect("no error")
.into_new_address()
.expect("listen address");
tx.send(addr).unwrap();
let future = listener
.filter_map(ListenerEvent::into_upgrade)
.into_future()
.map_err(|(err, _)| err)
.and_then(|(client, _)| client.unwrap().0)
.and_then(|socket| {
apply_inbound(socket, IdentifyProtocolConfig)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
})
.and_then(|sender| {
sender.send(
IdentifyInfo {
public_key: send_pubkey,
protocol_version: "proto_version".to_owned(),
agent_version: "agent_version".to_owned(),
listen_addrs: vec![
"/ip4/80.81.82.83/tcp/500".parse().unwrap(),
"/ip6/::1/udp/1000".parse().unwrap(),
],
protocols: vec!["proto1".to_string(), "proto2".to_string()],
},
&"/ip4/100.101.102.103/tcp/5000".parse().unwrap(),
)
});
let mut rt = Runtime::new().unwrap();
let _ = rt.block_on(future).unwrap();
});
let transport = TcpConfig::new();
let future = transport.dial(rx.recv().unwrap())
.unwrap()
.and_then(|socket| {
apply_outbound(socket, IdentifyProtocolConfig)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
})
.and_then(|RemoteInfo { info, observed_addr, .. }| {
assert_eq!(observed_addr, "/ip4/100.101.102.103/tcp/5000".parse().unwrap());
assert_eq!(info.public_key, recv_pubkey);
assert_eq!(info.protocol_version, "proto_version");
assert_eq!(info.agent_version, "agent_version");
assert_eq!(info.listen_addrs,
&["/ip4/80.81.82.83/tcp/500".parse().unwrap(),
"/ip6/::1/udp/1000".parse().unwrap()]);
assert_eq!(info.protocols, &["proto1".to_string(), "proto2".to_string()]);
Ok(())
});
let mut rt = Runtime::new().unwrap();
let _ = rt.block_on(future).unwrap();
bg_thread.join().unwrap();
}
}
| protocol_info | identifier_name |
protocol.rs | // Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use bytes::BytesMut;
use crate::structs_proto;
use futures::{future::{self, FutureResult}, Async, AsyncSink, Future, Poll, Sink, Stream};
use futures::try_ready;
use libp2p_core::{
Multiaddr, PublicKey,
upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo, Negotiated}
};
use log::{debug, trace};
use protobuf::Message as ProtobufMessage;
use protobuf::parse_from_bytes as protobuf_parse_from_bytes;
use protobuf::RepeatedField;
use std::convert::TryFrom;
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
use std::iter;
use tokio_codec::Framed;
use tokio_io::{AsyncRead, AsyncWrite};
use unsigned_varint::codec;
/// Configuration for an upgrade to the identity protocol.
#[derive(Debug, Clone)]
pub struct IdentifyProtocolConfig;
#[derive(Debug, Clone)]
pub struct RemoteInfo {
/// Information about the remote.
pub info: IdentifyInfo,
/// Address the remote sees for us.
pub observed_addr: Multiaddr,
_priv: ()
}
/// Object used to send back information to the client.
pub struct IdentifySender<T> {
inner: Framed<T, codec::UviBytes<Vec<u8>>>,
}
impl<T> IdentifySender<T> where T: AsyncWrite {
/// Sends back information to the client. Returns a future that is signalled whenever the
/// info have been sent.
pub fn send(self, info: IdentifyInfo, observed_addr: &Multiaddr) -> IdentifySenderFuture<T> {
debug!("Sending identify info to client");
trace!("Sending: {:?}", info);
let listen_addrs = info.listen_addrs
.into_iter()
.map(|addr| addr.to_vec())
.collect();
let pubkey_bytes = info.public_key.into_protobuf_encoding();
let mut message = structs_proto::Identify::new();
message.set_agentVersion(info.agent_version);
message.set_protocolVersion(info.protocol_version);
message.set_publicKey(pubkey_bytes);
message.set_listenAddrs(listen_addrs);
message.set_observedAddr(observed_addr.to_vec());
message.set_protocols(RepeatedField::from_vec(info.protocols));
let bytes = message
.write_to_bytes()
.expect("writing protobuf failed; should never happen");
IdentifySenderFuture {
inner: self.inner,
item: Some(bytes),
}
}
}
/// Future returned by `IdentifySender::send()`. Must be processed to the end in order to send
/// the information to the remote.
// Note: we don't use a `futures::sink::Sink` because it requires `T` to implement `Sink`, which
// means that we would require `T: AsyncWrite` in this struct definition. This requirement
// would then propagate everywhere.
#[must_use = "futures do nothing unless polled"]
pub struct IdentifySenderFuture<T> {
/// The Sink where to send the data.
inner: Framed<T, codec::UviBytes<Vec<u8>>>,
/// Bytes to send, or `None` if we've already sent them.
item: Option<Vec<u8>>,
}
impl<T> Future for IdentifySenderFuture<T>
where T: AsyncWrite
{
type Item = ();
type Error = IoError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(item) = self.item.take() {
if let AsyncSink::NotReady(item) = self.inner.start_send(item)? {
self.item = Some(item);
return Ok(Async::NotReady);
}
}
// A call to `close()` implies flushing.
try_ready!(self.inner.close());
Ok(Async::Ready(()))
}
}
/// Information sent from the listener to the dialer.
#[derive(Debug, Clone)]
pub struct IdentifyInfo {
/// Public key of the node.
pub public_key: PublicKey,
/// Version of the "global" protocol, e.g. `ipfs/1.0.0` or `polkadot/1.0.0`.
pub protocol_version: String,
/// Name and version of the client. Can be thought as similar to the `User-Agent` header
/// of HTTP.
pub agent_version: String,
/// Addresses that the node is listening on.
pub listen_addrs: Vec<Multiaddr>,
/// Protocols supported by the node, e.g. `/ipfs/ping/1.0.0`.
pub protocols: Vec<String>,
}
impl UpgradeInfo for IdentifyProtocolConfig {
type Info = &'static [u8];
type InfoIter = iter::Once<Self::Info>;
fn protocol_info(&self) -> Self::InfoIter {
iter::once(b"/ipfs/id/1.0.0")
}
}
impl<C> InboundUpgrade<C> for IdentifyProtocolConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = IdentifySender<Negotiated<C>>;
type Error = IoError;
type Future = FutureResult<Self::Output, IoError>;
fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
trace!("Upgrading inbound connection");
let socket = Framed::new(socket, codec::UviBytes::default());
let sender = IdentifySender { inner: socket };
future::ok(sender)
}
}
impl<C> OutboundUpgrade<C> for IdentifyProtocolConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = RemoteInfo;
type Error = IoError;
type Future = IdentifyOutboundFuture<Negotiated<C>>;
fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
IdentifyOutboundFuture {
inner: Framed::new(socket, codec::UviBytes::<BytesMut>::default()),
shutdown: false,
}
}
}
/// Future returned by `OutboundUpgrade::upgrade_outbound`.
pub struct IdentifyOutboundFuture<T> {
inner: Framed<T, codec::UviBytes<BytesMut>>,
/// If true, we have finished shutting down the writing part of `inner`.
shutdown: bool,
}
impl<T> Future for IdentifyOutboundFuture<T>
where T: AsyncRead + AsyncWrite,
{
type Item = RemoteInfo;
type Error = IoError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if !self.shutdown {
try_ready!(self.inner.close());
self.shutdown = true;
}
let msg = match try_ready!(self.inner.poll()) {
Some(i) => i,
None => {
debug!("Identify protocol stream closed before receiving info");
return Err(IoErrorKind::InvalidData.into());
}
};
debug!("Received identify message");
let (info, observed_addr) = match parse_proto_msg(msg) {
Ok(v) => v,
Err(err) => {
debug!("Failed to parse protobuf message; error = {:?}", err);
return Err(err)
}
};
trace!("Remote observes us as {:?}", observed_addr);
trace!("Information received: {:?}", info);
Ok(Async::Ready(RemoteInfo {
info,
observed_addr: observed_addr.clone(),
_priv: ()
}))
}
}
// Turns a protobuf message into an `IdentifyInfo` and an observed address. If something bad
// happens, turn it into an `IoError`.
fn parse_proto_msg(msg: BytesMut) -> Result<(IdentifyInfo, Multiaddr), IoError> {
match protobuf_parse_from_bytes::<structs_proto::Identify>(&msg) {
Ok(mut msg) => {
// Turn a `Vec<u8>` into a `Multiaddr`. If something bad happens, turn it into
// an `IoError`.
fn bytes_to_multiaddr(bytes: Vec<u8>) -> Result<Multiaddr, IoError> {
Multiaddr::try_from(bytes)
.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))
}
let listen_addrs = {
let mut addrs = Vec::new();
for addr in msg.take_listenAddrs().into_iter() {
addrs.push(bytes_to_multiaddr(addr)?);
}
addrs
};
let public_key = PublicKey::from_protobuf_encoding(msg.get_publicKey())
.map_err(|e| IoError::new(IoErrorKind::InvalidData, e))?;
let observed_addr = bytes_to_multiaddr(msg.take_observedAddr())?;
let info = IdentifyInfo {
public_key,
protocol_version: msg.take_protocolVersion(),
agent_version: msg.take_agentVersion(),
listen_addrs,
protocols: msg.take_protocols().into_vec(),
};
Ok((info, observed_addr))
}
Err(err) => Err(IoError::new(IoErrorKind::InvalidData, err)),
}
}
#[cfg(test)]
mod tests {
use crate::protocol::{IdentifyInfo, RemoteInfo, IdentifyProtocolConfig};
use tokio::runtime::current_thread::Runtime;
use libp2p_tcp::TcpConfig;
use futures::{Future, Stream};
use libp2p_core::{
identity,
Transport,
transport::ListenerEvent,
upgrade::{apply_outbound, apply_inbound}
};
use std::{io, sync::mpsc, thread};
#[test]
fn correct_transfer() {
// We open a server and a client, send info from the server to the client, and check that
// they were successfully received.
let send_pubkey = identity::Keypair::generate_ed25519().public();
let recv_pubkey = send_pubkey.clone();
let (tx, rx) = mpsc::channel();
let bg_thread = thread::spawn(move || {
let transport = TcpConfig::new();
let mut listener = transport
.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap())
.unwrap();
let addr = listener.by_ref().wait()
.next()
.expect("some event")
.expect("no error")
.into_new_address()
.expect("listen address");
tx.send(addr).unwrap();
let future = listener
.filter_map(ListenerEvent::into_upgrade)
.into_future()
.map_err(|(err, _)| err)
.and_then(|(client, _)| client.unwrap().0)
.and_then(|socket| {
apply_inbound(socket, IdentifyProtocolConfig)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
})
.and_then(|sender| {
sender.send(
IdentifyInfo {
public_key: send_pubkey,
protocol_version: "proto_version".to_owned(),
agent_version: "agent_version".to_owned(),
listen_addrs: vec![
"/ip4/80.81.82.83/tcp/500".parse().unwrap(), | )
});
let mut rt = Runtime::new().unwrap();
let _ = rt.block_on(future).unwrap();
});
let transport = TcpConfig::new();
let future = transport.dial(rx.recv().unwrap())
.unwrap()
.and_then(|socket| {
apply_outbound(socket, IdentifyProtocolConfig)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
})
.and_then(|RemoteInfo { info, observed_addr, .. }| {
assert_eq!(observed_addr, "/ip4/100.101.102.103/tcp/5000".parse().unwrap());
assert_eq!(info.public_key, recv_pubkey);
assert_eq!(info.protocol_version, "proto_version");
assert_eq!(info.agent_version, "agent_version");
assert_eq!(info.listen_addrs,
&["/ip4/80.81.82.83/tcp/500".parse().unwrap(),
"/ip6/::1/udp/1000".parse().unwrap()]);
assert_eq!(info.protocols, &["proto1".to_string(), "proto2".to_string()]);
Ok(())
});
let mut rt = Runtime::new().unwrap();
let _ = rt.block_on(future).unwrap();
bg_thread.join().unwrap();
}
} | "/ip6/::1/udp/1000".parse().unwrap(),
],
protocols: vec!["proto1".to_string(), "proto2".to_string()],
},
&"/ip4/100.101.102.103/tcp/5000".parse().unwrap(), | random_line_split |
protocol.rs | // Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use bytes::BytesMut;
use crate::structs_proto;
use futures::{future::{self, FutureResult}, Async, AsyncSink, Future, Poll, Sink, Stream};
use futures::try_ready;
use libp2p_core::{
Multiaddr, PublicKey,
upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo, Negotiated}
};
use log::{debug, trace};
use protobuf::Message as ProtobufMessage;
use protobuf::parse_from_bytes as protobuf_parse_from_bytes;
use protobuf::RepeatedField;
use std::convert::TryFrom;
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
use std::iter;
use tokio_codec::Framed;
use tokio_io::{AsyncRead, AsyncWrite};
use unsigned_varint::codec;
/// Configuration for an upgrade to the identity protocol.
#[derive(Debug, Clone)]
pub struct IdentifyProtocolConfig;
#[derive(Debug, Clone)]
pub struct RemoteInfo {
/// Information about the remote.
pub info: IdentifyInfo,
/// Address the remote sees for us.
pub observed_addr: Multiaddr,
_priv: ()
}
/// Object used to send back information to the client.
pub struct IdentifySender<T> {
inner: Framed<T, codec::UviBytes<Vec<u8>>>,
}
impl<T> IdentifySender<T> where T: AsyncWrite {
/// Sends back information to the client. Returns a future that is signalled whenever the
/// info have been sent.
pub fn send(self, info: IdentifyInfo, observed_addr: &Multiaddr) -> IdentifySenderFuture<T> {
debug!("Sending identify info to client");
trace!("Sending: {:?}", info);
let listen_addrs = info.listen_addrs
.into_iter()
.map(|addr| addr.to_vec())
.collect();
let pubkey_bytes = info.public_key.into_protobuf_encoding();
let mut message = structs_proto::Identify::new();
message.set_agentVersion(info.agent_version);
message.set_protocolVersion(info.protocol_version);
message.set_publicKey(pubkey_bytes);
message.set_listenAddrs(listen_addrs);
message.set_observedAddr(observed_addr.to_vec());
message.set_protocols(RepeatedField::from_vec(info.protocols));
let bytes = message
.write_to_bytes()
.expect("writing protobuf failed; should never happen");
IdentifySenderFuture {
inner: self.inner,
item: Some(bytes),
}
}
}
/// Future returned by `IdentifySender::send()`. Must be processed to the end in order to send
/// the information to the remote.
// Note: we don't use a `futures::sink::Sink` because it requires `T` to implement `Sink`, which
// means that we would require `T: AsyncWrite` in this struct definition. This requirement
// would then propagate everywhere.
#[must_use = "futures do nothing unless polled"]
pub struct IdentifySenderFuture<T> {
/// The Sink where to send the data.
inner: Framed<T, codec::UviBytes<Vec<u8>>>,
/// Bytes to send, or `None` if we've already sent them.
item: Option<Vec<u8>>,
}
impl<T> Future for IdentifySenderFuture<T>
where T: AsyncWrite
{
type Item = ();
type Error = IoError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(item) = self.item.take() {
if let AsyncSink::NotReady(item) = self.inner.start_send(item)? {
self.item = Some(item);
return Ok(Async::NotReady);
}
}
// A call to `close()` implies flushing.
try_ready!(self.inner.close());
Ok(Async::Ready(()))
}
}
/// Information sent from the listener to the dialer.
#[derive(Debug, Clone)]
pub struct IdentifyInfo {
/// Public key of the node.
pub public_key: PublicKey,
/// Version of the "global" protocol, e.g. `ipfs/1.0.0` or `polkadot/1.0.0`.
pub protocol_version: String,
/// Name and version of the client. Can be thought as similar to the `User-Agent` header
/// of HTTP.
pub agent_version: String,
/// Addresses that the node is listening on.
pub listen_addrs: Vec<Multiaddr>,
/// Protocols supported by the node, e.g. `/ipfs/ping/1.0.0`.
pub protocols: Vec<String>,
}
impl UpgradeInfo for IdentifyProtocolConfig {
type Info = &'static [u8];
type InfoIter = iter::Once<Self::Info>;
fn protocol_info(&self) -> Self::InfoIter {
iter::once(b"/ipfs/id/1.0.0")
}
}
impl<C> InboundUpgrade<C> for IdentifyProtocolConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = IdentifySender<Negotiated<C>>;
type Error = IoError;
type Future = FutureResult<Self::Output, IoError>;
fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
trace!("Upgrading inbound connection");
let socket = Framed::new(socket, codec::UviBytes::default());
let sender = IdentifySender { inner: socket };
future::ok(sender)
}
}
impl<C> OutboundUpgrade<C> for IdentifyProtocolConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = RemoteInfo;
type Error = IoError;
type Future = IdentifyOutboundFuture<Negotiated<C>>;
fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
IdentifyOutboundFuture {
inner: Framed::new(socket, codec::UviBytes::<BytesMut>::default()),
shutdown: false,
}
}
}
/// Future returned by `OutboundUpgrade::upgrade_outbound`.
pub struct IdentifyOutboundFuture<T> {
inner: Framed<T, codec::UviBytes<BytesMut>>,
/// If true, we have finished shutting down the writing part of `inner`.
shutdown: bool,
}
impl<T> Future for IdentifyOutboundFuture<T>
where T: AsyncRead + AsyncWrite,
{
type Item = RemoteInfo;
type Error = IoError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if !self.shutdown {
try_ready!(self.inner.close());
self.shutdown = true;
}
let msg = match try_ready!(self.inner.poll()) {
Some(i) => i,
None => {
debug!("Identify protocol stream closed before receiving info");
return Err(IoErrorKind::InvalidData.into());
}
};
debug!("Received identify message");
let (info, observed_addr) = match parse_proto_msg(msg) {
Ok(v) => v,
Err(err) => {
debug!("Failed to parse protobuf message; error = {:?}", err);
return Err(err)
}
};
trace!("Remote observes us as {:?}", observed_addr);
trace!("Information received: {:?}", info);
Ok(Async::Ready(RemoteInfo {
info,
observed_addr: observed_addr.clone(),
_priv: ()
}))
}
}
// Turns a protobuf message into an `IdentifyInfo` and an observed address. If something bad
// happens, turn it into an `IoError`.
fn parse_proto_msg(msg: BytesMut) -> Result<(IdentifyInfo, Multiaddr), IoError> {
match protobuf_parse_from_bytes::<structs_proto::Identify>(&msg) {
Ok(mut msg) => {
// Turn a `Vec<u8>` into a `Multiaddr`. If something bad happens, turn it into
// an `IoError`.
fn bytes_to_multiaddr(bytes: Vec<u8>) -> Result<Multiaddr, IoError> |
let listen_addrs = {
let mut addrs = Vec::new();
for addr in msg.take_listenAddrs().into_iter() {
addrs.push(bytes_to_multiaddr(addr)?);
}
addrs
};
let public_key = PublicKey::from_protobuf_encoding(msg.get_publicKey())
.map_err(|e| IoError::new(IoErrorKind::InvalidData, e))?;
let observed_addr = bytes_to_multiaddr(msg.take_observedAddr())?;
let info = IdentifyInfo {
public_key,
protocol_version: msg.take_protocolVersion(),
agent_version: msg.take_agentVersion(),
listen_addrs,
protocols: msg.take_protocols().into_vec(),
};
Ok((info, observed_addr))
}
Err(err) => Err(IoError::new(IoErrorKind::InvalidData, err)),
}
}
#[cfg(test)]
mod tests {
use crate::protocol::{IdentifyInfo, RemoteInfo, IdentifyProtocolConfig};
use tokio::runtime::current_thread::Runtime;
use libp2p_tcp::TcpConfig;
use futures::{Future, Stream};
use libp2p_core::{
identity,
Transport,
transport::ListenerEvent,
upgrade::{apply_outbound, apply_inbound}
};
use std::{io, sync::mpsc, thread};
#[test]
fn correct_transfer() {
// We open a server and a client, send info from the server to the client, and check that
// they were successfully received.
let send_pubkey = identity::Keypair::generate_ed25519().public();
let recv_pubkey = send_pubkey.clone();
let (tx, rx) = mpsc::channel();
let bg_thread = thread::spawn(move || {
let transport = TcpConfig::new();
let mut listener = transport
.listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap())
.unwrap();
let addr = listener.by_ref().wait()
.next()
.expect("some event")
.expect("no error")
.into_new_address()
.expect("listen address");
tx.send(addr).unwrap();
let future = listener
.filter_map(ListenerEvent::into_upgrade)
.into_future()
.map_err(|(err, _)| err)
.and_then(|(client, _)| client.unwrap().0)
.and_then(|socket| {
apply_inbound(socket, IdentifyProtocolConfig)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
})
.and_then(|sender| {
sender.send(
IdentifyInfo {
public_key: send_pubkey,
protocol_version: "proto_version".to_owned(),
agent_version: "agent_version".to_owned(),
listen_addrs: vec![
"/ip4/80.81.82.83/tcp/500".parse().unwrap(),
"/ip6/::1/udp/1000".parse().unwrap(),
],
protocols: vec!["proto1".to_string(), "proto2".to_string()],
},
&"/ip4/100.101.102.103/tcp/5000".parse().unwrap(),
)
});
let mut rt = Runtime::new().unwrap();
let _ = rt.block_on(future).unwrap();
});
let transport = TcpConfig::new();
let future = transport.dial(rx.recv().unwrap())
.unwrap()
.and_then(|socket| {
apply_outbound(socket, IdentifyProtocolConfig)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
})
.and_then(|RemoteInfo { info, observed_addr, .. }| {
assert_eq!(observed_addr, "/ip4/100.101.102.103/tcp/5000".parse().unwrap());
assert_eq!(info.public_key, recv_pubkey);
assert_eq!(info.protocol_version, "proto_version");
assert_eq!(info.agent_version, "agent_version");
assert_eq!(info.listen_addrs,
&["/ip4/80.81.82.83/tcp/500".parse().unwrap(),
"/ip6/::1/udp/1000".parse().unwrap()]);
assert_eq!(info.protocols, &["proto1".to_string(), "proto2".to_string()]);
Ok(())
});
let mut rt = Runtime::new().unwrap();
let _ = rt.block_on(future).unwrap();
bg_thread.join().unwrap();
}
}
| {
Multiaddr::try_from(bytes)
.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))
} | identifier_body |
postgres.go | package postgres
import (
"context"
"database/sql"
"database/sql/driver"
"encoding/json"
"time"
kitlog "github.com/go-kit/kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/acme/autocert"
)
var (
// StreamGauge is a gauge of the number of current registered streams
StreamGauge = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: "decode",
Subsystem: "encoder",
Name: "stream_gauge",
Help: "Count of current streams in database",
},
)
)
// Action is a type alias for string - we use for constants
type Action string
const (
// Share defines an action of sharing a sensor without processing
Share Action = "SHARE"
// Bin defines an action of sharing binned values for a sensor
Bin Action = "BIN"
// MovingAverage defines an action of sharing a moving average for a sensor
MovingAverage Action = "MOVING_AVG"
// TokenLength is a constant which controls the length in bytes of the security
// tokens we generate for streams.
TokenLength = 24
// pqUniqueViolation is an error returned by postgres when we attempt to insert
// a row that violates a unique index
pqUniqueViolation = "23505"
)
// Device is a type used when reading data back from the DB. A single Device may
// feed data to multiple streams, hence the separation here with the associated
// Stream type.
type Device struct {
ID int `db:"id"`
DeviceToken string `db:"device_token"`
Label string `db:"device_label"`
Longitude float64 `db:"longitude"`
Latitude float64 `db:"latitude"`
Exposure string `db:"exposure"`
Streams []*Stream
}
// Stream is a type used when reading data back from the DB, and when creating a
// stream. It contains a public key field used when reading data, and for
// creating a new stream has an associated Device instance.
type Stream struct {
CommunityID string `db:"community_id"`
PublicKey string `db:"public_key"`
Operations Operations `db:"operations"`
StreamID string
Token string
Device *Device
}
// Operation is a type used to capture the data around the operations to be
// applied to a Stream.
type Operation struct {
SensorID uint32 `json:"sensorId"`
Action Action `json:"action"`
Bins []float64 `json:"bins"`
Interval uint32 `json:"interval"`
}
// Operations is a type alias for a slice of Operation instance. We add as a
// separate type as we implement sql.Valuer and sql.Scanner interfaces to read
// and write back from the DB.
type Operations []*Operation
// Value is our implementation of the sql.Valuer interface which converts the
// instance into a value that can be written to the database.
func (o Operations) Value() (driver.Value, error) {
return json.Marshal(o)
}
// Scan is our implementation of the sql.Scanner interface which takes the value
// read from the database, and converts it back into an instance of the type.
func (o *Operations) Scan(src interface{}) error {
if o == nil {
return nil
}
source, ok := src.([]byte)
if !ok {
return errors.New("Value read from database cannot be typecast to a byte slice")
}
err := json.Unmarshal(source, &o)
if err != nil {
return errors.Wrap(err, "failed to unmarshal bytes into Operations")
}
return nil
}
// Open is a helper function that takes as input a connection string for a DB,
// and returns either a sqlx.DB instance or an error. This function is separated
// out to help with CLI tasks for managing migrations.
func Open(connStr string) (*sqlx.DB, error) {
return sqlx.Open("postgres", connStr)
}
// DB is our type that wraps an sqlx.DB instance and provides an API for the
// data access functions we require.
type DB struct {
connStr string
encryptionPassword []byte
DB *sqlx.DB
logger kitlog.Logger
}
// Config is used to carry package local configuration for Postgres DB module.
type Config struct {
ConnStr string
EncryptionPassword string
}
// NewDB creates a new DB instance with the given connection string. We also
// pass in a logger.
func NewDB(config *Config, logger kitlog.Logger) *DB {
logger = kitlog.With(logger, "module", "postgres")
return &DB{
connStr: config.ConnStr,
encryptionPassword: []byte(config.EncryptionPassword),
logger: logger,
}
}
// Start creates our DB connection pool running returning an error if any
// failure occurs.
func (d *DB) Start() error {
d.logger.Log("msg", "starting postgres")
db, err := Open(d.connStr)
if err != nil {
return errors.Wrap(err, "opening db connection failed")
}
d.DB = db
go d.recordMetrics()
return nil
}
// Stop closes the DB connection pool.
func (d *DB) Stop() error {
d.logger.Log("msg", "stopping postgres client")
return d.DB.Close()
}
// CreateStream attempts to insert records into the database for the given
// Stream object. Returns a string containing the ID of the created stream if
// successful or an error if any data constraint is violated, or any other error
// occurs.
func (d *DB) CreateStream(stream *Stream) (_ *Stream, err error) {
sql := `INSERT INTO devices
(device_token, longitude, latitude, exposure, device_label)
VALUES (:device_token, :longitude, :latitude, :exposure, :device_label)
ON CONFLICT (device_token) DO UPDATE
SET longitude = EXCLUDED.longitude,
latitude = EXCLUDED.latitude,
exposure = EXCLUDED.exposure,
device_label = EXCLUDED.device_label
RETURNING id`
mapArgs := map[string]interface{}{
"device_token": stream.Device.DeviceToken,
"longitude": stream.Device.Longitude,
"latitude": stream.Device.Latitude,
"exposure": stream.Device.Exposure,
"device_label": stream.Device.Label,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to start transaction when inserting device")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var deviceID int
// we use a Get for the upsert so we get back the device id
err = tx.Get(&deviceID, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to save device")
}
streamID, err := uuid.NewRandom()
if err != nil {
return nil, errors.Wrap(err, "failed to generate stream UUID")
}
// streams insert sql
sql = `INSERT INTO streams
(device_id, community_id, public_key, token, operations, uuid)
VALUES (:device_id, :community_id, :public_key, pgp_sym_encrypt(:token, :encryption_password), :operations, :uuid)`
token, err := GenerateToken(TokenLength)
if err != nil {
return nil, errors.Wrap(err, "failed to generate random token")
}
mapArgs = map[string]interface{}{
"device_id": deviceID,
"community_id": stream.CommunityID,
"public_key": stream.PublicKey,
"token": token,
"encryption_password": d.encryptionPassword,
"operations": stream.Operations,
"uuid": streamID.String(),
}
err = tx.Exec(sql, mapArgs)
if err != nil {
if pqErr, ok := err.(*pq.Error); ok {
if pqErr.Code == pqUniqueViolation {
return nil, errors.New("failed to create stream: device already registered within community")
}
}
return nil, errors.Wrap(err, "failed to create stream")
}
stream.StreamID = streamID.String()
stream.Token = token
return stream, err
}
// DeleteStream deletes a stream identified by the given id string. If this
// stream is the last one associated with a device, then the device record is
// also deleted. We return a Device object purely so we can pass back out the
// token allowing us to unsubscribe.
func (d *DB) DeleteStream(stream *Stream) (_ *Device, err error) {
sql := `DELETE FROM streams
WHERE uuid = :uuid
AND pgp_sym_decrypt(token, :encryption_password) = :token
RETURNING device_id`
mapArgs := map[string]interface{}{
"uuid": stream.StreamID,
"encryption_password": d.encryptionPassword,
"token": stream.Token,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to start transaction when deleting stream")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var deviceID int
// again use a Get to run the delete so we get back the device's id
err = tx.Get(&deviceID, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to delete stream")
}
// now we count streams for that device id, and if no more we should also
// delete the device and unsubscribe from its topic
sql = `SELECT COUNT(*) FROM streams WHERE device_id = :device_id`
mapArgs = map[string]interface{}{
"device_id": deviceID,
}
var streamCount int
// again use a Get to get the count
err = tx.Get(&streamCount, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to count streams")
}
if streamCount == 0 {
// delete the device too
sql = `DELETE FROM devices WHERE id = :id RETURNING device_token`
mapArgs = map[string]interface{}{
"id": deviceID,
}
var device Device
err = tx.Get(&device, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to delete device")
}
return &device, nil
}
return nil, nil
}
// GetDevices returns a slice of pointers to Device instances. We don't worry
// about pagination here as we have a maximum number of devices of approximately
// 25 to 50. Note we do not load all streams for these devices.
func (d *DB) GetDevices() ([]*Device, error) |
// GetDevice returns a single device identified by device_token, including all streams
// for that device. This is used to set up subscriptions for existing records on
// application start.
func (d *DB) GetDevice(deviceToken string) (_ *Device, err error) {
sql := `SELECT id, device_token, longitude, latitude, exposure, device_label
FROM devices
WHERE device_token = :device_token`
mapArgs := map[string]interface{}{
"device_token": deviceToken,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var device Device
err = tx.Get(&device, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to load device")
}
// now load streams
sql = `SELECT community_id, public_key, operations FROM streams WHERE device_id = :device_id`
mapArgs = map[string]interface{}{
"device_id": device.ID,
}
streams := []*Stream{}
mapper := func(rows *sqlx.Rows) error {
for rows.Next() {
var s Stream
err = rows.StructScan(&s)
if err != nil {
return errors.Wrap(err, "failed to scan stream row into struct")
}
streams = append(streams, &s)
}
return nil
}
err = tx.Map(sql, mapArgs, mapper)
if err != nil {
return nil, errors.Wrap(err, "failed to execute row mapper")
}
device.Streams = streams
return &device, nil
}
// MigrateUp is a convenience function to run all up migrations in the context
// of an instantiated DB instance.
func (d *DB) MigrateUp() error {
return MigrateUp(d.DB.DB, d.logger)
}
// Ping attempts to verify the database connection is still alive by executing a
// simple select query on the database server. We don't use the built in
// DB.Ping() function here as this may not go to the database if there existing
// connections in the pool.
func (d *DB) Ping() error {
_, err := d.DB.Exec("SELECT 1")
if err != nil {
return err
}
return nil
}
// Get is an implementation of the Get method of the autocert.Cache interface.
func (d *DB) Get(ctx context.Context, key string) ([]byte, error) {
query := `SELECT certificate FROM certificates WHERE key = $1`
var cert []byte
err := d.DB.Get(&cert, query, key)
if err != nil {
if err == sql.ErrNoRows {
return nil, autocert.ErrCacheMiss
}
return nil, errors.Wrap(err, "failed to read certificate from DB")
}
return cert, nil
}
// Put is an implementation of the Put method of the autocert.Cache interface
// for saving certificates
func (d *DB) Put(ctx context.Context, key string, cert []byte) error {
query := `INSERT INTO certificates (key, certificate)
VALUES (:key, :certificate)
ON CONFLICT (key)
DO UPDATE SET certificate = EXCLUDED.certificate`
mapArgs := map[string]interface{}{
"key": key,
"certificate": cert,
}
tx, err := d.DB.Beginx()
if err != nil {
return errors.Wrap(err, "failed to begin transaction when writing certificate")
}
query, args, err := tx.BindNamed(query, mapArgs)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to bind named parameters")
}
_, err = tx.Exec(query, args...)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to insert certificate")
}
return tx.Commit()
}
// Delete is an implementation of the Delete method of the autocert.Cache
// interface method for deleting certificates.
func (d *DB) Delete(ctx context.Context, key string) error {
query := `DELETE FROM certificates WHERE key = $1`
tx, err := d.DB.Beginx()
if err != nil {
return errors.Wrap(err, "failed to begin transaction when deleting certificate")
}
_, err = tx.Exec(query, key)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to delete certificate")
}
return tx.Commit()
}
// recordMetrics starts a ticker to collect some gauge related metrics from the
// DB on a 30 second interval
func (d *DB) recordMetrics() {
ticker := time.NewTicker(time.Second * time.Duration(30))
for range ticker.C {
var streamCount float64
err := d.DB.Get(&streamCount, `SELECT COUNT(*) FROM streams`)
if err != nil {
d.logger.Log(
"msg", "error counting streams",
"err", err,
)
continue
}
StreamGauge.Set(streamCount)
}
}
| {
sql := `SELECT id, device_token FROM devices`
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
devices := []*Device{}
mapper := func(rows *sqlx.Rows) error {
for rows.Next() {
var d Device
err = rows.StructScan(&d)
if err != nil {
return errors.Wrap(err, "failed to scan row into Device struct")
}
devices = append(devices, &d)
}
return nil
}
err = tx.Map(sql, []interface{}{}, mapper)
if err != nil {
return nil, errors.Wrap(err, "failed to select device rows from database")
}
return devices, nil
} | identifier_body |
postgres.go | package postgres
import (
"context"
"database/sql"
"database/sql/driver"
"encoding/json"
"time"
kitlog "github.com/go-kit/kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/acme/autocert"
)
var (
// StreamGauge is a gauge of the number of current registered streams
StreamGauge = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: "decode",
Subsystem: "encoder",
Name: "stream_gauge",
Help: "Count of current streams in database",
},
)
)
// Action is a type alias for string - we use for constants
type Action string
const (
// Share defines an action of sharing a sensor without processing
Share Action = "SHARE"
// Bin defines an action of sharing binned values for a sensor
Bin Action = "BIN"
// MovingAverage defines an action of sharing a moving average for a sensor
MovingAverage Action = "MOVING_AVG"
// TokenLength is a constant which controls the length in bytes of the security
// tokens we generate for streams.
TokenLength = 24
// pqUniqueViolation is an error returned by postgres when we attempt to insert
// a row that violates a unique index
pqUniqueViolation = "23505"
)
// Device is a type used when reading data back from the DB. A single Device may
// feed data to multiple streams, hence the separation here with the associated
// Stream type.
type Device struct {
ID int `db:"id"`
DeviceToken string `db:"device_token"`
Label string `db:"device_label"`
Longitude float64 `db:"longitude"`
Latitude float64 `db:"latitude"`
Exposure string `db:"exposure"`
Streams []*Stream
}
// Stream is a type used when reading data back from the DB, and when creating a
// stream. It contains a public key field used when reading data, and for
// creating a new stream has an associated Device instance.
type Stream struct {
CommunityID string `db:"community_id"`
PublicKey string `db:"public_key"`
Operations Operations `db:"operations"`
StreamID string
Token string
Device *Device
}
// Operation is a type used to capture the data around the operations to be
// applied to a Stream.
type Operation struct {
SensorID uint32 `json:"sensorId"`
Action Action `json:"action"`
Bins []float64 `json:"bins"`
Interval uint32 `json:"interval"`
}
// Operations is a type alias for a slice of Operation instance. We add as a
// separate type as we implement sql.Valuer and sql.Scanner interfaces to read
// and write back from the DB.
type Operations []*Operation
// Value is our implementation of the sql.Valuer interface which converts the
// instance into a value that can be written to the database.
func (o Operations) Value() (driver.Value, error) {
return json.Marshal(o)
}
// Scan is our implementation of the sql.Scanner interface which takes the value
// read from the database, and converts it back into an instance of the type.
func (o *Operations) Scan(src interface{}) error {
if o == nil {
return nil
}
source, ok := src.([]byte)
if !ok {
return errors.New("Value read from database cannot be typecast to a byte slice")
}
err := json.Unmarshal(source, &o)
if err != nil {
return errors.Wrap(err, "failed to unmarshal bytes into Operations")
}
return nil
}
// Open is a helper function that takes as input a connection string for a DB,
// and returns either a sqlx.DB instance or an error. This function is separated
// out to help with CLI tasks for managing migrations.
func Open(connStr string) (*sqlx.DB, error) {
return sqlx.Open("postgres", connStr)
}
// DB is our type that wraps an sqlx.DB instance and provides an API for the
// data access functions we require.
type DB struct {
connStr string
encryptionPassword []byte
DB *sqlx.DB
logger kitlog.Logger
}
// Config is used to carry package local configuration for Postgres DB module.
type Config struct {
ConnStr string
EncryptionPassword string
}
// NewDB creates a new DB instance with the given connection string. We also
// pass in a logger.
func NewDB(config *Config, logger kitlog.Logger) *DB {
logger = kitlog.With(logger, "module", "postgres")
return &DB{
connStr: config.ConnStr,
encryptionPassword: []byte(config.EncryptionPassword),
logger: logger,
}
}
// Start creates our DB connection pool running returning an error if any
// failure occurs.
func (d *DB) Start() error {
d.logger.Log("msg", "starting postgres")
db, err := Open(d.connStr)
if err != nil {
return errors.Wrap(err, "opening db connection failed")
}
d.DB = db
go d.recordMetrics()
return nil
}
// Stop closes the DB connection pool.
func (d *DB) Stop() error {
d.logger.Log("msg", "stopping postgres client")
return d.DB.Close()
}
// CreateStream attempts to insert records into the database for the given
// Stream object. Returns a string containing the ID of the created stream if
// successful or an error if any data constraint is violated, or any other error
// occurs.
func (d *DB) CreateStream(stream *Stream) (_ *Stream, err error) {
sql := `INSERT INTO devices
(device_token, longitude, latitude, exposure, device_label)
VALUES (:device_token, :longitude, :latitude, :exposure, :device_label)
ON CONFLICT (device_token) DO UPDATE
SET longitude = EXCLUDED.longitude,
latitude = EXCLUDED.latitude,
exposure = EXCLUDED.exposure,
device_label = EXCLUDED.device_label
RETURNING id`
mapArgs := map[string]interface{}{
"device_token": stream.Device.DeviceToken,
"longitude": stream.Device.Longitude,
"latitude": stream.Device.Latitude,
"exposure": stream.Device.Exposure,
"device_label": stream.Device.Label,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to start transaction when inserting device")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var deviceID int
// we use a Get for the upsert so we get back the device id
err = tx.Get(&deviceID, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to save device")
}
streamID, err := uuid.NewRandom()
if err != nil {
return nil, errors.Wrap(err, "failed to generate stream UUID")
}
// streams insert sql
sql = `INSERT INTO streams
(device_id, community_id, public_key, token, operations, uuid)
VALUES (:device_id, :community_id, :public_key, pgp_sym_encrypt(:token, :encryption_password), :operations, :uuid)`
token, err := GenerateToken(TokenLength)
if err != nil {
return nil, errors.Wrap(err, "failed to generate random token")
}
mapArgs = map[string]interface{}{
"device_id": deviceID,
"community_id": stream.CommunityID,
"public_key": stream.PublicKey,
"token": token,
"encryption_password": d.encryptionPassword,
"operations": stream.Operations,
"uuid": streamID.String(),
}
err = tx.Exec(sql, mapArgs)
if err != nil {
if pqErr, ok := err.(*pq.Error); ok {
if pqErr.Code == pqUniqueViolation {
return nil, errors.New("failed to create stream: device already registered within community")
}
}
return nil, errors.Wrap(err, "failed to create stream")
}
stream.StreamID = streamID.String()
stream.Token = token
return stream, err
}
// DeleteStream deletes a stream identified by the given id string. If this
// stream is the last one associated with a device, then the device record is
// also deleted. We return a Device object purely so we can pass back out the
// token allowing us to unsubscribe.
func (d *DB) | (stream *Stream) (_ *Device, err error) {
sql := `DELETE FROM streams
WHERE uuid = :uuid
AND pgp_sym_decrypt(token, :encryption_password) = :token
RETURNING device_id`
mapArgs := map[string]interface{}{
"uuid": stream.StreamID,
"encryption_password": d.encryptionPassword,
"token": stream.Token,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to start transaction when deleting stream")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var deviceID int
// again use a Get to run the delete so we get back the device's id
err = tx.Get(&deviceID, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to delete stream")
}
// now we count streams for that device id, and if no more we should also
// delete the device and unsubscribe from its topic
sql = `SELECT COUNT(*) FROM streams WHERE device_id = :device_id`
mapArgs = map[string]interface{}{
"device_id": deviceID,
}
var streamCount int
// again use a Get to get the count
err = tx.Get(&streamCount, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to count streams")
}
if streamCount == 0 {
// delete the device too
sql = `DELETE FROM devices WHERE id = :id RETURNING device_token`
mapArgs = map[string]interface{}{
"id": deviceID,
}
var device Device
err = tx.Get(&device, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to delete device")
}
return &device, nil
}
return nil, nil
}
// GetDevices returns a slice of pointers to Device instances. We don't worry
// about pagination here as we have a maximum number of devices of approximately
// 25 to 50. Note we do not load all streams for these devices.
func (d *DB) GetDevices() ([]*Device, error) {
sql := `SELECT id, device_token FROM devices`
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
devices := []*Device{}
mapper := func(rows *sqlx.Rows) error {
for rows.Next() {
var d Device
err = rows.StructScan(&d)
if err != nil {
return errors.Wrap(err, "failed to scan row into Device struct")
}
devices = append(devices, &d)
}
return nil
}
err = tx.Map(sql, []interface{}{}, mapper)
if err != nil {
return nil, errors.Wrap(err, "failed to select device rows from database")
}
return devices, nil
}
// GetDevice returns a single device identified by device_token, including all streams
// for that device. This is used to set up subscriptions for existing records on
// application start.
func (d *DB) GetDevice(deviceToken string) (_ *Device, err error) {
sql := `SELECT id, device_token, longitude, latitude, exposure, device_label
FROM devices
WHERE device_token = :device_token`
mapArgs := map[string]interface{}{
"device_token": deviceToken,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var device Device
err = tx.Get(&device, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to load device")
}
// now load streams
sql = `SELECT community_id, public_key, operations FROM streams WHERE device_id = :device_id`
mapArgs = map[string]interface{}{
"device_id": device.ID,
}
streams := []*Stream{}
mapper := func(rows *sqlx.Rows) error {
for rows.Next() {
var s Stream
err = rows.StructScan(&s)
if err != nil {
return errors.Wrap(err, "failed to scan stream row into struct")
}
streams = append(streams, &s)
}
return nil
}
err = tx.Map(sql, mapArgs, mapper)
if err != nil {
return nil, errors.Wrap(err, "failed to execute row mapper")
}
device.Streams = streams
return &device, nil
}
// MigrateUp is a convenience function to run all up migrations in the context
// of an instantiated DB instance.
func (d *DB) MigrateUp() error {
return MigrateUp(d.DB.DB, d.logger)
}
// Ping attempts to verify the database connection is still alive by executing a
// simple select query on the database server. We don't use the built in
// DB.Ping() function here as this may not go to the database if there existing
// connections in the pool.
func (d *DB) Ping() error {
_, err := d.DB.Exec("SELECT 1")
if err != nil {
return err
}
return nil
}
// Get is an implementation of the Get method of the autocert.Cache interface.
func (d *DB) Get(ctx context.Context, key string) ([]byte, error) {
query := `SELECT certificate FROM certificates WHERE key = $1`
var cert []byte
err := d.DB.Get(&cert, query, key)
if err != nil {
if err == sql.ErrNoRows {
return nil, autocert.ErrCacheMiss
}
return nil, errors.Wrap(err, "failed to read certificate from DB")
}
return cert, nil
}
// Put is an implementation of the Put method of the autocert.Cache interface
// for saving certificates
func (d *DB) Put(ctx context.Context, key string, cert []byte) error {
query := `INSERT INTO certificates (key, certificate)
VALUES (:key, :certificate)
ON CONFLICT (key)
DO UPDATE SET certificate = EXCLUDED.certificate`
mapArgs := map[string]interface{}{
"key": key,
"certificate": cert,
}
tx, err := d.DB.Beginx()
if err != nil {
return errors.Wrap(err, "failed to begin transaction when writing certificate")
}
query, args, err := tx.BindNamed(query, mapArgs)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to bind named parameters")
}
_, err = tx.Exec(query, args...)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to insert certificate")
}
return tx.Commit()
}
// Delete is an implementation of the Delete method of the autocert.Cache
// interface method for deleting certificates.
func (d *DB) Delete(ctx context.Context, key string) error {
query := `DELETE FROM certificates WHERE key = $1`
tx, err := d.DB.Beginx()
if err != nil {
return errors.Wrap(err, "failed to begin transaction when deleting certificate")
}
_, err = tx.Exec(query, key)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to delete certificate")
}
return tx.Commit()
}
// recordMetrics starts a ticker to collect some gauge related metrics from the
// DB on a 30 second interval
func (d *DB) recordMetrics() {
ticker := time.NewTicker(time.Second * time.Duration(30))
for range ticker.C {
var streamCount float64
err := d.DB.Get(&streamCount, `SELECT COUNT(*) FROM streams`)
if err != nil {
d.logger.Log(
"msg", "error counting streams",
"err", err,
)
continue
}
StreamGauge.Set(streamCount)
}
}
| DeleteStream | identifier_name |
postgres.go | package postgres
import (
"context"
"database/sql"
"database/sql/driver"
"encoding/json"
"time"
kitlog "github.com/go-kit/kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/acme/autocert"
)
var (
// StreamGauge is a gauge of the number of current registered streams
StreamGauge = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: "decode",
Subsystem: "encoder",
Name: "stream_gauge",
Help: "Count of current streams in database",
},
)
)
// Action is a type alias for string - we use for constants
type Action string
const (
// Share defines an action of sharing a sensor without processing
Share Action = "SHARE"
// Bin defines an action of sharing binned values for a sensor
Bin Action = "BIN"
// MovingAverage defines an action of sharing a moving average for a sensor
MovingAverage Action = "MOVING_AVG"
// TokenLength is a constant which controls the length in bytes of the security
// tokens we generate for streams.
TokenLength = 24
// pqUniqueViolation is an error returned by postgres when we attempt to insert
// a row that violates a unique index
pqUniqueViolation = "23505"
)
// Device is a type used when reading data back from the DB. A single Device may
// feed data to multiple streams, hence the separation here with the associated
// Stream type.
type Device struct {
ID int `db:"id"`
DeviceToken string `db:"device_token"`
Label string `db:"device_label"`
Longitude float64 `db:"longitude"`
Latitude float64 `db:"latitude"`
Exposure string `db:"exposure"`
Streams []*Stream
}
// Stream is a type used when reading data back from the DB, and when creating a
// stream. It contains a public key field used when reading data, and for
// creating a new stream has an associated Device instance.
type Stream struct {
CommunityID string `db:"community_id"`
PublicKey string `db:"public_key"`
Operations Operations `db:"operations"`
StreamID string
Token string
Device *Device
}
// Operation is a type used to capture the data around the operations to be
// applied to a Stream.
type Operation struct {
SensorID uint32 `json:"sensorId"`
Action Action `json:"action"`
Bins []float64 `json:"bins"`
Interval uint32 `json:"interval"`
}
// Operations is a type alias for a slice of Operation instance. We add as a
// separate type as we implement sql.Valuer and sql.Scanner interfaces to read
// and write back from the DB.
type Operations []*Operation
// Value is our implementation of the sql.Valuer interface which converts the
// instance into a value that can be written to the database.
func (o Operations) Value() (driver.Value, error) {
return json.Marshal(o)
}
// Scan is our implementation of the sql.Scanner interface which takes the value
// read from the database, and converts it back into an instance of the type.
func (o *Operations) Scan(src interface{}) error {
if o == nil {
return nil
}
source, ok := src.([]byte)
if !ok {
return errors.New("Value read from database cannot be typecast to a byte slice")
}
err := json.Unmarshal(source, &o)
if err != nil {
return errors.Wrap(err, "failed to unmarshal bytes into Operations")
}
return nil
}
// Open is a helper function that takes as input a connection string for a DB,
// and returns either a sqlx.DB instance or an error. This function is separated
// out to help with CLI tasks for managing migrations.
func Open(connStr string) (*sqlx.DB, error) {
return sqlx.Open("postgres", connStr)
}
// DB is our type that wraps an sqlx.DB instance and provides an API for the
// data access functions we require.
type DB struct {
connStr string
encryptionPassword []byte
DB *sqlx.DB
logger kitlog.Logger
}
// Config is used to carry package local configuration for Postgres DB module.
type Config struct {
ConnStr string
EncryptionPassword string
}
// NewDB creates a new DB instance with the given connection string. We also
// pass in a logger.
func NewDB(config *Config, logger kitlog.Logger) *DB {
logger = kitlog.With(logger, "module", "postgres")
return &DB{
connStr: config.ConnStr,
encryptionPassword: []byte(config.EncryptionPassword),
logger: logger,
}
}
// Start creates our DB connection pool running returning an error if any
// failure occurs.
func (d *DB) Start() error {
d.logger.Log("msg", "starting postgres")
db, err := Open(d.connStr)
if err != nil {
return errors.Wrap(err, "opening db connection failed")
}
d.DB = db
go d.recordMetrics()
return nil
}
// Stop closes the DB connection pool.
func (d *DB) Stop() error {
d.logger.Log("msg", "stopping postgres client")
return d.DB.Close()
}
// CreateStream attempts to insert records into the database for the given
// Stream object. Returns a string containing the ID of the created stream if
// successful or an error if any data constraint is violated, or any other error
// occurs.
func (d *DB) CreateStream(stream *Stream) (_ *Stream, err error) {
sql := `INSERT INTO devices
(device_token, longitude, latitude, exposure, device_label)
VALUES (:device_token, :longitude, :latitude, :exposure, :device_label)
ON CONFLICT (device_token) DO UPDATE
SET longitude = EXCLUDED.longitude,
latitude = EXCLUDED.latitude,
exposure = EXCLUDED.exposure,
device_label = EXCLUDED.device_label
RETURNING id`
mapArgs := map[string]interface{}{
"device_token": stream.Device.DeviceToken,
"longitude": stream.Device.Longitude,
"latitude": stream.Device.Latitude,
"exposure": stream.Device.Exposure,
"device_label": stream.Device.Label,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to start transaction when inserting device")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var deviceID int
// we use a Get for the upsert so we get back the device id
err = tx.Get(&deviceID, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to save device")
}
streamID, err := uuid.NewRandom()
if err != nil {
return nil, errors.Wrap(err, "failed to generate stream UUID")
}
// streams insert sql
sql = `INSERT INTO streams
(device_id, community_id, public_key, token, operations, uuid)
VALUES (:device_id, :community_id, :public_key, pgp_sym_encrypt(:token, :encryption_password), :operations, :uuid)`
token, err := GenerateToken(TokenLength)
if err != nil {
return nil, errors.Wrap(err, "failed to generate random token")
}
mapArgs = map[string]interface{}{
"device_id": deviceID,
"community_id": stream.CommunityID,
"public_key": stream.PublicKey,
"token": token,
"encryption_password": d.encryptionPassword,
"operations": stream.Operations,
"uuid": streamID.String(),
}
err = tx.Exec(sql, mapArgs)
if err != nil {
if pqErr, ok := err.(*pq.Error); ok {
if pqErr.Code == pqUniqueViolation {
return nil, errors.New("failed to create stream: device already registered within community")
}
}
return nil, errors.Wrap(err, "failed to create stream")
}
stream.StreamID = streamID.String()
stream.Token = token
return stream, err
}
// DeleteStream deletes a stream identified by the given id string. If this
// stream is the last one associated with a device, then the device record is
// also deleted. We return a Device object purely so we can pass back out the
// token allowing us to unsubscribe.
func (d *DB) DeleteStream(stream *Stream) (_ *Device, err error) {
sql := `DELETE FROM streams
WHERE uuid = :uuid
AND pgp_sym_decrypt(token, :encryption_password) = :token
RETURNING device_id`
mapArgs := map[string]interface{}{
"uuid": stream.StreamID,
"encryption_password": d.encryptionPassword,
"token": stream.Token,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to start transaction when deleting stream")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var deviceID int
// again use a Get to run the delete so we get back the device's id
err = tx.Get(&deviceID, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to delete stream")
}
// now we count streams for that device id, and if no more we should also
// delete the device and unsubscribe from its topic
sql = `SELECT COUNT(*) FROM streams WHERE device_id = :device_id`
mapArgs = map[string]interface{}{
"device_id": deviceID,
}
var streamCount int
// again use a Get to get the count
err = tx.Get(&streamCount, sql, mapArgs)
if err != nil |
if streamCount == 0 {
// delete the device too
sql = `DELETE FROM devices WHERE id = :id RETURNING device_token`
mapArgs = map[string]interface{}{
"id": deviceID,
}
var device Device
err = tx.Get(&device, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to delete device")
}
return &device, nil
}
return nil, nil
}
// GetDevices returns a slice of pointers to Device instances. We don't worry
// about pagination here as we have a maximum number of devices of approximately
// 25 to 50. Note we do not load all streams for these devices.
func (d *DB) GetDevices() ([]*Device, error) {
sql := `SELECT id, device_token FROM devices`
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
devices := []*Device{}
mapper := func(rows *sqlx.Rows) error {
for rows.Next() {
var d Device
err = rows.StructScan(&d)
if err != nil {
return errors.Wrap(err, "failed to scan row into Device struct")
}
devices = append(devices, &d)
}
return nil
}
err = tx.Map(sql, []interface{}{}, mapper)
if err != nil {
return nil, errors.Wrap(err, "failed to select device rows from database")
}
return devices, nil
}
// GetDevice returns a single device identified by device_token, including all streams
// for that device. This is used to set up subscriptions for existing records on
// application start.
func (d *DB) GetDevice(deviceToken string) (_ *Device, err error) {
sql := `SELECT id, device_token, longitude, latitude, exposure, device_label
FROM devices
WHERE device_token = :device_token`
mapArgs := map[string]interface{}{
"device_token": deviceToken,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var device Device
err = tx.Get(&device, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to load device")
}
// now load streams
sql = `SELECT community_id, public_key, operations FROM streams WHERE device_id = :device_id`
mapArgs = map[string]interface{}{
"device_id": device.ID,
}
streams := []*Stream{}
mapper := func(rows *sqlx.Rows) error {
for rows.Next() {
var s Stream
err = rows.StructScan(&s)
if err != nil {
return errors.Wrap(err, "failed to scan stream row into struct")
}
streams = append(streams, &s)
}
return nil
}
err = tx.Map(sql, mapArgs, mapper)
if err != nil {
return nil, errors.Wrap(err, "failed to execute row mapper")
}
device.Streams = streams
return &device, nil
}
// MigrateUp is a convenience function to run all up migrations in the context
// of an instantiated DB instance.
func (d *DB) MigrateUp() error {
return MigrateUp(d.DB.DB, d.logger)
}
// Ping attempts to verify the database connection is still alive by executing a
// simple select query on the database server. We don't use the built in
// DB.Ping() function here as this may not go to the database if there existing
// connections in the pool.
func (d *DB) Ping() error {
_, err := d.DB.Exec("SELECT 1")
if err != nil {
return err
}
return nil
}
// Get is an implementation of the Get method of the autocert.Cache interface.
func (d *DB) Get(ctx context.Context, key string) ([]byte, error) {
query := `SELECT certificate FROM certificates WHERE key = $1`
var cert []byte
err := d.DB.Get(&cert, query, key)
if err != nil {
if err == sql.ErrNoRows {
return nil, autocert.ErrCacheMiss
}
return nil, errors.Wrap(err, "failed to read certificate from DB")
}
return cert, nil
}
// Put is an implementation of the Put method of the autocert.Cache interface
// for saving certificates
func (d *DB) Put(ctx context.Context, key string, cert []byte) error {
query := `INSERT INTO certificates (key, certificate)
VALUES (:key, :certificate)
ON CONFLICT (key)
DO UPDATE SET certificate = EXCLUDED.certificate`
mapArgs := map[string]interface{}{
"key": key,
"certificate": cert,
}
tx, err := d.DB.Beginx()
if err != nil {
return errors.Wrap(err, "failed to begin transaction when writing certificate")
}
query, args, err := tx.BindNamed(query, mapArgs)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to bind named parameters")
}
_, err = tx.Exec(query, args...)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to insert certificate")
}
return tx.Commit()
}
// Delete is an implementation of the Delete method of the autocert.Cache
// interface method for deleting certificates.
func (d *DB) Delete(ctx context.Context, key string) error {
query := `DELETE FROM certificates WHERE key = $1`
tx, err := d.DB.Beginx()
if err != nil {
return errors.Wrap(err, "failed to begin transaction when deleting certificate")
}
_, err = tx.Exec(query, key)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to delete certificate")
}
return tx.Commit()
}
// recordMetrics starts a ticker to collect some gauge related metrics from the
// DB on a 30 second interval
func (d *DB) recordMetrics() {
ticker := time.NewTicker(time.Second * time.Duration(30))
for range ticker.C {
var streamCount float64
err := d.DB.Get(&streamCount, `SELECT COUNT(*) FROM streams`)
if err != nil {
d.logger.Log(
"msg", "error counting streams",
"err", err,
)
continue
}
StreamGauge.Set(streamCount)
}
}
| {
return nil, errors.Wrap(err, "failed to count streams")
} | conditional_block |
postgres.go | package postgres
import (
"context"
"database/sql"
"database/sql/driver"
"encoding/json"
"time"
kitlog "github.com/go-kit/kit/log"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/acme/autocert"
)
var (
// StreamGauge is a gauge of the number of current registered streams
StreamGauge = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: "decode",
Subsystem: "encoder",
Name: "stream_gauge",
Help: "Count of current streams in database",
},
)
)
// Action is a type alias for string - we use for constants
type Action string
const (
// Share defines an action of sharing a sensor without processing
Share Action = "SHARE"
// Bin defines an action of sharing binned values for a sensor
Bin Action = "BIN"
// MovingAverage defines an action of sharing a moving average for a sensor
MovingAverage Action = "MOVING_AVG"
// TokenLength is a constant which controls the length in bytes of the security
// tokens we generate for streams.
TokenLength = 24
// pqUniqueViolation is an error returned by postgres when we attempt to insert
// a row that violates a unique index
pqUniqueViolation = "23505"
)
// Device is a type used when reading data back from the DB. A single Device may
// feed data to multiple streams, hence the separation here with the associated
// Stream type.
type Device struct {
ID int `db:"id"`
DeviceToken string `db:"device_token"`
Label string `db:"device_label"`
Longitude float64 `db:"longitude"`
Latitude float64 `db:"latitude"`
Exposure string `db:"exposure"`
Streams []*Stream
}
// Stream is a type used when reading data back from the DB, and when creating a
// stream. It contains a public key field used when reading data, and for
// creating a new stream has an associated Device instance.
type Stream struct {
CommunityID string `db:"community_id"`
PublicKey string `db:"public_key"`
Operations Operations `db:"operations"`
StreamID string
Token string
Device *Device
}
// Operation is a type used to capture the data around the operations to be
// applied to a Stream.
type Operation struct {
SensorID uint32 `json:"sensorId"`
Action Action `json:"action"`
Bins []float64 `json:"bins"`
Interval uint32 `json:"interval"`
} | // and write back from the DB.
type Operations []*Operation
// Value is our implementation of the sql.Valuer interface which converts the
// instance into a value that can be written to the database.
func (o Operations) Value() (driver.Value, error) {
return json.Marshal(o)
}
// Scan is our implementation of the sql.Scanner interface which takes the value
// read from the database, and converts it back into an instance of the type.
func (o *Operations) Scan(src interface{}) error {
if o == nil {
return nil
}
source, ok := src.([]byte)
if !ok {
return errors.New("Value read from database cannot be typecast to a byte slice")
}
err := json.Unmarshal(source, &o)
if err != nil {
return errors.Wrap(err, "failed to unmarshal bytes into Operations")
}
return nil
}
// Open is a helper function that takes as input a connection string for a DB,
// and returns either a sqlx.DB instance or an error. This function is separated
// out to help with CLI tasks for managing migrations.
func Open(connStr string) (*sqlx.DB, error) {
return sqlx.Open("postgres", connStr)
}
// DB is our type that wraps an sqlx.DB instance and provides an API for the
// data access functions we require.
type DB struct {
connStr string
encryptionPassword []byte
DB *sqlx.DB
logger kitlog.Logger
}
// Config is used to carry package local configuration for Postgres DB module.
type Config struct {
ConnStr string
EncryptionPassword string
}
// NewDB creates a new DB instance with the given connection string. We also
// pass in a logger.
func NewDB(config *Config, logger kitlog.Logger) *DB {
logger = kitlog.With(logger, "module", "postgres")
return &DB{
connStr: config.ConnStr,
encryptionPassword: []byte(config.EncryptionPassword),
logger: logger,
}
}
// Start creates our DB connection pool running returning an error if any
// failure occurs.
func (d *DB) Start() error {
d.logger.Log("msg", "starting postgres")
db, err := Open(d.connStr)
if err != nil {
return errors.Wrap(err, "opening db connection failed")
}
d.DB = db
go d.recordMetrics()
return nil
}
// Stop closes the DB connection pool.
func (d *DB) Stop() error {
d.logger.Log("msg", "stopping postgres client")
return d.DB.Close()
}
// CreateStream attempts to insert records into the database for the given
// Stream object. Returns a string containing the ID of the created stream if
// successful or an error if any data constraint is violated, or any other error
// occurs.
func (d *DB) CreateStream(stream *Stream) (_ *Stream, err error) {
sql := `INSERT INTO devices
(device_token, longitude, latitude, exposure, device_label)
VALUES (:device_token, :longitude, :latitude, :exposure, :device_label)
ON CONFLICT (device_token) DO UPDATE
SET longitude = EXCLUDED.longitude,
latitude = EXCLUDED.latitude,
exposure = EXCLUDED.exposure,
device_label = EXCLUDED.device_label
RETURNING id`
mapArgs := map[string]interface{}{
"device_token": stream.Device.DeviceToken,
"longitude": stream.Device.Longitude,
"latitude": stream.Device.Latitude,
"exposure": stream.Device.Exposure,
"device_label": stream.Device.Label,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to start transaction when inserting device")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var deviceID int
// we use a Get for the upsert so we get back the device id
err = tx.Get(&deviceID, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to save device")
}
streamID, err := uuid.NewRandom()
if err != nil {
return nil, errors.Wrap(err, "failed to generate stream UUID")
}
// streams insert sql
sql = `INSERT INTO streams
(device_id, community_id, public_key, token, operations, uuid)
VALUES (:device_id, :community_id, :public_key, pgp_sym_encrypt(:token, :encryption_password), :operations, :uuid)`
token, err := GenerateToken(TokenLength)
if err != nil {
return nil, errors.Wrap(err, "failed to generate random token")
}
mapArgs = map[string]interface{}{
"device_id": deviceID,
"community_id": stream.CommunityID,
"public_key": stream.PublicKey,
"token": token,
"encryption_password": d.encryptionPassword,
"operations": stream.Operations,
"uuid": streamID.String(),
}
err = tx.Exec(sql, mapArgs)
if err != nil {
if pqErr, ok := err.(*pq.Error); ok {
if pqErr.Code == pqUniqueViolation {
return nil, errors.New("failed to create stream: device already registered within community")
}
}
return nil, errors.Wrap(err, "failed to create stream")
}
stream.StreamID = streamID.String()
stream.Token = token
return stream, err
}
// DeleteStream deletes a stream identified by the given id string. If this
// stream is the last one associated with a device, then the device record is
// also deleted. We return a Device object purely so we can pass back out the
// token allowing us to unsubscribe.
func (d *DB) DeleteStream(stream *Stream) (_ *Device, err error) {
sql := `DELETE FROM streams
WHERE uuid = :uuid
AND pgp_sym_decrypt(token, :encryption_password) = :token
RETURNING device_id`
mapArgs := map[string]interface{}{
"uuid": stream.StreamID,
"encryption_password": d.encryptionPassword,
"token": stream.Token,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to start transaction when deleting stream")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var deviceID int
// again use a Get to run the delete so we get back the device's id
err = tx.Get(&deviceID, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to delete stream")
}
// now we count streams for that device id, and if no more we should also
// delete the device and unsubscribe from its topic
sql = `SELECT COUNT(*) FROM streams WHERE device_id = :device_id`
mapArgs = map[string]interface{}{
"device_id": deviceID,
}
var streamCount int
// again use a Get to get the count
err = tx.Get(&streamCount, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to count streams")
}
if streamCount == 0 {
// delete the device too
sql = `DELETE FROM devices WHERE id = :id RETURNING device_token`
mapArgs = map[string]interface{}{
"id": deviceID,
}
var device Device
err = tx.Get(&device, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to delete device")
}
return &device, nil
}
return nil, nil
}
// GetDevices returns a slice of pointers to Device instances. We don't worry
// about pagination here as we have a maximum number of devices of approximately
// 25 to 50. Note we do not load all streams for these devices.
func (d *DB) GetDevices() ([]*Device, error) {
sql := `SELECT id, device_token FROM devices`
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
devices := []*Device{}
mapper := func(rows *sqlx.Rows) error {
for rows.Next() {
var d Device
err = rows.StructScan(&d)
if err != nil {
return errors.Wrap(err, "failed to scan row into Device struct")
}
devices = append(devices, &d)
}
return nil
}
err = tx.Map(sql, []interface{}{}, mapper)
if err != nil {
return nil, errors.Wrap(err, "failed to select device rows from database")
}
return devices, nil
}
// GetDevice returns a single device identified by device_token, including all streams
// for that device. This is used to set up subscriptions for existing records on
// application start.
func (d *DB) GetDevice(deviceToken string) (_ *Device, err error) {
sql := `SELECT id, device_token, longitude, latitude, exposure, device_label
FROM devices
WHERE device_token = :device_token`
mapArgs := map[string]interface{}{
"device_token": deviceToken,
}
tx, err := BeginTX(d.DB)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if cerr := tx.CommitOrRollback(); err == nil && cerr != nil {
err = cerr
}
}()
var device Device
err = tx.Get(&device, sql, mapArgs)
if err != nil {
return nil, errors.Wrap(err, "failed to load device")
}
// now load streams
sql = `SELECT community_id, public_key, operations FROM streams WHERE device_id = :device_id`
mapArgs = map[string]interface{}{
"device_id": device.ID,
}
streams := []*Stream{}
mapper := func(rows *sqlx.Rows) error {
for rows.Next() {
var s Stream
err = rows.StructScan(&s)
if err != nil {
return errors.Wrap(err, "failed to scan stream row into struct")
}
streams = append(streams, &s)
}
return nil
}
err = tx.Map(sql, mapArgs, mapper)
if err != nil {
return nil, errors.Wrap(err, "failed to execute row mapper")
}
device.Streams = streams
return &device, nil
}
// MigrateUp is a convenience function to run all up migrations in the context
// of an instantiated DB instance.
func (d *DB) MigrateUp() error {
return MigrateUp(d.DB.DB, d.logger)
}
// Ping attempts to verify the database connection is still alive by executing a
// simple select query on the database server. We don't use the built in
// DB.Ping() function here as this may not go to the database if there existing
// connections in the pool.
func (d *DB) Ping() error {
_, err := d.DB.Exec("SELECT 1")
if err != nil {
return err
}
return nil
}
// Get is an implementation of the Get method of the autocert.Cache interface.
func (d *DB) Get(ctx context.Context, key string) ([]byte, error) {
query := `SELECT certificate FROM certificates WHERE key = $1`
var cert []byte
err := d.DB.Get(&cert, query, key)
if err != nil {
if err == sql.ErrNoRows {
return nil, autocert.ErrCacheMiss
}
return nil, errors.Wrap(err, "failed to read certificate from DB")
}
return cert, nil
}
// Put is an implementation of the Put method of the autocert.Cache interface
// for saving certificates
func (d *DB) Put(ctx context.Context, key string, cert []byte) error {
query := `INSERT INTO certificates (key, certificate)
VALUES (:key, :certificate)
ON CONFLICT (key)
DO UPDATE SET certificate = EXCLUDED.certificate`
mapArgs := map[string]interface{}{
"key": key,
"certificate": cert,
}
tx, err := d.DB.Beginx()
if err != nil {
return errors.Wrap(err, "failed to begin transaction when writing certificate")
}
query, args, err := tx.BindNamed(query, mapArgs)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to bind named parameters")
}
_, err = tx.Exec(query, args...)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to insert certificate")
}
return tx.Commit()
}
// Delete is an implementation of the Delete method of the autocert.Cache
// interface method for deleting certificates.
func (d *DB) Delete(ctx context.Context, key string) error {
query := `DELETE FROM certificates WHERE key = $1`
tx, err := d.DB.Beginx()
if err != nil {
return errors.Wrap(err, "failed to begin transaction when deleting certificate")
}
_, err = tx.Exec(query, key)
if err != nil {
tx.Rollback()
return errors.Wrap(err, "failed to delete certificate")
}
return tx.Commit()
}
// recordMetrics starts a ticker to collect some gauge related metrics from the
// DB on a 30 second interval
func (d *DB) recordMetrics() {
ticker := time.NewTicker(time.Second * time.Duration(30))
for range ticker.C {
var streamCount float64
err := d.DB.Get(&streamCount, `SELECT COUNT(*) FROM streams`)
if err != nil {
d.logger.Log(
"msg", "error counting streams",
"err", err,
)
continue
}
StreamGauge.Set(streamCount)
}
} |
// Operations is a type alias for a slice of Operation instance. We add as a
// separate type as we implement sql.Valuer and sql.Scanner interfaces to read | random_line_split |
index.js | import React from 'react';
import ReactDOM from 'react-dom';
import lareneTweetImg from './img/larene-tweet.png';
import youtubeImg from './img/youtube.png';
import lighthouseImg from './img/lighthouse.png';
import axeImg from './img/axe.png';
import randomImg from './img/random-img.png';
import emulateVisionVideo from './img/emulateVisionDeficiencies.mp4';
import A11YInsights from './img/a11yInsights.mp4';
import './css/index.css';
import {
Appear,
Box,
CodePane,
CodeSpan,
Deck,
FlexBox,
FullScreen,
Grid,
Heading,
Image,
ListItem,
Markdown,
Notes,
OrderedList,
Link,
Quote,
Progress,
Slide,
Stepper,
Text,
UnorderedList,
indentNormalizer,
} from 'spectacle';
// SPECTACLE_CLI_THEME_START
const theme = {
fonts: {
header: '"Open Sans Condensed", Helvetica, Arial, sans-serif',
text: '"Open Sans Condensed", Helvetica, Arial, sans-serif'
}
};
// SPECTACLE_CLI_THEME_END
// SPECTACLE_CLI_TEMPLATE_START
const template = () => (
<FlexBox
justifyContent="space-between"
position="absolute"
bottom={0}
width={1}
>
<Box padding="0 1em">
<FullScreen />
</Box>
<Box padding="1em">
<Progress />
</Box>
</FlexBox>
);
// SPECTACLE_CLI_TEMPLATE_END
const cssTricksCodeBlock = indentNormalizer(`
img:not([alt]),
img[alt=""] {
border: 5px dashed red;
}
`)
const Presentation = () => (
<Deck theme={theme} template={template} transitionEffect="fade">
<Slide>
<FlexBox height="100%" flexDirection="column">
<Heading margin="0px" fontSize="80px">
Making WebApps Accessible for Everyone
</Heading>
<Heading margin="0px 32px" color="primary" fontSize="h2">
Maxence Poutord - @_maxpou
</Heading>
</FlexBox>
</Slide>
<Slide
backgroundColor="tertiary"
backgroundImage="url(https://heise.cloudimg.io/width/1220/q50.png-lossy-50.webp-lossy-50.foil1/_www-heise-de_/imgs/18/2/7/8/7/7/9/3/AoE2_Header_Microsoft-f5960ac0d6c87473.png)"
backgroundOpacity={0.5}
>
<Heading>The day I lost that battle...</Heading>
</Slide>
<Slide>
<FlexBox height="100%" flexDirection="column">
<Heading margin="0px" fontSize="80px">
Making WebApps Accessible for Everyone
</Heading>
<Heading margin="0px 32px" color="primary" fontSize="h2">
Maxence Poutord - @_maxpou
</Heading>
</FlexBox>
</Slide>
<Slide>
<Image src={lareneTweetImg} alt="Larene's Tweet https://twitter.com/LareneLg/status/1270578058714443776" style={{objectFit: 'contain'}}/>
</Slide>
<Slide>
<Heading>Types of Disabilities</Heading>
<UnorderedList>
<Appear elementNum={0}><ListItem>👨🦯 Visual</ListItem></Appear>
<Appear elementNum={1}><ListItem>👂 Auditory</ListItem></Appear>
<Appear elementNum={2}><ListItem>🗣 Speach</ListItem></Appear>
<Appear elementNum={3}><ListItem>👩🦼 Motor</ListItem></Appear>
<Appear elementNum={4}><ListItem>🧠 Cognitive</ListItem></Appear>
</UnorderedList>
</Slide>
<Slide>
<Heading margin="15% 0px">Building an a11y Friendly webapp in 5 steps</Heading>
</Slide>
<Slide>
<Heading>Step 1. Unplug your mouse</Heading>
</Slide>
<Slide>
<Heading >Exercice: keyboard only feature</Heading>
<Text>🕵️♀️ What to spot?</Text>
<UnorderedList>
<ListItem>Where's the focus</ListItem>
<ListItem>Click works with enter</ListItem>
<ListItem>Non-focusable items</ListItem>
</UnorderedList>
</Slide>
<Slide>
<Heading>Antipattern</Heading>
<CodePane
fontSize={18}
language="css"
autoFillHeight
>
{indentNormalizer(`
:focus {
outline: none;
}
`)}
</CodePane>
<div className='antipatternOutline'>
<button className='btn' onClick={() => console.log('Button 1')}>Button 1</button>
<button className='btn noOutline' onClick={() => console.log('Button 2')}>Button 2</button>
<button className='btn' onClick={() => console.log('Button 3')}>Button 3</button>
<a className='btn' onClick={() => console.log('Button 4')} onKeyUp={() => console.log('Button 4')}>Button 4</a>
<button className='btn' onClick={() => console.log('Button 5')}>Button 5</button>
</div>
</Slide>
<Slide>
<Heading>Cool patterns</Heading>
<Image src={youtubeImg} alt="Youtube skip navigation" style={{width: '100%'}}/>
</Slide>
<Slide>
<Heading>Step 2. Get tools</Heading>
</Slide>
<Slide>
<Heading style={{marginBottom: '0'}}>Lighthouse</Heading>
<Image src={lighthouseImg} alt="Lighthouse audit for Aliexpress.com" style={{width: '70%', margin: '0 auto'}}/>
</Slide>
<Slide>
<Heading style={{marginBottom: '0'}}>Accessibility Insights</Heading>
<video src={A11YInsights} autoPlay={true} controls style={{width: '60%', margin: '0 auto'}} />
</Slide>
<Slide>
<Heading style={{marginBottom: '0'}}>Axe</Heading>
<Image src={axeImg} alt="Axe audit for apple.com" style={{width: '85%', margin: '0 auto'}}/>
</Slide>
<Slide>
<Heading>CSS Hack</Heading>
<CodePane
fontSize={18}
language="css"
autoFillHeight
>
{cssTricksCodeBlock}
</CodePane>
<Image src={randomImg} style={{width: '25%', margin: '0 auto'}}/>
</Slide>
<Slide>
<Heading>Step 3. RTFM</Heading>
</Slide>
<Slide>
<Heading>w3.org</Heading>
<Link href="https://www.w3.org/TR/wai-aria-practices-1.1/#aria_ex" target="_blank" rel="noopener noreferrer">
w3.org/TR/wai-aria-practices-1.1/#aria_ex
</Link>
</Slide>
<Slide>
<Heading>Is My Component a11y friendly?</Heading>
<UnorderedList>
<ListItem>
<Link href="https://v5.getbootstrap.com/docs/5.0/components/collapse/" target="_blank" rel="noopener noreferrer">
v5.getbootstrap.com
</Link>
</ListItem>
<ListItem>
<Link href="https://polaris.shopify.com/components/navigation/tabs#navigation" target="_blank" rel="noopener noreferrer">
polaris.shopify.com | </Slide>
<Slide>
<Heading>What do you prefer?</Heading>
<CodePane
fontSize={18}
language="html"
autoFillHeight
>
{indentNormalizer(`
<button>Save</button>
`)}
</CodePane>
<Text>OR:</Text>
<CodePane
fontSize={18}
language="html"
autoFillHeight
>
{indentNormalizer(`
<div tabindex="0" role="button" onclick="btnHandler(event)" onKeyDown="btnHandler(event)">
Save
</div>
<script>
function btnHandler(event) {
if (event instanceof KeyboardEvent && event.key !== 'Enter' && event.key !== ' ') {
return;
}
// function's body
}
</script>
`)}
</CodePane>
</Slide>
<Slide>
<Heading style={{marginTop: '20%'}}><i>"No ARIA is better than Bad ARIA"</i></Heading>
</Slide>
<Slide>
<Heading>Step 4. Mimic disabilities</Heading>
</Slide>
<Slide>
<Heading style={{marginBottom: '0'}}>DevTools</Heading>
<Text style={{margin: '0 auto'}}><strike>Chrome</strike> Brave > rendering > Emulate vision deficiencies</Text>
<video src={emulateVisionVideo} autoPlay={true} controls style={{width: '50%', margin: '0 auto'}} />
<form id="emulateVisionForm">
<input id="username-demo" type="text" placeholder="enter your email" onKeyUp={() => {
const input = document.getElementById('username-demo')
if (input.value === '') {
input.className='';
return;
}
const isEmail = /\S+@\S+\.\S+/.test(input.value)
isEmail ? input.className="valid" : input.className="invalid"
}}/>
</form>
</Slide>
<Slide>
<Heading>Use a screen reader!</Heading>
<Text>iOS: VoiceOver</Text>
<Text>Windows: NVDA</Text>
<Text>Android: Talkback</Text>
</Slide>
<Slide>
<Heading>VoiceOver: cheat sheet</Heading>
<Text>
<code>⌘</code> + <code>F5</code>: Start/Stop
<br/>
<code>⇪</code>: VoiceOver Activation Key (VO)
<br/>
<code>VO</code> + <code>U</code>: Open Rotor
<br/>
<code>VO</code> + <code>⌘</code> + <code>← / →</code>: increase/decrease voice speed
<br/>
<code>VO</code> + <code>A</code> / <code>control</code>: start/stop reading
<br/>
<code>VO</code> + <code>← / →</code>: read next/previous item
</Text>
</Slide>
<Slide>
<Heading>🔎 What do you need to spot?</Heading>
<UnorderedList>
<ListItem>Missing labels on forms, alt on images...</ListItem>
<ListItem>Wrong/missing state (i.e. <Link href="https://v5.getbootstrap.com/docs/5.0/components/dropdowns/" target="_blank" rel="noopener noreferrer">dropdown</Link>)</ListItem>
<ListItem>Wrong item (link vs. button...)</ListItem>
<ListItem>Misformatted tables</ListItem>
</UnorderedList>
</Slide>
<Slide>
<Heading>👍 Worth watching</Heading>
<iframe
src="https://www.youtube.com/embed/dEbl5jvLKGQ"
frameBorder="0"
allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
allowFullScreen
style={{width: '60%', height: '60%', margin: '0 auto'}}
></iframe>
</Slide>
<Slide>
<Heading>Step 5. Keep learning</Heading>
<UnorderedList>
<ListItem>
<Link href="https://web.dev/lighthouse-accessibility/" target="_blank" rel="noopener noreferrer">
web.dev/lighthouse-accessibility
</Link>
</ListItem>
<ListItem>
<Link href="https://developer.mozilla.org/en-US/docs/Learn/Accessibility" target="_blank" rel="noopener noreferrer">
MDN - developer.mozilla.org
</Link>
</ListItem>
</UnorderedList>
</Slide>
<Slide>
<Heading>Twittersphere</Heading>
<UnorderedList>
<ListItem>
<Link href="https://twitter.com/LareneLg" target="_blank" rel="noopener noreferrer">
@LareneLg
</Link> and her<Link href="https://twitter.com/LareneLg/status/1262197938685530113" target="_blank" rel="noopener noreferrer">
fantastic thread
</Link>
</ListItem>
<ListItem>
<Link href="https://twitter.com/A11YProject" target="_blank" rel="noopener noreferrer">
@A11YProject
</Link>
</ListItem>
</UnorderedList>
</Slide>
<Slide>
<Heading>Thanks!</Heading>
</Slide>
</Deck>
);
ReactDOM.render(<Presentation />, document.getElementById('root')); | </Link>
</ListItem>
</UnorderedList> | random_line_split |
index.js | import React from 'react';
import ReactDOM from 'react-dom';
import lareneTweetImg from './img/larene-tweet.png';
import youtubeImg from './img/youtube.png';
import lighthouseImg from './img/lighthouse.png';
import axeImg from './img/axe.png';
import randomImg from './img/random-img.png';
import emulateVisionVideo from './img/emulateVisionDeficiencies.mp4';
import A11YInsights from './img/a11yInsights.mp4';
import './css/index.css';
import {
Appear,
Box,
CodePane,
CodeSpan,
Deck,
FlexBox,
FullScreen,
Grid,
Heading,
Image,
ListItem,
Markdown,
Notes,
OrderedList,
Link,
Quote,
Progress,
Slide,
Stepper,
Text,
UnorderedList,
indentNormalizer,
} from 'spectacle';
// SPECTACLE_CLI_THEME_START
const theme = {
fonts: {
header: '"Open Sans Condensed", Helvetica, Arial, sans-serif',
text: '"Open Sans Condensed", Helvetica, Arial, sans-serif'
}
};
// SPECTACLE_CLI_THEME_END
// SPECTACLE_CLI_TEMPLATE_START
const template = () => (
<FlexBox
justifyContent="space-between"
position="absolute"
bottom={0}
width={1}
>
<Box padding="0 1em">
<FullScreen />
</Box>
<Box padding="1em">
<Progress />
</Box>
</FlexBox>
);
// SPECTACLE_CLI_TEMPLATE_END
const cssTricksCodeBlock = indentNormalizer(`
img:not([alt]),
img[alt=""] {
border: 5px dashed red;
}
`)
const Presentation = () => (
<Deck theme={theme} template={template} transitionEffect="fade">
<Slide>
<FlexBox height="100%" flexDirection="column">
<Heading margin="0px" fontSize="80px">
Making WebApps Accessible for Everyone
</Heading>
<Heading margin="0px 32px" color="primary" fontSize="h2">
Maxence Poutord - @_maxpou
</Heading>
</FlexBox>
</Slide>
<Slide
backgroundColor="tertiary"
backgroundImage="url(https://heise.cloudimg.io/width/1220/q50.png-lossy-50.webp-lossy-50.foil1/_www-heise-de_/imgs/18/2/7/8/7/7/9/3/AoE2_Header_Microsoft-f5960ac0d6c87473.png)"
backgroundOpacity={0.5}
>
<Heading>The day I lost that battle...</Heading>
</Slide>
<Slide>
<FlexBox height="100%" flexDirection="column">
<Heading margin="0px" fontSize="80px">
Making WebApps Accessible for Everyone
</Heading>
<Heading margin="0px 32px" color="primary" fontSize="h2">
Maxence Poutord - @_maxpou
</Heading>
</FlexBox>
</Slide>
<Slide>
<Image src={lareneTweetImg} alt="Larene's Tweet https://twitter.com/LareneLg/status/1270578058714443776" style={{objectFit: 'contain'}}/>
</Slide>
<Slide>
<Heading>Types of Disabilities</Heading>
<UnorderedList>
<Appear elementNum={0}><ListItem>👨🦯 Visual</ListItem></Appear>
<Appear elementNum={1}><ListItem>👂 Auditory</ListItem></Appear>
<Appear elementNum={2}><ListItem>🗣 Speach</ListItem></Appear>
<Appear elementNum={3}><ListItem>👩🦼 Motor</ListItem></Appear>
<Appear elementNum={4}><ListItem>🧠 Cognitive</ListItem></Appear>
</UnorderedList>
</Slide>
<Slide>
<Heading margin="15% 0px">Building an a11y Friendly webapp in 5 steps</Heading>
</Slide>
<Slide>
<Heading>Step 1. Unplug your mouse</Heading>
</Slide>
<Slide>
<Heading >Exercice: keyboard only feature</Heading>
<Text>🕵️♀️ What to spot?</Text>
<UnorderedList>
<ListItem>Where's the focus</ListItem>
<ListItem>Click works with enter</ListItem>
<ListItem>Non-focusable items</ListItem>
</UnorderedList>
</Slide>
<Slide>
<Heading>Antipattern</Heading>
<CodePane
fontSize={18}
language="css"
autoFillHeight
>
{indentNormalizer(`
:focus {
outline: none;
}
`)}
</CodePane>
<div className='antipatternOutline'>
<button className='btn' onClick={() => console.log('Button 1')}>Button 1</button>
<button className='btn noOutline' onClick={() => console.log('Button 2')}>Button 2</button>
<button className='btn' onClick={() => console.log('Button 3')}>Button 3</button>
<a className='btn' onClick={() => console.log('Button 4')} onKeyUp={() => console.log('Button 4')}>Button 4</a>
<button className='btn' onClick={() => console.log('Button 5')}>Button 5</button>
</div>
</Slide>
<Slide>
<Heading>Cool patterns</Heading>
<Image src={youtubeImg} alt="Youtube skip navigation" style={{width: '100%'}}/>
</Slide>
<Slide>
<Heading>Step 2. Get tools</Heading>
</Slide>
<Slide>
<Heading style={{marginBottom: '0'}}>Lighthouse</Heading>
<Image src={lighthouseImg} alt="Lighthouse audit for Aliexpress.com" style={{width: '70%', margin: '0 auto'}}/>
</Slide>
<Slide>
<Heading style={{marginBottom: '0'}}>Accessibility Insights</Heading>
<video src={A11YInsights} autoPlay={true} controls style={{width: '60%', margin: '0 auto'}} />
</Slide>
<Slide>
<Heading style={{marginBottom: '0'}}>Axe</Heading>
<Image src={axeImg} alt="Axe audit for apple.com" style={{width: '85%', margin: '0 auto'}}/>
</Slide>
<Slide>
<Heading>CSS Hack</Heading>
<CodePane
fontSize={18}
language="css"
autoFillHeight
>
{cssTricksCodeBlock}
</CodePane>
<Image src={randomImg} style={{width: '25%', margin: '0 auto'}}/>
</Slide>
<Slide>
<Heading>Step 3. RTFM</Heading>
</Slide>
<Slide>
<Heading>w3.org</Heading>
<Link href="https://www.w3.org/TR/wai-aria-practices-1.1/#aria_ex" target="_blank" rel="noopener noreferrer">
w3.org/TR/wai-aria-practices-1.1/#aria_ex
</Link>
</Slide>
<Slide>
<Heading>Is My Component a11y friendly?</Heading>
<UnorderedList>
<ListItem>
<Link href="https://v5.getbootstrap.com/docs/5.0/components/collapse/" target="_blank" rel="noopener noreferrer">
v5.getbootstrap.com
</Link>
</ListItem>
<ListItem>
<Link href="https://polaris.shopify.com/components/navigation/tabs#navigation" target="_blank" rel="noopener noreferrer">
polaris.shopify.com
</Link>
</ListItem>
</UnorderedList>
</Slide>
<Slide>
<Heading>What do you prefer?</Heading>
<CodePane
fontSize={18}
language="html"
autoFillHeight
>
{indentNormalizer(`
<button>Save</button>
`)}
</CodePane>
<Text>OR:</Text>
<CodePane
fontSize={18}
language="html"
autoFillHeight
>
{indentNormalizer(`
<div tabindex="0" role="button" onclick="btnHandler(event)" onKeyDown="btnHandler(event)">
Save
</div>
<script>
function btnHandler(event) {
if (event instanceof KeyboardEvent && event.key !== 'Enter' && event.key !== ' ') {
return;
}
// function's body
}
</script>
`)}
</CodePane>
</Slide>
<Slide>
<Heading style={{marginTop: '20%'}}><i>"No ARIA is better than Bad ARIA"</i></Heading>
</Slide>
<Slide>
<Heading>Step 4. Mimic disabilities</Heading>
</Slide>
<Slide>
<Heading style={{marginBottom: '0'}}>DevTools</Heading>
<Text style={{margin: '0 auto'}}><strike>Chrome</strike> Brave > rendering > Emulate vision deficiencies</Text>
<video src={emulateVisionVideo} autoPlay={true} controls style={{width: '50%', margin: '0 auto'}} />
<form id="emulateVisionForm">
<input id="username-demo" type="text" placeholder="enter your email" onKeyUp={() => {
const input = document.getElementById('username-demo')
if (input.value === '') {
input.className='';
| .\S+/.test(input.value)
isEmail ? input.className="valid" : input.className="invalid"
}}/>
</form>
</Slide>
<Slide>
<Heading>Use a screen reader!</Heading>
<Text>iOS: VoiceOver</Text>
<Text>Windows: NVDA</Text>
<Text>Android: Talkback</Text>
</Slide>
<Slide>
<Heading>VoiceOver: cheat sheet</Heading>
<Text>
<code>⌘</code> + <code>F5</code>: Start/Stop
<br/>
<code>⇪</code>: VoiceOver Activation Key (VO)
<br/>
<code>VO</code> + <code>U</code>: Open Rotor
<br/>
<code>VO</code> + <code>⌘</code> + <code>← / →</code>: increase/decrease voice speed
<br/>
<code>VO</code> + <code>A</code> / <code>control</code>: start/stop reading
<br/>
<code>VO</code> + <code>← / →</code>: read next/previous item
</Text>
</Slide>
<Slide>
<Heading>🔎 What do you need to spot?</Heading>
<UnorderedList>
<ListItem>Missing labels on forms, alt on images...</ListItem>
<ListItem>Wrong/missing state (i.e. <Link href="https://v5.getbootstrap.com/docs/5.0/components/dropdowns/" target="_blank" rel="noopener noreferrer">dropdown</Link>)</ListItem>
<ListItem>Wrong item (link vs. button...)</ListItem>
<ListItem>Misformatted tables</ListItem>
</UnorderedList>
</Slide>
<Slide>
<Heading>👍 Worth watching</Heading>
<iframe
src="https://www.youtube.com/embed/dEbl5jvLKGQ"
frameBorder="0"
allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
allowFullScreen
style={{width: '60%', height: '60%', margin: '0 auto'}}
></iframe>
</Slide>
<Slide>
<Heading>Step 5. Keep learning</Heading>
<UnorderedList>
<ListItem>
<Link href="https://web.dev/lighthouse-accessibility/" target="_blank" rel="noopener noreferrer">
web.dev/lighthouse-accessibility
</Link>
</ListItem>
<ListItem>
<Link href="https://developer.mozilla.org/en-US/docs/Learn/Accessibility" target="_blank" rel="noopener noreferrer">
MDN - developer.mozilla.org
</Link>
</ListItem>
</UnorderedList>
</Slide>
<Slide>
<Heading>Twittersphere</Heading>
<UnorderedList>
<ListItem>
<Link href="https://twitter.com/LareneLg" target="_blank" rel="noopener noreferrer">
@LareneLg
</Link> and her<Link href="https://twitter.com/LareneLg/status/1262197938685530113" target="_blank" rel="noopener noreferrer">
fantastic thread
</Link>
</ListItem>
<ListItem>
<Link href="https://twitter.com/A11YProject" target="_blank" rel="noopener noreferrer">
@A11YProject
</Link>
</ListItem>
</UnorderedList>
</Slide>
<Slide>
<Heading>Thanks!</Heading>
</Slide>
</Deck>
);
ReactDOM.render(<Presentation />, document.getElementById('root'));
| return;
}
const isEmail = /\S+@\S+\ | conditional_block |
setup.go | // Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package usbprinter provides an interface to configure and attach a virtual
// USB printer onto the system to be used for testing.
package usbprinter
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"golang.org/x/sys/unix"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/printing/lp"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// DevInfo contains information used to identify a USB device.
type DevInfo struct {
// VID contains the device's vendor ID.
VID string
// PID contains the devices's product ID.
PID string
}
// config contains all information needed to run the
// virtual-usb-printer process.
type config struct {
// The actual arguments fed to (and not including) stdbuf.
//
// Config data path fields obey these rules:
// 1. Absolute paths are passed verbatim to the invocation of
// virtual-usb-printer.
// 2. Relative paths (and basenames) are joined with the default
// install location of virtual-usb-printer's config files.
args []string
// Populated with path from WithDescriptors().
descriptors string
// Whether or not Start() blocks on printer autoconfiguration.
waitUntilConfigured bool
// Whether or not Printer.Stop() should propagate an error if
// no udev event is observed on stoppage.
expectUdevEventOnStop bool
}
// Option provides the type for functional options used
// to build a Printer via Start().
type Option func(*config) error
// WithIPPUSBDescriptors passes the most commonly used USB descriptors.
func WithIPPUSBDescriptors() Option {
return WithDescriptors("ippusb_printer.json")
}
// WithDescriptors sets the required descriptors.
func WithDescriptors(path string) Option {
return func(o *config) error {
if len(path) == 0 {
return errors.New("empty descriptors path")
}
o.args = append(o.args, "--descriptors_path="+absoluteConfigPath(path))
o.descriptors = absoluteConfigPath(path)
return nil
}
}
// WithGenericIPPAttributes passes the most commonly used IPP attributes.
func WithGenericIPPAttributes() Option {
return WithAttributes("ipp_attributes.json")
}
// WithAttributes sets attributes.
func WithAttributes(path string) Option {
return func(o *config) error {
if len(path) == 0 {
return errors.New("empty attributes path")
}
o.args = append(o.args, "--attributes_path="+absoluteConfigPath(path))
return nil
}
}
// WithESCLCapabilities sets eSCL capabilities.
func WithESCLCapabilities(path string) Option {
return func(o *config) error {
if len(path) == 0 {
return errors.New("empty eSCL capabilities path")
}
o.args = append(o.args, "--scanner_capabilities_path="+absoluteConfigPath(path))
return nil
}
}
// WithOutputLogDirectory sets the output log directory.
func WithOutputLogDirectory(directory string) Option {
return func(o *config) error {
if !path.IsAbs(directory) {
return errors.Errorf("output log directory (%q) is not an absolute path", directory)
}
o.args = append(o.args, "--output_log_dir="+directory)
return nil
}
}
// WithHTTPLogDirectory sets the HTTP log directory.
func WithHTTPLogDirectory(directory string) Option {
return func(o *config) error {
if !path.IsAbs(directory) {
return errors.Errorf("HTTP log directory (%q) is not an absolute path",
directory)
}
o.args = append(o.args, "--http_header_output_dir="+directory)
return nil
}
}
// WithRecordPath sets the document output path.
func WithRecordPath(record string) Option {
return func(o *config) error {
if !path.IsAbs(record) {
return errors.Errorf("record path (%q) is not an absolute path", record)
}
o.args = append(o.args, "--record_doc_path="+record)
return nil
}
}
// WithMockPrinterScriptPath sets the mock printer script path.
func WithMockPrinterScriptPath(script string) Option {
return func(o *config) error {
if !path.IsAbs(script) {
return errors.Errorf("mock printer script path (%q) is not an absolute path", script)
}
o.args = append(o.args, "--mock_printer_script="+script)
return nil
}
}
// WaitUntilConfigured controls whether or not Start() blocks on printer
// autoconfiguration.
func WaitUntilConfigured() Option {
return func(o *config) error {
o.waitUntilConfigured = true
return nil
}
}
// ExpectUdevEventOnStop causes Printer.Stop() to propagate errors if
// a udev event is not seen.
func ExpectUdevEventOnStop() Option {
return func(o *config) error {
o.expectUdevEventOnStop = true
return nil
}
}
// Printer provides an interface to interact with the running
// virtual-usb-printer instance.
type Printer struct {
// The printer name as detected by autoconfiguration.
// Empty if Start() was called with info.WaitUntilConfigured
// set false.
ConfiguredName string
// The printer's device information parsed from its USB
// descriptors config.
DevInfo DevInfo
// The running virtual-usb-printer instance.
cmd *testexec.Cmd
// Whether or not Stop() should propagate an error if
// no udev event is observed on stoppage.
expectUdevEventOnStop bool
// The human-readable printer name as it would be displayed
// in the UI. This is parsed from its USB descriptors, e.g.
// "DavieV Virtual USB Printer (USB)".
VisibleName string
}
func ippUSBPrinterURI(devInfo DevInfo) string {
return fmt.Sprintf("ippusb://%s_%s/ipp/print", devInfo.VID, devInfo.PID)
}
// loadPrinterIDs loads the JSON file located at path and attempts to extract
// the "vid" and "pid" from the USB device descriptor which should be defined
// in path.
func loadPrinterIDs(path string) (devInfo DevInfo, deviceName string, err error) {
f, err := os.Open(path)
if err != nil {
return devInfo, "", errors.Wrapf(err, "failed to open %s", path)
}
defer f.Close()
var cfg struct {
DevDesc struct {
Vendor int `json:"idVendor"`
Product int `json:"idProduct"`
} `json:"device_descriptor"`
VendorModel []string `json:"string_descriptors"`
}
if err := json.NewDecoder(f).Decode(&cfg); err != nil {
return devInfo, "", errors.Wrapf(err, "failed to decode JSON in %s", path)
}
deviceName = fmt.Sprintf("%s %s (USB)", cfg.VendorModel[0], cfg.VendorModel[1])
return DevInfo{fmt.Sprintf("%04x", cfg.DevDesc.Vendor), fmt.Sprintf("%04x", cfg.DevDesc.Product)}, deviceName, nil
}
// absoluteConfigPath returns configPath untouched if it is absolute.
// Otherwise, it returns configPath prefixed with the default install
// directory of virtual-usb-printer.
func absoluteConfigPath(configPath string) string {
if path.IsAbs(configPath) {
return configPath
}
return path.Join("/usr/local/etc/virtual-usb-printer/", configPath)
}
func terminatePrinterProcess(ctx context.Context, cmd *testexec.Cmd) error {
testing.ContextLogf(ctx, "Terminating virtual-usb-printer with PID %d", cmd.Cmd.Process.Pid)
if err := cmd.Signal(unix.SIGTERM); err != nil {
return errors.Wrap(err, "failed to send SIGTERM to virtual-usb-printer")
}
if err := cmd.Wait(); err != nil {
// We're expecting the exit status to be non-zero if the process was killed by SIGTERM.
// Anything else indicates a problem.
if ws, ok := testexec.GetWaitStatus(err); !ok || !ws.Signaled() || ws.Signal() != unix.SIGTERM {
return errors.Wrap(err, "failed to wait for virtual-usb-printer termination")
}
}
return nil
}
func launchPrinter(ctx context.Context, op config) (cmd *testexec.Cmd, err error) {
testing.ContextLog(ctx, "Starting virtual printer: ", op.args)
launch := testexec.CommandContext(ctx, "stdbuf", op.args...)
p, err := launch.StdoutPipe()
if err != nil {
return nil, err
}
if err := launch.Start(); err != nil {
return nil, errors.Wrapf(err, "failed to start %v", launch.Args)
}
defer func(ctx context.Context) {
if err == nil {
return
}
if cleanupErr := terminatePrinterProcess(ctx, launch); cleanupErr != nil {
testing.ContextLogf(ctx, "Virtual printer termination failed (%q)", cleanupErr)
}
}(ctx)
if err := waitLaunch(p); err != nil {
return nil, errors.Wrap(err, "failed to launch virtual printer")
}
// We pull everything out from the pipe so that
// virtual-usb-printer doesn't block on writing to stdout.
go io.Copy(ioutil.Discard, p)
return launch, nil
}
// Start creates a new Printer and starts the underlying
// virtual-usb-printer process.
func Start(ctx context.Context, opts ...Option) (pr *Printer, err error) {
// Debugd needs to be running before the USB device shows up so Chrome can add the printer.
if err := upstart.EnsureJobRunning(ctx, "debugd"); err != nil {
testing.ContextLogf(ctx, "debugd not running: %q", err)
return nil, err
}
op := config{
args: []string{"-o0", "virtual-usb-printer"},
}
for _, field := range opts {
if err := field(&op); err != nil {
return nil, err
}
}
if len(op.descriptors) == 0 |
devInfo, deviceName, err := loadPrinterIDs(op.descriptors)
if err != nil {
return nil, err
}
cmd, err := launchPrinter(ctx, op)
if err != nil {
return nil, err
}
defer func(ctx context.Context) {
if err == nil {
return
}
if cleanupErr := cmd.Signal(unix.SIGTERM); cleanupErr != nil {
testing.ContextLogf(ctx, "Virtual printer termination failed (%q)", cleanupErr)
}
if cleanupErr := cmd.Wait(); cleanupErr != nil {
// This error is noisy: sending SIGTERM always causes Wait()
// to return an error.
testing.ContextLogf(ctx, "Virtual printer termination wait failed (%q)", cleanupErr)
}
}(ctx)
if err = attachUSBIPDevice(ctx, devInfo); err != nil {
return nil, err
}
printerName := ""
if op.waitUntilConfigured {
printerName, err = waitPrinterConfigured(ctx, devInfo)
if err != nil {
return nil, err
}
}
return &Printer{
ConfiguredName: printerName,
DevInfo: devInfo,
cmd: cmd,
expectUdevEventOnStop: op.expectUdevEventOnStop,
VisibleName: deviceName,
}, nil
}
// Stop terminates and waits for the virtual-usb-printer. Users must
// call this when finished with the virtual-usb-printer.
//
// Returns an error if we fail to terminate or wait for the
// virtual-usb-printer, or if we don't see an expected udev event
// upon stoppage.
//
// This method is idempotent.
func (p *Printer) Stop(ctx context.Context) error {
if p.cmd == nil {
return nil
}
defer func() {
p.cmd = nil
}()
var udevCh <-chan error
if p.expectUdevEventOnStop {
var err error
udevCh, err = startUdevMonitor(ctx, "remove", p.DevInfo)
if err != nil {
return err
}
}
if err := terminatePrinterProcess(ctx, p.cmd); err != nil {
testing.ContextLogf(ctx, "Failed to terminate printer (%q)", err)
}
if p.expectUdevEventOnStop {
// Wait for a signal from udevadm to say the device was successfully
// detached.
testing.ContextLog(ctx, "Waiting for udev remove event")
select {
case err := <-udevCh:
if err != nil {
return err
}
testing.ContextLog(ctx, "received remove event")
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "didn't receive udev event")
}
}
return nil
}
// attachUSBIPDevice attaches the UsbIp device specified by devInfo to the
// system. Returns nil if the device was attached successfully.
func attachUSBIPDevice(ctx context.Context, devInfo DevInfo) error {
// Begin waiting for udev event.
udevCh, err := startUdevMonitor(ctx, "add", devInfo)
if err != nil {
return err
}
// Attach the virtual printer to the system using the "usbip attach" command.
testing.ContextLog(ctx, "Attaching virtual printer")
attach := testexec.CommandContext(ctx, "usbip", "attach", "-r", "localhost",
"-b", "1-1")
if err := attach.Run(); err != nil {
return errors.Wrap(err, "failed to attach virtual usb printer")
}
// Wait for a signal from udevadm to see if the device was successfully
// attached.
testing.ContextLog(ctx, "Waiting for udev add event")
select {
case err := <-udevCh:
if err != nil {
return err
}
testing.ContextLog(ctx, "Found add event")
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "didn't get udev event")
}
// Run lsusb to validity check that that the device is actually connected.
id := fmt.Sprintf("%s:%s", devInfo.VID, devInfo.PID)
checkAttached := testexec.CommandContext(ctx, "lsusb", "-d", id)
if err := checkAttached.Run(); err != nil {
checkAttached.DumpLog(ctx)
return errors.Wrap(err, "printer was not successfully attached")
}
return nil
}
// waitPrinterConfigured waits for a printer which has the same VID/PID as
// devInfo to be configured on the system. If a match is found then the name of
// the configured device will be returned.
func waitPrinterConfigured(ctx context.Context, devInfo DevInfo) (string, error) {
var foundName string
uri := ippUSBPrinterURI(devInfo)
if err := testing.Poll(ctx, func(ctx context.Context) error {
name, err := lp.PrinterNameByURI(ctx, uri)
if err != nil {
return err
}
foundName = name
return nil
}, nil); err != nil {
return "", err
}
return foundName, nil
}
| {
return nil, errors.New("missing required WithDescriptors() option")
} | conditional_block |
setup.go | // Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package usbprinter provides an interface to configure and attach a virtual
// USB printer onto the system to be used for testing.
package usbprinter
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"golang.org/x/sys/unix"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/printing/lp"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// DevInfo contains information used to identify a USB device.
type DevInfo struct {
// VID contains the device's vendor ID.
VID string
// PID contains the devices's product ID.
PID string
}
// config contains all information needed to run the
// virtual-usb-printer process.
type config struct {
// The actual arguments fed to (and not including) stdbuf.
//
// Config data path fields obey these rules:
// 1. Absolute paths are passed verbatim to the invocation of
// virtual-usb-printer.
// 2. Relative paths (and basenames) are joined with the default
// install location of virtual-usb-printer's config files.
args []string
// Populated with path from WithDescriptors().
descriptors string
// Whether or not Start() blocks on printer autoconfiguration.
waitUntilConfigured bool
// Whether or not Printer.Stop() should propagate an error if
// no udev event is observed on stoppage.
expectUdevEventOnStop bool
}
// Option provides the type for functional options used
// to build a Printer via Start().
type Option func(*config) error
// WithIPPUSBDescriptors passes the most commonly used USB descriptors.
func WithIPPUSBDescriptors() Option {
return WithDescriptors("ippusb_printer.json")
}
// WithDescriptors sets the required descriptors.
func WithDescriptors(path string) Option {
return func(o *config) error {
if len(path) == 0 {
return errors.New("empty descriptors path")
}
o.args = append(o.args, "--descriptors_path="+absoluteConfigPath(path))
o.descriptors = absoluteConfigPath(path)
return nil
}
}
// WithGenericIPPAttributes passes the most commonly used IPP attributes.
func WithGenericIPPAttributes() Option {
return WithAttributes("ipp_attributes.json")
}
// WithAttributes sets attributes.
func WithAttributes(path string) Option {
return func(o *config) error {
if len(path) == 0 {
return errors.New("empty attributes path")
}
o.args = append(o.args, "--attributes_path="+absoluteConfigPath(path))
return nil
}
}
// WithESCLCapabilities sets eSCL capabilities.
func WithESCLCapabilities(path string) Option {
return func(o *config) error {
if len(path) == 0 {
return errors.New("empty eSCL capabilities path")
}
o.args = append(o.args, "--scanner_capabilities_path="+absoluteConfigPath(path))
return nil
}
}
// WithOutputLogDirectory sets the output log directory.
func WithOutputLogDirectory(directory string) Option {
return func(o *config) error {
if !path.IsAbs(directory) {
return errors.Errorf("output log directory (%q) is not an absolute path", directory)
}
o.args = append(o.args, "--output_log_dir="+directory)
return nil
}
}
// WithHTTPLogDirectory sets the HTTP log directory.
func WithHTTPLogDirectory(directory string) Option {
return func(o *config) error {
if !path.IsAbs(directory) {
return errors.Errorf("HTTP log directory (%q) is not an absolute path",
directory)
}
o.args = append(o.args, "--http_header_output_dir="+directory)
return nil
}
}
// WithRecordPath sets the document output path.
func WithRecordPath(record string) Option {
return func(o *config) error {
if !path.IsAbs(record) {
return errors.Errorf("record path (%q) is not an absolute path", record)
}
o.args = append(o.args, "--record_doc_path="+record)
return nil
}
}
// WithMockPrinterScriptPath sets the mock printer script path.
func WithMockPrinterScriptPath(script string) Option {
return func(o *config) error {
if !path.IsAbs(script) {
return errors.Errorf("mock printer script path (%q) is not an absolute path", script)
}
o.args = append(o.args, "--mock_printer_script="+script)
return nil
}
}
// WaitUntilConfigured controls whether or not Start() blocks on printer
// autoconfiguration.
func WaitUntilConfigured() Option {
return func(o *config) error {
o.waitUntilConfigured = true
return nil
}
}
// ExpectUdevEventOnStop causes Printer.Stop() to propagate errors if
// a udev event is not seen.
func ExpectUdevEventOnStop() Option {
return func(o *config) error {
o.expectUdevEventOnStop = true
return nil
}
}
// Printer provides an interface to interact with the running
// virtual-usb-printer instance.
type Printer struct {
// The printer name as detected by autoconfiguration.
// Empty if Start() was called with info.WaitUntilConfigured
// set false.
ConfiguredName string
// The printer's device information parsed from its USB
// descriptors config.
DevInfo DevInfo
// The running virtual-usb-printer instance.
cmd *testexec.Cmd
// Whether or not Stop() should propagate an error if
// no udev event is observed on stoppage.
expectUdevEventOnStop bool
// The human-readable printer name as it would be displayed
// in the UI. This is parsed from its USB descriptors, e.g.
// "DavieV Virtual USB Printer (USB)".
VisibleName string
}
func ippUSBPrinterURI(devInfo DevInfo) string {
return fmt.Sprintf("ippusb://%s_%s/ipp/print", devInfo.VID, devInfo.PID)
}
// loadPrinterIDs loads the JSON file located at path and attempts to extract
// the "vid" and "pid" from the USB device descriptor which should be defined
// in path.
func loadPrinterIDs(path string) (devInfo DevInfo, deviceName string, err error) {
f, err := os.Open(path)
if err != nil {
return devInfo, "", errors.Wrapf(err, "failed to open %s", path)
}
defer f.Close()
var cfg struct {
DevDesc struct {
Vendor int `json:"idVendor"`
Product int `json:"idProduct"`
} `json:"device_descriptor"`
VendorModel []string `json:"string_descriptors"`
}
if err := json.NewDecoder(f).Decode(&cfg); err != nil {
return devInfo, "", errors.Wrapf(err, "failed to decode JSON in %s", path)
}
deviceName = fmt.Sprintf("%s %s (USB)", cfg.VendorModel[0], cfg.VendorModel[1])
return DevInfo{fmt.Sprintf("%04x", cfg.DevDesc.Vendor), fmt.Sprintf("%04x", cfg.DevDesc.Product)}, deviceName, nil
}
// absoluteConfigPath returns configPath untouched if it is absolute.
// Otherwise, it returns configPath prefixed with the default install
// directory of virtual-usb-printer.
func absoluteConfigPath(configPath string) string {
if path.IsAbs(configPath) {
return configPath
}
return path.Join("/usr/local/etc/virtual-usb-printer/", configPath)
}
func terminatePrinterProcess(ctx context.Context, cmd *testexec.Cmd) error |
func launchPrinter(ctx context.Context, op config) (cmd *testexec.Cmd, err error) {
testing.ContextLog(ctx, "Starting virtual printer: ", op.args)
launch := testexec.CommandContext(ctx, "stdbuf", op.args...)
p, err := launch.StdoutPipe()
if err != nil {
return nil, err
}
if err := launch.Start(); err != nil {
return nil, errors.Wrapf(err, "failed to start %v", launch.Args)
}
defer func(ctx context.Context) {
if err == nil {
return
}
if cleanupErr := terminatePrinterProcess(ctx, launch); cleanupErr != nil {
testing.ContextLogf(ctx, "Virtual printer termination failed (%q)", cleanupErr)
}
}(ctx)
if err := waitLaunch(p); err != nil {
return nil, errors.Wrap(err, "failed to launch virtual printer")
}
// We pull everything out from the pipe so that
// virtual-usb-printer doesn't block on writing to stdout.
go io.Copy(ioutil.Discard, p)
return launch, nil
}
// Start creates a new Printer and starts the underlying
// virtual-usb-printer process.
func Start(ctx context.Context, opts ...Option) (pr *Printer, err error) {
// Debugd needs to be running before the USB device shows up so Chrome can add the printer.
if err := upstart.EnsureJobRunning(ctx, "debugd"); err != nil {
testing.ContextLogf(ctx, "debugd not running: %q", err)
return nil, err
}
op := config{
args: []string{"-o0", "virtual-usb-printer"},
}
for _, field := range opts {
if err := field(&op); err != nil {
return nil, err
}
}
if len(op.descriptors) == 0 {
return nil, errors.New("missing required WithDescriptors() option")
}
devInfo, deviceName, err := loadPrinterIDs(op.descriptors)
if err != nil {
return nil, err
}
cmd, err := launchPrinter(ctx, op)
if err != nil {
return nil, err
}
defer func(ctx context.Context) {
if err == nil {
return
}
if cleanupErr := cmd.Signal(unix.SIGTERM); cleanupErr != nil {
testing.ContextLogf(ctx, "Virtual printer termination failed (%q)", cleanupErr)
}
if cleanupErr := cmd.Wait(); cleanupErr != nil {
// This error is noisy: sending SIGTERM always causes Wait()
// to return an error.
testing.ContextLogf(ctx, "Virtual printer termination wait failed (%q)", cleanupErr)
}
}(ctx)
if err = attachUSBIPDevice(ctx, devInfo); err != nil {
return nil, err
}
printerName := ""
if op.waitUntilConfigured {
printerName, err = waitPrinterConfigured(ctx, devInfo)
if err != nil {
return nil, err
}
}
return &Printer{
ConfiguredName: printerName,
DevInfo: devInfo,
cmd: cmd,
expectUdevEventOnStop: op.expectUdevEventOnStop,
VisibleName: deviceName,
}, nil
}
// Stop terminates and waits for the virtual-usb-printer. Users must
// call this when finished with the virtual-usb-printer.
//
// Returns an error if we fail to terminate or wait for the
// virtual-usb-printer, or if we don't see an expected udev event
// upon stoppage.
//
// This method is idempotent.
func (p *Printer) Stop(ctx context.Context) error {
if p.cmd == nil {
return nil
}
defer func() {
p.cmd = nil
}()
var udevCh <-chan error
if p.expectUdevEventOnStop {
var err error
udevCh, err = startUdevMonitor(ctx, "remove", p.DevInfo)
if err != nil {
return err
}
}
if err := terminatePrinterProcess(ctx, p.cmd); err != nil {
testing.ContextLogf(ctx, "Failed to terminate printer (%q)", err)
}
if p.expectUdevEventOnStop {
// Wait for a signal from udevadm to say the device was successfully
// detached.
testing.ContextLog(ctx, "Waiting for udev remove event")
select {
case err := <-udevCh:
if err != nil {
return err
}
testing.ContextLog(ctx, "received remove event")
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "didn't receive udev event")
}
}
return nil
}
// attachUSBIPDevice attaches the UsbIp device specified by devInfo to the
// system. Returns nil if the device was attached successfully.
func attachUSBIPDevice(ctx context.Context, devInfo DevInfo) error {
// Begin waiting for udev event.
udevCh, err := startUdevMonitor(ctx, "add", devInfo)
if err != nil {
return err
}
// Attach the virtual printer to the system using the "usbip attach" command.
testing.ContextLog(ctx, "Attaching virtual printer")
attach := testexec.CommandContext(ctx, "usbip", "attach", "-r", "localhost",
"-b", "1-1")
if err := attach.Run(); err != nil {
return errors.Wrap(err, "failed to attach virtual usb printer")
}
// Wait for a signal from udevadm to see if the device was successfully
// attached.
testing.ContextLog(ctx, "Waiting for udev add event")
select {
case err := <-udevCh:
if err != nil {
return err
}
testing.ContextLog(ctx, "Found add event")
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "didn't get udev event")
}
// Run lsusb to validity check that that the device is actually connected.
id := fmt.Sprintf("%s:%s", devInfo.VID, devInfo.PID)
checkAttached := testexec.CommandContext(ctx, "lsusb", "-d", id)
if err := checkAttached.Run(); err != nil {
checkAttached.DumpLog(ctx)
return errors.Wrap(err, "printer was not successfully attached")
}
return nil
}
// waitPrinterConfigured waits for a printer which has the same VID/PID as
// devInfo to be configured on the system. If a match is found then the name of
// the configured device will be returned.
func waitPrinterConfigured(ctx context.Context, devInfo DevInfo) (string, error) {
var foundName string
uri := ippUSBPrinterURI(devInfo)
if err := testing.Poll(ctx, func(ctx context.Context) error {
name, err := lp.PrinterNameByURI(ctx, uri)
if err != nil {
return err
}
foundName = name
return nil
}, nil); err != nil {
return "", err
}
return foundName, nil
}
| {
testing.ContextLogf(ctx, "Terminating virtual-usb-printer with PID %d", cmd.Cmd.Process.Pid)
if err := cmd.Signal(unix.SIGTERM); err != nil {
return errors.Wrap(err, "failed to send SIGTERM to virtual-usb-printer")
}
if err := cmd.Wait(); err != nil {
// We're expecting the exit status to be non-zero if the process was killed by SIGTERM.
// Anything else indicates a problem.
if ws, ok := testexec.GetWaitStatus(err); !ok || !ws.Signaled() || ws.Signal() != unix.SIGTERM {
return errors.Wrap(err, "failed to wait for virtual-usb-printer termination")
}
}
return nil
} | identifier_body |
setup.go | // Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package usbprinter provides an interface to configure and attach a virtual
// USB printer onto the system to be used for testing.
package usbprinter
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"golang.org/x/sys/unix"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/printing/lp"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// DevInfo contains information used to identify a USB device.
type DevInfo struct {
	// VID contains the device's vendor ID (4 hex digits, e.g. "04a9").
	VID string
	// PID contains the device's product ID (4 hex digits).
	PID string
}
// config contains all information needed to run the
// virtual-usb-printer process. It is populated by applying Options
// in Start() and is not used after the process has launched.
type config struct {
	// The actual arguments fed to (and not including) stdbuf.
	//
	// Config data path fields obey these rules:
	// 1. Absolute paths are passed verbatim to the invocation of
	//    virtual-usb-printer.
	// 2. Relative paths (and basenames) are joined with the default
	//    install location of virtual-usb-printer's config files.
	args []string
	// Populated with path from WithDescriptors(). Required by Start().
	descriptors string
	// Whether or not Start() blocks on printer autoconfiguration.
	waitUntilConfigured bool
	// Whether or not Printer.Stop() should propagate an error if
	// no udev event is observed on stoppage.
	expectUdevEventOnStop bool
}
// Option provides the type for functional options used
// to build a Printer via Start(). Each Option mutates the internal
// config and returns an error if its argument is invalid.
type Option func(*config) error
// WithIPPUSBDescriptors passes the most commonly used USB descriptors
// (ippusb_printer.json, resolved against the default config directory).
func WithIPPUSBDescriptors() Option {
	return WithDescriptors("ippusb_printer.json")
}
// WithDescriptors sets the required USB descriptors config. Relative
// paths are resolved against the default virtual-usb-printer config
// install directory. This option is mandatory for Start().
func WithDescriptors(path string) Option {
	return func(o *config) error {
		if len(path) == 0 {
			return errors.New("empty descriptors path")
		}
		// Resolve once; the same absolute path is both passed to the
		// printer binary and remembered for loadPrinterIDs().
		resolved := absoluteConfigPath(path)
		o.args = append(o.args, "--descriptors_path="+resolved)
		o.descriptors = resolved
		return nil
	}
}
// WithGenericIPPAttributes passes the most commonly used IPP attributes
// (ipp_attributes.json, resolved against the default config directory).
func WithGenericIPPAttributes() Option {
	return WithAttributes("ipp_attributes.json")
}
// WithAttributes sets attributes.
func WithAttributes(path string) Option {
	return func(cfg *config) error {
		if path == "" {
			return errors.New("empty attributes path")
		}
		cfg.args = append(cfg.args, "--attributes_path="+absoluteConfigPath(path))
		return nil
	}
}
// WithESCLCapabilities sets eSCL capabilities.
func WithESCLCapabilities(path string) Option {
	return func(cfg *config) error {
		if path == "" {
			return errors.New("empty eSCL capabilities path")
		}
		cfg.args = append(cfg.args, "--scanner_capabilities_path="+absoluteConfigPath(path))
		return nil
	}
}
// WithOutputLogDirectory sets the output log directory.
func WithOutputLogDirectory(directory string) Option {
	return func(cfg *config) error {
		if path.IsAbs(directory) {
			cfg.args = append(cfg.args, "--output_log_dir="+directory)
			return nil
		}
		return errors.Errorf("output log directory (%q) is not an absolute path", directory)
	}
}
// WithHTTPLogDirectory sets the HTTP log directory.
func WithHTTPLogDirectory(directory string) Option {
	return func(cfg *config) error {
		if path.IsAbs(directory) {
			cfg.args = append(cfg.args, "--http_header_output_dir="+directory)
			return nil
		}
		return errors.Errorf("HTTP log directory (%q) is not an absolute path", directory)
	}
}
// WithRecordPath sets the document output path.
func WithRecordPath(record string) Option {
	return func(cfg *config) error {
		if path.IsAbs(record) {
			cfg.args = append(cfg.args, "--record_doc_path="+record)
			return nil
		}
		return errors.Errorf("record path (%q) is not an absolute path", record)
	}
}
// WithMockPrinterScriptPath sets the mock printer script path.
func WithMockPrinterScriptPath(script string) Option {
	return func(cfg *config) error {
		if path.IsAbs(script) {
			cfg.args = append(cfg.args, "--mock_printer_script="+script)
			return nil
		}
		return errors.Errorf("mock printer script path (%q) is not an absolute path", script)
	}
}
// WaitUntilConfigured controls whether or not Start() blocks on printer
// autoconfiguration (i.e. until the printer is visible via lp by its
// ippusb URI).
func WaitUntilConfigured() Option {
	return func(o *config) error {
		o.waitUntilConfigured = true
		return nil
	}
}
// ExpectUdevEventOnStop causes Printer.Stop() to propagate errors if
// a udev "remove" event is not seen when the printer is stopped.
func ExpectUdevEventOnStop() Option {
	return func(o *config) error {
		o.expectUdevEventOnStop = true
		return nil
	}
}
// Printer provides an interface to interact with the running
// virtual-usb-printer instance. Obtain one via Start() and release
// it with Stop().
type Printer struct {
	// The printer name as detected by autoconfiguration.
	// Empty if Start() was called with info.WaitUntilConfigured
	// set false.
	ConfiguredName string
	// The printer's device information parsed from its USB
	// descriptors config.
	DevInfo DevInfo
	// The running virtual-usb-printer instance. Set to nil by Stop()
	// to make Stop() idempotent.
	cmd *testexec.Cmd
	// Whether or not Stop() should propagate an error if
	// no udev event is observed on stoppage.
	expectUdevEventOnStop bool
	// The human-readable printer name as it would be displayed
	// in the UI. This is parsed from its USB descriptors, e.g.
	// "DavieV Virtual USB Printer (USB)".
	VisibleName string
}
// ippUSBPrinterURI builds the CUPS ippusb:// URI for the device with
// the given VID/PID, e.g. "ippusb://1234_abcd/ipp/print".
func ippUSBPrinterURI(devInfo DevInfo) string {
	return fmt.Sprintf("ippusb://%s_%s/ipp/print", devInfo.VID, devInfo.PID)
}
// loadPrinterIDs loads the JSON file located at path and attempts to extract
// the "idVendor" and "idProduct" values from the USB device descriptor which
// should be defined in path. It also builds the human-readable device name
// from the first two string descriptors ("<vendor> <model> (USB)").
func loadPrinterIDs(path string) (devInfo DevInfo, deviceName string, err error) {
	f, err := os.Open(path)
	if err != nil {
		return devInfo, "", errors.Wrapf(err, "failed to open %s", path)
	}
	defer f.Close()
	var cfg struct {
		DevDesc struct {
			Vendor  int `json:"idVendor"`
			Product int `json:"idProduct"`
		} `json:"device_descriptor"`
		VendorModel []string `json:"string_descriptors"`
	}
	if err := json.NewDecoder(f).Decode(&cfg); err != nil {
		return devInfo, "", errors.Wrapf(err, "failed to decode JSON in %s", path)
	}
	// Guard against malformed configs: indexing VendorModel blindly would
	// panic if the file defines fewer than two string descriptors.
	if len(cfg.VendorModel) < 2 {
		return devInfo, "", errors.Errorf("expected at least 2 string descriptors in %s, got %d", path, len(cfg.VendorModel))
	}
	deviceName = fmt.Sprintf("%s %s (USB)", cfg.VendorModel[0], cfg.VendorModel[1])
	return DevInfo{fmt.Sprintf("%04x", cfg.DevDesc.Vendor), fmt.Sprintf("%04x", cfg.DevDesc.Product)}, deviceName, nil
}
// absoluteConfigPath returns configPath untouched if it is absolute.
// Otherwise, it returns configPath prefixed with the default install
// directory of virtual-usb-printer.
func absoluteConfigPath(configPath string) string {
	const installDir = "/usr/local/etc/virtual-usb-printer/"
	if !path.IsAbs(configPath) {
		return path.Join(installDir, configPath)
	}
	return configPath
}
// terminatePrinterProcess sends SIGTERM to the running virtual-usb-printer
// process and waits for it to exit. Dying from SIGTERM is the expected,
// successful outcome; any other wait failure is returned as an error.
func terminatePrinterProcess(ctx context.Context, cmd *testexec.Cmd) error {
	testing.ContextLogf(ctx, "Terminating virtual-usb-printer with PID %d", cmd.Cmd.Process.Pid)
	if err := cmd.Signal(unix.SIGTERM); err != nil {
		return errors.Wrap(err, "failed to send SIGTERM to virtual-usb-printer")
	}
	if err := cmd.Wait(); err != nil {
		// We're expecting the exit status to be non-zero if the process was killed by SIGTERM.
		// Anything else indicates a problem.
		if ws, ok := testexec.GetWaitStatus(err); !ok || !ws.Signaled() || ws.Signal() != unix.SIGTERM {
			return errors.Wrap(err, "failed to wait for virtual-usb-printer termination")
		}
	}
	return nil
}
// launchPrinter starts the virtual-usb-printer process under stdbuf and
// blocks (via waitLaunch on its stdout) until it reports readiness.
// If any step after the process has started fails, the process is
// terminated again so no orphan is left behind.
func launchPrinter(ctx context.Context, op config) (cmd *testexec.Cmd, err error) {
	testing.ContextLog(ctx, "Starting virtual printer: ", op.args)
	launch := testexec.CommandContext(ctx, "stdbuf", op.args...)
	p, err := launch.StdoutPipe()
	if err != nil {
		return nil, err
	}
	if err := launch.Start(); err != nil {
		return nil, errors.Wrapf(err, "failed to start %v", launch.Args)
	}
	// The named return value err makes this cleanup run only on
	// failure paths: it stays nil on the success return below.
	defer func(ctx context.Context) {
		if err == nil {
			return
		}
		if cleanupErr := terminatePrinterProcess(ctx, launch); cleanupErr != nil {
			testing.ContextLogf(ctx, "Virtual printer termination failed (%q)", cleanupErr)
		}
	}(ctx)
	if err := waitLaunch(p); err != nil {
		return nil, errors.Wrap(err, "failed to launch virtual printer")
	}
	// We pull everything out from the pipe so that
	// virtual-usb-printer doesn't block on writing to stdout.
	go io.Copy(ioutil.Discard, p)
	return launch, nil
}
// Start creates a new Printer and starts the underlying
// virtual-usb-printer process.
func | (ctx context.Context, opts ...Option) (pr *Printer, err error) {
// Debugd needs to be running before the USB device shows up so Chrome can add the printer.
if err := upstart.EnsureJobRunning(ctx, "debugd"); err != nil {
testing.ContextLogf(ctx, "debugd not running: %q", err)
return nil, err
}
op := config{
args: []string{"-o0", "virtual-usb-printer"},
}
for _, field := range opts {
if err := field(&op); err != nil {
return nil, err
}
}
if len(op.descriptors) == 0 {
return nil, errors.New("missing required WithDescriptors() option")
}
devInfo, deviceName, err := loadPrinterIDs(op.descriptors)
if err != nil {
return nil, err
}
cmd, err := launchPrinter(ctx, op)
if err != nil {
return nil, err
}
defer func(ctx context.Context) {
if err == nil {
return
}
if cleanupErr := cmd.Signal(unix.SIGTERM); cleanupErr != nil {
testing.ContextLogf(ctx, "Virtual printer termination failed (%q)", cleanupErr)
}
if cleanupErr := cmd.Wait(); cleanupErr != nil {
// This error is noisy: sending SIGTERM always causes Wait()
// to return an error.
testing.ContextLogf(ctx, "Virtual printer termination wait failed (%q)", cleanupErr)
}
}(ctx)
if err = attachUSBIPDevice(ctx, devInfo); err != nil {
return nil, err
}
printerName := ""
if op.waitUntilConfigured {
printerName, err = waitPrinterConfigured(ctx, devInfo)
if err != nil {
return nil, err
}
}
return &Printer{
ConfiguredName: printerName,
DevInfo: devInfo,
cmd: cmd,
expectUdevEventOnStop: op.expectUdevEventOnStop,
VisibleName: deviceName,
}, nil
}
// Stop terminates and waits for the virtual-usb-printer. Users must
// call this when finished with the virtual-usb-printer.
//
// Returns an error if we fail to terminate or wait for the
// virtual-usb-printer, or if we don't see an expected udev event
// upon stoppage.
//
// This method is idempotent.
func (p *Printer) Stop(ctx context.Context) error {
	if p.cmd == nil {
		return nil
	}
	// Clear p.cmd regardless of outcome so repeated Stop() calls no-op.
	defer func() {
		p.cmd = nil
	}()
	// Subscribe to the udev "remove" event before terminating so the
	// event cannot be missed in the gap between the two steps.
	var udevCh <-chan error
	if p.expectUdevEventOnStop {
		var err error
		udevCh, err = startUdevMonitor(ctx, "remove", p.DevInfo)
		if err != nil {
			return err
		}
	}
	// A termination failure is logged rather than returned; the udev
	// wait below (when enabled) is the authoritative success signal.
	if err := terminatePrinterProcess(ctx, p.cmd); err != nil {
		testing.ContextLogf(ctx, "Failed to terminate printer (%q)", err)
	}
	if p.expectUdevEventOnStop {
		// Wait for a signal from udevadm to say the device was successfully
		// detached.
		testing.ContextLog(ctx, "Waiting for udev remove event")
		select {
		case err := <-udevCh:
			if err != nil {
				return err
			}
			testing.ContextLog(ctx, "received remove event")
		case <-ctx.Done():
			return errors.Wrap(ctx.Err(), "didn't receive udev event")
		}
	}
	return nil
}
// attachUSBIPDevice attaches the UsbIp device specified by devInfo to the
// system. Returns nil if the device was attached successfully.
func attachUSBIPDevice(ctx context.Context, devInfo DevInfo) error {
	// Begin waiting for udev event.
	udevCh, err := startUdevMonitor(ctx, "add", devInfo)
	if err != nil {
		return err
	}
	// Attach the virtual printer to the system using the "usbip attach" command.
	// NOTE(review): bus ID "1-1" is hard-coded; this assumes a single
	// exported usbip device on localhost — confirm against the server setup.
	testing.ContextLog(ctx, "Attaching virtual printer")
	attach := testexec.CommandContext(ctx, "usbip", "attach", "-r", "localhost",
		"-b", "1-1")
	if err := attach.Run(); err != nil {
		return errors.Wrap(err, "failed to attach virtual usb printer")
	}
	// Wait for a signal from udevadm to see if the device was successfully
	// attached.
	testing.ContextLog(ctx, "Waiting for udev add event")
	select {
	case err := <-udevCh:
		if err != nil {
			return err
		}
		testing.ContextLog(ctx, "Found add event")
	case <-ctx.Done():
		return errors.Wrap(ctx.Err(), "didn't get udev event")
	}
	// Run lsusb to validity-check that the device is actually connected.
	id := fmt.Sprintf("%s:%s", devInfo.VID, devInfo.PID)
	checkAttached := testexec.CommandContext(ctx, "lsusb", "-d", id)
	if err := checkAttached.Run(); err != nil {
		checkAttached.DumpLog(ctx)
		return errors.Wrap(err, "printer was not successfully attached")
	}
	return nil
}
// waitPrinterConfigured waits for a printer which has the same VID/PID as
// devInfo to be configured on the system. If a match is found then the name
// of the configured device will be returned.
func waitPrinterConfigured(ctx context.Context, devInfo DevInfo) (string, error) {
	uri := ippUSBPrinterURI(devInfo)
	var foundName string
	poll := func(ctx context.Context) error {
		name, err := lp.PrinterNameByURI(ctx, uri)
		if err != nil {
			return err
		}
		foundName = name
		return nil
	}
	if err := testing.Poll(ctx, poll, nil); err != nil {
		return "", err
	}
	return foundName, nil
}
| Start | identifier_name |
setup.go | // Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package usbprinter provides an interface to configure and attach a virtual
// USB printer onto the system to be used for testing.
package usbprinter
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"golang.org/x/sys/unix"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/local/printing/lp"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// DevInfo contains information used to identify a USB device.
type DevInfo struct {
// VID contains the device's vendor ID.
VID string
// PID contains the devices's product ID.
PID string
}
// config contains all information needed to run the
// virtual-usb-printer process.
type config struct {
// The actual arguments fed to (and not including) stdbuf.
//
// Config data path fields obey these rules:
// 1. Absolute paths are passed verbatim to the invocation of
// virtual-usb-printer.
// 2. Relative paths (and basenames) are joined with the default
// install location of virtual-usb-printer's config files.
args []string
// Populated with path from WithDescriptors().
descriptors string
// Whether or not Start() blocks on printer autoconfiguration.
waitUntilConfigured bool
// Whether or not Printer.Stop() should propagate an error if
// no udev event is observed on stoppage.
expectUdevEventOnStop bool
}
// Option provides the type for functional options used
// to build a Printer via Start().
type Option func(*config) error
// WithIPPUSBDescriptors passes the most commonly used USB descriptors.
func WithIPPUSBDescriptors() Option {
return WithDescriptors("ippusb_printer.json")
}
// WithDescriptors sets the required descriptors.
func WithDescriptors(path string) Option {
return func(o *config) error {
if len(path) == 0 {
return errors.New("empty descriptors path")
}
o.args = append(o.args, "--descriptors_path="+absoluteConfigPath(path))
o.descriptors = absoluteConfigPath(path)
return nil
}
}
// WithGenericIPPAttributes passes the most commonly used IPP attributes.
func WithGenericIPPAttributes() Option {
return WithAttributes("ipp_attributes.json")
}
// WithAttributes sets attributes.
func WithAttributes(path string) Option {
return func(o *config) error {
if len(path) == 0 {
return errors.New("empty attributes path")
}
o.args = append(o.args, "--attributes_path="+absoluteConfigPath(path))
return nil
}
}
// WithESCLCapabilities sets eSCL capabilities.
func WithESCLCapabilities(path string) Option {
return func(o *config) error {
if len(path) == 0 {
return errors.New("empty eSCL capabilities path")
}
o.args = append(o.args, "--scanner_capabilities_path="+absoluteConfigPath(path))
return nil
}
}
// WithOutputLogDirectory sets the output log directory.
func WithOutputLogDirectory(directory string) Option {
return func(o *config) error {
if !path.IsAbs(directory) {
return errors.Errorf("output log directory (%q) is not an absolute path", directory)
}
o.args = append(o.args, "--output_log_dir="+directory)
return nil
}
}
// WithHTTPLogDirectory sets the HTTP log directory.
func WithHTTPLogDirectory(directory string) Option {
return func(o *config) error {
if !path.IsAbs(directory) {
return errors.Errorf("HTTP log directory (%q) is not an absolute path",
directory)
}
o.args = append(o.args, "--http_header_output_dir="+directory)
return nil
}
}
// WithRecordPath sets the document output path.
func WithRecordPath(record string) Option { | o.args = append(o.args, "--record_doc_path="+record)
return nil
}
}
// WithMockPrinterScriptPath sets the mock printer script path.
func WithMockPrinterScriptPath(script string) Option {
return func(o *config) error {
if !path.IsAbs(script) {
return errors.Errorf("mock printer script path (%q) is not an absolute path", script)
}
o.args = append(o.args, "--mock_printer_script="+script)
return nil
}
}
// WaitUntilConfigured controls whether or not Start() blocks on printer
// autoconfiguration.
func WaitUntilConfigured() Option {
return func(o *config) error {
o.waitUntilConfigured = true
return nil
}
}
// ExpectUdevEventOnStop causes Printer.Stop() to propagate errors if
// a udev event is not seen.
func ExpectUdevEventOnStop() Option {
return func(o *config) error {
o.expectUdevEventOnStop = true
return nil
}
}
// Printer provides an interface to interact with the running
// virtual-usb-printer instance.
type Printer struct {
// The printer name as detected by autoconfiguration.
// Empty if Start() was called with info.WaitUntilConfigured
// set false.
ConfiguredName string
// The printer's device information parsed from its USB
// descriptors config.
DevInfo DevInfo
// The running virtual-usb-printer instance.
cmd *testexec.Cmd
// Whether or not Stop() should propagate an error if
// no udev event is observed on stoppage.
expectUdevEventOnStop bool
// The human-readable printer name as it would be displayed
// in the UI. This is parsed from its USB descriptors, e.g.
// "DavieV Virtual USB Printer (USB)".
VisibleName string
}
func ippUSBPrinterURI(devInfo DevInfo) string {
return fmt.Sprintf("ippusb://%s_%s/ipp/print", devInfo.VID, devInfo.PID)
}
// loadPrinterIDs loads the JSON file located at path and attempts to extract
// the "vid" and "pid" from the USB device descriptor which should be defined
// in path.
func loadPrinterIDs(path string) (devInfo DevInfo, deviceName string, err error) {
f, err := os.Open(path)
if err != nil {
return devInfo, "", errors.Wrapf(err, "failed to open %s", path)
}
defer f.Close()
var cfg struct {
DevDesc struct {
Vendor int `json:"idVendor"`
Product int `json:"idProduct"`
} `json:"device_descriptor"`
VendorModel []string `json:"string_descriptors"`
}
if err := json.NewDecoder(f).Decode(&cfg); err != nil {
return devInfo, "", errors.Wrapf(err, "failed to decode JSON in %s", path)
}
deviceName = fmt.Sprintf("%s %s (USB)", cfg.VendorModel[0], cfg.VendorModel[1])
return DevInfo{fmt.Sprintf("%04x", cfg.DevDesc.Vendor), fmt.Sprintf("%04x", cfg.DevDesc.Product)}, deviceName, nil
}
// absoluteConfigPath returns configPath untouched if it is absolute.
// Otherwise, it returns configPath prefixed with the default install
// directory of virtual-usb-printer.
func absoluteConfigPath(configPath string) string {
if path.IsAbs(configPath) {
return configPath
}
return path.Join("/usr/local/etc/virtual-usb-printer/", configPath)
}
func terminatePrinterProcess(ctx context.Context, cmd *testexec.Cmd) error {
testing.ContextLogf(ctx, "Terminating virtual-usb-printer with PID %d", cmd.Cmd.Process.Pid)
if err := cmd.Signal(unix.SIGTERM); err != nil {
return errors.Wrap(err, "failed to send SIGTERM to virtual-usb-printer")
}
if err := cmd.Wait(); err != nil {
// We're expecting the exit status to be non-zero if the process was killed by SIGTERM.
// Anything else indicates a problem.
if ws, ok := testexec.GetWaitStatus(err); !ok || !ws.Signaled() || ws.Signal() != unix.SIGTERM {
return errors.Wrap(err, "failed to wait for virtual-usb-printer termination")
}
}
return nil
}
func launchPrinter(ctx context.Context, op config) (cmd *testexec.Cmd, err error) {
testing.ContextLog(ctx, "Starting virtual printer: ", op.args)
launch := testexec.CommandContext(ctx, "stdbuf", op.args...)
p, err := launch.StdoutPipe()
if err != nil {
return nil, err
}
if err := launch.Start(); err != nil {
return nil, errors.Wrapf(err, "failed to start %v", launch.Args)
}
defer func(ctx context.Context) {
if err == nil {
return
}
if cleanupErr := terminatePrinterProcess(ctx, launch); cleanupErr != nil {
testing.ContextLogf(ctx, "Virtual printer termination failed (%q)", cleanupErr)
}
}(ctx)
if err := waitLaunch(p); err != nil {
return nil, errors.Wrap(err, "failed to launch virtual printer")
}
// We pull everything out from the pipe so that
// virtual-usb-printer doesn't block on writing to stdout.
go io.Copy(ioutil.Discard, p)
return launch, nil
}
// Start creates a new Printer and starts the underlying
// virtual-usb-printer process.
func Start(ctx context.Context, opts ...Option) (pr *Printer, err error) {
// Debugd needs to be running before the USB device shows up so Chrome can add the printer.
if err := upstart.EnsureJobRunning(ctx, "debugd"); err != nil {
testing.ContextLogf(ctx, "debugd not running: %q", err)
return nil, err
}
op := config{
args: []string{"-o0", "virtual-usb-printer"},
}
for _, field := range opts {
if err := field(&op); err != nil {
return nil, err
}
}
if len(op.descriptors) == 0 {
return nil, errors.New("missing required WithDescriptors() option")
}
devInfo, deviceName, err := loadPrinterIDs(op.descriptors)
if err != nil {
return nil, err
}
cmd, err := launchPrinter(ctx, op)
if err != nil {
return nil, err
}
defer func(ctx context.Context) {
if err == nil {
return
}
if cleanupErr := cmd.Signal(unix.SIGTERM); cleanupErr != nil {
testing.ContextLogf(ctx, "Virtual printer termination failed (%q)", cleanupErr)
}
if cleanupErr := cmd.Wait(); cleanupErr != nil {
// This error is noisy: sending SIGTERM always causes Wait()
// to return an error.
testing.ContextLogf(ctx, "Virtual printer termination wait failed (%q)", cleanupErr)
}
}(ctx)
if err = attachUSBIPDevice(ctx, devInfo); err != nil {
return nil, err
}
printerName := ""
if op.waitUntilConfigured {
printerName, err = waitPrinterConfigured(ctx, devInfo)
if err != nil {
return nil, err
}
}
return &Printer{
ConfiguredName: printerName,
DevInfo: devInfo,
cmd: cmd,
expectUdevEventOnStop: op.expectUdevEventOnStop,
VisibleName: deviceName,
}, nil
}
// Stop terminates and waits for the virtual-usb-printer. Users must
// call this when finished with the virtual-usb-printer.
//
// Returns an error if we fail to terminate or wait for the
// virtual-usb-printer, or if we don't see an expected udev event
// upon stoppage.
//
// This method is idempotent.
func (p *Printer) Stop(ctx context.Context) error {
if p.cmd == nil {
return nil
}
defer func() {
p.cmd = nil
}()
var udevCh <-chan error
if p.expectUdevEventOnStop {
var err error
udevCh, err = startUdevMonitor(ctx, "remove", p.DevInfo)
if err != nil {
return err
}
}
if err := terminatePrinterProcess(ctx, p.cmd); err != nil {
testing.ContextLogf(ctx, "Failed to terminate printer (%q)", err)
}
if p.expectUdevEventOnStop {
// Wait for a signal from udevadm to say the device was successfully
// detached.
testing.ContextLog(ctx, "Waiting for udev remove event")
select {
case err := <-udevCh:
if err != nil {
return err
}
testing.ContextLog(ctx, "received remove event")
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "didn't receive udev event")
}
}
return nil
}
// attachUSBIPDevice attaches the UsbIp device specified by devInfo to the
// system. Returns nil if the device was attached successfully.
func attachUSBIPDevice(ctx context.Context, devInfo DevInfo) error {
// Begin waiting for udev event.
udevCh, err := startUdevMonitor(ctx, "add", devInfo)
if err != nil {
return err
}
// Attach the virtual printer to the system using the "usbip attach" command.
testing.ContextLog(ctx, "Attaching virtual printer")
attach := testexec.CommandContext(ctx, "usbip", "attach", "-r", "localhost",
"-b", "1-1")
if err := attach.Run(); err != nil {
return errors.Wrap(err, "failed to attach virtual usb printer")
}
// Wait for a signal from udevadm to see if the device was successfully
// attached.
testing.ContextLog(ctx, "Waiting for udev add event")
select {
case err := <-udevCh:
if err != nil {
return err
}
testing.ContextLog(ctx, "Found add event")
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "didn't get udev event")
}
// Run lsusb to validity check that that the device is actually connected.
id := fmt.Sprintf("%s:%s", devInfo.VID, devInfo.PID)
checkAttached := testexec.CommandContext(ctx, "lsusb", "-d", id)
if err := checkAttached.Run(); err != nil {
checkAttached.DumpLog(ctx)
return errors.Wrap(err, "printer was not successfully attached")
}
return nil
}
// waitPrinterConfigured waits for a printer which has the same VID/PID as
// devInfo to be configured on the system. If a match is found then the name of
// the configured device will be returned.
func waitPrinterConfigured(ctx context.Context, devInfo DevInfo) (string, error) {
var foundName string
uri := ippUSBPrinterURI(devInfo)
if err := testing.Poll(ctx, func(ctx context.Context) error {
name, err := lp.PrinterNameByURI(ctx, uri)
if err != nil {
return err
}
foundName = name
return nil
}, nil); err != nil {
return "", err
}
return foundName, nil
} | return func(o *config) error {
if !path.IsAbs(record) {
return errors.Errorf("record path (%q) is not an absolute path", record)
} | random_line_split |
create_sdk.py | #!/usr/bin/env python
#
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# A script which will be invoked from gyp to create an SDK.
#
# Usage: create_sdk.py sdk_directory
#
# The SDK will be used either from the command-line or from the editor.
# Top structure is
#
# ..dart-sdk/
# ....bin/
# ......dart or dart.exe (executable)
# ......dart.lib (import library for VM native extensions on Windows)
# ......dartdoc
# ......dartfmt
# ......dart2js
# ......dartanalyzer
# ......dartdevc
# ......pub
# ......snapshots/
# ........analysis_server.dart.snapshot
# ........dart2js.dart.snapshot
# ........dartanalyzer.dart.snapshot
# ........dartdoc.dart.snapshot
# ........dartfmt.dart.snapshot
# ........dartdevc.dart.snapshot
# ........pub.dart.snapshot
# ........utils_wrapper.dart.snapshot
#.........resources/
#...........dartdoc/
#..............packages
#.............resources/
#.............templates/
# ....include/
# ......dart_api.h
# ......dart_mirrors_api.h
# ......dart_native_api.h
# ......dart_tools_api.h
# ....lib/
# ......dart_client.platform
# ......dart_server.platform
# ......dart_shared.platform
# ......_internal/
#.........spec.sum
#.........strong.sum
#.........dev_compiler/
# ......analysis_server/
# ......analyzer/
# ......async/
# ......collection/
# ......convert/
# ......core/
# ......front_end/
# ......html/
# ......internal/
# ......io/
# ......isolate/
# ......js/
# ......js_util/
# ......kernel/
# ......math/
# ......mirrors/
# ......typed_data/
# ......api_readme.md
# ....util/
# ......(more will come here)
import optparse
import os
import re
import sys
import subprocess
import utils
HOST_OS = utils.GuessOS()
# TODO(dgrove): Only import modules following Google style guide.
from os.path import basename, dirname, join, realpath, exists
# TODO(dgrove): Only import modules following Google style guide.
from shutil import copyfile, copymode, copytree, ignore_patterns, rmtree, move
def GetOptions():
  """Parses the command-line flags for this script.

  Returns:
    The (options, args) tuple produced by optparse.
  """
  # NOTE(review): optparse is deprecated in favor of argparse, but the
  # surrounding tooling still drives this interface; left unchanged.
  options = optparse.OptionParser(usage='usage: %prog [options]')
  options.add_option("--sdk_output_dir",
      help='Where to output the sdk')
  options.add_option("--snapshot_location",
      help='Location of the snapshots.')
  options.add_option("--copy_libs",
      action="store_true", default=False,
      help='Copy dynamically linked libraries to the SDK bin directory.')
  options.add_option("--disable_stripping",
      action="store_true", default=False,
      help='Do not try to strip binaries. Use when they are already stripped')
  return options.parse_args()
def ReplaceInFiles(paths, subs):
  """Reads a series of files, applies a series of substitutions to each, and
  saves them back out.

  Args:
    paths: iterable of file paths to rewrite in place.
    subs: list of (pattern, replace) tuples applied in order via re.sub.
  """
  for path in paths:
    # Use context managers so handles are closed even if re.sub or the
    # write raises (the original leaked the read handle entirely).
    with open(path) as f:
      contents = f.read()
    for pattern, replace in subs:
      contents = re.sub(pattern, replace, contents)
    with open(path, 'w') as dest:
      dest.write(contents)
def Copy(src, dest):
  """Copies src to dest and replicates src's permission bits.

  NOTE(review): the identifier was garbled in this dump ("def |");
  restored as Copy from the call site in CopyShellScript.
  """
  copyfile(src, dest)
  copymode(src, dest)
def CopyShellScript(src_file, dest_dir):
  """Copies a shell/batch script to the given destination directory. Handles
  using the appropriate platform-specific file extension (.bat on Windows,
  none elsewhere).
  """
  file_extension = ''
  if HOST_OS == 'win32':
    file_extension = '.bat'
  # If we're copying an SDK-specific shell script, strip off the suffix.
  # NOTE(review): str.replace removes every '_sdk' occurrence, not just a
  # trailing one — fine for the current script names, but worth confirming
  # if new names are added.
  dest_file = basename(src_file)
  if dest_file.endswith('_sdk'):
    dest_file = dest_file.replace('_sdk', '')
  src = src_file + file_extension
  dest = join(dest_dir, dest_file + file_extension)
  Copy(src, dest)
def CopyLibs(out_dir, bin_dir):
  """Copies the dynamically linked libraries (libcrypto/libssl) from
  out_dir into bin_dir, preserving file modes. Libraries that do not
  exist in out_dir are silently skipped.
  """
  # The shared-library extension depends only on the host OS, so
  # compute it once instead of per library.
  if HOST_OS == 'macos':
    ext = '.dylib'
  elif HOST_OS == 'win32':
    ext = '.dll'
  else:
    ext = '.so'
  for library in ['libcrypto', 'libssl']:
    src = os.path.join(out_dir, library + ext)
    dst = os.path.join(bin_dir, library + ext)
    if os.path.isfile(src):
      copyfile(src, dst)
      copymode(src, dst)
def CopyDartScripts(home, sdk_root):
  """Copies the developer-facing launcher scripts from sdk/bin into
  sdk_root/bin. The '_sdk' suffix on source names is stripped by
  CopyShellScript.
  """
  for executable in ['dart2js_sdk', 'dartanalyzer_sdk', 'dartfmt_sdk',
                     'pub_sdk', 'dartdoc', 'dartdevc_sdk']:
    CopyShellScript(os.path.join(home, 'sdk', 'bin', executable),
                    os.path.join(sdk_root, 'bin'))
def CopySnapshots(snapshots, sdk_root):
  """Copies the prebuilt tool snapshots into sdk_root/bin/snapshots."""
  dest_dir = join(sdk_root, 'bin', 'snapshots')
  for tool in ['analysis_server', 'dart2js', 'dartanalyzer', 'dartfmt',
               'utils_wrapper', 'pub', 'dartdoc', 'dartdevc']:
    snapshot_name = tool + '.dart.snapshot'
    copyfile(join(snapshots, snapshot_name), join(dest_dir, snapshot_name))
def CopyAnalyzerSources(home, lib_dir):
  """Copies the analyzer-related package sources from pkg/ into lib_dir,
  skipping VCS metadata, docs, build scripts and package links.
  """
  for library in ['analyzer', 'analysis_server', 'front_end', 'kernel']:
    copytree(join(home, 'pkg', library), join(lib_dir, library),
             ignore=ignore_patterns('*.svn', 'doc', '*.py', '*.gypi', '*.sh',
                                    '.gitignore', 'packages'))
def CopyDartdocResources(home, sdk_root):
  """Copies dartdoc's templates and resources into the SDK snapshot
  resources directory and writes the .packages file dartdoc needs to
  resolve its own package at runtime.
  """
  RESOURCE_DIR = join(sdk_root, 'bin', 'snapshots', 'resources')
  DARTDOC = join(RESOURCE_DIR, 'dartdoc')
  copytree(join(home, 'third_party', 'pkg', 'dartdoc', 'lib', 'templates'),
           join(DARTDOC, 'templates'))
  copytree(join(home, 'third_party', 'pkg', 'dartdoc', 'lib', 'resources'),
           join(DARTDOC, 'resources'))
  # Write the .packages file; a context manager guarantees the handle is
  # closed (and the content flushed) even if the write raises.
  PACKAGES_FILE = join(DARTDOC, '.packages')
  with open(PACKAGES_FILE, 'w') as packages_file:
    packages_file.write('dartdoc:.')
def CopyAnalysisSummaries(snapshots, lib):
  """Copies the analyzer summary files (spec.sum, strong.sum) into
  lib/_internal."""
  for summary in ('spec.sum', 'strong.sum'):
    copyfile(join(snapshots, summary), join(lib, '_internal', summary))
def CopyDevCompilerSdk(home, lib):
  """Copies the dev_compiler (DDC) summary, its JS runtime tree and
  require.js into the SDK lib directory.
  """
  copyfile(join(home, 'pkg', 'dev_compiler', 'lib', 'sdk', 'ddc_sdk.sum'),
           join(lib, '_internal', 'ddc_sdk.sum'))
  copytree(join(home, 'pkg', 'dev_compiler', 'lib', 'js'),
           join(lib, 'dev_compiler'))
  # require.js lands under the amd/ subtree created by the copytree above.
  copyfile(join(home, 'third_party', 'requirejs', 'require.js'),
           join(lib, 'dev_compiler', 'amd', 'require.js'))
def Main():
# Pull in all of the gypi files which will be munged into the sdk.
HOME = dirname(dirname(realpath(__file__)))
(options, args) = GetOptions()
SDK = options.sdk_output_dir
SDK_tmp = '%s.tmp' % SDK
SNAPSHOT = options.snapshot_location
# TODO(dgrove) - deal with architectures that are not ia32.
if exists(SDK):
rmtree(SDK)
if exists(SDK_tmp):
rmtree(SDK_tmp)
os.makedirs(SDK_tmp)
# Create and populate sdk/bin.
BIN = join(SDK_tmp, 'bin')
os.makedirs(BIN)
os.makedirs(join(BIN, 'snapshots'))
# Copy the Dart VM binary and the Windows Dart VM link library
# into sdk/bin.
#
# TODO(dgrove) - deal with architectures that are not ia32.
build_dir = os.path.dirname(SDK)
dart_file_extension = ''
if HOST_OS == 'win32':
dart_file_extension = '.exe'
dart_import_lib_src = join(HOME, build_dir, 'dart.lib')
dart_import_lib_dest = join(BIN, 'dart.lib')
copyfile(dart_import_lib_src, dart_import_lib_dest)
dart_src_binary = join(HOME, build_dir, 'dart' + dart_file_extension)
dart_dest_binary = join(BIN, 'dart' + dart_file_extension)
copyfile(dart_src_binary, dart_dest_binary)
copymode(dart_src_binary, dart_dest_binary)
# Strip the binaries on platforms where that is supported.
if HOST_OS == 'linux' and not options.disable_stripping:
subprocess.call(['strip', dart_dest_binary])
elif HOST_OS == 'macos' and not options.disable_stripping:
subprocess.call(['strip', '-x', dart_dest_binary])
#
# Create and populate sdk/include.
#
INCLUDE = join(SDK_tmp, 'include')
os.makedirs(INCLUDE)
copyfile(join(HOME, 'runtime', 'include', 'dart_api.h'),
join(INCLUDE, 'dart_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_mirrors_api.h'),
join(INCLUDE, 'dart_mirrors_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_native_api.h'),
join(INCLUDE, 'dart_native_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_tools_api.h'),
join(INCLUDE, 'dart_tools_api.h'))
#
# Create and populate sdk/lib.
#
LIB = join(SDK_tmp, 'lib')
os.makedirs(LIB)
#
# Create and populate lib/{async, core, isolate, ...}.
#
os.makedirs(join(LIB, 'html'))
for library in [join('_blink', 'dartium'),
join('_chrome', 'dart2js'), join('_chrome', 'dartium'),
join('_internal', 'js_runtime'),
join('_internal', 'sdk_library_metadata'),
'async', 'collection', 'convert', 'core', 'developer',
'internal', 'io', 'isolate',
join('html', 'dart2js'), join('html', 'dartium'),
join('html', 'html_common'),
join('indexed_db', 'dart2js'), join('indexed_db', 'dartium'),
'js', 'js_util', 'math', 'mirrors', 'profiler', 'typed_data',
join('svg', 'dart2js'), join('svg', 'dartium'),
join('web_audio', 'dart2js'), join('web_audio', 'dartium'),
join('web_gl', 'dart2js'), join('web_gl', 'dartium'),
join('web_sql', 'dart2js'), join('web_sql', 'dartium')]:
copytree(join(HOME, 'sdk', 'lib', library), join(LIB, library),
ignore=ignore_patterns('*.svn', 'doc', '*.py', '*.gypi', '*.sh',
'.gitignore'))
# Copy the platform descriptors.
for file_name in ["dart_client.platform",
"dart_server.platform",
"dart_shared.platform"]:
copyfile(join(HOME, 'sdk', 'lib', file_name), join(LIB, file_name));
# Copy libraries.dart to lib/_internal/libraries.dart for backwards
# compatibility.
#
# TODO(sigmund): stop copying libraries.dart. Old versions (<=0.25.1-alpha.4)
# of the analyzer package do not support the new location of this file. We
# should be able to remove the old file once we release a newer version of
# analyzer and popular frameworks have migrated to use it.
copyfile(join(HOME, 'sdk', 'lib', '_internal',
'sdk_library_metadata', 'lib', 'libraries.dart'),
join(LIB, '_internal', 'libraries.dart'))
# Create and copy tools.
UTIL = join(SDK_tmp, 'util')
os.makedirs(UTIL)
RESOURCE = join(SDK_tmp, 'lib', '_internal', 'pub', 'asset')
os.makedirs(os.path.dirname(RESOURCE))
copytree(join(HOME, 'third_party', 'pkg', 'pub', 'lib', 'src',
'asset'),
join(RESOURCE),
ignore=ignore_patterns('.svn'))
# Copy in 7zip for Windows.
if HOST_OS == 'win32':
copytree(join(HOME, 'third_party', '7zip'),
join(RESOURCE, '7zip'),
ignore=ignore_patterns('.svn'))
# Copy dart2js/pub.
CopyDartScripts(HOME, SDK_tmp)
CopySnapshots(SNAPSHOT, SDK_tmp)
CopyDartdocResources(HOME, SDK_tmp)
CopyAnalyzerSources(HOME, LIB)
CopyAnalysisSummaries(SNAPSHOT, LIB)
CopyDevCompilerSdk(HOME, LIB)
if options.copy_libs:
CopyLibs(build_dir, BIN)
# Write the 'version' file
version = utils.GetVersion()
versionFile = open(os.path.join(SDK_tmp, 'version'), 'w')
versionFile.write(version + '\n')
versionFile.close()
# Write the 'revision' file
revision = utils.GetGitRevision()
if revision is not None:
with open(os.path.join(SDK_tmp, 'revision'), 'w') as f:
f.write('%s\n' % revision)
f.close()
Copy(join(HOME, 'README.dart-sdk'), join(SDK_tmp, 'README'))
Copy(join(HOME, 'LICENSE'), join(SDK_tmp, 'LICENSE'))
Copy(join(HOME, 'sdk', 'api_readme.md'), join(SDK_tmp, 'lib', 'api_readme.md'))
move(SDK_tmp, SDK)
if __name__ == '__main__':
sys.exit(Main())
| Copy | identifier_name |
create_sdk.py | #!/usr/bin/env python
#
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# A script which will be invoked from gyp to create an SDK.
#
# Usage: create_sdk.py sdk_directory
#
# The SDK will be used either from the command-line or from the editor.
# Top structure is
#
# ..dart-sdk/
# ....bin/
# ......dart or dart.exe (executable)
# ......dart.lib (import library for VM native extensions on Windows)
# ......dartdoc
# ......dartfmt
# ......dart2js
# ......dartanalyzer
# ......dartdevc
# ......pub
# ......snapshots/
# ........analysis_server.dart.snapshot
# ........dart2js.dart.snapshot
# ........dartanalyzer.dart.snapshot
# ........dartdoc.dart.snapshot
# ........dartfmt.dart.snapshot
# ........dartdevc.dart.snapshot
# ........pub.dart.snapshot
# ........utils_wrapper.dart.snapshot
#.........resources/
#...........dartdoc/
#..............packages
#.............resources/
#.............templates/
# ....include/
# ......dart_api.h
# ......dart_mirrors_api.h
# ......dart_native_api.h
# ......dart_tools_api.h
# ....lib/
# ......dart_client.platform
# ......dart_server.platform
# ......dart_shared.platform
# ......_internal/
#.........spec.sum
#.........strong.sum
#.........dev_compiler/
# ......analysis_server/
# ......analyzer/
# ......async/
# ......collection/
# ......convert/
# ......core/
# ......front_end/
# ......html/
# ......internal/
# ......io/
# ......isolate/
# ......js/
# ......js_util/
# ......kernel/
# ......math/
# ......mirrors/
# ......typed_data/
# ......api_readme.md
# ....util/
# ......(more will come here)
import optparse
import os
import re
import sys
import subprocess
import utils
HOST_OS = utils.GuessOS()
# TODO(dgrove): Only import modules following Google style guide.
from os.path import basename, dirname, join, realpath, exists
# TODO(dgrove): Only import modules following Google style guide.
from shutil import copyfile, copymode, copytree, ignore_patterns, rmtree, move
def GetOptions():
options = optparse.OptionParser(usage='usage: %prog [options]')
options.add_option("--sdk_output_dir",
help='Where to output the sdk')
options.add_option("--snapshot_location",
help='Location of the snapshots.')
options.add_option("--copy_libs",
action="store_true", default=False,
help='Copy dynamically linked libraries to the SDK bin directory.')
options.add_option("--disable_stripping",
action="store_true", default=False,
help='Do not try to strip binaries. Use when they are already stripped')
return options.parse_args()
def ReplaceInFiles(paths, subs):
"""Reads a series of files, applies a series of substitutions to each, and
saves them back out. subs should by a list of (pattern, replace) tuples."""
for path in paths:
contents = open(path).read()
for pattern, replace in subs:
contents = re.sub(pattern, replace, contents)
dest = open(path, 'w')
dest.write(contents)
dest.close()
def Copy(src, dest):
copyfile(src, dest)
copymode(src, dest)
def CopyShellScript(src_file, dest_dir):
"""Copies a shell/batch script to the given destination directory. Handles
using the appropriate platform-specific file extension."""
file_extension = ''
if HOST_OS == 'win32':
file_extension = '.bat'
# If we're copying an SDK-specific shell script, strip off the suffix.
dest_file = basename(src_file)
if dest_file.endswith('_sdk'):
dest_file = dest_file.replace('_sdk', '')
src = src_file + file_extension
dest = join(dest_dir, dest_file + file_extension)
Copy(src, dest)
def CopyLibs(out_dir, bin_dir):
for library in ['libcrypto', 'libssl']:
ext = '.so'
if HOST_OS == 'macos':
ext = '.dylib'
elif HOST_OS == 'win32':
ext = '.dll'
src = os.path.join(out_dir, library + ext)
dst = os.path.join(bin_dir, library + ext)
if os.path.isfile(src):
copyfile(src, dst)
copymode(src, dst)
def CopyDartScripts(home, sdk_root):
for executable in ['dart2js_sdk', 'dartanalyzer_sdk', 'dartfmt_sdk',
'pub_sdk', 'dartdoc', 'dartdevc_sdk']:
CopyShellScript(os.path.join(home, 'sdk', 'bin', executable),
os.path.join(sdk_root, 'bin'))
def CopySnapshots(snapshots, sdk_root):
for snapshot in ['analysis_server', 'dart2js', 'dartanalyzer', 'dartfmt',
'utils_wrapper', 'pub', 'dartdoc', 'dartdevc']:
snapshot += '.dart.snapshot'
copyfile(join(snapshots, snapshot),
join(sdk_root, 'bin', 'snapshots', snapshot))
def CopyAnalyzerSources(home, lib_dir):
for library in ['analyzer', 'analysis_server', 'front_end', 'kernel']:
copytree(join(home, 'pkg', library), join(lib_dir, library),
ignore=ignore_patterns('*.svn', 'doc', '*.py', '*.gypi', '*.sh',
'.gitignore', 'packages'))
def CopyDartdocResources(home, sdk_root):
RESOURCE_DIR = join(sdk_root, 'bin', 'snapshots', 'resources')
DARTDOC = join(RESOURCE_DIR, 'dartdoc')
copytree(join(home, 'third_party', 'pkg', 'dartdoc', 'lib', 'templates'),
join(DARTDOC, 'templates'))
copytree(join(home, 'third_party', 'pkg', 'dartdoc', 'lib', 'resources'),
join(DARTDOC, 'resources'))
# write the .packages file
PACKAGES_FILE = join(DARTDOC, '.packages')
packages_file = open(PACKAGES_FILE, 'w')
packages_file.write('dartdoc:.')
packages_file.close()
def CopyAnalysisSummaries(snapshots, lib):
copyfile(join(snapshots, 'spec.sum'),
join(lib, '_internal', 'spec.sum'))
copyfile(join(snapshots, 'strong.sum'),
join(lib, '_internal', 'strong.sum'))
def CopyDevCompilerSdk(home, lib):
copyfile(join(home, 'pkg', 'dev_compiler', 'lib', 'sdk', 'ddc_sdk.sum'),
join(lib, '_internal', 'ddc_sdk.sum'))
copytree(join(home, 'pkg', 'dev_compiler', 'lib', 'js'),
join(lib, 'dev_compiler'))
copyfile(join(home, 'third_party', 'requirejs', 'require.js'),
join(lib, 'dev_compiler', 'amd', 'require.js'))
def Main():
# Pull in all of the gypi files which will be munged into the sdk.
HOME = dirname(dirname(realpath(__file__)))
(options, args) = GetOptions()
SDK = options.sdk_output_dir
SDK_tmp = '%s.tmp' % SDK
SNAPSHOT = options.snapshot_location
# TODO(dgrove) - deal with architectures that are not ia32.
if exists(SDK):
rmtree(SDK)
if exists(SDK_tmp):
rmtree(SDK_tmp)
os.makedirs(SDK_tmp)
# Create and populate sdk/bin.
BIN = join(SDK_tmp, 'bin')
os.makedirs(BIN)
os.makedirs(join(BIN, 'snapshots'))
# Copy the Dart VM binary and the Windows Dart VM link library
# into sdk/bin.
#
# TODO(dgrove) - deal with architectures that are not ia32.
build_dir = os.path.dirname(SDK)
dart_file_extension = ''
if HOST_OS == 'win32':
dart_file_extension = '.exe'
dart_import_lib_src = join(HOME, build_dir, 'dart.lib')
dart_import_lib_dest = join(BIN, 'dart.lib')
copyfile(dart_import_lib_src, dart_import_lib_dest)
dart_src_binary = join(HOME, build_dir, 'dart' + dart_file_extension)
dart_dest_binary = join(BIN, 'dart' + dart_file_extension)
copyfile(dart_src_binary, dart_dest_binary)
copymode(dart_src_binary, dart_dest_binary)
# Strip the binaries on platforms where that is supported.
if HOST_OS == 'linux' and not options.disable_stripping:
subprocess.call(['strip', dart_dest_binary])
elif HOST_OS == 'macos' and not options.disable_stripping:
subprocess.call(['strip', '-x', dart_dest_binary])
#
# Create and populate sdk/include.
#
INCLUDE = join(SDK_tmp, 'include')
os.makedirs(INCLUDE)
copyfile(join(HOME, 'runtime', 'include', 'dart_api.h'),
join(INCLUDE, 'dart_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_mirrors_api.h'),
join(INCLUDE, 'dart_mirrors_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_native_api.h'),
join(INCLUDE, 'dart_native_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_tools_api.h'),
join(INCLUDE, 'dart_tools_api.h'))
#
# Create and populate sdk/lib.
#
LIB = join(SDK_tmp, 'lib')
os.makedirs(LIB)
#
# Create and populate lib/{async, core, isolate, ...}.
#
os.makedirs(join(LIB, 'html'))
for library in [join('_blink', 'dartium'),
join('_chrome', 'dart2js'), join('_chrome', 'dartium'),
join('_internal', 'js_runtime'),
join('_internal', 'sdk_library_metadata'),
'async', 'collection', 'convert', 'core', 'developer',
'internal', 'io', 'isolate',
join('html', 'dart2js'), join('html', 'dartium'),
join('html', 'html_common'),
join('indexed_db', 'dart2js'), join('indexed_db', 'dartium'),
'js', 'js_util', 'math', 'mirrors', 'profiler', 'typed_data',
join('svg', 'dart2js'), join('svg', 'dartium'),
join('web_audio', 'dart2js'), join('web_audio', 'dartium'),
join('web_gl', 'dart2js'), join('web_gl', 'dartium'),
join('web_sql', 'dart2js'), join('web_sql', 'dartium')]:
|
# Copy the platform descriptors.
for file_name in ["dart_client.platform",
"dart_server.platform",
"dart_shared.platform"]:
copyfile(join(HOME, 'sdk', 'lib', file_name), join(LIB, file_name));
# Copy libraries.dart to lib/_internal/libraries.dart for backwards
# compatibility.
#
# TODO(sigmund): stop copying libraries.dart. Old versions (<=0.25.1-alpha.4)
# of the analyzer package do not support the new location of this file. We
# should be able to remove the old file once we release a newer version of
# analyzer and popular frameworks have migrated to use it.
copyfile(join(HOME, 'sdk', 'lib', '_internal',
'sdk_library_metadata', 'lib', 'libraries.dart'),
join(LIB, '_internal', 'libraries.dart'))
# Create and copy tools.
UTIL = join(SDK_tmp, 'util')
os.makedirs(UTIL)
RESOURCE = join(SDK_tmp, 'lib', '_internal', 'pub', 'asset')
os.makedirs(os.path.dirname(RESOURCE))
copytree(join(HOME, 'third_party', 'pkg', 'pub', 'lib', 'src',
'asset'),
join(RESOURCE),
ignore=ignore_patterns('.svn'))
# Copy in 7zip for Windows.
if HOST_OS == 'win32':
copytree(join(HOME, 'third_party', '7zip'),
join(RESOURCE, '7zip'),
ignore=ignore_patterns('.svn'))
# Copy dart2js/pub.
CopyDartScripts(HOME, SDK_tmp)
CopySnapshots(SNAPSHOT, SDK_tmp)
CopyDartdocResources(HOME, SDK_tmp)
CopyAnalyzerSources(HOME, LIB)
CopyAnalysisSummaries(SNAPSHOT, LIB)
CopyDevCompilerSdk(HOME, LIB)
if options.copy_libs:
CopyLibs(build_dir, BIN)
# Write the 'version' file
version = utils.GetVersion()
versionFile = open(os.path.join(SDK_tmp, 'version'), 'w')
versionFile.write(version + '\n')
versionFile.close()
# Write the 'revision' file
revision = utils.GetGitRevision()
if revision is not None:
with open(os.path.join(SDK_tmp, 'revision'), 'w') as f:
f.write('%s\n' % revision)
f.close()
Copy(join(HOME, 'README.dart-sdk'), join(SDK_tmp, 'README'))
Copy(join(HOME, 'LICENSE'), join(SDK_tmp, 'LICENSE'))
Copy(join(HOME, 'sdk', 'api_readme.md'), join(SDK_tmp, 'lib', 'api_readme.md'))
move(SDK_tmp, SDK)
if __name__ == '__main__':
sys.exit(Main())
| copytree(join(HOME, 'sdk', 'lib', library), join(LIB, library),
ignore=ignore_patterns('*.svn', 'doc', '*.py', '*.gypi', '*.sh',
'.gitignore')) | conditional_block |
create_sdk.py | #!/usr/bin/env python
#
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# A script which will be invoked from gyp to create an SDK.
#
# Usage: create_sdk.py sdk_directory
#
# The SDK will be used either from the command-line or from the editor.
# Top structure is
#
# ..dart-sdk/
# ....bin/
# ......dart or dart.exe (executable)
# ......dart.lib (import library for VM native extensions on Windows)
# ......dartdoc
# ......dartfmt
# ......dart2js
# ......dartanalyzer
# ......dartdevc
# ......pub
# ......snapshots/
# ........analysis_server.dart.snapshot
# ........dart2js.dart.snapshot
# ........dartanalyzer.dart.snapshot
# ........dartdoc.dart.snapshot
# ........dartfmt.dart.snapshot
# ........dartdevc.dart.snapshot
# ........pub.dart.snapshot
# ........utils_wrapper.dart.snapshot
#.........resources/
#...........dartdoc/
#..............packages
#.............resources/
#.............templates/
# ....include/
# ......dart_api.h
# ......dart_mirrors_api.h
# ......dart_native_api.h
# ......dart_tools_api.h
# ....lib/
# ......dart_client.platform
# ......dart_server.platform
# ......dart_shared.platform
# ......_internal/
#.........spec.sum
#.........strong.sum
#.........dev_compiler/
# ......analysis_server/
# ......analyzer/
# ......async/
# ......collection/
# ......convert/
# ......core/
# ......front_end/
# ......html/
# ......internal/
# ......io/
# ......isolate/
# ......js/
# ......js_util/
# ......kernel/
# ......math/
# ......mirrors/
# ......typed_data/
# ......api_readme.md
# ....util/
# ......(more will come here)
import optparse
import os
import re
import sys
import subprocess
import utils
HOST_OS = utils.GuessOS()
# TODO(dgrove): Only import modules following Google style guide.
from os.path import basename, dirname, join, realpath, exists
# TODO(dgrove): Only import modules following Google style guide.
from shutil import copyfile, copymode, copytree, ignore_patterns, rmtree, move
def GetOptions():
options = optparse.OptionParser(usage='usage: %prog [options]')
options.add_option("--sdk_output_dir",
help='Where to output the sdk')
options.add_option("--snapshot_location",
help='Location of the snapshots.')
options.add_option("--copy_libs",
action="store_true", default=False,
help='Copy dynamically linked libraries to the SDK bin directory.')
options.add_option("--disable_stripping",
action="store_true", default=False,
help='Do not try to strip binaries. Use when they are already stripped')
return options.parse_args()
def ReplaceInFiles(paths, subs):
"""Reads a series of files, applies a series of substitutions to each, and
saves them back out. subs should by a list of (pattern, replace) tuples."""
for path in paths:
contents = open(path).read()
for pattern, replace in subs:
contents = re.sub(pattern, replace, contents)
dest = open(path, 'w')
dest.write(contents)
dest.close()
def Copy(src, dest):
copyfile(src, dest)
copymode(src, dest)
def CopyShellScript(src_file, dest_dir):
"""Copies a shell/batch script to the given destination directory. Handles
using the appropriate platform-specific file extension."""
file_extension = ''
if HOST_OS == 'win32':
file_extension = '.bat'
# If we're copying an SDK-specific shell script, strip off the suffix.
dest_file = basename(src_file)
if dest_file.endswith('_sdk'):
dest_file = dest_file.replace('_sdk', '')
src = src_file + file_extension
dest = join(dest_dir, dest_file + file_extension)
Copy(src, dest)
def CopyLibs(out_dir, bin_dir):
for library in ['libcrypto', 'libssl']:
ext = '.so'
if HOST_OS == 'macos':
ext = '.dylib'
elif HOST_OS == 'win32':
ext = '.dll'
src = os.path.join(out_dir, library + ext)
dst = os.path.join(bin_dir, library + ext)
if os.path.isfile(src):
copyfile(src, dst)
copymode(src, dst)
def CopyDartScripts(home, sdk_root):
for executable in ['dart2js_sdk', 'dartanalyzer_sdk', 'dartfmt_sdk',
'pub_sdk', 'dartdoc', 'dartdevc_sdk']:
CopyShellScript(os.path.join(home, 'sdk', 'bin', executable),
os.path.join(sdk_root, 'bin'))
def CopySnapshots(snapshots, sdk_root):
for snapshot in ['analysis_server', 'dart2js', 'dartanalyzer', 'dartfmt',
'utils_wrapper', 'pub', 'dartdoc', 'dartdevc']:
snapshot += '.dart.snapshot'
copyfile(join(snapshots, snapshot),
join(sdk_root, 'bin', 'snapshots', snapshot))
def CopyAnalyzerSources(home, lib_dir):
for library in ['analyzer', 'analysis_server', 'front_end', 'kernel']:
copytree(join(home, 'pkg', library), join(lib_dir, library),
ignore=ignore_patterns('*.svn', 'doc', '*.py', '*.gypi', '*.sh',
'.gitignore', 'packages'))
def CopyDartdocResources(home, sdk_root):
RESOURCE_DIR = join(sdk_root, 'bin', 'snapshots', 'resources')
DARTDOC = join(RESOURCE_DIR, 'dartdoc')
copytree(join(home, 'third_party', 'pkg', 'dartdoc', 'lib', 'templates'),
join(DARTDOC, 'templates'))
copytree(join(home, 'third_party', 'pkg', 'dartdoc', 'lib', 'resources'),
join(DARTDOC, 'resources'))
# write the .packages file
PACKAGES_FILE = join(DARTDOC, '.packages')
packages_file = open(PACKAGES_FILE, 'w')
packages_file.write('dartdoc:.')
packages_file.close()
def CopyAnalysisSummaries(snapshots, lib):
copyfile(join(snapshots, 'spec.sum'),
join(lib, '_internal', 'spec.sum'))
copyfile(join(snapshots, 'strong.sum'),
join(lib, '_internal', 'strong.sum'))
def CopyDevCompilerSdk(home, lib):
copyfile(join(home, 'pkg', 'dev_compiler', 'lib', 'sdk', 'ddc_sdk.sum'),
join(lib, '_internal', 'ddc_sdk.sum'))
copytree(join(home, 'pkg', 'dev_compiler', 'lib', 'js'),
join(lib, 'dev_compiler'))
copyfile(join(home, 'third_party', 'requirejs', 'require.js'),
join(lib, 'dev_compiler', 'amd', 'require.js'))
def Main():
# Pull in all of the gypi files which will be munged into the sdk.
HOME = dirname(dirname(realpath(__file__)))
(options, args) = GetOptions()
SDK = options.sdk_output_dir
SDK_tmp = '%s.tmp' % SDK
SNAPSHOT = options.snapshot_location
# TODO(dgrove) - deal with architectures that are not ia32.
if exists(SDK):
rmtree(SDK)
if exists(SDK_tmp):
rmtree(SDK_tmp)
os.makedirs(SDK_tmp)
# Create and populate sdk/bin.
BIN = join(SDK_tmp, 'bin')
os.makedirs(BIN)
os.makedirs(join(BIN, 'snapshots'))
# Copy the Dart VM binary and the Windows Dart VM link library
# into sdk/bin.
#
# TODO(dgrove) - deal with architectures that are not ia32.
build_dir = os.path.dirname(SDK)
dart_file_extension = ''
if HOST_OS == 'win32':
dart_file_extension = '.exe'
dart_import_lib_src = join(HOME, build_dir, 'dart.lib')
dart_import_lib_dest = join(BIN, 'dart.lib')
copyfile(dart_import_lib_src, dart_import_lib_dest)
dart_src_binary = join(HOME, build_dir, 'dart' + dart_file_extension)
dart_dest_binary = join(BIN, 'dart' + dart_file_extension)
copyfile(dart_src_binary, dart_dest_binary)
copymode(dart_src_binary, dart_dest_binary)
# Strip the binaries on platforms where that is supported.
if HOST_OS == 'linux' and not options.disable_stripping:
subprocess.call(['strip', dart_dest_binary])
elif HOST_OS == 'macos' and not options.disable_stripping:
subprocess.call(['strip', '-x', dart_dest_binary])
#
# Create and populate sdk/include.
#
INCLUDE = join(SDK_tmp, 'include')
os.makedirs(INCLUDE)
copyfile(join(HOME, 'runtime', 'include', 'dart_api.h'),
join(INCLUDE, 'dart_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_mirrors_api.h'),
join(INCLUDE, 'dart_mirrors_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_native_api.h'),
join(INCLUDE, 'dart_native_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_tools_api.h'),
join(INCLUDE, 'dart_tools_api.h'))
#
# Create and populate sdk/lib.
#
LIB = join(SDK_tmp, 'lib')
os.makedirs(LIB)
#
# Create and populate lib/{async, core, isolate, ...}.
#
os.makedirs(join(LIB, 'html'))
for library in [join('_blink', 'dartium'), | join('_chrome', 'dart2js'), join('_chrome', 'dartium'),
join('_internal', 'js_runtime'),
join('_internal', 'sdk_library_metadata'),
'async', 'collection', 'convert', 'core', 'developer',
'internal', 'io', 'isolate',
join('html', 'dart2js'), join('html', 'dartium'),
join('html', 'html_common'),
join('indexed_db', 'dart2js'), join('indexed_db', 'dartium'),
'js', 'js_util', 'math', 'mirrors', 'profiler', 'typed_data',
join('svg', 'dart2js'), join('svg', 'dartium'),
join('web_audio', 'dart2js'), join('web_audio', 'dartium'),
join('web_gl', 'dart2js'), join('web_gl', 'dartium'),
join('web_sql', 'dart2js'), join('web_sql', 'dartium')]:
copytree(join(HOME, 'sdk', 'lib', library), join(LIB, library),
ignore=ignore_patterns('*.svn', 'doc', '*.py', '*.gypi', '*.sh',
'.gitignore'))
# Copy the platform descriptors.
for file_name in ["dart_client.platform",
"dart_server.platform",
"dart_shared.platform"]:
copyfile(join(HOME, 'sdk', 'lib', file_name), join(LIB, file_name));
# Copy libraries.dart to lib/_internal/libraries.dart for backwards
# compatibility.
#
# TODO(sigmund): stop copying libraries.dart. Old versions (<=0.25.1-alpha.4)
# of the analyzer package do not support the new location of this file. We
# should be able to remove the old file once we release a newer version of
# analyzer and popular frameworks have migrated to use it.
copyfile(join(HOME, 'sdk', 'lib', '_internal',
'sdk_library_metadata', 'lib', 'libraries.dart'),
join(LIB, '_internal', 'libraries.dart'))
# Create and copy tools.
UTIL = join(SDK_tmp, 'util')
os.makedirs(UTIL)
RESOURCE = join(SDK_tmp, 'lib', '_internal', 'pub', 'asset')
os.makedirs(os.path.dirname(RESOURCE))
copytree(join(HOME, 'third_party', 'pkg', 'pub', 'lib', 'src',
'asset'),
join(RESOURCE),
ignore=ignore_patterns('.svn'))
# Copy in 7zip for Windows.
if HOST_OS == 'win32':
copytree(join(HOME, 'third_party', '7zip'),
join(RESOURCE, '7zip'),
ignore=ignore_patterns('.svn'))
# Copy dart2js/pub.
CopyDartScripts(HOME, SDK_tmp)
CopySnapshots(SNAPSHOT, SDK_tmp)
CopyDartdocResources(HOME, SDK_tmp)
CopyAnalyzerSources(HOME, LIB)
CopyAnalysisSummaries(SNAPSHOT, LIB)
CopyDevCompilerSdk(HOME, LIB)
if options.copy_libs:
CopyLibs(build_dir, BIN)
# Write the 'version' file
version = utils.GetVersion()
versionFile = open(os.path.join(SDK_tmp, 'version'), 'w')
versionFile.write(version + '\n')
versionFile.close()
# Write the 'revision' file
revision = utils.GetGitRevision()
if revision is not None:
with open(os.path.join(SDK_tmp, 'revision'), 'w') as f:
f.write('%s\n' % revision)
f.close()
Copy(join(HOME, 'README.dart-sdk'), join(SDK_tmp, 'README'))
Copy(join(HOME, 'LICENSE'), join(SDK_tmp, 'LICENSE'))
Copy(join(HOME, 'sdk', 'api_readme.md'), join(SDK_tmp, 'lib', 'api_readme.md'))
move(SDK_tmp, SDK)
if __name__ == '__main__':
sys.exit(Main()) | random_line_split | |
create_sdk.py | #!/usr/bin/env python
#
# Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# A script which will be invoked from gyp to create an SDK.
#
# Usage: create_sdk.py sdk_directory
#
# The SDK will be used either from the command-line or from the editor.
# Top structure is
#
# ..dart-sdk/
# ....bin/
# ......dart or dart.exe (executable)
# ......dart.lib (import library for VM native extensions on Windows)
# ......dartdoc
# ......dartfmt
# ......dart2js
# ......dartanalyzer
# ......dartdevc
# ......pub
# ......snapshots/
# ........analysis_server.dart.snapshot
# ........dart2js.dart.snapshot
# ........dartanalyzer.dart.snapshot
# ........dartdoc.dart.snapshot
# ........dartfmt.dart.snapshot
# ........dartdevc.dart.snapshot
# ........pub.dart.snapshot
# ........utils_wrapper.dart.snapshot
#.........resources/
#...........dartdoc/
#..............packages
#.............resources/
#.............templates/
# ....include/
# ......dart_api.h
# ......dart_mirrors_api.h
# ......dart_native_api.h
# ......dart_tools_api.h
# ....lib/
# ......dart_client.platform
# ......dart_server.platform
# ......dart_shared.platform
# ......_internal/
#.........spec.sum
#.........strong.sum
#.........dev_compiler/
# ......analysis_server/
# ......analyzer/
# ......async/
# ......collection/
# ......convert/
# ......core/
# ......front_end/
# ......html/
# ......internal/
# ......io/
# ......isolate/
# ......js/
# ......js_util/
# ......kernel/
# ......math/
# ......mirrors/
# ......typed_data/
# ......api_readme.md
# ....util/
# ......(more will come here)
import optparse
import os
import re
import sys
import subprocess
import utils
HOST_OS = utils.GuessOS()
# TODO(dgrove): Only import modules following Google style guide.
from os.path import basename, dirname, join, realpath, exists
# TODO(dgrove): Only import modules following Google style guide.
from shutil import copyfile, copymode, copytree, ignore_patterns, rmtree, move
def GetOptions():
options = optparse.OptionParser(usage='usage: %prog [options]')
options.add_option("--sdk_output_dir",
help='Where to output the sdk')
options.add_option("--snapshot_location",
help='Location of the snapshots.')
options.add_option("--copy_libs",
action="store_true", default=False,
help='Copy dynamically linked libraries to the SDK bin directory.')
options.add_option("--disable_stripping",
action="store_true", default=False,
help='Do not try to strip binaries. Use when they are already stripped')
return options.parse_args()
def ReplaceInFiles(paths, subs):
"""Reads a series of files, applies a series of substitutions to each, and
saves them back out. subs should by a list of (pattern, replace) tuples."""
for path in paths:
contents = open(path).read()
for pattern, replace in subs:
contents = re.sub(pattern, replace, contents)
dest = open(path, 'w')
dest.write(contents)
dest.close()
def Copy(src, dest):
|
def CopyShellScript(src_file, dest_dir):
"""Copies a shell/batch script to the given destination directory. Handles
using the appropriate platform-specific file extension."""
file_extension = ''
if HOST_OS == 'win32':
file_extension = '.bat'
# If we're copying an SDK-specific shell script, strip off the suffix.
dest_file = basename(src_file)
if dest_file.endswith('_sdk'):
dest_file = dest_file.replace('_sdk', '')
src = src_file + file_extension
dest = join(dest_dir, dest_file + file_extension)
Copy(src, dest)
def CopyLibs(out_dir, bin_dir):
for library in ['libcrypto', 'libssl']:
ext = '.so'
if HOST_OS == 'macos':
ext = '.dylib'
elif HOST_OS == 'win32':
ext = '.dll'
src = os.path.join(out_dir, library + ext)
dst = os.path.join(bin_dir, library + ext)
if os.path.isfile(src):
copyfile(src, dst)
copymode(src, dst)
def CopyDartScripts(home, sdk_root):
for executable in ['dart2js_sdk', 'dartanalyzer_sdk', 'dartfmt_sdk',
'pub_sdk', 'dartdoc', 'dartdevc_sdk']:
CopyShellScript(os.path.join(home, 'sdk', 'bin', executable),
os.path.join(sdk_root, 'bin'))
def CopySnapshots(snapshots, sdk_root):
for snapshot in ['analysis_server', 'dart2js', 'dartanalyzer', 'dartfmt',
'utils_wrapper', 'pub', 'dartdoc', 'dartdevc']:
snapshot += '.dart.snapshot'
copyfile(join(snapshots, snapshot),
join(sdk_root, 'bin', 'snapshots', snapshot))
def CopyAnalyzerSources(home, lib_dir):
for library in ['analyzer', 'analysis_server', 'front_end', 'kernel']:
copytree(join(home, 'pkg', library), join(lib_dir, library),
ignore=ignore_patterns('*.svn', 'doc', '*.py', '*.gypi', '*.sh',
'.gitignore', 'packages'))
def CopyDartdocResources(home, sdk_root):
RESOURCE_DIR = join(sdk_root, 'bin', 'snapshots', 'resources')
DARTDOC = join(RESOURCE_DIR, 'dartdoc')
copytree(join(home, 'third_party', 'pkg', 'dartdoc', 'lib', 'templates'),
join(DARTDOC, 'templates'))
copytree(join(home, 'third_party', 'pkg', 'dartdoc', 'lib', 'resources'),
join(DARTDOC, 'resources'))
# write the .packages file
PACKAGES_FILE = join(DARTDOC, '.packages')
packages_file = open(PACKAGES_FILE, 'w')
packages_file.write('dartdoc:.')
packages_file.close()
def CopyAnalysisSummaries(snapshots, lib):
copyfile(join(snapshots, 'spec.sum'),
join(lib, '_internal', 'spec.sum'))
copyfile(join(snapshots, 'strong.sum'),
join(lib, '_internal', 'strong.sum'))
def CopyDevCompilerSdk(home, lib):
copyfile(join(home, 'pkg', 'dev_compiler', 'lib', 'sdk', 'ddc_sdk.sum'),
join(lib, '_internal', 'ddc_sdk.sum'))
copytree(join(home, 'pkg', 'dev_compiler', 'lib', 'js'),
join(lib, 'dev_compiler'))
copyfile(join(home, 'third_party', 'requirejs', 'require.js'),
join(lib, 'dev_compiler', 'amd', 'require.js'))
def Main():
# Pull in all of the gypi files which will be munged into the sdk.
HOME = dirname(dirname(realpath(__file__)))
(options, args) = GetOptions()
SDK = options.sdk_output_dir
SDK_tmp = '%s.tmp' % SDK
SNAPSHOT = options.snapshot_location
# TODO(dgrove) - deal with architectures that are not ia32.
if exists(SDK):
rmtree(SDK)
if exists(SDK_tmp):
rmtree(SDK_tmp)
os.makedirs(SDK_tmp)
# Create and populate sdk/bin.
BIN = join(SDK_tmp, 'bin')
os.makedirs(BIN)
os.makedirs(join(BIN, 'snapshots'))
# Copy the Dart VM binary and the Windows Dart VM link library
# into sdk/bin.
#
# TODO(dgrove) - deal with architectures that are not ia32.
build_dir = os.path.dirname(SDK)
dart_file_extension = ''
if HOST_OS == 'win32':
dart_file_extension = '.exe'
dart_import_lib_src = join(HOME, build_dir, 'dart.lib')
dart_import_lib_dest = join(BIN, 'dart.lib')
copyfile(dart_import_lib_src, dart_import_lib_dest)
dart_src_binary = join(HOME, build_dir, 'dart' + dart_file_extension)
dart_dest_binary = join(BIN, 'dart' + dart_file_extension)
copyfile(dart_src_binary, dart_dest_binary)
copymode(dart_src_binary, dart_dest_binary)
# Strip the binaries on platforms where that is supported.
if HOST_OS == 'linux' and not options.disable_stripping:
subprocess.call(['strip', dart_dest_binary])
elif HOST_OS == 'macos' and not options.disable_stripping:
subprocess.call(['strip', '-x', dart_dest_binary])
#
# Create and populate sdk/include.
#
INCLUDE = join(SDK_tmp, 'include')
os.makedirs(INCLUDE)
copyfile(join(HOME, 'runtime', 'include', 'dart_api.h'),
join(INCLUDE, 'dart_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_mirrors_api.h'),
join(INCLUDE, 'dart_mirrors_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_native_api.h'),
join(INCLUDE, 'dart_native_api.h'))
copyfile(join(HOME, 'runtime', 'include', 'dart_tools_api.h'),
join(INCLUDE, 'dart_tools_api.h'))
#
# Create and populate sdk/lib.
#
LIB = join(SDK_tmp, 'lib')
os.makedirs(LIB)
#
# Create and populate lib/{async, core, isolate, ...}.
#
os.makedirs(join(LIB, 'html'))
for library in [join('_blink', 'dartium'),
join('_chrome', 'dart2js'), join('_chrome', 'dartium'),
join('_internal', 'js_runtime'),
join('_internal', 'sdk_library_metadata'),
'async', 'collection', 'convert', 'core', 'developer',
'internal', 'io', 'isolate',
join('html', 'dart2js'), join('html', 'dartium'),
join('html', 'html_common'),
join('indexed_db', 'dart2js'), join('indexed_db', 'dartium'),
'js', 'js_util', 'math', 'mirrors', 'profiler', 'typed_data',
join('svg', 'dart2js'), join('svg', 'dartium'),
join('web_audio', 'dart2js'), join('web_audio', 'dartium'),
join('web_gl', 'dart2js'), join('web_gl', 'dartium'),
join('web_sql', 'dart2js'), join('web_sql', 'dartium')]:
copytree(join(HOME, 'sdk', 'lib', library), join(LIB, library),
ignore=ignore_patterns('*.svn', 'doc', '*.py', '*.gypi', '*.sh',
'.gitignore'))
# Copy the platform descriptors.
for file_name in ["dart_client.platform",
"dart_server.platform",
"dart_shared.platform"]:
copyfile(join(HOME, 'sdk', 'lib', file_name), join(LIB, file_name));
# Copy libraries.dart to lib/_internal/libraries.dart for backwards
# compatibility.
#
# TODO(sigmund): stop copying libraries.dart. Old versions (<=0.25.1-alpha.4)
# of the analyzer package do not support the new location of this file. We
# should be able to remove the old file once we release a newer version of
# analyzer and popular frameworks have migrated to use it.
copyfile(join(HOME, 'sdk', 'lib', '_internal',
'sdk_library_metadata', 'lib', 'libraries.dart'),
join(LIB, '_internal', 'libraries.dart'))
# Create and copy tools.
UTIL = join(SDK_tmp, 'util')
os.makedirs(UTIL)
RESOURCE = join(SDK_tmp, 'lib', '_internal', 'pub', 'asset')
os.makedirs(os.path.dirname(RESOURCE))
copytree(join(HOME, 'third_party', 'pkg', 'pub', 'lib', 'src',
'asset'),
join(RESOURCE),
ignore=ignore_patterns('.svn'))
# Copy in 7zip for Windows.
if HOST_OS == 'win32':
copytree(join(HOME, 'third_party', '7zip'),
join(RESOURCE, '7zip'),
ignore=ignore_patterns('.svn'))
# Copy dart2js/pub.
CopyDartScripts(HOME, SDK_tmp)
CopySnapshots(SNAPSHOT, SDK_tmp)
CopyDartdocResources(HOME, SDK_tmp)
CopyAnalyzerSources(HOME, LIB)
CopyAnalysisSummaries(SNAPSHOT, LIB)
CopyDevCompilerSdk(HOME, LIB)
if options.copy_libs:
CopyLibs(build_dir, BIN)
# Write the 'version' file
version = utils.GetVersion()
versionFile = open(os.path.join(SDK_tmp, 'version'), 'w')
versionFile.write(version + '\n')
versionFile.close()
# Write the 'revision' file
revision = utils.GetGitRevision()
if revision is not None:
with open(os.path.join(SDK_tmp, 'revision'), 'w') as f:
f.write('%s\n' % revision)
f.close()
Copy(join(HOME, 'README.dart-sdk'), join(SDK_tmp, 'README'))
Copy(join(HOME, 'LICENSE'), join(SDK_tmp, 'LICENSE'))
Copy(join(HOME, 'sdk', 'api_readme.md'), join(SDK_tmp, 'lib', 'api_readme.md'))
move(SDK_tmp, SDK)
if __name__ == '__main__':
sys.exit(Main())
| copyfile(src, dest)
copymode(src, dest) | identifier_body |
correctness_proof.rs | //! The proof of correct encryption of the given value.
//! For more details see section 5.2 of the whitepaper.
use super::errors::{ErrorKind, Fallible};
use crate::asset_proofs::{
encryption_proofs::{
AssetProofProver, AssetProofProverAwaitingChallenge, AssetProofVerifier, ZKPChallenge,
ZKProofResponse,
},
transcript::{TranscriptProtocol, UpdateTranscript},
CipherText, CommitmentWitness, ElgamalPublicKey,
};
use bulletproofs::PedersenGens;
use curve25519_dalek::{
constants::RISTRETTO_BASEPOINT_POINT,
ristretto::{CompressedRistretto, RistrettoPoint},
scalar::Scalar,
};
use merlin::{Transcript, TranscriptRng};
use rand_core::{CryptoRng, RngCore};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use zeroize::Zeroize;
use codec::{Decode, Encode, Error as CodecError, Input, Output};
use sp_std::convert::From;
/// The domain label for the correctness proof.
pub const CORRECTNESS_PROOF_FINAL_RESPONSE_LABEL: &[u8] = b"PolymathCorrectnessFinalResponse";
/// The domain label for the challenge.
pub const CORRECTNESS_PROOF_CHALLENGE_LABEL: &[u8] = b"PolymathCorrectnessChallenge";
// ------------------------------------------------------------------------
// Proof of Correct Encryption of the Given Value
// ------------------------------------------------------------------------
#[derive(PartialEq, Copy, Clone, Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct CorrectnessFinalResponse(Scalar);
impl From<Scalar> for CorrectnessFinalResponse {
fn from(response: Scalar) -> Self {
CorrectnessFinalResponse(response)
}
}
impl Encode for CorrectnessFinalResponse {
fn size_hint(&self) -> usize {
32
}
fn encode_to<W: Output>(&self, dest: &mut W) {
self.0.as_bytes().encode_to(dest)
}
}
impl Decode for CorrectnessFinalResponse {
fn decode<I: Input>(input: &mut I) -> Result<Self, CodecError> |
}
#[derive(PartialEq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct CorrectnessInitialMessage {
a: RistrettoPoint,
b: RistrettoPoint,
}
impl Encode for CorrectnessInitialMessage {
fn size_hint(&self) -> usize {
64
}
fn encode_to<W: Output>(&self, dest: &mut W) {
let a = self.a.compress();
let b = self.b.compress();
a.as_bytes().encode_to(dest);
b.as_bytes().encode_to(dest);
}
}
impl Decode for CorrectnessInitialMessage {
fn decode<I: Input>(input: &mut I) -> Result<Self, CodecError> {
let (a, b) = <([u8; 32], [u8; 32])>::decode(input)?;
let a = CompressedRistretto(a)
.decompress()
.ok_or_else(|| CodecError::from("CorrectnessInitialMessage 'a' point is invalid"))?;
let b = CompressedRistretto(b)
.decompress()
.ok_or_else(|| CodecError::from("CorrectnessInitialMessage 'b' point is invalid"))?;
Ok(CorrectnessInitialMessage { a, b })
}
}
/// A default implementation used for testing.
impl Default for CorrectnessInitialMessage {
fn default() -> Self {
CorrectnessInitialMessage {
a: RISTRETTO_BASEPOINT_POINT,
b: RISTRETTO_BASEPOINT_POINT,
}
}
}
impl UpdateTranscript for CorrectnessInitialMessage {
fn update_transcript(&self, transcript: &mut Transcript) -> Fallible<()> {
transcript.append_domain_separator(CORRECTNESS_PROOF_CHALLENGE_LABEL);
transcript.append_validated_point(b"A", &self.a.compress())?;
transcript.append_validated_point(b"B", &self.b.compress())?;
Ok(())
}
}
/// Holds the non-interactive proofs of correctness, equivalent of L_correct of MERCAT paper.
pub type CorrectnessProof = ZKProofResponse<CorrectnessInitialMessage, CorrectnessFinalResponse>;
pub struct CorrectnessProverAwaitingChallenge<'a> {
/// The public key used for the elgamal encryption.
pub pub_key: ElgamalPublicKey,
/// The secret commitment witness.
pub w: CommitmentWitness,
/// Pedersen Generators
pub pc_gens: &'a PedersenGens,
}
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct CorrectnessProver {
/// The secret commitment witness.
w: CommitmentWitness,
/// The randomness generate in the first round.
u: Scalar,
}
impl<'a> AssetProofProverAwaitingChallenge for CorrectnessProverAwaitingChallenge<'a> {
type ZKInitialMessage = CorrectnessInitialMessage;
type ZKFinalResponse = CorrectnessFinalResponse;
type ZKProver = CorrectnessProver;
fn create_transcript_rng<T: RngCore + CryptoRng>(
&self,
rng: &mut T,
transcript: &Transcript,
) -> TranscriptRng {
transcript.create_transcript_rng_from_witness(rng, &self.w)
}
fn generate_initial_message(
&self,
rng: &mut TranscriptRng,
) -> (Self::ZKProver, Self::ZKInitialMessage) {
let rand_commitment = Scalar::random(rng);
(
CorrectnessProver {
w: self.w.clone(),
u: rand_commitment,
},
CorrectnessInitialMessage {
a: rand_commitment * self.pub_key.pub_key,
b: rand_commitment * self.pc_gens.B_blinding,
},
)
}
}
impl AssetProofProver<CorrectnessFinalResponse> for CorrectnessProver {
fn apply_challenge(&self, c: &ZKPChallenge) -> CorrectnessFinalResponse {
CorrectnessFinalResponse(self.u + c.x() * self.w.blinding())
}
}
pub struct CorrectnessVerifier<'a> {
/// The encrypted value (aka the plain text).
pub value: Scalar,
/// The public key to which the `value` is encrypted.
pub pub_key: ElgamalPublicKey,
/// The encryption cipher text.
pub cipher: CipherText,
/// The Generator Points
pub pc_gens: &'a PedersenGens,
}
impl<'a> AssetProofVerifier for CorrectnessVerifier<'a> {
type ZKInitialMessage = CorrectnessInitialMessage;
type ZKFinalResponse = CorrectnessFinalResponse;
fn verify(
&self,
challenge: &ZKPChallenge,
initial_message: &Self::ZKInitialMessage,
z: &Self::ZKFinalResponse,
) -> Fallible<()> {
let generators = self.pc_gens;
let y_prime = self.cipher.y - (self.value * generators.B);
ensure!(
z.0 * self.pub_key.pub_key == initial_message.a + challenge.x() * self.cipher.x,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 1 }
);
ensure!(
z.0 * generators.B_blinding == initial_message.b + challenge.x() * y_prime,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 2 }
);
Ok(())
}
}
// ------------------------------------------------------------------------
// Tests
// ------------------------------------------------------------------------
#[cfg(test)]
mod tests {
extern crate wasm_bindgen_test;
use super::*;
use crate::asset_proofs::*;
use rand::{rngs::StdRng, SeedableRng};
use wasm_bindgen_test::*;
const SEED_1: [u8; 32] = [17u8; 32];
#[test]
#[wasm_bindgen_test]
fn test_correctness_proof() {
let gens = PedersenGens::default();
let mut rng = StdRng::from_seed(SEED_1);
let secret_value = 13u32;
let elg_secret = ElgamalSecretKey::new(Scalar::random(&mut rng));
let elg_pub = elg_secret.get_public_key();
let (w, cipher) = elg_pub.encrypt_value(secret_value.into(), &mut rng);
let prover = CorrectnessProverAwaitingChallenge {
pub_key: elg_pub,
w,
pc_gens: &gens,
};
let verifier = CorrectnessVerifier {
value: Scalar::from(secret_value),
pub_key: elg_pub,
cipher,
pc_gens: &gens,
};
let mut transcript = Transcript::new(CORRECTNESS_PROOF_FINAL_RESPONSE_LABEL);
// Positive tests
let mut transcript_rng = prover.create_transcript_rng(&mut rng, &transcript);
let (prover, initial_message) = prover.generate_initial_message(&mut transcript_rng);
initial_message.update_transcript(&mut transcript).unwrap();
let challenge = transcript
.scalar_challenge(CORRECTNESS_PROOF_CHALLENGE_LABEL)
.unwrap();
let final_response = prover.apply_challenge(&challenge);
let result = verifier.verify(&challenge, &initial_message, &final_response);
assert!(result.is_ok());
// Negative tests
let bad_initial_message = CorrectnessInitialMessage::default();
let result = verifier.verify(&challenge, &bad_initial_message, &final_response);
assert_err!(
result,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 1 }
);
let bad_final_response = CorrectnessFinalResponse(Scalar::default());
let result = verifier.verify(&challenge, &initial_message, &bad_final_response);
assert_err!(
result,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 1 }
);
}
#[test]
#[wasm_bindgen_test]
fn serialize_deserialize_proof() {
let mut rng = StdRng::from_seed(SEED_1);
let secret_value = 42u32;
let secret_key = ElgamalSecretKey::new(Scalar::random(&mut rng));
let pub_key = secret_key.get_public_key();
let rand_blind = Scalar::random(&mut rng);
let w = CommitmentWitness::new(secret_value.into(), rand_blind);
let gens = PedersenGens::default();
let prover = CorrectnessProverAwaitingChallenge {
pub_key,
w,
pc_gens: &gens,
};
let (initial_message, final_response) = encryption_proofs::single_property_prover::<
StdRng,
CorrectnessProverAwaitingChallenge,
>(prover, &mut rng)
.unwrap();
let bytes = initial_message.encode();
let mut input = bytes.as_slice();
let recovered_initial_message = <CorrectnessInitialMessage>::decode(&mut input).unwrap();
assert_eq!(recovered_initial_message, initial_message);
let bytes = final_response.encode();
let mut input = bytes.as_slice();
let recovered_final_response = <CorrectnessFinalResponse>::decode(&mut input).unwrap();
assert_eq!(recovered_final_response, final_response);
}
}
| {
let scalar = <[u8; 32]>::decode(input)?;
let scalar = Scalar::from_canonical_bytes(scalar)
.ok_or_else(|| CodecError::from("CorrectnessFinalResponse is invalid"))?;
Ok(CorrectnessFinalResponse(scalar))
} | identifier_body |
correctness_proof.rs | //! The proof of correct encryption of the given value.
//! For more details see section 5.2 of the whitepaper.
use super::errors::{ErrorKind, Fallible};
use crate::asset_proofs::{
encryption_proofs::{
AssetProofProver, AssetProofProverAwaitingChallenge, AssetProofVerifier, ZKPChallenge,
ZKProofResponse,
},
transcript::{TranscriptProtocol, UpdateTranscript},
CipherText, CommitmentWitness, ElgamalPublicKey,
};
use bulletproofs::PedersenGens;
use curve25519_dalek::{
constants::RISTRETTO_BASEPOINT_POINT,
ristretto::{CompressedRistretto, RistrettoPoint},
scalar::Scalar,
};
use merlin::{Transcript, TranscriptRng};
use rand_core::{CryptoRng, RngCore};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use zeroize::Zeroize;
use codec::{Decode, Encode, Error as CodecError, Input, Output};
use sp_std::convert::From;
/// The domain label for the correctness proof.
pub const CORRECTNESS_PROOF_FINAL_RESPONSE_LABEL: &[u8] = b"PolymathCorrectnessFinalResponse";
/// The domain label for the challenge.
pub const CORRECTNESS_PROOF_CHALLENGE_LABEL: &[u8] = b"PolymathCorrectnessChallenge";
// ------------------------------------------------------------------------
// Proof of Correct Encryption of the Given Value
// ------------------------------------------------------------------------
#[derive(PartialEq, Copy, Clone, Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct CorrectnessFinalResponse(Scalar);
impl From<Scalar> for CorrectnessFinalResponse {
fn from(response: Scalar) -> Self {
CorrectnessFinalResponse(response)
}
}
impl Encode for CorrectnessFinalResponse {
fn size_hint(&self) -> usize {
32
}
fn encode_to<W: Output>(&self, dest: &mut W) {
self.0.as_bytes().encode_to(dest)
}
}
impl Decode for CorrectnessFinalResponse {
fn decode<I: Input>(input: &mut I) -> Result<Self, CodecError> {
let scalar = <[u8; 32]>::decode(input)?;
let scalar = Scalar::from_canonical_bytes(scalar)
.ok_or_else(|| CodecError::from("CorrectnessFinalResponse is invalid"))?;
Ok(CorrectnessFinalResponse(scalar))
}
}
#[derive(PartialEq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct CorrectnessInitialMessage {
a: RistrettoPoint,
b: RistrettoPoint,
}
impl Encode for CorrectnessInitialMessage {
fn size_hint(&self) -> usize {
64
}
fn encode_to<W: Output>(&self, dest: &mut W) {
let a = self.a.compress();
let b = self.b.compress();
a.as_bytes().encode_to(dest);
b.as_bytes().encode_to(dest);
}
}
impl Decode for CorrectnessInitialMessage {
fn | <I: Input>(input: &mut I) -> Result<Self, CodecError> {
let (a, b) = <([u8; 32], [u8; 32])>::decode(input)?;
let a = CompressedRistretto(a)
.decompress()
.ok_or_else(|| CodecError::from("CorrectnessInitialMessage 'a' point is invalid"))?;
let b = CompressedRistretto(b)
.decompress()
.ok_or_else(|| CodecError::from("CorrectnessInitialMessage 'b' point is invalid"))?;
Ok(CorrectnessInitialMessage { a, b })
}
}
/// A default implementation used for testing.
impl Default for CorrectnessInitialMessage {
fn default() -> Self {
CorrectnessInitialMessage {
a: RISTRETTO_BASEPOINT_POINT,
b: RISTRETTO_BASEPOINT_POINT,
}
}
}
impl UpdateTranscript for CorrectnessInitialMessage {
fn update_transcript(&self, transcript: &mut Transcript) -> Fallible<()> {
transcript.append_domain_separator(CORRECTNESS_PROOF_CHALLENGE_LABEL);
transcript.append_validated_point(b"A", &self.a.compress())?;
transcript.append_validated_point(b"B", &self.b.compress())?;
Ok(())
}
}
/// Holds the non-interactive proofs of correctness, equivalent of L_correct of MERCAT paper.
pub type CorrectnessProof = ZKProofResponse<CorrectnessInitialMessage, CorrectnessFinalResponse>;
pub struct CorrectnessProverAwaitingChallenge<'a> {
/// The public key used for the elgamal encryption.
pub pub_key: ElgamalPublicKey,
/// The secret commitment witness.
pub w: CommitmentWitness,
/// Pedersen Generators
pub pc_gens: &'a PedersenGens,
}
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct CorrectnessProver {
/// The secret commitment witness.
w: CommitmentWitness,
/// The randomness generate in the first round.
u: Scalar,
}
impl<'a> AssetProofProverAwaitingChallenge for CorrectnessProverAwaitingChallenge<'a> {
type ZKInitialMessage = CorrectnessInitialMessage;
type ZKFinalResponse = CorrectnessFinalResponse;
type ZKProver = CorrectnessProver;
fn create_transcript_rng<T: RngCore + CryptoRng>(
&self,
rng: &mut T,
transcript: &Transcript,
) -> TranscriptRng {
transcript.create_transcript_rng_from_witness(rng, &self.w)
}
fn generate_initial_message(
&self,
rng: &mut TranscriptRng,
) -> (Self::ZKProver, Self::ZKInitialMessage) {
let rand_commitment = Scalar::random(rng);
(
CorrectnessProver {
w: self.w.clone(),
u: rand_commitment,
},
CorrectnessInitialMessage {
a: rand_commitment * self.pub_key.pub_key,
b: rand_commitment * self.pc_gens.B_blinding,
},
)
}
}
impl AssetProofProver<CorrectnessFinalResponse> for CorrectnessProver {
fn apply_challenge(&self, c: &ZKPChallenge) -> CorrectnessFinalResponse {
CorrectnessFinalResponse(self.u + c.x() * self.w.blinding())
}
}
pub struct CorrectnessVerifier<'a> {
/// The encrypted value (aka the plain text).
pub value: Scalar,
/// The public key to which the `value` is encrypted.
pub pub_key: ElgamalPublicKey,
/// The encryption cipher text.
pub cipher: CipherText,
/// The Generator Points
pub pc_gens: &'a PedersenGens,
}
impl<'a> AssetProofVerifier for CorrectnessVerifier<'a> {
type ZKInitialMessage = CorrectnessInitialMessage;
type ZKFinalResponse = CorrectnessFinalResponse;
fn verify(
&self,
challenge: &ZKPChallenge,
initial_message: &Self::ZKInitialMessage,
z: &Self::ZKFinalResponse,
) -> Fallible<()> {
let generators = self.pc_gens;
let y_prime = self.cipher.y - (self.value * generators.B);
ensure!(
z.0 * self.pub_key.pub_key == initial_message.a + challenge.x() * self.cipher.x,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 1 }
);
ensure!(
z.0 * generators.B_blinding == initial_message.b + challenge.x() * y_prime,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 2 }
);
Ok(())
}
}
// ------------------------------------------------------------------------
// Tests
// ------------------------------------------------------------------------
#[cfg(test)]
mod tests {
extern crate wasm_bindgen_test;
use super::*;
use crate::asset_proofs::*;
use rand::{rngs::StdRng, SeedableRng};
use wasm_bindgen_test::*;
const SEED_1: [u8; 32] = [17u8; 32];
#[test]
#[wasm_bindgen_test]
fn test_correctness_proof() {
let gens = PedersenGens::default();
let mut rng = StdRng::from_seed(SEED_1);
let secret_value = 13u32;
let elg_secret = ElgamalSecretKey::new(Scalar::random(&mut rng));
let elg_pub = elg_secret.get_public_key();
let (w, cipher) = elg_pub.encrypt_value(secret_value.into(), &mut rng);
let prover = CorrectnessProverAwaitingChallenge {
pub_key: elg_pub,
w,
pc_gens: &gens,
};
let verifier = CorrectnessVerifier {
value: Scalar::from(secret_value),
pub_key: elg_pub,
cipher,
pc_gens: &gens,
};
let mut transcript = Transcript::new(CORRECTNESS_PROOF_FINAL_RESPONSE_LABEL);
// Positive tests
let mut transcript_rng = prover.create_transcript_rng(&mut rng, &transcript);
let (prover, initial_message) = prover.generate_initial_message(&mut transcript_rng);
initial_message.update_transcript(&mut transcript).unwrap();
let challenge = transcript
.scalar_challenge(CORRECTNESS_PROOF_CHALLENGE_LABEL)
.unwrap();
let final_response = prover.apply_challenge(&challenge);
let result = verifier.verify(&challenge, &initial_message, &final_response);
assert!(result.is_ok());
// Negative tests
let bad_initial_message = CorrectnessInitialMessage::default();
let result = verifier.verify(&challenge, &bad_initial_message, &final_response);
assert_err!(
result,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 1 }
);
let bad_final_response = CorrectnessFinalResponse(Scalar::default());
let result = verifier.verify(&challenge, &initial_message, &bad_final_response);
assert_err!(
result,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 1 }
);
}
#[test]
#[wasm_bindgen_test]
fn serialize_deserialize_proof() {
let mut rng = StdRng::from_seed(SEED_1);
let secret_value = 42u32;
let secret_key = ElgamalSecretKey::new(Scalar::random(&mut rng));
let pub_key = secret_key.get_public_key();
let rand_blind = Scalar::random(&mut rng);
let w = CommitmentWitness::new(secret_value.into(), rand_blind);
let gens = PedersenGens::default();
let prover = CorrectnessProverAwaitingChallenge {
pub_key,
w,
pc_gens: &gens,
};
let (initial_message, final_response) = encryption_proofs::single_property_prover::<
StdRng,
CorrectnessProverAwaitingChallenge,
>(prover, &mut rng)
.unwrap();
let bytes = initial_message.encode();
let mut input = bytes.as_slice();
let recovered_initial_message = <CorrectnessInitialMessage>::decode(&mut input).unwrap();
assert_eq!(recovered_initial_message, initial_message);
let bytes = final_response.encode();
let mut input = bytes.as_slice();
let recovered_final_response = <CorrectnessFinalResponse>::decode(&mut input).unwrap();
assert_eq!(recovered_final_response, final_response);
}
}
| decode | identifier_name |
correctness_proof.rs | //! The proof of correct encryption of the given value.
//! For more details see section 5.2 of the whitepaper.
use super::errors::{ErrorKind, Fallible};
use crate::asset_proofs::{
encryption_proofs::{
AssetProofProver, AssetProofProverAwaitingChallenge, AssetProofVerifier, ZKPChallenge,
ZKProofResponse,
},
transcript::{TranscriptProtocol, UpdateTranscript},
CipherText, CommitmentWitness, ElgamalPublicKey,
};
use bulletproofs::PedersenGens;
use curve25519_dalek::{
constants::RISTRETTO_BASEPOINT_POINT,
ristretto::{CompressedRistretto, RistrettoPoint},
scalar::Scalar,
};
use merlin::{Transcript, TranscriptRng};
use rand_core::{CryptoRng, RngCore};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use zeroize::Zeroize;
use codec::{Decode, Encode, Error as CodecError, Input, Output};
use sp_std::convert::From;
/// The domain label for the correctness proof.
pub const CORRECTNESS_PROOF_FINAL_RESPONSE_LABEL: &[u8] = b"PolymathCorrectnessFinalResponse";
/// The domain label for the challenge.
pub const CORRECTNESS_PROOF_CHALLENGE_LABEL: &[u8] = b"PolymathCorrectnessChallenge";
// ------------------------------------------------------------------------
// Proof of Correct Encryption of the Given Value
// ------------------------------------------------------------------------
#[derive(PartialEq, Copy, Clone, Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct CorrectnessFinalResponse(Scalar);
impl From<Scalar> for CorrectnessFinalResponse {
fn from(response: Scalar) -> Self {
CorrectnessFinalResponse(response)
}
}
impl Encode for CorrectnessFinalResponse {
fn size_hint(&self) -> usize {
32
}
fn encode_to<W: Output>(&self, dest: &mut W) {
self.0.as_bytes().encode_to(dest)
}
}
impl Decode for CorrectnessFinalResponse {
fn decode<I: Input>(input: &mut I) -> Result<Self, CodecError> {
let scalar = <[u8; 32]>::decode(input)?;
let scalar = Scalar::from_canonical_bytes(scalar)
.ok_or_else(|| CodecError::from("CorrectnessFinalResponse is invalid"))?;
Ok(CorrectnessFinalResponse(scalar))
}
}
#[derive(PartialEq, Copy, Clone, Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct CorrectnessInitialMessage {
a: RistrettoPoint,
b: RistrettoPoint,
}
impl Encode for CorrectnessInitialMessage {
fn size_hint(&self) -> usize {
64
}
fn encode_to<W: Output>(&self, dest: &mut W) {
let a = self.a.compress();
let b = self.b.compress();
a.as_bytes().encode_to(dest);
b.as_bytes().encode_to(dest);
}
}
impl Decode for CorrectnessInitialMessage {
fn decode<I: Input>(input: &mut I) -> Result<Self, CodecError> {
let (a, b) = <([u8; 32], [u8; 32])>::decode(input)?;
let a = CompressedRistretto(a)
.decompress()
.ok_or_else(|| CodecError::from("CorrectnessInitialMessage 'a' point is invalid"))?;
let b = CompressedRistretto(b)
.decompress()
.ok_or_else(|| CodecError::from("CorrectnessInitialMessage 'b' point is invalid"))?;
Ok(CorrectnessInitialMessage { a, b })
}
}
| /// A default implementation used for testing.
impl Default for CorrectnessInitialMessage {
fn default() -> Self {
CorrectnessInitialMessage {
a: RISTRETTO_BASEPOINT_POINT,
b: RISTRETTO_BASEPOINT_POINT,
}
}
}
impl UpdateTranscript for CorrectnessInitialMessage {
fn update_transcript(&self, transcript: &mut Transcript) -> Fallible<()> {
transcript.append_domain_separator(CORRECTNESS_PROOF_CHALLENGE_LABEL);
transcript.append_validated_point(b"A", &self.a.compress())?;
transcript.append_validated_point(b"B", &self.b.compress())?;
Ok(())
}
}
/// Holds the non-interactive proofs of correctness, equivalent of L_correct of MERCAT paper.
pub type CorrectnessProof = ZKProofResponse<CorrectnessInitialMessage, CorrectnessFinalResponse>;
pub struct CorrectnessProverAwaitingChallenge<'a> {
/// The public key used for the elgamal encryption.
pub pub_key: ElgamalPublicKey,
/// The secret commitment witness.
pub w: CommitmentWitness,
/// Pedersen Generators
pub pc_gens: &'a PedersenGens,
}
#[derive(Zeroize)]
#[zeroize(drop)]
pub struct CorrectnessProver {
/// The secret commitment witness.
w: CommitmentWitness,
/// The randomness generate in the first round.
u: Scalar,
}
impl<'a> AssetProofProverAwaitingChallenge for CorrectnessProverAwaitingChallenge<'a> {
type ZKInitialMessage = CorrectnessInitialMessage;
type ZKFinalResponse = CorrectnessFinalResponse;
type ZKProver = CorrectnessProver;
fn create_transcript_rng<T: RngCore + CryptoRng>(
&self,
rng: &mut T,
transcript: &Transcript,
) -> TranscriptRng {
transcript.create_transcript_rng_from_witness(rng, &self.w)
}
fn generate_initial_message(
&self,
rng: &mut TranscriptRng,
) -> (Self::ZKProver, Self::ZKInitialMessage) {
let rand_commitment = Scalar::random(rng);
(
CorrectnessProver {
w: self.w.clone(),
u: rand_commitment,
},
CorrectnessInitialMessage {
a: rand_commitment * self.pub_key.pub_key,
b: rand_commitment * self.pc_gens.B_blinding,
},
)
}
}
impl AssetProofProver<CorrectnessFinalResponse> for CorrectnessProver {
fn apply_challenge(&self, c: &ZKPChallenge) -> CorrectnessFinalResponse {
CorrectnessFinalResponse(self.u + c.x() * self.w.blinding())
}
}
pub struct CorrectnessVerifier<'a> {
/// The encrypted value (aka the plain text).
pub value: Scalar,
/// The public key to which the `value` is encrypted.
pub pub_key: ElgamalPublicKey,
/// The encryption cipher text.
pub cipher: CipherText,
/// The Generator Points
pub pc_gens: &'a PedersenGens,
}
impl<'a> AssetProofVerifier for CorrectnessVerifier<'a> {
type ZKInitialMessage = CorrectnessInitialMessage;
type ZKFinalResponse = CorrectnessFinalResponse;
fn verify(
&self,
challenge: &ZKPChallenge,
initial_message: &Self::ZKInitialMessage,
z: &Self::ZKFinalResponse,
) -> Fallible<()> {
let generators = self.pc_gens;
let y_prime = self.cipher.y - (self.value * generators.B);
ensure!(
z.0 * self.pub_key.pub_key == initial_message.a + challenge.x() * self.cipher.x,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 1 }
);
ensure!(
z.0 * generators.B_blinding == initial_message.b + challenge.x() * y_prime,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 2 }
);
Ok(())
}
}
// ------------------------------------------------------------------------
// Tests
// ------------------------------------------------------------------------
#[cfg(test)]
mod tests {
extern crate wasm_bindgen_test;
use super::*;
use crate::asset_proofs::*;
use rand::{rngs::StdRng, SeedableRng};
use wasm_bindgen_test::*;
const SEED_1: [u8; 32] = [17u8; 32];
#[test]
#[wasm_bindgen_test]
fn test_correctness_proof() {
let gens = PedersenGens::default();
let mut rng = StdRng::from_seed(SEED_1);
let secret_value = 13u32;
let elg_secret = ElgamalSecretKey::new(Scalar::random(&mut rng));
let elg_pub = elg_secret.get_public_key();
let (w, cipher) = elg_pub.encrypt_value(secret_value.into(), &mut rng);
let prover = CorrectnessProverAwaitingChallenge {
pub_key: elg_pub,
w,
pc_gens: &gens,
};
let verifier = CorrectnessVerifier {
value: Scalar::from(secret_value),
pub_key: elg_pub,
cipher,
pc_gens: &gens,
};
let mut transcript = Transcript::new(CORRECTNESS_PROOF_FINAL_RESPONSE_LABEL);
// Positive tests
let mut transcript_rng = prover.create_transcript_rng(&mut rng, &transcript);
let (prover, initial_message) = prover.generate_initial_message(&mut transcript_rng);
initial_message.update_transcript(&mut transcript).unwrap();
let challenge = transcript
.scalar_challenge(CORRECTNESS_PROOF_CHALLENGE_LABEL)
.unwrap();
let final_response = prover.apply_challenge(&challenge);
let result = verifier.verify(&challenge, &initial_message, &final_response);
assert!(result.is_ok());
// Negative tests
let bad_initial_message = CorrectnessInitialMessage::default();
let result = verifier.verify(&challenge, &bad_initial_message, &final_response);
assert_err!(
result,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 1 }
);
let bad_final_response = CorrectnessFinalResponse(Scalar::default());
let result = verifier.verify(&challenge, &initial_message, &bad_final_response);
assert_err!(
result,
ErrorKind::CorrectnessFinalResponseVerificationError { check: 1 }
);
}
#[test]
#[wasm_bindgen_test]
fn serialize_deserialize_proof() {
let mut rng = StdRng::from_seed(SEED_1);
let secret_value = 42u32;
let secret_key = ElgamalSecretKey::new(Scalar::random(&mut rng));
let pub_key = secret_key.get_public_key();
let rand_blind = Scalar::random(&mut rng);
let w = CommitmentWitness::new(secret_value.into(), rand_blind);
let gens = PedersenGens::default();
let prover = CorrectnessProverAwaitingChallenge {
pub_key,
w,
pc_gens: &gens,
};
let (initial_message, final_response) = encryption_proofs::single_property_prover::<
StdRng,
CorrectnessProverAwaitingChallenge,
>(prover, &mut rng)
.unwrap();
let bytes = initial_message.encode();
let mut input = bytes.as_slice();
let recovered_initial_message = <CorrectnessInitialMessage>::decode(&mut input).unwrap();
assert_eq!(recovered_initial_message, initial_message);
let bytes = final_response.encode();
let mut input = bytes.as_slice();
let recovered_final_response = <CorrectnessFinalResponse>::decode(&mut input).unwrap();
assert_eq!(recovered_final_response, final_response);
}
} | random_line_split | |
main.rs | use rusttype::{point, Font, Glyph, Scale};
use winit::{
dpi::PhysicalSize,
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex {
position: [-0.5, -0.5, 0.0],
tex_coords: [0.0, 0.0],
},
Vertex {
position: [0.5, -0.5, 0.0],
tex_coords: [2.0, 0.0],
},
Vertex {
position: [0.5, 0.5, 0.0],
tex_coords: [2.0, 2.0],
},
Vertex {
position: [-0.5, 0.5, 0.0],
tex_coords: [0.0, 2.0],
},
];
const INDICES: &[u16] = &[2, 1, 0, 3, 2, 0];
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
swap_chain: wgpu::SwapChain,
sc_desc: wgpu::SwapChainDescriptor,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
}
fn rgba_color(r: u32, g: u32, b: u32, a: u32) -> u32 {
r | (g << 8) | (b << 16) | (a << 24)
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&wgpu::RequestAdapterOptions {
..Default::default()
})
.unwrap();
let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
});
let (sc_desc, swap_chain) = Self::create_swap_chain(&device, size, &surface);
let (
size3d,
diffuse_texture,
diffuse_buffer,
diffuse_sampler,
diffuse_texture_view,
diffuse_bind_group,
texture_bind_group_layout,
) = Self::create_texture_stuff(&device, &mut queue);
let render_pipeline = Self::create_pipeline(&device, &sc_desc, &texture_bind_group_layout);
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let num_indices = INDICES.len() as u32;
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
size,
}
}
fn create_pipeline(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
texture_bind_group_layout: &wgpu::BindGroupLayout,
) -> wgpu::RenderPipeline {
let vs_src = include_str!("vert.glsl");
let fs_src = include_str!("frag.glsl");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout],
});
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
})
}
fn create_swap_chain(
device: &wgpu::Device,
size: PhysicalSize<u32>,
surface: &wgpu::Surface,
) -> (wgpu::SwapChainDescriptor, wgpu::SwapChain) {
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
};
let swapchain = device.create_swap_chain(surface, &sc_desc);
(sc_desc, swapchain)
}
fn create_texture_stuff(
device: &wgpu::Device,
queue: &mut wgpu::Queue,
) -> (
wgpu::Extent3d,
wgpu::Texture,
wgpu::Buffer,
wgpu::Sampler,
wgpu::TextureView,
wgpu::BindGroup,
wgpu::BindGroupLayout,
) {
//let diffuse_bytes = include_bytes!("../happy-tree.png");
let font_bytes = include_bytes!("../ttf/JetBrainsMono-Regular.ttf");
let font = Font::from_bytes(font_bytes as &[u8]).expect("Failed to create font");
let glyph = font
.glyph('c')
.scaled(Scale { x: 50.0, y: 50.0 })
.positioned(point(10.0, 10.0));
let (gpos_x, gpos_y) = (glyph.position().x, glyph.position().y);
let mut font_buffer = vec![];
for _ in 0..40_000 {
font_buffer.push(rgba_color(255, 255, 255, 0));
}
glyph.draw(|y, x, v| {
font_buffer[((x + gpos_x as u32) * 200 + y + gpos_y as u32) as usize] =
rgba_color(255, 0, 0, (v * 255.0) as u32);
});
let dimensions = (200, 200);
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(font_buffer.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&font_buffer);
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
},
],
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
},
],
});
(
size3d,
diffuse_texture,
diffuse_buffer,
diffuse_sampler,
diffuse_texture_view,
diffuse_bind_group,
texture_bind_group_layout,
)
}
fn | (&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
}
self.queue.submit(&[encoder.finish()]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_inner_size(PhysicalSize::new(500, 500))
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
event_loop.run(move |event, _, control_flow| match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
},
Event::MainEventsCleared => {
state.render();
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
});
}
| resize | identifier_name |
main.rs | use rusttype::{point, Font, Glyph, Scale};
use winit::{
dpi::PhysicalSize,
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex {
position: [-0.5, -0.5, 0.0],
tex_coords: [0.0, 0.0],
},
Vertex {
position: [0.5, -0.5, 0.0],
tex_coords: [2.0, 0.0],
},
Vertex {
position: [0.5, 0.5, 0.0],
tex_coords: [2.0, 2.0],
},
Vertex {
position: [-0.5, 0.5, 0.0],
tex_coords: [0.0, 2.0],
},
];
const INDICES: &[u16] = &[2, 1, 0, 3, 2, 0];
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
swap_chain: wgpu::SwapChain,
sc_desc: wgpu::SwapChainDescriptor,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
}
fn rgba_color(r: u32, g: u32, b: u32, a: u32) -> u32 {
r | (g << 8) | (b << 16) | (a << 24)
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&wgpu::RequestAdapterOptions {
..Default::default()
})
.unwrap();
let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
});
let (sc_desc, swap_chain) = Self::create_swap_chain(&device, size, &surface);
let (
size3d,
diffuse_texture,
diffuse_buffer,
diffuse_sampler,
diffuse_texture_view,
diffuse_bind_group,
texture_bind_group_layout,
) = Self::create_texture_stuff(&device, &mut queue);
let render_pipeline = Self::create_pipeline(&device, &sc_desc, &texture_bind_group_layout);
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let num_indices = INDICES.len() as u32;
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
size,
}
}
fn create_pipeline(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
texture_bind_group_layout: &wgpu::BindGroupLayout,
) -> wgpu::RenderPipeline {
let vs_src = include_str!("vert.glsl");
let fs_src = include_str!("frag.glsl");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout],
});
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw, | depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
})
}
fn create_swap_chain(
device: &wgpu::Device,
size: PhysicalSize<u32>,
surface: &wgpu::Surface,
) -> (wgpu::SwapChainDescriptor, wgpu::SwapChain) {
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
};
let swapchain = device.create_swap_chain(surface, &sc_desc);
(sc_desc, swapchain)
}
fn create_texture_stuff(
device: &wgpu::Device,
queue: &mut wgpu::Queue,
) -> (
wgpu::Extent3d,
wgpu::Texture,
wgpu::Buffer,
wgpu::Sampler,
wgpu::TextureView,
wgpu::BindGroup,
wgpu::BindGroupLayout,
) {
//let diffuse_bytes = include_bytes!("../happy-tree.png");
let font_bytes = include_bytes!("../ttf/JetBrainsMono-Regular.ttf");
let font = Font::from_bytes(font_bytes as &[u8]).expect("Failed to create font");
let glyph = font
.glyph('c')
.scaled(Scale { x: 50.0, y: 50.0 })
.positioned(point(10.0, 10.0));
let (gpos_x, gpos_y) = (glyph.position().x, glyph.position().y);
let mut font_buffer = vec![];
for _ in 0..40_000 {
font_buffer.push(rgba_color(255, 255, 255, 0));
}
glyph.draw(|y, x, v| {
font_buffer[((x + gpos_x as u32) * 200 + y + gpos_y as u32) as usize] =
rgba_color(255, 0, 0, (v * 255.0) as u32);
});
let dimensions = (200, 200);
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(font_buffer.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&font_buffer);
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
},
],
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
},
],
});
(
size3d,
diffuse_texture,
diffuse_buffer,
diffuse_sampler,
diffuse_texture_view,
diffuse_bind_group,
texture_bind_group_layout,
)
}
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
}
self.queue.submit(&[encoder.finish()]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_inner_size(PhysicalSize::new(500, 500))
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
event_loop.run(move |event, _, control_flow| match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
},
Event::MainEventsCleared => {
state.render();
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
});
} | cull_mode: wgpu::CullMode::Back, | random_line_split |
main.rs | use rusttype::{point, Font, Glyph, Scale};
use winit::{
dpi::PhysicalSize,
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
#[repr(C)]
#[derive(Copy, Clone, Debug)]
struct Vertex {
position: [f32; 3],
tex_coords: [f32; 2],
}
impl Vertex {
fn desc<'a>() -> wgpu::VertexBufferDescriptor<'a> {
use std::mem;
wgpu::VertexBufferDescriptor {
stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &[
wgpu::VertexAttributeDescriptor {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float3,
},
wgpu::VertexAttributeDescriptor {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float2,
},
],
}
}
}
const VERTICES: &[Vertex] = &[
Vertex {
position: [-0.5, -0.5, 0.0],
tex_coords: [0.0, 0.0],
},
Vertex {
position: [0.5, -0.5, 0.0],
tex_coords: [2.0, 0.0],
},
Vertex {
position: [0.5, 0.5, 0.0],
tex_coords: [2.0, 2.0],
},
Vertex {
position: [-0.5, 0.5, 0.0],
tex_coords: [0.0, 2.0],
},
];
const INDICES: &[u16] = &[2, 1, 0, 3, 2, 0];
struct State {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
swap_chain: wgpu::SwapChain,
sc_desc: wgpu::SwapChainDescriptor,
render_pipeline: wgpu::RenderPipeline,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
diffuse_texture: wgpu::Texture,
diffuse_texture_view: wgpu::TextureView,
diffuse_sampler: wgpu::Sampler,
diffuse_bind_group: wgpu::BindGroup,
size: winit::dpi::PhysicalSize<u32>,
}
fn rgba_color(r: u32, g: u32, b: u32, a: u32) -> u32 {
r | (g << 8) | (b << 16) | (a << 24)
}
impl State {
fn new(window: &Window) -> Self {
let size = window.inner_size();
let surface = wgpu::Surface::create(window);
let adapter = wgpu::Adapter::request(&wgpu::RequestAdapterOptions {
..Default::default()
})
.unwrap();
let (device, mut queue) = adapter.request_device(&wgpu::DeviceDescriptor {
extensions: wgpu::Extensions {
anisotropic_filtering: false,
},
limits: Default::default(),
});
let (sc_desc, swap_chain) = Self::create_swap_chain(&device, size, &surface);
let (
size3d,
diffuse_texture,
diffuse_buffer,
diffuse_sampler,
diffuse_texture_view,
diffuse_bind_group,
texture_bind_group_layout,
) = Self::create_texture_stuff(&device, &mut queue);
let render_pipeline = Self::create_pipeline(&device, &sc_desc, &texture_bind_group_layout);
let vertex_buffer = device
.create_buffer_mapped(VERTICES.len(), wgpu::BufferUsage::VERTEX)
.fill_from_slice(VERTICES);
let index_buffer = device
.create_buffer_mapped(INDICES.len(), wgpu::BufferUsage::INDEX)
.fill_from_slice(INDICES);
let num_indices = INDICES.len() as u32;
Self {
surface,
device,
queue,
sc_desc,
swap_chain,
render_pipeline,
vertex_buffer,
index_buffer,
num_indices,
diffuse_texture,
diffuse_texture_view,
diffuse_sampler,
diffuse_bind_group,
size,
}
}
fn create_pipeline(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
texture_bind_group_layout: &wgpu::BindGroupLayout,
) -> wgpu::RenderPipeline {
let vs_src = include_str!("vert.glsl");
let fs_src = include_str!("frag.glsl");
let vs_spirv = glsl_to_spirv::compile(vs_src, glsl_to_spirv::ShaderType::Vertex).unwrap();
let fs_spirv = glsl_to_spirv::compile(fs_src, glsl_to_spirv::ShaderType::Fragment).unwrap();
let vs_data = wgpu::read_spirv(vs_spirv).unwrap();
let fs_data = wgpu::read_spirv(fs_spirv).unwrap();
let vs_module = device.create_shader_module(&vs_data);
let fs_module = device.create_shader_module(&fs_data);
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
bind_group_layouts: &[&texture_bind_group_layout],
});
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &render_pipeline_layout,
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: sc_desc.format,
color_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha_blend: wgpu::BlendDescriptor {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: None,
index_format: wgpu::IndexFormat::Uint16,
vertex_buffers: &[Vertex::desc()],
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
})
}
fn create_swap_chain(
device: &wgpu::Device,
size: PhysicalSize<u32>,
surface: &wgpu::Surface,
) -> (wgpu::SwapChainDescriptor, wgpu::SwapChain) {
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Vsync,
};
let swapchain = device.create_swap_chain(surface, &sc_desc);
(sc_desc, swapchain)
}
fn create_texture_stuff(
device: &wgpu::Device,
queue: &mut wgpu::Queue,
) -> (
wgpu::Extent3d,
wgpu::Texture,
wgpu::Buffer,
wgpu::Sampler,
wgpu::TextureView,
wgpu::BindGroup,
wgpu::BindGroupLayout,
) |
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
fn render(&mut self) {
let frame = self.swap_chain.get_next_texture();
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.view,
resolve_target: None,
load_op: wgpu::LoadOp::Clear,
store_op: wgpu::StoreOp::Store,
clear_color: wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
},
}],
depth_stencil_attachment: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
render_pass.set_vertex_buffers(0, &[(&self.vertex_buffer, 0)]);
render_pass.set_index_buffer(&self.index_buffer, 0);
render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
}
self.queue.submit(&[encoder.finish()]);
}
}
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_inner_size(PhysicalSize::new(500, 500))
.build(&event_loop)
.unwrap();
let mut state = State::new(&window);
event_loop.run(move |event, _, control_flow| match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => *control_flow = ControlFlow::Wait,
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
*control_flow = ControlFlow::Wait;
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
},
Event::MainEventsCleared => {
state.render();
*control_flow = ControlFlow::Wait;
}
_ => *control_flow = ControlFlow::Wait,
});
}
| {
//let diffuse_bytes = include_bytes!("../happy-tree.png");
let font_bytes = include_bytes!("../ttf/JetBrainsMono-Regular.ttf");
let font = Font::from_bytes(font_bytes as &[u8]).expect("Failed to create font");
let glyph = font
.glyph('c')
.scaled(Scale { x: 50.0, y: 50.0 })
.positioned(point(10.0, 10.0));
let (gpos_x, gpos_y) = (glyph.position().x, glyph.position().y);
let mut font_buffer = vec![];
for _ in 0..40_000 {
font_buffer.push(rgba_color(255, 255, 255, 0));
}
glyph.draw(|y, x, v| {
font_buffer[((x + gpos_x as u32) * 200 + y + gpos_y as u32) as usize] =
rgba_color(255, 0, 0, (v * 255.0) as u32);
});
let dimensions = (200, 200);
let size3d = wgpu::Extent3d {
width: dimensions.0,
height: dimensions.1,
depth: 1,
};
let diffuse_texture = device.create_texture(&wgpu::TextureDescriptor {
size: size3d,
array_layer_count: 1,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
});
let diffuse_buffer = device
.create_buffer_mapped(font_buffer.len(), wgpu::BufferUsage::COPY_SRC)
.fill_from_slice(&font_buffer);
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { todo: 0 });
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &diffuse_buffer,
offset: 0,
row_pitch: 4 * dimensions.0,
image_height: dimensions.1,
},
wgpu::TextureCopyView {
texture: &diffuse_texture,
mip_level: 0,
array_layer: 0,
origin: wgpu::Origin3d::ZERO,
},
size3d,
);
queue.submit(&[encoder.finish()]);
let diffuse_texture_view = diffuse_texture.create_default_view();
let diffuse_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
lod_min_clamp: -100.0,
lod_max_clamp: 100.0,
compare_function: wgpu::CompareFunction::Always,
});
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
bindings: &[
wgpu::BindGroupLayoutBinding {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
multisampled: false,
dimension: wgpu::TextureViewDimension::D2,
},
},
wgpu::BindGroupLayoutBinding {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler,
},
],
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
bindings: &[
wgpu::Binding {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture_view),
},
wgpu::Binding {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
},
],
});
(
size3d,
diffuse_texture,
diffuse_buffer,
diffuse_sampler,
diffuse_texture_view,
diffuse_bind_group,
texture_bind_group_layout,
)
} | identifier_body |
DataLoader.py | # This python file helps to get the data from the files, format and make it ready for transformers
from .tools import *
from transformers import BertTokenizer
from multiprocessing import Pool, cpu_count
import pickle, copy
import logging
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm , trange
import torch
CONFIG_FOLDER = 'config/'
id_label_file = 'id_2_label.json'
lable_2_id_file = 'label2_2_id.json'
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def convert_example_to_feature(example_row):
# return example_row
example, max_seq_length, tokenizer = example_row
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=example.label)
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def createDirectories(cls,config):
report_dir = config.programsettings.REPORTS_DIR
# if os.path.exists(report_dir) and os.listdir(report_dir):
# report_dir += f'/report_{len(os.listdir(report_dir))}'
# os.makedirs(report_dir)
output_dir = config.programsettings.OUTPUT_DIR
if not os.path.exists(output_dir):
os.makedirs(output_dir)
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class MultiClassificationProcessor(DataProcessor):
"""Processor for binary classification dataset."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def | (self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ['Reason-Drug', 'Route-Drug', 'Strength-Drug', 'Frequency-Drug',
'Duration-Drug', 'Form-Drug', 'Dosage-Drug', 'ADE-Drug',
'no relation']
# return ['no_relation' , 'org:subsidiaries' , 'org:city_of_headquarters' , 'per:title',
# 'per:origin' , 'per:employee_of' , 'org:top_members/employees',
# 'org:alternate_names' , 'org:shareholders' , 'org:country_of_headquarters',
# 'per:countries_of_residence' , 'per:date_of_death',
# 'per:cities_of_residence' , 'per:city_of_death' , 'per:age' , 'org:founded_by',
# 'org:parents' , 'org:member_of' , 'per:stateorprovinces_of_residence',
# 'per:religion' , 'org:founded' , 'org:stateorprovince_of_headquarters',
# 'per:alternate_names' , 'per:siblings' , 'per:charges',
# 'org:number_of_employees/members' , 'per:stateorprovince_of_death',
# 'org:members' , 'per:cause_of_death' , 'per:parents' , 'per:other_family',
# 'per:schools_attended' , 'per:children' , 'per:spouse' , 'per:country_of_birth',
# 'org:political/religious_affiliation' , 'per:country_of_death',
# 'per:date_of_birth' , 'per:city_of_birth' , 'org:website' , 'org:dissolved',
# 'per:stateorprovince_of_birth']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def get_data_loader(self, config, source="train"):
logging.basicConfig(level=logging.INFO)
self.config = config
# Create output, report directories, if doesn't exist already
self.createDirectories(config)
# This is to read input data and process them
if source == "train":
data = self.get_train_examples(config.programsettings.DATA_DIR)
elif source == "dev":
data = self.get_dev_examples(config.programsettings.DATA_DIR)
elif source == "test":
data = self.get_test_examples(config.programsettings.DATA_DIR)
data_len = len(data)
label_list = self.get_labels() # [0, 1] for binary classification
num_labels = len(label_list)
num_train_optimization_steps = int(
data_len / config.hyperparams.TRAIN_BATCH_SIZE / config.hyperparams.GRADIENT_ACCUMULATION_STEPS) * config.hyperparams.NUM_TRAIN_EPOCHS
seq_length = str(config.hyperparams.MAX_SEQ_LENGTH)
if source == "train":
feature_pickle_file = config.programsettings.DATA_DIR + "train_features_" + seq_length + ".pkl"
elif source == "dev":
feature_pickle_file = config.programsettings.DATA_DIR + "dev_features_" + seq_length + ".pkl"
elif source == "test":
feature_pickle_file = config.programsettings.DATA_DIR + "test_features_" + seq_length + ".pkl"
print("Looking for cached feature pickle file", feature_pickle_file)
if not os.path.exists(feature_pickle_file):
tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)
examples_for_processing = [(example, config.hyperparams.MAX_SEQ_LENGTH, tokenizer) for example in data]
process_count = cpu_count() - 1
with Pool(process_count) as p:
features = list(tqdm(p.imap(convert_example_to_feature, examples_for_processing), total=data_len))
with open(feature_pickle_file, "wb") as f:
pickle.dump(features, f)
with open(feature_pickle_file, "rb") as f:
features = pickle.load(f)
logger.info(" Num examples = %d", data_len)
logger.info(" Batch size = %d", config.hyperparams.TRAIN_BATCH_SIZE)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([int(f.label_id) for f in features], dtype=torch.long)
tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
t_sampler = RandomSampler(tensor_data)
dataloader = DataLoader(tensor_data, sampler=t_sampler, batch_size=config.hyperparams.TRAIN_BATCH_SIZE)
return dataloader, data_len, num_labels, num_train_optimization_steps, all_label_ids | get_dev_examples | identifier_name |
DataLoader.py | # This python file helps to get the data from the files, format and make it ready for transformers
from .tools import *
from transformers import BertTokenizer
from multiprocessing import Pool, cpu_count
import pickle, copy
import logging
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm , trange
import torch
CONFIG_FOLDER = 'config/'
id_label_file = 'id_2_label.json'
lable_2_id_file = 'label2_2_id.json'
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def convert_example_to_feature(example_row):
# return example_row
example, max_seq_length, tokenizer = example_row
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=example.label)
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def createDirectories(cls,config):
report_dir = config.programsettings.REPORTS_DIR
# if os.path.exists(report_dir) and os.listdir(report_dir):
# report_dir += f'/report_{len(os.listdir(report_dir))}'
# os.makedirs(report_dir)
output_dir = config.programsettings.OUTPUT_DIR
if not os.path.exists(output_dir):
os.makedirs(output_dir)
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class MultiClassificationProcessor(DataProcessor):
"""Processor for binary classification dataset."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ['Reason-Drug', 'Route-Drug', 'Strength-Drug', 'Frequency-Drug',
'Duration-Drug', 'Form-Drug', 'Dosage-Drug', 'ADE-Drug',
'no relation']
# return ['no_relation' , 'org:subsidiaries' , 'org:city_of_headquarters' , 'per:title',
# 'per:origin' , 'per:employee_of' , 'org:top_members/employees',
# 'org:alternate_names' , 'org:shareholders' , 'org:country_of_headquarters',
# 'per:countries_of_residence' , 'per:date_of_death',
# 'per:cities_of_residence' , 'per:city_of_death' , 'per:age' , 'org:founded_by',
# 'org:parents' , 'org:member_of' , 'per:stateorprovinces_of_residence',
# 'per:religion' , 'org:founded' , 'org:stateorprovince_of_headquarters',
# 'per:alternate_names' , 'per:siblings' , 'per:charges',
# 'org:number_of_employees/members' , 'per:stateorprovince_of_death',
# 'org:members' , 'per:cause_of_death' , 'per:parents' , 'per:other_family',
# 'per:schools_attended' , 'per:children' , 'per:spouse' , 'per:country_of_birth',
# 'org:political/religious_affiliation' , 'per:country_of_death',
# 'per:date_of_birth' , 'per:city_of_birth' , 'org:website' , 'org:dissolved',
# 'per:stateorprovince_of_birth']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def get_data_loader(self, config, source="train"):
logging.basicConfig(level=logging.INFO)
self.config = config
# Create output, report directories, if doesn't exist already
self.createDirectories(config)
# This is to read input data and process them
if source == "train":
data = self.get_train_examples(config.programsettings.DATA_DIR)
elif source == "dev":
data = self.get_dev_examples(config.programsettings.DATA_DIR)
elif source == "test": | num_labels = len(label_list)
num_train_optimization_steps = int(
data_len / config.hyperparams.TRAIN_BATCH_SIZE / config.hyperparams.GRADIENT_ACCUMULATION_STEPS) * config.hyperparams.NUM_TRAIN_EPOCHS
seq_length = str(config.hyperparams.MAX_SEQ_LENGTH)
if source == "train":
feature_pickle_file = config.programsettings.DATA_DIR + "train_features_" + seq_length + ".pkl"
elif source == "dev":
feature_pickle_file = config.programsettings.DATA_DIR + "dev_features_" + seq_length + ".pkl"
elif source == "test":
feature_pickle_file = config.programsettings.DATA_DIR + "test_features_" + seq_length + ".pkl"
print("Looking for cached feature pickle file", feature_pickle_file)
if not os.path.exists(feature_pickle_file):
tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)
examples_for_processing = [(example, config.hyperparams.MAX_SEQ_LENGTH, tokenizer) for example in data]
process_count = cpu_count() - 1
with Pool(process_count) as p:
features = list(tqdm(p.imap(convert_example_to_feature, examples_for_processing), total=data_len))
with open(feature_pickle_file, "wb") as f:
pickle.dump(features, f)
with open(feature_pickle_file, "rb") as f:
features = pickle.load(f)
logger.info(" Num examples = %d", data_len)
logger.info(" Batch size = %d", config.hyperparams.TRAIN_BATCH_SIZE)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([int(f.label_id) for f in features], dtype=torch.long)
tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
t_sampler = RandomSampler(tensor_data)
dataloader = DataLoader(tensor_data, sampler=t_sampler, batch_size=config.hyperparams.TRAIN_BATCH_SIZE)
return dataloader, data_len, num_labels, num_train_optimization_steps, all_label_ids | data = self.get_test_examples(config.programsettings.DATA_DIR)
data_len = len(data)
label_list = self.get_labels() # [0, 1] for binary classification | random_line_split |
DataLoader.py | # This python file helps to get the data from the files, format and make it ready for transformers
from .tools import *
from transformers import BertTokenizer
from multiprocessing import Pool, cpu_count
import pickle, copy
import logging
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm , trange
import torch
CONFIG_FOLDER = 'config/'
id_label_file = 'id_2_label.json'
lable_2_id_file = 'label2_2_id.json'
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
|
def convert_example_to_feature(example_row):
# return example_row
example, max_seq_length, tokenizer = example_row
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=example.label)
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def createDirectories(cls,config):
report_dir = config.programsettings.REPORTS_DIR
# if os.path.exists(report_dir) and os.listdir(report_dir):
# report_dir += f'/report_{len(os.listdir(report_dir))}'
# os.makedirs(report_dir)
output_dir = config.programsettings.OUTPUT_DIR
if not os.path.exists(output_dir):
os.makedirs(output_dir)
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class MultiClassificationProcessor(DataProcessor):
"""Processor for binary classification dataset."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ['Reason-Drug', 'Route-Drug', 'Strength-Drug', 'Frequency-Drug',
'Duration-Drug', 'Form-Drug', 'Dosage-Drug', 'ADE-Drug',
'no relation']
# return ['no_relation' , 'org:subsidiaries' , 'org:city_of_headquarters' , 'per:title',
# 'per:origin' , 'per:employee_of' , 'org:top_members/employees',
# 'org:alternate_names' , 'org:shareholders' , 'org:country_of_headquarters',
# 'per:countries_of_residence' , 'per:date_of_death',
# 'per:cities_of_residence' , 'per:city_of_death' , 'per:age' , 'org:founded_by',
# 'org:parents' , 'org:member_of' , 'per:stateorprovinces_of_residence',
# 'per:religion' , 'org:founded' , 'org:stateorprovince_of_headquarters',
# 'per:alternate_names' , 'per:siblings' , 'per:charges',
# 'org:number_of_employees/members' , 'per:stateorprovince_of_death',
# 'org:members' , 'per:cause_of_death' , 'per:parents' , 'per:other_family',
# 'per:schools_attended' , 'per:children' , 'per:spouse' , 'per:country_of_birth',
# 'org:political/religious_affiliation' , 'per:country_of_death',
# 'per:date_of_birth' , 'per:city_of_birth' , 'org:website' , 'org:dissolved',
# 'per:stateorprovince_of_birth']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def get_data_loader(self, config, source="train"):
logging.basicConfig(level=logging.INFO)
self.config = config
# Create output, report directories, if doesn't exist already
self.createDirectories(config)
# This is to read input data and process them
if source == "train":
data = self.get_train_examples(config.programsettings.DATA_DIR)
elif source == "dev":
data = self.get_dev_examples(config.programsettings.DATA_DIR)
elif source == "test":
data = self.get_test_examples(config.programsettings.DATA_DIR)
data_len = len(data)
label_list = self.get_labels() # [0, 1] for binary classification
num_labels = len(label_list)
num_train_optimization_steps = int(
data_len / config.hyperparams.TRAIN_BATCH_SIZE / config.hyperparams.GRADIENT_ACCUMULATION_STEPS) * config.hyperparams.NUM_TRAIN_EPOCHS
seq_length = str(config.hyperparams.MAX_SEQ_LENGTH)
if source == "train":
feature_pickle_file = config.programsettings.DATA_DIR + "train_features_" + seq_length + ".pkl"
elif source == "dev":
feature_pickle_file = config.programsettings.DATA_DIR + "dev_features_" + seq_length + ".pkl"
elif source == "test":
feature_pickle_file = config.programsettings.DATA_DIR + "test_features_" + seq_length + ".pkl"
print("Looking for cached feature pickle file", feature_pickle_file)
if not os.path.exists(feature_pickle_file):
tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)
examples_for_processing = [(example, config.hyperparams.MAX_SEQ_LENGTH, tokenizer) for example in data]
process_count = cpu_count() - 1
with Pool(process_count) as p:
features = list(tqdm(p.imap(convert_example_to_feature, examples_for_processing), total=data_len))
with open(feature_pickle_file, "wb") as f:
pickle.dump(features, f)
with open(feature_pickle_file, "rb") as f:
features = pickle.load(f)
logger.info(" Num examples = %d", data_len)
logger.info(" Batch size = %d", config.hyperparams.TRAIN_BATCH_SIZE)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([int(f.label_id) for f in features], dtype=torch.long)
tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
t_sampler = RandomSampler(tensor_data)
dataloader = DataLoader(tensor_data, sampler=t_sampler, batch_size=config.hyperparams.TRAIN_BATCH_SIZE)
return dataloader, data_len, num_labels, num_train_optimization_steps, all_label_ids | tokens_b.pop() | conditional_block |
DataLoader.py | # This python file helps to get the data from the files, format and make it ready for transformers
from .tools import *
from transformers import BertTokenizer
from multiprocessing import Pool, cpu_count
import pickle, copy
import logging
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm , trange
import torch
CONFIG_FOLDER = 'config/'
id_label_file = 'id_2_label.json'
lable_2_id_file = 'label2_2_id.json'
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def convert_example_to_feature(example_row):
# return example_row
example, max_seq_length, tokenizer = example_row
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=example.label)
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def createDirectories(cls,config):
|
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class MultiClassificationProcessor(DataProcessor):
"""Processor for binary classification dataset."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ['Reason-Drug', 'Route-Drug', 'Strength-Drug', 'Frequency-Drug',
'Duration-Drug', 'Form-Drug', 'Dosage-Drug', 'ADE-Drug',
'no relation']
# return ['no_relation' , 'org:subsidiaries' , 'org:city_of_headquarters' , 'per:title',
# 'per:origin' , 'per:employee_of' , 'org:top_members/employees',
# 'org:alternate_names' , 'org:shareholders' , 'org:country_of_headquarters',
# 'per:countries_of_residence' , 'per:date_of_death',
# 'per:cities_of_residence' , 'per:city_of_death' , 'per:age' , 'org:founded_by',
# 'org:parents' , 'org:member_of' , 'per:stateorprovinces_of_residence',
# 'per:religion' , 'org:founded' , 'org:stateorprovince_of_headquarters',
# 'per:alternate_names' , 'per:siblings' , 'per:charges',
# 'org:number_of_employees/members' , 'per:stateorprovince_of_death',
# 'org:members' , 'per:cause_of_death' , 'per:parents' , 'per:other_family',
# 'per:schools_attended' , 'per:children' , 'per:spouse' , 'per:country_of_birth',
# 'org:political/religious_affiliation' , 'per:country_of_death',
# 'per:date_of_birth' , 'per:city_of_birth' , 'org:website' , 'org:dissolved',
# 'per:stateorprovince_of_birth']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def get_data_loader(self, config, source="train"):
logging.basicConfig(level=logging.INFO)
self.config = config
# Create output, report directories, if doesn't exist already
self.createDirectories(config)
# This is to read input data and process them
if source == "train":
data = self.get_train_examples(config.programsettings.DATA_DIR)
elif source == "dev":
data = self.get_dev_examples(config.programsettings.DATA_DIR)
elif source == "test":
data = self.get_test_examples(config.programsettings.DATA_DIR)
data_len = len(data)
label_list = self.get_labels() # [0, 1] for binary classification
num_labels = len(label_list)
num_train_optimization_steps = int(
data_len / config.hyperparams.TRAIN_BATCH_SIZE / config.hyperparams.GRADIENT_ACCUMULATION_STEPS) * config.hyperparams.NUM_TRAIN_EPOCHS
seq_length = str(config.hyperparams.MAX_SEQ_LENGTH)
if source == "train":
feature_pickle_file = config.programsettings.DATA_DIR + "train_features_" + seq_length + ".pkl"
elif source == "dev":
feature_pickle_file = config.programsettings.DATA_DIR + "dev_features_" + seq_length + ".pkl"
elif source == "test":
feature_pickle_file = config.programsettings.DATA_DIR + "test_features_" + seq_length + ".pkl"
print("Looking for cached feature pickle file", feature_pickle_file)
if not os.path.exists(feature_pickle_file):
tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)
examples_for_processing = [(example, config.hyperparams.MAX_SEQ_LENGTH, tokenizer) for example in data]
process_count = cpu_count() - 1
with Pool(process_count) as p:
features = list(tqdm(p.imap(convert_example_to_feature, examples_for_processing), total=data_len))
with open(feature_pickle_file, "wb") as f:
pickle.dump(features, f)
with open(feature_pickle_file, "rb") as f:
features = pickle.load(f)
logger.info(" Num examples = %d", data_len)
logger.info(" Batch size = %d", config.hyperparams.TRAIN_BATCH_SIZE)
logger.info(" Num steps = %d", num_train_optimization_steps)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([int(f.label_id) for f in features], dtype=torch.long)
tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
t_sampler = RandomSampler(tensor_data)
dataloader = DataLoader(tensor_data, sampler=t_sampler, batch_size=config.hyperparams.TRAIN_BATCH_SIZE)
return dataloader, data_len, num_labels, num_train_optimization_steps, all_label_ids | report_dir = config.programsettings.REPORTS_DIR
# if os.path.exists(report_dir) and os.listdir(report_dir):
# report_dir += f'/report_{len(os.listdir(report_dir))}'
# os.makedirs(report_dir)
output_dir = config.programsettings.OUTPUT_DIR
if not os.path.exists(output_dir):
os.makedirs(output_dir) | identifier_body |
getSocialInformation.js | import { getClient } from '../apiUtilities/commonRequestUtility';
import axios from 'axios';
export function getSocialInformation() | {
return axios({
method: "POST",
url: "http://localhost:4000/graphql",
data: {
query: `
{
authorDetails {
popularity
isTrending
title
description
numComments
thumbnail
codeSubmissionTotal
pledgeGoal
pledgerCount
pledgeTotal
status
authors{
name
picture
score
}
}
}
`
}
})
// const mySocialData=[
// {
// "author" : {
// "name" : "Mina Lambert",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 56.3
// },
// "popularity" : 34.13,
// "isTrending" : true,
// "date" : "2019-06-13T08:15:00.011Z",
// "title" : "Private Encrypted Direct Messaging",
// "description" : "React EOS aims to give developers and designers the tools necessary for creating clean, organized, and intentional code from the beginning of the development process. Defining your components, their hierarchy, and how they interact via state and props, can alleviate the need for refactoring, and",
// "numComments" : 10,
// "thumbnail" : "https://inkbotdesign.com/wp-content/uploads/2012/09/Walmart-Logo-Design.webp",
// "codeSubmissionTotal" : 4,
// "pledgeTotal" : 243,
// "pledgeGoal" : 800,
// "pledgerCount" : 24,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Iris Barrett",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 12
// },
// "popularity" : 22,
// "isTrending" : true,
// "date" : "2019-02-13T08:15:00.011Z",
// "title" : "Azani Sport",
// "description" : "India-based Azani, which was founded by Indian squash champion Siddharth Suchde, is a DTC performance sportswear brand that offers high-quality footwear, apparel, and accessories that are both aspirational and accessible to mainstream consumers in emerging markets Read more at: https://yourstory.com/2019/04/sequoia-surge-early-stage-startups-cohort",
// "numComments" : 5,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Old_Nike_logo.jpg?width=444&name=Old_Nike_logo.jpg",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 0,
// "pledgeGoal" : 200,
// "pledgerCount" : 0,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Phyllis Love",
// "picture" : "https://images.pexels.com/photos/774909/pexels-photo-774909.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 43.0
// },
// "popularity" : 14.13,
// "isTrending" : false,
// "date" : "2019-12-10T08:15:00.011Z",
// "title" : "Bobobox",
// "description" : "Launched in June 2018 by Indra Gunawan and Antonius Bong, Bobobox is a tech-first Indonesian capsule hotel brand built for millennial adventurers and smart business travellers. The first hotel the company opened, boasts an 88 percent occupancy rate while 97 percent of guests surveyed said they would return to stay there. Read more at: https://yourstory.com/2019/04/sequoia-surge-early-stage-startups-cohort",
// "numComments" : 5,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/1000px-Coca-Cola_logo.svg.png?width=634&name=1000px-Coca-Cola_logo.svg.png",
// "codeSubmissionTotal" : 20,
// "pledgeTotal" : 25,
// "pledgeGoal" : 100,
// "pledgerCount" : 2,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Cody Hoffman",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 82.5
// },
// "popularity" : 29,
// "isTrending" : false,
// "date" : "2019-06-13T08:15:00.011Z",
// "title" : "Bulbul",
// "description" : "Launched in December 2018, Bulbul is India’s first vernacular live-streaming e-commerce platform. Founded by Sachin Bhatia, Atit Jain, and Sichen (Sianna) Liu, Bulbul is redefining how products are sold in India by providing a live, engaging, and interactive experience for buyers and sellers in most regional languages. Bulbul, which hit 500 transactions per day just 40 days after launch, will pass the 1,000 per day mark by end April and is growing 2x month-on-month.",
// "numComments" : 10,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/544px-Google_2015_logo.svg.png?width=710&name=544px-Google_2015_logo.svg.png",
// "codeSubmissionTotal" : 4,
// "pledgeTotal" : 44,
// "pledgeGoal" : 2000,
// "pledgerCount" : 10,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Zachary Wood",
// "picture" : "https://images.pexels.com/photos/1239291/pexels-photo-1239291.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 22
// },
// "popularity" : 29,
// "isTrending" : true,
// "date" : "2019-10-13T08:15:00.011Z",
// "title" : "DancingMind",
// "description" : "Founded by Jennifer Zhang in September 2018, Singapore-based DancingMind has pioneered a VR-based ‘one to many’ remote therapy solution for stroke, dementia, Parkinson’s, and cognitively impaired patients. The company is making therapy more engaging, affordable, and accessible in a sector where therapists are, globally, in short supply. The startup has signed up 15 healthcare facilities in Singapore and the UK in six months of launch.",
// "numComments" : 2,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/340px-Apple_logo_black.svg.png?width=362&name=340px-Apple_logo_black.svg.png",
// "codeSubmissionTotal" : 42,
// "pledgeTotal" : 0,
// "pledgeGoal" : 2000,
// "pledgerCount" : 0,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Dianna Grant",
// "picture" : "https://images.pexels.com/photos/736716/pexels-photo-736716.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 66
// },
// "popularity" : 299,
// "isTrending" : false,
// "date" : "2019-08-13T08:15:00.011Z",
// "title" : "Doubtnut",
// "description" : "India-based Doubtnut, which was founded by Tanushree Nagori and Aditya Shankar, makes learning easy: just snap a photo of a problem to find a solution and watch videos that help students learn the topic better. Launched in October 2017, Doubtnut is a comprehensive learning app that features a simple user interface in multiple local Indian languages, and uses complex AI and ML technologies to serve up answers and video lessons in response to photos of specific problems.",
// "numComments" : 0,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Microsoft.png?width=790&name=Microsoft.png",
// "codeSubmissionTotal" : 24,
// "pledgeTotal" : 0,
// "pledgeGoal" : 500,
// "pledgerCount" : 0,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Kerry Jenkins",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 51
// },
// "popularity" : 28,
// "isTrending" : true,
// "date" : "2019-03-13T08:15:00.011Z",
// "title" : "Flynote",
// "description" : "India-based Flynote, which was launched in April 2018 by Founders Abhinav Prakash, Devvarat Meena, Ankit Abhishek, Parth Pratik, and Shukant Agrawal, is building a global community of travel enthusiasts, known as Flynote Fellows, who travel and experience the world on the company’s sponsorship",
// "numComments" : 2,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Pepsi.png?width=276&name=Pepsi.png",
// "codeSubmissionTotal" : 23,
// "pledgeTotal" : 100,
// "pledgeGoal" : 100,
// "pledgerCount" : 1,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Roland Page",
// "picture" : "https://images.pexels.com/photos/1222271/pexels-photo-1222271.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 82
// },
// "popularity" : 88,
// "isTrending" : true,
// "date" : "2019-10-13T08:15:00.011Z",
// "title" : "Hippo Video",
// "description" : "Hippo Video is a next-gen video marketing SaaS platform that helps marketers create, edit, and personalise video content on a mass scale, as well as analyse responses and convert more viewers into customers. The company, which was founded by Karthi Mariappan, Nilam Chand Jain, and Srinivasan Krishnan in July 2018, has over 200 customers worldwide, including Freshworks, Essilor, and Goulet Pens.",
// "numComments" : 6,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Ebay.png?width=796&name=Ebay.png",
// "codeSubmissionTotal" : 4,
// "pledgeTotal" : 200,
// "pledgeGoal" : 5000,
// "pledgerCount" : 3,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Travis Erickson",
// "picture" : "https://images.pexels.com/photos/1222271/pexels-photo-1222271.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 16
// },
// "popularity" : 138,
// "isTrending" : true,
// "date" : "2019-12-13T08:15:00.011Z",
// "title" : "InterviewBit Academy",
// "description" : "InterviewBit Academy offers computer science education via live online classes to prepare students for jobs in leading technology companies. Students pay only when they land a job beyond a target salary threshold. Their first cohort, which went live on April 15, will train 300 candidates selected from 9,000 applications.",
// "numComments" : 1,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/590px-FedEx_Corporation_-_2016_Logo.svg.png?width=816&name=590px-FedEx_Corporation_-_2016_Logo.svg.png",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 0,
// "pledgeGoal" : 200,
// "pledgerCount" : 0,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Mattie Houston",
// "picture" : "https://images.pexels.com/photos/1024311/pexels-photo-1024311.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 77
// },
// "popularity" : 288,
// "isTrending" : true,
// "date" : "2019-05-13T08:15:00.011Z",
// "title" : "Khatabook",
// "description" : "Launched in October last year, Khatabook is India’s leading multi-language mobile application for small and medium businesses to record cash transactions and track payments. The company, which was founded by Ravish Naresh, Jaideep Poonia, Dhanesh Kumar, and Ashish Sonone, recently crossed over 120,000 weekly active merchants, are growing organically at 40 percent MoM.",
// "numComments" : 1,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/3M.png?width=446&name=3M.png",
// "codeSubmissionTotal" : 0,
// "pledgeTotal" : 50,
// "pledgeGoal" : 500,
// "pledgerCount" : 1,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Leah Rogers",
// "picture" : "https://images.pexels.com/photos/1438072/pexels-photo-1438072.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 14
// },
// "popularity" : 25,
// "isTrending" : true,
// "date" : "2019-03-13T08:15:00.011Z",
// "title" : "Qoala",
// "description" : "Indonesia-based Qoala, which was founded by Harshet Lunani and Tommy Martin in September 2018, is an Insurtech company that works with insurers to offer innovative insurance products for new use cases like flight delays, phone screen damages, e-commerce logistics etc, using a combination of underwriting risk for high frequency events, digital fraud detection, and a simplified, photo-based claims process.",
// "numComments" : 5,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/684px-Olympic_rings_without_rims.svg.png?width=662&name=684px-Olympic_rings_without_rims.svg.png",
// "codeSubmissionTotal" : 0,
// "pledgeTotal" : 400,
// "pledgeGoal" : 8000,
// "pledgerCount" : 400,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Peter Reid",
// "picture" : "https://images.pexels.com/photos/91227/pexels-photo-91227.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 84
// },
// "popularity" : 96,
// "isTrending" : false,
// "date" : "2019-01-13T08:15:00.011Z",
// "title" : "ShopUp",
// "description" : "Bangladesh-based ShopUp is a social commerce platform that helps micro-entrepreneurs in Bangladesh set up and run their businesses on Facebook, which accounts for more than 80 percent of the country’s online commerce sector. Founded by Afeef Zubaer Zaman, Siffat Sarwar, and Ataur Rahim Chowdhury, the ShopUp platform provides tools for sourcing, cataloguing, payments, shipping, and access to business loans.",
// "numComments" : 2,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/WaltDisney.png?width=618&name=WaltDisney.png",
// "codeSubmissionTotal" : 1,
// "pledgeTotal" : 300,
// "pledgeGoal" : 400,
// "pledgerCount" : 10,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Edward George",
// "picture" : "https://images.pexels.com/photos/1082962/pexels-photo-1082962.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 55
// },
// "popularity" : 12,
// "isTrending" : true,
// "date" : "2019-06-13T08:15:00.011Z",
// "title" : "Skillmatics",
// "description" : "Skillmatics, which was founded by Dhvanil Sheth, is a D2C brand that develops innovative educational products and games that enable children to build core skills ranging from math and language to science and logic through systematic play. Skillmatics, which aims to disrupt a $100 billion global industry, has hit $1 million in revenue run rate. They sell across 15+ countries, with a strong focus on North America and is the first Indian brand ever, to sell across Hamleys globally.",
// "numComments" : 1,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/500px-United_Parcel_Service_logo_2014.svg.png?width=350&name=500px-United_Parcel_Service_logo_2014.svg.png",
// "codeSubmissionTotal" : 9,
// "pledgeTotal" : 400,
// "pledgeGoal" : 8000,
// "pledgerCount" : 400,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Lydia Griffin",
// "picture" : "https://images.pexels.com/photos/937481/pexels-photo-937481.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 99
// },
// "popularity" : 285,
// "isTrending" : true,
// "date" : "2019-09-13T08:15:00.011Z",
// "title" : "Telio",
// "description" : "Founded by SyPhong Bui in November 2018, Telio is building Vietnam’s largest B2B commerce company using the power of tech and data. In Vietnam’s highly-fragmented $20 billion FMCG market, Telio enables small retailers to procure goods from its web and mobile platform, replacing the need to deal with multiple wholesalers and providing more choice, better pricing and more efficient logistics.",
// "numComments" : 20,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Gap.png?width=398&name=Gap.png",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 0,
// "pledgeGoal" : 100,
// "pledgerCount" : 0,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Willard Wise",
// "picture" : "https://images.pexels.com/photos/1065084/pexels-photo-1065084.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 25
// },
// "popularity" : 255,
// "isTrending" : true,
// "date" : "2019-02-13T08:15:00.011Z",
// "title" : "Uiza",
// "description" : "Setting up a video streaming infrastructure is complex and expensive, while leveraging YouTube makes it harder to monetize, increase user engagement, and capture user data. Launched in July 2018 by Founders Kevin Nguyen and Gareth Nguyen, Uiza – which is based in both Vietnam and Singapore – is an API platform for video streaming that enables developers and companies to stream videos directly from their own app in a fast and cost-effective manner through a set of simple APIs.",
// "numComments" : 42,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/550px-Honda.svg.png?width=546&name=550px-Honda.svg.png",
// "codeSubmissionTotal" : 245,
// "pledgeTotal" : 40,
// "pledgeGoal" : 40,
// "pledgerCount" : 40,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Geneva Thompson",
// "picture" : "https://images.pexels.com/photos/712521/pexels-photo-712521.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 83
// },
// "popularity" : 25,
// "isTrending" : false,
// "date" : "2019-03-13T08:15:00.011Z",
// "title" : "Vybes",
// "description" : "Launched in January 2019 by Wei Qing Jen and Samuel Lipoff, Singapore-based Vybes is on a mission to enable influencer-driven commerce in today’s social media powered world. Using a proprietary AI engine, Vybes matches influencers with brands, and provides them with a wide array of products and online tools to enable sales to their large follower base. Vybes has over 400,000 influencers in its database and has been onboarding brands at a monthly growth rate of 140 percent.",
// "numComments" : 23,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/BPLogo.jpg?width=534&name=BPLogo.jpg",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 450,
// "pledgeGoal" : 750,
// "pledgerCount" : 4,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Arthur Gutierrez",
// "picture" : "https://images.pexels.com/photos/941693/pexels-photo-941693.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 25
// },
// "popularity" : 86,
// "isTrending" : true,
// "date" : "2019-10-13T08:15:00.011Z",
// "title" : "Zenyum",
// "description" : "Singapore-based Zenyum is a fast-growing dental care brand providing high quality 3D-printed invisible braces to consumers. The dental care market in ASEAN and India is growing rapidly, especially amongst Asia’s burgeoning young, middle class. Zenyum has launched in four countries.",
// "numComments" : 6,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Colgate.png?width=882&name=Colgate.png",
// "codeSubmissionTotal" : 11,
// "pledgeTotal" : 400,
// "pledgeGoal" : 500,
// "pledgerCount" : 10,
// "status" : 0
// }
// ]
// return mySocialData
} | identifier_body | |
getSocialInformation.js | import { getClient } from '../apiUtilities/commonRequestUtility';
import axios from 'axios';
export function | () {
return axios({
method: "POST",
url: "http://localhost:4000/graphql",
data: {
query: `
{
authorDetails {
popularity
isTrending
title
description
numComments
thumbnail
codeSubmissionTotal
pledgeGoal
pledgerCount
pledgeTotal
status
authors{
name
picture
score
}
}
}
`
}
})
// const mySocialData=[
// {
// "author" : {
// "name" : "Mina Lambert",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 56.3
// },
// "popularity" : 34.13,
// "isTrending" : true,
// "date" : "2019-06-13T08:15:00.011Z",
// "title" : "Private Encrypted Direct Messaging",
// "description" : "React EOS aims to give developers and designers the tools necessary for creating clean, organized, and intentional code from the beginning of the development process. Defining your components, their hierarchy, and how they interact via state and props, can alleviate the need for refactoring, and",
// "numComments" : 10,
// "thumbnail" : "https://inkbotdesign.com/wp-content/uploads/2012/09/Walmart-Logo-Design.webp",
// "codeSubmissionTotal" : 4,
// "pledgeTotal" : 243,
// "pledgeGoal" : 800,
// "pledgerCount" : 24,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Iris Barrett",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 12
// },
// "popularity" : 22,
// "isTrending" : true,
// "date" : "2019-02-13T08:15:00.011Z",
// "title" : "Azani Sport",
// "description" : "India-based Azani, which was founded by Indian squash champion Siddharth Suchde, is a DTC performance sportswear brand that offers high-quality footwear, apparel, and accessories that are both aspirational and accessible to mainstream consumers in emerging markets Read more at: https://yourstory.com/2019/04/sequoia-surge-early-stage-startups-cohort",
// "numComments" : 5,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Old_Nike_logo.jpg?width=444&name=Old_Nike_logo.jpg",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 0,
// "pledgeGoal" : 200,
// "pledgerCount" : 0,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Phyllis Love",
// "picture" : "https://images.pexels.com/photos/774909/pexels-photo-774909.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 43.0
// },
// "popularity" : 14.13,
// "isTrending" : false,
// "date" : "2019-12-10T08:15:00.011Z",
// "title" : "Bobobox",
// "description" : "Launched in June 2018 by Indra Gunawan and Antonius Bong, Bobobox is a tech-first Indonesian capsule hotel brand built for millennial adventurers and smart business travellers. The first hotel the company opened, boasts an 88 percent occupancy rate while 97 percent of guests surveyed said they would return to stay there. Read more at: https://yourstory.com/2019/04/sequoia-surge-early-stage-startups-cohort",
// "numComments" : 5,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/1000px-Coca-Cola_logo.svg.png?width=634&name=1000px-Coca-Cola_logo.svg.png",
// "codeSubmissionTotal" : 20,
// "pledgeTotal" : 25,
// "pledgeGoal" : 100,
// "pledgerCount" : 2,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Cody Hoffman",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 82.5
// },
// "popularity" : 29,
// "isTrending" : false,
// "date" : "2019-06-13T08:15:00.011Z",
// "title" : "Bulbul",
// "description" : "Launched in December 2018, Bulbul is India’s first vernacular live-streaming e-commerce platform. Founded by Sachin Bhatia, Atit Jain, and Sichen (Sianna) Liu, Bulbul is redefining how products are sold in India by providing a live, engaging, and interactive experience for buyers and sellers in most regional languages. Bulbul, which hit 500 transactions per day just 40 days after launch, will pass the 1,000 per day mark by end April and is growing 2x month-on-month.",
// "numComments" : 10,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/544px-Google_2015_logo.svg.png?width=710&name=544px-Google_2015_logo.svg.png",
// "codeSubmissionTotal" : 4,
// "pledgeTotal" : 44,
// "pledgeGoal" : 2000,
// "pledgerCount" : 10,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Zachary Wood",
// "picture" : "https://images.pexels.com/photos/1239291/pexels-photo-1239291.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 22
// },
// "popularity" : 29,
// "isTrending" : true,
// "date" : "2019-10-13T08:15:00.011Z",
// "title" : "DancingMind",
// "description" : "Founded by Jennifer Zhang in September 2018, Singapore-based DancingMind has pioneered a VR-based ‘one to many’ remote therapy solution for stroke, dementia, Parkinson’s, and cognitively impaired patients. The company is making therapy more engaging, affordable, and accessible in a sector where therapists are, globally, in short supply. The startup has signed up 15 healthcare facilities in Singapore and the UK in six months of launch.",
// "numComments" : 2,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/340px-Apple_logo_black.svg.png?width=362&name=340px-Apple_logo_black.svg.png",
// "codeSubmissionTotal" : 42,
// "pledgeTotal" : 0,
// "pledgeGoal" : 2000,
// "pledgerCount" : 0,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Dianna Grant",
// "picture" : "https://images.pexels.com/photos/736716/pexels-photo-736716.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 66
// },
// "popularity" : 299,
// "isTrending" : false,
// "date" : "2019-08-13T08:15:00.011Z",
// "title" : "Doubtnut",
// "description" : "India-based Doubtnut, which was founded by Tanushree Nagori and Aditya Shankar, makes learning easy: just snap a photo of a problem to find a solution and watch videos that help students learn the topic better. Launched in October 2017, Doubtnut is a comprehensive learning app that features a simple user interface in multiple local Indian languages, and uses complex AI and ML technologies to serve up answers and video lessons in response to photos of specific problems.",
// "numComments" : 0,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Microsoft.png?width=790&name=Microsoft.png",
// "codeSubmissionTotal" : 24,
// "pledgeTotal" : 0,
// "pledgeGoal" : 500,
// "pledgerCount" : 0,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Kerry Jenkins",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 51
// },
// "popularity" : 28,
// "isTrending" : true,
// "date" : "2019-03-13T08:15:00.011Z",
// "title" : "Flynote",
// "description" : "India-based Flynote, which was launched in April 2018 by Founders Abhinav Prakash, Devvarat Meena, Ankit Abhishek, Parth Pratik, and Shukant Agrawal, is building a global community of travel enthusiasts, known as Flynote Fellows, who travel and experience the world on the company’s sponsorship",
// "numComments" : 2,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Pepsi.png?width=276&name=Pepsi.png",
// "codeSubmissionTotal" : 23,
// "pledgeTotal" : 100,
// "pledgeGoal" : 100,
// "pledgerCount" : 1,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Roland Page",
// "picture" : "https://images.pexels.com/photos/1222271/pexels-photo-1222271.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 82
// },
// "popularity" : 88,
// "isTrending" : true,
// "date" : "2019-10-13T08:15:00.011Z",
// "title" : "Hippo Video",
// "description" : "Hippo Video is a next-gen video marketing SaaS platform that helps marketers create, edit, and personalise video content on a mass scale, as well as analyse responses and convert more viewers into customers. The company, which was founded by Karthi Mariappan, Nilam Chand Jain, and Srinivasan Krishnan in July 2018, has over 200 customers worldwide, including Freshworks, Essilor, and Goulet Pens.",
// "numComments" : 6,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Ebay.png?width=796&name=Ebay.png",
// "codeSubmissionTotal" : 4,
// "pledgeTotal" : 200,
// "pledgeGoal" : 5000,
// "pledgerCount" : 3,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Travis Erickson",
// "picture" : "https://images.pexels.com/photos/1222271/pexels-photo-1222271.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 16
// },
// "popularity" : 138,
// "isTrending" : true,
// "date" : "2019-12-13T08:15:00.011Z",
// "title" : "InterviewBit Academy",
// "description" : "InterviewBit Academy offers computer science education via live online classes to prepare students for jobs in leading technology companies. Students pay only when they land a job beyond a target salary threshold. Their first cohort, which went live on April 15, will train 300 candidates selected from 9,000 applications.",
// "numComments" : 1,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/590px-FedEx_Corporation_-_2016_Logo.svg.png?width=816&name=590px-FedEx_Corporation_-_2016_Logo.svg.png",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 0,
// "pledgeGoal" : 200,
// "pledgerCount" : 0,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Mattie Houston",
// "picture" : "https://images.pexels.com/photos/1024311/pexels-photo-1024311.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 77
// },
// "popularity" : 288,
// "isTrending" : true,
// "date" : "2019-05-13T08:15:00.011Z",
// "title" : "Khatabook",
// "description" : "Launched in October last year, Khatabook is India’s leading multi-language mobile application for small and medium businesses to record cash transactions and track payments. The company, which was founded by Ravish Naresh, Jaideep Poonia, Dhanesh Kumar, and Ashish Sonone, recently crossed over 120,000 weekly active merchants, are growing organically at 40 percent MoM.",
// "numComments" : 1,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/3M.png?width=446&name=3M.png",
// "codeSubmissionTotal" : 0,
// "pledgeTotal" : 50,
// "pledgeGoal" : 500,
// "pledgerCount" : 1,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Leah Rogers",
// "picture" : "https://images.pexels.com/photos/1438072/pexels-photo-1438072.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 14
// },
// "popularity" : 25,
// "isTrending" : true,
// "date" : "2019-03-13T08:15:00.011Z",
// "title" : "Qoala",
// "description" : "Indonesia-based Qoala, which was founded by Harshet Lunani and Tommy Martin in September 2018, is an Insurtech company that works with insurers to offer innovative insurance products for new use cases like flight delays, phone screen damages, e-commerce logistics etc, using a combination of underwriting risk for high frequency events, digital fraud detection, and a simplified, photo-based claims process.",
// "numComments" : 5,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/684px-Olympic_rings_without_rims.svg.png?width=662&name=684px-Olympic_rings_without_rims.svg.png",
// "codeSubmissionTotal" : 0,
// "pledgeTotal" : 400,
// "pledgeGoal" : 8000,
// "pledgerCount" : 400,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Peter Reid",
// "picture" : "https://images.pexels.com/photos/91227/pexels-photo-91227.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 84
// },
// "popularity" : 96,
// "isTrending" : false,
// "date" : "2019-01-13T08:15:00.011Z",
// "title" : "ShopUp",
// "description" : "Bangladesh-based ShopUp is a social commerce platform that helps micro-entrepreneurs in Bangladesh set up and run their businesses on Facebook, which accounts for more than 80 percent of the country’s online commerce sector. Founded by Afeef Zubaer Zaman, Siffat Sarwar, and Ataur Rahim Chowdhury, the ShopUp platform provides tools for sourcing, cataloguing, payments, shipping, and access to business loans.",
// "numComments" : 2,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/WaltDisney.png?width=618&name=WaltDisney.png",
// "codeSubmissionTotal" : 1,
// "pledgeTotal" : 300,
// "pledgeGoal" : 400,
// "pledgerCount" : 10,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Edward George",
// "picture" : "https://images.pexels.com/photos/1082962/pexels-photo-1082962.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 55
// },
// "popularity" : 12,
// "isTrending" : true,
// "date" : "2019-06-13T08:15:00.011Z",
// "title" : "Skillmatics",
// "description" : "Skillmatics, which was founded by Dhvanil Sheth, is a D2C brand that develops innovative educational products and games that enable children to build core skills ranging from math and language to science and logic through systematic play. Skillmatics, which aims to disrupt a $100 billion global industry, has hit $1 million in revenue run rate. They sell across 15+ countries, with a strong focus on North America and is the first Indian brand ever, to sell across Hamleys globally.",
// "numComments" : 1,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/500px-United_Parcel_Service_logo_2014.svg.png?width=350&name=500px-United_Parcel_Service_logo_2014.svg.png",
// "codeSubmissionTotal" : 9,
// "pledgeTotal" : 400,
// "pledgeGoal" : 8000,
// "pledgerCount" : 400,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Lydia Griffin",
// "picture" : "https://images.pexels.com/photos/937481/pexels-photo-937481.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 99
// },
// "popularity" : 285,
// "isTrending" : true,
// "date" : "2019-09-13T08:15:00.011Z",
// "title" : "Telio",
// "description" : "Founded by SyPhong Bui in November 2018, Telio is building Vietnam’s largest B2B commerce company using the power of tech and data. In Vietnam’s highly-fragmented $20 billion FMCG market, Telio enables small retailers to procure goods from its web and mobile platform, replacing the need to deal with multiple wholesalers and providing more choice, better pricing and more efficient logistics.",
// "numComments" : 20,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Gap.png?width=398&name=Gap.png",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 0,
// "pledgeGoal" : 100,
// "pledgerCount" : 0,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Willard Wise",
// "picture" : "https://images.pexels.com/photos/1065084/pexels-photo-1065084.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 25
// },
// "popularity" : 255,
// "isTrending" : true,
// "date" : "2019-02-13T08:15:00.011Z",
// "title" : "Uiza",
// "description" : "Setting up a video streaming infrastructure is complex and expensive, while leveraging YouTube makes it harder to monetize, increase user engagement, and capture user data. Launched in July 2018 by Founders Kevin Nguyen and Gareth Nguyen, Uiza – which is based in both Vietnam and Singapore – is an API platform for video streaming that enables developers and companies to stream videos directly from their own app in a fast and cost-effective manner through a set of simple APIs.",
// "numComments" : 42,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/550px-Honda.svg.png?width=546&name=550px-Honda.svg.png",
// "codeSubmissionTotal" : 245,
// "pledgeTotal" : 40,
// "pledgeGoal" : 40,
// "pledgerCount" : 40,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Geneva Thompson",
// "picture" : "https://images.pexels.com/photos/712521/pexels-photo-712521.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 83
// },
// "popularity" : 25,
// "isTrending" : false,
// "date" : "2019-03-13T08:15:00.011Z",
// "title" : "Vybes",
// "description" : "Launched in January 2019 by Wei Qing Jen and Samuel Lipoff, Singapore-based Vybes is on a mission to enable influencer-driven commerce in today’s social media powered world. Using a proprietary AI engine, Vybes matches influencers with brands, and provides them with a wide array of products and online tools to enable sales to their large follower base. Vybes has over 400,000 influencers in its database and has been onboarding brands at a monthly growth rate of 140 percent.",
// "numComments" : 23,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/BPLogo.jpg?width=534&name=BPLogo.jpg",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 450,
// "pledgeGoal" : 750,
// "pledgerCount" : 4,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Arthur Gutierrez",
// "picture" : "https://images.pexels.com/photos/941693/pexels-photo-941693.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 25
// },
// "popularity" : 86,
// "isTrending" : true,
// "date" : "2019-10-13T08:15:00.011Z",
// "title" : "Zenyum",
// "description" : "Singapore-based Zenyum is a fast-growing dental care brand providing high quality 3D-printed invisible braces to consumers. The dental care market in ASEAN and India is growing rapidly, especially amongst Asia’s burgeoning young, middle class. Zenyum has launched in four countries.",
// "numComments" : 6,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Colgate.png?width=882&name=Colgate.png",
// "codeSubmissionTotal" : 11,
// "pledgeTotal" : 400,
// "pledgeGoal" : 500,
// "pledgerCount" : 10,
// "status" : 0
// }
// ]
// return mySocialData
} | getSocialInformation | identifier_name |
getSocialInformation.js | import { getClient } from '../apiUtilities/commonRequestUtility';
import axios from 'axios';
export function getSocialInformation() {
return axios({
method: "POST",
url: "http://localhost:4000/graphql",
data: {
query: `
{
authorDetails {
popularity
isTrending
title
description
numComments
thumbnail
codeSubmissionTotal
pledgeGoal
pledgerCount
pledgeTotal
status
authors{
name
picture
score
}
}
}
`
}
})
// const mySocialData=[
// {
// "author" : {
// "name" : "Mina Lambert",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 56.3
// },
// "popularity" : 34.13,
// "isTrending" : true,
// "date" : "2019-06-13T08:15:00.011Z",
// "title" : "Private Encrypted Direct Messaging",
// "description" : "React EOS aims to give developers and designers the tools necessary for creating clean, organized, and intentional code from the beginning of the development process. Defining your components, their hierarchy, and how they interact via state and props, can alleviate the need for refactoring, and",
// "numComments" : 10,
// "thumbnail" : "https://inkbotdesign.com/wp-content/uploads/2012/09/Walmart-Logo-Design.webp",
// "codeSubmissionTotal" : 4,
// "pledgeTotal" : 243,
// "pledgeGoal" : 800,
// "pledgerCount" : 24,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Iris Barrett",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 12
// },
// "popularity" : 22,
// "isTrending" : true,
// "date" : "2019-02-13T08:15:00.011Z",
// "title" : "Azani Sport",
// "description" : "India-based Azani, which was founded by Indian squash champion Siddharth Suchde, is a DTC performance sportswear brand that offers high-quality footwear, apparel, and accessories that are both aspirational and accessible to mainstream consumers in emerging markets Read more at: https://yourstory.com/2019/04/sequoia-surge-early-stage-startups-cohort",
// "numComments" : 5,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Old_Nike_logo.jpg?width=444&name=Old_Nike_logo.jpg",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 0, | // "pledgerCount" : 0,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Phyllis Love",
// "picture" : "https://images.pexels.com/photos/774909/pexels-photo-774909.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 43.0
// },
// "popularity" : 14.13,
// "isTrending" : false,
// "date" : "2019-12-10T08:15:00.011Z",
// "title" : "Bobobox",
// "description" : "Launched in June 2018 by Indra Gunawan and Antonius Bong, Bobobox is a tech-first Indonesian capsule hotel brand built for millennial adventurers and smart business travellers. The first hotel the company opened, boasts an 88 percent occupancy rate while 97 percent of guests surveyed said they would return to stay there. Read more at: https://yourstory.com/2019/04/sequoia-surge-early-stage-startups-cohort",
// "numComments" : 5,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/1000px-Coca-Cola_logo.svg.png?width=634&name=1000px-Coca-Cola_logo.svg.png",
// "codeSubmissionTotal" : 20,
// "pledgeTotal" : 25,
// "pledgeGoal" : 100,
// "pledgerCount" : 2,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Cody Hoffman",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 82.5
// },
// "popularity" : 29,
// "isTrending" : false,
// "date" : "2019-06-13T08:15:00.011Z",
// "title" : "Bulbul",
// "description" : "Launched in December 2018, Bulbul is India’s first vernacular live-streaming e-commerce platform. Founded by Sachin Bhatia, Atit Jain, and Sichen (Sianna) Liu, Bulbul is redefining how products are sold in India by providing a live, engaging, and interactive experience for buyers and sellers in most regional languages. Bulbul, which hit 500 transactions per day just 40 days after launch, will pass the 1,000 per day mark by end April and is growing 2x month-on-month.",
// "numComments" : 10,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/544px-Google_2015_logo.svg.png?width=710&name=544px-Google_2015_logo.svg.png",
// "codeSubmissionTotal" : 4,
// "pledgeTotal" : 44,
// "pledgeGoal" : 2000,
// "pledgerCount" : 10,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Zachary Wood",
// "picture" : "https://images.pexels.com/photos/1239291/pexels-photo-1239291.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 22
// },
// "popularity" : 29,
// "isTrending" : true,
// "date" : "2019-10-13T08:15:00.011Z",
// "title" : "DancingMind",
// "description" : "Founded by Jennifer Zhang in September 2018, Singapore-based DancingMind has pioneered a VR-based ‘one to many’ remote therapy solution for stroke, dementia, Parkinson’s, and cognitively impaired patients. The company is making therapy more engaging, affordable, and accessible in a sector where therapists are, globally, in short supply. The startup has signed up 15 healthcare facilities in Singapore and the UK in six months of launch.",
// "numComments" : 2,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/340px-Apple_logo_black.svg.png?width=362&name=340px-Apple_logo_black.svg.png",
// "codeSubmissionTotal" : 42,
// "pledgeTotal" : 0,
// "pledgeGoal" : 2000,
// "pledgerCount" : 0,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Dianna Grant",
// "picture" : "https://images.pexels.com/photos/736716/pexels-photo-736716.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 66
// },
// "popularity" : 299,
// "isTrending" : false,
// "date" : "2019-08-13T08:15:00.011Z",
// "title" : "Doubtnut",
// "description" : "India-based Doubtnut, which was founded by Tanushree Nagori and Aditya Shankar, makes learning easy: just snap a photo of a problem to find a solution and watch videos that help students learn the topic better. Launched in October 2017, Doubtnut is a comprehensive learning app that features a simple user interface in multiple local Indian languages, and uses complex AI and ML technologies to serve up answers and video lessons in response to photos of specific problems.",
// "numComments" : 0,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Microsoft.png?width=790&name=Microsoft.png",
// "codeSubmissionTotal" : 24,
// "pledgeTotal" : 0,
// "pledgeGoal" : 500,
// "pledgerCount" : 0,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Kerry Jenkins",
// "picture" : "https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 51
// },
// "popularity" : 28,
// "isTrending" : true,
// "date" : "2019-03-13T08:15:00.011Z",
// "title" : "Flynote",
// "description" : "India-based Flynote, which was launched in April 2018 by Founders Abhinav Prakash, Devvarat Meena, Ankit Abhishek, Parth Pratik, and Shukant Agrawal, is building a global community of travel enthusiasts, known as Flynote Fellows, who travel and experience the world on the company’s sponsorship",
// "numComments" : 2,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Pepsi.png?width=276&name=Pepsi.png",
// "codeSubmissionTotal" : 23,
// "pledgeTotal" : 100,
// "pledgeGoal" : 100,
// "pledgerCount" : 1,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Roland Page",
// "picture" : "https://images.pexels.com/photos/1222271/pexels-photo-1222271.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 82
// },
// "popularity" : 88,
// "isTrending" : true,
// "date" : "2019-10-13T08:15:00.011Z",
// "title" : "Hippo Video",
// "description" : "Hippo Video is a next-gen video marketing SaaS platform that helps marketers create, edit, and personalise video content on a mass scale, as well as analyse responses and convert more viewers into customers. The company, which was founded by Karthi Mariappan, Nilam Chand Jain, and Srinivasan Krishnan in July 2018, has over 200 customers worldwide, including Freshworks, Essilor, and Goulet Pens.",
// "numComments" : 6,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Ebay.png?width=796&name=Ebay.png",
// "codeSubmissionTotal" : 4,
// "pledgeTotal" : 200,
// "pledgeGoal" : 5000,
// "pledgerCount" : 3,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Travis Erickson",
// "picture" : "https://images.pexels.com/photos/1222271/pexels-photo-1222271.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 16
// },
// "popularity" : 138,
// "isTrending" : true,
// "date" : "2019-12-13T08:15:00.011Z",
// "title" : "InterviewBit Academy",
// "description" : "InterviewBit Academy offers computer science education via live online classes to prepare students for jobs in leading technology companies. Students pay only when they land a job beyond a target salary threshold. Their first cohort, which went live on April 15, will train 300 candidates selected from 9,000 applications.",
// "numComments" : 1,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/590px-FedEx_Corporation_-_2016_Logo.svg.png?width=816&name=590px-FedEx_Corporation_-_2016_Logo.svg.png",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 0,
// "pledgeGoal" : 200,
// "pledgerCount" : 0,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Mattie Houston",
// "picture" : "https://images.pexels.com/photos/1024311/pexels-photo-1024311.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 77
// },
// "popularity" : 288,
// "isTrending" : true,
// "date" : "2019-05-13T08:15:00.011Z",
// "title" : "Khatabook",
// "description" : "Launched in October last year, Khatabook is India’s leading multi-language mobile application for small and medium businesses to record cash transactions and track payments. The company, which was founded by Ravish Naresh, Jaideep Poonia, Dhanesh Kumar, and Ashish Sonone, recently crossed over 120,000 weekly active merchants, are growing organically at 40 percent MoM.",
// "numComments" : 1,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/3M.png?width=446&name=3M.png",
// "codeSubmissionTotal" : 0,
// "pledgeTotal" : 50,
// "pledgeGoal" : 500,
// "pledgerCount" : 1,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Leah Rogers",
// "picture" : "https://images.pexels.com/photos/1438072/pexels-photo-1438072.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 14
// },
// "popularity" : 25,
// "isTrending" : true,
// "date" : "2019-03-13T08:15:00.011Z",
// "title" : "Qoala",
// "description" : "Indonesia-based Qoala, which was founded by Harshet Lunani and Tommy Martin in September 2018, is an Insurtech company that works with insurers to offer innovative insurance products for new use cases like flight delays, phone screen damages, e-commerce logistics etc, using a combination of underwriting risk for high frequency events, digital fraud detection, and a simplified, photo-based claims process.",
// "numComments" : 5,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/684px-Olympic_rings_without_rims.svg.png?width=662&name=684px-Olympic_rings_without_rims.svg.png",
// "codeSubmissionTotal" : 0,
// "pledgeTotal" : 400,
// "pledgeGoal" : 8000,
// "pledgerCount" : 400,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Peter Reid",
// "picture" : "https://images.pexels.com/photos/91227/pexels-photo-91227.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 84
// },
// "popularity" : 96,
// "isTrending" : false,
// "date" : "2019-01-13T08:15:00.011Z",
// "title" : "ShopUp",
// "description" : "Bangladesh-based ShopUp is a social commerce platform that helps micro-entrepreneurs in Bangladesh set up and run their businesses on Facebook, which accounts for more than 80 percent of the country’s online commerce sector. Founded by Afeef Zubaer Zaman, Siffat Sarwar, and Ataur Rahim Chowdhury, the ShopUp platform provides tools for sourcing, cataloguing, payments, shipping, and access to business loans.",
// "numComments" : 2,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/WaltDisney.png?width=618&name=WaltDisney.png",
// "codeSubmissionTotal" : 1,
// "pledgeTotal" : 300,
// "pledgeGoal" : 400,
// "pledgerCount" : 10,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Edward George",
// "picture" : "https://images.pexels.com/photos/1082962/pexels-photo-1082962.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 55
// },
// "popularity" : 12,
// "isTrending" : true,
// "date" : "2019-06-13T08:15:00.011Z",
// "title" : "Skillmatics",
// "description" : "Skillmatics, which was founded by Dhvanil Sheth, is a D2C brand that develops innovative educational products and games that enable children to build core skills ranging from math and language to science and logic through systematic play. Skillmatics, which aims to disrupt a $100 billion global industry, has hit $1 million in revenue run rate. They sell across 15+ countries, with a strong focus on North America and is the first Indian brand ever, to sell across Hamleys globally.",
// "numComments" : 1,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/500px-United_Parcel_Service_logo_2014.svg.png?width=350&name=500px-United_Parcel_Service_logo_2014.svg.png",
// "codeSubmissionTotal" : 9,
// "pledgeTotal" : 400,
// "pledgeGoal" : 8000,
// "pledgerCount" : 400,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Lydia Griffin",
// "picture" : "https://images.pexels.com/photos/937481/pexels-photo-937481.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 99
// },
// "popularity" : 285,
// "isTrending" : true,
// "date" : "2019-09-13T08:15:00.011Z",
// "title" : "Telio",
// "description" : "Founded by SyPhong Bui in November 2018, Telio is building Vietnam’s largest B2B commerce company using the power of tech and data. In Vietnam’s highly-fragmented $20 billion FMCG market, Telio enables small retailers to procure goods from its web and mobile platform, replacing the need to deal with multiple wholesalers and providing more choice, better pricing and more efficient logistics.",
// "numComments" : 20,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Gap.png?width=398&name=Gap.png",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 0,
// "pledgeGoal" : 100,
// "pledgerCount" : 0,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Willard Wise",
// "picture" : "https://images.pexels.com/photos/1065084/pexels-photo-1065084.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 25
// },
// "popularity" : 255,
// "isTrending" : true,
// "date" : "2019-02-13T08:15:00.011Z",
// "title" : "Uiza",
// "description" : "Setting up a video streaming infrastructure is complex and expensive, while leveraging YouTube makes it harder to monetize, increase user engagement, and capture user data. Launched in July 2018 by Founders Kevin Nguyen and Gareth Nguyen, Uiza – which is based in both Vietnam and Singapore – is an API platform for video streaming that enables developers and companies to stream videos directly from their own app in a fast and cost-effective manner through a set of simple APIs.",
// "numComments" : 42,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Blog_pics/550px-Honda.svg.png?width=546&name=550px-Honda.svg.png",
// "codeSubmissionTotal" : 245,
// "pledgeTotal" : 40,
// "pledgeGoal" : 40,
// "pledgerCount" : 40,
// "status" : 0
// },
// {
// "author" : {
// "name" : "Geneva Thompson",
// "picture" : "https://images.pexels.com/photos/712521/pexels-photo-712521.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 83
// },
// "popularity" : 25,
// "isTrending" : false,
// "date" : "2019-03-13T08:15:00.011Z",
// "title" : "Vybes",
// "description" : "Launched in January 2019 by Wei Qing Jen and Samuel Lipoff, Singapore-based Vybes is on a mission to enable influencer-driven commerce in today’s social media powered world. Using a proprietary AI engine, Vybes matches influencers with brands, and provides them with a wide array of products and online tools to enable sales to their large follower base. Vybes has over 400,000 influencers in its database and has been onboarding brands at a monthly growth rate of 140 percent.",
// "numComments" : 23,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/BPLogo.jpg?width=534&name=BPLogo.jpg",
// "codeSubmissionTotal" : 2,
// "pledgeTotal" : 450,
// "pledgeGoal" : 750,
// "pledgerCount" : 4,
// "status" : 1
// },
// {
// "author" : {
// "name" : "Arthur Gutierrez",
// "picture" : "https://images.pexels.com/photos/941693/pexels-photo-941693.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500",
// "score" : 25
// },
// "popularity" : 86,
// "isTrending" : true,
// "date" : "2019-10-13T08:15:00.011Z",
// "title" : "Zenyum",
// "description" : "Singapore-based Zenyum is a fast-growing dental care brand providing high quality 3D-printed invisible braces to consumers. The dental care market in ASEAN and India is growing rapidly, especially amongst Asia’s burgeoning young, middle class. Zenyum has launched in four countries.",
// "numComments" : 6,
// "thumbnail" : "https://www.bluleadz.com/hs-fs/hubfs/Brand%20Logos/Colgate.png?width=882&name=Colgate.png",
// "codeSubmissionTotal" : 11,
// "pledgeTotal" : 400,
// "pledgeGoal" : 500,
// "pledgerCount" : 10,
// "status" : 0
// }
// ]
// return mySocialData
} | // "pledgeGoal" : 200, | random_line_split |
gen_plantuml.py | import os
import re
import requests
from .gen_base import ReportGenerator, CmdLineGenerator
# from pydbg import dbg
import logging
from common.logger import config_log
import functools
from common.url_to_data import url_to_data
from common.plantuml import deflate_and_encode
import asyncio
from async_lru import alru_cache
import aiohttp
from generate_code.plantuml_html_scan import extract_image_url
log = logging.getLogger(__name__)
config_log(log)
class PySourceAsPlantUml(ReportGenerator):
def __init__(self, ast=True):
ReportGenerator.__init__(self, ast=ast)
self.result = ""
def calc_plant_uml(self, optimise=False):
"""Optimise not used for anything, so use for sorting attributes and members"""
self.result = ""
classnames = list(self.pmodel.classlist.keys())
for self.aclass in classnames:
self.classentry = self.pmodel.classlist[self.aclass]
# Main class output
self.result += "\nclass %s {\n" % self.aclass
for attrobj in self.classentry.attrs:
self.result += " %s\n" % attrobj.attrname
for adef in self.classentry.defs:
self.result += " %s()\n" % adef
# end class
self.result += "}\n\n"
# class inheritance relationships
if self.classentry.classesinheritsfrom:
for parentclass in self.classentry.classesinheritsfrom:
self.result += "%s <|- %s\n" % (parentclass, self.aclass)
# aggregation relationships showing class dependency and composition relationships
for attrobj in self.classentry.attrs:
compositescreated = self._GetCompositeCreatedClassesFor(attrobj.attrname)
for c in compositescreated:
if "many" in attrobj.attrtype:
line = "*-->"
cardinality = "*"
else:
line = "..>"
cardinality = ""
if cardinality:
connector = '%s "%s"' % (line, cardinality)
else:
connector = line
self.result += "%s %s %s : %s\n" % (self.aclass, connector, c, attrobj.attrname)
def GenReportDump(self): # Override template method entirely and do it ourselves
"""
This method gets called by the __str__ of the parent ReportGenerator
We override here to do our own generation rather than the template method design pattern
approach of ReportGenerator class
"""
if not self.result: # Update: THIS MAY BE VALIDLY EMPTY if there are no classes in a file
# print "Warning, should call calc_plant_uml() after .Parse() and before str(p) - repairing..."
self.calc_plant_uml()
return self.result
class CmdLinePythonToPlantUml(CmdLineGenerator):
"""
The only thing we inherit from CmdLineGenerator is the constructor with gives us self.directories etc.
Everything else gets triggered by the ExportTo()
"""
def _GenerateAuxilliaryClasses(self):
pass
def ExportTo(self, outpath=None): # Override and redefine Template method entirely
"""
"""
globbed = self.directories
self.p = PySourceAsPlantUml()
self.p.optionModuleAsClass = self.optionModuleAsClass
self.p.verbose = self.verbose
for f in globbed:
self.p.Parse(f)
self.p.calc_plant_uml()
plant_uml_text = str(self.p)
print(plant_uml_text)
# Optionally generate image via plant uml server
outpng = outpath
if outpng != "nopng":
if not ".png" in outpng.lower():
print("output filename %s must have .png in the name" % outpng)
exit(1)
print("Generating plantuml diagram %s..." % outpng)
plant_uml_create_png(plant_uml_text, outpng)
# Windows specific solution
# os.system(outpng) # launch notepad or whatever on it
# Cross platform solution - conda install pillow
# This will open the image in your default image viewer.
from PIL import Image
img = Image.open(outpng)
img.show()
print("Done!")
@alru_cache(maxsize=32)
async def | (plant_uml_txt: str, plantuml_server_local_url: str) -> str:
"""Async version of getting image url from plantuml. This does not get the actual image,
that is a separate call - unlike in a browser where that second call is done automatically.
Uses `url_to_data`, and only returns extracted url, no response object is returned by this
routine, cos it was never used by anyone recently.
I'm caching the result, though truthfully the call to `url_to_data` is already cached,
so it probably doesn't make much difference.
"""
PLANTUML_URL_ON_INTERNET = "http://www.plantuml.com/plantuml/uml"
# PLANTUML_URL_LOCAL = "http://localhost:8080/plantuml/uml"
plant_uml_server = plantuml_server_local_url if plantuml_server_local_url else PLANTUML_URL_ON_INTERNET
log.info("plant_uml_server calculated to be %s" % (plant_uml_server,))
try:
# url = os.path.join(plant_uml_server, deflate_and_encode(plant_uml_txt)) # fails on windows cos \ char is not proper url
url = plant_uml_server + "/" + deflate_and_encode(plant_uml_txt)
data, status_code = await url_to_data(url)
log.info(f"Response for url {url} from plant_uml_server status_code is {status_code}")
response_text = data.decode('utf-8')
except (ConnectionError,
requests.exceptions.RequestException,
aiohttp.client_exceptions.ClientConnectorError) as e:
# log.exception("Trying to render using plantuml server %s str(e)" % plant_uml_server)
log.error(
f"Error trying to fetch initial html from plantuml server {plant_uml_server} {str(e)}")
return None
except asyncio.TimeoutError as e: # there is no string repr of this exception
log.error("TimeoutError getting plantuml html")
raise
if status_code == 200:
log.info("plant_uml_server responded with 200 ok, next we try to extract diagram url from this HTML...")
# Uncomment for debugging
# log.info("plant_uml_server responded with 200 ok, see plantuml_server_response.html for full page response")
# with open("plantuml_server_response.html", "w") as f:
# f.write(response_text)
image_url = extract_image_url(response_text)
if image_url:
# this is likely referencing localhost due to calc_plantuml_server_url() giving us a localhost
image_url = image_url[0]
if image_url.startswith("//"):
from urllib.parse import urlparse
o = urlparse(plant_uml_server)
image_url = f"{o.scheme}:{image_url}" # add the scheme back in e.g. http
else:
image_url = None
log.error(f"could not extract image from plant_uml_server - plantuml server html format has probably changed - please report this to https://github.com/abulka/pynsource/issues")
log.info("extracted actual diagram image_url of %s" % image_url)
return image_url
else:
log.error(f"plant_uml_server responded with {status_code} ok")
return None
@functools.lru_cache(maxsize=32)
def plant_uml_create_png_and_return_image_url(plant_uml_txt):
    """
    Convert the plantuml text into a uml image url, using the plantuml server on the internet.

    Results are cached on the plantuml text, so repeated renders of the same
    diagram do not hit the network again.

    :param plant_uml_txt: plant uml text syntax
    :return: a tuple (image_url or None, response or None) - response is None only
             when the server could not be contacted at all
    """
    PLANTUML_URL_ON_INTERNET = "http://www.plantuml.com/plantuml/uml"
    plant_uml_server = PLANTUML_URL_ON_INTERNET
    log.info("plant_uml_server calculated to be %s" % (plant_uml_server,))
    try:
        # Build the url with an explicit "/" - os.path.join would insert "\" on
        # Windows, which is not a valid url separator (same fix as the async variant).
        url = plant_uml_server + "/" + deflate_and_encode(plant_uml_txt)
        response = requests.get(url)
    except (ConnectionError, requests.exceptions.RequestException) as e:
        log.error(f"Error trying to fetch initial html from plantuml server {plant_uml_server} {str(e)}")
        return None, None
    if response.status_code == 200:
        log.info("plant_uml_server responded with 200 ok")
        # Need to find the fragment:
        #   <p id="diagram">
        #     <img src="http://www.plantuml.com:80/plantuml/png/..." alt="PlantUML diagram" onload="doneLoading()">
        #   </p>
        # in the response html.
        regex = r'.*<p id="diagram".*\s*<.*img src=\"(.*?)\"'
        image_url = re.findall(regex, response.text, re.MULTILINE)
        if image_url:
            # Take the first match; this may reference localhost when a local
            # plantuml server is in use.
            image_url = image_url[0]
        else:
            image_url = None
        return image_url, response
    else:
        # Fixed log wording: the original said "responded with %d ok" on the
        # non-200 (error) path, which was misleading.
        log.error("plant_uml_server responded with %d" % (response.status_code,))
        return None, response
def plant_uml_create_png(plant_uml_txt, output_filename):
    """
    Render plantuml text to a .png file on disk, via the plantuml server.

    :param plant_uml_txt: plant uml text syntax
    :param output_filename: path of the .png file to write
    """
    image_url, response = plant_uml_create_png_and_return_image_url(plant_uml_txt)
    print(image_url)
    if image_url:
        # Second request actually fetches the rendered image bytes.
        response = requests.get(image_url)
        if response.status_code == 200:
            with open(output_filename, "wb") as fp:
                fp.write(response.content)
        else:
            print("ok getting generating uml but error pulling down image")
    elif response is None:
        # Bug fix: the upstream call returns (None, None) when the server could
        # not be contacted at all - the original crashed here with
        # AttributeError on response.status_code.
        print("error calling plantuml server - could not connect")
    else:
        print("error calling plantuml server", response.status_code)
# New
def displaymodel_to_plantuml(displaymodel):
    """
    Serialise a display model (graph of nodes and edges) into plantuml
    class-diagram text.

    Nodes with a ``comment`` attribute become plantuml notes; node ids
    containing ``.py`` are rendered as module pseudo-classes (dots replaced
    with underscores).  Edges are drawn per ``uml_edge_type``, with special
    "contains" handling when the target is a module.
    """
    # TODO Should use AlsmMgr.calc_plant_uml()
    edge_lookup = {
        "generalisation": "--|>",
        "composition": "<--",
        "association": "..",
    }  # TODO need to persist and represent cardinality, which is in the pmodel after all !!
    chunks = []
    for node in displaymodel.graph.nodes:
        if hasattr(node, "comment"):
            chunks.append(f"note as {node.id}\n{node.comment}\nend note\n\n")
            continue
        if ".py" in node.id:
            # Module pseudo-class: dots are not legal in plantuml identifiers.
            header = f"class {node.id.replace('.', '_')} <<module>> << (M,lightgrey) >> #white {{\n"
        else:
            header = f"class {node.id} {{\n"
        attr_lines = "\n".join(f"  {attr}" for attr in node.attrs)
        meth_lines = "\n".join(f"  {meth}()" for meth in node.meths)
        chunks.append(header + attr_lines + "\n" + meth_lines + "\n}\n\n")
    for edge in displaymodel.graph.edges:
        arrow = edge_lookup[edge['uml_edge_type']]
        # See issue https://github.com/abulka/pynsource/issues/78 - edge labels
        # showing types were dropped; labels are only used for module containment.
        label = ""
        src = edge['source'].id
        dst = edge['target'].id
        dst_is_module = '.' in dst
        if '.' in src or '.' in dst:
            src = src.replace('.', '_')
            dst = dst.replace('.', '_')
        if dst_is_module:  # its always the to side that is the module
            if edge['uml_edge_type'] == "association":
                label = ": contains >"
                chunks.append(f"{dst} {arrow} {src} {label}\n")
                chunks.append(f"{dst} {arrow}[hidden] {src} {label}\n")
            else:  # normal directional ref
                chunks.append(f"{src} {arrow} {dst} {label}\n")
        else:
            chunks.append(f"{src} {arrow} {dst} {label}\n")
            if edge['uml_edge_type'] == "association":
                chunks.append(f"{src} {arrow}[hidden] {dst} {label}\n")
    return "".join(chunks)
| plant_uml_create_png_and_return_image_url_async | identifier_name |
gen_plantuml.py | import os
import re
import requests
from .gen_base import ReportGenerator, CmdLineGenerator
# from pydbg import dbg
import logging
from common.logger import config_log
import functools
from common.url_to_data import url_to_data
from common.plantuml import deflate_and_encode
import asyncio
from async_lru import alru_cache
import aiohttp
from generate_code.plantuml_html_scan import extract_image_url
log = logging.getLogger(__name__)
config_log(log)
class PySourceAsPlantUml(ReportGenerator):
    # Report generator that renders the parsed python model (self.pmodel,
    # populated by the inherited Parse()) as plantuml class-diagram text,
    # accumulated into self.result.

    def __init__(self, ast=True):
        # ast=True is forwarded to ReportGenerator; presumably it selects the
        # ast-based parser - TODO confirm against gen_base.ReportGenerator.
        ReportGenerator.__init__(self, ast=ast)
        self.result = ""  # the generated plantuml text

    def calc_plant_uml(self, optimise=False):
        """Optimise not used for anything, so use for sorting attributes and members"""
        # Rebuilds self.result from scratch on every call.
        self.result = ""
        classnames = list(self.pmodel.classlist.keys())
        # NOTE(review): self.aclass and self.classentry are instance attributes,
        # not locals - kept as-is in case other code reads them; confirm before
        # refactoring to local variables.
        for self.aclass in classnames:
            self.classentry = self.pmodel.classlist[self.aclass]
            # Main class output
            self.result += "\nclass %s {\n" % self.aclass
            for attrobj in self.classentry.attrs:
                self.result += " %s\n" % attrobj.attrname
            for adef in self.classentry.defs:
                self.result += " %s()\n" % adef
            # end class
            self.result += "}\n\n"
            # class inheritance relationships
            if self.classentry.classesinheritsfrom:
                for parentclass in self.classentry.classesinheritsfrom:
                    self.result += "%s <|- %s\n" % (parentclass, self.aclass)
            # aggregation relationships showing class dependency and composition relationships
            for attrobj in self.classentry.attrs:
                compositescreated = self._GetCompositeCreatedClassesFor(attrobj.attrname)
                for c in compositescreated:
                    # "many" in the attr type gets a starred composition arrow
                    # with a "*" cardinality; otherwise a plain dependency.
                    if "many" in attrobj.attrtype:
                        line = "*-->"
                        cardinality = "*"
                    else:
                        line = "..>"
                        cardinality = ""
                    if cardinality:
                        connector = '%s "%s"' % (line, cardinality)
                    else:
                        connector = line
                    self.result += "%s %s %s : %s\n" % (self.aclass, connector, c, attrobj.attrname)

    def GenReportDump(self):  # Override template method entirely and do it ourselves
        """
        This method gets called by the __str__ of the parent ReportGenerator
        We override here to do our own generation rather than the template method design pattern
        approach of ReportGenerator class
        """
        if not self.result:  # Update: THIS MAY BE VALIDLY EMPTY if there are no classes in a file
            # print "Warning, should call calc_plant_uml() after .Parse() and before str(p) - repairing..."
            self.calc_plant_uml()
        return self.result
class CmdLinePythonToPlantUml(CmdLineGenerator):
    """
    The only thing we inherit from CmdLineGenerator is the constructor with gives us self.directories etc.
    Everything else gets triggered by the ExportTo()
    """

    def _GenerateAuxilliaryClasses(self):
        # No auxiliary classes needed for plantuml text output.
        pass

    def ExportTo(self, outpath=None):  # Override and redefine Template method entirely
        """
        Parse all files in self.directories, print the resulting plantuml text,
        and optionally render it to a .png via the plantuml server.

        :param outpath: path of the .png to generate, or "nopng"/None to skip
                        image generation.
        """
        globbed = self.directories
        self.p = PySourceAsPlantUml()
        self.p.optionModuleAsClass = self.optionModuleAsClass
        self.p.verbose = self.verbose
        for f in globbed:
            self.p.Parse(f)
        self.p.calc_plant_uml()
        plant_uml_text = str(self.p)
        print(plant_uml_text)
        # Optionally generate image via plant uml server.
        # Bug fix: guard against None - the original crashed on
        # outpng.lower() when ExportTo() was called with the default outpath.
        outpng = outpath
        if outpng and outpng != "nopng":
            if ".png" not in outpng.lower():
                print("output filename %s must have .png in the name" % outpng)
                exit(1)
            print("Generating plantuml diagram %s..." % outpng)
            plant_uml_create_png(plant_uml_text, outpng)
            # Cross platform image display - requires pillow (conda install pillow).
            # This will open the image in your default image viewer.
            from PIL import Image
            img = Image.open(outpng)
            img.show()
        print("Done!")
@alru_cache(maxsize=32)
async def plant_uml_create_png_and_return_image_url_async(plant_uml_txt: str, plantuml_server_local_url: str) -> str:
    """Async version of getting image url from plantuml. This does not get the actual image,
    that is a separate call - unlike in a browser where that second call is done automatically.
    Uses `url_to_data`, and only returns extracted url, no response object is returned by this
    routine, cos it was never used by anyone recently.
    I'm caching the result, though truthfully the call to `url_to_data` is already cached,
    so it probably doesn't make much difference.

    Returns None on connection failure or when no image url could be extracted;
    re-raises asyncio.TimeoutError.
    """
    PLANTUML_URL_ON_INTERNET = "http://www.plantuml.com/plantuml/uml"
    # PLANTUML_URL_LOCAL = "http://localhost:8080/plantuml/uml"
    # Fall back to the internet server when no local server url was supplied.
    plant_uml_server = plantuml_server_local_url if plantuml_server_local_url else PLANTUML_URL_ON_INTERNET
    log.info("plant_uml_server calculated to be %s" % (plant_uml_server,))
    try:
        # url = os.path.join(plant_uml_server, deflate_and_encode(plant_uml_txt)) # fails on windows cos \ char is not proper url
        url = plant_uml_server + "/" + deflate_and_encode(plant_uml_txt)
        # url_to_data is awaited and returns (bytes, http status code).
        data, status_code = await url_to_data(url)
        log.info(f"Response for url {url} from plant_uml_server status_code is {status_code}")
        response_text = data.decode('utf-8')
    except (ConnectionError,
            requests.exceptions.RequestException,
            aiohttp.client_exceptions.ClientConnectorError) as e:
        # log.exception("Trying to render using plantuml server %s str(e)" % plant_uml_server)
        log.error(
            f"Error trying to fetch initial html from plantuml server {plant_uml_server} {str(e)}")
        return None
    except asyncio.TimeoutError as e:  # there is no string repr of this exception
        # Timeouts are logged but propagated - the caller decides how to recover.
        log.error("TimeoutError getting plantuml html")
        raise
    if status_code == 200:
        log.info("plant_uml_server responded with 200 ok, next we try to extract diagram url from this HTML...")
        # Uncomment for debugging
        # log.info("plant_uml_server responded with 200 ok, see plantuml_server_response.html for full page response")
        # with open("plantuml_server_response.html", "w") as f:
        #     f.write(response_text)
        image_url = extract_image_url(response_text)
        if image_url:
            # this is likely referencing localhost due to calc_plantuml_server_url() giving us a localhost
            image_url = image_url[0]
            # A protocol-relative url ("//host/...") needs its scheme restored
            # from the server url we queried.
            if image_url.startswith("//"):
                from urllib.parse import urlparse
                o = urlparse(plant_uml_server)
                image_url = f"{o.scheme}:{image_url}"  # add the scheme back in e.g. http
        else:
            image_url = None
            log.error(f"could not extract image from plant_uml_server - plantuml server html format has probably changed - please report this to https://github.com/abulka/pynsource/issues")
        log.info("extracted actual diagram image_url of %s" % image_url)
        return image_url
    else:
        # NOTE(review): message says "ok" but this is the non-200 error path -
        # the wording looks wrong.
        log.error(f"plant_uml_server responded with {status_code} ok")
        return None
@functools.lru_cache(maxsize=32)
def plant_uml_create_png_and_return_image_url(plant_uml_txt):
    """
    Convert the plantuml text into a uml image url, using the plantuml server on the internet.

    Results are cached on the plantuml text, so repeated renders of the same
    diagram do not hit the network again.

    :param plant_uml_txt: plant uml text syntax
    :return: a tuple (image_url or None, response or None) - response is None only
             when the server could not be contacted at all
    """
    PLANTUML_URL_ON_INTERNET = "http://www.plantuml.com/plantuml/uml"
    plant_uml_server = PLANTUML_URL_ON_INTERNET
    log.info("plant_uml_server calculated to be %s" % (plant_uml_server,))
    try:
        # Build the url with an explicit "/" - os.path.join would insert "\" on
        # Windows, which is not a valid url separator (same fix as the async variant).
        url = plant_uml_server + "/" + deflate_and_encode(plant_uml_txt)
        response = requests.get(url)
    except (ConnectionError, requests.exceptions.RequestException) as e:
        log.error(f"Error trying to fetch initial html from plantuml server {plant_uml_server} {str(e)}")
        return None, None
    if response.status_code == 200:
        log.info("plant_uml_server responded with 200 ok")
        # Need to find the fragment:
        #   <p id="diagram">
        #     <img src="http://www.plantuml.com:80/plantuml/png/..." alt="PlantUML diagram" onload="doneLoading()">
        #   </p>
        # in the response html.
        regex = r'.*<p id="diagram".*\s*<.*img src=\"(.*?)\"'
        image_url = re.findall(regex, response.text, re.MULTILINE)
        if image_url:
            # Take the first match; this may reference localhost when a local
            # plantuml server is in use.
            image_url = image_url[0]
        else:
            image_url = None
        return image_url, response
    else:
        # Fixed log wording: the original said "responded with %d ok" on the
        # non-200 (error) path, which was misleading.
        log.error("plant_uml_server responded with %d" % (response.status_code,))
        return None, response
def plant_uml_create_png(plant_uml_txt, output_filename):
    """
    Render plantuml text to a .png file on disk, via the plantuml server.

    :param plant_uml_txt: plant uml text syntax
    :param output_filename: path of the .png file to write
    """
    image_url, response = plant_uml_create_png_and_return_image_url(plant_uml_txt)
    print(image_url)
    if image_url:
        # Second request actually fetches the rendered image bytes.
        response = requests.get(image_url)
        if response.status_code == 200:
            with open(output_filename, "wb") as fp:
                fp.write(response.content)
        else:
            print("ok getting generating uml but error pulling down image")
    elif response is None:
        # Bug fix: the upstream call returns (None, None) when the server could
        # not be contacted at all - the original crashed here with
        # AttributeError on response.status_code.
        print("error calling plantuml server - could not connect")
    else:
        print("error calling plantuml server", response.status_code)
# New
def displaymodel_to_plantuml(displaymodel):
# TODO Should use AlsmMgr.calc_plant_uml()
| edge_lookup = {
"generalisation" : "--|>",
"composition" : "<--",
"association" : "..",
} # TODO need to persist and represent cardinality, which is in the pmodel after all !!
result = ""
for node in displaymodel.graph.nodes:
if hasattr(node, "comment"):
result += f"note as {node.id}\n"
result += node.comment + "\n"
result += "end note\n\n"
else:
if ".py" in node.id:
colour = "<< (M,lightgrey) >> #white"
name = node.id.replace('.', '_')
result += f"class {name} <<module>> {colour} {{\n"
else:
result += f"class {node.id} {{\n"
result += "\n".join([f" {attr}" for attr in node.attrs])
result += "\n"
result += "\n".join([f" {meth}()" for meth in node.meths])
result += "\n}\n\n"
for edge in displaymodel.graph.edges:
line = edge_lookup[edge['uml_edge_type']]
# See issue https://github.com/abulka/pynsource/issues/78
# This code displays types instead of attributes, which is not useful, so stop doing it for now
# label = f": {edge['source'].id}" if edge['uml_edge_type'] in ("composition",) else ""
# TODO but in alsm mode, we can have edge labels back! Should use AlsmMgr.calc_plant_uml()
label = ""
_from = edge['source'].id
_to = edge['target'].id
_from_is_module = '.' in _from
_to_is_module = '.' in _to
if '.' in _from or '.' in _to:
_from = _from.replace('.', '_')
_to = _to.replace('.', '_')
if _to_is_module: # its always the to side that is the module
if edge['uml_edge_type'] == "association":
label = ": contains >"
result += f"{_to} {line} {_from} {label}\n"
result += f"{_to} {line}[hidden] {_from} {label}\n"
else: # normal directional ref
result += f"{_from} {line} {_to} {label}\n"
else:
result += f"{_from} {line} {_to} {label}\n"
if edge['uml_edge_type'] == "association":
result += f"{_from} {line}[hidden] {_to} {label}\n"
return result | identifier_body | |
gen_plantuml.py | import os
import re
import requests
from .gen_base import ReportGenerator, CmdLineGenerator
# from pydbg import dbg
import logging
from common.logger import config_log
import functools
from common.url_to_data import url_to_data
from common.plantuml import deflate_and_encode
import asyncio
from async_lru import alru_cache
import aiohttp
from generate_code.plantuml_html_scan import extract_image_url
log = logging.getLogger(__name__)
config_log(log)
class PySourceAsPlantUml(ReportGenerator):
    # Report generator that renders the parsed python model (self.pmodel,
    # populated by the inherited Parse()) as plantuml class-diagram text,
    # accumulated into self.result.

    def __init__(self, ast=True):
        # ast=True is forwarded to ReportGenerator; presumably it selects the
        # ast-based parser - TODO confirm against gen_base.ReportGenerator.
        ReportGenerator.__init__(self, ast=ast)
        self.result = ""  # the generated plantuml text

    def calc_plant_uml(self, optimise=False):
        """Optimise not used for anything, so use for sorting attributes and members"""
        # Rebuilds self.result from scratch on every call.
        self.result = ""
        classnames = list(self.pmodel.classlist.keys())
        # NOTE(review): self.aclass and self.classentry are instance attributes,
        # not locals - kept as-is in case other code reads them; confirm before
        # refactoring to local variables.
        for self.aclass in classnames:
            self.classentry = self.pmodel.classlist[self.aclass]
            # Main class output
            self.result += "\nclass %s {\n" % self.aclass
            for attrobj in self.classentry.attrs:
                self.result += " %s\n" % attrobj.attrname
            for adef in self.classentry.defs:
                self.result += " %s()\n" % adef
            # end class
            self.result += "}\n\n"
            # class inheritance relationships
            if self.classentry.classesinheritsfrom:
                for parentclass in self.classentry.classesinheritsfrom:
                    self.result += "%s <|- %s\n" % (parentclass, self.aclass)
            # aggregation relationships showing class dependency and composition relationships
            for attrobj in self.classentry.attrs:
                compositescreated = self._GetCompositeCreatedClassesFor(attrobj.attrname)
                for c in compositescreated:
                    # "many" in the attr type gets a starred composition arrow
                    # with a "*" cardinality; otherwise a plain dependency.
                    if "many" in attrobj.attrtype:
                        line = "*-->"
                        cardinality = "*"
                    else:
                        line = "..>"
                        cardinality = ""
                    if cardinality:
                        connector = '%s "%s"' % (line, cardinality)
                    else:
                        connector = line
                    self.result += "%s %s %s : %s\n" % (self.aclass, connector, c, attrobj.attrname)

    def GenReportDump(self):  # Override template method entirely and do it ourselves
        """
        This method gets called by the __str__ of the parent ReportGenerator
        We override here to do our own generation rather than the template method design pattern
        approach of ReportGenerator class
        """
        if not self.result:  # Update: THIS MAY BE VALIDLY EMPTY if there are no classes in a file
            # print "Warning, should call calc_plant_uml() after .Parse() and before str(p) - repairing..."
            self.calc_plant_uml()
        return self.result
class CmdLinePythonToPlantUml(CmdLineGenerator):
"""
The only thing we inherit from CmdLineGenerator is the constructor with gives us self.directories etc.
Everything else gets triggered by the ExportTo()
"""
def _GenerateAuxilliaryClasses(self):
pass
def ExportTo(self, outpath=None): # Override and redefine Template method entirely
"""
"""
globbed = self.directories
self.p = PySourceAsPlantUml()
self.p.optionModuleAsClass = self.optionModuleAsClass
self.p.verbose = self.verbose
for f in globbed:
self.p.Parse(f)
self.p.calc_plant_uml()
plant_uml_text = str(self.p)
print(plant_uml_text)
# Optionally generate image via plant uml server
outpng = outpath
if outpng != "nopng":
if not ".png" in outpng.lower():
print("output filename %s must have .png in the name" % outpng)
exit(1)
print("Generating plantuml diagram %s..." % outpng)
plant_uml_create_png(plant_uml_text, outpng)
# Windows specific solution
# os.system(outpng) # launch notepad or whatever on it
# Cross platform solution - conda install pillow | img.show()
print("Done!")
@alru_cache(maxsize=32)
async def plant_uml_create_png_and_return_image_url_async(plant_uml_txt: str, plantuml_server_local_url: str) -> str:
    """Async version of getting image url from plantuml. This does not get the actual image,
    that is a separate call - unlike in a browser where that second call is done automatically.
    Uses `url_to_data`, and only returns extracted url, no response object is returned by this
    routine, cos it was never used by anyone recently.
    I'm caching the result, though truthfully the call to `url_to_data` is already cached,
    so it probably doesn't make much difference.

    Returns None on connection failure or when no image url could be extracted;
    re-raises asyncio.TimeoutError.
    """
    PLANTUML_URL_ON_INTERNET = "http://www.plantuml.com/plantuml/uml"
    # PLANTUML_URL_LOCAL = "http://localhost:8080/plantuml/uml"
    # Fall back to the internet server when no local server url was supplied.
    plant_uml_server = plantuml_server_local_url if plantuml_server_local_url else PLANTUML_URL_ON_INTERNET
    log.info("plant_uml_server calculated to be %s" % (plant_uml_server,))
    try:
        # url = os.path.join(plant_uml_server, deflate_and_encode(plant_uml_txt)) # fails on windows cos \ char is not proper url
        url = plant_uml_server + "/" + deflate_and_encode(plant_uml_txt)
        # url_to_data is awaited and returns (bytes, http status code).
        data, status_code = await url_to_data(url)
        log.info(f"Response for url {url} from plant_uml_server status_code is {status_code}")
        response_text = data.decode('utf-8')
    except (ConnectionError,
            requests.exceptions.RequestException,
            aiohttp.client_exceptions.ClientConnectorError) as e:
        # log.exception("Trying to render using plantuml server %s str(e)" % plant_uml_server)
        log.error(
            f"Error trying to fetch initial html from plantuml server {plant_uml_server} {str(e)}")
        return None
    except asyncio.TimeoutError as e:  # there is no string repr of this exception
        # Timeouts are logged but propagated - the caller decides how to recover.
        log.error("TimeoutError getting plantuml html")
        raise
    if status_code == 200:
        log.info("plant_uml_server responded with 200 ok, next we try to extract diagram url from this HTML...")
        # Uncomment for debugging
        # log.info("plant_uml_server responded with 200 ok, see plantuml_server_response.html for full page response")
        # with open("plantuml_server_response.html", "w") as f:
        #     f.write(response_text)
        image_url = extract_image_url(response_text)
        if image_url:
            # this is likely referencing localhost due to calc_plantuml_server_url() giving us a localhost
            image_url = image_url[0]
            # A protocol-relative url ("//host/...") needs its scheme restored
            # from the server url we queried.
            if image_url.startswith("//"):
                from urllib.parse import urlparse
                o = urlparse(plant_uml_server)
                image_url = f"{o.scheme}:{image_url}"  # add the scheme back in e.g. http
        else:
            image_url = None
            log.error(f"could not extract image from plant_uml_server - plantuml server html format has probably changed - please report this to https://github.com/abulka/pynsource/issues")
        log.info("extracted actual diagram image_url of %s" % image_url)
        return image_url
    else:
        # NOTE(review): message says "ok" but this is the non-200 error path -
        # the wording looks wrong.
        log.error(f"plant_uml_server responded with {status_code} ok")
        return None
@functools.lru_cache(maxsize=32)
def plant_uml_create_png_and_return_image_url(plant_uml_txt):
    """
    Convert the plantuml text into a uml image url, using the plantuml server on the internet.

    Results are cached on the plantuml text, so repeated renders of the same
    diagram do not hit the network again.

    :param plant_uml_txt: plant uml text syntax
    :return: a tuple (image_url or None, response or None) - response is None only
             when the server could not be contacted at all
    """
    PLANTUML_URL_ON_INTERNET = "http://www.plantuml.com/plantuml/uml"
    plant_uml_server = PLANTUML_URL_ON_INTERNET
    log.info("plant_uml_server calculated to be %s" % (plant_uml_server,))
    try:
        # Build the url with an explicit "/" - os.path.join would insert "\" on
        # Windows, which is not a valid url separator (same fix as the async variant).
        url = plant_uml_server + "/" + deflate_and_encode(plant_uml_txt)
        response = requests.get(url)
    except (ConnectionError, requests.exceptions.RequestException) as e:
        log.error(f"Error trying to fetch initial html from plantuml server {plant_uml_server} {str(e)}")
        return None, None
    if response.status_code == 200:
        log.info("plant_uml_server responded with 200 ok")
        # Need to find the fragment:
        #   <p id="diagram">
        #     <img src="http://www.plantuml.com:80/plantuml/png/..." alt="PlantUML diagram" onload="doneLoading()">
        #   </p>
        # in the response html.
        regex = r'.*<p id="diagram".*\s*<.*img src=\"(.*?)\"'
        image_url = re.findall(regex, response.text, re.MULTILINE)
        if image_url:
            # Take the first match; this may reference localhost when a local
            # plantuml server is in use.
            image_url = image_url[0]
        else:
            image_url = None
        return image_url, response
    else:
        # Fixed log wording: the original said "responded with %d ok" on the
        # non-200 (error) path, which was misleading.
        log.error("plant_uml_server responded with %d" % (response.status_code,))
        return None, response
def plant_uml_create_png(plant_uml_txt, output_filename):
    """
    Render plantuml text to a .png file on disk, via the plantuml server.

    :param plant_uml_txt: plant uml text syntax
    :param output_filename: path of the .png file to write
    """
    image_url, response = plant_uml_create_png_and_return_image_url(plant_uml_txt)
    print(image_url)
    if image_url:
        # Second request actually fetches the rendered image bytes.
        response = requests.get(image_url)
        if response.status_code == 200:
            with open(output_filename, "wb") as fp:
                fp.write(response.content)
        else:
            print("ok getting generating uml but error pulling down image")
    elif response is None:
        # Bug fix: the upstream call returns (None, None) when the server could
        # not be contacted at all - the original crashed here with
        # AttributeError on response.status_code.
        print("error calling plantuml server - could not connect")
    else:
        print("error calling plantuml server", response.status_code)
# New
def displaymodel_to_plantuml(displaymodel):
# TODO Should use AlsmMgr.calc_plant_uml()
edge_lookup = {
"generalisation" : "--|>",
"composition" : "<--",
"association" : "..",
} # TODO need to persist and represent cardinality, which is in the pmodel after all !!
result = ""
for node in displaymodel.graph.nodes:
if hasattr(node, "comment"):
result += f"note as {node.id}\n"
result += node.comment + "\n"
result += "end note\n\n"
else:
if ".py" in node.id:
colour = "<< (M,lightgrey) >> #white"
name = node.id.replace('.', '_')
result += f"class {name} <<module>> {colour} {{\n"
else:
result += f"class {node.id} {{\n"
result += "\n".join([f" {attr}" for attr in node.attrs])
result += "\n"
result += "\n".join([f" {meth}()" for meth in node.meths])
result += "\n}\n\n"
for edge in displaymodel.graph.edges:
line = edge_lookup[edge['uml_edge_type']]
# See issue https://github.com/abulka/pynsource/issues/78
# This code displays types instead of attributes, which is not useful, so stop doing it for now
# label = f": {edge['source'].id}" if edge['uml_edge_type'] in ("composition",) else ""
# TODO but in alsm mode, we can have edge labels back! Should use AlsmMgr.calc_plant_uml()
label = ""
_from = edge['source'].id
_to = edge['target'].id
_from_is_module = '.' in _from
_to_is_module = '.' in _to
if '.' in _from or '.' in _to:
_from = _from.replace('.', '_')
_to = _to.replace('.', '_')
if _to_is_module: # its always the to side that is the module
if edge['uml_edge_type'] == "association":
label = ": contains >"
result += f"{_to} {line} {_from} {label}\n"
result += f"{_to} {line}[hidden] {_from} {label}\n"
else: # normal directional ref
result += f"{_from} {line} {_to} {label}\n"
else:
result += f"{_from} {line} {_to} {label}\n"
if edge['uml_edge_type'] == "association":
result += f"{_from} {line}[hidden] {_to} {label}\n"
return result | # This will open the image in your default image viewer.
from PIL import Image
img = Image.open(outpng) | random_line_split |
gen_plantuml.py | import os
import re
import requests
from .gen_base import ReportGenerator, CmdLineGenerator
# from pydbg import dbg
import logging
from common.logger import config_log
import functools
from common.url_to_data import url_to_data
from common.plantuml import deflate_and_encode
import asyncio
from async_lru import alru_cache
import aiohttp
from generate_code.plantuml_html_scan import extract_image_url
log = logging.getLogger(__name__)
config_log(log)
class PySourceAsPlantUml(ReportGenerator):
    # Report generator that renders the parsed python model (self.pmodel,
    # populated by the inherited Parse()) as plantuml class-diagram text,
    # accumulated into self.result.

    def __init__(self, ast=True):
        # ast=True is forwarded to ReportGenerator; presumably it selects the
        # ast-based parser - TODO confirm against gen_base.ReportGenerator.
        ReportGenerator.__init__(self, ast=ast)
        self.result = ""  # the generated plantuml text

    def calc_plant_uml(self, optimise=False):
        """Optimise not used for anything, so use for sorting attributes and members"""
        # Rebuilds self.result from scratch on every call.
        self.result = ""
        classnames = list(self.pmodel.classlist.keys())
        # NOTE(review): self.aclass and self.classentry are instance attributes,
        # not locals - kept as-is in case other code reads them; confirm before
        # refactoring to local variables.
        for self.aclass in classnames:
            self.classentry = self.pmodel.classlist[self.aclass]
            # Main class output
            self.result += "\nclass %s {\n" % self.aclass
            for attrobj in self.classentry.attrs:
                self.result += " %s\n" % attrobj.attrname
            for adef in self.classentry.defs:
                self.result += " %s()\n" % adef
            # end class
            self.result += "}\n\n"
            # class inheritance relationships
            if self.classentry.classesinheritsfrom:
                for parentclass in self.classentry.classesinheritsfrom:
                    self.result += "%s <|- %s\n" % (parentclass, self.aclass)
            # aggregation relationships showing class dependency and composition relationships
            for attrobj in self.classentry.attrs:
                compositescreated = self._GetCompositeCreatedClassesFor(attrobj.attrname)
                for c in compositescreated:
                    # "many" in the attr type gets a starred composition arrow
                    # with a "*" cardinality; otherwise a plain dependency.
                    if "many" in attrobj.attrtype:
                        line = "*-->"
                        cardinality = "*"
                    else:
                        line = "..>"
                        cardinality = ""
                    if cardinality:
                        connector = '%s "%s"' % (line, cardinality)
                    else:
                        connector = line
                    self.result += "%s %s %s : %s\n" % (self.aclass, connector, c, attrobj.attrname)

    def GenReportDump(self):  # Override template method entirely and do it ourselves
        """
        This method gets called by the __str__ of the parent ReportGenerator
        We override here to do our own generation rather than the template method design pattern
        approach of ReportGenerator class
        """
        if not self.result:  # Update: THIS MAY BE VALIDLY EMPTY if there are no classes in a file
            # print "Warning, should call calc_plant_uml() after .Parse() and before str(p) - repairing..."
            self.calc_plant_uml()
        return self.result
class CmdLinePythonToPlantUml(CmdLineGenerator):
    """
    The only thing we inherit from CmdLineGenerator is the constructor with gives us self.directories etc.
    Everything else gets triggered by the ExportTo()
    """

    def _GenerateAuxilliaryClasses(self):
        # No auxiliary classes needed for plantuml text output.
        pass

    def ExportTo(self, outpath=None):  # Override and redefine Template method entirely
        """
        Parse all files in self.directories, print the resulting plantuml text,
        and optionally render it to a .png via the plantuml server.

        :param outpath: path of the .png to generate, or "nopng"/None to skip
                        image generation.
        """
        globbed = self.directories
        self.p = PySourceAsPlantUml()
        self.p.optionModuleAsClass = self.optionModuleAsClass
        self.p.verbose = self.verbose
        for f in globbed:
            self.p.Parse(f)
        self.p.calc_plant_uml()
        plant_uml_text = str(self.p)
        print(plant_uml_text)
        # Optionally generate image via plant uml server.
        # Bug fix: guard against None - the original crashed on
        # outpng.lower() when ExportTo() was called with the default outpath.
        outpng = outpath
        if outpng and outpng != "nopng":
            if ".png" not in outpng.lower():
                print("output filename %s must have .png in the name" % outpng)
                exit(1)
            print("Generating plantuml diagram %s..." % outpng)
            plant_uml_create_png(plant_uml_text, outpng)
            # Cross platform image display - requires pillow (conda install pillow).
            # This will open the image in your default image viewer.
            from PIL import Image
            img = Image.open(outpng)
            img.show()
        print("Done!")
@alru_cache(maxsize=32)
async def plant_uml_create_png_and_return_image_url_async(plant_uml_txt: str, plantuml_server_local_url: str) -> str:
    """Async version of getting image url from plantuml. This does not get the actual image,
    that is a separate call - unlike in a browser where that second call is done automatically.
    Uses `url_to_data`, and only returns extracted url, no response object is returned by this
    routine, cos it was never used by anyone recently.
    I'm caching the result, though truthfully the call to `url_to_data` is already cached,
    so it probably doesn't make much difference.

    Returns None on connection failure or when no image url could be extracted;
    re-raises asyncio.TimeoutError.
    """
    PLANTUML_URL_ON_INTERNET = "http://www.plantuml.com/plantuml/uml"
    # PLANTUML_URL_LOCAL = "http://localhost:8080/plantuml/uml"
    # Fall back to the internet server when no local server url was supplied.
    plant_uml_server = plantuml_server_local_url if plantuml_server_local_url else PLANTUML_URL_ON_INTERNET
    log.info("plant_uml_server calculated to be %s" % (plant_uml_server,))
    try:
        # url = os.path.join(plant_uml_server, deflate_and_encode(plant_uml_txt)) # fails on windows cos \ char is not proper url
        url = plant_uml_server + "/" + deflate_and_encode(plant_uml_txt)
        # url_to_data is awaited and returns (bytes, http status code).
        data, status_code = await url_to_data(url)
        log.info(f"Response for url {url} from plant_uml_server status_code is {status_code}")
        response_text = data.decode('utf-8')
    except (ConnectionError,
            requests.exceptions.RequestException,
            aiohttp.client_exceptions.ClientConnectorError) as e:
        # log.exception("Trying to render using plantuml server %s str(e)" % plant_uml_server)
        log.error(
            f"Error trying to fetch initial html from plantuml server {plant_uml_server} {str(e)}")
        return None
    except asyncio.TimeoutError as e:  # there is no string repr of this exception
        # Timeouts are logged but propagated - the caller decides how to recover.
        log.error("TimeoutError getting plantuml html")
        raise
    if status_code == 200:
        log.info("plant_uml_server responded with 200 ok, next we try to extract diagram url from this HTML...")
        # Uncomment for debugging
        # log.info("plant_uml_server responded with 200 ok, see plantuml_server_response.html for full page response")
        # with open("plantuml_server_response.html", "w") as f:
        #     f.write(response_text)
        image_url = extract_image_url(response_text)
        if image_url:
            # this is likely referencing localhost due to calc_plantuml_server_url() giving us a localhost
            image_url = image_url[0]
            # A protocol-relative url ("//host/...") needs its scheme restored
            # from the server url we queried.
            if image_url.startswith("//"):
                from urllib.parse import urlparse
                o = urlparse(plant_uml_server)
                image_url = f"{o.scheme}:{image_url}"  # add the scheme back in e.g. http
        else:
            image_url = None
            log.error(f"could not extract image from plant_uml_server - plantuml server html format has probably changed - please report this to https://github.com/abulka/pynsource/issues")
        log.info("extracted actual diagram image_url of %s" % image_url)
        return image_url
    else:
        # NOTE(review): message says "ok" but this is the non-200 error path -
        # the wording looks wrong.
        log.error(f"plant_uml_server responded with {status_code} ok")
        return None
@functools.lru_cache(maxsize=32)
def plant_uml_create_png_and_return_image_url(plant_uml_txt):
"""
Convert the plantuml text into a uml image url, using the plantuml server on the internet.
:param plant_uml_txt: plant uml text syntax
:return: a tuple: [image_url | None], response
"""
# import hashlib
# dbg(hashlib.md5(plant_uml_txt.encode('utf-8')).hexdigest())
# plant_uml_server = calc_plantuml_server_url()
PLANTUML_URL_ON_INTERNET = "http://www.plantuml.com/plantuml/uml"
plant_uml_server = PLANTUML_URL_ON_INTERNET
log.info("plant_uml_server calculated to be %s" % (plant_uml_server,))
try:
# response = requests.post(plant_uml_server, data={'text': plant_uml_txt})
url = os.path.join(plant_uml_server, deflate_and_encode(plant_uml_txt))
response = requests.get(url)
except (ConnectionError, requests.exceptions.RequestException) as e:
# log.exception("Trying to render using plantuml server %s str(e)" % plant_uml_server)
log.error(f"Error trying to fetch initial html from plantuml server {plant_uml_server} {str(e)}")
return None, None
if response.status_code == 200:
# print("plant_uml_server responded with 200 ok")
log.info("plant_uml_server responded with 200 ok")
"""
Need to find the fragment:
<p id="diagram">
<img src="http://www.plantuml.com:80/plantuml/png/SyfFKj2rKt3CoKnELR1Io4ZDoSa70000" alt="PlantUML diagram" onload="doneLoading()">
</p>
in the response.
"""
regex = r'.*<p id="diagram".*\s*<.*img src=\"(.*?)\"'
image_url = re.findall(regex, response.text, re.MULTILINE)
if image_url:
image_url = image_url[
0
] # this is likely referencing localhost due to calc_plantuml_server_url() giving us a localhost
# if PLANT_UML_LOCAL:
# image_url = normalise_plantuml_url(
# image_url
# ) # substitute the real host we are on. doesn't really matter, cos always will dynamically repair in all list and update views - but need this repair for ztree1 non persistent debug view, so that can ipad in to e.g. http://192.168.0.3:8000/ztree1 whilst images being returned are refering to localhost which the ipad cannot reach (cos its a different machine)
else:
image_url = None
return image_url, response
else:
log.error("plant_uml_server responded with %d ok" % (response.status_code,))
return None, response
def plant_uml_create_png(plant_uml_txt, output_filename):
image_url, response = plant_uml_create_png_and_return_image_url(plant_uml_txt)
print(image_url)
if image_url:
"""
Now fetch the image
"""
response = requests.get(image_url)
if response.status_code == 200:
|
else:
print("ok getting generating uml but error pulling down image")
else:
print("error calling plantuml server", response.status_code)
# New
def displaymodel_to_plantuml(displaymodel):
# TODO Should use AlsmMgr.calc_plant_uml()
edge_lookup = {
"generalisation" : "--|>",
"composition" : "<--",
"association" : "..",
} # TODO need to persist and represent cardinality, which is in the pmodel after all !!
result = ""
for node in displaymodel.graph.nodes:
if hasattr(node, "comment"):
result += f"note as {node.id}\n"
result += node.comment + "\n"
result += "end note\n\n"
else:
if ".py" in node.id:
colour = "<< (M,lightgrey) >> #white"
name = node.id.replace('.', '_')
result += f"class {name} <<module>> {colour} {{\n"
else:
result += f"class {node.id} {{\n"
result += "\n".join([f" {attr}" for attr in node.attrs])
result += "\n"
result += "\n".join([f" {meth}()" for meth in node.meths])
result += "\n}\n\n"
for edge in displaymodel.graph.edges:
line = edge_lookup[edge['uml_edge_type']]
# See issue https://github.com/abulka/pynsource/issues/78
# This code displays types instead of attributes, which is not useful, so stop doing it for now
# label = f": {edge['source'].id}" if edge['uml_edge_type'] in ("composition",) else ""
# TODO but in alsm mode, we can have edge labels back! Should use AlsmMgr.calc_plant_uml()
label = ""
_from = edge['source'].id
_to = edge['target'].id
_from_is_module = '.' in _from
_to_is_module = '.' in _to
if '.' in _from or '.' in _to:
_from = _from.replace('.', '_')
_to = _to.replace('.', '_')
if _to_is_module: # its always the to side that is the module
if edge['uml_edge_type'] == "association":
label = ": contains >"
result += f"{_to} {line} {_from} {label}\n"
result += f"{_to} {line}[hidden] {_from} {label}\n"
else: # normal directional ref
result += f"{_from} {line} {_to} {label}\n"
else:
result += f"{_from} {line} {_to} {label}\n"
if edge['uml_edge_type'] == "association":
result += f"{_from} {line}[hidden] {_to} {label}\n"
return result
| with open(output_filename, "wb") as fp:
fp.write(response.content) | conditional_block |
update.go | // Copyright (c) 2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package event
import (
"context"
"strings"
"time"
mesos "github.com/uber/peloton/.gen/mesos/v1"
pbjob "github.com/uber/peloton/.gen/peloton/api/v0/job"
pb_task "github.com/uber/peloton/.gen/peloton/api/v0/task"
"github.com/uber/peloton/.gen/peloton/api/v0/volume"
pb_eventstream "github.com/uber/peloton/.gen/peloton/private/eventstream"
pbeventstream "github.com/uber/peloton/.gen/peloton/private/eventstream"
v1pbevent "github.com/uber/peloton/.gen/peloton/private/eventstream/v1alpha/event"
"github.com/uber/peloton/pkg/common"
"github.com/uber/peloton/pkg/common/api"
"github.com/uber/peloton/pkg/common/eventstream"
"github.com/uber/peloton/pkg/common/statusupdate"
"github.com/uber/peloton/pkg/common/util"
v1eventstream "github.com/uber/peloton/pkg/common/v1alpha/eventstream"
"github.com/uber/peloton/pkg/jobmgr/cached"
"github.com/uber/peloton/pkg/jobmgr/goalstate"
jobmgr_task "github.com/uber/peloton/pkg/jobmgr/task"
"github.com/uber/peloton/pkg/jobmgr/task/lifecyclemgr"
taskutil "github.com/uber/peloton/pkg/jobmgr/util/task"
"github.com/uber/peloton/pkg/storage"
"github.com/gogo/protobuf/proto"
log "github.com/sirupsen/logrus"
"github.com/uber-go/tally"
"go.uber.org/yarpc"
"go.uber.org/yarpc/yarpcerrors"
)
const (
// Mesos event message that indicates duplicate task ID
_msgMesosDuplicateID = "Task has duplicate ID"
// _numOrphanTaskKillAttempts is number of attempts to
// kill orphan task in case of error from host manager
_numOrphanTaskKillAttempts = 3
// _waitForRetryOnError is the time between successive retries
// to kill orphan task in case of error from host manager
_waitForRetryOnErrorOrphanTaskKill = 5 * time.Millisecond
)
// Declare a Now function so that we can mock it in unit tests.
var now = time.Now
// StatusUpdate is the interface for task status updates
type StatusUpdate interface {
Start()
Stop()
}
// Listener is the interface for StatusUpdate listener
type Listener interface {
OnV0Events(events []*pbeventstream.Event)
Start()
Stop()
}
// StatusUpdate reads and processes the task state change events from HM
type statusUpdate struct {
jobStore storage.JobStore
taskStore storage.TaskStore
volumeStore storage.PersistentVolumeStore
eventClients map[string]StatusUpdate
lm lifecyclemgr.Manager
applier *asyncEventProcessor
jobFactory cached.JobFactory
goalStateDriver goalstate.Driver
listeners []Listener
rootCtx context.Context
metrics *Metrics
}
// NewTaskStatusUpdate creates a statusUpdate
func NewTaskStatusUpdate(
d *yarpc.Dispatcher,
jobStore storage.JobStore,
taskStore storage.TaskStore,
volumeStore storage.PersistentVolumeStore,
jobFactory cached.JobFactory,
goalStateDriver goalstate.Driver,
listeners []Listener,
parentScope tally.Scope,
hmVersion api.Version,
) StatusUpdate {
statusUpdater := &statusUpdate{
jobStore: jobStore,
taskStore: taskStore,
volumeStore: volumeStore,
rootCtx: context.Background(),
metrics: NewMetrics(parentScope.SubScope("status_updater")),
eventClients: make(map[string]StatusUpdate),
jobFactory: jobFactory,
goalStateDriver: goalStateDriver,
listeners: listeners,
lm: lifecyclemgr.New(hmVersion, d, parentScope),
}
// TODO: add config for BucketEventProcessor
statusUpdater.applier = newBucketEventProcessor(statusUpdater, 100, 10000)
if hmVersion.IsV1() {
v1eventClient := v1eventstream.NewEventStreamClient(
d,
common.PelotonJobManager,
common.PelotonHostManager,
statusUpdater,
parentScope.SubScope("HostmgrV1EventStreamClient"))
statusUpdater.eventClients[common.PelotonV1HostManager] = v1eventClient
} else {
eventClient := eventstream.NewEventStreamClient(
d,
common.PelotonJobManager,
common.PelotonHostManager,
statusUpdater,
parentScope.SubScope("HostmgrEventStreamClient"))
statusUpdater.eventClients[common.PelotonHostManager] = eventClient
}
eventClientRM := eventstream.NewEventStreamClient(
d,
common.PelotonJobManager,
common.PelotonResourceManager,
statusUpdater,
parentScope.SubScope("ResmgrEventStreamClient"))
statusUpdater.eventClients[common.PelotonResourceManager] = eventClientRM
return statusUpdater
}
// OnV0Event is the callback function notifying an event
func (p *statusUpdate) OnV0Event(event *pb_eventstream.Event) {
log.WithField("event_offset", event.Offset).Debug("JobMgr received v0 event")
if event.GetType() != pbeventstream.Event_HOST_EVENT {
p.applier.addV0Event(event)
}
}
// OnV1Event is the callback function notifying an event
func (p *statusUpdate) OnV1Event(event *v1pbevent.Event) {
log.WithField("event_offset", event.Offset).Debug("JobMgr received v1 event")
p.applier.addV1Event(event)
}
// GetEventProgress returns the progress of the event progressing
func (p *statusUpdate) GetEventProgress() uint64 {
return p.applier.GetEventProgress()
}
// ProcessStatusUpdate processes the actual task status
func (p *statusUpdate) ProcessStatusUpdate(
ctx context.Context,
updateEvent *statusupdate.Event,
) error {
var currTaskResourceUsage map[string]float64
p.logTaskMetrics(updateEvent)
isOrphanTask, taskInfo, err := p.isOrphanTaskEvent(ctx, updateEvent)
if err != nil {
return err
}
if isOrphanTask {
p.metrics.SkipOrphanTasksTotal.Inc(1)
taskInfo := &pb_task.TaskInfo{
Runtime: &pb_task.RuntimeInfo{
State: updateEvent.State(),
MesosTaskId: updateEvent.MesosTaskID(),
AgentID: updateEvent.AgentID(),
},
}
// Kill the orphan task
for i := 0; i < _numOrphanTaskKillAttempts; i++ {
err = jobmgr_task.KillOrphanTask(ctx, p.lm, taskInfo)
if err == nil {
return nil
}
time.Sleep(_waitForRetryOnErrorOrphanTaskKill)
}
return nil
}
// whether to skip or not if instance state is similar before and after
if isDuplicateStateUpdate(taskInfo, updateEvent) {
return nil
}
if updateEvent.State() == pb_task.TaskState_RUNNING &&
taskInfo.GetConfig().GetVolume() != nil &&
len(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {
// Update volume state to be CREATED upon task RUNNING.
if err := p.updatePersistentVolumeState(ctx, taskInfo); err != nil {
return err
}
}
newRuntime := proto.Clone(taskInfo.GetRuntime()).(*pb_task.RuntimeInfo)
// Persist the reason and message for mesos updates
newRuntime.Message = updateEvent.StatusMsg()
newRuntime.Reason = ""
// Persist healthy field if health check is enabled
if taskInfo.GetConfig().GetHealthCheck() != nil {
reason := updateEvent.Reason()
healthy := updateEvent.Healthy()
p.persistHealthyField(updateEvent.State(), reason, healthy, newRuntime)
}
// Update FailureCount
updateFailureCount(updateEvent.State(), taskInfo.GetRuntime(), newRuntime)
switch updateEvent.State() {
case pb_task.TaskState_FAILED:
reason := updateEvent.Reason()
msg := updateEvent.Message()
if reason == mesos.TaskStatus_REASON_TASK_INVALID.String() &&
strings.Contains(msg, _msgMesosDuplicateID) {
log.WithField("task_id", updateEvent.TaskID()).
Info("ignoring duplicate task id failure")
return nil
}
newRuntime.Reason = reason
newRuntime.State = updateEvent.State()
newRuntime.Message = msg
// TODO p2k: can we build TerminationStatus from PodEvent?
termStatus := &pb_task.TerminationStatus{
Reason: pb_task.TerminationStatus_TERMINATION_STATUS_REASON_FAILED,
}
if code, err := taskutil.GetExitStatusFromMessage(msg); err == nil {
termStatus.ExitCode = code
} else if yarpcerrors.IsNotFound(err) == false {
log.WithField("task_id", updateEvent.TaskID()).
WithField("error", err).
Debug("Failed to extract exit status from message")
}
if sig, err := taskutil.GetSignalFromMessage(msg); err == nil {
termStatus.Signal = sig
} else if yarpcerrors.IsNotFound(err) == false {
log.WithField("task_id", updateEvent.TaskID()).
WithField("error", err).
Debug("Failed to extract termination signal from message")
}
newRuntime.TerminationStatus = termStatus
case pb_task.TaskState_LOST:
newRuntime.Reason = updateEvent.Reason()
if util.IsPelotonStateTerminal(taskInfo.GetRuntime().GetState()) {
// Skip LOST status update if current state is terminal state.
log.WithFields(log.Fields{
"task_id": updateEvent.TaskID(),
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": updateEvent.MesosTaskStatus(),
}).Debug("skip reschedule lost task as it is already in terminal state")
return nil
}
if taskInfo.GetRuntime().GetGoalState() == pb_task.TaskState_KILLED {
// Do not take any action for killed tasks, just mark it killed.
// Same message will go to resource manager which will release the placement.
log.WithFields(log.Fields{
"task_id": updateEvent.TaskID(),
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": updateEvent.MesosTaskStatus(),
}).Debug("mark stopped task as killed due to LOST")
newRuntime.State = pb_task.TaskState_KILLED
newRuntime.Message = "Stopped task LOST event: " + updateEvent.StatusMsg()
break
}
if taskInfo.GetConfig().GetVolume() != nil &&
len(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {
// Do not reschedule stateful task. Storage layer will decide
// whether to start or replace this task.
newRuntime.State = pb_task.TaskState_LOST
break
}
log.WithFields(log.Fields{
"task_id": updateEvent.TaskID(),
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": updateEvent.MesosTaskStatus(),
}).Info("reschedule lost task if needed")
newRuntime.State = pb_task.TaskState_LOST
newRuntime.Message = "Task LOST: " + updateEvent.StatusMsg()
newRuntime.Reason = updateEvent.Reason()
// Calculate resource usage for TaskState_LOST using time.Now() as
// completion time
currTaskResourceUsage = getCurrTaskResourceUsage(
updateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),
taskInfo.GetRuntime().GetStartTime(),
now().UTC().Format(time.RFC3339Nano))
default:
newRuntime.State = updateEvent.State()
}
cachedJob := p.jobFactory.AddJob(taskInfo.GetJobId())
// Update task start and completion timestamps
if newRuntime.GetState() == pb_task.TaskState_RUNNING {
if updateEvent.State() != taskInfo.GetRuntime().GetState() {
// StartTime is set at the time of first RUNNING event
// CompletionTime may have been set (e.g. task has been set),
// which could make StartTime larger than CompletionTime.
// Reset CompletionTime every time a task transits to RUNNING state.
newRuntime.StartTime = now().UTC().Format(time.RFC3339Nano)
newRuntime.CompletionTime = ""
// when task is RUNNING, reset the desired host field. Therefore,
// the task would be scheduled onto a different host when the task
// restarts (e.g due to health check or fail retry)
newRuntime.DesiredHost = ""
if len(taskInfo.GetRuntime().GetDesiredHost()) != 0 {
p.metrics.TasksInPlacePlacementTotal.Inc(1)
if taskInfo.GetRuntime().GetDesiredHost() == taskInfo.GetRuntime().GetHost() {
p.metrics.TasksInPlacePlacementSuccess.Inc(1)
} else {
log.WithField("job_id", taskInfo.GetJobId().GetValue()).
WithField("instance_id", taskInfo.GetInstanceId()).
Info("task fail to place on desired host")
}
}
}
} else if util.IsPelotonStateTerminal(newRuntime.GetState()) &&
cachedJob.GetJobType() == pbjob.JobType_BATCH {
// only update resource count when a batch job is in terminal state
completionTime := now().UTC().Format(time.RFC3339Nano)
newRuntime.CompletionTime = completionTime
currTaskResourceUsage = getCurrTaskResourceUsage(
updateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),
taskInfo.GetRuntime().GetStartTime(), completionTime)
if len(currTaskResourceUsage) > 0 {
// current task resource usage was updated by this event, so we should
// add it to aggregated resource usage for the task and update runtime
aggregateTaskResourceUsage := taskInfo.GetRuntime().GetResourceUsage()
if len(aggregateTaskResourceUsage) > 0 {
for k, v := range currTaskResourceUsage {
aggregateTaskResourceUsage[k] += v
}
newRuntime.ResourceUsage = aggregateTaskResourceUsage
}
}
} else if cachedJob.GetJobType() == pbjob.JobType_SERVICE {
// for service job, reset resource usage
currTaskResourceUsage = nil
newRuntime.ResourceUsage = nil
}
// Update the task update times in job cache and then update the task runtime in cache and DB
cachedJob.SetTaskUpdateTime(updateEvent.Timestamp())
if _, err = cachedJob.CompareAndSetTask(
ctx,
taskInfo.GetInstanceId(),
newRuntime,
false,
); err != nil {
log.WithError(err).
WithFields(log.Fields{
"task_id": updateEvent.TaskID(),
"state": updateEvent.State().String()}).
Error("Fail to update runtime for taskID")
return err
}
// Enqueue task to goal state
p.goalStateDriver.EnqueueTask(
taskInfo.GetJobId(),
taskInfo.GetInstanceId(),
time.Now())
// Enqueue job to goal state as well
goalstate.EnqueueJobWithDefaultDelay(
taskInfo.GetJobId(), p.goalStateDriver, cachedJob)
// Update job's resource usage with the current task resource usage.
// This is a noop in case currTaskResourceUsage is nil
// This operation is not idempotent. So we will update job resource usage
// in cache only after successfully updating task resource usage in DB
// In case of errors in PatchTasks(), ProcessStatusUpdate will be retried
// indefinitely until errors are resolved.
cachedJob.UpdateResourceUsage(currTaskResourceUsage)
return nil
}
// logTaskMetrics logs events metrics
func (p *statusUpdate) logTaskMetrics(event *statusupdate.Event) {
if event.V0() == nil {
return
}
// Update task state counter for non-reconcilication update.
reason := event.MesosTaskStatus().GetReason()
if reason != mesos.TaskStatus_REASON_RECONCILIATION {
switch event.State() {
case pb_task.TaskState_RUNNING:
p.metrics.TasksRunningTotal.Inc(1)
case pb_task.TaskState_SUCCEEDED:
p.metrics.TasksSucceededTotal.Inc(1)
case pb_task.TaskState_FAILED:
p.metrics.TasksFailedTotal.Inc(1)
p.metrics.TasksFailedReason[int32(reason)].Inc(1)
log.WithFields(log.Fields{
"task_id": event.TaskID(),
"failed_reason": mesos.TaskStatus_Reason_name[int32(reason)],
}).Debug("received failed task")
case pb_task.TaskState_KILLED:
p.metrics.TasksKilledTotal.Inc(1)
case pb_task.TaskState_LOST:
p.metrics.TasksLostTotal.Inc(1)
case pb_task.TaskState_LAUNCHED:
p.metrics.TasksLaunchedTotal.Inc(1)
case pb_task.TaskState_STARTING:
p.metrics.TasksStartingTotal.Inc(1)
}
} else {
p.metrics.TasksReconciledTotal.Inc(1)
}
}
// isOrphanTaskEvent returns if a task event is from orphan task,
// it returns the TaskInfo if task is not orphan
func (p *statusUpdate) isOrphanTaskEvent(
ctx context.Context,
event *statusupdate.Event,
) (bool, *pb_task.TaskInfo, error) {
taskInfo, err := p.taskStore.GetTaskByID(ctx, event.TaskID())
if err != nil {
if yarpcerrors.IsNotFound(err) {
// if task runtime or config is not present in the DB,
// then the task is orphan
log.WithFields(log.Fields{
"mesos_task_id": event.MesosTaskStatus(),
"task_status_event≠": event.State().String(),
}).Info("received status update for task not found in DB")
return true, nil, nil
}
log.WithError(err).
WithField("task_id", event.TaskID()).
WithField("task_status_event", event.MesosTaskStatus()).
WithField("state", event.State().String()).
Error("fail to find taskInfo for taskID for mesos event")
return false, nil, err
}
// TODO p2k: verify v1 pod id in taskInfo
if event.V0() != nil {
dbTaskID := taskInfo.GetRuntime().GetMesosTaskId().GetValue()
if dbTaskID != event.MesosTaskStatus().GetTaskId().GetValue() {
log.WithFields(log.Fields{
"orphan_task_id": event.MesosTaskStatus().GetTaskId().GetValue(),
"db_task_id": dbTaskID,
"db_task_runtime_state": taskInfo.GetRuntime().GetState().String(),
"mesos_event_state": event.State().String(),
}).Info("received status update for orphan mesos task")
return true, nil, nil
}
}
return false, taskInfo, nil
}
// updatePersistentVolumeState updates volume state to be CREATED.
func (p *statusUpdate) updatePersistentVolumeState(ctx context.Context, taskInfo *pb_task.TaskInfo) error {
// Update volume state to be created if task enters RUNNING state.
volumeInfo, err := p.volumeStore.GetPersistentVolume(ctx, taskInfo.GetRuntime().GetVolumeID())
if err != nil {
log.WithError(err).WithFields(log.Fields{
"job_id": taskInfo.GetJobId().GetValue(),
"instance_id": taskInfo.GetInstanceId(),
"db_task_runtime": taskInfo.GetRuntime(),
"volume_id": taskInfo.GetRuntime().GetVolumeID(),
}).Error("Failed to read db for given volume")
_, ok := err.(*storage.VolumeNotFoundError)
if !ok {
// Do not ack status update running if db read error.
return err
}
return nil
}
// Do not update volume db if state is already CREATED or goalstate is DELETED.
if volumeInfo.GetState() == volume.VolumeState_CREATED ||
volumeInfo.GetGoalState() == volume.VolumeState_DELETED {
return nil
}
volumeInfo.State = volume.VolumeState_CREATED
return p.volumeStore.UpdatePersistentVolume(ctx, volumeInfo)
}
// ProcessListeners is for v0 only as we will remove the eventforwarder in v1.
func (p *statusUpdate) ProcessListeners(event *statusupdate.Event) {
if event != nil && event.V1() != nil {
return
}
for _, listener := range p.listeners {
listener.OnV0Events([]*pb_eventstream.Event{event.V0()})
}
}
// OnEvents is the callback function notifying a batch of events
func (p *statusUpdate) OnV0Events(events []*pb_eventstream.Event) {}
func (p *statusUpdate) On | vents []*v1pbevent.Event) {}
// Start starts processing status update events
func (p *statusUpdate) Start() {
p.applier.start()
for _, client := range p.eventClients {
client.Start()
}
log.Info("Task status updater started")
for _, listener := range p.listeners {
listener.Start()
}
}
// Stop stops processing status update events
func (p *statusUpdate) Stop() {
for _, client := range p.eventClients {
client.Stop()
}
log.Info("Task status updater stopped")
for _, listener := range p.listeners {
listener.Stop()
}
p.applier.drainAndShutdown()
}
func getCurrTaskResourceUsage(taskID string, state pb_task.TaskState,
resourceCfg *pb_task.ResourceConfig,
startTime, completionTime string) map[string]float64 {
currTaskResourceUsage, err := jobmgr_task.CreateResourceUsageMap(
resourceCfg, startTime, completionTime)
if err != nil {
// only log the error here and continue processing the event
// in this case resource usage map will be nil
log.WithError(err).
WithFields(log.Fields{
"task_id": taskID,
"state": state}).
Error("failed to calculate resource usage")
}
return currTaskResourceUsage
}
// persistHealthyField update the healthy field in runtimeDiff
func (p *statusUpdate) persistHealthyField(
state pb_task.TaskState,
reason string,
healthy bool,
newRuntime *pb_task.RuntimeInfo) {
switch {
case util.IsPelotonStateTerminal(state):
// Set healthy to INVALID for all terminal state
newRuntime.Healthy = pb_task.HealthState_INVALID
case state == pb_task.TaskState_RUNNING:
// Only record the health check result when
// the reason for the event is TASK_HEALTH_CHECK_STATUS_UPDATED
if reason == mesos.TaskStatus_REASON_TASK_HEALTH_CHECK_STATUS_UPDATED.String() {
newRuntime.Reason = reason
if healthy {
newRuntime.Healthy = pb_task.HealthState_HEALTHY
p.metrics.TasksHealthyTotal.Inc(1)
} else {
newRuntime.Healthy = pb_task.HealthState_UNHEALTHY
p.metrics.TasksUnHealthyTotal.Inc(1)
}
}
}
}
func updateFailureCount(
eventState pb_task.TaskState,
runtime *pb_task.RuntimeInfo,
newRuntime *pb_task.RuntimeInfo) {
if !util.IsPelotonStateTerminal(eventState) {
return
}
if runtime.GetConfigVersion() != runtime.GetDesiredConfigVersion() {
// do not increment the failure count if config version has changed
return
}
switch {
case eventState == pb_task.TaskState_FAILED:
newRuntime.FailureCount = runtime.GetFailureCount() + 1
case eventState == pb_task.TaskState_SUCCEEDED &&
runtime.GetGoalState() == pb_task.TaskState_RUNNING:
newRuntime.FailureCount = runtime.GetFailureCount() + 1
case eventState == pb_task.TaskState_KILLED &&
runtime.GetGoalState() != pb_task.TaskState_KILLED:
// This KILLED event is unexpected
newRuntime.FailureCount = runtime.GetFailureCount() + 1
}
}
// isDuplicateStateUpdate validates if the current instance state is left unchanged
// by this status update.
// If it is left unchanged, then the status update should be ignored.
// The state is said to be left unchanged
// if any of the following conditions is satisfied.
//
// 1. State is the same and that state is not running.
// 2. State is the same, that state is running, and health check is not configured.
// 3. State is the same, that state is running, and the update is not due to health check result.
// 4. State is the same, that state is running, the update is due to health check result and the task is healthy.
//
// Each unhealthy state needs to be logged into the pod events table.
func isDuplicateStateUpdate(
taskInfo *pb_task.TaskInfo,
updateEvent *statusupdate.Event,
) bool {
if updateEvent.State() != taskInfo.GetRuntime().GetState() {
return false
}
mesosTaskStatus := updateEvent.MesosTaskStatus()
podEvent := updateEvent.PodEvent()
if updateEvent.State() != pb_task.TaskState_RUNNING {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if state is not RUNNING")
return true
}
if taskInfo.GetConfig().GetHealthCheck() == nil ||
!taskInfo.GetConfig().GetHealthCheck().GetEnabled() {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if health check is not configured or " +
"disabled")
return true
}
newStateReason := updateEvent.Reason()
// TODO p2k: not sure which kubelet reason matches this.
// Should we skip some status updates from kubelets?
if newStateReason != mesos.TaskStatus_REASON_TASK_HEALTH_CHECK_STATUS_UPDATED.String() {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if status update reason is not from health check")
return true
}
// Current behavior will log consecutive negative health check results
// ToDo (varung): Evaluate if consecutive negative results should be logged or not
isPreviousStateHealthy := taskInfo.GetRuntime().GetHealthy() == pb_task.HealthState_HEALTHY
if !isPreviousStateHealthy {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("log each negative health check result")
return false
}
if updateEvent.Healthy() == isPreviousStateHealthy {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if health check result is positive consecutively")
return true
}
return false
}
| V1Events(e | identifier_name |
update.go | // Copyright (c) 2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package event
import (
"context"
"strings"
"time"
mesos "github.com/uber/peloton/.gen/mesos/v1"
pbjob "github.com/uber/peloton/.gen/peloton/api/v0/job"
pb_task "github.com/uber/peloton/.gen/peloton/api/v0/task"
"github.com/uber/peloton/.gen/peloton/api/v0/volume"
pb_eventstream "github.com/uber/peloton/.gen/peloton/private/eventstream"
pbeventstream "github.com/uber/peloton/.gen/peloton/private/eventstream"
v1pbevent "github.com/uber/peloton/.gen/peloton/private/eventstream/v1alpha/event"
"github.com/uber/peloton/pkg/common"
"github.com/uber/peloton/pkg/common/api"
"github.com/uber/peloton/pkg/common/eventstream"
"github.com/uber/peloton/pkg/common/statusupdate"
"github.com/uber/peloton/pkg/common/util"
v1eventstream "github.com/uber/peloton/pkg/common/v1alpha/eventstream"
"github.com/uber/peloton/pkg/jobmgr/cached"
"github.com/uber/peloton/pkg/jobmgr/goalstate"
jobmgr_task "github.com/uber/peloton/pkg/jobmgr/task"
"github.com/uber/peloton/pkg/jobmgr/task/lifecyclemgr"
taskutil "github.com/uber/peloton/pkg/jobmgr/util/task"
"github.com/uber/peloton/pkg/storage"
"github.com/gogo/protobuf/proto"
log "github.com/sirupsen/logrus"
"github.com/uber-go/tally"
"go.uber.org/yarpc"
"go.uber.org/yarpc/yarpcerrors"
)
const (
// Mesos event message that indicates duplicate task ID
_msgMesosDuplicateID = "Task has duplicate ID"
// _numOrphanTaskKillAttempts is number of attempts to
// kill orphan task in case of error from host manager
_numOrphanTaskKillAttempts = 3
// _waitForRetryOnError is the time between successive retries
// to kill orphan task in case of error from host manager
_waitForRetryOnErrorOrphanTaskKill = 5 * time.Millisecond
)
// Declare a Now function so that we can mock it in unit tests.
var now = time.Now
// StatusUpdate is the interface for task status updates
type StatusUpdate interface {
Start()
Stop()
}
// Listener is the interface for StatusUpdate listener
type Listener interface {
OnV0Events(events []*pbeventstream.Event)
Start()
Stop()
}
// StatusUpdate reads and processes the task state change events from HM
type statusUpdate struct {
jobStore storage.JobStore
taskStore storage.TaskStore
volumeStore storage.PersistentVolumeStore
eventClients map[string]StatusUpdate
lm lifecyclemgr.Manager
applier *asyncEventProcessor
jobFactory cached.JobFactory
goalStateDriver goalstate.Driver
listeners []Listener
rootCtx context.Context
metrics *Metrics
}
// NewTaskStatusUpdate creates a statusUpdate and subscribes it to the
// host-manager (v0 or v1alpha, depending on hmVersion) and
// resource-manager event streams.
func NewTaskStatusUpdate(
	d *yarpc.Dispatcher,
	jobStore storage.JobStore,
	taskStore storage.TaskStore,
	volumeStore storage.PersistentVolumeStore,
	jobFactory cached.JobFactory,
	goalStateDriver goalstate.Driver,
	listeners []Listener,
	parentScope tally.Scope,
	hmVersion api.Version,
) StatusUpdate {
	updater := &statusUpdate{
		jobStore:        jobStore,
		taskStore:       taskStore,
		volumeStore:     volumeStore,
		jobFactory:      jobFactory,
		goalStateDriver: goalStateDriver,
		listeners:       listeners,
		rootCtx:         context.Background(),
		lm:              lifecyclemgr.New(hmVersion, d, parentScope),
		metrics:         NewMetrics(parentScope.SubScope("status_updater")),
		eventClients:    make(map[string]StatusUpdate),
	}
	// TODO: add config for BucketEventProcessor
	updater.applier = newBucketEventProcessor(updater, 100, 10000)

	// Subscribe to host-manager task events on whichever API version
	// is configured.
	if hmVersion.IsV1() {
		updater.eventClients[common.PelotonV1HostManager] = v1eventstream.NewEventStreamClient(
			d,
			common.PelotonJobManager,
			common.PelotonHostManager,
			updater,
			parentScope.SubScope("HostmgrV1EventStreamClient"))
	} else {
		updater.eventClients[common.PelotonHostManager] = eventstream.NewEventStreamClient(
			d,
			common.PelotonJobManager,
			common.PelotonHostManager,
			updater,
			parentScope.SubScope("HostmgrEventStreamClient"))
	}

	// The resource-manager stream is always subscribed to.
	updater.eventClients[common.PelotonResourceManager] = eventstream.NewEventStreamClient(
		d,
		common.PelotonJobManager,
		common.PelotonResourceManager,
		updater,
		parentScope.SubScope("ResmgrEventStreamClient"))
	return updater
}
// OnV0Event is the callback function notifying an event
// arriving on a v0 event stream; non-host events are queued for
// asynchronous processing.
func (p *statusUpdate) OnV0Event(event *pb_eventstream.Event) {
	log.WithField("event_offset", event.Offset).Debug("JobMgr received v0 event")
	if event.GetType() == pbeventstream.Event_HOST_EVENT {
		// Host events are not task status updates; skip them.
		return
	}
	p.applier.addV0Event(event)
}
// OnV1Event is the callback function notifying an event
// arriving on the v1alpha event stream; the event is queued for
// asynchronous processing by the applier.
func (p *statusUpdate) OnV1Event(event *v1pbevent.Event) {
	log.WithField("event_offset", event.Offset).Debug("JobMgr received v1 event")
	p.applier.addV1Event(event)
}
// GetEventProgress returns the progress of the event processing,
// delegated to the async applier.
func (p *statusUpdate) GetEventProgress() uint64 {
	return p.applier.GetEventProgress()
}
// ProcessStatusUpdate processes the actual task status
// update. The pipeline is:
//  1. emit event metrics,
//  2. if the task is orphaned, best-effort kill it and ack the event,
//  3. drop the event if it leaves the instance state unchanged,
//  4. derive the new task runtime from the event,
//  5. persist it via the job cache and enqueue task + job to goal state.
// Returning an error causes the event to be retried.
func (p *statusUpdate) ProcessStatusUpdate(
	ctx context.Context,
	updateEvent *statusupdate.Event,
) error {
	var currTaskResourceUsage map[string]float64
	p.logTaskMetrics(updateEvent)
	isOrphanTask, taskInfo, err := p.isOrphanTaskEvent(ctx, updateEvent)
	if err != nil {
		return err
	}
	if isOrphanTask {
		p.metrics.SkipOrphanTasksTotal.Inc(1)
		// taskInfo returned above is nil for orphans; build a minimal one
		// (shadowing it) with just enough runtime state to issue the kill.
		taskInfo := &pb_task.TaskInfo{
			Runtime: &pb_task.RuntimeInfo{
				State:       updateEvent.State(),
				MesosTaskId: updateEvent.MesosTaskID(),
				AgentID:     updateEvent.AgentID(),
			},
		}
		// Kill the orphan task; retry a bounded number of times and ack
		// the event (return nil) regardless of the final outcome.
		for i := 0; i < _numOrphanTaskKillAttempts; i++ {
			err = jobmgr_task.KillOrphanTask(ctx, p.lm, taskInfo)
			if err == nil {
				return nil
			}
			time.Sleep(_waitForRetryOnErrorOrphanTaskKill)
		}
		return nil
	}
	// whether to skip or not if instance state is similar before and after
	if isDuplicateStateUpdate(taskInfo, updateEvent) {
		return nil
	}
	if updateEvent.State() == pb_task.TaskState_RUNNING &&
		taskInfo.GetConfig().GetVolume() != nil &&
		len(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {
		// Update volume state to be CREATED upon task RUNNING.
		if err := p.updatePersistentVolumeState(ctx, taskInfo); err != nil {
			return err
		}
	}
	// Mutate a clone of the runtime; it is persisted via CompareAndSetTask.
	newRuntime := proto.Clone(taskInfo.GetRuntime()).(*pb_task.RuntimeInfo)
	// Persist the reason and message for mesos updates
	newRuntime.Message = updateEvent.StatusMsg()
	newRuntime.Reason = ""
	// Persist healthy field if health check is enabled
	if taskInfo.GetConfig().GetHealthCheck() != nil {
		reason := updateEvent.Reason()
		healthy := updateEvent.Healthy()
		p.persistHealthyField(updateEvent.State(), reason, healthy, newRuntime)
	}
	// Update FailureCount
	updateFailureCount(updateEvent.State(), taskInfo.GetRuntime(), newRuntime)
	switch updateEvent.State() {
	case pb_task.TaskState_FAILED:
		reason := updateEvent.Reason()
		msg := updateEvent.Message()
		if reason == mesos.TaskStatus_REASON_TASK_INVALID.String() &&
			strings.Contains(msg, _msgMesosDuplicateID) {
			log.WithField("task_id", updateEvent.TaskID()).
				Info("ignoring duplicate task id failure")
			return nil
		}
		newRuntime.Reason = reason
		newRuntime.State = updateEvent.State()
		newRuntime.Message = msg
		// TODO p2k: can we build TerminationStatus from PodEvent?
		termStatus := &pb_task.TerminationStatus{
			Reason: pb_task.TerminationStatus_TERMINATION_STATUS_REASON_FAILED,
		}
		// Exit code / signal extraction is best effort: a NotFound error
		// just means the message carried no such detail.
		if code, err := taskutil.GetExitStatusFromMessage(msg); err == nil {
			termStatus.ExitCode = code
		} else if yarpcerrors.IsNotFound(err) == false {
			log.WithField("task_id", updateEvent.TaskID()).
				WithField("error", err).
				Debug("Failed to extract exit status from message")
		}
		if sig, err := taskutil.GetSignalFromMessage(msg); err == nil {
			termStatus.Signal = sig
		} else if yarpcerrors.IsNotFound(err) == false {
			log.WithField("task_id", updateEvent.TaskID()).
				WithField("error", err).
				Debug("Failed to extract termination signal from message")
		}
		newRuntime.TerminationStatus = termStatus
	case pb_task.TaskState_LOST:
		newRuntime.Reason = updateEvent.Reason()
		if util.IsPelotonStateTerminal(taskInfo.GetRuntime().GetState()) {
			// Skip LOST status update if current state is terminal state.
			log.WithFields(log.Fields{
				"task_id":           updateEvent.TaskID(),
				"db_task_runtime":   taskInfo.GetRuntime(),
				"task_status_event": updateEvent.MesosTaskStatus(),
			}).Debug("skip reschedule lost task as it is already in terminal state")
			return nil
		}
		if taskInfo.GetRuntime().GetGoalState() == pb_task.TaskState_KILLED {
			// Do not take any action for killed tasks, just mark it killed.
			// Same message will go to resource manager which will release the placement.
			log.WithFields(log.Fields{
				"task_id":           updateEvent.TaskID(),
				"db_task_runtime":   taskInfo.GetRuntime(),
				"task_status_event": updateEvent.MesosTaskStatus(),
			}).Debug("mark stopped task as killed due to LOST")
			newRuntime.State = pb_task.TaskState_KILLED
			newRuntime.Message = "Stopped task LOST event: " + updateEvent.StatusMsg()
			break
		}
		if taskInfo.GetConfig().GetVolume() != nil &&
			len(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {
			// Do not reschedule stateful task. Storage layer will decide
			// whether to start or replace this task.
			newRuntime.State = pb_task.TaskState_LOST
			break
		}
		log.WithFields(log.Fields{
			"task_id":           updateEvent.TaskID(),
			"db_task_runtime":   taskInfo.GetRuntime(),
			"task_status_event": updateEvent.MesosTaskStatus(),
		}).Info("reschedule lost task if needed")
		newRuntime.State = pb_task.TaskState_LOST
		newRuntime.Message = "Task LOST: " + updateEvent.StatusMsg()
		newRuntime.Reason = updateEvent.Reason()
		// Calculate resource usage for TaskState_LOST using time.Now() as
		// completion time
		currTaskResourceUsage = getCurrTaskResourceUsage(
			updateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),
			taskInfo.GetRuntime().GetStartTime(),
			now().UTC().Format(time.RFC3339Nano))
	default:
		newRuntime.State = updateEvent.State()
	}
	cachedJob := p.jobFactory.AddJob(taskInfo.GetJobId())
	// Update task start and completion timestamps
	if newRuntime.GetState() == pb_task.TaskState_RUNNING {
		if updateEvent.State() != taskInfo.GetRuntime().GetState() {
			// StartTime is set at the time of first RUNNING event
			// CompletionTime may have been set (e.g. task has been set),
			// which could make StartTime larger than CompletionTime.
			// Reset CompletionTime every time a task transits to RUNNING state.
			newRuntime.StartTime = now().UTC().Format(time.RFC3339Nano)
			newRuntime.CompletionTime = ""
			// when task is RUNNING, reset the desired host field. Therefore,
			// the task would be scheduled onto a different host when the task
			// restarts (e.g due to health check or fail retry)
			newRuntime.DesiredHost = ""
			if len(taskInfo.GetRuntime().GetDesiredHost()) != 0 {
				p.metrics.TasksInPlacePlacementTotal.Inc(1)
				if taskInfo.GetRuntime().GetDesiredHost() == taskInfo.GetRuntime().GetHost() {
					p.metrics.TasksInPlacePlacementSuccess.Inc(1)
				} else {
					log.WithField("job_id", taskInfo.GetJobId().GetValue()).
						WithField("instance_id", taskInfo.GetInstanceId()).
						Info("task fail to place on desired host")
				}
			}
		}
	} else if util.IsPelotonStateTerminal(newRuntime.GetState()) &&
		cachedJob.GetJobType() == pbjob.JobType_BATCH {
		// only update resource count when a batch job is in terminal state
		completionTime := now().UTC().Format(time.RFC3339Nano)
		newRuntime.CompletionTime = completionTime
		currTaskResourceUsage = getCurrTaskResourceUsage(
			updateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),
			taskInfo.GetRuntime().GetStartTime(), completionTime)
		if len(currTaskResourceUsage) > 0 {
			// current task resource usage was updated by this event, so we should
			// add it to aggregated resource usage for the task and update runtime
			aggregateTaskResourceUsage := taskInfo.GetRuntime().GetResourceUsage()
			if len(aggregateTaskResourceUsage) > 0 {
				for k, v := range currTaskResourceUsage {
					aggregateTaskResourceUsage[k] += v
				}
				newRuntime.ResourceUsage = aggregateTaskResourceUsage
			}
		}
	} else if cachedJob.GetJobType() == pbjob.JobType_SERVICE {
		// for service job, reset resource usage
		currTaskResourceUsage = nil
		newRuntime.ResourceUsage = nil
	}
	// Update the task update times in job cache and then update the task runtime in cache and DB
	cachedJob.SetTaskUpdateTime(updateEvent.Timestamp())
	if _, err = cachedJob.CompareAndSetTask(
		ctx,
		taskInfo.GetInstanceId(),
		newRuntime,
		false,
	); err != nil {
		log.WithError(err).
			WithFields(log.Fields{
				"task_id": updateEvent.TaskID(),
				"state":   updateEvent.State().String()}).
			Error("Fail to update runtime for taskID")
		return err
	}
	// Enqueue task to goal state
	p.goalStateDriver.EnqueueTask(
		taskInfo.GetJobId(),
		taskInfo.GetInstanceId(),
		time.Now())
	// Enqueue job to goal state as well
	goalstate.EnqueueJobWithDefaultDelay(
		taskInfo.GetJobId(), p.goalStateDriver, cachedJob)
	// Update job's resource usage with the current task resource usage.
	// This is a noop in case currTaskResourceUsage is nil
	// This operation is not idempotent. So we will update job resource usage
	// in cache only after successfully updating task resource usage in DB
	// In case of errors in PatchTasks(), ProcessStatusUpdate will be retried
	// indefinitely until errors are resolved.
	cachedJob.UpdateResourceUsage(currTaskResourceUsage)
	return nil
}
// logTaskMetrics logs events metrics
// (per-state counters for v0 mesos events; reconciliation updates are
// counted separately).
func (p *statusUpdate) logTaskMetrics(event *statusupdate.Event) {
	// Metrics are only emitted for v0 (mesos) events.
	if event.V0() == nil {
		return
	}
	reason := event.MesosTaskStatus().GetReason()
	if reason == mesos.TaskStatus_REASON_RECONCILIATION {
		p.metrics.TasksReconciledTotal.Inc(1)
		return
	}
	// Non-reconciliation update: bump the counter for the event's state.
	switch event.State() {
	case pb_task.TaskState_RUNNING:
		p.metrics.TasksRunningTotal.Inc(1)
	case pb_task.TaskState_SUCCEEDED:
		p.metrics.TasksSucceededTotal.Inc(1)
	case pb_task.TaskState_FAILED:
		p.metrics.TasksFailedTotal.Inc(1)
		p.metrics.TasksFailedReason[int32(reason)].Inc(1)
		log.WithFields(log.Fields{
			"task_id":       event.TaskID(),
			"failed_reason": mesos.TaskStatus_Reason_name[int32(reason)],
		}).Debug("received failed task")
	case pb_task.TaskState_KILLED:
		p.metrics.TasksKilledTotal.Inc(1)
	case pb_task.TaskState_LOST:
		p.metrics.TasksLostTotal.Inc(1)
	case pb_task.TaskState_LAUNCHED:
		p.metrics.TasksLaunchedTotal.Inc(1)
	case pb_task.TaskState_STARTING:
		p.metrics.TasksStartingTotal.Inc(1)
	}
}
// isOrphanTaskEvent returns whether a task event is from an orphan task;
// it returns the TaskInfo if the task is not orphan.
// A task is orphan when it is absent from the DB, or (for v0 events) when
// the mesos task ID in the event no longer matches the one stored in DB.
func (p *statusUpdate) isOrphanTaskEvent(
	ctx context.Context,
	event *statusupdate.Event,
) (bool, *pb_task.TaskInfo, error) {
	taskInfo, err := p.taskStore.GetTaskByID(ctx, event.TaskID())
	if err != nil {
		if yarpcerrors.IsNotFound(err) {
			// if task runtime or config is not present in the DB,
			// then the task is orphan
			// NOTE: the log key previously contained a garbled non-ASCII
			// rune ("task_status_event≠"); fixed to the canonical key.
			log.WithFields(log.Fields{
				"mesos_task_id":     event.MesosTaskStatus(),
				"task_status_event": event.State().String(),
			}).Info("received status update for task not found in DB")
			return true, nil, nil
		}
		// Any other DB error is propagated so the event is retried.
		log.WithError(err).
			WithField("task_id", event.TaskID()).
			WithField("task_status_event", event.MesosTaskStatus()).
			WithField("state", event.State().String()).
			Error("fail to find taskInfo for taskID for mesos event")
		return false, nil, err
	}
	// TODO p2k: verify v1 pod id in taskInfo
	if event.V0() != nil {
		dbTaskID := taskInfo.GetRuntime().GetMesosTaskId().GetValue()
		if dbTaskID != event.MesosTaskStatus().GetTaskId().GetValue() {
			// Event refers to an older incarnation of the task.
			log.WithFields(log.Fields{
				"orphan_task_id":        event.MesosTaskStatus().GetTaskId().GetValue(),
				"db_task_id":            dbTaskID,
				"db_task_runtime_state": taskInfo.GetRuntime().GetState().String(),
				"mesos_event_state":     event.State().String(),
			}).Info("received status update for orphan mesos task")
			return true, nil, nil
		}
	}
	return false, taskInfo, nil
}
// updatePersistentVolumeState updates volume state to be CREATED.
// It is invoked when a stateful task enters RUNNING; a missing volume is
// tolerated, any other DB read error is returned so the update is retried.
func (p *statusUpdate) updatePersistentVolumeState(ctx context.Context, taskInfo *pb_task.TaskInfo) error {
	volumeID := taskInfo.GetRuntime().GetVolumeID()
	volumeInfo, err := p.volumeStore.GetPersistentVolume(ctx, volumeID)
	if err != nil {
		log.WithError(err).WithFields(log.Fields{
			"job_id":          taskInfo.GetJobId().GetValue(),
			"instance_id":     taskInfo.GetInstanceId(),
			"db_task_runtime": taskInfo.GetRuntime(),
			"volume_id":       volumeID,
		}).Error("Failed to read db for given volume")
		if _, notFound := err.(*storage.VolumeNotFoundError); notFound {
			// Volume record missing: nothing to update, ack the event.
			return nil
		}
		// Do not ack status update running if db read error.
		return err
	}
	// Do not update volume db if state is already CREATED or goalstate is DELETED.
	if volumeInfo.GetState() == volume.VolumeState_CREATED ||
		volumeInfo.GetGoalState() == volume.VolumeState_DELETED {
		return nil
	}
	volumeInfo.State = volume.VolumeState_CREATED
	return p.volumeStore.UpdatePersistentVolume(ctx, volumeInfo)
}
// ProcessListeners is for v0 only as we will remove the eventforwarder in v1.
// It forwards the event's v0 payload to every registered listener; v1
// events are ignored.
func (p *statusUpdate) ProcessListeners(event *statusupdate.Event) {
	if event != nil && event.V1() != nil {
		return
	}
	for _, l := range p.listeners {
		l.OnV0Events([]*pb_eventstream.Event{event.V0()})
	}
}
// OnV0Events is the batch callback of the v0 event stream interface;
// it is a no-op here because events are queued one-by-one via OnV0Event.
func (p *statusUpdate) OnV0Events(events []*pb_eventstream.Event) {}

// OnV1Events is the batch callback of the v1alpha event stream interface;
// it is likewise a no-op (see OnV1Event).
func (p *statusUpdate) OnV1Events(events []*v1pbevent.Event) {}
// Start starts processing status update events
func (p *statusUpdate) Start() {
p.applier.start()
for _, client := range p.eventClients {
| log.Info("Task status updater started")
for _, listener := range p.listeners {
listener.Start()
}
}
// Stop stops processing status update events:
// upstream event stream clients first, then listeners, then the applier
// is drained and shut down.
func (p *statusUpdate) Stop() {
	for _, c := range p.eventClients {
		c.Stop()
	}
	log.Info("Task status updater stopped")
	for _, l := range p.listeners {
		l.Stop()
	}
	// Flush any events still buffered in the async applier.
	p.applier.drainAndShutdown()
}
// getCurrTaskResourceUsage computes the resource usage map for one task
// over [startTime, completionTime]. On error it logs and returns the
// (possibly nil) map so event processing can continue.
func getCurrTaskResourceUsage(taskID string, state pb_task.TaskState,
	resourceCfg *pb_task.ResourceConfig,
	startTime, completionTime string) map[string]float64 {
	usage, err := jobmgr_task.CreateResourceUsageMap(
		resourceCfg, startTime, completionTime)
	if err == nil {
		return usage
	}
	// only log the error here and continue processing the event
	// in this case resource usage map will be nil
	log.WithError(err).
		WithFields(log.Fields{
			"task_id": taskID,
			"state":   state}).
		Error("failed to calculate resource usage")
	return usage
}
// persistHealthyField updates the healthy field in newRuntime:
// terminal states invalidate it; for RUNNING tasks it records the result
// of a health-check status update (and bumps the matching metric).
func (p *statusUpdate) persistHealthyField(
	state pb_task.TaskState,
	reason string,
	healthy bool,
	newRuntime *pb_task.RuntimeInfo) {
	if util.IsPelotonStateTerminal(state) {
		// Set healthy to INVALID for all terminal states.
		newRuntime.Healthy = pb_task.HealthState_INVALID
		return
	}
	// Only record the health check result for a RUNNING task whose event
	// reason is TASK_HEALTH_CHECK_STATUS_UPDATED.
	if state != pb_task.TaskState_RUNNING ||
		reason != mesos.TaskStatus_REASON_TASK_HEALTH_CHECK_STATUS_UPDATED.String() {
		return
	}
	newRuntime.Reason = reason
	if healthy {
		newRuntime.Healthy = pb_task.HealthState_HEALTHY
		p.metrics.TasksHealthyTotal.Inc(1)
	} else {
		newRuntime.Healthy = pb_task.HealthState_UNHEALTHY
		p.metrics.TasksUnHealthyTotal.Inc(1)
	}
}
// updateFailureCount bumps FailureCount in newRuntime when a terminal
// event represents a failure relative to the task's goal state. No-op
// when the event is non-terminal or the config version has changed.
func updateFailureCount(
	eventState pb_task.TaskState,
	runtime *pb_task.RuntimeInfo,
	newRuntime *pb_task.RuntimeInfo) {
	if !util.IsPelotonStateTerminal(eventState) {
		return
	}
	if runtime.GetConfigVersion() != runtime.GetDesiredConfigVersion() {
		// do not increment the failure count if config version has changed
		return
	}
	goal := runtime.GetGoalState()
	failed := false
	switch eventState {
	case pb_task.TaskState_FAILED:
		failed = true
	case pb_task.TaskState_SUCCEEDED:
		// Exiting successfully is still a failure for a task whose goal
		// state is to keep RUNNING.
		failed = goal == pb_task.TaskState_RUNNING
	case pb_task.TaskState_KILLED:
		// A KILLED event is unexpected unless the goal state is KILLED.
		failed = goal != pb_task.TaskState_KILLED
	}
	if failed {
		newRuntime.FailureCount = runtime.GetFailureCount() + 1
	}
}
// isDuplicateStateUpdate validates if the current instance state is left unchanged
// by this status update.
// If it is left unchanged, then the status update should be ignored.
// The state is said to be left unchanged
// if any of the following conditions is satisfied.
//
// 1. State is the same and that state is not running.
// 2. State is the same, that state is running, and health check is not configured.
// 3. State is the same, that state is running, and the update is not due to health check result.
// 4. State is the same, that state is running, the update is due to health check result and the task is healthy.
//
// Each unhealthy state needs to be logged into the pod events table.
func isDuplicateStateUpdate(
	taskInfo *pb_task.TaskInfo,
	updateEvent *statusupdate.Event,
) bool {
	// Different state: never a duplicate.
	if updateEvent.State() != taskInfo.GetRuntime().GetState() {
		return false
	}
	mesosTaskStatus := updateEvent.MesosTaskStatus()
	podEvent := updateEvent.PodEvent()
	// Condition 1: same non-RUNNING state.
	if updateEvent.State() != pb_task.TaskState_RUNNING {
		log.WithFields(log.Fields{
			"db_task_runtime":   taskInfo.GetRuntime(),
			"task_status_event": mesosTaskStatus,
			"pod_event":         podEvent,
		}).Debug("skip same status update if state is not RUNNING")
		return true
	}
	// Condition 2: RUNNING, but no (enabled) health check configured.
	if taskInfo.GetConfig().GetHealthCheck() == nil ||
		!taskInfo.GetConfig().GetHealthCheck().GetEnabled() {
		log.WithFields(log.Fields{
			"db_task_runtime":   taskInfo.GetRuntime(),
			"task_status_event": mesosTaskStatus,
			"pod_event":         podEvent,
		}).Debug("skip same status update if health check is not configured or " +
			"disabled")
		return true
	}
	newStateReason := updateEvent.Reason()
	// TODO p2k: not sure which kubelet reason matches this.
	// Should we skip some status updates from kubelets?
	// Condition 3: RUNNING, but the update is not a health-check result.
	if newStateReason != mesos.TaskStatus_REASON_TASK_HEALTH_CHECK_STATUS_UPDATED.String() {
		log.WithFields(log.Fields{
			"db_task_runtime":   taskInfo.GetRuntime(),
			"task_status_event": mesosTaskStatus,
			"pod_event":         podEvent,
		}).Debug("skip same status update if status update reason is not from health check")
		return true
	}
	// Current behavior will log consecutive negative health check results
	// ToDo (varung): Evaluate if consecutive negative results should be logged or not
	isPreviousStateHealthy := taskInfo.GetRuntime().GetHealthy() == pb_task.HealthState_HEALTHY
	if !isPreviousStateHealthy {
		log.WithFields(log.Fields{
			"db_task_runtime":   taskInfo.GetRuntime(),
			"task_status_event": mesosTaskStatus,
			"pod_event":         podEvent,
		}).Debug("log each negative health check result")
		return false
	}
	// Condition 4: healthy before and healthy now — consecutive positive
	// health-check results are skipped.
	if updateEvent.Healthy() == isPreviousStateHealthy {
		log.WithFields(log.Fields{
			"db_task_runtime":   taskInfo.GetRuntime(),
			"task_status_event": mesosTaskStatus,
			"pod_event":         podEvent,
		}).Debug("skip same status update if health check result is positive consecutively")
		return true
	}
	return false
}
| client.Start()
}
| conditional_block |
update.go | // Copyright (c) 2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package event
import (
"context"
"strings"
"time"
mesos "github.com/uber/peloton/.gen/mesos/v1"
pbjob "github.com/uber/peloton/.gen/peloton/api/v0/job"
pb_task "github.com/uber/peloton/.gen/peloton/api/v0/task"
"github.com/uber/peloton/.gen/peloton/api/v0/volume"
pb_eventstream "github.com/uber/peloton/.gen/peloton/private/eventstream"
pbeventstream "github.com/uber/peloton/.gen/peloton/private/eventstream"
v1pbevent "github.com/uber/peloton/.gen/peloton/private/eventstream/v1alpha/event"
"github.com/uber/peloton/pkg/common"
"github.com/uber/peloton/pkg/common/api"
"github.com/uber/peloton/pkg/common/eventstream"
"github.com/uber/peloton/pkg/common/statusupdate"
"github.com/uber/peloton/pkg/common/util"
v1eventstream "github.com/uber/peloton/pkg/common/v1alpha/eventstream"
"github.com/uber/peloton/pkg/jobmgr/cached"
"github.com/uber/peloton/pkg/jobmgr/goalstate"
jobmgr_task "github.com/uber/peloton/pkg/jobmgr/task"
"github.com/uber/peloton/pkg/jobmgr/task/lifecyclemgr"
taskutil "github.com/uber/peloton/pkg/jobmgr/util/task"
"github.com/uber/peloton/pkg/storage"
"github.com/gogo/protobuf/proto"
log "github.com/sirupsen/logrus"
"github.com/uber-go/tally"
"go.uber.org/yarpc"
"go.uber.org/yarpc/yarpcerrors"
)
const (
	// Mesos event message that indicates duplicate task ID
	_msgMesosDuplicateID = "Task has duplicate ID"
	// _numOrphanTaskKillAttempts is number of attempts to
	// kill orphan task in case of error from host manager
	_numOrphanTaskKillAttempts = 3
	// _waitForRetryOnErrorOrphanTaskKill is the time between successive
	// retries to kill orphan task in case of error from host manager
	_waitForRetryOnErrorOrphanTaskKill = 5 * time.Millisecond
)

// Declare a Now function so that we can mock it in unit tests.
var now = time.Now

// StatusUpdate is the interface for task status updates; the event stream
// clients stored in statusUpdate.eventClients also satisfy it.
type StatusUpdate interface {
	Start()
	Stop()
}

// Listener is the interface for StatusUpdate listener; listeners receive
// forwarded v0 events.
type Listener interface {
	OnV0Events(events []*pbeventstream.Event)
	Start()
	Stop()
}

// statusUpdate reads and processes the task state change events from HM
// (host manager) and from the resource manager event streams.
type statusUpdate struct {
	jobStore    storage.JobStore              // job store handle
	taskStore   storage.TaskStore             // task runtime/config lookups
	volumeStore storage.PersistentVolumeStore // persistent volume reads/updates
	// eventClients holds one event stream client per upstream component.
	eventClients map[string]StatusUpdate
	lm           lifecyclemgr.Manager // used to kill orphan tasks
	applier      *asyncEventProcessor // buckets events for async processing
	jobFactory   cached.JobFactory    // in-memory job cache
	goalStateDriver goalstate.Driver  // goal-state engine to enqueue into
	listeners    []Listener           // v0 event forwarders
	rootCtx      context.Context
	metrics      *Metrics
}
// NewTaskStatusUpdate creates a statusUpdate and subscribes it to the
// host-manager event stream (v1alpha or v0, depending on hmVersion) and
// to the resource-manager event stream.
func NewTaskStatusUpdate(
	d *yarpc.Dispatcher,
	jobStore storage.JobStore,
	taskStore storage.TaskStore,
	volumeStore storage.PersistentVolumeStore,
	jobFactory cached.JobFactory,
	goalStateDriver goalstate.Driver,
	listeners []Listener,
	parentScope tally.Scope,
	hmVersion api.Version,
) StatusUpdate {
	statusUpdater := &statusUpdate{
		jobStore:        jobStore,
		taskStore:       taskStore,
		volumeStore:     volumeStore,
		rootCtx:         context.Background(),
		metrics:         NewMetrics(parentScope.SubScope("status_updater")),
		eventClients:    make(map[string]StatusUpdate),
		jobFactory:      jobFactory,
		goalStateDriver: goalStateDriver,
		listeners:       listeners,
		lm:              lifecyclemgr.New(hmVersion, d, parentScope),
	}
	// TODO: add config for BucketEventProcessor
	statusUpdater.applier = newBucketEventProcessor(statusUpdater, 100, 10000)
	// Host-manager subscription: pick the stream matching the configured
	// host-manager API version.
	if hmVersion.IsV1() {
		v1eventClient := v1eventstream.NewEventStreamClient(
			d,
			common.PelotonJobManager,
			common.PelotonHostManager,
			statusUpdater,
			parentScope.SubScope("HostmgrV1EventStreamClient"))
		statusUpdater.eventClients[common.PelotonV1HostManager] = v1eventClient
	} else {
		eventClient := eventstream.NewEventStreamClient(
			d,
			common.PelotonJobManager,
			common.PelotonHostManager,
			statusUpdater,
			parentScope.SubScope("HostmgrEventStreamClient"))
		statusUpdater.eventClients[common.PelotonHostManager] = eventClient
	}
	// Resource-manager subscription is unconditional.
	eventClientRM := eventstream.NewEventStreamClient(
		d,
		common.PelotonJobManager,
		common.PelotonResourceManager,
		statusUpdater,
		parentScope.SubScope("ResmgrEventStreamClient"))
	statusUpdater.eventClients[common.PelotonResourceManager] = eventClientRM
	return statusUpdater
}
// OnV0Event is the callback function notifying an event
// arriving on a v0 event stream; non-host events are queued for
// asynchronous processing.
func (p *statusUpdate) OnV0Event(event *pb_eventstream.Event) {
	log.WithField("event_offset", event.Offset).Debug("JobMgr received v0 event")
	if event.GetType() == pbeventstream.Event_HOST_EVENT {
		// Host events are not task status updates; skip them.
		return
	}
	p.applier.addV0Event(event)
}
|
// GetEventProgress returns the progress of the event processing,
// delegated to the async applier.
func (p *statusUpdate) GetEventProgress() uint64 {
	return p.applier.GetEventProgress()
}
// ProcessStatusUpdate processes the actual task status
// update. The pipeline is:
//  1. emit event metrics,
//  2. if the task is orphaned, best-effort kill it and ack the event,
//  3. drop the event if it leaves the instance state unchanged,
//  4. derive the new task runtime from the event,
//  5. persist it via the job cache and enqueue task + job to goal state.
// Returning an error causes the event to be retried.
func (p *statusUpdate) ProcessStatusUpdate(
	ctx context.Context,
	updateEvent *statusupdate.Event,
) error {
	var currTaskResourceUsage map[string]float64
	p.logTaskMetrics(updateEvent)
	isOrphanTask, taskInfo, err := p.isOrphanTaskEvent(ctx, updateEvent)
	if err != nil {
		return err
	}
	if isOrphanTask {
		p.metrics.SkipOrphanTasksTotal.Inc(1)
		// taskInfo returned above is nil for orphans; build a minimal one
		// (shadowing it) with just enough runtime state to issue the kill.
		taskInfo := &pb_task.TaskInfo{
			Runtime: &pb_task.RuntimeInfo{
				State:       updateEvent.State(),
				MesosTaskId: updateEvent.MesosTaskID(),
				AgentID:     updateEvent.AgentID(),
			},
		}
		// Kill the orphan task; retry a bounded number of times and ack
		// the event (return nil) regardless of the final outcome.
		for i := 0; i < _numOrphanTaskKillAttempts; i++ {
			err = jobmgr_task.KillOrphanTask(ctx, p.lm, taskInfo)
			if err == nil {
				return nil
			}
			time.Sleep(_waitForRetryOnErrorOrphanTaskKill)
		}
		return nil
	}
	// whether to skip or not if instance state is similar before and after
	if isDuplicateStateUpdate(taskInfo, updateEvent) {
		return nil
	}
	if updateEvent.State() == pb_task.TaskState_RUNNING &&
		taskInfo.GetConfig().GetVolume() != nil &&
		len(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {
		// Update volume state to be CREATED upon task RUNNING.
		if err := p.updatePersistentVolumeState(ctx, taskInfo); err != nil {
			return err
		}
	}
	// Mutate a clone of the runtime; it is persisted via CompareAndSetTask.
	newRuntime := proto.Clone(taskInfo.GetRuntime()).(*pb_task.RuntimeInfo)
	// Persist the reason and message for mesos updates
	newRuntime.Message = updateEvent.StatusMsg()
	newRuntime.Reason = ""
	// Persist healthy field if health check is enabled
	if taskInfo.GetConfig().GetHealthCheck() != nil {
		reason := updateEvent.Reason()
		healthy := updateEvent.Healthy()
		p.persistHealthyField(updateEvent.State(), reason, healthy, newRuntime)
	}
	// Update FailureCount
	updateFailureCount(updateEvent.State(), taskInfo.GetRuntime(), newRuntime)
	switch updateEvent.State() {
	case pb_task.TaskState_FAILED:
		reason := updateEvent.Reason()
		msg := updateEvent.Message()
		if reason == mesos.TaskStatus_REASON_TASK_INVALID.String() &&
			strings.Contains(msg, _msgMesosDuplicateID) {
			log.WithField("task_id", updateEvent.TaskID()).
				Info("ignoring duplicate task id failure")
			return nil
		}
		newRuntime.Reason = reason
		newRuntime.State = updateEvent.State()
		newRuntime.Message = msg
		// TODO p2k: can we build TerminationStatus from PodEvent?
		termStatus := &pb_task.TerminationStatus{
			Reason: pb_task.TerminationStatus_TERMINATION_STATUS_REASON_FAILED,
		}
		// Exit code / signal extraction is best effort: a NotFound error
		// just means the message carried no such detail.
		if code, err := taskutil.GetExitStatusFromMessage(msg); err == nil {
			termStatus.ExitCode = code
		} else if yarpcerrors.IsNotFound(err) == false {
			log.WithField("task_id", updateEvent.TaskID()).
				WithField("error", err).
				Debug("Failed to extract exit status from message")
		}
		if sig, err := taskutil.GetSignalFromMessage(msg); err == nil {
			termStatus.Signal = sig
		} else if yarpcerrors.IsNotFound(err) == false {
			log.WithField("task_id", updateEvent.TaskID()).
				WithField("error", err).
				Debug("Failed to extract termination signal from message")
		}
		newRuntime.TerminationStatus = termStatus
	case pb_task.TaskState_LOST:
		newRuntime.Reason = updateEvent.Reason()
		if util.IsPelotonStateTerminal(taskInfo.GetRuntime().GetState()) {
			// Skip LOST status update if current state is terminal state.
			log.WithFields(log.Fields{
				"task_id":           updateEvent.TaskID(),
				"db_task_runtime":   taskInfo.GetRuntime(),
				"task_status_event": updateEvent.MesosTaskStatus(),
			}).Debug("skip reschedule lost task as it is already in terminal state")
			return nil
		}
		if taskInfo.GetRuntime().GetGoalState() == pb_task.TaskState_KILLED {
			// Do not take any action for killed tasks, just mark it killed.
			// Same message will go to resource manager which will release the placement.
			log.WithFields(log.Fields{
				"task_id":           updateEvent.TaskID(),
				"db_task_runtime":   taskInfo.GetRuntime(),
				"task_status_event": updateEvent.MesosTaskStatus(),
			}).Debug("mark stopped task as killed due to LOST")
			newRuntime.State = pb_task.TaskState_KILLED
			newRuntime.Message = "Stopped task LOST event: " + updateEvent.StatusMsg()
			break
		}
		if taskInfo.GetConfig().GetVolume() != nil &&
			len(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {
			// Do not reschedule stateful task. Storage layer will decide
			// whether to start or replace this task.
			newRuntime.State = pb_task.TaskState_LOST
			break
		}
		log.WithFields(log.Fields{
			"task_id":           updateEvent.TaskID(),
			"db_task_runtime":   taskInfo.GetRuntime(),
			"task_status_event": updateEvent.MesosTaskStatus(),
		}).Info("reschedule lost task if needed")
		newRuntime.State = pb_task.TaskState_LOST
		newRuntime.Message = "Task LOST: " + updateEvent.StatusMsg()
		newRuntime.Reason = updateEvent.Reason()
		// Calculate resource usage for TaskState_LOST using time.Now() as
		// completion time
		currTaskResourceUsage = getCurrTaskResourceUsage(
			updateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),
			taskInfo.GetRuntime().GetStartTime(),
			now().UTC().Format(time.RFC3339Nano))
	default:
		newRuntime.State = updateEvent.State()
	}
	cachedJob := p.jobFactory.AddJob(taskInfo.GetJobId())
	// Update task start and completion timestamps
	if newRuntime.GetState() == pb_task.TaskState_RUNNING {
		if updateEvent.State() != taskInfo.GetRuntime().GetState() {
			// StartTime is set at the time of first RUNNING event
			// CompletionTime may have been set (e.g. task has been set),
			// which could make StartTime larger than CompletionTime.
			// Reset CompletionTime every time a task transits to RUNNING state.
			newRuntime.StartTime = now().UTC().Format(time.RFC3339Nano)
			newRuntime.CompletionTime = ""
			// when task is RUNNING, reset the desired host field. Therefore,
			// the task would be scheduled onto a different host when the task
			// restarts (e.g due to health check or fail retry)
			newRuntime.DesiredHost = ""
			if len(taskInfo.GetRuntime().GetDesiredHost()) != 0 {
				p.metrics.TasksInPlacePlacementTotal.Inc(1)
				if taskInfo.GetRuntime().GetDesiredHost() == taskInfo.GetRuntime().GetHost() {
					p.metrics.TasksInPlacePlacementSuccess.Inc(1)
				} else {
					log.WithField("job_id", taskInfo.GetJobId().GetValue()).
						WithField("instance_id", taskInfo.GetInstanceId()).
						Info("task fail to place on desired host")
				}
			}
		}
	} else if util.IsPelotonStateTerminal(newRuntime.GetState()) &&
		cachedJob.GetJobType() == pbjob.JobType_BATCH {
		// only update resource count when a batch job is in terminal state
		completionTime := now().UTC().Format(time.RFC3339Nano)
		newRuntime.CompletionTime = completionTime
		currTaskResourceUsage = getCurrTaskResourceUsage(
			updateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),
			taskInfo.GetRuntime().GetStartTime(), completionTime)
		if len(currTaskResourceUsage) > 0 {
			// current task resource usage was updated by this event, so we should
			// add it to aggregated resource usage for the task and update runtime
			aggregateTaskResourceUsage := taskInfo.GetRuntime().GetResourceUsage()
			if len(aggregateTaskResourceUsage) > 0 {
				for k, v := range currTaskResourceUsage {
					aggregateTaskResourceUsage[k] += v
				}
				newRuntime.ResourceUsage = aggregateTaskResourceUsage
			}
		}
	} else if cachedJob.GetJobType() == pbjob.JobType_SERVICE {
		// for service job, reset resource usage
		currTaskResourceUsage = nil
		newRuntime.ResourceUsage = nil
	}
	// Update the task update times in job cache and then update the task runtime in cache and DB
	cachedJob.SetTaskUpdateTime(updateEvent.Timestamp())
	if _, err = cachedJob.CompareAndSetTask(
		ctx,
		taskInfo.GetInstanceId(),
		newRuntime,
		false,
	); err != nil {
		log.WithError(err).
			WithFields(log.Fields{
				"task_id": updateEvent.TaskID(),
				"state":   updateEvent.State().String()}).
			Error("Fail to update runtime for taskID")
		return err
	}
	// Enqueue task to goal state
	p.goalStateDriver.EnqueueTask(
		taskInfo.GetJobId(),
		taskInfo.GetInstanceId(),
		time.Now())
	// Enqueue job to goal state as well
	goalstate.EnqueueJobWithDefaultDelay(
		taskInfo.GetJobId(), p.goalStateDriver, cachedJob)
	// Update job's resource usage with the current task resource usage.
	// This is a noop in case currTaskResourceUsage is nil
	// This operation is not idempotent. So we will update job resource usage
	// in cache only after successfully updating task resource usage in DB
	// In case of errors in PatchTasks(), ProcessStatusUpdate will be retried
	// indefinitely until errors are resolved.
	cachedJob.UpdateResourceUsage(currTaskResourceUsage)
	return nil
}
// logTaskMetrics logs events metrics
func (p *statusUpdate) logTaskMetrics(event *statusupdate.Event) {
if event.V0() == nil {
return
}
// Update task state counter for non-reconcilication update.
reason := event.MesosTaskStatus().GetReason()
if reason != mesos.TaskStatus_REASON_RECONCILIATION {
switch event.State() {
case pb_task.TaskState_RUNNING:
p.metrics.TasksRunningTotal.Inc(1)
case pb_task.TaskState_SUCCEEDED:
p.metrics.TasksSucceededTotal.Inc(1)
case pb_task.TaskState_FAILED:
p.metrics.TasksFailedTotal.Inc(1)
p.metrics.TasksFailedReason[int32(reason)].Inc(1)
log.WithFields(log.Fields{
"task_id": event.TaskID(),
"failed_reason": mesos.TaskStatus_Reason_name[int32(reason)],
}).Debug("received failed task")
case pb_task.TaskState_KILLED:
p.metrics.TasksKilledTotal.Inc(1)
case pb_task.TaskState_LOST:
p.metrics.TasksLostTotal.Inc(1)
case pb_task.TaskState_LAUNCHED:
p.metrics.TasksLaunchedTotal.Inc(1)
case pb_task.TaskState_STARTING:
p.metrics.TasksStartingTotal.Inc(1)
}
} else {
p.metrics.TasksReconciledTotal.Inc(1)
}
}
// isOrphanTaskEvent returns if a task event is from orphan task,
// it returns the TaskInfo if task is not orphan
func (p *statusUpdate) isOrphanTaskEvent(
ctx context.Context,
event *statusupdate.Event,
) (bool, *pb_task.TaskInfo, error) {
taskInfo, err := p.taskStore.GetTaskByID(ctx, event.TaskID())
if err != nil {
if yarpcerrors.IsNotFound(err) {
// if task runtime or config is not present in the DB,
// then the task is orphan
log.WithFields(log.Fields{
"mesos_task_id": event.MesosTaskStatus(),
"task_status_event≠": event.State().String(),
}).Info("received status update for task not found in DB")
return true, nil, nil
}
log.WithError(err).
WithField("task_id", event.TaskID()).
WithField("task_status_event", event.MesosTaskStatus()).
WithField("state", event.State().String()).
Error("fail to find taskInfo for taskID for mesos event")
return false, nil, err
}
// TODO p2k: verify v1 pod id in taskInfo
if event.V0() != nil {
dbTaskID := taskInfo.GetRuntime().GetMesosTaskId().GetValue()
if dbTaskID != event.MesosTaskStatus().GetTaskId().GetValue() {
log.WithFields(log.Fields{
"orphan_task_id": event.MesosTaskStatus().GetTaskId().GetValue(),
"db_task_id": dbTaskID,
"db_task_runtime_state": taskInfo.GetRuntime().GetState().String(),
"mesos_event_state": event.State().String(),
}).Info("received status update for orphan mesos task")
return true, nil, nil
}
}
return false, taskInfo, nil
}
// updatePersistentVolumeState updates volume state to be CREATED.
func (p *statusUpdate) updatePersistentVolumeState(ctx context.Context, taskInfo *pb_task.TaskInfo) error {
// Update volume state to be created if task enters RUNNING state.
volumeInfo, err := p.volumeStore.GetPersistentVolume(ctx, taskInfo.GetRuntime().GetVolumeID())
if err != nil {
log.WithError(err).WithFields(log.Fields{
"job_id": taskInfo.GetJobId().GetValue(),
"instance_id": taskInfo.GetInstanceId(),
"db_task_runtime": taskInfo.GetRuntime(),
"volume_id": taskInfo.GetRuntime().GetVolumeID(),
}).Error("Failed to read db for given volume")
_, ok := err.(*storage.VolumeNotFoundError)
if !ok {
// Do not ack status update running if db read error.
return err
}
return nil
}
// Do not update volume db if state is already CREATED or goalstate is DELETED.
if volumeInfo.GetState() == volume.VolumeState_CREATED ||
volumeInfo.GetGoalState() == volume.VolumeState_DELETED {
return nil
}
volumeInfo.State = volume.VolumeState_CREATED
return p.volumeStore.UpdatePersistentVolume(ctx, volumeInfo)
}
// ProcessListeners is for v0 only as we will remove the eventforwarder in v1.
func (p *statusUpdate) ProcessListeners(event *statusupdate.Event) {
if event != nil && event.V1() != nil {
return
}
for _, listener := range p.listeners {
listener.OnV0Events([]*pb_eventstream.Event{event.V0()})
}
}
// OnEvents is the callback function notifying a batch of events
func (p *statusUpdate) OnV0Events(events []*pb_eventstream.Event) {}
func (p *statusUpdate) OnV1Events(events []*v1pbevent.Event) {}
// Start starts processing status update events
func (p *statusUpdate) Start() {
p.applier.start()
for _, client := range p.eventClients {
client.Start()
}
log.Info("Task status updater started")
for _, listener := range p.listeners {
listener.Start()
}
}
// Stop stops processing status update events
func (p *statusUpdate) Stop() {
for _, client := range p.eventClients {
client.Stop()
}
log.Info("Task status updater stopped")
for _, listener := range p.listeners {
listener.Stop()
}
p.applier.drainAndShutdown()
}
func getCurrTaskResourceUsage(taskID string, state pb_task.TaskState,
resourceCfg *pb_task.ResourceConfig,
startTime, completionTime string) map[string]float64 {
currTaskResourceUsage, err := jobmgr_task.CreateResourceUsageMap(
resourceCfg, startTime, completionTime)
if err != nil {
// only log the error here and continue processing the event
// in this case resource usage map will be nil
log.WithError(err).
WithFields(log.Fields{
"task_id": taskID,
"state": state}).
Error("failed to calculate resource usage")
}
return currTaskResourceUsage
}
// persistHealthyField update the healthy field in runtimeDiff
func (p *statusUpdate) persistHealthyField(
state pb_task.TaskState,
reason string,
healthy bool,
newRuntime *pb_task.RuntimeInfo) {
switch {
case util.IsPelotonStateTerminal(state):
// Set healthy to INVALID for all terminal state
newRuntime.Healthy = pb_task.HealthState_INVALID
case state == pb_task.TaskState_RUNNING:
// Only record the health check result when
// the reason for the event is TASK_HEALTH_CHECK_STATUS_UPDATED
if reason == mesos.TaskStatus_REASON_TASK_HEALTH_CHECK_STATUS_UPDATED.String() {
newRuntime.Reason = reason
if healthy {
newRuntime.Healthy = pb_task.HealthState_HEALTHY
p.metrics.TasksHealthyTotal.Inc(1)
} else {
newRuntime.Healthy = pb_task.HealthState_UNHEALTHY
p.metrics.TasksUnHealthyTotal.Inc(1)
}
}
}
}
func updateFailureCount(
eventState pb_task.TaskState,
runtime *pb_task.RuntimeInfo,
newRuntime *pb_task.RuntimeInfo) {
if !util.IsPelotonStateTerminal(eventState) {
return
}
if runtime.GetConfigVersion() != runtime.GetDesiredConfigVersion() {
// do not increment the failure count if config version has changed
return
}
switch {
case eventState == pb_task.TaskState_FAILED:
newRuntime.FailureCount = runtime.GetFailureCount() + 1
case eventState == pb_task.TaskState_SUCCEEDED &&
runtime.GetGoalState() == pb_task.TaskState_RUNNING:
newRuntime.FailureCount = runtime.GetFailureCount() + 1
case eventState == pb_task.TaskState_KILLED &&
runtime.GetGoalState() != pb_task.TaskState_KILLED:
// This KILLED event is unexpected
newRuntime.FailureCount = runtime.GetFailureCount() + 1
}
}
// isDuplicateStateUpdate validates if the current instance state is left unchanged
// by this status update.
// If it is left unchanged, then the status update should be ignored.
// The state is said to be left unchanged
// if any of the following conditions is satisfied.
//
// 1. State is the same and that state is not running.
// 2. State is the same, that state is running, and health check is not configured.
// 3. State is the same, that state is running, and the update is not due to health check result.
// 4. State is the same, that state is running, the update is due to health check result and the task is healthy.
//
// Each unhealthy state needs to be logged into the pod events table.
func isDuplicateStateUpdate(
taskInfo *pb_task.TaskInfo,
updateEvent *statusupdate.Event,
) bool {
if updateEvent.State() != taskInfo.GetRuntime().GetState() {
return false
}
mesosTaskStatus := updateEvent.MesosTaskStatus()
podEvent := updateEvent.PodEvent()
if updateEvent.State() != pb_task.TaskState_RUNNING {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if state is not RUNNING")
return true
}
if taskInfo.GetConfig().GetHealthCheck() == nil ||
!taskInfo.GetConfig().GetHealthCheck().GetEnabled() {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if health check is not configured or " +
"disabled")
return true
}
newStateReason := updateEvent.Reason()
// TODO p2k: not sure which kubelet reason matches this.
// Should we skip some status updates from kubelets?
if newStateReason != mesos.TaskStatus_REASON_TASK_HEALTH_CHECK_STATUS_UPDATED.String() {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if status update reason is not from health check")
return true
}
// Current behavior will log consecutive negative health check results
// ToDo (varung): Evaluate if consecutive negative results should be logged or not
isPreviousStateHealthy := taskInfo.GetRuntime().GetHealthy() == pb_task.HealthState_HEALTHY
if !isPreviousStateHealthy {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("log each negative health check result")
return false
}
if updateEvent.Healthy() == isPreviousStateHealthy {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if health check result is positive consecutively")
return true
}
return false
} | // OnV1Event is the callback function notifying an event
func (p *statusUpdate) OnV1Event(event *v1pbevent.Event) {
log.WithField("event_offset", event.Offset).Debug("JobMgr received v1 event")
p.applier.addV1Event(event)
} | random_line_split |
update.go | // Copyright (c) 2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package event
import (
"context"
"strings"
"time"
mesos "github.com/uber/peloton/.gen/mesos/v1"
pbjob "github.com/uber/peloton/.gen/peloton/api/v0/job"
pb_task "github.com/uber/peloton/.gen/peloton/api/v0/task"
"github.com/uber/peloton/.gen/peloton/api/v0/volume"
pb_eventstream "github.com/uber/peloton/.gen/peloton/private/eventstream"
pbeventstream "github.com/uber/peloton/.gen/peloton/private/eventstream"
v1pbevent "github.com/uber/peloton/.gen/peloton/private/eventstream/v1alpha/event"
"github.com/uber/peloton/pkg/common"
"github.com/uber/peloton/pkg/common/api"
"github.com/uber/peloton/pkg/common/eventstream"
"github.com/uber/peloton/pkg/common/statusupdate"
"github.com/uber/peloton/pkg/common/util"
v1eventstream "github.com/uber/peloton/pkg/common/v1alpha/eventstream"
"github.com/uber/peloton/pkg/jobmgr/cached"
"github.com/uber/peloton/pkg/jobmgr/goalstate"
jobmgr_task "github.com/uber/peloton/pkg/jobmgr/task"
"github.com/uber/peloton/pkg/jobmgr/task/lifecyclemgr"
taskutil "github.com/uber/peloton/pkg/jobmgr/util/task"
"github.com/uber/peloton/pkg/storage"
"github.com/gogo/protobuf/proto"
log "github.com/sirupsen/logrus"
"github.com/uber-go/tally"
"go.uber.org/yarpc"
"go.uber.org/yarpc/yarpcerrors"
)
const (
// Mesos event message that indicates duplicate task ID
_msgMesosDuplicateID = "Task has duplicate ID"
// _numOrphanTaskKillAttempts is number of attempts to
// kill orphan task in case of error from host manager
_numOrphanTaskKillAttempts = 3
// _waitForRetryOnError is the time between successive retries
// to kill orphan task in case of error from host manager
_waitForRetryOnErrorOrphanTaskKill = 5 * time.Millisecond
)
// Declare a Now function so that we can mock it in unit tests.
var now = time.Now
// StatusUpdate is the interface for task status updates
type StatusUpdate interface {
Start()
Stop()
}
// Listener is the interface for StatusUpdate listener
type Listener interface {
OnV0Events(events []*pbeventstream.Event)
Start()
Stop()
}
// StatusUpdate reads and processes the task state change events from HM
type statusUpdate struct {
jobStore storage.JobStore
taskStore storage.TaskStore
volumeStore storage.PersistentVolumeStore
eventClients map[string]StatusUpdate
lm lifecyclemgr.Manager
applier *asyncEventProcessor
jobFactory cached.JobFactory
goalStateDriver goalstate.Driver
listeners []Listener
rootCtx context.Context
metrics *Metrics
}
// NewTaskStatusUpdate creates a statusUpdate
func NewTaskStatusUpdate(
d *yarpc.Dispatcher,
jobStore storage.JobStore,
taskStore storage.TaskStore,
volumeStore storage.PersistentVolumeStore,
jobFactory cached.JobFactory,
goalStateDriver goalstate.Driver,
listeners []Listener,
parentScope tally.Scope,
hmVersion api.Version,
) StatusUpdate {
statusUpdater := &statusUpdate{
jobStore: jobStore,
taskStore: taskStore,
volumeStore: volumeStore,
rootCtx: context.Background(),
metrics: NewMetrics(parentScope.SubScope("status_updater")),
eventClients: make(map[string]StatusUpdate),
jobFactory: jobFactory,
goalStateDriver: goalStateDriver,
listeners: listeners,
lm: lifecyclemgr.New(hmVersion, d, parentScope),
}
// TODO: add config for BucketEventProcessor
statusUpdater.applier = newBucketEventProcessor(statusUpdater, 100, 10000)
if hmVersion.IsV1() {
v1eventClient := v1eventstream.NewEventStreamClient(
d,
common.PelotonJobManager,
common.PelotonHostManager,
statusUpdater,
parentScope.SubScope("HostmgrV1EventStreamClient"))
statusUpdater.eventClients[common.PelotonV1HostManager] = v1eventClient
} else {
eventClient := eventstream.NewEventStreamClient(
d,
common.PelotonJobManager,
common.PelotonHostManager,
statusUpdater,
parentScope.SubScope("HostmgrEventStreamClient"))
statusUpdater.eventClients[common.PelotonHostManager] = eventClient
}
eventClientRM := eventstream.NewEventStreamClient(
d,
common.PelotonJobManager,
common.PelotonResourceManager,
statusUpdater,
parentScope.SubScope("ResmgrEventStreamClient"))
statusUpdater.eventClients[common.PelotonResourceManager] = eventClientRM
return statusUpdater
}
// OnV0Event is the callback function notifying an event
func (p *statusUpdate) OnV0Event(event *pb_eventstream.Event) {
log.WithField("event_offset", event.Offset).Debug("JobMgr received v0 event")
if event.GetType() != pbeventstream.Event_HOST_EVENT {
p.applier.addV0Event(event)
}
}
// OnV1Event is the callback function notifying an event
func (p *statusUpdate) OnV1Event(event *v1pbevent.Event) {
log.WithField("event_offset", event.Offset).Debug("JobMgr received v1 event")
p.applier.addV1Event(event)
}
// GetEventProgress returns the progress of the event progressing
func (p *statusUpdate) GetEventProgress() uint64 {
return p.applier.GetEventProgress()
}
// ProcessStatusUpdate processes the actual task status
func (p *statusUpdate) ProcessStatusUpdate(
ctx context.Context,
updateEvent *statusupdate.Event,
) error {
var currTaskResourceUsage map[string]float64
p.logTaskMetrics(updateEvent)
isOrphanTask, taskInfo, err := p.isOrphanTaskEvent(ctx, updateEvent)
if err != nil {
return err
}
if isOrphanTask {
p.metrics.SkipOrphanTasksTotal.Inc(1)
taskInfo := &pb_task.TaskInfo{
Runtime: &pb_task.RuntimeInfo{
State: updateEvent.State(),
MesosTaskId: updateEvent.MesosTaskID(),
AgentID: updateEvent.AgentID(),
},
}
// Kill the orphan task
for i := 0; i < _numOrphanTaskKillAttempts; i++ {
err = jobmgr_task.KillOrphanTask(ctx, p.lm, taskInfo)
if err == nil {
return nil
}
time.Sleep(_waitForRetryOnErrorOrphanTaskKill)
}
return nil
}
// whether to skip or not if instance state is similar before and after
if isDuplicateStateUpdate(taskInfo, updateEvent) {
return nil
}
if updateEvent.State() == pb_task.TaskState_RUNNING &&
taskInfo.GetConfig().GetVolume() != nil &&
len(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {
// Update volume state to be CREATED upon task RUNNING.
if err := p.updatePersistentVolumeState(ctx, taskInfo); err != nil {
return err
}
}
newRuntime := proto.Clone(taskInfo.GetRuntime()).(*pb_task.RuntimeInfo)
// Persist the reason and message for mesos updates
newRuntime.Message = updateEvent.StatusMsg()
newRuntime.Reason = ""
// Persist healthy field if health check is enabled
if taskInfo.GetConfig().GetHealthCheck() != nil {
reason := updateEvent.Reason()
healthy := updateEvent.Healthy()
p.persistHealthyField(updateEvent.State(), reason, healthy, newRuntime)
}
// Update FailureCount
updateFailureCount(updateEvent.State(), taskInfo.GetRuntime(), newRuntime)
switch updateEvent.State() {
case pb_task.TaskState_FAILED:
reason := updateEvent.Reason()
msg := updateEvent.Message()
if reason == mesos.TaskStatus_REASON_TASK_INVALID.String() &&
strings.Contains(msg, _msgMesosDuplicateID) {
log.WithField("task_id", updateEvent.TaskID()).
Info("ignoring duplicate task id failure")
return nil
}
newRuntime.Reason = reason
newRuntime.State = updateEvent.State()
newRuntime.Message = msg
// TODO p2k: can we build TerminationStatus from PodEvent?
termStatus := &pb_task.TerminationStatus{
Reason: pb_task.TerminationStatus_TERMINATION_STATUS_REASON_FAILED,
}
if code, err := taskutil.GetExitStatusFromMessage(msg); err == nil {
termStatus.ExitCode = code
} else if yarpcerrors.IsNotFound(err) == false {
log.WithField("task_id", updateEvent.TaskID()).
WithField("error", err).
Debug("Failed to extract exit status from message")
}
if sig, err := taskutil.GetSignalFromMessage(msg); err == nil {
termStatus.Signal = sig
} else if yarpcerrors.IsNotFound(err) == false {
log.WithField("task_id", updateEvent.TaskID()).
WithField("error", err).
Debug("Failed to extract termination signal from message")
}
newRuntime.TerminationStatus = termStatus
case pb_task.TaskState_LOST:
newRuntime.Reason = updateEvent.Reason()
if util.IsPelotonStateTerminal(taskInfo.GetRuntime().GetState()) {
// Skip LOST status update if current state is terminal state.
log.WithFields(log.Fields{
"task_id": updateEvent.TaskID(),
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": updateEvent.MesosTaskStatus(),
}).Debug("skip reschedule lost task as it is already in terminal state")
return nil
}
if taskInfo.GetRuntime().GetGoalState() == pb_task.TaskState_KILLED {
// Do not take any action for killed tasks, just mark it killed.
// Same message will go to resource manager which will release the placement.
log.WithFields(log.Fields{
"task_id": updateEvent.TaskID(),
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": updateEvent.MesosTaskStatus(),
}).Debug("mark stopped task as killed due to LOST")
newRuntime.State = pb_task.TaskState_KILLED
newRuntime.Message = "Stopped task LOST event: " + updateEvent.StatusMsg()
break
}
if taskInfo.GetConfig().GetVolume() != nil &&
len(taskInfo.GetRuntime().GetVolumeID().GetValue()) != 0 {
// Do not reschedule stateful task. Storage layer will decide
// whether to start or replace this task.
newRuntime.State = pb_task.TaskState_LOST
break
}
log.WithFields(log.Fields{
"task_id": updateEvent.TaskID(),
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": updateEvent.MesosTaskStatus(),
}).Info("reschedule lost task if needed")
newRuntime.State = pb_task.TaskState_LOST
newRuntime.Message = "Task LOST: " + updateEvent.StatusMsg()
newRuntime.Reason = updateEvent.Reason()
// Calculate resource usage for TaskState_LOST using time.Now() as
// completion time
currTaskResourceUsage = getCurrTaskResourceUsage(
updateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),
taskInfo.GetRuntime().GetStartTime(),
now().UTC().Format(time.RFC3339Nano))
default:
newRuntime.State = updateEvent.State()
}
cachedJob := p.jobFactory.AddJob(taskInfo.GetJobId())
// Update task start and completion timestamps
if newRuntime.GetState() == pb_task.TaskState_RUNNING {
if updateEvent.State() != taskInfo.GetRuntime().GetState() {
// StartTime is set at the time of first RUNNING event
// CompletionTime may have been set (e.g. task has been set),
// which could make StartTime larger than CompletionTime.
// Reset CompletionTime every time a task transits to RUNNING state.
newRuntime.StartTime = now().UTC().Format(time.RFC3339Nano)
newRuntime.CompletionTime = ""
// when task is RUNNING, reset the desired host field. Therefore,
// the task would be scheduled onto a different host when the task
// restarts (e.g due to health check or fail retry)
newRuntime.DesiredHost = ""
if len(taskInfo.GetRuntime().GetDesiredHost()) != 0 {
p.metrics.TasksInPlacePlacementTotal.Inc(1)
if taskInfo.GetRuntime().GetDesiredHost() == taskInfo.GetRuntime().GetHost() {
p.metrics.TasksInPlacePlacementSuccess.Inc(1)
} else {
log.WithField("job_id", taskInfo.GetJobId().GetValue()).
WithField("instance_id", taskInfo.GetInstanceId()).
Info("task fail to place on desired host")
}
}
}
} else if util.IsPelotonStateTerminal(newRuntime.GetState()) &&
cachedJob.GetJobType() == pbjob.JobType_BATCH {
// only update resource count when a batch job is in terminal state
completionTime := now().UTC().Format(time.RFC3339Nano)
newRuntime.CompletionTime = completionTime
currTaskResourceUsage = getCurrTaskResourceUsage(
updateEvent.TaskID(), updateEvent.State(), taskInfo.GetConfig().GetResource(),
taskInfo.GetRuntime().GetStartTime(), completionTime)
if len(currTaskResourceUsage) > 0 {
// current task resource usage was updated by this event, so we should
// add it to aggregated resource usage for the task and update runtime
aggregateTaskResourceUsage := taskInfo.GetRuntime().GetResourceUsage()
if len(aggregateTaskResourceUsage) > 0 {
for k, v := range currTaskResourceUsage {
aggregateTaskResourceUsage[k] += v
}
newRuntime.ResourceUsage = aggregateTaskResourceUsage
}
}
} else if cachedJob.GetJobType() == pbjob.JobType_SERVICE {
// for service job, reset resource usage
currTaskResourceUsage = nil
newRuntime.ResourceUsage = nil
}
// Update the task update times in job cache and then update the task runtime in cache and DB
cachedJob.SetTaskUpdateTime(updateEvent.Timestamp())
if _, err = cachedJob.CompareAndSetTask(
ctx,
taskInfo.GetInstanceId(),
newRuntime,
false,
); err != nil {
log.WithError(err).
WithFields(log.Fields{
"task_id": updateEvent.TaskID(),
"state": updateEvent.State().String()}).
Error("Fail to update runtime for taskID")
return err
}
// Enqueue task to goal state
p.goalStateDriver.EnqueueTask(
taskInfo.GetJobId(),
taskInfo.GetInstanceId(),
time.Now())
// Enqueue job to goal state as well
goalstate.EnqueueJobWithDefaultDelay(
taskInfo.GetJobId(), p.goalStateDriver, cachedJob)
// Update job's resource usage with the current task resource usage.
// This is a noop in case currTaskResourceUsage is nil
// This operation is not idempotent. So we will update job resource usage
// in cache only after successfully updating task resource usage in DB
// In case of errors in PatchTasks(), ProcessStatusUpdate will be retried
// indefinitely until errors are resolved.
cachedJob.UpdateResourceUsage(currTaskResourceUsage)
return nil
}
// logTaskMetrics logs events metrics
func (p *statusUpdate) logTaskMetrics(event *statusupdate.Event) {
if event.V0() == nil {
return
}
// Update task state counter for non-reconcilication update.
reason := event.MesosTaskStatus().GetReason()
if reason != mesos.TaskStatus_REASON_RECONCILIATION {
switch event.State() {
case pb_task.TaskState_RUNNING:
p.metrics.TasksRunningTotal.Inc(1)
case pb_task.TaskState_SUCCEEDED:
p.metrics.TasksSucceededTotal.Inc(1)
case pb_task.TaskState_FAILED:
p.metrics.TasksFailedTotal.Inc(1)
p.metrics.TasksFailedReason[int32(reason)].Inc(1)
log.WithFields(log.Fields{
"task_id": event.TaskID(),
"failed_reason": mesos.TaskStatus_Reason_name[int32(reason)],
}).Debug("received failed task")
case pb_task.TaskState_KILLED:
p.metrics.TasksKilledTotal.Inc(1)
case pb_task.TaskState_LOST:
p.metrics.TasksLostTotal.Inc(1)
case pb_task.TaskState_LAUNCHED:
p.metrics.TasksLaunchedTotal.Inc(1)
case pb_task.TaskState_STARTING:
p.metrics.TasksStartingTotal.Inc(1)
}
} else {
p.metrics.TasksReconciledTotal.Inc(1)
}
}
// isOrphanTaskEvent returns if a task event is from orphan task,
// it returns the TaskInfo if task is not orphan
func (p *statusUpdate) isOrphanTaskEvent(
ctx context.Context,
event *statusupdate.Event,
) (bool, *pb_task.TaskInfo, error) {
taskInfo, err := p.taskStore.GetTaskByID(ctx, event.TaskID())
if err != nil {
if yarpcerrors.IsNotFound(err) {
// if task runtime or config is not present in the DB,
// then the task is orphan
log.WithFields(log.Fields{
"mesos_task_id": event.MesosTaskStatus(),
"task_status_event≠": event.State().String(),
}).Info("received status update for task not found in DB")
return true, nil, nil
}
log.WithError(err).
WithField("task_id", event.TaskID()).
WithField("task_status_event", event.MesosTaskStatus()).
WithField("state", event.State().String()).
Error("fail to find taskInfo for taskID for mesos event")
return false, nil, err
}
// TODO p2k: verify v1 pod id in taskInfo
if event.V0() != nil {
dbTaskID := taskInfo.GetRuntime().GetMesosTaskId().GetValue()
if dbTaskID != event.MesosTaskStatus().GetTaskId().GetValue() {
log.WithFields(log.Fields{
"orphan_task_id": event.MesosTaskStatus().GetTaskId().GetValue(),
"db_task_id": dbTaskID,
"db_task_runtime_state": taskInfo.GetRuntime().GetState().String(),
"mesos_event_state": event.State().String(),
}).Info("received status update for orphan mesos task")
return true, nil, nil
}
}
return false, taskInfo, nil
}
// updatePersistentVolumeState updates volume state to be CREATED.
func (p *statusUpdate) updatePersistentVolumeState(ctx context.Context, taskInfo *pb_task.TaskInfo) error {
| // ProcessListeners is for v0 only as we will remove the eventforwarder in v1.
func (p *statusUpdate) ProcessListeners(event *statusupdate.Event) {
if event != nil && event.V1() != nil {
return
}
for _, listener := range p.listeners {
listener.OnV0Events([]*pb_eventstream.Event{event.V0()})
}
}
// OnEvents is the callback function notifying a batch of events
func (p *statusUpdate) OnV0Events(events []*pb_eventstream.Event) {}
func (p *statusUpdate) OnV1Events(events []*v1pbevent.Event) {}
// Start starts processing status update events
func (p *statusUpdate) Start() {
p.applier.start()
for _, client := range p.eventClients {
client.Start()
}
log.Info("Task status updater started")
for _, listener := range p.listeners {
listener.Start()
}
}
// Stop stops processing status update events
func (p *statusUpdate) Stop() {
for _, client := range p.eventClients {
client.Stop()
}
log.Info("Task status updater stopped")
for _, listener := range p.listeners {
listener.Stop()
}
p.applier.drainAndShutdown()
}
func getCurrTaskResourceUsage(taskID string, state pb_task.TaskState,
resourceCfg *pb_task.ResourceConfig,
startTime, completionTime string) map[string]float64 {
currTaskResourceUsage, err := jobmgr_task.CreateResourceUsageMap(
resourceCfg, startTime, completionTime)
if err != nil {
// only log the error here and continue processing the event
// in this case resource usage map will be nil
log.WithError(err).
WithFields(log.Fields{
"task_id": taskID,
"state": state}).
Error("failed to calculate resource usage")
}
return currTaskResourceUsage
}
// persistHealthyField update the healthy field in runtimeDiff
func (p *statusUpdate) persistHealthyField(
state pb_task.TaskState,
reason string,
healthy bool,
newRuntime *pb_task.RuntimeInfo) {
switch {
case util.IsPelotonStateTerminal(state):
// Set healthy to INVALID for all terminal state
newRuntime.Healthy = pb_task.HealthState_INVALID
case state == pb_task.TaskState_RUNNING:
// Only record the health check result when
// the reason for the event is TASK_HEALTH_CHECK_STATUS_UPDATED
if reason == mesos.TaskStatus_REASON_TASK_HEALTH_CHECK_STATUS_UPDATED.String() {
newRuntime.Reason = reason
if healthy {
newRuntime.Healthy = pb_task.HealthState_HEALTHY
p.metrics.TasksHealthyTotal.Inc(1)
} else {
newRuntime.Healthy = pb_task.HealthState_UNHEALTHY
p.metrics.TasksUnHealthyTotal.Inc(1)
}
}
}
}
func updateFailureCount(
eventState pb_task.TaskState,
runtime *pb_task.RuntimeInfo,
newRuntime *pb_task.RuntimeInfo) {
if !util.IsPelotonStateTerminal(eventState) {
return
}
if runtime.GetConfigVersion() != runtime.GetDesiredConfigVersion() {
// do not increment the failure count if config version has changed
return
}
switch {
case eventState == pb_task.TaskState_FAILED:
newRuntime.FailureCount = runtime.GetFailureCount() + 1
case eventState == pb_task.TaskState_SUCCEEDED &&
runtime.GetGoalState() == pb_task.TaskState_RUNNING:
newRuntime.FailureCount = runtime.GetFailureCount() + 1
case eventState == pb_task.TaskState_KILLED &&
runtime.GetGoalState() != pb_task.TaskState_KILLED:
// This KILLED event is unexpected
newRuntime.FailureCount = runtime.GetFailureCount() + 1
}
}
// isDuplicateStateUpdate validates if the current instance state is left unchanged
// by this status update.
// If it is left unchanged, then the status update should be ignored.
// The state is said to be left unchanged
// if any of the following conditions is satisfied.
//
// 1. State is the same and that state is not running.
// 2. State is the same, that state is running, and health check is not configured.
// 3. State is the same, that state is running, and the update is not due to health check result.
// 4. State is the same, that state is running, the update is due to health check result and the task is healthy.
//
// Each unhealthy state needs to be logged into the pod events table.
func isDuplicateStateUpdate(
taskInfo *pb_task.TaskInfo,
updateEvent *statusupdate.Event,
) bool {
if updateEvent.State() != taskInfo.GetRuntime().GetState() {
return false
}
mesosTaskStatus := updateEvent.MesosTaskStatus()
podEvent := updateEvent.PodEvent()
if updateEvent.State() != pb_task.TaskState_RUNNING {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if state is not RUNNING")
return true
}
if taskInfo.GetConfig().GetHealthCheck() == nil ||
!taskInfo.GetConfig().GetHealthCheck().GetEnabled() {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if health check is not configured or " +
"disabled")
return true
}
newStateReason := updateEvent.Reason()
// TODO p2k: not sure which kubelet reason matches this.
// Should we skip some status updates from kubelets?
if newStateReason != mesos.TaskStatus_REASON_TASK_HEALTH_CHECK_STATUS_UPDATED.String() {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if status update reason is not from health check")
return true
}
// Current behavior will log consecutive negative health check results
// ToDo (varung): Evaluate if consecutive negative results should be logged or not
isPreviousStateHealthy := taskInfo.GetRuntime().GetHealthy() == pb_task.HealthState_HEALTHY
if !isPreviousStateHealthy {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("log each negative health check result")
return false
}
if updateEvent.Healthy() == isPreviousStateHealthy {
log.WithFields(log.Fields{
"db_task_runtime": taskInfo.GetRuntime(),
"task_status_event": mesosTaskStatus,
"pod_event": podEvent,
}).Debug("skip same status update if health check result is positive consecutively")
return true
}
return false
}
| // Update volume state to be created if task enters RUNNING state.
volumeInfo, err := p.volumeStore.GetPersistentVolume(ctx, taskInfo.GetRuntime().GetVolumeID())
if err != nil {
log.WithError(err).WithFields(log.Fields{
"job_id": taskInfo.GetJobId().GetValue(),
"instance_id": taskInfo.GetInstanceId(),
"db_task_runtime": taskInfo.GetRuntime(),
"volume_id": taskInfo.GetRuntime().GetVolumeID(),
}).Error("Failed to read db for given volume")
_, ok := err.(*storage.VolumeNotFoundError)
if !ok {
// Do not ack status update running if db read error.
return err
}
return nil
}
// Do not update volume db if state is already CREATED or goalstate is DELETED.
if volumeInfo.GetState() == volume.VolumeState_CREATED ||
volumeInfo.GetGoalState() == volume.VolumeState_DELETED {
return nil
}
volumeInfo.State = volume.VolumeState_CREATED
return p.volumeStore.UpdatePersistentVolume(ctx, volumeInfo)
}
| identifier_body |
parser.go | package parser
import (
"bytes"
"fmt"
"github.com/go-graphite/carbonapi/expr/holtwinters"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/ansel1/merry"
)
// expression parser
type expr struct {
target string
etype ExprType
val float64
valStr string
args []*expr // positional
namedArgs map[string]*expr
argString string
}
func (e *expr) IsName() bool {
return e.etype == EtName
}
func (e *expr) IsFunc() bool {
return e.etype == EtFunc
}
func (e *expr) IsConst() bool {
return e.etype == EtConst
}
func (e *expr) IsString() bool {
return e.etype == EtString
}
func (e *expr) IsBool() bool {
return e.etype == EtBool
}
func (e *expr) Type() ExprType {
return e.etype
}
func (e *expr) ToString() string {
switch e.etype {
case EtFunc:
return e.target + "(" + e.argString + ")"
case EtConst:
return e.valStr
case EtString:
s := e.valStr
s = strings.ReplaceAll(s, `\`, `\\`)
s = strings.ReplaceAll(s, `'`, `\'`)
return "'" + s + "'"
case EtBool:
return fmt.Sprint(e.val)
}
return e.target
}
func (e *expr) SetTarget(target string) {
e.target = target
}
func (e *expr) MutateTarget(target string) Expr {
e.SetTarget(target)
return e
}
func (e *expr) | () string {
return e.target
}
func (e *expr) FloatValue() float64 {
return e.val
}
func (e *expr) StringValue() string {
return e.valStr
}
func (e *expr) SetValString(value string) {
e.valStr = value
}
func (e *expr) MutateValString(value string) Expr {
e.SetValString(value)
return e
}
func (e *expr) RawArgs() string {
return e.argString
}
func (e *expr) SetRawArgs(args string) {
e.argString = args
}
func (e *expr) MutateRawArgs(args string) Expr {
e.SetRawArgs(args)
return e
}
func (e *expr) Args() []Expr {
ret := make([]Expr, len(e.args))
for i := 0; i < len(e.args); i++ {
ret[i] = e.args[i]
}
return ret
}
func (e *expr) Arg(i int) Expr {
return e.args[i]
}
func (e *expr) ArgsLen() int {
return len(e.args)
}
func (e *expr) NamedArgs() map[string]Expr {
ret := make(map[string]Expr)
for k, v := range e.namedArgs {
ret[k] = v
}
return ret
}
func (e *expr) NamedArg(name string) (Expr, bool) {
expr, exist := e.namedArgs[name]
return expr, exist
}
func (e *expr) Metrics(from, until int64) []MetricRequest {
switch e.etype {
case EtName:
return []MetricRequest{{Metric: e.target, From: from, Until: until}}
case EtConst, EtString:
return nil
case EtFunc:
var r []MetricRequest
for _, a := range e.args {
r = append(r, a.Metrics(from, until)...)
}
switch e.target {
case "transformNull":
referenceSeriesExpr := e.GetNamedArg("referenceSeries")
if !referenceSeriesExpr.IsInterfaceNil() {
r = append(r, referenceSeriesExpr.Metrics(from, until)...)
}
case "timeShift":
offs, err := e.GetIntervalArg(1, -1)
if err != nil {
return nil
}
for i := range r {
r[i].From += int64(offs)
r[i].Until += int64(offs)
}
case "timeStack":
offs, err := e.GetIntervalArg(1, -1)
if err != nil {
return nil
}
start, err := e.GetIntArg(2)
if err != nil {
return nil
}
end, err := e.GetIntArg(3)
if err != nil {
return nil
}
var r2 []MetricRequest
for _, v := range r {
for i := int64(start); i < int64(end); i++ {
fromNew := v.From + i*int64(offs)
untilNew := v.Until + i*int64(offs)
r2 = append(r2, MetricRequest{
Metric: v.Metric,
From: fromNew,
Until: untilNew,
})
}
}
return r2
case "holtWintersForecast":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 1, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
for i := range r {
r[i].From -= bootstrapInterval
}
case "holtWintersConfidenceBands", "holtWintersConfidenceArea":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 2, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
for i := range r {
r[i].From -= bootstrapInterval
}
case "holtWintersAberration":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 2, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
// For this function, we also need to pull data with an adjusted From time,
// so additional requests are added with the adjusted start time based on the
// bootstrapInterval
for i := range r {
adjustedReq := MetricRequest{
Metric: r[i].Metric,
From: r[i].From - bootstrapInterval,
Until: r[i].Until,
}
r = append(r, adjustedReq)
}
case "movingAverage", "movingMedian", "movingMin", "movingMax", "movingSum", "exponentialMovingAverage":
if len(e.args) < 2 {
return nil
}
if e.args[1].etype == EtString {
offs, err := e.GetIntervalArg(1, 1)
if err != nil {
return nil
}
for i := range r {
fromNew := r[i].From - int64(offs)
r[i].From = fromNew
}
}
case "hitcount":
if len(e.args) < 2 {
return nil
}
alignToInterval, err := e.GetBoolNamedOrPosArgDefault("alignToInterval", 2, false)
if err != nil {
return nil
}
if alignToInterval {
bucketSizeInt32, err := e.GetIntervalArg(1, 1)
if err != nil {
return nil
}
interval := int64(bucketSizeInt32)
// This is done in order to replicate the behavior in Graphite web when alignToInterval is set,
// in which new data is fetched with the adjusted start time.
for i, _ := range r {
start := r[i].From
for _, v := range []int64{86400, 3600, 60} {
if interval >= v {
start -= start % v
break
}
}
r[i].From = start
}
}
case "smartSummarize":
if len(e.args) < 2 {
return nil
}
alignToInterval, err := e.GetStringNamedOrPosArgDefault("alignTo", 3, "")
if err != nil {
return nil
}
if alignToInterval != "" {
for i, _ := range r {
newStart, err := StartAlignTo(r[i].From, alignToInterval)
if err != nil {
return nil
}
r[i].From = newStart
}
}
}
return r
}
return nil
}
func (e *expr) GetIntervalArg(n, defaultSign int) (int32, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
if e.args[n].etype != EtString {
return 0, ErrBadType
}
seconds, err := IntervalString(e.args[n].valStr, defaultSign)
if err != nil {
return 0, ErrBadType
}
return seconds, nil
}
func (e *expr) GetIntervalNamedOrPosArgDefault(k string, n, defaultSign int, v int64) (int64, error) {
var val string
var err error
if a := e.getNamedArg(k); a != nil {
val, err = a.doGetStringArg()
if err != nil {
return 0, ErrBadType
}
} else {
if len(e.args) <= n {
return v, nil
}
if e.args[n].etype != EtString {
return 0, ErrBadType
}
val = e.args[n].valStr
}
seconds, err := IntervalString(val, defaultSign)
if err != nil {
return 0, ErrBadType
}
return int64(seconds), nil
}
func (e *expr) GetStringArg(n int) (string, error) {
if len(e.args) <= n {
return "", ErrMissingArgument
}
return e.args[n].doGetStringArg()
}
func (e *expr) GetStringArgs(n int) ([]string, error) {
if len(e.args) <= n {
return nil, ErrMissingArgument
}
strs := make([]string, 0, len(e.args)-n)
for i := n; i < len(e.args); i++ {
a, err := e.GetStringArg(i)
if err != nil {
return nil, err
}
strs = append(strs, a)
}
return strs, nil
}
func (e *expr) GetStringArgDefault(n int, s string) (string, error) {
if len(e.args) <= n {
return s, nil
}
return e.args[n].doGetStringArg()
}
func (e *expr) GetStringNamedOrPosArgDefault(k string, n int, s string) (string, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetStringArg()
}
return e.GetStringArgDefault(n, s)
}
func (e *expr) GetFloatArg(n int) (float64, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
return e.args[n].doGetFloatArg()
}
func (e *expr) GetFloatArgDefault(n int, v float64) (float64, error) {
if len(e.args) <= n {
return v, nil
}
return e.args[n].doGetFloatArg()
}
func (e *expr) GetFloatNamedOrPosArgDefault(k string, n int, v float64) (float64, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetFloatArg()
}
return e.GetFloatArgDefault(n, v)
}
func (e *expr) GetIntArg(n int) (int, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
return e.args[n].doGetIntArg()
}
func (e *expr) GetIntArgs(n int) ([]int, error) {
if len(e.args) < n {
return nil, ErrMissingArgument
}
ints := make([]int, 0, len(e.args)-n)
for i := n; i < len(e.args); i++ {
a, err := e.GetIntArg(i)
if err != nil {
return nil, err
}
ints = append(ints, a)
}
return ints, nil
}
func (e *expr) GetIntArgDefault(n, d int) (int, error) {
if len(e.args) <= n {
return d, nil
}
return e.args[n].doGetIntArg()
}
func (e *expr) GetIntArgWithIndication(n int) (int, bool, error) {
if len(e.args) <= n {
return 0, false, nil
}
v, err := e.args[n].doGetIntArg()
return v, true, err
}
func (e *expr) GetIntNamedOrPosArgWithIndication(k string, n int) (int, bool, error) {
if a := e.getNamedArg(k); a != nil {
v, err := a.doGetIntArg()
return v, true, err
}
return e.GetIntArgWithIndication(n)
}
func (e *expr) GetIntNamedOrPosArgDefault(k string, n, d int) (int, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetIntArg()
}
return e.GetIntArgDefault(n, d)
}
func (e *expr) GetIntOrInfArg(n int) (IntOrInf, error) {
if len(e.args) <= n {
return IntOrInf{}, ErrMissingArgument
}
return e.args[n].doGetIntOrInfArg()
}
func (e *expr) GetIntOrInfArgDefault(n int, d IntOrInf) (IntOrInf, error) {
if len(e.args) <= n {
return d, nil
}
return e.args[n].doGetIntOrInfArg()
}
func (e *expr) GetIntOrInfNamedOrPosArgDefault(k string, n int, d IntOrInf) (IntOrInf, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetIntOrInfArg()
}
return e.GetIntOrInfArgDefault(n, d)
}
func (e *expr) GetNamedArg(name string) Expr {
return e.getNamedArg(name)
}
func (e *expr) GetBoolNamedOrPosArgDefault(k string, n int, b bool) (bool, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetBoolArg()
}
return e.GetBoolArgDefault(n, b)
}
func (e *expr) GetBoolArgDefault(n int, b bool) (bool, error) {
if len(e.args) <= n {
return b, nil
}
return e.args[n].doGetBoolArg()
}
func (e *expr) GetNodeOrTagArgs(n int, single bool) ([]NodeOrTag, error) {
// if single==false, zero nodes is OK
if single && len(e.args) <= n || len(e.args) < n {
return nil, ErrMissingArgument
}
nodeTags := make([]NodeOrTag, 0, len(e.args)-n)
var err error
until := len(e.args)
if single {
until = n + 1
}
for i := n; i < until; i++ {
var nodeTag NodeOrTag
nodeTag.Value, err = e.GetIntArg(i)
if err != nil {
// Try to parse it as String
nodeTag.Value, err = e.GetStringArg(i)
if err != nil {
return nil, err
}
nodeTag.IsTag = true
}
nodeTags = append(nodeTags, nodeTag)
}
return nodeTags, nil
}
func (e *expr) IsInterfaceNil() bool {
return e == nil
}
func (e *expr) insertFirstArg(exp *expr) error {
if e.etype != EtFunc {
return fmt.Errorf("pipe to not a function")
}
newArgs := []*expr{exp}
e.args = append(newArgs, e.args...)
if e.argString == "" {
e.argString = exp.ToString()
} else {
e.argString = exp.ToString() + "," + e.argString
}
return nil
}
func skipWhitespace(e string) string {
skipTo := len(e)
for i, r := range e {
if !unicode.IsSpace(r) {
skipTo = i
break
}
}
return e[skipTo:]
}
func parseExprWithoutPipe(e string) (Expr, string, error) {
e = skipWhitespace(e)
if e == "" {
return nil, "", ErrMissingExpr
}
if '0' <= e[0] && e[0] <= '9' || e[0] == '-' || e[0] == '+' {
val, valStr, e, err := parseConst(e)
r, _ := utf8.DecodeRuneInString(e)
if !unicode.IsLetter(r) {
return &expr{val: val, etype: EtConst, valStr: valStr}, e, err
}
}
if e[0] == '\'' || e[0] == '"' {
val, e, err := parseString(e)
return &expr{valStr: val, etype: EtString}, e, err
}
name, e := parseName(e)
if name == "" {
return nil, e, ErrMissingArgument
}
nameLower := strings.ToLower(name)
if nameLower == "false" || nameLower == "true" {
return &expr{valStr: nameLower, etype: EtBool, target: nameLower}, e, nil
}
if e != "" && e[0] == '(' {
// TODO(civil): Tags: make it a proper Expression
if name == "seriesByTag" {
argString, _, _, e, err := parseArgList(e)
return &expr{target: name + "(" + argString + ")", etype: EtName}, e, err
}
exp := &expr{target: name, etype: EtFunc}
argString, posArgs, namedArgs, e, err := parseArgList(e)
exp.argString = argString
exp.args = posArgs
exp.namedArgs = namedArgs
return exp, e, err
}
return &expr{target: name}, e, nil
}
func parseExprInner(e string) (Expr, string, error) {
exp, e, err := parseExprWithoutPipe(e)
if err != nil {
return exp, e, err
}
return pipe(exp.(*expr), e)
}
// ParseExpr actually do all the parsing. It returns expression, original string and error (if any)
func ParseExpr(e string) (Expr, string, error) {
exp, e, err := parseExprInner(e)
if err != nil {
return exp, e, err
}
exp, err = defineMap.expandExpr(exp.(*expr))
return exp, e, err
}
func pipe(exp *expr, e string) (*expr, string, error) {
e = skipWhitespace(e)
if e == "" || e[0] != '|' {
return exp, e, nil
}
wr, e, err := parseExprWithoutPipe(e[1:])
if err != nil {
return exp, e, err
}
if wr == nil {
return exp, e, nil
}
err = wr.(*expr).insertFirstArg(exp)
if err != nil {
return exp, e, err
}
exp = wr.(*expr)
return pipe(exp, e)
}
// IsNameChar checks if specified char is actually a valid (from graphite's protocol point of view)
func IsNameChar(r byte) bool {
return false ||
'a' <= r && r <= 'z' ||
'A' <= r && r <= 'Z' ||
'0' <= r && r <= '9' ||
r == '.' || r == '_' ||
r == '-' || r == '*' ||
r == '?' || r == ':' ||
r == '[' || r == ']' ||
r == '^' || r == '$' ||
r == '<' || r == '>' ||
r == '&' || r == '#' ||
r == '/' || r == '%' ||
r == '@'
}
func IsDigit(r byte) bool {
return '0' <= r && r <= '9'
}
func parseArgList(e string) (string, []*expr, map[string]*expr, string, error) {
var (
posArgs []*expr
namedArgs map[string]*expr
)
eOrig := e
if e[0] != '(' {
panic("arg list should start with paren")
}
var argStringBuffer bytes.Buffer
e = e[1:]
// check for empty args
t := skipWhitespace(e)
if t != "" && t[0] == ')' {
return "", posArgs, namedArgs, t[1:], nil
}
charNum := 1
for {
var arg Expr
var err error
charNum++
argString := e
arg, e, err = parseExprInner(e)
if err != nil {
return "", nil, nil, e, err
}
if e == "" {
return "", nil, nil, "", ErrMissingComma
}
// we now know we're parsing a key-value pair
if arg.IsName() && e[0] == '=' {
e = e[1:]
argCont, eCont, errCont := parseExprInner(e)
if errCont != nil {
return "", nil, nil, eCont, errCont
}
if eCont == "" {
return "", nil, nil, "", ErrMissingComma
}
if !argCont.IsConst() && !argCont.IsName() && !argCont.IsString() && !argCont.IsBool() {
return "", nil, nil, eCont, ErrBadType
}
if namedArgs == nil {
namedArgs = make(map[string]*expr)
}
exp := &expr{
etype: argCont.Type(),
val: argCont.FloatValue(),
valStr: argCont.StringValue(),
target: argCont.Target(),
}
namedArgs[arg.Target()] = exp
e = eCont
if argStringBuffer.Len() > 0 {
argStringBuffer.WriteByte(',')
}
argStringBuffer.WriteString(argString[:len(argString)-len(e)])
charNum += len(argString) - len(e)
} else {
exp := arg.toExpr().(*expr)
posArgs = append(posArgs, exp)
if argStringBuffer.Len() > 0 {
argStringBuffer.WriteByte(',')
}
if exp.IsFunc() {
expString := exp.ToString()
argStringBuffer.WriteString(expString)
charNum += len(expString)
} else {
argStringBuffer.WriteString(argString[:len(argString)-len(e)])
charNum += len(argString) - len(e)
}
}
// after the argument, trim any trailing spaces
e = skipWhitespace(e)
if e[0] == ')' {
return argStringBuffer.String(), posArgs, namedArgs, e[1:], nil
}
if e[0] != ',' && e[0] != ' ' {
return "", nil, nil, "", merry.Wrap(ErrUnexpectedCharacter).WithUserMessagef("string_to_parse=`%v`, character_number=%v, character=`%v`", eOrig, charNum, string(e[0]))
}
e = e[1:]
}
}
func parseConst(s string) (float64, string, string, error) {
var i int
// All valid characters for a floating-point constant
// Just slurp them all in and let ParseFloat sort 'em out
for i < len(s) && (IsDigit(s[i]) || s[i] == '.' || s[i] == '+' || s[i] == '-' || s[i] == 'e' || s[i] == 'E') {
i++
}
v, err := strconv.ParseFloat(s[:i], 64)
if err != nil {
return 0, "", "", err
}
return v, s[:i], s[i:], err
}
// RangeTables is an array of *unicode.RangeTable
var RangeTables []*unicode.RangeTable
var disallowedCharactersInMetricName = map[rune]struct{}{
'(': struct{}{},
')': struct{}{},
'"': struct{}{},
'\'': struct{}{},
' ': struct{}{},
'/': struct{}{},
}
func unicodeRuneAllowedInName(r rune) bool {
if _, ok := disallowedCharactersInMetricName[r]; ok {
return false
}
return true
}
func parseName(s string) (string, string) {
var (
braces, i, w int
r rune
isEscape bool
isDefault bool
)
buf := bytes.NewBuffer(make([]byte, 0, len(s)))
FOR:
for braces, i, w = 0, 0, 0; i < len(s); i += w {
if s[i] != '\\' {
err := buf.WriteByte(s[i])
if err != nil {
break FOR
}
}
isDefault = false
w = 1
if IsNameChar(s[i]) {
continue
}
switch s[i] {
case '\\':
if isEscape {
err := buf.WriteByte(s[i])
if err != nil {
break FOR
}
isEscape = false
continue
}
isEscape = true
case '{':
if isEscape {
isDefault = true
} else {
braces++
}
case '}':
if isEscape {
isDefault = true
} else {
if braces == 0 {
break FOR
}
braces--
}
case ',':
if isEscape {
isDefault = true
} else if braces == 0 {
break FOR
}
/* */
case '=':
// allow metric name to end with any amount of `=` without treating it as a named arg or tag
if !isEscape {
if len(s) < i+2 || s[i+1] == '=' || s[i+1] == ',' || s[i+1] == ')' {
continue
}
}
fallthrough
/* */
default:
isDefault = true
}
if isDefault {
r, w = utf8.DecodeRuneInString(s[i:])
if unicodeRuneAllowedInName(r) && unicode.In(r, RangeTables...) {
continue
}
if !isEscape {
break FOR
}
isEscape = false
continue
}
}
if i == len(s) {
return buf.String(), ""
}
return s[:i], s[i:]
}
func parseString(s string) (string, string, error) {
if s[0] != '\'' && s[0] != '"' {
panic("string should start with open quote")
}
match := s[0]
s = s[1:]
var i int
for i < len(s) && s[i] != match {
i++
}
if i == len(s) {
return "", "", ErrMissingQuote
}
return s[:i], s[i+1:], nil
}
func StartAlignTo(start int64, alignTo string) (int64, error) {
var newDate time.Time
re := regexp.MustCompile(`^[0-9]+`)
alignTo = re.ReplaceAllString(alignTo, "")
startDate := time.Unix(start, 0).UTC()
switch {
case strings.HasPrefix(alignTo, "y"):
newDate = time.Date(startDate.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "mon"):
newDate = time.Date(startDate.Year(), startDate.Month(), 1, 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "w"):
if !IsDigit(alignTo[len(alignTo)-1]) {
return start, ErrInvalidInterval
}
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), 0, 0, 0, 0, time.UTC)
dayOfWeek, err := strconv.Atoi(alignTo[len(alignTo)-1:])
if err != nil {
return start, ErrInvalidInterval
}
startDayOfWeek := int(startDate.Weekday())
daysToSubtract := startDayOfWeek - dayOfWeek
if daysToSubtract < 0 {
daysToSubtract += 7
}
newDate = newDate.AddDate(0, 0, -daysToSubtract)
case strings.HasPrefix(alignTo, "d"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "h"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "min"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), startDate.Minute(), 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "s"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), startDate.Minute(), startDate.Second(), 0, time.UTC)
default:
return start, ErrInvalidInterval
}
return newDate.Unix(), nil
}
| Target | identifier_name |
parser.go | package parser
import (
"bytes"
"fmt"
"github.com/go-graphite/carbonapi/expr/holtwinters"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/ansel1/merry"
)
// expression parser
type expr struct {
target string
etype ExprType
val float64
valStr string
args []*expr // positional
namedArgs map[string]*expr
argString string
}
func (e *expr) IsName() bool {
return e.etype == EtName
}
func (e *expr) IsFunc() bool {
return e.etype == EtFunc
}
func (e *expr) IsConst() bool {
return e.etype == EtConst
}
func (e *expr) IsString() bool {
return e.etype == EtString
}
func (e *expr) IsBool() bool {
return e.etype == EtBool
}
func (e *expr) Type() ExprType {
return e.etype
}
func (e *expr) ToString() string {
switch e.etype {
case EtFunc:
return e.target + "(" + e.argString + ")"
case EtConst:
return e.valStr
case EtString:
s := e.valStr
s = strings.ReplaceAll(s, `\`, `\\`)
s = strings.ReplaceAll(s, `'`, `\'`)
return "'" + s + "'"
case EtBool:
return fmt.Sprint(e.val)
}
return e.target
}
func (e *expr) SetTarget(target string) {
e.target = target
}
func (e *expr) MutateTarget(target string) Expr {
e.SetTarget(target)
return e
}
func (e *expr) Target() string {
return e.target
}
func (e *expr) FloatValue() float64 {
return e.val
}
func (e *expr) StringValue() string {
return e.valStr
}
func (e *expr) SetValString(value string) {
e.valStr = value
}
func (e *expr) MutateValString(value string) Expr {
e.SetValString(value)
return e
}
func (e *expr) RawArgs() string {
return e.argString
}
func (e *expr) SetRawArgs(args string) {
e.argString = args
}
func (e *expr) MutateRawArgs(args string) Expr {
e.SetRawArgs(args)
return e
}
func (e *expr) Args() []Expr {
ret := make([]Expr, len(e.args))
for i := 0; i < len(e.args); i++ {
ret[i] = e.args[i]
}
return ret
}
func (e *expr) Arg(i int) Expr {
return e.args[i]
}
func (e *expr) ArgsLen() int {
return len(e.args)
}
func (e *expr) NamedArgs() map[string]Expr {
ret := make(map[string]Expr)
for k, v := range e.namedArgs {
ret[k] = v
}
return ret
}
func (e *expr) NamedArg(name string) (Expr, bool) {
expr, exist := e.namedArgs[name]
return expr, exist
}
func (e *expr) Metrics(from, until int64) []MetricRequest {
switch e.etype {
case EtName:
return []MetricRequest{{Metric: e.target, From: from, Until: until}}
case EtConst, EtString:
return nil
case EtFunc:
var r []MetricRequest
for _, a := range e.args {
r = append(r, a.Metrics(from, until)...)
}
switch e.target {
case "transformNull":
referenceSeriesExpr := e.GetNamedArg("referenceSeries")
if !referenceSeriesExpr.IsInterfaceNil() {
r = append(r, referenceSeriesExpr.Metrics(from, until)...)
}
case "timeShift":
offs, err := e.GetIntervalArg(1, -1)
if err != nil {
return nil
}
for i := range r {
r[i].From += int64(offs)
r[i].Until += int64(offs)
}
case "timeStack":
offs, err := e.GetIntervalArg(1, -1)
if err != nil {
return nil
}
start, err := e.GetIntArg(2)
if err != nil {
return nil
}
end, err := e.GetIntArg(3)
if err != nil {
return nil
}
var r2 []MetricRequest
for _, v := range r {
for i := int64(start); i < int64(end); i++ {
fromNew := v.From + i*int64(offs)
untilNew := v.Until + i*int64(offs)
r2 = append(r2, MetricRequest{
Metric: v.Metric,
From: fromNew,
Until: untilNew,
})
}
}
return r2
case "holtWintersForecast":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 1, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
for i := range r {
r[i].From -= bootstrapInterval
}
case "holtWintersConfidenceBands", "holtWintersConfidenceArea":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 2, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
for i := range r {
r[i].From -= bootstrapInterval
}
case "holtWintersAberration":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 2, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
// For this function, we also need to pull data with an adjusted From time,
// so additional requests are added with the adjusted start time based on the
// bootstrapInterval
for i := range r {
adjustedReq := MetricRequest{
Metric: r[i].Metric,
From: r[i].From - bootstrapInterval,
Until: r[i].Until,
}
r = append(r, adjustedReq)
}
case "movingAverage", "movingMedian", "movingMin", "movingMax", "movingSum", "exponentialMovingAverage":
if len(e.args) < 2 {
return nil
}
if e.args[1].etype == EtString {
offs, err := e.GetIntervalArg(1, 1)
if err != nil {
return nil
}
for i := range r {
fromNew := r[i].From - int64(offs)
r[i].From = fromNew
}
}
case "hitcount":
if len(e.args) < 2 {
return nil
}
alignToInterval, err := e.GetBoolNamedOrPosArgDefault("alignToInterval", 2, false)
if err != nil {
return nil
}
if alignToInterval {
bucketSizeInt32, err := e.GetIntervalArg(1, 1)
if err != nil {
return nil
}
interval := int64(bucketSizeInt32)
// This is done in order to replicate the behavior in Graphite web when alignToInterval is set,
// in which new data is fetched with the adjusted start time.
for i, _ := range r {
start := r[i].From
for _, v := range []int64{86400, 3600, 60} {
if interval >= v {
start -= start % v
break
}
}
r[i].From = start
}
}
case "smartSummarize":
if len(e.args) < 2 {
return nil
}
alignToInterval, err := e.GetStringNamedOrPosArgDefault("alignTo", 3, "")
if err != nil {
return nil
}
if alignToInterval != "" {
for i, _ := range r {
newStart, err := StartAlignTo(r[i].From, alignToInterval)
if err != nil {
return nil
}
r[i].From = newStart
}
}
}
return r
}
return nil
}
func (e *expr) GetIntervalArg(n, defaultSign int) (int32, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
if e.args[n].etype != EtString {
return 0, ErrBadType
}
seconds, err := IntervalString(e.args[n].valStr, defaultSign)
if err != nil {
return 0, ErrBadType
}
return seconds, nil
}
func (e *expr) GetIntervalNamedOrPosArgDefault(k string, n, defaultSign int, v int64) (int64, error) {
var val string
var err error
if a := e.getNamedArg(k); a != nil {
val, err = a.doGetStringArg()
if err != nil {
return 0, ErrBadType
}
} else {
if len(e.args) <= n {
return v, nil
}
if e.args[n].etype != EtString {
return 0, ErrBadType
}
val = e.args[n].valStr
}
seconds, err := IntervalString(val, defaultSign)
if err != nil {
return 0, ErrBadType
}
return int64(seconds), nil
}
func (e *expr) GetStringArg(n int) (string, error) {
if len(e.args) <= n {
return "", ErrMissingArgument
}
return e.args[n].doGetStringArg()
}
func (e *expr) GetStringArgs(n int) ([]string, error) {
if len(e.args) <= n {
return nil, ErrMissingArgument
}
strs := make([]string, 0, len(e.args)-n)
for i := n; i < len(e.args); i++ {
a, err := e.GetStringArg(i)
if err != nil {
return nil, err
}
strs = append(strs, a)
}
return strs, nil
}
func (e *expr) GetStringArgDefault(n int, s string) (string, error) {
if len(e.args) <= n {
return s, nil
}
return e.args[n].doGetStringArg()
}
func (e *expr) GetStringNamedOrPosArgDefault(k string, n int, s string) (string, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetStringArg()
}
return e.GetStringArgDefault(n, s)
}
func (e *expr) GetFloatArg(n int) (float64, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
return e.args[n].doGetFloatArg()
} | func (e *expr) GetFloatArgDefault(n int, v float64) (float64, error) {
if len(e.args) <= n {
return v, nil
}
return e.args[n].doGetFloatArg()
}
func (e *expr) GetFloatNamedOrPosArgDefault(k string, n int, v float64) (float64, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetFloatArg()
}
return e.GetFloatArgDefault(n, v)
}
func (e *expr) GetIntArg(n int) (int, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
return e.args[n].doGetIntArg()
}
func (e *expr) GetIntArgs(n int) ([]int, error) {
if len(e.args) < n {
return nil, ErrMissingArgument
}
ints := make([]int, 0, len(e.args)-n)
for i := n; i < len(e.args); i++ {
a, err := e.GetIntArg(i)
if err != nil {
return nil, err
}
ints = append(ints, a)
}
return ints, nil
}
func (e *expr) GetIntArgDefault(n, d int) (int, error) {
if len(e.args) <= n {
return d, nil
}
return e.args[n].doGetIntArg()
}
func (e *expr) GetIntArgWithIndication(n int) (int, bool, error) {
if len(e.args) <= n {
return 0, false, nil
}
v, err := e.args[n].doGetIntArg()
return v, true, err
}
func (e *expr) GetIntNamedOrPosArgWithIndication(k string, n int) (int, bool, error) {
if a := e.getNamedArg(k); a != nil {
v, err := a.doGetIntArg()
return v, true, err
}
return e.GetIntArgWithIndication(n)
}
func (e *expr) GetIntNamedOrPosArgDefault(k string, n, d int) (int, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetIntArg()
}
return e.GetIntArgDefault(n, d)
}
func (e *expr) GetIntOrInfArg(n int) (IntOrInf, error) {
if len(e.args) <= n {
return IntOrInf{}, ErrMissingArgument
}
return e.args[n].doGetIntOrInfArg()
}
func (e *expr) GetIntOrInfArgDefault(n int, d IntOrInf) (IntOrInf, error) {
if len(e.args) <= n {
return d, nil
}
return e.args[n].doGetIntOrInfArg()
}
func (e *expr) GetIntOrInfNamedOrPosArgDefault(k string, n int, d IntOrInf) (IntOrInf, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetIntOrInfArg()
}
return e.GetIntOrInfArgDefault(n, d)
}
func (e *expr) GetNamedArg(name string) Expr {
return e.getNamedArg(name)
}
func (e *expr) GetBoolNamedOrPosArgDefault(k string, n int, b bool) (bool, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetBoolArg()
}
return e.GetBoolArgDefault(n, b)
}
func (e *expr) GetBoolArgDefault(n int, b bool) (bool, error) {
if len(e.args) <= n {
return b, nil
}
return e.args[n].doGetBoolArg()
}
func (e *expr) GetNodeOrTagArgs(n int, single bool) ([]NodeOrTag, error) {
// if single==false, zero nodes is OK
if single && len(e.args) <= n || len(e.args) < n {
return nil, ErrMissingArgument
}
nodeTags := make([]NodeOrTag, 0, len(e.args)-n)
var err error
until := len(e.args)
if single {
until = n + 1
}
for i := n; i < until; i++ {
var nodeTag NodeOrTag
nodeTag.Value, err = e.GetIntArg(i)
if err != nil {
// Try to parse it as String
nodeTag.Value, err = e.GetStringArg(i)
if err != nil {
return nil, err
}
nodeTag.IsTag = true
}
nodeTags = append(nodeTags, nodeTag)
}
return nodeTags, nil
}
func (e *expr) IsInterfaceNil() bool {
return e == nil
}
func (e *expr) insertFirstArg(exp *expr) error {
if e.etype != EtFunc {
return fmt.Errorf("pipe to not a function")
}
newArgs := []*expr{exp}
e.args = append(newArgs, e.args...)
if e.argString == "" {
e.argString = exp.ToString()
} else {
e.argString = exp.ToString() + "," + e.argString
}
return nil
}
func skipWhitespace(e string) string {
skipTo := len(e)
for i, r := range e {
if !unicode.IsSpace(r) {
skipTo = i
break
}
}
return e[skipTo:]
}
func parseExprWithoutPipe(e string) (Expr, string, error) {
e = skipWhitespace(e)
if e == "" {
return nil, "", ErrMissingExpr
}
if '0' <= e[0] && e[0] <= '9' || e[0] == '-' || e[0] == '+' {
val, valStr, e, err := parseConst(e)
r, _ := utf8.DecodeRuneInString(e)
if !unicode.IsLetter(r) {
return &expr{val: val, etype: EtConst, valStr: valStr}, e, err
}
}
if e[0] == '\'' || e[0] == '"' {
val, e, err := parseString(e)
return &expr{valStr: val, etype: EtString}, e, err
}
name, e := parseName(e)
if name == "" {
return nil, e, ErrMissingArgument
}
nameLower := strings.ToLower(name)
if nameLower == "false" || nameLower == "true" {
return &expr{valStr: nameLower, etype: EtBool, target: nameLower}, e, nil
}
if e != "" && e[0] == '(' {
// TODO(civil): Tags: make it a proper Expression
if name == "seriesByTag" {
argString, _, _, e, err := parseArgList(e)
return &expr{target: name + "(" + argString + ")", etype: EtName}, e, err
}
exp := &expr{target: name, etype: EtFunc}
argString, posArgs, namedArgs, e, err := parseArgList(e)
exp.argString = argString
exp.args = posArgs
exp.namedArgs = namedArgs
return exp, e, err
}
return &expr{target: name}, e, nil
}
func parseExprInner(e string) (Expr, string, error) {
exp, e, err := parseExprWithoutPipe(e)
if err != nil {
return exp, e, err
}
return pipe(exp.(*expr), e)
}
// ParseExpr actually do all the parsing. It returns expression, original string and error (if any)
func ParseExpr(e string) (Expr, string, error) {
exp, e, err := parseExprInner(e)
if err != nil {
return exp, e, err
}
exp, err = defineMap.expandExpr(exp.(*expr))
return exp, e, err
}
func pipe(exp *expr, e string) (*expr, string, error) {
e = skipWhitespace(e)
if e == "" || e[0] != '|' {
return exp, e, nil
}
wr, e, err := parseExprWithoutPipe(e[1:])
if err != nil {
return exp, e, err
}
if wr == nil {
return exp, e, nil
}
err = wr.(*expr).insertFirstArg(exp)
if err != nil {
return exp, e, err
}
exp = wr.(*expr)
return pipe(exp, e)
}
// IsNameChar checks if specified char is actually a valid (from graphite's protocol point of view)
func IsNameChar(r byte) bool {
return false ||
'a' <= r && r <= 'z' ||
'A' <= r && r <= 'Z' ||
'0' <= r && r <= '9' ||
r == '.' || r == '_' ||
r == '-' || r == '*' ||
r == '?' || r == ':' ||
r == '[' || r == ']' ||
r == '^' || r == '$' ||
r == '<' || r == '>' ||
r == '&' || r == '#' ||
r == '/' || r == '%' ||
r == '@'
}
func IsDigit(r byte) bool {
return '0' <= r && r <= '9'
}
func parseArgList(e string) (string, []*expr, map[string]*expr, string, error) {
var (
posArgs []*expr
namedArgs map[string]*expr
)
eOrig := e
if e[0] != '(' {
panic("arg list should start with paren")
}
var argStringBuffer bytes.Buffer
e = e[1:]
// check for empty args
t := skipWhitespace(e)
if t != "" && t[0] == ')' {
return "", posArgs, namedArgs, t[1:], nil
}
charNum := 1
for {
var arg Expr
var err error
charNum++
argString := e
arg, e, err = parseExprInner(e)
if err != nil {
return "", nil, nil, e, err
}
if e == "" {
return "", nil, nil, "", ErrMissingComma
}
// we now know we're parsing a key-value pair
if arg.IsName() && e[0] == '=' {
e = e[1:]
argCont, eCont, errCont := parseExprInner(e)
if errCont != nil {
return "", nil, nil, eCont, errCont
}
if eCont == "" {
return "", nil, nil, "", ErrMissingComma
}
if !argCont.IsConst() && !argCont.IsName() && !argCont.IsString() && !argCont.IsBool() {
return "", nil, nil, eCont, ErrBadType
}
if namedArgs == nil {
namedArgs = make(map[string]*expr)
}
exp := &expr{
etype: argCont.Type(),
val: argCont.FloatValue(),
valStr: argCont.StringValue(),
target: argCont.Target(),
}
namedArgs[arg.Target()] = exp
e = eCont
if argStringBuffer.Len() > 0 {
argStringBuffer.WriteByte(',')
}
argStringBuffer.WriteString(argString[:len(argString)-len(e)])
charNum += len(argString) - len(e)
} else {
exp := arg.toExpr().(*expr)
posArgs = append(posArgs, exp)
if argStringBuffer.Len() > 0 {
argStringBuffer.WriteByte(',')
}
if exp.IsFunc() {
expString := exp.ToString()
argStringBuffer.WriteString(expString)
charNum += len(expString)
} else {
argStringBuffer.WriteString(argString[:len(argString)-len(e)])
charNum += len(argString) - len(e)
}
}
// after the argument, trim any trailing spaces
e = skipWhitespace(e)
if e[0] == ')' {
return argStringBuffer.String(), posArgs, namedArgs, e[1:], nil
}
if e[0] != ',' && e[0] != ' ' {
return "", nil, nil, "", merry.Wrap(ErrUnexpectedCharacter).WithUserMessagef("string_to_parse=`%v`, character_number=%v, character=`%v`", eOrig, charNum, string(e[0]))
}
e = e[1:]
}
}
func parseConst(s string) (float64, string, string, error) {
var i int
// All valid characters for a floating-point constant
// Just slurp them all in and let ParseFloat sort 'em out
for i < len(s) && (IsDigit(s[i]) || s[i] == '.' || s[i] == '+' || s[i] == '-' || s[i] == 'e' || s[i] == 'E') {
i++
}
v, err := strconv.ParseFloat(s[:i], 64)
if err != nil {
return 0, "", "", err
}
return v, s[:i], s[i:], err
}
// RangeTables is an array of *unicode.RangeTable
var RangeTables []*unicode.RangeTable
var disallowedCharactersInMetricName = map[rune]struct{}{
'(': struct{}{},
')': struct{}{},
'"': struct{}{},
'\'': struct{}{},
' ': struct{}{},
'/': struct{}{},
}
func unicodeRuneAllowedInName(r rune) bool {
if _, ok := disallowedCharactersInMetricName[r]; ok {
return false
}
return true
}
func parseName(s string) (string, string) {
var (
braces, i, w int
r rune
isEscape bool
isDefault bool
)
buf := bytes.NewBuffer(make([]byte, 0, len(s)))
FOR:
for braces, i, w = 0, 0, 0; i < len(s); i += w {
if s[i] != '\\' {
err := buf.WriteByte(s[i])
if err != nil {
break FOR
}
}
isDefault = false
w = 1
if IsNameChar(s[i]) {
continue
}
switch s[i] {
case '\\':
if isEscape {
err := buf.WriteByte(s[i])
if err != nil {
break FOR
}
isEscape = false
continue
}
isEscape = true
case '{':
if isEscape {
isDefault = true
} else {
braces++
}
case '}':
if isEscape {
isDefault = true
} else {
if braces == 0 {
break FOR
}
braces--
}
case ',':
if isEscape {
isDefault = true
} else if braces == 0 {
break FOR
}
/* */
case '=':
// allow metric name to end with any amount of `=` without treating it as a named arg or tag
if !isEscape {
if len(s) < i+2 || s[i+1] == '=' || s[i+1] == ',' || s[i+1] == ')' {
continue
}
}
fallthrough
/* */
default:
isDefault = true
}
if isDefault {
r, w = utf8.DecodeRuneInString(s[i:])
if unicodeRuneAllowedInName(r) && unicode.In(r, RangeTables...) {
continue
}
if !isEscape {
break FOR
}
isEscape = false
continue
}
}
if i == len(s) {
return buf.String(), ""
}
return s[:i], s[i:]
}
func parseString(s string) (string, string, error) {
if s[0] != '\'' && s[0] != '"' {
panic("string should start with open quote")
}
match := s[0]
s = s[1:]
var i int
for i < len(s) && s[i] != match {
i++
}
if i == len(s) {
return "", "", ErrMissingQuote
}
return s[:i], s[i+1:], nil
}
func StartAlignTo(start int64, alignTo string) (int64, error) {
var newDate time.Time
re := regexp.MustCompile(`^[0-9]+`)
alignTo = re.ReplaceAllString(alignTo, "")
startDate := time.Unix(start, 0).UTC()
switch {
case strings.HasPrefix(alignTo, "y"):
newDate = time.Date(startDate.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "mon"):
newDate = time.Date(startDate.Year(), startDate.Month(), 1, 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "w"):
if !IsDigit(alignTo[len(alignTo)-1]) {
return start, ErrInvalidInterval
}
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), 0, 0, 0, 0, time.UTC)
dayOfWeek, err := strconv.Atoi(alignTo[len(alignTo)-1:])
if err != nil {
return start, ErrInvalidInterval
}
startDayOfWeek := int(startDate.Weekday())
daysToSubtract := startDayOfWeek - dayOfWeek
if daysToSubtract < 0 {
daysToSubtract += 7
}
newDate = newDate.AddDate(0, 0, -daysToSubtract)
case strings.HasPrefix(alignTo, "d"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "h"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "min"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), startDate.Minute(), 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "s"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), startDate.Minute(), startDate.Second(), 0, time.UTC)
default:
return start, ErrInvalidInterval
}
return newDate.Unix(), nil
} | random_line_split | |
parser.go | package parser
import (
"bytes"
"fmt"
"github.com/go-graphite/carbonapi/expr/holtwinters"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/ansel1/merry"
)
// expression parser
type expr struct {
target string
etype ExprType
val float64
valStr string
args []*expr // positional
namedArgs map[string]*expr
argString string
}
func (e *expr) IsName() bool {
return e.etype == EtName
}
func (e *expr) IsFunc() bool {
return e.etype == EtFunc
}
func (e *expr) IsConst() bool {
return e.etype == EtConst
}
func (e *expr) IsString() bool {
return e.etype == EtString
}
func (e *expr) IsBool() bool {
return e.etype == EtBool
}
func (e *expr) Type() ExprType {
return e.etype
}
func (e *expr) ToString() string {
switch e.etype {
case EtFunc:
return e.target + "(" + e.argString + ")"
case EtConst:
return e.valStr
case EtString:
s := e.valStr
s = strings.ReplaceAll(s, `\`, `\\`)
s = strings.ReplaceAll(s, `'`, `\'`)
return "'" + s + "'"
case EtBool:
return fmt.Sprint(e.val)
}
return e.target
}
func (e *expr) SetTarget(target string) {
e.target = target
}
func (e *expr) MutateTarget(target string) Expr {
e.SetTarget(target)
return e
}
func (e *expr) Target() string {
return e.target
}
func (e *expr) FloatValue() float64 {
return e.val
}
func (e *expr) StringValue() string {
return e.valStr
}
func (e *expr) SetValString(value string) {
e.valStr = value
}
func (e *expr) MutateValString(value string) Expr {
e.SetValString(value)
return e
}
func (e *expr) RawArgs() string {
return e.argString
}
func (e *expr) SetRawArgs(args string) {
e.argString = args
}
func (e *expr) MutateRawArgs(args string) Expr {
e.SetRawArgs(args)
return e
}
func (e *expr) Args() []Expr {
ret := make([]Expr, len(e.args))
for i := 0; i < len(e.args); i++ {
ret[i] = e.args[i]
}
return ret
}
func (e *expr) Arg(i int) Expr {
return e.args[i]
}
func (e *expr) ArgsLen() int {
return len(e.args)
}
func (e *expr) NamedArgs() map[string]Expr {
ret := make(map[string]Expr)
for k, v := range e.namedArgs {
ret[k] = v
}
return ret
}
func (e *expr) NamedArg(name string) (Expr, bool) {
expr, exist := e.namedArgs[name]
return expr, exist
}
func (e *expr) Metrics(from, until int64) []MetricRequest {
switch e.etype {
case EtName:
return []MetricRequest{{Metric: e.target, From: from, Until: until}}
case EtConst, EtString:
return nil
case EtFunc:
var r []MetricRequest
for _, a := range e.args {
r = append(r, a.Metrics(from, until)...)
}
switch e.target {
case "transformNull":
referenceSeriesExpr := e.GetNamedArg("referenceSeries")
if !referenceSeriesExpr.IsInterfaceNil() {
r = append(r, referenceSeriesExpr.Metrics(from, until)...)
}
case "timeShift":
offs, err := e.GetIntervalArg(1, -1)
if err != nil {
return nil
}
for i := range r {
r[i].From += int64(offs)
r[i].Until += int64(offs)
}
case "timeStack":
offs, err := e.GetIntervalArg(1, -1)
if err != nil {
return nil
}
start, err := e.GetIntArg(2)
if err != nil {
return nil
}
end, err := e.GetIntArg(3)
if err != nil {
return nil
}
var r2 []MetricRequest
for _, v := range r {
for i := int64(start); i < int64(end); i++ {
fromNew := v.From + i*int64(offs)
untilNew := v.Until + i*int64(offs)
r2 = append(r2, MetricRequest{
Metric: v.Metric,
From: fromNew,
Until: untilNew,
})
}
}
return r2
case "holtWintersForecast":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 1, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
for i := range r {
r[i].From -= bootstrapInterval
}
case "holtWintersConfidenceBands", "holtWintersConfidenceArea":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 2, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
for i := range r {
r[i].From -= bootstrapInterval
}
case "holtWintersAberration":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 2, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
// For this function, we also need to pull data with an adjusted From time,
// so additional requests are added with the adjusted start time based on the
// bootstrapInterval
for i := range r {
adjustedReq := MetricRequest{
Metric: r[i].Metric,
From: r[i].From - bootstrapInterval,
Until: r[i].Until,
}
r = append(r, adjustedReq)
}
case "movingAverage", "movingMedian", "movingMin", "movingMax", "movingSum", "exponentialMovingAverage":
if len(e.args) < 2 {
return nil
}
if e.args[1].etype == EtString {
offs, err := e.GetIntervalArg(1, 1)
if err != nil {
return nil
}
for i := range r {
fromNew := r[i].From - int64(offs)
r[i].From = fromNew
}
}
case "hitcount":
if len(e.args) < 2 {
return nil
}
alignToInterval, err := e.GetBoolNamedOrPosArgDefault("alignToInterval", 2, false)
if err != nil {
return nil
}
if alignToInterval {
bucketSizeInt32, err := e.GetIntervalArg(1, 1)
if err != nil {
return nil
}
interval := int64(bucketSizeInt32)
// This is done in order to replicate the behavior in Graphite web when alignToInterval is set,
// in which new data is fetched with the adjusted start time.
for i, _ := range r {
start := r[i].From
for _, v := range []int64{86400, 3600, 60} {
if interval >= v {
start -= start % v
break
}
}
r[i].From = start
}
}
case "smartSummarize":
if len(e.args) < 2 {
return nil
}
alignToInterval, err := e.GetStringNamedOrPosArgDefault("alignTo", 3, "")
if err != nil {
return nil
}
if alignToInterval != "" {
for i, _ := range r {
newStart, err := StartAlignTo(r[i].From, alignToInterval)
if err != nil {
return nil
}
r[i].From = newStart
}
}
}
return r
}
return nil
}
func (e *expr) GetIntervalArg(n, defaultSign int) (int32, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
if e.args[n].etype != EtString {
return 0, ErrBadType
}
seconds, err := IntervalString(e.args[n].valStr, defaultSign)
if err != nil {
return 0, ErrBadType
}
return seconds, nil
}
func (e *expr) GetIntervalNamedOrPosArgDefault(k string, n, defaultSign int, v int64) (int64, error) {
var val string
var err error
if a := e.getNamedArg(k); a != nil {
val, err = a.doGetStringArg()
if err != nil {
return 0, ErrBadType
}
} else {
if len(e.args) <= n {
return v, nil
}
if e.args[n].etype != EtString {
return 0, ErrBadType
}
val = e.args[n].valStr
}
seconds, err := IntervalString(val, defaultSign)
if err != nil {
return 0, ErrBadType
}
return int64(seconds), nil
}
func (e *expr) GetStringArg(n int) (string, error) {
if len(e.args) <= n {
return "", ErrMissingArgument
}
return e.args[n].doGetStringArg()
}
func (e *expr) GetStringArgs(n int) ([]string, error) {
if len(e.args) <= n {
return nil, ErrMissingArgument
}
strs := make([]string, 0, len(e.args)-n)
for i := n; i < len(e.args); i++ {
a, err := e.GetStringArg(i)
if err != nil {
return nil, err
}
strs = append(strs, a)
}
return strs, nil
}
func (e *expr) GetStringArgDefault(n int, s string) (string, error) {
if len(e.args) <= n {
return s, nil
}
return e.args[n].doGetStringArg()
}
func (e *expr) GetStringNamedOrPosArgDefault(k string, n int, s string) (string, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetStringArg()
}
return e.GetStringArgDefault(n, s)
}
func (e *expr) GetFloatArg(n int) (float64, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
return e.args[n].doGetFloatArg()
}
func (e *expr) GetFloatArgDefault(n int, v float64) (float64, error) {
if len(e.args) <= n {
return v, nil
}
return e.args[n].doGetFloatArg()
}
func (e *expr) GetFloatNamedOrPosArgDefault(k string, n int, v float64) (float64, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetFloatArg()
}
return e.GetFloatArgDefault(n, v)
}
func (e *expr) GetIntArg(n int) (int, error) |
func (e *expr) GetIntArgs(n int) ([]int, error) {
if len(e.args) < n {
return nil, ErrMissingArgument
}
ints := make([]int, 0, len(e.args)-n)
for i := n; i < len(e.args); i++ {
a, err := e.GetIntArg(i)
if err != nil {
return nil, err
}
ints = append(ints, a)
}
return ints, nil
}
func (e *expr) GetIntArgDefault(n, d int) (int, error) {
if len(e.args) <= n {
return d, nil
}
return e.args[n].doGetIntArg()
}
func (e *expr) GetIntArgWithIndication(n int) (int, bool, error) {
if len(e.args) <= n {
return 0, false, nil
}
v, err := e.args[n].doGetIntArg()
return v, true, err
}
func (e *expr) GetIntNamedOrPosArgWithIndication(k string, n int) (int, bool, error) {
if a := e.getNamedArg(k); a != nil {
v, err := a.doGetIntArg()
return v, true, err
}
return e.GetIntArgWithIndication(n)
}
func (e *expr) GetIntNamedOrPosArgDefault(k string, n, d int) (int, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetIntArg()
}
return e.GetIntArgDefault(n, d)
}
func (e *expr) GetIntOrInfArg(n int) (IntOrInf, error) {
if len(e.args) <= n {
return IntOrInf{}, ErrMissingArgument
}
return e.args[n].doGetIntOrInfArg()
}
func (e *expr) GetIntOrInfArgDefault(n int, d IntOrInf) (IntOrInf, error) {
if len(e.args) <= n {
return d, nil
}
return e.args[n].doGetIntOrInfArg()
}
func (e *expr) GetIntOrInfNamedOrPosArgDefault(k string, n int, d IntOrInf) (IntOrInf, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetIntOrInfArg()
}
return e.GetIntOrInfArgDefault(n, d)
}
func (e *expr) GetNamedArg(name string) Expr {
return e.getNamedArg(name)
}
func (e *expr) GetBoolNamedOrPosArgDefault(k string, n int, b bool) (bool, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetBoolArg()
}
return e.GetBoolArgDefault(n, b)
}
func (e *expr) GetBoolArgDefault(n int, b bool) (bool, error) {
if len(e.args) <= n {
return b, nil
}
return e.args[n].doGetBoolArg()
}
func (e *expr) GetNodeOrTagArgs(n int, single bool) ([]NodeOrTag, error) {
// if single==false, zero nodes is OK
if single && len(e.args) <= n || len(e.args) < n {
return nil, ErrMissingArgument
}
nodeTags := make([]NodeOrTag, 0, len(e.args)-n)
var err error
until := len(e.args)
if single {
until = n + 1
}
for i := n; i < until; i++ {
var nodeTag NodeOrTag
nodeTag.Value, err = e.GetIntArg(i)
if err != nil {
// Try to parse it as String
nodeTag.Value, err = e.GetStringArg(i)
if err != nil {
return nil, err
}
nodeTag.IsTag = true
}
nodeTags = append(nodeTags, nodeTag)
}
return nodeTags, nil
}
func (e *expr) IsInterfaceNil() bool {
return e == nil
}
func (e *expr) insertFirstArg(exp *expr) error {
if e.etype != EtFunc {
return fmt.Errorf("pipe to not a function")
}
newArgs := []*expr{exp}
e.args = append(newArgs, e.args...)
if e.argString == "" {
e.argString = exp.ToString()
} else {
e.argString = exp.ToString() + "," + e.argString
}
return nil
}
func skipWhitespace(e string) string {
skipTo := len(e)
for i, r := range e {
if !unicode.IsSpace(r) {
skipTo = i
break
}
}
return e[skipTo:]
}
func parseExprWithoutPipe(e string) (Expr, string, error) {
e = skipWhitespace(e)
if e == "" {
return nil, "", ErrMissingExpr
}
if '0' <= e[0] && e[0] <= '9' || e[0] == '-' || e[0] == '+' {
val, valStr, e, err := parseConst(e)
r, _ := utf8.DecodeRuneInString(e)
if !unicode.IsLetter(r) {
return &expr{val: val, etype: EtConst, valStr: valStr}, e, err
}
}
if e[0] == '\'' || e[0] == '"' {
val, e, err := parseString(e)
return &expr{valStr: val, etype: EtString}, e, err
}
name, e := parseName(e)
if name == "" {
return nil, e, ErrMissingArgument
}
nameLower := strings.ToLower(name)
if nameLower == "false" || nameLower == "true" {
return &expr{valStr: nameLower, etype: EtBool, target: nameLower}, e, nil
}
if e != "" && e[0] == '(' {
// TODO(civil): Tags: make it a proper Expression
if name == "seriesByTag" {
argString, _, _, e, err := parseArgList(e)
return &expr{target: name + "(" + argString + ")", etype: EtName}, e, err
}
exp := &expr{target: name, etype: EtFunc}
argString, posArgs, namedArgs, e, err := parseArgList(e)
exp.argString = argString
exp.args = posArgs
exp.namedArgs = namedArgs
return exp, e, err
}
return &expr{target: name}, e, nil
}
func parseExprInner(e string) (Expr, string, error) {
exp, e, err := parseExprWithoutPipe(e)
if err != nil {
return exp, e, err
}
return pipe(exp.(*expr), e)
}
// ParseExpr actually do all the parsing. It returns expression, original string and error (if any)
func ParseExpr(e string) (Expr, string, error) {
exp, e, err := parseExprInner(e)
if err != nil {
return exp, e, err
}
exp, err = defineMap.expandExpr(exp.(*expr))
return exp, e, err
}
func pipe(exp *expr, e string) (*expr, string, error) {
e = skipWhitespace(e)
if e == "" || e[0] != '|' {
return exp, e, nil
}
wr, e, err := parseExprWithoutPipe(e[1:])
if err != nil {
return exp, e, err
}
if wr == nil {
return exp, e, nil
}
err = wr.(*expr).insertFirstArg(exp)
if err != nil {
return exp, e, err
}
exp = wr.(*expr)
return pipe(exp, e)
}
// IsNameChar checks if specified char is actually a valid (from graphite's protocol point of view)
func IsNameChar(r byte) bool {
return false ||
'a' <= r && r <= 'z' ||
'A' <= r && r <= 'Z' ||
'0' <= r && r <= '9' ||
r == '.' || r == '_' ||
r == '-' || r == '*' ||
r == '?' || r == ':' ||
r == '[' || r == ']' ||
r == '^' || r == '$' ||
r == '<' || r == '>' ||
r == '&' || r == '#' ||
r == '/' || r == '%' ||
r == '@'
}
func IsDigit(r byte) bool {
return '0' <= r && r <= '9'
}
func parseArgList(e string) (string, []*expr, map[string]*expr, string, error) {
var (
posArgs []*expr
namedArgs map[string]*expr
)
eOrig := e
if e[0] != '(' {
panic("arg list should start with paren")
}
var argStringBuffer bytes.Buffer
e = e[1:]
// check for empty args
t := skipWhitespace(e)
if t != "" && t[0] == ')' {
return "", posArgs, namedArgs, t[1:], nil
}
charNum := 1
for {
var arg Expr
var err error
charNum++
argString := e
arg, e, err = parseExprInner(e)
if err != nil {
return "", nil, nil, e, err
}
if e == "" {
return "", nil, nil, "", ErrMissingComma
}
// we now know we're parsing a key-value pair
if arg.IsName() && e[0] == '=' {
e = e[1:]
argCont, eCont, errCont := parseExprInner(e)
if errCont != nil {
return "", nil, nil, eCont, errCont
}
if eCont == "" {
return "", nil, nil, "", ErrMissingComma
}
if !argCont.IsConst() && !argCont.IsName() && !argCont.IsString() && !argCont.IsBool() {
return "", nil, nil, eCont, ErrBadType
}
if namedArgs == nil {
namedArgs = make(map[string]*expr)
}
exp := &expr{
etype: argCont.Type(),
val: argCont.FloatValue(),
valStr: argCont.StringValue(),
target: argCont.Target(),
}
namedArgs[arg.Target()] = exp
e = eCont
if argStringBuffer.Len() > 0 {
argStringBuffer.WriteByte(',')
}
argStringBuffer.WriteString(argString[:len(argString)-len(e)])
charNum += len(argString) - len(e)
} else {
exp := arg.toExpr().(*expr)
posArgs = append(posArgs, exp)
if argStringBuffer.Len() > 0 {
argStringBuffer.WriteByte(',')
}
if exp.IsFunc() {
expString := exp.ToString()
argStringBuffer.WriteString(expString)
charNum += len(expString)
} else {
argStringBuffer.WriteString(argString[:len(argString)-len(e)])
charNum += len(argString) - len(e)
}
}
// after the argument, trim any trailing spaces
e = skipWhitespace(e)
if e[0] == ')' {
return argStringBuffer.String(), posArgs, namedArgs, e[1:], nil
}
if e[0] != ',' && e[0] != ' ' {
return "", nil, nil, "", merry.Wrap(ErrUnexpectedCharacter).WithUserMessagef("string_to_parse=`%v`, character_number=%v, character=`%v`", eOrig, charNum, string(e[0]))
}
e = e[1:]
}
}
func parseConst(s string) (float64, string, string, error) {
var i int
// All valid characters for a floating-point constant
// Just slurp them all in and let ParseFloat sort 'em out
for i < len(s) && (IsDigit(s[i]) || s[i] == '.' || s[i] == '+' || s[i] == '-' || s[i] == 'e' || s[i] == 'E') {
i++
}
v, err := strconv.ParseFloat(s[:i], 64)
if err != nil {
return 0, "", "", err
}
return v, s[:i], s[i:], err
}
// RangeTables is an array of *unicode.RangeTable
var RangeTables []*unicode.RangeTable
var disallowedCharactersInMetricName = map[rune]struct{}{
'(': struct{}{},
')': struct{}{},
'"': struct{}{},
'\'': struct{}{},
' ': struct{}{},
'/': struct{}{},
}
func unicodeRuneAllowedInName(r rune) bool {
if _, ok := disallowedCharactersInMetricName[r]; ok {
return false
}
return true
}
func parseName(s string) (string, string) {
var (
braces, i, w int
r rune
isEscape bool
isDefault bool
)
buf := bytes.NewBuffer(make([]byte, 0, len(s)))
FOR:
for braces, i, w = 0, 0, 0; i < len(s); i += w {
if s[i] != '\\' {
err := buf.WriteByte(s[i])
if err != nil {
break FOR
}
}
isDefault = false
w = 1
if IsNameChar(s[i]) {
continue
}
switch s[i] {
case '\\':
if isEscape {
err := buf.WriteByte(s[i])
if err != nil {
break FOR
}
isEscape = false
continue
}
isEscape = true
case '{':
if isEscape {
isDefault = true
} else {
braces++
}
case '}':
if isEscape {
isDefault = true
} else {
if braces == 0 {
break FOR
}
braces--
}
case ',':
if isEscape {
isDefault = true
} else if braces == 0 {
break FOR
}
/* */
case '=':
// allow metric name to end with any amount of `=` without treating it as a named arg or tag
if !isEscape {
if len(s) < i+2 || s[i+1] == '=' || s[i+1] == ',' || s[i+1] == ')' {
continue
}
}
fallthrough
/* */
default:
isDefault = true
}
if isDefault {
r, w = utf8.DecodeRuneInString(s[i:])
if unicodeRuneAllowedInName(r) && unicode.In(r, RangeTables...) {
continue
}
if !isEscape {
break FOR
}
isEscape = false
continue
}
}
if i == len(s) {
return buf.String(), ""
}
return s[:i], s[i:]
}
func parseString(s string) (string, string, error) {
if s[0] != '\'' && s[0] != '"' {
panic("string should start with open quote")
}
match := s[0]
s = s[1:]
var i int
for i < len(s) && s[i] != match {
i++
}
if i == len(s) {
return "", "", ErrMissingQuote
}
return s[:i], s[i+1:], nil
}
func StartAlignTo(start int64, alignTo string) (int64, error) {
var newDate time.Time
re := regexp.MustCompile(`^[0-9]+`)
alignTo = re.ReplaceAllString(alignTo, "")
startDate := time.Unix(start, 0).UTC()
switch {
case strings.HasPrefix(alignTo, "y"):
newDate = time.Date(startDate.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "mon"):
newDate = time.Date(startDate.Year(), startDate.Month(), 1, 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "w"):
if !IsDigit(alignTo[len(alignTo)-1]) {
return start, ErrInvalidInterval
}
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), 0, 0, 0, 0, time.UTC)
dayOfWeek, err := strconv.Atoi(alignTo[len(alignTo)-1:])
if err != nil {
return start, ErrInvalidInterval
}
startDayOfWeek := int(startDate.Weekday())
daysToSubtract := startDayOfWeek - dayOfWeek
if daysToSubtract < 0 {
daysToSubtract += 7
}
newDate = newDate.AddDate(0, 0, -daysToSubtract)
case strings.HasPrefix(alignTo, "d"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "h"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "min"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), startDate.Minute(), 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "s"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), startDate.Minute(), startDate.Second(), 0, time.UTC)
default:
return start, ErrInvalidInterval
}
return newDate.Unix(), nil
}
| {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
return e.args[n].doGetIntArg()
} | identifier_body |
parser.go | package parser
import (
"bytes"
"fmt"
"github.com/go-graphite/carbonapi/expr/holtwinters"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/ansel1/merry"
)
// expression parser
type expr struct {
target string
etype ExprType
val float64
valStr string
args []*expr // positional
namedArgs map[string]*expr
argString string
}
func (e *expr) IsName() bool {
return e.etype == EtName
}
func (e *expr) IsFunc() bool {
return e.etype == EtFunc
}
func (e *expr) IsConst() bool {
return e.etype == EtConst
}
func (e *expr) IsString() bool {
return e.etype == EtString
}
func (e *expr) IsBool() bool {
return e.etype == EtBool
}
func (e *expr) Type() ExprType {
return e.etype
}
func (e *expr) ToString() string {
switch e.etype {
case EtFunc:
return e.target + "(" + e.argString + ")"
case EtConst:
return e.valStr
case EtString:
s := e.valStr
s = strings.ReplaceAll(s, `\`, `\\`)
s = strings.ReplaceAll(s, `'`, `\'`)
return "'" + s + "'"
case EtBool:
return fmt.Sprint(e.val)
}
return e.target
}
func (e *expr) SetTarget(target string) {
e.target = target
}
func (e *expr) MutateTarget(target string) Expr {
e.SetTarget(target)
return e
}
func (e *expr) Target() string {
return e.target
}
func (e *expr) FloatValue() float64 {
return e.val
}
func (e *expr) StringValue() string {
return e.valStr
}
func (e *expr) SetValString(value string) {
e.valStr = value
}
func (e *expr) MutateValString(value string) Expr {
e.SetValString(value)
return e
}
func (e *expr) RawArgs() string {
return e.argString
}
func (e *expr) SetRawArgs(args string) {
e.argString = args
}
func (e *expr) MutateRawArgs(args string) Expr {
e.SetRawArgs(args)
return e
}
func (e *expr) Args() []Expr {
ret := make([]Expr, len(e.args))
for i := 0; i < len(e.args); i++ {
ret[i] = e.args[i]
}
return ret
}
func (e *expr) Arg(i int) Expr {
return e.args[i]
}
func (e *expr) ArgsLen() int {
return len(e.args)
}
func (e *expr) NamedArgs() map[string]Expr {
ret := make(map[string]Expr)
for k, v := range e.namedArgs {
ret[k] = v
}
return ret
}
func (e *expr) NamedArg(name string) (Expr, bool) {
expr, exist := e.namedArgs[name]
return expr, exist
}
func (e *expr) Metrics(from, until int64) []MetricRequest {
switch e.etype {
case EtName:
return []MetricRequest{{Metric: e.target, From: from, Until: until}}
case EtConst, EtString:
return nil
case EtFunc:
var r []MetricRequest
for _, a := range e.args {
r = append(r, a.Metrics(from, until)...)
}
switch e.target {
case "transformNull":
referenceSeriesExpr := e.GetNamedArg("referenceSeries")
if !referenceSeriesExpr.IsInterfaceNil() {
r = append(r, referenceSeriesExpr.Metrics(from, until)...)
}
case "timeShift":
offs, err := e.GetIntervalArg(1, -1)
if err != nil {
return nil
}
for i := range r {
r[i].From += int64(offs)
r[i].Until += int64(offs)
}
case "timeStack":
offs, err := e.GetIntervalArg(1, -1)
if err != nil {
return nil
}
start, err := e.GetIntArg(2)
if err != nil {
return nil
}
end, err := e.GetIntArg(3)
if err != nil {
return nil
}
var r2 []MetricRequest
for _, v := range r {
for i := int64(start); i < int64(end); i++ |
}
return r2
case "holtWintersForecast":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 1, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
for i := range r {
r[i].From -= bootstrapInterval
}
case "holtWintersConfidenceBands", "holtWintersConfidenceArea":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 2, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
for i := range r {
r[i].From -= bootstrapInterval
}
case "holtWintersAberration":
bootstrapInterval, err := e.GetIntervalNamedOrPosArgDefault("bootstrapInterval", 2, 1, holtwinters.DefaultBootstrapInterval)
if err != nil {
return nil
}
// For this function, we also need to pull data with an adjusted From time,
// so additional requests are added with the adjusted start time based on the
// bootstrapInterval
for i := range r {
adjustedReq := MetricRequest{
Metric: r[i].Metric,
From: r[i].From - bootstrapInterval,
Until: r[i].Until,
}
r = append(r, adjustedReq)
}
case "movingAverage", "movingMedian", "movingMin", "movingMax", "movingSum", "exponentialMovingAverage":
if len(e.args) < 2 {
return nil
}
if e.args[1].etype == EtString {
offs, err := e.GetIntervalArg(1, 1)
if err != nil {
return nil
}
for i := range r {
fromNew := r[i].From - int64(offs)
r[i].From = fromNew
}
}
case "hitcount":
if len(e.args) < 2 {
return nil
}
alignToInterval, err := e.GetBoolNamedOrPosArgDefault("alignToInterval", 2, false)
if err != nil {
return nil
}
if alignToInterval {
bucketSizeInt32, err := e.GetIntervalArg(1, 1)
if err != nil {
return nil
}
interval := int64(bucketSizeInt32)
// This is done in order to replicate the behavior in Graphite web when alignToInterval is set,
// in which new data is fetched with the adjusted start time.
for i, _ := range r {
start := r[i].From
for _, v := range []int64{86400, 3600, 60} {
if interval >= v {
start -= start % v
break
}
}
r[i].From = start
}
}
case "smartSummarize":
if len(e.args) < 2 {
return nil
}
alignToInterval, err := e.GetStringNamedOrPosArgDefault("alignTo", 3, "")
if err != nil {
return nil
}
if alignToInterval != "" {
for i, _ := range r {
newStart, err := StartAlignTo(r[i].From, alignToInterval)
if err != nil {
return nil
}
r[i].From = newStart
}
}
}
return r
}
return nil
}
func (e *expr) GetIntervalArg(n, defaultSign int) (int32, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
if e.args[n].etype != EtString {
return 0, ErrBadType
}
seconds, err := IntervalString(e.args[n].valStr, defaultSign)
if err != nil {
return 0, ErrBadType
}
return seconds, nil
}
func (e *expr) GetIntervalNamedOrPosArgDefault(k string, n, defaultSign int, v int64) (int64, error) {
var val string
var err error
if a := e.getNamedArg(k); a != nil {
val, err = a.doGetStringArg()
if err != nil {
return 0, ErrBadType
}
} else {
if len(e.args) <= n {
return v, nil
}
if e.args[n].etype != EtString {
return 0, ErrBadType
}
val = e.args[n].valStr
}
seconds, err := IntervalString(val, defaultSign)
if err != nil {
return 0, ErrBadType
}
return int64(seconds), nil
}
func (e *expr) GetStringArg(n int) (string, error) {
if len(e.args) <= n {
return "", ErrMissingArgument
}
return e.args[n].doGetStringArg()
}
func (e *expr) GetStringArgs(n int) ([]string, error) {
if len(e.args) <= n {
return nil, ErrMissingArgument
}
strs := make([]string, 0, len(e.args)-n)
for i := n; i < len(e.args); i++ {
a, err := e.GetStringArg(i)
if err != nil {
return nil, err
}
strs = append(strs, a)
}
return strs, nil
}
func (e *expr) GetStringArgDefault(n int, s string) (string, error) {
if len(e.args) <= n {
return s, nil
}
return e.args[n].doGetStringArg()
}
func (e *expr) GetStringNamedOrPosArgDefault(k string, n int, s string) (string, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetStringArg()
}
return e.GetStringArgDefault(n, s)
}
func (e *expr) GetFloatArg(n int) (float64, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
return e.args[n].doGetFloatArg()
}
func (e *expr) GetFloatArgDefault(n int, v float64) (float64, error) {
if len(e.args) <= n {
return v, nil
}
return e.args[n].doGetFloatArg()
}
func (e *expr) GetFloatNamedOrPosArgDefault(k string, n int, v float64) (float64, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetFloatArg()
}
return e.GetFloatArgDefault(n, v)
}
func (e *expr) GetIntArg(n int) (int, error) {
if len(e.args) <= n {
return 0, ErrMissingArgument
}
return e.args[n].doGetIntArg()
}
func (e *expr) GetIntArgs(n int) ([]int, error) {
if len(e.args) < n {
return nil, ErrMissingArgument
}
ints := make([]int, 0, len(e.args)-n)
for i := n; i < len(e.args); i++ {
a, err := e.GetIntArg(i)
if err != nil {
return nil, err
}
ints = append(ints, a)
}
return ints, nil
}
func (e *expr) GetIntArgDefault(n, d int) (int, error) {
if len(e.args) <= n {
return d, nil
}
return e.args[n].doGetIntArg()
}
func (e *expr) GetIntArgWithIndication(n int) (int, bool, error) {
if len(e.args) <= n {
return 0, false, nil
}
v, err := e.args[n].doGetIntArg()
return v, true, err
}
func (e *expr) GetIntNamedOrPosArgWithIndication(k string, n int) (int, bool, error) {
if a := e.getNamedArg(k); a != nil {
v, err := a.doGetIntArg()
return v, true, err
}
return e.GetIntArgWithIndication(n)
}
func (e *expr) GetIntNamedOrPosArgDefault(k string, n, d int) (int, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetIntArg()
}
return e.GetIntArgDefault(n, d)
}
func (e *expr) GetIntOrInfArg(n int) (IntOrInf, error) {
if len(e.args) <= n {
return IntOrInf{}, ErrMissingArgument
}
return e.args[n].doGetIntOrInfArg()
}
func (e *expr) GetIntOrInfArgDefault(n int, d IntOrInf) (IntOrInf, error) {
if len(e.args) <= n {
return d, nil
}
return e.args[n].doGetIntOrInfArg()
}
func (e *expr) GetIntOrInfNamedOrPosArgDefault(k string, n int, d IntOrInf) (IntOrInf, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetIntOrInfArg()
}
return e.GetIntOrInfArgDefault(n, d)
}
func (e *expr) GetNamedArg(name string) Expr {
return e.getNamedArg(name)
}
func (e *expr) GetBoolNamedOrPosArgDefault(k string, n int, b bool) (bool, error) {
if a := e.getNamedArg(k); a != nil {
return a.doGetBoolArg()
}
return e.GetBoolArgDefault(n, b)
}
func (e *expr) GetBoolArgDefault(n int, b bool) (bool, error) {
if len(e.args) <= n {
return b, nil
}
return e.args[n].doGetBoolArg()
}
func (e *expr) GetNodeOrTagArgs(n int, single bool) ([]NodeOrTag, error) {
// if single==false, zero nodes is OK
if single && len(e.args) <= n || len(e.args) < n {
return nil, ErrMissingArgument
}
nodeTags := make([]NodeOrTag, 0, len(e.args)-n)
var err error
until := len(e.args)
if single {
until = n + 1
}
for i := n; i < until; i++ {
var nodeTag NodeOrTag
nodeTag.Value, err = e.GetIntArg(i)
if err != nil {
// Try to parse it as String
nodeTag.Value, err = e.GetStringArg(i)
if err != nil {
return nil, err
}
nodeTag.IsTag = true
}
nodeTags = append(nodeTags, nodeTag)
}
return nodeTags, nil
}
func (e *expr) IsInterfaceNil() bool {
return e == nil
}
func (e *expr) insertFirstArg(exp *expr) error {
if e.etype != EtFunc {
return fmt.Errorf("pipe to not a function")
}
newArgs := []*expr{exp}
e.args = append(newArgs, e.args...)
if e.argString == "" {
e.argString = exp.ToString()
} else {
e.argString = exp.ToString() + "," + e.argString
}
return nil
}
func skipWhitespace(e string) string {
skipTo := len(e)
for i, r := range e {
if !unicode.IsSpace(r) {
skipTo = i
break
}
}
return e[skipTo:]
}
func parseExprWithoutPipe(e string) (Expr, string, error) {
e = skipWhitespace(e)
if e == "" {
return nil, "", ErrMissingExpr
}
if '0' <= e[0] && e[0] <= '9' || e[0] == '-' || e[0] == '+' {
val, valStr, e, err := parseConst(e)
r, _ := utf8.DecodeRuneInString(e)
if !unicode.IsLetter(r) {
return &expr{val: val, etype: EtConst, valStr: valStr}, e, err
}
}
if e[0] == '\'' || e[0] == '"' {
val, e, err := parseString(e)
return &expr{valStr: val, etype: EtString}, e, err
}
name, e := parseName(e)
if name == "" {
return nil, e, ErrMissingArgument
}
nameLower := strings.ToLower(name)
if nameLower == "false" || nameLower == "true" {
return &expr{valStr: nameLower, etype: EtBool, target: nameLower}, e, nil
}
if e != "" && e[0] == '(' {
// TODO(civil): Tags: make it a proper Expression
if name == "seriesByTag" {
argString, _, _, e, err := parseArgList(e)
return &expr{target: name + "(" + argString + ")", etype: EtName}, e, err
}
exp := &expr{target: name, etype: EtFunc}
argString, posArgs, namedArgs, e, err := parseArgList(e)
exp.argString = argString
exp.args = posArgs
exp.namedArgs = namedArgs
return exp, e, err
}
return &expr{target: name}, e, nil
}
func parseExprInner(e string) (Expr, string, error) {
exp, e, err := parseExprWithoutPipe(e)
if err != nil {
return exp, e, err
}
return pipe(exp.(*expr), e)
}
// ParseExpr actually do all the parsing. It returns expression, original string and error (if any)
func ParseExpr(e string) (Expr, string, error) {
exp, e, err := parseExprInner(e)
if err != nil {
return exp, e, err
}
exp, err = defineMap.expandExpr(exp.(*expr))
return exp, e, err
}
func pipe(exp *expr, e string) (*expr, string, error) {
e = skipWhitespace(e)
if e == "" || e[0] != '|' {
return exp, e, nil
}
wr, e, err := parseExprWithoutPipe(e[1:])
if err != nil {
return exp, e, err
}
if wr == nil {
return exp, e, nil
}
err = wr.(*expr).insertFirstArg(exp)
if err != nil {
return exp, e, err
}
exp = wr.(*expr)
return pipe(exp, e)
}
// IsNameChar checks if specified char is actually a valid (from graphite's protocol point of view)
func IsNameChar(r byte) bool {
return false ||
'a' <= r && r <= 'z' ||
'A' <= r && r <= 'Z' ||
'0' <= r && r <= '9' ||
r == '.' || r == '_' ||
r == '-' || r == '*' ||
r == '?' || r == ':' ||
r == '[' || r == ']' ||
r == '^' || r == '$' ||
r == '<' || r == '>' ||
r == '&' || r == '#' ||
r == '/' || r == '%' ||
r == '@'
}
func IsDigit(r byte) bool {
return '0' <= r && r <= '9'
}
func parseArgList(e string) (string, []*expr, map[string]*expr, string, error) {
var (
posArgs []*expr
namedArgs map[string]*expr
)
eOrig := e
if e[0] != '(' {
panic("arg list should start with paren")
}
var argStringBuffer bytes.Buffer
e = e[1:]
// check for empty args
t := skipWhitespace(e)
if t != "" && t[0] == ')' {
return "", posArgs, namedArgs, t[1:], nil
}
charNum := 1
for {
var arg Expr
var err error
charNum++
argString := e
arg, e, err = parseExprInner(e)
if err != nil {
return "", nil, nil, e, err
}
if e == "" {
return "", nil, nil, "", ErrMissingComma
}
// we now know we're parsing a key-value pair
if arg.IsName() && e[0] == '=' {
e = e[1:]
argCont, eCont, errCont := parseExprInner(e)
if errCont != nil {
return "", nil, nil, eCont, errCont
}
if eCont == "" {
return "", nil, nil, "", ErrMissingComma
}
if !argCont.IsConst() && !argCont.IsName() && !argCont.IsString() && !argCont.IsBool() {
return "", nil, nil, eCont, ErrBadType
}
if namedArgs == nil {
namedArgs = make(map[string]*expr)
}
exp := &expr{
etype: argCont.Type(),
val: argCont.FloatValue(),
valStr: argCont.StringValue(),
target: argCont.Target(),
}
namedArgs[arg.Target()] = exp
e = eCont
if argStringBuffer.Len() > 0 {
argStringBuffer.WriteByte(',')
}
argStringBuffer.WriteString(argString[:len(argString)-len(e)])
charNum += len(argString) - len(e)
} else {
exp := arg.toExpr().(*expr)
posArgs = append(posArgs, exp)
if argStringBuffer.Len() > 0 {
argStringBuffer.WriteByte(',')
}
if exp.IsFunc() {
expString := exp.ToString()
argStringBuffer.WriteString(expString)
charNum += len(expString)
} else {
argStringBuffer.WriteString(argString[:len(argString)-len(e)])
charNum += len(argString) - len(e)
}
}
// after the argument, trim any trailing spaces
e = skipWhitespace(e)
if e[0] == ')' {
return argStringBuffer.String(), posArgs, namedArgs, e[1:], nil
}
if e[0] != ',' && e[0] != ' ' {
return "", nil, nil, "", merry.Wrap(ErrUnexpectedCharacter).WithUserMessagef("string_to_parse=`%v`, character_number=%v, character=`%v`", eOrig, charNum, string(e[0]))
}
e = e[1:]
}
}
func parseConst(s string) (float64, string, string, error) {
var i int
// All valid characters for a floating-point constant
// Just slurp them all in and let ParseFloat sort 'em out
for i < len(s) && (IsDigit(s[i]) || s[i] == '.' || s[i] == '+' || s[i] == '-' || s[i] == 'e' || s[i] == 'E') {
i++
}
v, err := strconv.ParseFloat(s[:i], 64)
if err != nil {
return 0, "", "", err
}
return v, s[:i], s[i:], err
}
// RangeTables is an array of *unicode.RangeTable
var RangeTables []*unicode.RangeTable
var disallowedCharactersInMetricName = map[rune]struct{}{
'(': struct{}{},
')': struct{}{},
'"': struct{}{},
'\'': struct{}{},
' ': struct{}{},
'/': struct{}{},
}
func unicodeRuneAllowedInName(r rune) bool {
if _, ok := disallowedCharactersInMetricName[r]; ok {
return false
}
return true
}
func parseName(s string) (string, string) {
var (
braces, i, w int
r rune
isEscape bool
isDefault bool
)
buf := bytes.NewBuffer(make([]byte, 0, len(s)))
FOR:
for braces, i, w = 0, 0, 0; i < len(s); i += w {
if s[i] != '\\' {
err := buf.WriteByte(s[i])
if err != nil {
break FOR
}
}
isDefault = false
w = 1
if IsNameChar(s[i]) {
continue
}
switch s[i] {
case '\\':
if isEscape {
err := buf.WriteByte(s[i])
if err != nil {
break FOR
}
isEscape = false
continue
}
isEscape = true
case '{':
if isEscape {
isDefault = true
} else {
braces++
}
case '}':
if isEscape {
isDefault = true
} else {
if braces == 0 {
break FOR
}
braces--
}
case ',':
if isEscape {
isDefault = true
} else if braces == 0 {
break FOR
}
/* */
case '=':
// allow metric name to end with any amount of `=` without treating it as a named arg or tag
if !isEscape {
if len(s) < i+2 || s[i+1] == '=' || s[i+1] == ',' || s[i+1] == ')' {
continue
}
}
fallthrough
/* */
default:
isDefault = true
}
if isDefault {
r, w = utf8.DecodeRuneInString(s[i:])
if unicodeRuneAllowedInName(r) && unicode.In(r, RangeTables...) {
continue
}
if !isEscape {
break FOR
}
isEscape = false
continue
}
}
if i == len(s) {
return buf.String(), ""
}
return s[:i], s[i:]
}
func parseString(s string) (string, string, error) {
if s[0] != '\'' && s[0] != '"' {
panic("string should start with open quote")
}
match := s[0]
s = s[1:]
var i int
for i < len(s) && s[i] != match {
i++
}
if i == len(s) {
return "", "", ErrMissingQuote
}
return s[:i], s[i+1:], nil
}
func StartAlignTo(start int64, alignTo string) (int64, error) {
var newDate time.Time
re := regexp.MustCompile(`^[0-9]+`)
alignTo = re.ReplaceAllString(alignTo, "")
startDate := time.Unix(start, 0).UTC()
switch {
case strings.HasPrefix(alignTo, "y"):
newDate = time.Date(startDate.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "mon"):
newDate = time.Date(startDate.Year(), startDate.Month(), 1, 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "w"):
if !IsDigit(alignTo[len(alignTo)-1]) {
return start, ErrInvalidInterval
}
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), 0, 0, 0, 0, time.UTC)
dayOfWeek, err := strconv.Atoi(alignTo[len(alignTo)-1:])
if err != nil {
return start, ErrInvalidInterval
}
startDayOfWeek := int(startDate.Weekday())
daysToSubtract := startDayOfWeek - dayOfWeek
if daysToSubtract < 0 {
daysToSubtract += 7
}
newDate = newDate.AddDate(0, 0, -daysToSubtract)
case strings.HasPrefix(alignTo, "d"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), 0, 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "h"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), 0, 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "min"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), startDate.Minute(), 0, 0, time.UTC)
case strings.HasPrefix(alignTo, "s"):
newDate = time.Date(startDate.Year(), startDate.Month(), startDate.Day(), startDate.Hour(), startDate.Minute(), startDate.Second(), 0, time.UTC)
default:
return start, ErrInvalidInterval
}
return newDate.Unix(), nil
}
| {
fromNew := v.From + i*int64(offs)
untilNew := v.Until + i*int64(offs)
r2 = append(r2, MetricRequest{
Metric: v.Metric,
From: fromNew,
Until: untilNew,
})
} | conditional_block |
02a-instructions.js | exports.seed = function(knex) {
return knex('instructions').insert([
{text: 'Whisk the eggs, flour, sugar and salt until combined. Gradually add the milk and whisk until you get a smooth batter. Let stand for 15 minutes.'},
{text: 'In a small skillet over medium heat, melt the butter. Use a ladle to pour and spread out some of the mixture.'},
{text: 'Cook 2 minutes, then flip and cook 1 minute more; repeat with remaining batter. Serve crepes warm.'},
{text: 'Mix the flour, baking powder, caster sugar and a pinch of salt together in a large bowl...'},
{text: 'Heat a small knob of butter and 1 tsp of oil in a large, non-stick frying pan over a medium heat...'},
{text: 'Serve your pancakes stacked up on a plate with a drizzle of maple syrup and any of your favourite toppings.'},
{text: 'In a medium bowl, whisk together the flour, sugar, baking powder and salt.'},
{text: 'In a small bowl, mash the banana with a fork until almost smooth. Whisk in the eggs, then add the milk and vanilla...'},
{text: 'Set a griddle or non-stick pan over medium heat until hot. Put a pad of butter and one tablespoon vegetable oil onto the griddle, and swirl it around...'},
{text: 'Wipe the griddle clean with paper towels, add more butter and oil, and repeat with the remaining batter. Serve the pancakes while still hot with maple syrup, sliced bananas and icing sugar if desired.'},
{text: 'In a bowl whisk the flour with the eggs then add in the milk and the water and mix until well combined.'},
{text: 'Add salt and butter and beat until smooth.'},
{text: 'On a medium heat, heat a greased frying pan and pour in a scoop of thin batter (the size of the batter depends on how big you want your crepes to be). Tilt the pan so the batter spreads evenly.'},
{text: 'Fry the crepe for about 2 minutes or until the the bottom of the crepe is light brown, turn the crepe to fry the other side. Top with fresh fruits if desired and serve warm.'},
{text: 'Mix together 200g self-raising flour, 1 tsp baking powder and a pinch of salt in a large bowl.'},
{text: 'Beat 1 egg with 300ml milk, make a well in the centre of the dry ingredients and whisk in the milk to make a thick smooth batter.'},
{text: 'Beat in a knob of melted butter, and gently stir in half of the 150g pack of blueberries.'},
{text: 'Heat a teaspoon of sunflower oil or small knob of butter in a large non-stick frying pan.'},
{text: 'Drop a large tablespoonful of the batter per pancake into the pan to make pancakes about 7.5cm across. Make three or four pancakes at a time.'},
{text: 'Cook for about 3 minutes over a medium heat until small bubbles appear on the surface of each pancake, then turn and cook another 2-3 minutes until golden.'},
{text: 'Cover with kitchen paper to keep warm while you use up the rest of the batter.'},
{text: 'Serve with golden or maple syrup and the rest of the blueberries.'}, | {text: 'Preheat the oven to 350°C.'},
{text: 'In a bowl whisk the flour with the eggs then add in the milk and the water and mix until well combined.'},
{text: 'Add salt and butter and beat until smooth.'},
{text: 'Grease a cupcake pan with butter. Pour the batter into the holes.'},
{text: 'Bake for 5-10 minutes, until brown.'},
{text: 'In a medium bowl, whisk together the flour, sugar, baking powder and salt.'},
{text: 'In a small bowl, mash the banana with a fork until almost smooth and add the nutella. Whisk in the eggs, then add the milk and vanilla...'},
{text: 'Set a griddle or non-stick pan over medium heat until hot. Put a pad of butter and one tablespoon vegetable oil onto the griddle, and swirl it around...'},
{text: 'Wipe the griddle clean with paper towels, add more butter and oil, and repeat with the remaining batter. Serve the pancakes while still hot with maple syrup, sliced bananas and icing sugar if desired.'},
{text: 'Whisk the eggs, flour, and salt until combined. Gradually add the milk and whisk until you get a smooth batter. Let stand for 15 minutes.'},
{text: 'In a small skillet over medium heat, melt the butter. Use a ladle to pour and spread out some of the mixture.'},
{text: 'Cook 2 minutes, then flip and cook 1 minute more; repeat with remaining batter. Serve crepes warm.'},
{text: 'In a large bowl, stir together the flours and salt. Use a wooden spoon to stir in the olive oil and enough water to make a soft dough that is elastic but not sticky.'},
{text: 'Knead the dough on a lightly floured surface for 5-10 mins until it is smooth. Divide into 10 pieces, or less if you want bigger breads. Roll each piece into a ball. Let rest for a few mins.'},
{text: 'Heat a frying pan over medium heat until hot, and grease lightly. On a lightly floured surface, use a floured rolling pin to roll out the balls of dough until very thin like a tortilla.'},
{text: 'When the pan starts smoking, put a chapati on it. Cook until the underside has brown spots, about 30 seconds, then flip and cook on the other side. Put on a plate and keep warm while you cook the rest of the chapatis.'},
{text: 'Sift the flour and salt into a large bowl. Sprinkle over the oil, and add enough water to make a soft dough – add a little more flour or water if needed. Knead gently until smooth. Cover and leave to rest for about 30 minutes.'},
{text: 'On a floured work surface, divide the dough into six equal pieces and roll each one into a thin circle about the thickness of a 20 pence coin (about 2mm), using a rolling pin.'},
{text: 'Brush the bottom third of one of the roti with oil using a pastry brush, dust with a little extra flour and fold the oiled third towards the middle. Repeat with the top third, fold inwards again. Give the roti a quarter turn (90 degrees) and repeat the folding process – you should end up with a rough square. Leave to rest while you make the remaining five rotis.'},
{text: 'Heat a little oil in a heavy-based pan. Using a rolling pin, roll out one of the roti thinly and then fry on one side until it puffs up and is speckled brown on the underside. Turn it over and fry on the other side for a few minutes, until it too is puffed and speckled brown. Remove from the pan, allowing the roti to cool for a few seconds, then fold into four. Wrap in a clean tea towel placed in a colander until ready to eat. Repeat until all the roti are cooked. Serve warm.'},
{text: 'In a large bowl, combine flour and salt. Stir in water and oil. Turn onto a floured surface; knead 10-12 times, adding a little flour or water if needed to achieve a smooth dough. Let rest for 10 minutes.'},
{text: 'Divide dough into 8 portions. On a lightly floured surface, roll each portion into a 7-in. circle.'},
{text: 'Divide dough into 8 portions. On a lightly floured surface, roll each portion into a 7-in. circle.'},
{text: 'In a greased cast-iron or other heavy skillet, cook tortillas over medium heat until lightly browned, 1 minute on each side. Keep warm.'},
{text: 'Preheat the oven to 180C/160C Fan/Gas 4.'},
{text: 'For the salsa, combine the onion, tomatoes, garlic and coriander in a bowl. Season with pepper. Cover and chill for 30 minutes.'},
{text: 'For the chicken, heat the oil in a wok or large frying pan, add the onion and peppers and stir-fry for 3-4 minutes. Add the chicken, paprika, chilli powder, cumin and oregano and cook for 5 minutes, or until the chicken is cooked through.'},
{text: 'Meanwhile, wrap the tortillas in foil and warm them in the oven for 5 minutes.'},
{text: 'Spoon one-quarter of the chicken mixture into the centre of each tortilla, add a couple of tablespoons of salsa and some shredded lettuce. Roll up and serve warm.'},
{text: 'Heat the oil in a large pan – a casserole is ideal. Fry the onions for 8 mins, then add the garlic, spices and oregano and cook for 1 min. Crumble over the mince and sizzle for 5 mins, stirring, until browned. Stir in the sugar and leave for a minute, then splash in the vinegar and pour in the tomatoes.'},
{text: 'Simmer for 5 mins then tip in the beans and the water from the can. Season, stir and simmer everything for 20 mins until the beef is in a thick gravy. The sauce can be prepared up to 2 days ahead, chilled and reheated with a splash of water or frozen for 6 months.'},
{text: 'To make the burritos, heat the tortillas following pack instructions. Pile some rice and beef sauce along each tortilla and scatter over your choice of topping. Fold over the ends and roll up to seal. Secure by wrapping with foil if you want. Eat immediately.'},
{text: 'Start by making the enchilada sauce. Put the onion into a medium saucepan with the olive oil and cook over a low-medium heat for 7–8 minutes, or until soft and just starting to brown at the edges.'},
{text: 'Add the crushed garlic and cook for a further minute. Add the smoked paprika, dried oregano, chilli powder and ground cumin, mix well and cook for a further 30 seconds.'},
{text: 'Add the passata, brown sugar and vinegar to the pan, season well with salt and freshly ground black pepper and cook over a low–medium heat for 20 minutes until thickened slightly. Remove from the heat and blend until smooth.'},
{text: 'Preheat the oven to 190C/170C/Gas 5.'},
{text: 'For the enchilada, heat half of the olive oil in a large frying pan, add the sliced onions and peppers and cook over a medium heat for about 3 minutes, or until just tender and starting to caramelise at the edges. Add the garlic and red chilli and cook for a further 30 seconds. Remove from the pan and set aside.'},
{text: 'Heat the remaining oil in the frying pan, add the chicken and cook quickly over a medium heat until cooked through and golden brown.'},
{text: 'Return the onion and pepper mixture to the pan, add half of the enchilada sauce, and drained kidney beans, season well and cook for a further minute.'},
{text: 'Lay the flour tortillas on the work surface and divide the chicken mixture between them. Roll the flour tortillas around the filling into cigars and arrange neatly and snuggly in an ovenproof dish (roughly 20 x 30cm/8x 12in). Spoon the remaining enchilada sauce over the top and scatter with grated cheese.'},
{text: 'Bake for about 20 minutes, or until the filling is piping hot and the cheese bubbling, melted and golden-brown.'},
{text: 'To serve, scatter with the spring onions and chopped coriander and serve with sliced avocado and soured cream alongside.'},
{text: 'To make the red onion pickle, put the vinegar, sugar and salt in a small saucepan, cover with cold water and bring to the boil. Take the pan off the heat and add the onion. Leave to stand for 30–60 minutes. Drain well before serving.'},
{text: 'To make the tortillas, place the flour and salt in a large bowl and gradually add the oil and water, stirring constantly until the mixture comes together to form a rough dough. Transfer to a lightly floured surface and knead for 5 minutes. Roll into a ball, wrap in cling film and chill for 30 minutes.'},
{text: 'Etc.'}
]);
} |
{text: 'Mix the flour, baking powder, maple syrup, caster sugar and a pinch of salt together in a large bowl...'},
{text: 'Heat a small knob of butter and 1 tsp of oil in a large, non-stick frying pan over a medium heat...'},
{text: 'Serve your pancakes stacked up on a plate with a drizzle of maple syrup and any of your favourite toppings.'},
| random_line_split |
slicerUserInteraction.py | import slicer
import vtk
import os
import sys
from diffusionqclib.dwi_attributes import dwi_attributes
from diffusionqclib.saveResults import saveResults
import numpy as np
FAIL= '\tFail' # \t is for visually separating fail gradients
UNSURE= '\tUnsure' # \t is for visually separating Unsure gradients
class slicerGUI():
def slicerUserInterface(self, userDWIpath, userDWInode, label, summary,
discardButton, keepButton, sureButton, unsureButton, nextButton, resetButton, saveButton):
self.dwiPath= userDWIpath
self.prefix = os.path.basename(self.dwiPath.split('.')[0])
self.directory = os.path.dirname(os.path.abspath(self.dwiPath))
self.deletion= np.load(os.path.join(self.directory, self.prefix+'_QC.npy'))
self.confidence= np.load(os.path.join(self.directory, self.prefix+'_confidence.npy'))
self.KLdiv= np.load(os.path.join(self.directory, self.prefix+'_KLdiv.npy'))
self.qualityBackUp= self.deletion.copy()
self.confidenceBackUp= self.confidence.copy()
self.dwiNode= userDWInode
self.decisionLabel= label
self.summaryLabel= summary
# Write out algorithm summary
self.summaryUpdate()
# The following code is for making a table
# Create table with gradient index, decision, and confidence
self.tableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode")
table = self.tableNode.GetTable()
arrX= vtk.vtkStringArray( )
arrX.SetName("Gradient #")
table.AddColumn(arrX)
arrY1 = vtk.vtkStringArray()
arrY1.SetName("Decision")
table.AddColumn(arrY1)
arrY2 = vtk.vtkStringArray()
arrY2.SetName("Confidence")
table.AddColumn(arrY2)
arrY3 = vtk.vtkStringArray()
arrY3.SetName("Marked for deletion")
table.AddColumn(arrY3)
table.SetNumberOfRows(self.KLdiv.shape[0])
for i in range(self.KLdiv.shape[0]):
# To prevent the gradient index confuse with row number '{:10}'.format(i)
table.SetValue(i, 0, '{:10}'.format(i))
table.SetValue(i, 1, 'Pass' if self.deletion[i] else FAIL)
table.SetValue(i, 2, 'Sure' if self.confidence[i] else UNSURE)
table.SetValue(i, 3, 'X' if not self.deletion[i] else ' ')
currentLayout = slicer.app.layoutManager().layout
layoutWithTable = slicer.modules.tables.logic().GetLayoutWithTable(currentLayout)
slicer.app.layoutManager().setLayout(layoutWithTable)
slicer.app.applicationLogic().GetSelectionNode().SetReferenceActiveTableID(self.tableNode.GetID())
slicer.app.applicationLogic().PropagateTableSelection()
# Table display finished --------------------------------------------------------------------
# The following code is for making graph
self.graphTableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode")
graphTable = self.graphTableNode.GetTable()
arrX = vtk.vtkIntArray()
arrX.SetName("Slice #")
graphTable.AddColumn(arrX)
arrY1 = vtk.vtkFloatArray()
arrY1.SetName("Divergence")
graphTable.AddColumn(arrY1)
graphTable.SetNumberOfRows(self.KLdiv.shape[1])
# Create a plot series nodes
plotSeriesNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotSeriesNode", "KL div")
plotSeriesNode.SetAndObserveTableNodeID(self.graphTableNode.GetID())
plotSeriesNode.SetXColumnName("Slice #")
plotSeriesNode.SetYColumnName("Divergence")
plotSeriesNode.SetPlotType(slicer.vtkMRMLPlotSeriesNode.PlotTypeScatter)
plotSeriesNode.SetLineStyle(slicer.vtkMRMLPlotSeriesNode.LineStyleSolid)
plotSeriesNode.SetMarkerStyle(slicer.vtkMRMLPlotSeriesNode.MarkerStyleSquare)
plotSeriesNode.SetUniqueColor()
# Create plot chart node
plotChartNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotChartNode")
plotChartNode.AddAndObservePlotSeriesNodeID(plotSeriesNode.GetID())
plotChartNode.SetTitle('KL divergence value plot')
plotChartNode.SetXAxisTitle('Slice index')
plotChartNode.SetYAxisTitle('KL div')
# Switch to a layout that contains a plot view to create a plot widget
layoutManager = slicer.app.layoutManager()
layoutWithPlot = slicer.modules.plots.logic().GetLayoutWithPlot(layoutManager.layout)
layoutManager.setLayout(layoutWithPlot)
# Properly set the plot chart interaction mode
# Select chart in plot view
plotWidget = layoutManager.plotWidget(0)
plotViewNode = plotWidget.mrmlPlotViewNode()
plotViewNode.SetPlotChartNodeID(plotChartNode.GetID())
plotViewNode.SetInteractionMode(plotViewNode.InteractionModeSelectPoints) # select points mode
# Graph display finished -------------------------------------------------------------
mainwindow = slicer.util.mainWindow()
self.figureHandle= slicer.util.findChildren(mainwindow, className="qMRMLPlotView")[0]
self.tableHandle = slicer.util.findChildren(mainwindow, className="qMRMLTableView")[0]
self.tableHandle.connect('selectionChanged()', self.gradientUpdate)
self.figureHandle.connect('dataSelected(vtkStringArray*, vtkCollection*)', self.sliceUpdate)
discardButton.connect('clicked(bool)', self.discardGradient)
keepButton.connect('clicked(bool)', self.keepGradient)
nextButton.connect('clicked(bool)', self.nextReview)
sureButton.connect('clicked(bool)', self.makeSure)
unsureButton.connect('clicked(bool)', self.makeUnsure)
saveButton.connect('clicked(bool)', self.finishInteraction)
resetButton.connect('clicked(bool)', self.resetResults)
# TODO: Use the above handles to disconnect all signals after 'Save' (not much necessary)
# Displaying 0th gradient graph as default
self.plotUpdate(0)
def | (self):
# Return only if pushbutton save is pressed
hdr, mri, grad_axis, _, _, _ = dwi_attributes(self.dwiPath)
saveResults(self.prefix, self.directory, self.deletion, None, None, hdr, mri, grad_axis, True)
# Getting specific point ID from graph
# Switching among slices
def sliceUpdate(self,_,dataPointID):
res = self.dwiNode.GetSpacing()[2] # The 2 corresponds to axial view (check if we need to soft code)
org = self.dwiNode.GetOrigin()[2] # The 2 corresponds to axial view (check if we need to soft code)
axialView = slicer.util.getNode('vtkMRMLSliceNodeRed')
if not dataPointID.GetNumberOfItems( ):
# if no point is selected, do nothing
return
else:
array= dataPointID.GetItemAsObject(0)
slice_index = array.GetValue(0)
# The following lines set appropriate slice in the axial view only
if slice_index<= abs(org)/2:
offset= res*slice_index+org-1
else:
offset= res*slice_index+org
axialView.SetSliceOffset(offset)
def discardGradient(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient as fail
diffusion_index= arr.GetDiffusionComponent()
self.deletion[diffusion_index]= 0 # bad ones are marked with 0
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 1, FAIL)
table.SetValue(diffusion_index, 3, 'X')
self.tableNode.Modified()
self.summaryUpdate()
def keepGradient(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient for self.deletion
diffusion_index= arr.GetDiffusionComponent()
self.deletion[diffusion_index]= 1 # good ones are marked with 1
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 1, 'Pass')
table.SetValue(diffusion_index, 3, ' ')
self.tableNode.Modified()
self.summaryUpdate()
def makeUnsure(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient as fail
diffusion_index= arr.GetDiffusionComponent()
self.confidence[diffusion_index]= 0 # unsure ones are marked with 0
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 2, UNSURE)
self.tableNode.Modified()
self.summaryUpdate()
def makeSure(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient for self.deletion
diffusion_index= arr.GetDiffusionComponent()
self.confidence[diffusion_index]= 1 # sure ones are marked with 1
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 2, 'Sure')
self.tableNode.Modified()
self.summaryUpdate()
# Getting specific point ID from table
# Switching among gradients
def gradientUpdate(self):
index = self.tableHandle.selectedIndexes()[0]
diffusion_index = index.row( )-1
# Probable BUG: every time selectionChanged(), gradientUpdate() is called twice
# That means all the work below are done twice for no reason
# The following print statement is for debugging
# print("Index ", diffusion_index)
# The following line sets appropriate gradient
if diffusion_index >= 0:
self.plotUpdate(diffusion_index) # Label update is done inside this function
def plotUpdate(self, diffusion_index):
# Select corresponding row of the table
self.tableHandle.selectRow(diffusion_index+1)
# Label update is done below
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
arr.SetDiffusionComponent(diffusion_index)
q= 'Pass' if self.deletion[diffusion_index] else FAIL
c= 'Sure' if self.confidence[diffusion_index] else UNSURE
self.decisionLabel.setText("Displaying gradient # "+str(diffusion_index)+' ,\t'+
"Quality: "+ q +' ,\t'+ "Confidence: "+ c)
# The following code is for making a plot
# Create table with slice index, and divergence value
graphTable = self.graphTableNode.GetTable()
for i in range(self.KLdiv.shape[1]):
# Filling up row wise
graphTable.SetValue(i, 0, i)
graphTable.SetValue(i, 1, self.KLdiv[diffusion_index, i])
self.graphTableNode.Modified()
def resetResults(self):
table = self.tableNode.GetTable()
for i in range(self.KLdiv.shape[0]):
table.SetValue(i, 0, i)
table.SetValue(i, 1, 'Pass' if self.qualityBackUp[i] else FAIL)
table.SetValue(i, 2, 'Sure' if self.confidenceBackUp[i] else UNSURE)
table.SetValue(i, 3, 'X' if not self.qualityBackUp[i] else ' ')
self.tableNode.Modified()
self.deletion= self.qualityBackUp.copy()
self.plotUpdate(0)
self.summaryUpdate()
def nextReview(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
diffusion_index = arr.GetDiffusionComponent()
if (self.confidence==0).any( ):
if diffusion_index<len(self.confidence)-1:
i=diffusion_index+1
else:
i=0 # force to start from beginning
while self.confidence[i]: # While sure, continue looping for the next unsure
i+=1
if i!=diffusion_index:
arr.SetDiffusionComponent(i)
self.plotUpdate(i)
def summaryUpdate(self):
self.summaryLabel.setText("Total gradients "+str(len(self.deletion))+
",\t\t# of fails "+ str(len(np.where(self.deletion==0)[0]))+
",\t# of unsures " + str(len(np.where(self.confidence == 0)[0]))) | finishInteraction | identifier_name |
slicerUserInteraction.py | import slicer
import vtk
import os
import sys
from diffusionqclib.dwi_attributes import dwi_attributes
from diffusionqclib.saveResults import saveResults
import numpy as np
FAIL= '\tFail' # \t is for visually separating fail gradients
UNSURE= '\tUnsure' # \t is for visually separating Unsure gradients
class slicerGUI():
def slicerUserInterface(self, userDWIpath, userDWInode, label, summary,
discardButton, keepButton, sureButton, unsureButton, nextButton, resetButton, saveButton):
self.dwiPath= userDWIpath
self.prefix = os.path.basename(self.dwiPath.split('.')[0])
self.directory = os.path.dirname(os.path.abspath(self.dwiPath))
self.deletion= np.load(os.path.join(self.directory, self.prefix+'_QC.npy'))
self.confidence= np.load(os.path.join(self.directory, self.prefix+'_confidence.npy'))
self.KLdiv= np.load(os.path.join(self.directory, self.prefix+'_KLdiv.npy'))
self.qualityBackUp= self.deletion.copy()
self.confidenceBackUp= self.confidence.copy()
self.dwiNode= userDWInode
self.decisionLabel= label
self.summaryLabel= summary
# Write out algorithm summary
self.summaryUpdate()
# The following code is for making a table
# Create table with gradient index, decision, and confidence
self.tableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode")
table = self.tableNode.GetTable()
arrX= vtk.vtkStringArray( )
arrX.SetName("Gradient #")
table.AddColumn(arrX)
arrY1 = vtk.vtkStringArray()
arrY1.SetName("Decision")
table.AddColumn(arrY1)
arrY2 = vtk.vtkStringArray()
arrY2.SetName("Confidence")
table.AddColumn(arrY2)
arrY3 = vtk.vtkStringArray()
arrY3.SetName("Marked for deletion")
table.AddColumn(arrY3)
table.SetNumberOfRows(self.KLdiv.shape[0])
for i in range(self.KLdiv.shape[0]):
# To prevent the gradient index confuse with row number '{:10}'.format(i)
table.SetValue(i, 0, '{:10}'.format(i))
table.SetValue(i, 1, 'Pass' if self.deletion[i] else FAIL)
table.SetValue(i, 2, 'Sure' if self.confidence[i] else UNSURE)
table.SetValue(i, 3, 'X' if not self.deletion[i] else ' ')
currentLayout = slicer.app.layoutManager().layout
layoutWithTable = slicer.modules.tables.logic().GetLayoutWithTable(currentLayout)
slicer.app.layoutManager().setLayout(layoutWithTable)
slicer.app.applicationLogic().GetSelectionNode().SetReferenceActiveTableID(self.tableNode.GetID())
slicer.app.applicationLogic().PropagateTableSelection()
# Table display finished --------------------------------------------------------------------
# The following code is for making graph
self.graphTableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode")
graphTable = self.graphTableNode.GetTable()
arrX = vtk.vtkIntArray()
arrX.SetName("Slice #")
graphTable.AddColumn(arrX)
arrY1 = vtk.vtkFloatArray()
arrY1.SetName("Divergence")
graphTable.AddColumn(arrY1)
graphTable.SetNumberOfRows(self.KLdiv.shape[1])
# Create a plot series nodes
plotSeriesNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotSeriesNode", "KL div")
plotSeriesNode.SetAndObserveTableNodeID(self.graphTableNode.GetID())
plotSeriesNode.SetXColumnName("Slice #")
plotSeriesNode.SetYColumnName("Divergence")
plotSeriesNode.SetPlotType(slicer.vtkMRMLPlotSeriesNode.PlotTypeScatter)
plotSeriesNode.SetLineStyle(slicer.vtkMRMLPlotSeriesNode.LineStyleSolid)
plotSeriesNode.SetMarkerStyle(slicer.vtkMRMLPlotSeriesNode.MarkerStyleSquare)
plotSeriesNode.SetUniqueColor()
# Create plot chart node
plotChartNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotChartNode")
plotChartNode.AddAndObservePlotSeriesNodeID(plotSeriesNode.GetID())
plotChartNode.SetTitle('KL divergence value plot')
plotChartNode.SetXAxisTitle('Slice index')
plotChartNode.SetYAxisTitle('KL div')
# Switch to a layout that contains a plot view to create a plot widget
layoutManager = slicer.app.layoutManager()
layoutWithPlot = slicer.modules.plots.logic().GetLayoutWithPlot(layoutManager.layout)
layoutManager.setLayout(layoutWithPlot)
# Properly set the plot chart interaction mode
# Select chart in plot view
plotWidget = layoutManager.plotWidget(0)
plotViewNode = plotWidget.mrmlPlotViewNode()
plotViewNode.SetPlotChartNodeID(plotChartNode.GetID())
plotViewNode.SetInteractionMode(plotViewNode.InteractionModeSelectPoints) # select points mode
# Graph display finished -------------------------------------------------------------
mainwindow = slicer.util.mainWindow()
self.figureHandle= slicer.util.findChildren(mainwindow, className="qMRMLPlotView")[0]
self.tableHandle = slicer.util.findChildren(mainwindow, className="qMRMLTableView")[0]
self.tableHandle.connect('selectionChanged()', self.gradientUpdate)
self.figureHandle.connect('dataSelected(vtkStringArray*, vtkCollection*)', self.sliceUpdate)
discardButton.connect('clicked(bool)', self.discardGradient)
keepButton.connect('clicked(bool)', self.keepGradient)
nextButton.connect('clicked(bool)', self.nextReview)
sureButton.connect('clicked(bool)', self.makeSure)
unsureButton.connect('clicked(bool)', self.makeUnsure)
saveButton.connect('clicked(bool)', self.finishInteraction)
resetButton.connect('clicked(bool)', self.resetResults)
# TODO: Use the above handles to disconnect all signals after 'Save' (not much necessary)
# Displaying 0th gradient graph as default
self.plotUpdate(0)
def finishInteraction(self):
# Return only if pushbutton save is pressed
hdr, mri, grad_axis, _, _, _ = dwi_attributes(self.dwiPath)
saveResults(self.prefix, self.directory, self.deletion, None, None, hdr, mri, grad_axis, True)
# Getting specific point ID from graph
# Switching among slices
def sliceUpdate(self,_,dataPointID):
res = self.dwiNode.GetSpacing()[2] # The 2 corresponds to axial view (check if we need to soft code)
org = self.dwiNode.GetOrigin()[2] # The 2 corresponds to axial view (check if we need to soft code)
axialView = slicer.util.getNode('vtkMRMLSliceNodeRed')
if not dataPointID.GetNumberOfItems( ):
# if no point is selected, do nothing
return
else:
array= dataPointID.GetItemAsObject(0)
slice_index = array.GetValue(0)
# The following lines set appropriate slice in the axial view only
if slice_index<= abs(org)/2:
offset= res*slice_index+org-1
else:
|
axialView.SetSliceOffset(offset)
def discardGradient(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient as fail
diffusion_index= arr.GetDiffusionComponent()
self.deletion[diffusion_index]= 0 # bad ones are marked with 0
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 1, FAIL)
table.SetValue(diffusion_index, 3, 'X')
self.tableNode.Modified()
self.summaryUpdate()
def keepGradient(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient for self.deletion
diffusion_index= arr.GetDiffusionComponent()
self.deletion[diffusion_index]= 1 # good ones are marked with 1
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 1, 'Pass')
table.SetValue(diffusion_index, 3, ' ')
self.tableNode.Modified()
self.summaryUpdate()
def makeUnsure(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient as fail
diffusion_index= arr.GetDiffusionComponent()
self.confidence[diffusion_index]= 0 # unsure ones are marked with 0
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 2, UNSURE)
self.tableNode.Modified()
self.summaryUpdate()
def makeSure(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient for self.deletion
diffusion_index= arr.GetDiffusionComponent()
self.confidence[diffusion_index]= 1 # sure ones are marked with 1
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 2, 'Sure')
self.tableNode.Modified()
self.summaryUpdate()
# Getting specific point ID from table
# Switching among gradients
def gradientUpdate(self):
index = self.tableHandle.selectedIndexes()[0]
diffusion_index = index.row( )-1
# Probable BUG: every time selectionChanged(), gradientUpdate() is called twice
# That means all the work below are done twice for no reason
# The following print statement is for debugging
# print("Index ", diffusion_index)
# The following line sets appropriate gradient
if diffusion_index >= 0:
self.plotUpdate(diffusion_index) # Label update is done inside this function
def plotUpdate(self, diffusion_index):
# Select corresponding row of the table
self.tableHandle.selectRow(diffusion_index+1)
# Label update is done below
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
arr.SetDiffusionComponent(diffusion_index)
q= 'Pass' if self.deletion[diffusion_index] else FAIL
c= 'Sure' if self.confidence[diffusion_index] else UNSURE
self.decisionLabel.setText("Displaying gradient # "+str(diffusion_index)+' ,\t'+
"Quality: "+ q +' ,\t'+ "Confidence: "+ c)
# The following code is for making a plot
# Create table with slice index, and divergence value
graphTable = self.graphTableNode.GetTable()
for i in range(self.KLdiv.shape[1]):
# Filling up row wise
graphTable.SetValue(i, 0, i)
graphTable.SetValue(i, 1, self.KLdiv[diffusion_index, i])
self.graphTableNode.Modified()
def resetResults(self):
table = self.tableNode.GetTable()
for i in range(self.KLdiv.shape[0]):
table.SetValue(i, 0, i)
table.SetValue(i, 1, 'Pass' if self.qualityBackUp[i] else FAIL)
table.SetValue(i, 2, 'Sure' if self.confidenceBackUp[i] else UNSURE)
table.SetValue(i, 3, 'X' if not self.qualityBackUp[i] else ' ')
self.tableNode.Modified()
self.deletion= self.qualityBackUp.copy()
self.plotUpdate(0)
self.summaryUpdate()
def nextReview(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
diffusion_index = arr.GetDiffusionComponent()
if (self.confidence==0).any( ):
if diffusion_index<len(self.confidence)-1:
i=diffusion_index+1
else:
i=0 # force to start from beginning
while self.confidence[i]: # While sure, continue looping for the next unsure
i+=1
if i!=diffusion_index:
arr.SetDiffusionComponent(i)
self.plotUpdate(i)
def summaryUpdate(self):
self.summaryLabel.setText("Total gradients "+str(len(self.deletion))+
",\t\t# of fails "+ str(len(np.where(self.deletion==0)[0]))+
",\t# of unsures " + str(len(np.where(self.confidence == 0)[0]))) | offset= res*slice_index+org | conditional_block |
slicerUserInteraction.py | import slicer
import vtk
import os
import sys
from diffusionqclib.dwi_attributes import dwi_attributes
from diffusionqclib.saveResults import saveResults
import numpy as np
FAIL= '\tFail' # \t is for visually separating fail gradients
UNSURE= '\tUnsure' # \t is for visually separating Unsure gradients
class slicerGUI():
def slicerUserInterface(self, userDWIpath, userDWInode, label, summary,
discardButton, keepButton, sureButton, unsureButton, nextButton, resetButton, saveButton):
|
def finishInteraction(self):
# Return only if pushbutton save is pressed
hdr, mri, grad_axis, _, _, _ = dwi_attributes(self.dwiPath)
saveResults(self.prefix, self.directory, self.deletion, None, None, hdr, mri, grad_axis, True)
# Getting specific point ID from graph
# Switching among slices
def sliceUpdate(self,_,dataPointID):
res = self.dwiNode.GetSpacing()[2] # The 2 corresponds to axial view (check if we need to soft code)
org = self.dwiNode.GetOrigin()[2] # The 2 corresponds to axial view (check if we need to soft code)
axialView = slicer.util.getNode('vtkMRMLSliceNodeRed')
if not dataPointID.GetNumberOfItems( ):
# if no point is selected, do nothing
return
else:
array= dataPointID.GetItemAsObject(0)
slice_index = array.GetValue(0)
# The following lines set appropriate slice in the axial view only
if slice_index<= abs(org)/2:
offset= res*slice_index+org-1
else:
offset= res*slice_index+org
axialView.SetSliceOffset(offset)
def discardGradient(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient as fail
diffusion_index= arr.GetDiffusionComponent()
self.deletion[diffusion_index]= 0 # bad ones are marked with 0
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 1, FAIL)
table.SetValue(diffusion_index, 3, 'X')
self.tableNode.Modified()
self.summaryUpdate()
def keepGradient(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient for self.deletion
diffusion_index= arr.GetDiffusionComponent()
self.deletion[diffusion_index]= 1 # good ones are marked with 1
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 1, 'Pass')
table.SetValue(diffusion_index, 3, ' ')
self.tableNode.Modified()
self.summaryUpdate()
def makeUnsure(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient as fail
diffusion_index= arr.GetDiffusionComponent()
self.confidence[diffusion_index]= 0 # unsure ones are marked with 0
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 2, UNSURE)
self.tableNode.Modified()
self.summaryUpdate()
def makeSure(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient for self.deletion
diffusion_index= arr.GetDiffusionComponent()
self.confidence[diffusion_index]= 1 # sure ones are marked with 1
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 2, 'Sure')
self.tableNode.Modified()
self.summaryUpdate()
# Getting specific point ID from table
# Switching among gradients
def gradientUpdate(self):
index = self.tableHandle.selectedIndexes()[0]
diffusion_index = index.row( )-1
# Probable BUG: every time selectionChanged(), gradientUpdate() is called twice
# That means all the work below are done twice for no reason
# The following print statement is for debugging
# print("Index ", diffusion_index)
# The following line sets appropriate gradient
if diffusion_index >= 0:
self.plotUpdate(diffusion_index) # Label update is done inside this function
def plotUpdate(self, diffusion_index):
# Select corresponding row of the table
self.tableHandle.selectRow(diffusion_index+1)
# Label update is done below
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
arr.SetDiffusionComponent(diffusion_index)
q= 'Pass' if self.deletion[diffusion_index] else FAIL
c= 'Sure' if self.confidence[diffusion_index] else UNSURE
self.decisionLabel.setText("Displaying gradient # "+str(diffusion_index)+' ,\t'+
"Quality: "+ q +' ,\t'+ "Confidence: "+ c)
# The following code is for making a plot
# Create table with slice index, and divergence value
graphTable = self.graphTableNode.GetTable()
for i in range(self.KLdiv.shape[1]):
# Filling up row wise
graphTable.SetValue(i, 0, i)
graphTable.SetValue(i, 1, self.KLdiv[diffusion_index, i])
self.graphTableNode.Modified()
def resetResults(self):
table = self.tableNode.GetTable()
for i in range(self.KLdiv.shape[0]):
table.SetValue(i, 0, i)
table.SetValue(i, 1, 'Pass' if self.qualityBackUp[i] else FAIL)
table.SetValue(i, 2, 'Sure' if self.confidenceBackUp[i] else UNSURE)
table.SetValue(i, 3, 'X' if not self.qualityBackUp[i] else ' ')
self.tableNode.Modified()
self.deletion= self.qualityBackUp.copy()
self.plotUpdate(0)
self.summaryUpdate()
def nextReview(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
diffusion_index = arr.GetDiffusionComponent()
if (self.confidence==0).any( ):
if diffusion_index<len(self.confidence)-1:
i=diffusion_index+1
else:
i=0 # force to start from beginning
while self.confidence[i]: # While sure, continue looping for the next unsure
i+=1
if i!=diffusion_index:
arr.SetDiffusionComponent(i)
self.plotUpdate(i)
def summaryUpdate(self):
self.summaryLabel.setText("Total gradients "+str(len(self.deletion))+
",\t\t# of fails "+ str(len(np.where(self.deletion==0)[0]))+
",\t# of unsures " + str(len(np.where(self.confidence == 0)[0]))) | self.dwiPath= userDWIpath
self.prefix = os.path.basename(self.dwiPath.split('.')[0])
self.directory = os.path.dirname(os.path.abspath(self.dwiPath))
self.deletion= np.load(os.path.join(self.directory, self.prefix+'_QC.npy'))
self.confidence= np.load(os.path.join(self.directory, self.prefix+'_confidence.npy'))
self.KLdiv= np.load(os.path.join(self.directory, self.prefix+'_KLdiv.npy'))
self.qualityBackUp= self.deletion.copy()
self.confidenceBackUp= self.confidence.copy()
self.dwiNode= userDWInode
self.decisionLabel= label
self.summaryLabel= summary
# Write out algorithm summary
self.summaryUpdate()
# The following code is for making a table
# Create table with gradient index, decision, and confidence
self.tableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode")
table = self.tableNode.GetTable()
arrX= vtk.vtkStringArray( )
arrX.SetName("Gradient #")
table.AddColumn(arrX)
arrY1 = vtk.vtkStringArray()
arrY1.SetName("Decision")
table.AddColumn(arrY1)
arrY2 = vtk.vtkStringArray()
arrY2.SetName("Confidence")
table.AddColumn(arrY2)
arrY3 = vtk.vtkStringArray()
arrY3.SetName("Marked for deletion")
table.AddColumn(arrY3)
table.SetNumberOfRows(self.KLdiv.shape[0])
for i in range(self.KLdiv.shape[0]):
# To prevent the gradient index confuse with row number '{:10}'.format(i)
table.SetValue(i, 0, '{:10}'.format(i))
table.SetValue(i, 1, 'Pass' if self.deletion[i] else FAIL)
table.SetValue(i, 2, 'Sure' if self.confidence[i] else UNSURE)
table.SetValue(i, 3, 'X' if not self.deletion[i] else ' ')
currentLayout = slicer.app.layoutManager().layout
layoutWithTable = slicer.modules.tables.logic().GetLayoutWithTable(currentLayout)
slicer.app.layoutManager().setLayout(layoutWithTable)
slicer.app.applicationLogic().GetSelectionNode().SetReferenceActiveTableID(self.tableNode.GetID())
slicer.app.applicationLogic().PropagateTableSelection()
# Table display finished --------------------------------------------------------------------
# The following code is for making graph
self.graphTableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode")
graphTable = self.graphTableNode.GetTable()
arrX = vtk.vtkIntArray()
arrX.SetName("Slice #")
graphTable.AddColumn(arrX)
arrY1 = vtk.vtkFloatArray()
arrY1.SetName("Divergence")
graphTable.AddColumn(arrY1)
graphTable.SetNumberOfRows(self.KLdiv.shape[1])
# Create a plot series nodes
plotSeriesNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotSeriesNode", "KL div")
plotSeriesNode.SetAndObserveTableNodeID(self.graphTableNode.GetID())
plotSeriesNode.SetXColumnName("Slice #")
plotSeriesNode.SetYColumnName("Divergence")
plotSeriesNode.SetPlotType(slicer.vtkMRMLPlotSeriesNode.PlotTypeScatter)
plotSeriesNode.SetLineStyle(slicer.vtkMRMLPlotSeriesNode.LineStyleSolid)
plotSeriesNode.SetMarkerStyle(slicer.vtkMRMLPlotSeriesNode.MarkerStyleSquare)
plotSeriesNode.SetUniqueColor()
# Create plot chart node
plotChartNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotChartNode")
plotChartNode.AddAndObservePlotSeriesNodeID(plotSeriesNode.GetID())
plotChartNode.SetTitle('KL divergence value plot')
plotChartNode.SetXAxisTitle('Slice index')
plotChartNode.SetYAxisTitle('KL div')
# Switch to a layout that contains a plot view to create a plot widget
layoutManager = slicer.app.layoutManager()
layoutWithPlot = slicer.modules.plots.logic().GetLayoutWithPlot(layoutManager.layout)
layoutManager.setLayout(layoutWithPlot)
# Properly set the plot chart interaction mode
# Select chart in plot view
plotWidget = layoutManager.plotWidget(0)
plotViewNode = plotWidget.mrmlPlotViewNode()
plotViewNode.SetPlotChartNodeID(plotChartNode.GetID())
plotViewNode.SetInteractionMode(plotViewNode.InteractionModeSelectPoints) # select points mode
# Graph display finished -------------------------------------------------------------
mainwindow = slicer.util.mainWindow()
self.figureHandle= slicer.util.findChildren(mainwindow, className="qMRMLPlotView")[0]
self.tableHandle = slicer.util.findChildren(mainwindow, className="qMRMLTableView")[0]
self.tableHandle.connect('selectionChanged()', self.gradientUpdate)
self.figureHandle.connect('dataSelected(vtkStringArray*, vtkCollection*)', self.sliceUpdate)
discardButton.connect('clicked(bool)', self.discardGradient)
keepButton.connect('clicked(bool)', self.keepGradient)
nextButton.connect('clicked(bool)', self.nextReview)
sureButton.connect('clicked(bool)', self.makeSure)
unsureButton.connect('clicked(bool)', self.makeUnsure)
saveButton.connect('clicked(bool)', self.finishInteraction)
resetButton.connect('clicked(bool)', self.resetResults)
# TODO: Use the above handles to disconnect all signals after 'Save' (not much necessary)
# Displaying 0th gradient graph as default
self.plotUpdate(0) | identifier_body |
slicerUserInteraction.py | import slicer
import vtk
import os
import sys
from diffusionqclib.dwi_attributes import dwi_attributes
from diffusionqclib.saveResults import saveResults
import numpy as np
FAIL= '\tFail' # \t is for visually separating fail gradients
UNSURE= '\tUnsure' # \t is for visually separating Unsure gradients
class slicerGUI():
def slicerUserInterface(self, userDWIpath, userDWInode, label, summary,
discardButton, keepButton, sureButton, unsureButton, nextButton, resetButton, saveButton):
self.dwiPath= userDWIpath
self.prefix = os.path.basename(self.dwiPath.split('.')[0])
self.directory = os.path.dirname(os.path.abspath(self.dwiPath))
self.deletion= np.load(os.path.join(self.directory, self.prefix+'_QC.npy'))
self.confidence= np.load(os.path.join(self.directory, self.prefix+'_confidence.npy'))
self.KLdiv= np.load(os.path.join(self.directory, self.prefix+'_KLdiv.npy'))
self.qualityBackUp= self.deletion.copy()
self.confidenceBackUp= self.confidence.copy()
self.dwiNode= userDWInode
self.decisionLabel= label
self.summaryLabel= summary
# Write out algorithm summary
self.summaryUpdate()
# The following code is for making a table
# Create table with gradient index, decision, and confidence
self.tableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode")
table = self.tableNode.GetTable()
arrX= vtk.vtkStringArray( )
arrX.SetName("Gradient #")
table.AddColumn(arrX)
arrY1 = vtk.vtkStringArray()
arrY1.SetName("Decision")
table.AddColumn(arrY1)
arrY2 = vtk.vtkStringArray()
arrY2.SetName("Confidence")
table.AddColumn(arrY2)
arrY3 = vtk.vtkStringArray()
arrY3.SetName("Marked for deletion")
table.AddColumn(arrY3)
table.SetNumberOfRows(self.KLdiv.shape[0])
for i in range(self.KLdiv.shape[0]):
# To prevent the gradient index confuse with row number '{:10}'.format(i)
table.SetValue(i, 0, '{:10}'.format(i))
table.SetValue(i, 1, 'Pass' if self.deletion[i] else FAIL)
table.SetValue(i, 2, 'Sure' if self.confidence[i] else UNSURE)
table.SetValue(i, 3, 'X' if not self.deletion[i] else ' ')
currentLayout = slicer.app.layoutManager().layout
layoutWithTable = slicer.modules.tables.logic().GetLayoutWithTable(currentLayout)
slicer.app.layoutManager().setLayout(layoutWithTable)
slicer.app.applicationLogic().GetSelectionNode().SetReferenceActiveTableID(self.tableNode.GetID())
slicer.app.applicationLogic().PropagateTableSelection()
# Table display finished --------------------------------------------------------------------
# The following code is for making graph
self.graphTableNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTableNode")
graphTable = self.graphTableNode.GetTable()
arrX = vtk.vtkIntArray()
arrX.SetName("Slice #")
graphTable.AddColumn(arrX)
arrY1 = vtk.vtkFloatArray()
arrY1.SetName("Divergence")
graphTable.AddColumn(arrY1)
graphTable.SetNumberOfRows(self.KLdiv.shape[1])
# Create a plot series nodes
plotSeriesNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotSeriesNode", "KL div")
plotSeriesNode.SetAndObserveTableNodeID(self.graphTableNode.GetID())
plotSeriesNode.SetXColumnName("Slice #")
plotSeriesNode.SetYColumnName("Divergence")
plotSeriesNode.SetPlotType(slicer.vtkMRMLPlotSeriesNode.PlotTypeScatter)
plotSeriesNode.SetLineStyle(slicer.vtkMRMLPlotSeriesNode.LineStyleSolid)
plotSeriesNode.SetMarkerStyle(slicer.vtkMRMLPlotSeriesNode.MarkerStyleSquare)
plotSeriesNode.SetUniqueColor()
# Create plot chart node
plotChartNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLPlotChartNode")
plotChartNode.AddAndObservePlotSeriesNodeID(plotSeriesNode.GetID())
plotChartNode.SetTitle('KL divergence value plot')
plotChartNode.SetXAxisTitle('Slice index') |
# Switch to a layout that contains a plot view to create a plot widget
layoutManager = slicer.app.layoutManager()
layoutWithPlot = slicer.modules.plots.logic().GetLayoutWithPlot(layoutManager.layout)
layoutManager.setLayout(layoutWithPlot)
# Properly set the plot chart interaction mode
# Select chart in plot view
plotWidget = layoutManager.plotWidget(0)
plotViewNode = plotWidget.mrmlPlotViewNode()
plotViewNode.SetPlotChartNodeID(plotChartNode.GetID())
plotViewNode.SetInteractionMode(plotViewNode.InteractionModeSelectPoints) # select points mode
# Graph display finished -------------------------------------------------------------
mainwindow = slicer.util.mainWindow()
self.figureHandle= slicer.util.findChildren(mainwindow, className="qMRMLPlotView")[0]
self.tableHandle = slicer.util.findChildren(mainwindow, className="qMRMLTableView")[0]
self.tableHandle.connect('selectionChanged()', self.gradientUpdate)
self.figureHandle.connect('dataSelected(vtkStringArray*, vtkCollection*)', self.sliceUpdate)
discardButton.connect('clicked(bool)', self.discardGradient)
keepButton.connect('clicked(bool)', self.keepGradient)
nextButton.connect('clicked(bool)', self.nextReview)
sureButton.connect('clicked(bool)', self.makeSure)
unsureButton.connect('clicked(bool)', self.makeUnsure)
saveButton.connect('clicked(bool)', self.finishInteraction)
resetButton.connect('clicked(bool)', self.resetResults)
# TODO: Use the above handles to disconnect all signals after 'Save' (not much necessary)
# Displaying 0th gradient graph as default
self.plotUpdate(0)
def finishInteraction(self):
# Return only if pushbutton save is pressed
hdr, mri, grad_axis, _, _, _ = dwi_attributes(self.dwiPath)
saveResults(self.prefix, self.directory, self.deletion, None, None, hdr, mri, grad_axis, True)
# Getting specific point ID from graph
# Switching among slices
def sliceUpdate(self,_,dataPointID):
res = self.dwiNode.GetSpacing()[2] # The 2 corresponds to axial view (check if we need to soft code)
org = self.dwiNode.GetOrigin()[2] # The 2 corresponds to axial view (check if we need to soft code)
axialView = slicer.util.getNode('vtkMRMLSliceNodeRed')
if not dataPointID.GetNumberOfItems( ):
# if no point is selected, do nothing
return
else:
array= dataPointID.GetItemAsObject(0)
slice_index = array.GetValue(0)
# The following lines set appropriate slice in the axial view only
if slice_index<= abs(org)/2:
offset= res*slice_index+org-1
else:
offset= res*slice_index+org
axialView.SetSliceOffset(offset)
def discardGradient(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient as fail
diffusion_index= arr.GetDiffusionComponent()
self.deletion[diffusion_index]= 0 # bad ones are marked with 0
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 1, FAIL)
table.SetValue(diffusion_index, 3, 'X')
self.tableNode.Modified()
self.summaryUpdate()
def keepGradient(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient for self.deletion
diffusion_index= arr.GetDiffusionComponent()
self.deletion[diffusion_index]= 1 # good ones are marked with 1
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 1, 'Pass')
table.SetValue(diffusion_index, 3, ' ')
self.tableNode.Modified()
self.summaryUpdate()
def makeUnsure(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient as fail
diffusion_index= arr.GetDiffusionComponent()
self.confidence[diffusion_index]= 0 # unsure ones are marked with 0
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 2, UNSURE)
self.tableNode.Modified()
self.summaryUpdate()
def makeSure(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
# Mark the corresponding gradient for self.deletion
diffusion_index= arr.GetDiffusionComponent()
self.confidence[diffusion_index]= 1 # sure ones are marked with 1
table = self.tableNode.GetTable()
table.SetValue(diffusion_index, 2, 'Sure')
self.tableNode.Modified()
self.summaryUpdate()
# Getting specific point ID from table
# Switching among gradients
def gradientUpdate(self):
index = self.tableHandle.selectedIndexes()[0]
diffusion_index = index.row( )-1
# Probable BUG: every time selectionChanged(), gradientUpdate() is called twice
# That means all the work below are done twice for no reason
# The following print statement is for debugging
# print("Index ", diffusion_index)
# The following line sets appropriate gradient
if diffusion_index >= 0:
self.plotUpdate(diffusion_index) # Label update is done inside this function
def plotUpdate(self, diffusion_index):
# Select corresponding row of the table
self.tableHandle.selectRow(diffusion_index+1)
# Label update is done below
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
arr.SetDiffusionComponent(diffusion_index)
q= 'Pass' if self.deletion[diffusion_index] else FAIL
c= 'Sure' if self.confidence[diffusion_index] else UNSURE
self.decisionLabel.setText("Displaying gradient # "+str(diffusion_index)+' ,\t'+
"Quality: "+ q +' ,\t'+ "Confidence: "+ c)
# The following code is for making a plot
# Create table with slice index, and divergence value
graphTable = self.graphTableNode.GetTable()
for i in range(self.KLdiv.shape[1]):
# Filling up row wise
graphTable.SetValue(i, 0, i)
graphTable.SetValue(i, 1, self.KLdiv[diffusion_index, i])
self.graphTableNode.Modified()
def resetResults(self):
table = self.tableNode.GetTable()
for i in range(self.KLdiv.shape[0]):
table.SetValue(i, 0, i)
table.SetValue(i, 1, 'Pass' if self.qualityBackUp[i] else FAIL)
table.SetValue(i, 2, 'Sure' if self.confidenceBackUp[i] else UNSURE)
table.SetValue(i, 3, 'X' if not self.qualityBackUp[i] else ' ')
self.tableNode.Modified()
self.deletion= self.qualityBackUp.copy()
self.plotUpdate(0)
self.summaryUpdate()
def nextReview(self):
arr = self.dwiNode.GetDiffusionWeightedVolumeDisplayNode()
diffusion_index = arr.GetDiffusionComponent()
if (self.confidence==0).any( ):
if diffusion_index<len(self.confidence)-1:
i=diffusion_index+1
else:
i=0 # force to start from beginning
while self.confidence[i]: # While sure, continue looping for the next unsure
i+=1
if i!=diffusion_index:
arr.SetDiffusionComponent(i)
self.plotUpdate(i)
def summaryUpdate(self):
self.summaryLabel.setText("Total gradients "+str(len(self.deletion))+
",\t\t# of fails "+ str(len(np.where(self.deletion==0)[0]))+
",\t# of unsures " + str(len(np.where(self.confidence == 0)[0]))) | plotChartNode.SetYAxisTitle('KL div')
| random_line_split |
main.rs | use crate::{message::Message, weapon::Weapon};
use rg3d::{
core::{
algebra::{UnitQuaternion, Vector3},
color::Color,
color_gradient::{ColorGradient, GradientPoint},
math::ray::Ray,
numeric_range::NumericRange,
pool::{Handle, Pool},
},
engine::{
resource_manager::ResourceManager,
Engine,
RigidBodyHandle,
ColliderHandle
},
event::{DeviceEvent, ElementState, Event, MouseButton, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
gui::node::StubNode,
physics::{dynamics::RigidBodyBuilder, geometry::ColliderBuilder},
scene::mesh::surface::{SurfaceBuilder, SurfaceData},
resource::texture::TextureWrapMode,
scene::{
base::BaseBuilder,
camera::{CameraBuilder, SkyBox},
graph::Graph,
mesh::{MeshBuilder, RenderPath},
node::Node,
particle_system::{BaseEmitterBuilder, ParticleSystemBuilder, SphereEmitterBuilder},
physics::RayCastOptions,
transform::TransformBuilder,
Scene,
},
window::WindowBuilder,
};
use std::{
path::Path,
sync::{
mpsc::{self, Receiver, Sender},
Arc, RwLock,
},
time,
};
pub mod message;
pub mod weapon;
// Create our own engine type aliases. These specializations are needed, because the engine
// provides a way to extend UI with custom nodes and messages.
type GameEngine = Engine<(), StubNode>;
// Our game logic will be updated at 60 Hz rate.
const TIMESTEP: f32 = 1.0 / 60.0;
#[derive(Default)]
struct InputController {
move_forward: bool,
move_backward: bool,
move_left: bool,
move_right: bool,
pitch: f32,
yaw: f32,
shoot: bool,
}
struct Player {
pivot: Handle<Node>,
camera: Handle<Node>,
rigid_body: RigidBodyHandle,
controller: InputController,
weapon_pivot: Handle<Node>,
sender: Sender<Message>,
weapon: Handle<Weapon>,
collider: ColliderHandle,
}
async fn create_skybox(resource_manager: ResourceManager) -> SkyBox {
// Load skybox textures in parallel.
let (front, back, left, right, top, bottom) = rg3d::core::futures::join!(
resource_manager.request_texture("data/textures/skybox/front.jpg"),
resource_manager.request_texture("data/textures/skybox/back.jpg"),
resource_manager.request_texture("data/textures/skybox/left.jpg"),
resource_manager.request_texture("data/textures/skybox/right.jpg"),
resource_manager.request_texture("data/textures/skybox/up.jpg"),
resource_manager.request_texture("data/textures/skybox/down.jpg")
);
// Unwrap everything.
let skybox = SkyBox {
front: Some(front.unwrap()),
back: Some(back.unwrap()),
left: Some(left.unwrap()),
right: Some(right.unwrap()),
top: Some(top.unwrap()),
bottom: Some(bottom.unwrap()),
};
// Set S and T coordinate wrap mode, ClampToEdge will remove any possible seams on edges
// of the skybox.
for skybox_texture in skybox.textures().iter().filter_map(|t| t.clone()) {
let mut data = skybox_texture.data_ref();
data.set_s_wrap_mode(TextureWrapMode::ClampToEdge);
data.set_t_wrap_mode(TextureWrapMode::ClampToEdge);
}
skybox
}
fn create_bullet_impact(
graph: &mut Graph,
resource_manager: ResourceManager,
pos: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node> {
// Create sphere emitter first.
let emitter = SphereEmitterBuilder::new(
BaseEmitterBuilder::new()
.with_max_particles(200)
.with_spawn_rate(1000)
.with_size_modifier_range(NumericRange::new(-0.01, -0.0125))
.with_size_range(NumericRange::new(0.0010, 0.025))
.with_x_velocity_range(NumericRange::new(-0.01, 0.01))
.with_y_velocity_range(NumericRange::new(0.017, 0.02))
.with_z_velocity_range(NumericRange::new(-0.01, 0.01))
.resurrect_particles(false),
)
.with_radius(0.01)
.build();
// Color gradient will be used to modify color of each particle over its lifetime.
let color_gradient = {
let mut gradient = ColorGradient::new();
gradient.add_point(GradientPoint::new(0.00, Color::from_rgba(255, 255, 0, 0)));
gradient.add_point(GradientPoint::new(0.05, Color::from_rgba(255, 160, 0, 255)));
gradient.add_point(GradientPoint::new(0.95, Color::from_rgba(255, 120, 0, 255)));
gradient.add_point(GradientPoint::new(1.00, Color::from_rgba(255, 60, 0, 0)));
gradient
};
// Create new transform to orient and position particle system.
let transform = TransformBuilder::new()
.with_local_position(pos)
.with_local_rotation(orientation)
.build();
// Finally create particle system with limited lifetime.
ParticleSystemBuilder::new(
BaseBuilder::new()
.with_lifetime(1.0)
.with_local_transform(transform),
)
.with_acceleration(Vector3::new(0.0, -10.0, 0.0))
.with_color_over_lifetime_gradient(color_gradient)
.with_emitters(vec![emitter])
// We'll use simple spark texture for each particle.
.with_texture(resource_manager.request_texture(Path::new("data/textures/spark.png")))
.build(graph)
}
impl Player {
async fn new(
scene: &mut Scene,
resource_manager: ResourceManager,
sender: Sender<Message>,
) -> Self {
// Create a pivot and attach a camera to it, move it a bit up to "emulate" head.
let camera;
let weapon_pivot;
let pivot = BaseBuilder::new()
.with_children(&[{
camera = CameraBuilder::new(
BaseBuilder::new()
.with_local_transform(
TransformBuilder::new()
.with_local_position(Vector3::new(0.0, 0.25, 0.0))
.build(),
)
.with_children(&[{
weapon_pivot = BaseBuilder::new()
.with_local_transform(
TransformBuilder::new()
.with_local_position(Vector3::new(-0.1, -0.05, 0.015))
.build(),
)
.build(&mut scene.graph);
weapon_pivot
}]),
)
.with_skybox(create_skybox(resource_manager).await)
.build(&mut scene.graph);
camera
}])
.build(&mut scene.graph);
// Create rigid body, it will be used for interaction with the world.
let rigid_body_handle = scene.physics.add_body(
RigidBodyBuilder::new_dynamic()
.lock_rotations() // We don't want the player to tilt.
.translation(Vector3::new(0.0, 1.0, -1.0)) // Offset player a bit.
.build(),
);
// Add capsule collider for the rigid body.
let collider = scene.physics.add_collider(
ColliderBuilder::capsule_y(0.25, 0.2).build(),
&rigid_body_handle,
);
// Bind pivot with rigid body. Scene will automatically sync transform of the pivot
// with the transform of the rigid body.
scene.physics_binder.bind(pivot, rigid_body_handle);
Self {
pivot,
camera,
weapon_pivot,
rigid_body: rigid_body_handle,
controller: Default::default(),
sender,
collider,
weapon: Default::default(), // Leave it unassigned for now.
}
}
fn update(&mut self, scene: &mut Scene) {
// Set pitch for the camera. These lines responsible for up-down camera rotation.
scene.graph[self.camera].local_transform_mut().set_rotation(
UnitQuaternion::from_axis_angle(&Vector3::x_axis(), self.controller.pitch.to_radians()),
);
// Borrow the pivot in the graph.
let pivot = &mut scene.graph[self.pivot];
// Borrow rigid body in the physics.
let body = scene
.physics
.bodies
.get_mut(&self.rigid_body)
.unwrap();
// Keep only vertical velocity, and drop horizontal.
let mut velocity = Vector3::new(0.0, body.linvel().y, 0.0);
// Change the velocity depending on the keys pressed.
if self.controller.move_forward {
// If we moving forward then add "look" vector of the pivot.
velocity += pivot.look_vector();
}
if self.controller.move_backward {
// If we moving backward then subtract "look" vector of the pivot.
velocity -= pivot.look_vector();
}
if self.controller.move_left {
// If we moving left then add "side" vector of the pivot.
velocity += pivot.side_vector();
}
if self.controller.move_right {
// If we moving right then subtract "side" vector of the pivot.
velocity -= pivot.side_vector();
}
// Finally new linear velocity.
body.set_linvel(velocity, true);
// Change the rotation of the rigid body according to current yaw. These lines responsible for
// left-right rotation.
let mut position = *body.position();
position.rotation =
UnitQuaternion::from_axis_angle(&Vector3::y_axis(), self.controller.yaw.to_radians());
body.set_position(position, true);
if self.controller.shoot {
self.sender
.send(Message::ShootWeapon {
weapon: self.weapon,
})
.unwrap();
}
}
fn process_input_event(&mut self, event: &Event<()>) {
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput { input, .. } => {
if let Some(key_code) = input.virtual_keycode {
match key_code {
VirtualKeyCode::W => {
self.controller.move_forward = input.state == ElementState::Pressed;
}
VirtualKeyCode::S => {
self.controller.move_backward =
input.state == ElementState::Pressed;
}
VirtualKeyCode::A => {
self.controller.move_left = input.state == ElementState::Pressed;
}
VirtualKeyCode::D => {
self.controller.move_right = input.state == ElementState::Pressed;
}
_ => (),
}
}
}
&WindowEvent::MouseInput { button, state, .. } => {
if button == MouseButton::Left {
self.controller.shoot = state == ElementState::Pressed;
}
}
_ => {}
},
Event::DeviceEvent { event, .. } => {
if let DeviceEvent::MouseMotion { delta } = event {
let mouse_sens = 0.5;
self.controller.yaw -= mouse_sens * delta.0 as f32;
self.controller.pitch =
(self.controller.pitch + mouse_sens * delta.1 as f32).clamp(-90.0, 90.0);
}
}
_ => (),
}
}
}
fn create_shot_trail(
graph: &mut Graph,
origin: Vector3<f32>,
direction: Vector3<f32>,
trail_length: f32,
) {
let transform = TransformBuilder::new()
.with_local_position(origin)
// Scale the trail in XZ plane to make it thin, and apply `trail_length` scale on Y axis
// to stretch is out.
.with_local_scale(Vector3::new(0.0025, 0.0025, trail_length))
// Rotate the trail along given `direction`
.with_local_rotation(UnitQuaternion::face_towards(&direction, &Vector3::y()))
.build();
// Create unit cylinder with caps that faces toward Z axis.
let shape = Arc::new(RwLock::new(SurfaceData::make_cylinder(
6, // Count of sides
1.0, // Radius
1.0, // Height
false, // No caps are needed.
// Rotate vertical cylinder around X axis to make it face towards Z axis
&UnitQuaternion::from_axis_angle(&Vector3::x_axis(), 90.0f32.to_radians()).to_homogeneous(),
)));
MeshBuilder::new(
BaseBuilder::new()
.with_local_transform(transform)
// Shot trail should live ~0.25 seconds, after that it will be automatically
// destroyed.
.with_lifetime(0.25),
)
.with_surfaces(vec![SurfaceBuilder::new(shape)
// Set yellow-ish color.
.with_color(Color::from_rgba(255, 255, 0, 120))
.build()])
// Do not cast shadows.
.with_cast_shadows(false)
// Make sure to set Forward render path, otherwise the object won't be
// transparent.
.with_render_path(RenderPath::Forward)
.build(graph);
}
struct Game {
scene: Handle<Scene>,
player: Player,
weapons: Pool<Weapon>,
receiver: Receiver<Message>,
sender: Sender<Message>,
}
impl Game {
pub async fn new(engine: &mut GameEngine) -> Self {
// Make message queue.
let (sender, receiver) = mpsc::channel();
let mut scene = Scene::new();
// Load a scene resource and create its instance.
engine
.resource_manager
.request_model("data/models/scene.rgs")
.await
.unwrap()
.instantiate_geometry(&mut scene);
// Create player first.
let mut player =
Player::new(&mut scene, engine.resource_manager.clone(), sender.clone()).await;
// Create weapon next.
let weapon = Weapon::new(&mut scene, engine.resource_manager.clone()).await;
// "Attach" the weapon to the weapon pivot of the player.
scene.graph.link_nodes(weapon.model(), player.weapon_pivot);
// Create a container for the weapons.
let mut weapons = Pool::new();
// Put the weapon into it - this operation moves the weapon in the pool and returns handle.
let weapon = weapons.spawn(weapon);
// "Give" the weapon to the player.
player.weapon = weapon;
Self {
player,
scene: engine.scenes.add(scene),
weapons,
sender,
receiver,
}
}
fn shoot_weapon(&mut self, weapon: Handle<Weapon>, engine: &mut GameEngine) {
let weapon = &mut self.weapons[weapon];
if weapon.can_shoot() {
weapon.shoot();
let scene = &mut engine.scenes[self.scene];
let weapon_model = &scene.graph[weapon.model()];
// Make a ray that starts at the weapon's position in the world and look toward
// "look" vector of the weapon.
let ray = Ray::new(
scene.graph[weapon.shot_point()].global_position(),
weapon_model.look_vector().scale(1000.0),
);
let mut intersections = Vec::new();
scene.physics.cast_ray(
RayCastOptions {
ray,
max_len: ray.dir.norm(),
groups: Default::default(),
sort_results: true, // We need intersections to be sorted from closest to furthest.
},
&mut intersections,
);
// Ignore intersections with player's capsule.
let trail_length = if let Some(intersection) = intersections
.iter()
.find(|i| i.collider != self.player.collider)
{
//
// TODO: Add code to handle intersections with bots.
//
// For now just apply some force at the point of impact.
let collider = scene
.physics
.colliders
.get(&intersection.collider)
.unwrap();
scene
.physics
.bodies
.native_mut(collider.parent().unwrap())
.unwrap()
.apply_force_at_point(
ray.dir.normalize().scale(10.0),
intersection.position,
true,
);
// Add bullet impact effect.
let effect_orientation = if intersection.normal.normalize() == Vector3::y() {
// Handle singularity when normal of impact point is collinear with Y axis.
UnitQuaternion::from_axis_angle(&Vector3::y_axis(), 0.0)
} else {
UnitQuaternion::face_towards(&intersection.normal, &Vector3::y())
};
create_bullet_impact(
&mut scene.graph,
engine.resource_manager.clone(),
intersection.position.coords,
effect_orientation,
);
// Trail length will be the length of line between intersection point and ray origin.
(intersection.position.coords - ray.origin).norm()
} else {
// Otherwise trail length will be just the ray length.
ray.dir.norm()
};
create_shot_trail(&mut scene.graph, ray.origin, ray.dir, trail_length);
}
}
pub fn update(&mut self, engine: &mut GameEngine, dt: f32) {
let scene = &mut engine.scenes[self.scene];
self.player.update(scene);
for weapon in self.weapons.iter_mut() {
weapon.update(dt, &mut scene.graph);
}
// We're using `try_recv` here because we don't want to wait until next message -
// if the queue is empty just continue to next frame.
while let Ok(message) = self.receiver.try_recv() {
match message {
Message::ShootWeapon { weapon } => {
self.shoot_weapon(weapon, engine);
}
}
}
}
}
fn main() {
// Configure main window first.
let window_builder = WindowBuilder::new().with_title("3D Shooter Tutorial");
// Create event loop that will be used to "listen" events from the OS.
let event_loop = EventLoop::new();
// Finally create an instance of the engine.
let mut engine = GameEngine::new(window_builder, &event_loop, true).unwrap();
// Initialize game instance.
let mut game = rg3d::core::futures::executor::block_on(Game::new(&mut engine));
// Run the event loop of the main window. which will respond to OS and window events and update
// engine's state accordingly. Engine lets you to decide which event should be handled,
// this is minimal working example if how it should be. | game.player.process_input_event(&event);
match event {
Event::MainEventsCleared => {
// This main game loop - it has fixed time step which means that game
// code will run at fixed speed even if renderer can't give you desired
// 60 fps.
let mut dt = clock.elapsed().as_secs_f32() - elapsed_time;
while dt >= TIMESTEP {
dt -= TIMESTEP;
elapsed_time += TIMESTEP;
// Run our game's logic.
game.update(&mut engine, TIMESTEP);
// Update engine each frame.
engine.update(TIMESTEP);
}
// Rendering must be explicitly requested and handled after RedrawRequested event is received.
engine.get_window().request_redraw();
}
Event::RedrawRequested(_) => {
// Render at max speed - it is not tied to the game code.
engine.render(TIMESTEP).unwrap();
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => {
// Exit game by hitting Escape.
if let Some(VirtualKeyCode::Escape) = input.virtual_keycode {
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// It is very important to handle Resized event from window, because
// renderer knows nothing about window size - it must be notified
// directly when window size has changed.
engine.renderer.set_frame_size(size.into());
}
_ => (),
},
_ => *control_flow = ControlFlow::Poll,
}
});
} | let clock = time::Instant::now();
let mut elapsed_time = 0.0;
event_loop.run(move |event, _, control_flow| { | random_line_split |
main.rs | use crate::{message::Message, weapon::Weapon};
use rg3d::{
core::{
algebra::{UnitQuaternion, Vector3},
color::Color,
color_gradient::{ColorGradient, GradientPoint},
math::ray::Ray,
numeric_range::NumericRange,
pool::{Handle, Pool},
},
engine::{
resource_manager::ResourceManager,
Engine,
RigidBodyHandle,
ColliderHandle
},
event::{DeviceEvent, ElementState, Event, MouseButton, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
gui::node::StubNode,
physics::{dynamics::RigidBodyBuilder, geometry::ColliderBuilder},
scene::mesh::surface::{SurfaceBuilder, SurfaceData},
resource::texture::TextureWrapMode,
scene::{
base::BaseBuilder,
camera::{CameraBuilder, SkyBox},
graph::Graph,
mesh::{MeshBuilder, RenderPath},
node::Node,
particle_system::{BaseEmitterBuilder, ParticleSystemBuilder, SphereEmitterBuilder},
physics::RayCastOptions,
transform::TransformBuilder,
Scene,
},
window::WindowBuilder,
};
use std::{
path::Path,
sync::{
mpsc::{self, Receiver, Sender},
Arc, RwLock,
},
time,
};
pub mod message;
pub mod weapon;
// Create our own engine type aliases. These specializations are needed, because the engine
// provides a way to extend UI with custom nodes and messages.
type GameEngine = Engine<(), StubNode>;
// Our game logic will be updated at 60 Hz rate.
const TIMESTEP: f32 = 1.0 / 60.0;
#[derive(Default)]
struct InputController {
move_forward: bool,
move_backward: bool,
move_left: bool,
move_right: bool,
pitch: f32,
yaw: f32,
shoot: bool,
}
struct Player {
pivot: Handle<Node>,
camera: Handle<Node>,
rigid_body: RigidBodyHandle,
controller: InputController,
weapon_pivot: Handle<Node>,
sender: Sender<Message>,
weapon: Handle<Weapon>,
collider: ColliderHandle,
}
async fn create_skybox(resource_manager: ResourceManager) -> SkyBox {
// Load skybox textures in parallel.
let (front, back, left, right, top, bottom) = rg3d::core::futures::join!(
resource_manager.request_texture("data/textures/skybox/front.jpg"),
resource_manager.request_texture("data/textures/skybox/back.jpg"),
resource_manager.request_texture("data/textures/skybox/left.jpg"),
resource_manager.request_texture("data/textures/skybox/right.jpg"),
resource_manager.request_texture("data/textures/skybox/up.jpg"),
resource_manager.request_texture("data/textures/skybox/down.jpg")
);
// Unwrap everything.
let skybox = SkyBox {
front: Some(front.unwrap()),
back: Some(back.unwrap()),
left: Some(left.unwrap()),
right: Some(right.unwrap()),
top: Some(top.unwrap()),
bottom: Some(bottom.unwrap()),
};
// Set S and T coordinate wrap mode, ClampToEdge will remove any possible seams on edges
// of the skybox.
for skybox_texture in skybox.textures().iter().filter_map(|t| t.clone()) {
let mut data = skybox_texture.data_ref();
data.set_s_wrap_mode(TextureWrapMode::ClampToEdge);
data.set_t_wrap_mode(TextureWrapMode::ClampToEdge);
}
skybox
}
fn create_bullet_impact(
graph: &mut Graph,
resource_manager: ResourceManager,
pos: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node> {
// Create sphere emitter first.
let emitter = SphereEmitterBuilder::new(
BaseEmitterBuilder::new()
.with_max_particles(200)
.with_spawn_rate(1000)
.with_size_modifier_range(NumericRange::new(-0.01, -0.0125))
.with_size_range(NumericRange::new(0.0010, 0.025))
.with_x_velocity_range(NumericRange::new(-0.01, 0.01))
.with_y_velocity_range(NumericRange::new(0.017, 0.02))
.with_z_velocity_range(NumericRange::new(-0.01, 0.01))
.resurrect_particles(false),
)
.with_radius(0.01)
.build();
// Color gradient will be used to modify color of each particle over its lifetime.
let color_gradient = {
let mut gradient = ColorGradient::new();
gradient.add_point(GradientPoint::new(0.00, Color::from_rgba(255, 255, 0, 0)));
gradient.add_point(GradientPoint::new(0.05, Color::from_rgba(255, 160, 0, 255)));
gradient.add_point(GradientPoint::new(0.95, Color::from_rgba(255, 120, 0, 255)));
gradient.add_point(GradientPoint::new(1.00, Color::from_rgba(255, 60, 0, 0)));
gradient
};
// Create new transform to orient and position particle system.
let transform = TransformBuilder::new()
.with_local_position(pos)
.with_local_rotation(orientation)
.build();
// Finally create particle system with limited lifetime.
ParticleSystemBuilder::new(
BaseBuilder::new()
.with_lifetime(1.0)
.with_local_transform(transform),
)
.with_acceleration(Vector3::new(0.0, -10.0, 0.0))
.with_color_over_lifetime_gradient(color_gradient)
.with_emitters(vec![emitter])
// We'll use simple spark texture for each particle.
.with_texture(resource_manager.request_texture(Path::new("data/textures/spark.png")))
.build(graph)
}
impl Player {
async fn new(
scene: &mut Scene,
resource_manager: ResourceManager,
sender: Sender<Message>,
) -> Self {
// Create a pivot and attach a camera to it, move it a bit up to "emulate" head.
let camera;
let weapon_pivot;
let pivot = BaseBuilder::new()
.with_children(&[{
camera = CameraBuilder::new(
BaseBuilder::new()
.with_local_transform(
TransformBuilder::new()
.with_local_position(Vector3::new(0.0, 0.25, 0.0))
.build(),
)
.with_children(&[{
weapon_pivot = BaseBuilder::new()
.with_local_transform(
TransformBuilder::new()
.with_local_position(Vector3::new(-0.1, -0.05, 0.015))
.build(),
)
.build(&mut scene.graph);
weapon_pivot
}]),
)
.with_skybox(create_skybox(resource_manager).await)
.build(&mut scene.graph);
camera
}])
.build(&mut scene.graph);
// Create rigid body, it will be used for interaction with the world.
let rigid_body_handle = scene.physics.add_body(
RigidBodyBuilder::new_dynamic()
.lock_rotations() // We don't want the player to tilt.
.translation(Vector3::new(0.0, 1.0, -1.0)) // Offset player a bit.
.build(),
);
// Add capsule collider for the rigid body.
let collider = scene.physics.add_collider(
ColliderBuilder::capsule_y(0.25, 0.2).build(),
&rigid_body_handle,
);
// Bind pivot with rigid body. Scene will automatically sync transform of the pivot
// with the transform of the rigid body.
scene.physics_binder.bind(pivot, rigid_body_handle);
Self {
pivot,
camera,
weapon_pivot,
rigid_body: rigid_body_handle,
controller: Default::default(),
sender,
collider,
weapon: Default::default(), // Leave it unassigned for now.
}
}
fn update(&mut self, scene: &mut Scene) {
// Set pitch for the camera. These lines responsible for up-down camera rotation.
scene.graph[self.camera].local_transform_mut().set_rotation(
UnitQuaternion::from_axis_angle(&Vector3::x_axis(), self.controller.pitch.to_radians()),
);
// Borrow the pivot in the graph.
let pivot = &mut scene.graph[self.pivot];
// Borrow rigid body in the physics.
let body = scene
.physics
.bodies
.get_mut(&self.rigid_body)
.unwrap();
// Keep only vertical velocity, and drop horizontal.
let mut velocity = Vector3::new(0.0, body.linvel().y, 0.0);
// Change the velocity depending on the keys pressed.
if self.controller.move_forward {
// If we moving forward then add "look" vector of the pivot.
velocity += pivot.look_vector();
}
if self.controller.move_backward {
// If we moving backward then subtract "look" vector of the pivot.
velocity -= pivot.look_vector();
}
if self.controller.move_left {
// If we moving left then add "side" vector of the pivot.
velocity += pivot.side_vector();
}
if self.controller.move_right {
// If we moving right then subtract "side" vector of the pivot.
velocity -= pivot.side_vector();
}
// Finally new linear velocity.
body.set_linvel(velocity, true);
// Change the rotation of the rigid body according to current yaw. These lines responsible for
// left-right rotation.
let mut position = *body.position();
position.rotation =
UnitQuaternion::from_axis_angle(&Vector3::y_axis(), self.controller.yaw.to_radians());
body.set_position(position, true);
if self.controller.shoot {
self.sender
.send(Message::ShootWeapon {
weapon: self.weapon,
})
.unwrap();
}
}
fn process_input_event(&mut self, event: &Event<()>) {
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput { input, .. } => {
if let Some(key_code) = input.virtual_keycode {
match key_code {
VirtualKeyCode::W => {
self.controller.move_forward = input.state == ElementState::Pressed;
}
VirtualKeyCode::S => {
self.controller.move_backward =
input.state == ElementState::Pressed;
}
VirtualKeyCode::A => {
self.controller.move_left = input.state == ElementState::Pressed;
}
VirtualKeyCode::D => {
self.controller.move_right = input.state == ElementState::Pressed;
}
_ => (),
}
}
}
&WindowEvent::MouseInput { button, state, .. } => {
if button == MouseButton::Left {
self.controller.shoot = state == ElementState::Pressed;
}
}
_ => {}
},
Event::DeviceEvent { event, .. } => {
if let DeviceEvent::MouseMotion { delta } = event {
let mouse_sens = 0.5;
self.controller.yaw -= mouse_sens * delta.0 as f32;
self.controller.pitch =
(self.controller.pitch + mouse_sens * delta.1 as f32).clamp(-90.0, 90.0);
}
}
_ => (),
}
}
}
fn create_shot_trail(
graph: &mut Graph,
origin: Vector3<f32>,
direction: Vector3<f32>,
trail_length: f32,
) {
let transform = TransformBuilder::new()
.with_local_position(origin)
// Scale the trail in XZ plane to make it thin, and apply `trail_length` scale on Y axis
// to stretch is out.
.with_local_scale(Vector3::new(0.0025, 0.0025, trail_length))
// Rotate the trail along given `direction`
.with_local_rotation(UnitQuaternion::face_towards(&direction, &Vector3::y()))
.build();
// Create unit cylinder with caps that faces toward Z axis.
let shape = Arc::new(RwLock::new(SurfaceData::make_cylinder(
6, // Count of sides
1.0, // Radius
1.0, // Height
false, // No caps are needed.
// Rotate vertical cylinder around X axis to make it face towards Z axis
&UnitQuaternion::from_axis_angle(&Vector3::x_axis(), 90.0f32.to_radians()).to_homogeneous(),
)));
MeshBuilder::new(
BaseBuilder::new()
.with_local_transform(transform)
// Shot trail should live ~0.25 seconds, after that it will be automatically
// destroyed.
.with_lifetime(0.25),
)
.with_surfaces(vec![SurfaceBuilder::new(shape)
// Set yellow-ish color.
.with_color(Color::from_rgba(255, 255, 0, 120))
.build()])
// Do not cast shadows.
.with_cast_shadows(false)
// Make sure to set Forward render path, otherwise the object won't be
// transparent.
.with_render_path(RenderPath::Forward)
.build(graph);
}
struct Game {
scene: Handle<Scene>,
player: Player,
weapons: Pool<Weapon>,
receiver: Receiver<Message>,
sender: Sender<Message>,
}
impl Game {
pub async fn new(engine: &mut GameEngine) -> Self {
// Make message queue.
let (sender, receiver) = mpsc::channel();
let mut scene = Scene::new();
// Load a scene resource and create its instance.
engine
.resource_manager
.request_model("data/models/scene.rgs")
.await
.unwrap()
.instantiate_geometry(&mut scene);
// Create player first.
let mut player =
Player::new(&mut scene, engine.resource_manager.clone(), sender.clone()).await;
// Create weapon next.
let weapon = Weapon::new(&mut scene, engine.resource_manager.clone()).await;
// "Attach" the weapon to the weapon pivot of the player.
scene.graph.link_nodes(weapon.model(), player.weapon_pivot);
// Create a container for the weapons.
let mut weapons = Pool::new();
// Put the weapon into it - this operation moves the weapon in the pool and returns handle.
let weapon = weapons.spawn(weapon);
// "Give" the weapon to the player.
player.weapon = weapon;
Self {
player,
scene: engine.scenes.add(scene),
weapons,
sender,
receiver,
}
}
fn shoot_weapon(&mut self, weapon: Handle<Weapon>, engine: &mut GameEngine) {
let weapon = &mut self.weapons[weapon];
if weapon.can_shoot() {
weapon.shoot();
let scene = &mut engine.scenes[self.scene];
let weapon_model = &scene.graph[weapon.model()];
// Make a ray that starts at the weapon's position in the world and look toward
// "look" vector of the weapon.
let ray = Ray::new(
scene.graph[weapon.shot_point()].global_position(),
weapon_model.look_vector().scale(1000.0),
);
let mut intersections = Vec::new();
scene.physics.cast_ray(
RayCastOptions {
ray,
max_len: ray.dir.norm(),
groups: Default::default(),
sort_results: true, // We need intersections to be sorted from closest to furthest.
},
&mut intersections,
);
// Ignore intersections with player's capsule.
let trail_length = if let Some(intersection) = intersections
.iter()
.find(|i| i.collider != self.player.collider)
{
//
// TODO: Add code to handle intersections with bots.
//
// For now just apply some force at the point of impact.
let collider = scene
.physics
.colliders
.get(&intersection.collider)
.unwrap();
scene
.physics
.bodies
.native_mut(collider.parent().unwrap())
.unwrap()
.apply_force_at_point(
ray.dir.normalize().scale(10.0),
intersection.position,
true,
);
// Add bullet impact effect.
let effect_orientation = if intersection.normal.normalize() == Vector3::y() {
// Handle singularity when normal of impact point is collinear with Y axis.
UnitQuaternion::from_axis_angle(&Vector3::y_axis(), 0.0)
} else {
UnitQuaternion::face_towards(&intersection.normal, &Vector3::y())
};
create_bullet_impact(
&mut scene.graph,
engine.resource_manager.clone(),
intersection.position.coords,
effect_orientation,
);
// Trail length will be the length of line between intersection point and ray origin.
(intersection.position.coords - ray.origin).norm()
} else {
// Otherwise trail length will be just the ray length.
ray.dir.norm()
};
create_shot_trail(&mut scene.graph, ray.origin, ray.dir, trail_length);
}
}
pub fn update(&mut self, engine: &mut GameEngine, dt: f32) {
let scene = &mut engine.scenes[self.scene];
self.player.update(scene);
for weapon in self.weapons.iter_mut() {
weapon.update(dt, &mut scene.graph);
}
// We're using `try_recv` here because we don't want to wait until next message -
// if the queue is empty just continue to next frame.
while let Ok(message) = self.receiver.try_recv() {
match message {
Message::ShootWeapon { weapon } => {
self.shoot_weapon(weapon, engine);
}
}
}
}
}
fn | () {
// Configure main window first.
let window_builder = WindowBuilder::new().with_title("3D Shooter Tutorial");
// Create event loop that will be used to "listen" events from the OS.
let event_loop = EventLoop::new();
// Finally create an instance of the engine.
let mut engine = GameEngine::new(window_builder, &event_loop, true).unwrap();
// Initialize game instance.
let mut game = rg3d::core::futures::executor::block_on(Game::new(&mut engine));
// Run the event loop of the main window. which will respond to OS and window events and update
// engine's state accordingly. Engine lets you to decide which event should be handled,
// this is minimal working example if how it should be.
let clock = time::Instant::now();
let mut elapsed_time = 0.0;
event_loop.run(move |event, _, control_flow| {
game.player.process_input_event(&event);
match event {
Event::MainEventsCleared => {
// This main game loop - it has fixed time step which means that game
// code will run at fixed speed even if renderer can't give you desired
// 60 fps.
let mut dt = clock.elapsed().as_secs_f32() - elapsed_time;
while dt >= TIMESTEP {
dt -= TIMESTEP;
elapsed_time += TIMESTEP;
// Run our game's logic.
game.update(&mut engine, TIMESTEP);
// Update engine each frame.
engine.update(TIMESTEP);
}
// Rendering must be explicitly requested and handled after RedrawRequested event is received.
engine.get_window().request_redraw();
}
Event::RedrawRequested(_) => {
// Render at max speed - it is not tied to the game code.
engine.render(TIMESTEP).unwrap();
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => {
// Exit game by hitting Escape.
if let Some(VirtualKeyCode::Escape) = input.virtual_keycode {
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// It is very important to handle Resized event from window, because
// renderer knows nothing about window size - it must be notified
// directly when window size has changed.
engine.renderer.set_frame_size(size.into());
}
_ => (),
},
_ => *control_flow = ControlFlow::Poll,
}
});
}
| main | identifier_name |
main.rs | use crate::{message::Message, weapon::Weapon};
use rg3d::{
core::{
algebra::{UnitQuaternion, Vector3},
color::Color,
color_gradient::{ColorGradient, GradientPoint},
math::ray::Ray,
numeric_range::NumericRange,
pool::{Handle, Pool},
},
engine::{
resource_manager::ResourceManager,
Engine,
RigidBodyHandle,
ColliderHandle
},
event::{DeviceEvent, ElementState, Event, MouseButton, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
gui::node::StubNode,
physics::{dynamics::RigidBodyBuilder, geometry::ColliderBuilder},
scene::mesh::surface::{SurfaceBuilder, SurfaceData},
resource::texture::TextureWrapMode,
scene::{
base::BaseBuilder,
camera::{CameraBuilder, SkyBox},
graph::Graph,
mesh::{MeshBuilder, RenderPath},
node::Node,
particle_system::{BaseEmitterBuilder, ParticleSystemBuilder, SphereEmitterBuilder},
physics::RayCastOptions,
transform::TransformBuilder,
Scene,
},
window::WindowBuilder,
};
use std::{
path::Path,
sync::{
mpsc::{self, Receiver, Sender},
Arc, RwLock,
},
time,
};
pub mod message;
pub mod weapon;
// Create our own engine type aliases. These specializations are needed, because the engine
// provides a way to extend UI with custom nodes and messages.
type GameEngine = Engine<(), StubNode>;
// Our game logic will be updated at 60 Hz rate.
const TIMESTEP: f32 = 1.0 / 60.0;
#[derive(Default)]
struct InputController {
move_forward: bool,
move_backward: bool,
move_left: bool,
move_right: bool,
pitch: f32,
yaw: f32,
shoot: bool,
}
struct Player {
pivot: Handle<Node>,
camera: Handle<Node>,
rigid_body: RigidBodyHandle,
controller: InputController,
weapon_pivot: Handle<Node>,
sender: Sender<Message>,
weapon: Handle<Weapon>,
collider: ColliderHandle,
}
async fn create_skybox(resource_manager: ResourceManager) -> SkyBox {
// Load skybox textures in parallel.
let (front, back, left, right, top, bottom) = rg3d::core::futures::join!(
resource_manager.request_texture("data/textures/skybox/front.jpg"),
resource_manager.request_texture("data/textures/skybox/back.jpg"),
resource_manager.request_texture("data/textures/skybox/left.jpg"),
resource_manager.request_texture("data/textures/skybox/right.jpg"),
resource_manager.request_texture("data/textures/skybox/up.jpg"),
resource_manager.request_texture("data/textures/skybox/down.jpg")
);
// Unwrap everything.
let skybox = SkyBox {
front: Some(front.unwrap()),
back: Some(back.unwrap()),
left: Some(left.unwrap()),
right: Some(right.unwrap()),
top: Some(top.unwrap()),
bottom: Some(bottom.unwrap()),
};
// Set S and T coordinate wrap mode, ClampToEdge will remove any possible seams on edges
// of the skybox.
for skybox_texture in skybox.textures().iter().filter_map(|t| t.clone()) {
let mut data = skybox_texture.data_ref();
data.set_s_wrap_mode(TextureWrapMode::ClampToEdge);
data.set_t_wrap_mode(TextureWrapMode::ClampToEdge);
}
skybox
}
fn create_bullet_impact(
graph: &mut Graph,
resource_manager: ResourceManager,
pos: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node> {
// Create sphere emitter first.
let emitter = SphereEmitterBuilder::new(
BaseEmitterBuilder::new()
.with_max_particles(200)
.with_spawn_rate(1000)
.with_size_modifier_range(NumericRange::new(-0.01, -0.0125))
.with_size_range(NumericRange::new(0.0010, 0.025))
.with_x_velocity_range(NumericRange::new(-0.01, 0.01))
.with_y_velocity_range(NumericRange::new(0.017, 0.02))
.with_z_velocity_range(NumericRange::new(-0.01, 0.01))
.resurrect_particles(false),
)
.with_radius(0.01)
.build();
// Color gradient will be used to modify color of each particle over its lifetime.
let color_gradient = {
let mut gradient = ColorGradient::new();
gradient.add_point(GradientPoint::new(0.00, Color::from_rgba(255, 255, 0, 0)));
gradient.add_point(GradientPoint::new(0.05, Color::from_rgba(255, 160, 0, 255)));
gradient.add_point(GradientPoint::new(0.95, Color::from_rgba(255, 120, 0, 255)));
gradient.add_point(GradientPoint::new(1.00, Color::from_rgba(255, 60, 0, 0)));
gradient
};
// Create new transform to orient and position particle system.
let transform = TransformBuilder::new()
.with_local_position(pos)
.with_local_rotation(orientation)
.build();
// Finally create particle system with limited lifetime.
ParticleSystemBuilder::new(
BaseBuilder::new()
.with_lifetime(1.0)
.with_local_transform(transform),
)
.with_acceleration(Vector3::new(0.0, -10.0, 0.0))
.with_color_over_lifetime_gradient(color_gradient)
.with_emitters(vec![emitter])
// We'll use simple spark texture for each particle.
.with_texture(resource_manager.request_texture(Path::new("data/textures/spark.png")))
.build(graph)
}
impl Player {
async fn new(
scene: &mut Scene,
resource_manager: ResourceManager,
sender: Sender<Message>,
) -> Self |
fn update(&mut self, scene: &mut Scene) {
// Set pitch for the camera. These lines responsible for up-down camera rotation.
scene.graph[self.camera].local_transform_mut().set_rotation(
UnitQuaternion::from_axis_angle(&Vector3::x_axis(), self.controller.pitch.to_radians()),
);
// Borrow the pivot in the graph.
let pivot = &mut scene.graph[self.pivot];
// Borrow rigid body in the physics.
let body = scene
.physics
.bodies
.get_mut(&self.rigid_body)
.unwrap();
// Keep only vertical velocity, and drop horizontal.
let mut velocity = Vector3::new(0.0, body.linvel().y, 0.0);
// Change the velocity depending on the keys pressed.
if self.controller.move_forward {
// If we moving forward then add "look" vector of the pivot.
velocity += pivot.look_vector();
}
if self.controller.move_backward {
// If we moving backward then subtract "look" vector of the pivot.
velocity -= pivot.look_vector();
}
if self.controller.move_left {
// If we moving left then add "side" vector of the pivot.
velocity += pivot.side_vector();
}
if self.controller.move_right {
// If we moving right then subtract "side" vector of the pivot.
velocity -= pivot.side_vector();
}
// Finally new linear velocity.
body.set_linvel(velocity, true);
// Change the rotation of the rigid body according to current yaw. These lines responsible for
// left-right rotation.
let mut position = *body.position();
position.rotation =
UnitQuaternion::from_axis_angle(&Vector3::y_axis(), self.controller.yaw.to_radians());
body.set_position(position, true);
if self.controller.shoot {
self.sender
.send(Message::ShootWeapon {
weapon: self.weapon,
})
.unwrap();
}
}
fn process_input_event(&mut self, event: &Event<()>) {
match event {
Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput { input, .. } => {
if let Some(key_code) = input.virtual_keycode {
match key_code {
VirtualKeyCode::W => {
self.controller.move_forward = input.state == ElementState::Pressed;
}
VirtualKeyCode::S => {
self.controller.move_backward =
input.state == ElementState::Pressed;
}
VirtualKeyCode::A => {
self.controller.move_left = input.state == ElementState::Pressed;
}
VirtualKeyCode::D => {
self.controller.move_right = input.state == ElementState::Pressed;
}
_ => (),
}
}
}
&WindowEvent::MouseInput { button, state, .. } => {
if button == MouseButton::Left {
self.controller.shoot = state == ElementState::Pressed;
}
}
_ => {}
},
Event::DeviceEvent { event, .. } => {
if let DeviceEvent::MouseMotion { delta } = event {
let mouse_sens = 0.5;
self.controller.yaw -= mouse_sens * delta.0 as f32;
self.controller.pitch =
(self.controller.pitch + mouse_sens * delta.1 as f32).clamp(-90.0, 90.0);
}
}
_ => (),
}
}
}
fn create_shot_trail(
graph: &mut Graph,
origin: Vector3<f32>,
direction: Vector3<f32>,
trail_length: f32,
) {
let transform = TransformBuilder::new()
.with_local_position(origin)
// Scale the trail in XZ plane to make it thin, and apply `trail_length` scale on Y axis
// to stretch is out.
.with_local_scale(Vector3::new(0.0025, 0.0025, trail_length))
// Rotate the trail along given `direction`
.with_local_rotation(UnitQuaternion::face_towards(&direction, &Vector3::y()))
.build();
// Create unit cylinder with caps that faces toward Z axis.
let shape = Arc::new(RwLock::new(SurfaceData::make_cylinder(
6, // Count of sides
1.0, // Radius
1.0, // Height
false, // No caps are needed.
// Rotate vertical cylinder around X axis to make it face towards Z axis
&UnitQuaternion::from_axis_angle(&Vector3::x_axis(), 90.0f32.to_radians()).to_homogeneous(),
)));
MeshBuilder::new(
BaseBuilder::new()
.with_local_transform(transform)
// Shot trail should live ~0.25 seconds, after that it will be automatically
// destroyed.
.with_lifetime(0.25),
)
.with_surfaces(vec![SurfaceBuilder::new(shape)
// Set yellow-ish color.
.with_color(Color::from_rgba(255, 255, 0, 120))
.build()])
// Do not cast shadows.
.with_cast_shadows(false)
// Make sure to set Forward render path, otherwise the object won't be
// transparent.
.with_render_path(RenderPath::Forward)
.build(graph);
}
struct Game {
scene: Handle<Scene>,
player: Player,
weapons: Pool<Weapon>,
receiver: Receiver<Message>,
sender: Sender<Message>,
}
impl Game {
pub async fn new(engine: &mut GameEngine) -> Self {
// Make message queue.
let (sender, receiver) = mpsc::channel();
let mut scene = Scene::new();
// Load a scene resource and create its instance.
engine
.resource_manager
.request_model("data/models/scene.rgs")
.await
.unwrap()
.instantiate_geometry(&mut scene);
// Create player first.
let mut player =
Player::new(&mut scene, engine.resource_manager.clone(), sender.clone()).await;
// Create weapon next.
let weapon = Weapon::new(&mut scene, engine.resource_manager.clone()).await;
// "Attach" the weapon to the weapon pivot of the player.
scene.graph.link_nodes(weapon.model(), player.weapon_pivot);
// Create a container for the weapons.
let mut weapons = Pool::new();
// Put the weapon into it - this operation moves the weapon in the pool and returns handle.
let weapon = weapons.spawn(weapon);
// "Give" the weapon to the player.
player.weapon = weapon;
Self {
player,
scene: engine.scenes.add(scene),
weapons,
sender,
receiver,
}
}
fn shoot_weapon(&mut self, weapon: Handle<Weapon>, engine: &mut GameEngine) {
let weapon = &mut self.weapons[weapon];
if weapon.can_shoot() {
weapon.shoot();
let scene = &mut engine.scenes[self.scene];
let weapon_model = &scene.graph[weapon.model()];
// Make a ray that starts at the weapon's position in the world and look toward
// "look" vector of the weapon.
let ray = Ray::new(
scene.graph[weapon.shot_point()].global_position(),
weapon_model.look_vector().scale(1000.0),
);
let mut intersections = Vec::new();
scene.physics.cast_ray(
RayCastOptions {
ray,
max_len: ray.dir.norm(),
groups: Default::default(),
sort_results: true, // We need intersections to be sorted from closest to furthest.
},
&mut intersections,
);
// Ignore intersections with player's capsule.
let trail_length = if let Some(intersection) = intersections
.iter()
.find(|i| i.collider != self.player.collider)
{
//
// TODO: Add code to handle intersections with bots.
//
// For now just apply some force at the point of impact.
let collider = scene
.physics
.colliders
.get(&intersection.collider)
.unwrap();
scene
.physics
.bodies
.native_mut(collider.parent().unwrap())
.unwrap()
.apply_force_at_point(
ray.dir.normalize().scale(10.0),
intersection.position,
true,
);
// Add bullet impact effect.
let effect_orientation = if intersection.normal.normalize() == Vector3::y() {
// Handle singularity when normal of impact point is collinear with Y axis.
UnitQuaternion::from_axis_angle(&Vector3::y_axis(), 0.0)
} else {
UnitQuaternion::face_towards(&intersection.normal, &Vector3::y())
};
create_bullet_impact(
&mut scene.graph,
engine.resource_manager.clone(),
intersection.position.coords,
effect_orientation,
);
// Trail length will be the length of line between intersection point and ray origin.
(intersection.position.coords - ray.origin).norm()
} else {
// Otherwise trail length will be just the ray length.
ray.dir.norm()
};
create_shot_trail(&mut scene.graph, ray.origin, ray.dir, trail_length);
}
}
pub fn update(&mut self, engine: &mut GameEngine, dt: f32) {
let scene = &mut engine.scenes[self.scene];
self.player.update(scene);
for weapon in self.weapons.iter_mut() {
weapon.update(dt, &mut scene.graph);
}
// We're using `try_recv` here because we don't want to wait until next message -
// if the queue is empty just continue to next frame.
while let Ok(message) = self.receiver.try_recv() {
match message {
Message::ShootWeapon { weapon } => {
self.shoot_weapon(weapon, engine);
}
}
}
}
}
fn main() {
// Configure main window first.
let window_builder = WindowBuilder::new().with_title("3D Shooter Tutorial");
// Create event loop that will be used to "listen" events from the OS.
let event_loop = EventLoop::new();
// Finally create an instance of the engine.
let mut engine = GameEngine::new(window_builder, &event_loop, true).unwrap();
// Initialize game instance.
let mut game = rg3d::core::futures::executor::block_on(Game::new(&mut engine));
// Run the event loop of the main window. which will respond to OS and window events and update
// engine's state accordingly. Engine lets you to decide which event should be handled,
// this is minimal working example if how it should be.
let clock = time::Instant::now();
let mut elapsed_time = 0.0;
event_loop.run(move |event, _, control_flow| {
game.player.process_input_event(&event);
match event {
Event::MainEventsCleared => {
// This main game loop - it has fixed time step which means that game
// code will run at fixed speed even if renderer can't give you desired
// 60 fps.
let mut dt = clock.elapsed().as_secs_f32() - elapsed_time;
while dt >= TIMESTEP {
dt -= TIMESTEP;
elapsed_time += TIMESTEP;
// Run our game's logic.
game.update(&mut engine, TIMESTEP);
// Update engine each frame.
engine.update(TIMESTEP);
}
// Rendering must be explicitly requested and handled after RedrawRequested event is received.
engine.get_window().request_redraw();
}
Event::RedrawRequested(_) => {
// Render at max speed - it is not tied to the game code.
engine.render(TIMESTEP).unwrap();
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => {
// Exit game by hitting Escape.
if let Some(VirtualKeyCode::Escape) = input.virtual_keycode {
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// It is very important to handle Resized event from window, because
// renderer knows nothing about window size - it must be notified
// directly when window size has changed.
engine.renderer.set_frame_size(size.into());
}
_ => (),
},
_ => *control_flow = ControlFlow::Poll,
}
});
}
| {
// Create a pivot and attach a camera to it, move it a bit up to "emulate" head.
let camera;
let weapon_pivot;
let pivot = BaseBuilder::new()
.with_children(&[{
camera = CameraBuilder::new(
BaseBuilder::new()
.with_local_transform(
TransformBuilder::new()
.with_local_position(Vector3::new(0.0, 0.25, 0.0))
.build(),
)
.with_children(&[{
weapon_pivot = BaseBuilder::new()
.with_local_transform(
TransformBuilder::new()
.with_local_position(Vector3::new(-0.1, -0.05, 0.015))
.build(),
)
.build(&mut scene.graph);
weapon_pivot
}]),
)
.with_skybox(create_skybox(resource_manager).await)
.build(&mut scene.graph);
camera
}])
.build(&mut scene.graph);
// Create rigid body, it will be used for interaction with the world.
let rigid_body_handle = scene.physics.add_body(
RigidBodyBuilder::new_dynamic()
.lock_rotations() // We don't want the player to tilt.
.translation(Vector3::new(0.0, 1.0, -1.0)) // Offset player a bit.
.build(),
);
// Add capsule collider for the rigid body.
let collider = scene.physics.add_collider(
ColliderBuilder::capsule_y(0.25, 0.2).build(),
&rigid_body_handle,
);
// Bind pivot with rigid body. Scene will automatically sync transform of the pivot
// with the transform of the rigid body.
scene.physics_binder.bind(pivot, rigid_body_handle);
Self {
pivot,
camera,
weapon_pivot,
rigid_body: rigid_body_handle,
controller: Default::default(),
sender,
collider,
weapon: Default::default(), // Leave it unassigned for now.
}
} | identifier_body |
lib.rs | #![deny(
future_incompatible,
nonstandard_style,
rust_2018_compatibility,
rust_2018_idioms,
unused,
missing_docs
)]
//! # luomu-libpcap
//!
//! Safe and mostly sane Rust bindings for [libpcap](https://www.tcpdump.org/).
//!
//! We are split in two different crates:
//!
//! * `luomu-libpcap-sys` for unsafe Rust bindings generated directly from
//! `libpcap`.
//! * `luomu-libpcap` for safe and sane libpcap interface.
//!
//! `luomu-libpcap` crate is split into two parts itself:
//!
//! * `functions` module contains safe wrappers and sane return values for
//! libpcap functions.
//! * the root of the project contains `Pcap` struct et al. for more Rusty API
//! to interact with libpcap.
//!
//! You probably want to use the `Pcap` struct and other things from root of
//! this crate.
use std::collections::{BTreeSet, HashSet};
use std::convert::TryFrom;
use std::default;
use std::net::IpAddr;
use std::ops::Deref;
use std::path::Path;
use std::result;
use std::time::Duration;
use luomu_common::{Address, MacAddr};
use luomu_libpcap_sys as libpcap;
pub mod functions;
use functions::*;
mod error;
pub use error::Error;
mod packet;
pub use packet::{BorrowedPacket, OwnedPacket, Packet};
#[cfg(feature = "async-tokio")]
pub mod tokio;
/// A `Result` wrapping luomu-libpcap's errors in `Err` side
pub type Result<T> = result::Result<T, Error>;
/// Keeper of the `libpcap`'s `pcap_t`.
pub struct PcapT {
pcap_t: *mut libpcap::pcap_t,
#[allow(dead_code)]
errbuf: Vec<u8>,
interface: Option<String>,
}
// I assume the pcap_t pointer is safe to move between threads, but it can only
// be used from one thread. libpcap documentation is vague about thread safety,
// so we try this.
unsafe impl Send for PcapT {}
impl PcapT {
/// get interface name
///
/// `get_interface` returns the interface name if known or "<unknown>".
pub fn get_inteface(&self) -> String {
if let Some(name) = &self.interface {
name.to_owned()
} else {
String::from("<unknown>")
}
}
/// get libpcap error message text
///
/// `get_error()` returns the error pertaining to the last pcap library error.
///
/// This function can also fail, how awesome is that? The `Result` of
/// `Ok(Error)` contains the error from libpcap as intended. `Err(Error)`
/// contains the error happened while calling this function.
pub fn get_error(&self) -> Result<Error> {
get_error(self)
}
}
impl Drop for PcapT {
fn drop(&mut self) {
log::trace!("PcapT::drop({:p})", self.pcap_t);
unsafe { luomu_libpcap_sys::pcap_close(self.pcap_t) }
}
}
/// Pcap capture
///
/// This contains everything needed to capture the packets from network.
///
/// To get started use `Pcap::builder()` to start a new Pcap capture builder.
/// Use it to set required options for the capture and then call
/// `PcapBuider::activate()` to activate the capture.
///
/// Then `Pcap::capture()` can be used to start an iterator for capturing
/// packets.
pub struct Pcap {
pcap_t: PcapT,
}
impl Pcap {
/// Create a live capture handle
///
/// This is used to create a packet capture handle to look at packets on the
/// network. `source` is a string that specifies the network device to open.
pub fn new(source: &str) -> Result<Pcap> {
let pcap_t = pcap_create(source)?;
Ok(Pcap { pcap_t })
}
/// Create a capture handle for reading packets from given savefile.
///
/// This function can be used to create handle to read packes from saved
/// pcap -file. Use `capture()` to get iterator for packets in the file.
pub fn offline<P: AsRef<Path>>(savefile: P) -> Result<Pcap> {
Ok(Pcap {
pcap_t: pcap_open_offline(savefile)?,
})
}
/// Use builder to create a live capture handle
///
/// This is used to create a packet capture handle to look at packets on the
/// network. source is a string that specifies the network device to open.
pub fn builder(source: &str) -> Result<PcapBuilder> {
let pcap_t = pcap_create(source)?;
Ok(PcapBuilder { pcap_t })
}
/// set a filter expression
///
/// `Set a filter for capture. See
/// [pcap-filter(7)](https://www.tcpdump.org/manpages/pcap-filter.7.html)
/// for the syntax of that string.
pub fn set_filter(&self, filter: &str) -> Result<()> {
let mut bpf_program = PcapFilter::compile_with_pcap_t(&self.pcap_t, filter)?;
pcap_setfilter(&self.pcap_t, &mut bpf_program)
}
/// Start capturing packets
///
/// This returns an iterator `PcapIter` which can be used to get captured
/// packets.
pub fn capture(&self) -> PcapIter<'_> {
PcapIter::new(&self.pcap_t)
}
/// Transmit a packet
pub fn inject(&self, buf: &[u8]) -> Result<usize> {
pcap_inject(&self.pcap_t, buf)
}
/// activate a capture
///
/// This is used to activate a packet capture to look at packets on the
/// network, with the options that were set on the handle being in effect.
pub fn activate(&self) -> Result<()> {
pcap_activate(&self.pcap_t)
}
/// get capture statistics
///
/// Returns statistics from current capture. The values represent packet
/// statistics from the start of the run to the time of the call.
pub fn stats(&self) -> Result<PcapStat> {
let mut stats: PcapStat = Default::default();
match pcap_stats(&self.pcap_t, &mut stats) {
Ok(()) => Ok(stats),
Err(e) => Err(e),
}
}
}
impl Deref for Pcap {
type Target = PcapT;
fn deref(&self) -> &Self::Target {
&self.pcap_t
}
}
/// Builder for a `Pcap`. Call `Pcap::builder()` to get started.
pub struct PcapBuilder {
pcap_t: PcapT,
}
impl PcapBuilder {
/// set the buffer size for a capture
///
/// `set_buffer_size()` sets the buffer size that will be used on a capture
/// handle when the handle is activated to buffer_size, which is in units of
/// bytes.
pub fn set_buffer_size(self, buffer_size: usize) -> Result<PcapBuilder> {
pcap_set_buffer_size(&self.pcap_t, buffer_size)?;
Ok(self)
}
/// set promiscuous mode for a capture
///
/// `set_promisc()` sets whether promiscuous mode should be set on a capture
/// handle when the handle is activated.
pub fn set_promiscuous(self, promiscuous: bool) -> Result<PcapBuilder> {
pcap_set_promisc(&self.pcap_t, promiscuous)?;
Ok(self)
}
/// set immediate mode for a capture
///
/// `set_immediate_mode()` sets whether immediate mode should be set on a
/// capture handle when the handle is activated. In immediate mode, packets
/// are always delivered as soon as they arrive, with no buffering.
pub fn set_immediate(self, immediate: bool) -> Result<PcapBuilder> {
pcap_set_immediate_mode(&self.pcap_t, immediate)?;
Ok(self)
}
/// set packet buffer timeout for a capture
///
/// `pcap_set_timeout()` sets the packet buffer timeout that will be used on a
/// capture handle when the handle is activated to to_ms, which is in units of
/// milliseconds.
pub fn set_timeout(self, to_ms: Duration) -> Result<PcapBuilder> {
pcap_set_timeout(
&self.pcap_t,
(to_ms.as_millis().min(i32::MAX as u128)) as i32,
)?;
Ok(self)
}
/// set the snapshot length for a capture
///
/// `set_snaplen()` sets the snapshot length to be used on a capture handle
/// when the handle is activated to snaplen.
///
/// `libpcap` says 65535 bytes should be enough for everyone.
pub fn set_snaplen(self, snaplen: usize) -> Result<PcapBuilder> {
pcap_set_snaplen(&self.pcap_t, snaplen)?;
Ok(self)
}
/// activate a capture
///
/// `activate()` is used to activate a packet capture to look at packets on
/// the network, with the options that were set on the handle being in
/// effect.
pub fn activate(self) -> Result<Pcap> {
pcap_activate(&self.pcap_t)?;
Ok(Pcap {
pcap_t: self.pcap_t,
})
}
}
/// A BPF filter program for Pcap.
pub struct PcapFilter {
bpf_program: libpcap::bpf_program,
}
impl PcapFilter {
/// compile a filter expression
///
/// `compile()` is used to compile the filter into a filter program. See
/// [pcap-filter(7)](https://www.tcpdump.org/manpages/pcap-filter.7.html)
/// for the syntax of that string.
pub fn compile(filter: &str) -> Result<PcapFilter> {
let pcap = pcap_open_dead()?;
pcap_compile(&pcap, filter)
}
/// compile a filter expression with `PcapT`
///
/// `compile_with_pcap_t()` is used to compile the filter into a filter
/// program. See
/// [pcap-filter(7)](https://www.tcpdump.org/manpages/pcap-filter.7.html)
/// for the syntax of that string.
pub fn compile_with_pcap_t(pcap_t: &PcapT, filter_str: &str) -> Result<PcapFilter> {
pcap_compile(pcap_t, filter_str)
}
/// Get length of the compiled filter
pub fn get_raw_filter_len(&self) -> u32 {
self.bpf_program.bf_len
}
/// Get pointer to the raw compiled filter program.
/// Raw filter may be used when attaching filter to socket outside libpcap.
/// # Safety
/// Note that the pointer is valid only as long as this filter is valid.
/// The returned pointer will be cast as *void since there is no common
/// structure to which export the program.
pub unsafe fn get_raw_filter(&self) -> &std::ffi::c_void {
(self.bpf_program.bf_insns as *const std::ffi::c_void)
.as_ref()
.unwrap()
}
}
impl Drop for PcapFilter {
fn drop(&mut self) {
log::trace!("PcapFilter::drop({:p})", &self.bpf_program);
unsafe { luomu_libpcap_sys::pcap_freecode(&mut self.bpf_program) }
}
}
/// Pcap capture iterator
pub struct PcapIter<'p> {
pcap_t: &'p PcapT,
}
impl<'p> PcapIter<'p> {
fn new(pcap_t: &'p PcapT) -> Self {
PcapIter { pcap_t }
}
}
impl<'p> Iterator for PcapIter<'p> {
type Item = BorrowedPacket;
fn next(&mut self) -> Option<Self::Item> {
loop {
match pcap_next_ex(self.pcap_t) {
Ok(p) => return Some(p),
Err(e) => match e {
// pcap_next_ex() sometimes seems to return
// "packet buffer expired" (whatever that means),
// even if the immediate mode is set. Just retry in
// this case.
Error::Timeout => continue,
_ => return None,
},
}
}
}
}
/// Pcap capture statistics
pub struct PcapStat {
stats: libpcap::pcap_stat,
}
impl default::Default for PcapStat {
fn default() -> Self {
PcapStat {
stats: libpcap::pcap_stat {
ps_recv: 0,
ps_drop: 0,
ps_ifdrop: 0,
},
}
}
}
impl PcapStat {
/// Return number of packets received.
pub fn | (&self) -> u32 {
self.stats.ps_recv
}
/// Return number of packets dropped because there was no room in the
/// operating system's buffer when they arrived, because packets weren't
/// being read fast enough.
pub fn packets_dropped(&self) -> u32 {
self.stats.ps_drop
}
/// Return number of packets dropped by the network interface or its driver.
pub fn packets_dropped_interface(&self) -> u32 {
self.stats.ps_ifdrop
}
}
/// Keeper of the `libpcap`'s `pcap_if_t`.
pub struct PcapIfT {
pcap_if_t: *mut libpcap::pcap_if_t,
}
impl PcapIfT {
/// get a list of capture devices
///
/// Constructs a list of network devices that can be opened with
/// `Pcap::new()` and `Pcap::builder()`. Note that there may be network
/// devices that cannot be opened by the process calling, because, for
/// example, that process does not have sufficient privileges to open them
/// for capturing; if so, those devices will not appear on the list.
pub fn new() -> Result<Self> {
pcap_findalldevs()
}
/// Return iterator for iterating capture devices.
pub fn iter(&self) -> InterfaceIter {
InterfaceIter {
start: self.pcap_if_t,
next: Some(self.pcap_if_t),
}
}
/// Get all capture devices.
pub fn get_interfaces(&self) -> HashSet<Interface> {
self.iter().collect()
}
/// Find capture device with interface name `name`.
pub fn find_interface_with_name(&self, name: &str) -> Option<Interface> {
for interface in self.get_interfaces() {
if interface.has_name(name) {
log::trace!("find_interface_with_name({}) = {:?}", name, interface);
return Some(interface);
}
}
None
}
/// Find capture device which have IP address `ip`.
pub fn find_interface_with_ip(&self, ip: &IpAddr) -> Option<String> {
for interface in self.get_interfaces() {
if interface.has_address(ip) {
log::trace!("find_interface_with_ip({}) = {:?}", ip, interface);
return Some(interface.name);
}
}
None
}
}
impl Drop for PcapIfT {
fn drop(&mut self) {
log::trace!("PcapIfT::drop({:?})", self.pcap_if_t);
unsafe { luomu_libpcap_sys::pcap_freealldevs(self.pcap_if_t) }
}
}
/// A network device that can be opened with `Pcap::new()` and
/// `Pcap::builder()`.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct Interface {
/// Devices name
pub name: String,
/// Devices description
pub description: Option<String>,
/// All addresses found from device
pub addresses: BTreeSet<InterfaceAddress>,
/// Flags set for device
pub flags: BTreeSet<InterfaceFlag>,
}
impl Interface {
/// True if interface is up
pub fn is_up(&self) -> bool {
self.flags.get(&InterfaceFlag::Up).is_some()
}
/// True if interface is running
pub fn is_running(&self) -> bool {
self.flags.get(&InterfaceFlag::Running).is_some()
}
/// True if interface is loopback
pub fn is_loopback(&self) -> bool {
self.flags.get(&InterfaceFlag::Loopback).is_some()
}
/// True if interface is has name `name`
pub fn has_name(&self, name: &str) -> bool {
self.name == name
}
/// Return MAC aka Ethernet address of the interface
pub fn get_ether_address(&self) -> Option<MacAddr> {
for ia in &self.addresses {
if let Address::Mac(addr) = ia.addr {
return Some(addr);
}
}
None
}
/// Return IP addresses of interface
pub fn get_ip_addresses(&self) -> HashSet<IpAddr> {
self.addresses
.iter()
.filter_map(|i| IpAddr::try_from(&i.addr).ok())
.collect()
}
/// True if interface is has IP address `ip`
pub fn has_address(&self, ip: &IpAddr) -> bool {
self.get_ip_addresses().get(ip).is_some()
}
}
/// Interface iterator
///
/// Iterates all capture interfaces.
pub struct InterfaceIter {
// First item in linked list, only used for trace logging
start: *mut libpcap::pcap_if_t,
// Next item in linked list, used for iteration
next: Option<*mut libpcap::pcap_if_t>,
}
impl Iterator for InterfaceIter {
type Item = Interface;
fn next(&mut self) -> Option<Interface> {
log::trace!(
"InterfaceIter(start: {:p}, next: {:p})",
self.start,
self.next.unwrap_or(std::ptr::null_mut())
);
let pcap_if_t = self.next?;
if pcap_if_t.is_null() {
self.next = None;
return None;
}
let next = unsafe { (*pcap_if_t).next };
if next.is_null() {
self.next = None;
} else {
self.next = Some(next);
}
match try_interface_from(pcap_if_t) {
Ok(dev) => Some(dev),
Err(err) => {
log::error!("try_interface_from{:p}: {}", pcap_if_t, err);
None
}
}
}
}
/// Collection of addresses for network interface.
///
/// The fields mirror libpcap's `pcap_addr` structure
/// (addr / netmask / broadaddr / dstaddr).
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct InterfaceAddress {
    /// Network interface's address
    addr: Address,
    /// The netmask corresponding to the address pointed to by addr.
    netmask: Option<Address>,
    /// The broadcast address corresponding to the address pointed to by addr;
    /// may be `None` if the device doesn't support broadcasts.
    broadaddr: Option<Address>,
    /// The destination address corresponding to the address pointed to by addr;
    /// may be `None` if the device isn't a point-to-point interface.
    dstaddr: Option<Address>,
}
/// Iterator for network device's addresses.
///
/// NOTE(review): the raw pointers point into a pcap_addr list owned by
/// libpcap's device list; this iterator must not outlive the `PcapIfT`
/// that owns that list — confirm callers uphold this.
pub struct AddressIter {
    // First item in linked list, only used for trace logging
    start: *mut libpcap::pcap_addr_t,
    // Next item in linked list, used for iteration
    next: Option<*mut libpcap::pcap_addr_t>,
}
impl Iterator for AddressIter {
    type Item = InterfaceAddress;
    /// Advance to the next address in libpcap's linked `pcap_addr` list.
    ///
    /// Entries whose address family `try_address_from` cannot represent are
    /// skipped. Skipping is done with a loop instead of the previous tail
    /// recursion, so a long run of unsupported addresses cannot grow the
    /// call stack.
    fn next(&mut self) -> Option<InterfaceAddress> {
        loop {
            log::trace!(
                "AddressIter(start: {:p}, next: {:p})",
                self.start,
                self.next.unwrap_or(std::ptr::null_mut())
            );
            let pcap_addr_t = self.next?;
            if pcap_addr_t.is_null() {
                self.next = None;
                return None;
            }
            // SAFETY: pcap_addr_t is non-null and points into the list owned
            // by libpcap; `next` links to the following entry or NULL.
            let next = unsafe { (*pcap_addr_t).next };
            if next.is_null() {
                self.next = None;
            } else {
                self.next = Some(next);
            }
            if let Some(dev) = try_address_from(pcap_addr_t) {
                return Some(dev);
            }
            // Address was something we don't know how to handle. Continue
            // with the next address in the list.
        }
    }
}
/// Various flags which can be set on network interface
///
/// NOTE(review): presumably mirrors libpcap's `PCAP_IF_*` flag bits —
/// confirm against the conversion in `functions::try_interface_from`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum InterfaceFlag {
    /// set if the interface is a loopback interface
    Loopback,
    /// set if the interface is up
    Up,
    /// set if the interface is running
    Running,
}
| packets_received | identifier_name |
lib.rs | #![deny(
future_incompatible,
nonstandard_style,
rust_2018_compatibility,
rust_2018_idioms,
unused,
missing_docs
)]
//! # luomu-libpcap
//!
//! Safe and mostly sane Rust bindings for [libpcap](https://www.tcpdump.org/).
//!
//! We are split in two different crates:
//!
//! * `luomu-libpcap-sys` for unsafe Rust bindings generated directly from
//! `libpcap`.
//! * `luomu-libpcap` for safe and sane libpcap interface.
//!
//! `luomu-libpcap` crate is split into two parts itself:
//!
//! * `functions` module contains safe wrappers and sane return values for
//! libpcap functions.
//! * the root of the project contains `Pcap` struct et al. for more Rusty API
//! to interact with libpcap.
//!
//! You probably want to use the `Pcap` struct and other things from root of
//! this crate.
use std::collections::{BTreeSet, HashSet};
use std::convert::TryFrom;
use std::default;
use std::net::IpAddr;
use std::ops::Deref;
use std::path::Path;
use std::result;
use std::time::Duration;
use luomu_common::{Address, MacAddr};
use luomu_libpcap_sys as libpcap;
pub mod functions;
use functions::*;
mod error;
pub use error::Error;
mod packet;
pub use packet::{BorrowedPacket, OwnedPacket, Packet};
#[cfg(feature = "async-tokio")]
pub mod tokio;
/// A `Result` wrapping luomu-libpcap's errors in `Err` side
pub type Result<T> = result::Result<T, Error>;
/// Keeper of the `libpcap`'s `pcap_t`.
pub struct PcapT {
    // Raw handle from pcap_create/pcap_open_*; closed in `Drop` (pcap_close).
    pcap_t: *mut libpcap::pcap_t,
    // Error buffer handed to libpcap at creation; kept alive for the lifetime
    // of the handle — presumably libpcap retains a pointer to it, hence the
    // dead_code allowance. TODO confirm.
    #[allow(dead_code)]
    errbuf: Vec<u8>,
    // Interface name the handle was opened on, when known.
    interface: Option<String>,
}
// I assume the pcap_t pointer is safe to move between threads, but it can only
// be used from one thread. libpcap documentation is vague about thread safety,
// so we try this.
// SAFETY: this asserts only `Send` (the handle may be moved to another
// thread), not `Sync` — the handle is still used from one thread at a time.
unsafe impl Send for PcapT {}
impl PcapT {
    /// get interface name
    ///
    /// `get_interface` returns the interface name if known or "<unknown>".
    pub fn get_interface(&self) -> String {
        self.interface
            .clone()
            .unwrap_or_else(|| String::from("<unknown>"))
    }
    /// get interface name
    ///
    /// Deprecated misspelling of [`PcapT::get_interface`], kept so existing
    /// callers keep compiling.
    #[deprecated(note = "use `get_interface` instead")]
    pub fn get_inteface(&self) -> String {
        self.get_interface()
    }
    /// get libpcap error message text
    ///
    /// `get_error()` returns the error pertaining to the last pcap library error.
    ///
    /// This function can also fail, how awesome is that? The `Result` of
    /// `Ok(Error)` contains the error from libpcap as intended. `Err(Error)`
    /// contains the error happened while calling this function.
    pub fn get_error(&self) -> Result<Error> {
        get_error(self)
    }
}
impl Drop for PcapT {
    fn drop(&mut self) {
        log::trace!("PcapT::drop({:p})", self.pcap_t);
        // SAFETY: pcap_t was obtained from libpcap at construction and is
        // closed exactly once here; it is not used after drop.
        unsafe { luomu_libpcap_sys::pcap_close(self.pcap_t) }
    }
}
/// Pcap capture
///
/// This contains everything needed to capture the packets from network.
///
/// To get started use `Pcap::builder()` to start a new Pcap capture builder.
/// Use it to set required options for the capture and then call
/// `PcapBuilder::activate()` to activate the capture.
///
/// Then `Pcap::capture()` can be used to start an iterator for capturing
/// packets.
pub struct Pcap {
    // Owned handle; the underlying pcap_t is closed when this is dropped.
    pcap_t: PcapT,
}
impl Pcap {
/// Create a live capture handle
///
/// This is used to create a packet capture handle to look at packets on the
/// network. `source` is a string that specifies the network device to open.
pub fn new(source: &str) -> Result<Pcap> {
let pcap_t = pcap_create(source)?;
Ok(Pcap { pcap_t })
}
/// Create a capture handle for reading packets from given savefile.
///
/// This function can be used to create handle to read packes from saved
/// pcap -file. Use `capture()` to get iterator for packets in the file.
pub fn offline<P: AsRef<Path>>(savefile: P) -> Result<Pcap> {
Ok(Pcap {
pcap_t: pcap_open_offline(savefile)?,
})
}
/// Use builder to create a live capture handle
///
/// This is used to create a packet capture handle to look at packets on the
/// network. source is a string that specifies the network device to open.
pub fn builder(source: &str) -> Result<PcapBuilder> {
let pcap_t = pcap_create(source)?;
Ok(PcapBuilder { pcap_t })
}
/// set a filter expression
///
/// `Set a filter for capture. See
/// [pcap-filter(7)](https://www.tcpdump.org/manpages/pcap-filter.7.html)
/// for the syntax of that string.
pub fn set_filter(&self, filter: &str) -> Result<()> {
let mut bpf_program = PcapFilter::compile_with_pcap_t(&self.pcap_t, filter)?;
pcap_setfilter(&self.pcap_t, &mut bpf_program)
}
/// Start capturing packets
///
/// This returns an iterator `PcapIter` which can be used to get captured
/// packets. | pub fn capture(&self) -> PcapIter<'_> {
PcapIter::new(&self.pcap_t)
}
/// Transmit a packet
pub fn inject(&self, buf: &[u8]) -> Result<usize> {
pcap_inject(&self.pcap_t, buf)
}
/// activate a capture
///
/// This is used to activate a packet capture to look at packets on the
/// network, with the options that were set on the handle being in effect.
pub fn activate(&self) -> Result<()> {
pcap_activate(&self.pcap_t)
}
/// get capture statistics
///
/// Returns statistics from current capture. The values represent packet
/// statistics from the start of the run to the time of the call.
pub fn stats(&self) -> Result<PcapStat> {
let mut stats: PcapStat = Default::default();
match pcap_stats(&self.pcap_t, &mut stats) {
Ok(()) => Ok(stats),
Err(e) => Err(e),
}
}
}
impl Deref for Pcap {
    type Target = PcapT;
    // Lets `PcapT` methods (e.g. `get_error`) be called directly on `Pcap`.
    fn deref(&self) -> &Self::Target {
        &self.pcap_t
    }
}
/// Builder for a `Pcap`. Call `Pcap::builder()` to get started.
pub struct PcapBuilder {
    // Unactivated handle; options accumulate on it until `activate()`.
    pcap_t: PcapT,
}
impl PcapBuilder {
    /// set the buffer size for a capture
    ///
    /// `set_buffer_size()` sets the buffer size that will be used on a capture
    /// handle when the handle is activated to buffer_size, which is in units of
    /// bytes.
    pub fn set_buffer_size(self, buffer_size: usize) -> Result<PcapBuilder> {
        pcap_set_buffer_size(&self.pcap_t, buffer_size).map(|()| self)
    }
    /// set promiscuous mode for a capture
    ///
    /// `set_promisc()` sets whether promiscuous mode should be set on a capture
    /// handle when the handle is activated.
    pub fn set_promiscuous(self, promiscuous: bool) -> Result<PcapBuilder> {
        pcap_set_promisc(&self.pcap_t, promiscuous).map(|()| self)
    }
    /// set immediate mode for a capture
    ///
    /// `set_immediate_mode()` sets whether immediate mode should be set on a
    /// capture handle when the handle is activated. In immediate mode, packets
    /// are always delivered as soon as they arrive, with no buffering.
    pub fn set_immediate(self, immediate: bool) -> Result<PcapBuilder> {
        pcap_set_immediate_mode(&self.pcap_t, immediate).map(|()| self)
    }
    /// set packet buffer timeout for a capture
    ///
    /// `pcap_set_timeout()` sets the packet buffer timeout that will be used on
    /// a capture handle when the handle is activated to to_ms, which is in
    /// units of milliseconds.
    pub fn set_timeout(self, to_ms: Duration) -> Result<PcapBuilder> {
        // Saturate at i32::MAX rather than wrapping on the u128 -> i32 cast.
        let millis = to_ms.as_millis().min(i32::MAX as u128) as i32;
        pcap_set_timeout(&self.pcap_t, millis).map(|()| self)
    }
    /// set the snapshot length for a capture
    ///
    /// `set_snaplen()` sets the snapshot length to be used on a capture handle
    /// when the handle is activated to snaplen.
    ///
    /// `libpcap` says 65535 bytes should be enough for everyone.
    pub fn set_snaplen(self, snaplen: usize) -> Result<PcapBuilder> {
        pcap_set_snaplen(&self.pcap_t, snaplen).map(|()| self)
    }
    /// activate a capture
    ///
    /// `activate()` is used to activate a packet capture to look at packets on
    /// the network, with the options that were set on the handle being in
    /// effect.
    pub fn activate(self) -> Result<Pcap> {
        pcap_activate(&self.pcap_t).map(|()| Pcap {
            pcap_t: self.pcap_t,
        })
    }
}
/// A BPF filter program for Pcap.
pub struct PcapFilter {
    // Compiled program from pcap_compile; freed in `Drop` via pcap_freecode.
    bpf_program: libpcap::bpf_program,
}
impl PcapFilter {
    /// compile a filter expression
    ///
    /// `compile()` is used to compile the filter into a filter program. See
    /// [pcap-filter(7)](https://www.tcpdump.org/manpages/pcap-filter.7.html)
    /// for the syntax of that string.
    pub fn compile(filter: &str) -> Result<PcapFilter> {
        // Compiles against a dummy "dead" handle when no live capture exists.
        let pcap = pcap_open_dead()?;
        pcap_compile(&pcap, filter)
    }
    /// compile a filter expression with `PcapT`
    ///
    /// `compile_with_pcap_t()` is used to compile the filter into a filter
    /// program. See
    /// [pcap-filter(7)](https://www.tcpdump.org/manpages/pcap-filter.7.html)
    /// for the syntax of that string.
    pub fn compile_with_pcap_t(pcap_t: &PcapT, filter_str: &str) -> Result<PcapFilter> {
        pcap_compile(pcap_t, filter_str)
    }
    /// Get length of the compiled filter
    pub fn get_raw_filter_len(&self) -> u32 {
        self.bpf_program.bf_len
    }
    /// Get pointer to the raw compiled filter program.
    /// Raw filter may be used when attaching filter to socket outside libpcap.
    /// # Safety
    /// Note that the pointer is valid only as long as this filter is valid.
    /// The returned pointer will be cast as *void since there is no common
    /// structure to which export the program.
    pub unsafe fn get_raw_filter(&self) -> &std::ffi::c_void {
        // NOTE(review): `.unwrap()` panics if `bf_insns` is NULL — confirm
        // pcap_compile can never produce an empty instruction list.
        (self.bpf_program.bf_insns as *const std::ffi::c_void)
            .as_ref()
            .unwrap()
    }
}
impl Drop for PcapFilter {
    fn drop(&mut self) {
        log::trace!("PcapFilter::drop({:p})", &self.bpf_program);
        // SAFETY: bpf_program was initialized by pcap_compile and is freed
        // exactly once here.
        unsafe { luomu_libpcap_sys::pcap_freecode(&mut self.bpf_program) }
    }
}
/// Pcap capture iterator
pub struct PcapIter<'p> {
    // Borrow ties the iterator's lifetime to the capture handle.
    pcap_t: &'p PcapT,
}
impl<'p> PcapIter<'p> {
    // Internal constructor used by `Pcap::capture()`.
    fn new(pcap_t: &'p PcapT) -> Self {
        PcapIter { pcap_t }
    }
}
impl<'p> Iterator for PcapIter<'p> {
    type Item = BorrowedPacket;
    // Returns the next captured packet; timeouts are retried, any other
    // error ends the iteration with `None`.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            match pcap_next_ex(self.pcap_t) {
                Ok(p) => return Some(p),
                Err(e) => match e {
                    // pcap_next_ex() sometimes seems to return
                    // "packet buffer expired" (whatever that means),
                    // even if the immediate mode is set. Just retry in
                    // this case.
                    Error::Timeout => continue,
                    _ => return None,
                },
            }
        }
    }
}
/// Pcap capture statistics
pub struct PcapStat {
    // Raw counters as filled in by pcap_stats().
    stats: libpcap::pcap_stat,
}
impl default::Default for PcapStat {
    // Zero-initialized counters, ready to be filled by `Pcap::stats()`.
    fn default() -> Self {
        PcapStat {
            stats: libpcap::pcap_stat {
                ps_recv: 0,
                ps_drop: 0,
                ps_ifdrop: 0,
            },
        }
    }
}
impl PcapStat {
    /// Return number of packets received.
    pub fn packets_received(&self) -> u32 {
        self.stats.ps_recv
    }
    /// Return number of packets dropped because there was no room in the
    /// operating system's buffer when they arrived, because packets weren't
    /// being read fast enough.
    pub fn packets_dropped(&self) -> u32 {
        self.stats.ps_drop
    }
    /// Return number of packets dropped by the network interface or its driver.
    pub fn packets_dropped_interface(&self) -> u32 {
        self.stats.ps_ifdrop
    }
}
/// Keeper of the `libpcap`'s `pcap_if_t`.
pub struct PcapIfT {
    // Head of the device list from pcap_findalldevs; freed in `Drop`.
    pcap_if_t: *mut libpcap::pcap_if_t,
}
impl PcapIfT {
    /// get a list of capture devices
    ///
    /// Constructs a list of network devices that can be opened with
    /// `Pcap::new()` and `Pcap::builder()`. Note that there may be network
    /// devices that cannot be opened by the process calling, because, for
    /// example, that process does not have sufficient privileges to open them
    /// for capturing; if so, those devices will not appear on the list.
    pub fn new() -> Result<Self> {
        pcap_findalldevs()
    }
    /// Return iterator for iterating capture devices.
    pub fn iter(&self) -> InterfaceIter {
        InterfaceIter {
            start: self.pcap_if_t,
            next: Some(self.pcap_if_t),
        }
    }
    /// Get all capture devices.
    pub fn get_interfaces(&self) -> HashSet<Interface> {
        self.iter().collect()
    }
    /// Find capture device with interface name `name`.
    pub fn find_interface_with_name(&self, name: &str) -> Option<Interface> {
        let found = self
            .get_interfaces()
            .into_iter()
            .find(|interface| interface.has_name(name));
        if let Some(interface) = &found {
            log::trace!("find_interface_with_name({}) = {:?}", name, interface);
        }
        found
    }
    /// Find capture device which have IP address `ip`.
    pub fn find_interface_with_ip(&self, ip: &IpAddr) -> Option<String> {
        let found = self
            .get_interfaces()
            .into_iter()
            .find(|interface| interface.has_address(ip));
        if let Some(interface) = &found {
            log::trace!("find_interface_with_ip({}) = {:?}", ip, interface);
        }
        found.map(|interface| interface.name)
    }
}
impl Drop for PcapIfT {
    fn drop(&mut self) {
        log::trace!("PcapIfT::drop({:?})", self.pcap_if_t);
        // SAFETY: the device list was allocated by pcap_findalldevs and is
        // freed exactly once here.
        unsafe { luomu_libpcap_sys::pcap_freealldevs(self.pcap_if_t) }
    }
}
/// A network device that can be opened with `Pcap::new()` and
/// `Pcap::builder()`.
///
/// Built from libpcap's `pcap_if_t` by `try_interface_from`.
#[derive(Debug, PartialEq, Eq, Hash)]
pub struct Interface {
    /// Devices name
    pub name: String,
    /// Devices description
    pub description: Option<String>,
    /// All addresses found from device
    pub addresses: BTreeSet<InterfaceAddress>,
    /// Flags set for device
    pub flags: BTreeSet<InterfaceFlag>,
}
impl Interface {
    /// True if interface is up
    pub fn is_up(&self) -> bool {
        // `contains` expresses membership directly; `get(..).is_some()` is the
        // non-idiomatic spelling of the same test.
        self.flags.contains(&InterfaceFlag::Up)
    }
    /// True if interface is running
    pub fn is_running(&self) -> bool {
        self.flags.contains(&InterfaceFlag::Running)
    }
    /// True if interface is loopback
    pub fn is_loopback(&self) -> bool {
        self.flags.contains(&InterfaceFlag::Loopback)
    }
    /// True if interface has name `name`
    pub fn has_name(&self, name: &str) -> bool {
        self.name == name
    }
    /// Return MAC aka Ethernet address of the interface, if any of its
    /// addresses is a MAC address.
    pub fn get_ether_address(&self) -> Option<MacAddr> {
        self.addresses.iter().find_map(|ia| match ia.addr {
            Address::Mac(addr) => Some(addr),
            _ => None,
        })
    }
    /// Return IP addresses of interface
    pub fn get_ip_addresses(&self) -> HashSet<IpAddr> {
        self.addresses
            .iter()
            .filter_map(|i| IpAddr::try_from(&i.addr).ok())
            .collect()
    }
    /// True if interface has IP address `ip`
    pub fn has_address(&self, ip: &IpAddr) -> bool {
        self.get_ip_addresses().contains(ip)
    }
}
/// Interface iterator
///
/// Iterates all capture interfaces.
///
/// NOTE(review): the raw pointers point into the device list owned by the
/// `PcapIfT` this iterator was created from, but no lifetime ties the two
/// together; the iterator must not outlive that `PcapIfT` (whose `Drop`
/// calls `pcap_freealldevs`) — confirm callers uphold this.
pub struct InterfaceIter {
    // First item in linked list, only used for trace logging
    start: *mut libpcap::pcap_if_t,
    // Next item in linked list, used for iteration
    next: Option<*mut libpcap::pcap_if_t>,
}
impl Iterator for InterfaceIter {
    type Item = Interface;
    // Walk libpcap's singly linked pcap_if_t list one element per call.
    fn next(&mut self) -> Option<Interface> {
        log::trace!(
            "InterfaceIter(start: {:p}, next: {:p})",
            self.start,
            self.next.unwrap_or(std::ptr::null_mut())
        );
        // `None` means the list was exhausted on a previous call.
        let pcap_if_t = self.next?;
        if pcap_if_t.is_null() {
            self.next = None;
            return None;
        }
        // SAFETY: pcap_if_t is non-null and points into the list allocated by
        // pcap_findalldevs; `next` links to the following element or NULL.
        let next = unsafe { (*pcap_if_t).next };
        if next.is_null() {
            self.next = None;
        } else {
            self.next = Some(next);
        }
        match try_interface_from(pcap_if_t) {
            Ok(dev) => Some(dev),
            Err(err) => {
                // NOTE(review): a conversion error ends the whole iteration
                // here, while AddressIter skips unconvertible entries and
                // continues — confirm this asymmetry is intended.
                log::error!("try_interface_from{:p}: {}", pcap_if_t, err);
                None
            }
        }
    }
}
/// Collection of addresses for network interface.
///
/// The fields mirror libpcap's `pcap_addr` structure
/// (addr / netmask / broadaddr / dstaddr).
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct InterfaceAddress {
    /// Network interface's address
    addr: Address,
    /// The netmask corresponding to the address pointed to by addr.
    netmask: Option<Address>,
    /// The broadcast address corresponding to the address pointed to by addr;
    /// may be `None` if the device doesn't support broadcasts.
    broadaddr: Option<Address>,
    /// The destination address corresponding to the address pointed to by addr;
    /// may be `None` if the device isn't a point-to-point interface.
    dstaddr: Option<Address>,
}
/// Iterator for network device's addresses.
///
/// NOTE(review): the raw pointers point into a pcap_addr list owned by
/// libpcap's device list; this iterator must not outlive the `PcapIfT`
/// that owns that list — confirm callers uphold this.
pub struct AddressIter {
    // First item in linked list, only used for trace logging
    start: *mut libpcap::pcap_addr_t,
    // Next item in linked list, used for iteration
    next: Option<*mut libpcap::pcap_addr_t>,
}
impl Iterator for AddressIter {
    type Item = InterfaceAddress;
    /// Advance to the next address in libpcap's linked `pcap_addr` list.
    ///
    /// Entries whose address family `try_address_from` cannot represent are
    /// skipped. Skipping is done with a loop instead of the previous tail
    /// recursion, so a long run of unsupported addresses cannot grow the
    /// call stack.
    fn next(&mut self) -> Option<InterfaceAddress> {
        loop {
            log::trace!(
                "AddressIter(start: {:p}, next: {:p})",
                self.start,
                self.next.unwrap_or(std::ptr::null_mut())
            );
            let pcap_addr_t = self.next?;
            if pcap_addr_t.is_null() {
                self.next = None;
                return None;
            }
            // SAFETY: pcap_addr_t is non-null and points into the list owned
            // by libpcap; `next` links to the following entry or NULL.
            let next = unsafe { (*pcap_addr_t).next };
            if next.is_null() {
                self.next = None;
            } else {
                self.next = Some(next);
            }
            if let Some(dev) = try_address_from(pcap_addr_t) {
                return Some(dev);
            }
            // Address was something we don't know how to handle. Continue
            // with the next address in the list.
        }
    }
}
/// Various flags which can be set on network interface
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum InterfaceFlag {
/// set if the interface is a loopback interface
Loopback,
/// set if the interface is up
Up,
/// set if the interface is running
Running,
} | random_line_split | |
datatransform.py | """
DataTransform Module
- IDStruct
- DataTransform (superclass)
"""
# pylint: disable=E0401, E0611
import re
from functools import wraps
from biothings.utils.common import is_str, iter_n
from biothings.utils.loggers import get_logger
from .histogram import Histogram
class IDStruct(object):
    """
    IDStruct - id structure for use with the DataTransform classes. The basic idea
    is to provide a structure that provides a list of (original_id, current_id)
    pairs.

    Internally two dictionaries of sets are kept: ``forward`` maps an original
    id to the set of current ids derived from it, and ``inverse`` maps a
    current id back to the set of original ids. ``debug`` optionally records
    the chain of transformations for individual keys.
    """
    def __init__(self, field=None, doc_lst=None):
        """
        Initialize the structure
        :param field: field for documents to use as an initial id (optional)
        :param doc_lst: list of documents to use when building an initial list (optional)
        """
        self.forward = {}
        self.inverse = {}
        self.debug = {}
        if field and doc_lst:
            self._init_strct(field, doc_lst)
    def _init_strct(self, field, doc_lst):
        """Initialize the structure with (id, id) pairs read from each doc's `field`."""
        for doc in doc_lst:
            value = nested_lookup(doc, field)
            if value:
                self.add(value, value)
    def __iter__(self):
        """Iterate over all (original_id, current_id) pairs."""
        for key in self.forward:
            for val in self.forward[key]:
                yield key, val
    def add(self, left, right):
        """add a (original_id, current_id) pair to the list"""
        if not left or not right:
            return  # identifiers cannot be None
        if self.lookup(left, right):
            return  # tuple already in the list
        # Wrap scalars so both sides are iterable.
        if not isinstance(left, (list, tuple)):
            left = [left]
        if not isinstance(right, (list, tuple)):
            right = [right]
        # Normalize both sides to sets. The previous code only converted
        # *lists*, so tuple arguments flowed through unchanged and the
        # set-union operations below raised TypeError (set | tuple).
        # set() handles lists and tuples uniformly and collapses duplicates.
        left = set(left)
        right = set(right)
        for val in left:
            if val not in self.forward.keys():
                self.forward[val] = right
            else:
                self.forward[val] = self.forward[val] | right
        for val in right:
            if val not in self.inverse.keys():
                self.inverse[val] = left
            else:
                self.inverse[val] = self.inverse[val] | left
    def __iadd__(self, other):
        """object += additional, which combines lists"""
        if not isinstance(other, IDStruct):
            raise TypeError("other is not of type IDStruct")
        for left, right in other:
            self.add(left, right)
            # retain debug information
            self.transfer_debug(left, other)
        return self
    def __len__(self):
        """Return the number of keys (forward direction)"""
        return len(self.forward.keys())
    def __str__(self):
        """convert to a string, useful for debugging"""
        lst = []
        for key in self.forward:
            for val in self.forward[key]:
                lst.append((key, val))
        return str(lst)
    @property
    def id_lst(self):
        """Build up a list of current ids"""
        id_set = set()
        for key in self.forward:
            for val in self.forward[key]:
                id_set.add(val)
        return list(id_set)
    def lookup(self, left, right):
        """Find if a (left, right) pair is already in the list"""
        for val in self.find_left(left):
            if right == val:
                return True
        return False
    @staticmethod
    def side(_id, where):
        """Find if an _id is a key in where"""
        if isinstance(_id, list):
            _id = tuple(_id)
        return _id in where.keys()
    def left(self, key):
        """Determine if the id (left, _) is registered"""
        return self.side(key, self.forward)
    @staticmethod
    def find(where, ids):
        """Yield every value mapped from any of `ids` in dictionary `where`."""
        if not ids:
            return
        if not isinstance(ids, (set, list, tuple)):
            ids = [ids]
        for key in ids:
            if key in where:
                for i in where[key]:
                    yield i
    def find_left(self, ids):
        """Find left values given a list of ids"""
        return self.find(self.forward, ids)
    def right(self, key):
        """Determine if the id (_, right) is registered"""
        return self.side(key, self.inverse)
    def find_right(self, ids):
        """Find the first id founding by searching the (_, right) identifiers"""
        return self.find(self.inverse, ids)
    def set_debug(self, left, label, right):
        """Set debug (left, right) debug values for the structure"""
        # lowercase left and right keys
        if is_str(left):
            left = left.lower()
        if is_str(right):
            right = right.lower()
        # remove duplicates in the debug structure
        # - duplicates in the structure itself are
        # - handled elsewhere
        if isinstance(right, list):
            right = list(set(right))
            # if there is only one element in the list, collapse
            if len(right) == 1:
                right = right.pop()
        # capture the label if it is used
        if label:
            right = (label, right)
        try:
            self.debug[left] = self.debug[left] + [right]
        except KeyError:
            # first entry for this key also records the key itself
            self.debug[left] = [left, right]
    def get_debug(self, key):
        """Get debug information for a given key"""
        # lowercase key if possible
        if is_str(key):
            key = key.lower()
        # return debug information
        if isinstance(key, list):
            return "type(list)"
        try:
            return self.debug[key]
        except KeyError:
            return "not-available"
    def import_debug(self, other):
        """
        import debug information the entire IDStruct object
        """
        for key in other.debug:
            self.transfer_debug(key, other)
    def transfer_debug(self, key, other):
        """
        transfer debug information for one key in the IDStruct object
        """
        # ensure lower case key
        if is_str(key):
            key = key.lower()
        # transfer debug information
        self.debug[key] = other.get_debug(key)
class DataTransform(object):
"""DataTransform class. This class is the public interface for
the DataTransform module. Much of the core logic is
in the subclass."""
# pylint: disable=R0902
# Constants
batch_size = 1000
DEFAULT_WEIGHT = 1
default_source = "_id"
debug = False
def __init__(
self,
input_types,
output_types,
id_priority_list=None,
skip_on_failure=False,
skip_w_regex=None,
skip_on_success=False,
idstruct_class=IDStruct,
copy_from_doc=False,
debug=False,
):
# pylint: disable=R0913, W0102
"""
Initialize the keylookup object and precompute paths from the
start key to all target keys.
The decorator is intended to be applied to the load_data function
of an uploader. The load_data function yields documents, which
are then post processed by call and the 'id' key conversion is
performed.
:param G: nx.DiGraph (networkx 2.1) configuration graph
:param collections: list of mongodb collection names
:param input_type: key type to start key lookup from
:param output_types: list of all output types to convert to
:param id_priority_list: A priority list of identifiers to to
sort input and output types by.
:type id_priority_list: list(str)
:param id_struct_class: IDStruct used to manager/fetch IDs from docs
:param copy_from_doc: if transform failed using the graph, try to get
value from the document itself when output_type == input_type.
No check is performed, it's a straight copy. If checks are needed
(eg. check that an ID referenced in the doc actually exists in
another collection, nodes with self-loops can be used, so
ID resolution will be forced to go through these loops to ensure
data exists)
"""
self.input_types = self._parse_input_types(input_types)
self.output_types = self._parse_output_types(output_types)
self.id_priority_list = id_priority_list or []
self.skip_on_failure = skip_on_failure
self.skip_on_success = skip_on_success
if skip_w_regex and not isinstance(skip_w_regex, str):
raise ValueError("skip_w_regex must be a string")
elif not skip_w_regex:
self.skip_w_regex = None
else:
self.skip_w_regex = re.compile(skip_w_regex)
self.idstruct_class = idstruct_class
self.copy_from_doc = copy_from_doc
self.histogram = Histogram()
# Setup logger and logging level
self.logger, _ = get_logger("datatransform")
self.debug = debug
def _parse_input_types(self, input_types):
"""
Parse the input_types argument
:return:
"""
res_input_types = []
if isinstance(input_types, str):
input_types = [input_types]
if isinstance(input_types, list):
for input_type in input_types:
if isinstance(input_type, (tuple, list)):
if not self._valid_input_type(input_type[0]):
raise ValueError("input_type '%s' is not a node in the key_lookup graph" % repr(input_type[0]))
res_input_types.append((input_type[0].lower(), input_type[1]))
elif isinstance(input_type, str):
if not self._valid_input_type(input_type.lower()):
raise ValueError("input_type '%s' is not a node in the key_lookup graph" % repr(input_type))
res_input_types.append((input_type, self.default_source))
else:
raise ValueError("Provided input_types is not of the correct type")
else:
raise ValueError("Provided input_types is not of the correct type")
return res_input_types
def _valid_input_type(self, input_type):
# pylint: disable=W0613, R0201
"""In the base class, all input_types and output_types are valid."""
return True
def _parse_output_types(self, output_types):
"""
Parse through output_types
:param output_types:
:return:
"""
if not isinstance(output_types, list):
raise ValueError("output_types should be of type list")
for output_type in output_types:
if not self._valid_output_type(output_type):
raise ValueError("output_type is not a node in the key_lookup graph")
return output_types
def _valid_output_type(self, output_type):
# pylint: disable=W0613, R0201
"""In the base class, all input_types and output_types are valid."""
return True
def __call__(self, func, debug=None):
"""
Perform the data transformation on all documents on call.
:param func: function to apply to
:param debug: Enable debugging information.
:type debug: bool
:param debug: Enable debugging information. When enabled, debugging information
will be retained in the 'dt_debug' field of each document. This parameter
can be either list of original id's to retain debugging information for or
a True, which will retain debugging information for all documents.
:type debug: bool or list(str)
:return:
"""
# additional handling for the debug option
if not debug:
self.debug = False
elif debug is True:
self.debug = True
self.logger.debug("DataTransform Debug Mode Enabled for all documents.")
elif isinstance(debug, list):
self.logger.debug("DataTransform Debug Mode: {}".format(debug))
self.debug = debug
@wraps(func)
def wrapped_f(*args):
"""This is a wrapped function which will be called by the decorator method."""
input_docs = func(*args)
output_doc_cnt = 0
# split input_docs into chunks of size self.batch_size
for batchiter in iter_n(input_docs, int(self.batch_size / len(self.input_types))):
output_docs = self.key_lookup_batch(batchiter)
for odoc in output_docs:
# print debug information if the original id is the in the debug list
if "dt_debug" in odoc:
if isinstance(self.debug, list) and odoc["dt_debug"]["orig_id"] in self.debug:
self.logger.debug("DataTransform Debug doc['dt_debug']: {}".format(odoc["dt_debug"]))
output_doc_cnt += 1
yield odoc
self.logger.info("wrapped_f Num. output_docs: {}".format(output_doc_cnt))
self.logger.info("DataTransform.histogram: {}".format(self.histogram))
return wrapped_f
def key_lookup_batch(self, batchiter):
"""
Core method for looking up all keys in batch (iterator)
:param batchiter:
:return:
"""
pass
def lookup_one(self, doc):
"""
KeyLookup on document. This method is called as a function call instead of a
decorator on a document iterator.
"""
# special handling for the debug option
self.debug = [doc["_id"]]
output_docs = self.key_lookup_batch([doc])
for odoc in output_docs:
# print debug information if available
if self.debug and "dt_debug" in odoc:
self.logger.debug("DataTransform Debug doc['dt_debug']: {}".format(odoc["dt_debug"]))
yield odoc
self.logger.info("DataTransform.histogram: {}".format(self.histogram))
@staticmethod
def _nested_lookup(doc, field):
"""
Performs a nested lookup of doc using a period (.) delimited
list of fields. This is a nested dictionary lookup.
:param doc: document to perform lookup on
:param field: period delimited list of fields
:return:
"""
value = doc
keys = field.split(".")
try:
for k in keys:
value = value[k]
except KeyError:
return None
return str(value)
@property
def id_priority_list(self):
"""Property method for getting id_priority_list"""
return self._id_priority_list
@id_priority_list.setter
def id_priority_list(self, value):
# pylint: disable=W0201
"""Property method for setting id_priority_list and
sorting input_types and output_types."""
self._id_priority_list = value
self.input_types = self.sort_input_by_priority_list(self.input_types)
self.output_types = self.sort_output_by_priority_list(self.output_types)
def sort_input_by_priority_list(self, input_types):
"""
Reorder the given input_types to follow a priority list. Inputs not in the
priority list should remain in their given order at the end of the list.
"""
# construct temporary id_priority_list with extra elements at the end
id_priority_list = self._expand_priority_order([x[0] for x in input_types])
input_types = sorted(input_types, key=lambda e: self._priority_order(id_priority_list, e[0]))
return input_types
def sort_output_by_priority_list(self, output_types):
"""
Reorder the given output_types to follow a priority list. Outputs not in the
priority list should remain in their given order at the end of the list.
"""
# construct temporary id_priority_list with extra elements at the end
id_priority_list = self._expand_priority_order(output_types)
output_types = sorted(output_types, key=lambda e: self._priority_order(id_priority_list, e))
return output_types
def _expand_priority_order(self, id_list):
"""
Expand the self.id_priority_list to also include elements in id_list that are not
in the priority list. These elements are added to the priority list in the order
that they appear in the id_list.
Example:
> self.id_priority_list = ['a', 'c']
> self._expand_priority_order(['a', 'd', 'e'])
['a', 'c', 'd', 'e']
"""
res = self.id_priority_list.copy()
for key in id_list:
if key not in self.id_priority_list:
res.append(key)
return res
@staticmethod
def _priority_order(id_priority_list, elem):
"""
Determine the priority order of an input_type following a id_priority_list.
This list, first defined in DataTransformMDB is used to reorder the input_types
so that their order matches the id types listed in id_priority_list. If an id
type is not in that list then the input_type will be placed at the end of the list
in arbitrary order.
"""
assert isinstance(id_priority_list, list)
# match id types with id priority
for index, id_elem in enumerate(id_priority_list):
if elem == id_elem:
return index
# the id type is not in id_priority_list so it will be placed last
return len(id_priority_list) + 1
class DataTransformEdge(object):
    """
    DataTransformEdge. This class contains information needed to
    transform one key to another.

    Non-picklable members (the logger) live in ``self._state`` so that
    instances can be pickled via unprepare()/prepare().
    """
    def __init__(self, label=None):
        """
        Initialize the class
        :param label: A label can be used for debugging purposes.
        """
        self.prepared = False
        self.label = label
        self.init_state()
    def edge_lookup(self, keylookup_obj, id_strct, debug=False):
        # pylint: disable=E1102, R0201, W0613
        """
        virtual method for edge lookup. Each edge class is
        responsible for its own lookup procedures given a
        keylookup_obj and an id_strct
        :param keylookup_obj:
        :param id_strct: - list of tuples (orig_id, current_id)
        :return:
        """
        # NOTE(review): `NotImplemented` is not callable, so consuming this
        # generator raises TypeError — possibly NotImplementedError was
        # intended; confirm before changing since subclasses override this.
        yield NotImplemented("This method must be overridden by the base class.")
    def init_state(self):
        """initialize the state of pickleable objects"""
        self._state = {"logger": None}
    @property
    def logger(self):
        """getter for the logger property"""
        # lazily (re)create the logger after unpickling
        if not self._state["logger"]:
            self.prepare()
        return self._state["logger"]
    @logger.setter
    def logger(self, value):
        """setter for the logger variable"""
        self._state["logger"] = value
    def setup_log(self):
        """setup the logger member variable"""
        self.logger, _ = get_logger("datatransform")
    def prepare(self, state=None):
        # pylint: disable=W0102
        """Prepare class state objects (pickleable objects)"""
        state = state or {}
        if self.prepared:
            return
        if state:
            # let's be explicit, _state takes what it wants
            for k in self._state:
                self._state[k] = state[k]
            return
        self.setup_log()
    def unprepare(self):
        """
        reset anything that's not picklable (so self can be pickled)
        return what's been reset as a dict, so self can be restored
        once pickled
        """
        state = {
            "logger": self._state["logger"],
        }
        for k in state:
            self._state[k] = None
        self.prepared = False
        return state
class RegExEdge(DataTransformEdge):
"""
The RegExEdge allows an identifier to be transformed using a
regular expression. POSIX regular expressions are supported.
"""
def __init__(self, from_regex, to_regex, weight=1, label=None):
"""
:param from_regex: The first parameter of the regular expression substitution.
:type from_regex: str
:param to_regex: The second parameter of the regular expression substitution.
:type to_regex: str
:param weight: Weights are used to prefer one path over another. The path
with the lowest weight is preferred. The default weight is 1.
:type weight: int
"""
super(RegExEdge, self).__init__(label)
self.from_regex = from_regex
self.to_regex = to_regex
self.weight = weight
def edge_lookup(self, keylookup_obj, id_strct, debug=False):
"""
Transform identifiers using a regular expression substitution.
"""
res_id_strct = IDStruct()
for left, right in id_strct:
|
return res_id_strct
def nested_lookup(doc, field):
"""
Performs a nested lookup of doc using a period (.) delimited
list of fields. This is a nested dictionary lookup.
:param doc: document to perform lookup on
:param field: period delimited list of fields
:return:
"""
value = doc
keys = field.split(".")
try:
for k in keys:
if isinstance(value, (list, tuple)):
# assuming we have a list of dict with k as one of the keys
stype = set([type(e) for e in value])
if not stype:
return None
assert len(stype) == 1 and stype == {dict}, "Expecting a list of dict, found types: %s" % stype
value = [e[k] for e in value if e.get(k)]
# can't go further ?
return value
else:
value = value[k]
except KeyError:
return None
return value
| res_id_strct.add(left, re.sub(self.from_regex, self.to_regex, right)) | conditional_block |
datatransform.py | """
DataTransform Module
- IDStruct
- DataTransform (superclass)
"""
# pylint: disable=E0401, E0611
import re
from functools import wraps
from biothings.utils.common import is_str, iter_n
from biothings.utils.loggers import get_logger
from .histogram import Histogram
class IDStruct(object):
"""
IDStruct - id structure for use with the DataTransform classes. The basic idea
is to provide a structure that provides a list of (original_id, current_id)
pairs.
"""
def __init__(self, field=None, doc_lst=None):
"""
Initialize the structure
:param field: field for documents to use as an initial id (optional)
:param doc_lst: list of documents to use when building an initial list (optional)
"""
self.forward = {}
self.inverse = {}
self.debug = {}
if field and doc_lst:
self._init_strct(field, doc_lst)
def _init_strct(self, field, doc_lst):
"""initialze _id_tuple_lst"""
for doc in doc_lst:
value = nested_lookup(doc, field)
if value:
self.add(value, value)
def __iter__(self):
"""iterator overload function"""
for key in self.forward:
for val in self.forward[key]:
yield key, val
def add(self, left, right):
"""add a (original_id, current_id) pair to the list"""
if not left or not right:
return # identifiers cannot be None
if self.lookup(left, right):
return # tuple already in the list
# ensure it's hashable
if not isinstance(left, (list, tuple)):
left = [left]
if not isinstance(right, (list, tuple)):
right = [right]
# These two blocks collapse duplicates in a list of keys
if isinstance(left, list):
left = set(left)
if isinstance(right, list):
right = set(right)
for val in left:
if val not in self.forward.keys():
self.forward[val] = right
else:
self.forward[val] = self.forward[val] | right
for val in right:
if val not in self.inverse.keys():
self.inverse[val] = left
else:
self.inverse[val] = self.inverse[val] | left
def __iadd__(self, other):
"""object += additional, which combines lists"""
if not isinstance(other, IDStruct):
raise TypeError("other is not of type IDStruct")
for left, right in other:
self.add(left, right)
# retain debug information
self.transfer_debug(left, other)
return self
def __len__(self):
"""Return the number of keys (forward direction)"""
return len(self.forward.keys())
def __str__(self):
"""convert to a string, useful for debugging"""
lst = []
for key in self.forward:
for val in self.forward[key]:
lst.append((key, val))
return str(lst)
@property
def id_lst(self):
"""Build up a list of current ids"""
id_set = set()
for key in self.forward:
for val in self.forward[key]:
id_set.add(val)
return list(id_set)
def lookup(self, left, right):
"""Find if a (left, right) pair is already in the list"""
for val in self.find_left(left):
if right == val:
return True
return False
@staticmethod
def side(_id, where):
"""Find if an _id is a key in where"""
if isinstance(_id, list):
_id = tuple(_id)
return _id in where.keys()
def left(self, key):
"""Determine if the id (left, _) is registered"""
return self.side(key, self.forward)
@staticmethod
def find(where, ids):
"""Find all ids in dictionary where"""
if not ids:
return
if not isinstance(ids, (set, list, tuple)):
ids = [ids]
for key in ids:
if key in where:
for i in where[key]:
yield i
def find_left(self, ids):
"""Find left values given a list of ids"""
return self.find(self.forward, ids)
def right(self, key):
"""Determine if the id (_, right) is registered"""
return self.side(key, self.inverse)
def find_right(self, ids):
"""Find the first id founding by searching the (_, right) identifiers"""
return self.find(self.inverse, ids)
def set_debug(self, left, label, right):
"""Set debug (left, right) debug values for the structure"""
# lowercase left and right keys
if is_str(left):
left = left.lower()
if is_str(right):
right = right.lower()
# remove duplicates in the debug structure
# - duplicates in the structure itself are
# - handled elsewhere
if isinstance(right, list):
right = list(set(right))
# if there is only one element in the list, collapse
if len(right) == 1:
right = right.pop()
# capture the label if it is used
if label:
right = (label, right)
try:
self.debug[left] = self.debug[left] + [right]
except KeyError:
self.debug[left] = [left, right]
def get_debug(self, key):
"""Get debug information for a given key"""
# lowercase key if possible
if is_str(key):
key = key.lower()
# return debug information
if isinstance(key, list):
return "type(list)"
try:
return self.debug[key]
except KeyError:
return "not-available"
def import_debug(self, other):
"""
import debug information the entire IDStruct object
"""
for key in other.debug:
self.transfer_debug(key, other)
def transfer_debug(self, key, other):
"""
transfer debug information for one key in the IDStruct object
"""
# ensure lower case key
if is_str(key):
key = key.lower()
# transfer debug information
self.debug[key] = other.get_debug(key)
class DataTransform(object):
"""DataTransform class. This class is the public interface for
the DataTransform module. Much of the core logic is
in the subclass."""
# pylint: disable=R0902
# Constants
batch_size = 1000
DEFAULT_WEIGHT = 1
default_source = "_id"
debug = False
def __init__(
self,
input_types,
output_types,
id_priority_list=None,
skip_on_failure=False,
skip_w_regex=None,
skip_on_success=False,
idstruct_class=IDStruct,
copy_from_doc=False,
debug=False,
):
# pylint: disable=R0913, W0102
"""
Initialize the keylookup object and precompute paths from the
start key to all target keys.
The decorator is intended to be applied to the load_data function
of an uploader. The load_data function yields documents, which
are then post processed by call and the 'id' key conversion is
performed.
:param G: nx.DiGraph (networkx 2.1) configuration graph
:param collections: list of mongodb collection names
:param input_type: key type to start key lookup from
:param output_types: list of all output types to convert to
:param id_priority_list: A priority list of identifiers to to
sort input and output types by.
:type id_priority_list: list(str)
:param id_struct_class: IDStruct used to manager/fetch IDs from docs
:param copy_from_doc: if transform failed using the graph, try to get
value from the document itself when output_type == input_type.
No check is performed, it's a straight copy. If checks are needed
(eg. check that an ID referenced in the doc actually exists in
another collection, nodes with self-loops can be used, so
ID resolution will be forced to go through these loops to ensure
data exists)
"""
self.input_types = self._parse_input_types(input_types)
self.output_types = self._parse_output_types(output_types)
self.id_priority_list = id_priority_list or []
self.skip_on_failure = skip_on_failure
self.skip_on_success = skip_on_success
if skip_w_regex and not isinstance(skip_w_regex, str):
raise ValueError("skip_w_regex must be a string")
elif not skip_w_regex:
self.skip_w_regex = None
else:
self.skip_w_regex = re.compile(skip_w_regex)
self.idstruct_class = idstruct_class
self.copy_from_doc = copy_from_doc
self.histogram = Histogram()
# Setup logger and logging level
self.logger, _ = get_logger("datatransform")
self.debug = debug
def _parse_input_types(self, input_types):
"""
Parse the input_types argument
:return:
"""
res_input_types = []
if isinstance(input_types, str):
input_types = [input_types]
if isinstance(input_types, list):
for input_type in input_types:
if isinstance(input_type, (tuple, list)):
if not self._valid_input_type(input_type[0]):
raise ValueError("input_type '%s' is not a node in the key_lookup graph" % repr(input_type[0]))
res_input_types.append((input_type[0].lower(), input_type[1]))
elif isinstance(input_type, str):
if not self._valid_input_type(input_type.lower()):
raise ValueError("input_type '%s' is not a node in the key_lookup graph" % repr(input_type))
res_input_types.append((input_type, self.default_source))
else:
raise ValueError("Provided input_types is not of the correct type")
else:
raise ValueError("Provided input_types is not of the correct type")
return res_input_types
def _valid_input_type(self, input_type):
# pylint: disable=W0613, R0201
"""In the base class, all input_types and output_types are valid."""
return True
def _parse_output_types(self, output_types):
"""
Parse through output_types
:param output_types:
:return:
"""
if not isinstance(output_types, list):
raise ValueError("output_types should be of type list")
for output_type in output_types:
if not self._valid_output_type(output_type):
raise ValueError("output_type is not a node in the key_lookup graph")
return output_types
def _valid_output_type(self, output_type):
# pylint: disable=W0613, R0201
"""In the base class, all input_types and output_types are valid."""
return True
def __call__(self, func, debug=None):
"""
Perform the data transformation on all documents on call.
:param func: function to apply to
:param debug: Enable debugging information.
:type debug: bool
:param debug: Enable debugging information. When enabled, debugging information
will be retained in the 'dt_debug' field of each document. This parameter
can be either list of original id's to retain debugging information for or
a True, which will retain debugging information for all documents.
:type debug: bool or list(str)
:return:
"""
# additional handling for the debug option
if not debug:
self.debug = False
elif debug is True:
self.debug = True
self.logger.debug("DataTransform Debug Mode Enabled for all documents.")
elif isinstance(debug, list):
self.logger.debug("DataTransform Debug Mode: {}".format(debug))
self.debug = debug
@wraps(func)
def wrapped_f(*args):
"""This is a wrapped function which will be called by the decorator method."""
input_docs = func(*args)
output_doc_cnt = 0
# split input_docs into chunks of size self.batch_size
for batchiter in iter_n(input_docs, int(self.batch_size / len(self.input_types))):
output_docs = self.key_lookup_batch(batchiter)
for odoc in output_docs:
# print debug information if the original id is the in the debug list
if "dt_debug" in odoc:
if isinstance(self.debug, list) and odoc["dt_debug"]["orig_id"] in self.debug:
self.logger.debug("DataTransform Debug doc['dt_debug']: {}".format(odoc["dt_debug"]))
output_doc_cnt += 1
yield odoc
self.logger.info("wrapped_f Num. output_docs: {}".format(output_doc_cnt))
self.logger.info("DataTransform.histogram: {}".format(self.histogram))
return wrapped_f
def key_lookup_batch(self, batchiter):
"""
Core method for looking up all keys in batch (iterator)
:param batchiter:
:return:
"""
pass
def lookup_one(self, doc):
"""
KeyLookup on document. This method is called as a function call instead of a
decorator on a document iterator.
"""
# special handling for the debug option
self.debug = [doc["_id"]]
output_docs = self.key_lookup_batch([doc])
for odoc in output_docs:
# print debug information if available
if self.debug and "dt_debug" in odoc:
self.logger.debug("DataTransform Debug doc['dt_debug']: {}".format(odoc["dt_debug"]))
yield odoc
self.logger.info("DataTransform.histogram: {}".format(self.histogram))
@staticmethod
def _nested_lookup(doc, field):
"""
Performs a nested lookup of doc using a period (.) delimited
list of fields. This is a nested dictionary lookup.
:param doc: document to perform lookup on
:param field: period delimited list of fields
:return:
"""
value = doc
keys = field.split(".")
try:
for k in keys:
value = value[k]
except KeyError:
return None
return str(value)
@property
def id_priority_list(self):
"""Property method for getting id_priority_list"""
return self._id_priority_list
@id_priority_list.setter
def id_priority_list(self, value):
# pylint: disable=W0201
"""Property method for setting id_priority_list and
sorting input_types and output_types."""
self._id_priority_list = value
self.input_types = self.sort_input_by_priority_list(self.input_types)
self.output_types = self.sort_output_by_priority_list(self.output_types)
def sort_input_by_priority_list(self, input_types):
"""
Reorder the given input_types to follow a priority list. Inputs not in the
priority list should remain in their given order at the end of the list.
"""
# construct temporary id_priority_list with extra elements at the end
id_priority_list = self._expand_priority_order([x[0] for x in input_types])
input_types = sorted(input_types, key=lambda e: self._priority_order(id_priority_list, e[0]))
return input_types
def sort_output_by_priority_list(self, output_types):
"""
Reorder the given output_types to follow a priority list. Outputs not in the
priority list should remain in their given order at the end of the list.
"""
# construct temporary id_priority_list with extra elements at the end
id_priority_list = self._expand_priority_order(output_types)
output_types = sorted(output_types, key=lambda e: self._priority_order(id_priority_list, e))
return output_types
def _expand_priority_order(self, id_list):
"""
Expand the self.id_priority_list to also include elements in id_list that are not
in the priority list. These elements are added to the priority list in the order
that they appear in the id_list.
Example:
> self.id_priority_list = ['a', 'c']
> self._expand_priority_order(['a', 'd', 'e'])
['a', 'c', 'd', 'e']
"""
res = self.id_priority_list.copy()
for key in id_list:
if key not in self.id_priority_list:
res.append(key)
return res
@staticmethod
def _priority_order(id_priority_list, elem):
"""
Determine the priority order of an input_type following a id_priority_list.
This list, first defined in DataTransformMDB is used to reorder the input_types
so that their order matches the id types listed in id_priority_list. If an id
type is not in that list then the input_type will be placed at the end of the list
in arbitrary order.
"""
assert isinstance(id_priority_list, list)
# match id types with id priority
for index, id_elem in enumerate(id_priority_list):
if elem == id_elem:
return index
# the id type is not in id_priority_list so it will be placed last
return len(id_priority_list) + 1
class DataTransformEdge(object):
"""
DataTransformEdge. This class contains information needed to
transform one key to another.
"""
def __init__(self, label=None):
"""
Initialize the class
:param label: A label can be used for debugging purposes.
"""
self.prepared = False
self.label = label
self.init_state() | """
virtual method for edge lookup. Each edge class is
responsible for its own lookup procedures given a
keylookup_obj and an id_strct
:param keylookup_obj:
:param id_strct: - list of tuples (orig_id, current_id)
:return:
"""
yield NotImplemented("This method must be overridden by the base class.")
def init_state(self):
"""initialize the state of pickleable objects"""
self._state = {"logger": None}
@property
def logger(self):
"""getter for the logger property"""
if not self._state["logger"]:
self.prepare()
return self._state["logger"]
@logger.setter
def logger(self, value):
"""setter for the logger variable"""
self._state["logger"] = value
def setup_log(self):
"""setup the logger member variable"""
self.logger, _ = get_logger("datatransform")
def prepare(self, state=None):
# pylint: disable=W0102
"""Prepare class state objects (pickleable objects)"""
state = state or {}
if self.prepared:
return
if state:
# let's be explicit, _state takes what it wants
for k in self._state:
self._state[k] = state[k]
return
self.setup_log()
def unprepare(self):
"""
reset anything that's not picklable (so self can be pickled)
return what's been reset as a dict, so self can be restored
once pickled
"""
state = {
"logger": self._state["logger"],
}
for k in state:
self._state[k] = None
self.prepared = False
return state
class RegExEdge(DataTransformEdge):
"""
The RegExEdge allows an identifier to be transformed using a
regular expression. POSIX regular expressions are supported.
"""
def __init__(self, from_regex, to_regex, weight=1, label=None):
"""
:param from_regex: The first parameter of the regular expression substitution.
:type from_regex: str
:param to_regex: The second parameter of the regular expression substitution.
:type to_regex: str
:param weight: Weights are used to prefer one path over another. The path
with the lowest weight is preferred. The default weight is 1.
:type weight: int
"""
super(RegExEdge, self).__init__(label)
self.from_regex = from_regex
self.to_regex = to_regex
self.weight = weight
def edge_lookup(self, keylookup_obj, id_strct, debug=False):
"""
Transform identifiers using a regular expression substitution.
"""
res_id_strct = IDStruct()
for left, right in id_strct:
res_id_strct.add(left, re.sub(self.from_regex, self.to_regex, right))
return res_id_strct
def nested_lookup(doc, field):
"""
Performs a nested lookup of doc using a period (.) delimited
list of fields. This is a nested dictionary lookup.
:param doc: document to perform lookup on
:param field: period delimited list of fields
:return:
"""
value = doc
keys = field.split(".")
try:
for k in keys:
if isinstance(value, (list, tuple)):
# assuming we have a list of dict with k as one of the keys
stype = set([type(e) for e in value])
if not stype:
return None
assert len(stype) == 1 and stype == {dict}, "Expecting a list of dict, found types: %s" % stype
value = [e[k] for e in value if e.get(k)]
# can't go further ?
return value
else:
value = value[k]
except KeyError:
return None
return value |
def edge_lookup(self, keylookup_obj, id_strct, debug=False):
# pylint: disable=E1102, R0201, W0613 | random_line_split |
datatransform.py | """
DataTransform Module
- IDStruct
- DataTransform (superclass)
"""
# pylint: disable=E0401, E0611
import re
from functools import wraps
from biothings.utils.common import is_str, iter_n
from biothings.utils.loggers import get_logger
from .histogram import Histogram
class IDStruct(object):
"""
IDStruct - id structure for use with the DataTransform classes. The basic idea
is to provide a structure that provides a list of (original_id, current_id)
pairs.
"""
def __init__(self, field=None, doc_lst=None):
"""
Initialize the structure
:param field: field for documents to use as an initial id (optional)
:param doc_lst: list of documents to use when building an initial list (optional)
"""
self.forward = {}
self.inverse = {}
self.debug = {}
if field and doc_lst:
self._init_strct(field, doc_lst)
def _init_strct(self, field, doc_lst):
"""initialze _id_tuple_lst"""
for doc in doc_lst:
value = nested_lookup(doc, field)
if value:
self.add(value, value)
def __iter__(self):
"""iterator overload function"""
for key in self.forward:
for val in self.forward[key]:
yield key, val
def add(self, left, right):
"""add a (original_id, current_id) pair to the list"""
if not left or not right:
return # identifiers cannot be None
if self.lookup(left, right):
return # tuple already in the list
# ensure it's hashable
if not isinstance(left, (list, tuple)):
left = [left]
if not isinstance(right, (list, tuple)):
right = [right]
# These two blocks collapse duplicates in a list of keys
if isinstance(left, list):
left = set(left)
if isinstance(right, list):
right = set(right)
for val in left:
if val not in self.forward.keys():
self.forward[val] = right
else:
self.forward[val] = self.forward[val] | right
for val in right:
if val not in self.inverse.keys():
self.inverse[val] = left
else:
self.inverse[val] = self.inverse[val] | left
def | (self, other):
"""object += additional, which combines lists"""
if not isinstance(other, IDStruct):
raise TypeError("other is not of type IDStruct")
for left, right in other:
self.add(left, right)
# retain debug information
self.transfer_debug(left, other)
return self
def __len__(self):
"""Return the number of keys (forward direction)"""
return len(self.forward.keys())
def __str__(self):
"""convert to a string, useful for debugging"""
lst = []
for key in self.forward:
for val in self.forward[key]:
lst.append((key, val))
return str(lst)
@property
def id_lst(self):
"""Build up a list of current ids"""
id_set = set()
for key in self.forward:
for val in self.forward[key]:
id_set.add(val)
return list(id_set)
def lookup(self, left, right):
"""Find if a (left, right) pair is already in the list"""
for val in self.find_left(left):
if right == val:
return True
return False
@staticmethod
def side(_id, where):
"""Find if an _id is a key in where"""
if isinstance(_id, list):
_id = tuple(_id)
return _id in where.keys()
def left(self, key):
"""Determine if the id (left, _) is registered"""
return self.side(key, self.forward)
@staticmethod
def find(where, ids):
"""Find all ids in dictionary where"""
if not ids:
return
if not isinstance(ids, (set, list, tuple)):
ids = [ids]
for key in ids:
if key in where:
for i in where[key]:
yield i
def find_left(self, ids):
"""Find left values given a list of ids"""
return self.find(self.forward, ids)
def right(self, key):
"""Determine if the id (_, right) is registered"""
return self.side(key, self.inverse)
def find_right(self, ids):
"""Find the first id founding by searching the (_, right) identifiers"""
return self.find(self.inverse, ids)
def set_debug(self, left, label, right):
"""Set debug (left, right) debug values for the structure"""
# lowercase left and right keys
if is_str(left):
left = left.lower()
if is_str(right):
right = right.lower()
# remove duplicates in the debug structure
# - duplicates in the structure itself are
# - handled elsewhere
if isinstance(right, list):
right = list(set(right))
# if there is only one element in the list, collapse
if len(right) == 1:
right = right.pop()
# capture the label if it is used
if label:
right = (label, right)
try:
self.debug[left] = self.debug[left] + [right]
except KeyError:
self.debug[left] = [left, right]
def get_debug(self, key):
"""Get debug information for a given key"""
# lowercase key if possible
if is_str(key):
key = key.lower()
# return debug information
if isinstance(key, list):
return "type(list)"
try:
return self.debug[key]
except KeyError:
return "not-available"
def import_debug(self, other):
"""
import debug information the entire IDStruct object
"""
for key in other.debug:
self.transfer_debug(key, other)
def transfer_debug(self, key, other):
"""
transfer debug information for one key in the IDStruct object
"""
# ensure lower case key
if is_str(key):
key = key.lower()
# transfer debug information
self.debug[key] = other.get_debug(key)
class DataTransform(object):
"""DataTransform class. This class is the public interface for
the DataTransform module. Much of the core logic is
in the subclass."""
# pylint: disable=R0902
# Constants
batch_size = 1000
DEFAULT_WEIGHT = 1
default_source = "_id"
debug = False
def __init__(
self,
input_types,
output_types,
id_priority_list=None,
skip_on_failure=False,
skip_w_regex=None,
skip_on_success=False,
idstruct_class=IDStruct,
copy_from_doc=False,
debug=False,
):
# pylint: disable=R0913, W0102
"""
Initialize the keylookup object and precompute paths from the
start key to all target keys.
The decorator is intended to be applied to the load_data function
of an uploader. The load_data function yields documents, which
are then post processed by call and the 'id' key conversion is
performed.
:param G: nx.DiGraph (networkx 2.1) configuration graph
:param collections: list of mongodb collection names
:param input_type: key type to start key lookup from
:param output_types: list of all output types to convert to
:param id_priority_list: A priority list of identifiers to to
sort input and output types by.
:type id_priority_list: list(str)
:param id_struct_class: IDStruct used to manager/fetch IDs from docs
:param copy_from_doc: if transform failed using the graph, try to get
value from the document itself when output_type == input_type.
No check is performed, it's a straight copy. If checks are needed
(eg. check that an ID referenced in the doc actually exists in
another collection, nodes with self-loops can be used, so
ID resolution will be forced to go through these loops to ensure
data exists)
"""
self.input_types = self._parse_input_types(input_types)
self.output_types = self._parse_output_types(output_types)
self.id_priority_list = id_priority_list or []
self.skip_on_failure = skip_on_failure
self.skip_on_success = skip_on_success
if skip_w_regex and not isinstance(skip_w_regex, str):
raise ValueError("skip_w_regex must be a string")
elif not skip_w_regex:
self.skip_w_regex = None
else:
self.skip_w_regex = re.compile(skip_w_regex)
self.idstruct_class = idstruct_class
self.copy_from_doc = copy_from_doc
self.histogram = Histogram()
# Setup logger and logging level
self.logger, _ = get_logger("datatransform")
self.debug = debug
def _parse_input_types(self, input_types):
"""
Parse the input_types argument
:return:
"""
res_input_types = []
if isinstance(input_types, str):
input_types = [input_types]
if isinstance(input_types, list):
for input_type in input_types:
if isinstance(input_type, (tuple, list)):
if not self._valid_input_type(input_type[0]):
raise ValueError("input_type '%s' is not a node in the key_lookup graph" % repr(input_type[0]))
res_input_types.append((input_type[0].lower(), input_type[1]))
elif isinstance(input_type, str):
if not self._valid_input_type(input_type.lower()):
raise ValueError("input_type '%s' is not a node in the key_lookup graph" % repr(input_type))
res_input_types.append((input_type, self.default_source))
else:
raise ValueError("Provided input_types is not of the correct type")
else:
raise ValueError("Provided input_types is not of the correct type")
return res_input_types
def _valid_input_type(self, input_type):
# pylint: disable=W0613, R0201
"""In the base class, all input_types and output_types are valid."""
return True
def _parse_output_types(self, output_types):
"""
Parse through output_types
:param output_types:
:return:
"""
if not isinstance(output_types, list):
raise ValueError("output_types should be of type list")
for output_type in output_types:
if not self._valid_output_type(output_type):
raise ValueError("output_type is not a node in the key_lookup graph")
return output_types
def _valid_output_type(self, output_type):
# pylint: disable=W0613, R0201
"""In the base class, all input_types and output_types are valid."""
return True
def __call__(self, func, debug=None):
"""
Perform the data transformation on all documents on call.
:param func: function to apply to
:param debug: Enable debugging information.
:type debug: bool
:param debug: Enable debugging information. When enabled, debugging information
will be retained in the 'dt_debug' field of each document. This parameter
can be either list of original id's to retain debugging information for or
a True, which will retain debugging information for all documents.
:type debug: bool or list(str)
:return:
"""
# additional handling for the debug option
if not debug:
self.debug = False
elif debug is True:
self.debug = True
self.logger.debug("DataTransform Debug Mode Enabled for all documents.")
elif isinstance(debug, list):
self.logger.debug("DataTransform Debug Mode: {}".format(debug))
self.debug = debug
@wraps(func)
def wrapped_f(*args):
"""This is a wrapped function which will be called by the decorator method."""
input_docs = func(*args)
output_doc_cnt = 0
# split input_docs into chunks of size self.batch_size
for batchiter in iter_n(input_docs, int(self.batch_size / len(self.input_types))):
output_docs = self.key_lookup_batch(batchiter)
for odoc in output_docs:
# print debug information if the original id is the in the debug list
if "dt_debug" in odoc:
if isinstance(self.debug, list) and odoc["dt_debug"]["orig_id"] in self.debug:
self.logger.debug("DataTransform Debug doc['dt_debug']: {}".format(odoc["dt_debug"]))
output_doc_cnt += 1
yield odoc
self.logger.info("wrapped_f Num. output_docs: {}".format(output_doc_cnt))
self.logger.info("DataTransform.histogram: {}".format(self.histogram))
return wrapped_f
def key_lookup_batch(self, batchiter):
"""
Core method for looking up all keys in batch (iterator)
:param batchiter:
:return:
"""
pass
def lookup_one(self, doc):
"""
KeyLookup on document. This method is called as a function call instead of a
decorator on a document iterator.
"""
# special handling for the debug option
self.debug = [doc["_id"]]
output_docs = self.key_lookup_batch([doc])
for odoc in output_docs:
# print debug information if available
if self.debug and "dt_debug" in odoc:
self.logger.debug("DataTransform Debug doc['dt_debug']: {}".format(odoc["dt_debug"]))
yield odoc
self.logger.info("DataTransform.histogram: {}".format(self.histogram))
@staticmethod
def _nested_lookup(doc, field):
"""
Performs a nested lookup of doc using a period (.) delimited
list of fields. This is a nested dictionary lookup.
:param doc: document to perform lookup on
:param field: period delimited list of fields
:return:
"""
value = doc
keys = field.split(".")
try:
for k in keys:
value = value[k]
except KeyError:
return None
return str(value)
@property
def id_priority_list(self):
"""Property method for getting id_priority_list"""
return self._id_priority_list
@id_priority_list.setter
def id_priority_list(self, value):
# pylint: disable=W0201
"""Property method for setting id_priority_list and
sorting input_types and output_types."""
self._id_priority_list = value
self.input_types = self.sort_input_by_priority_list(self.input_types)
self.output_types = self.sort_output_by_priority_list(self.output_types)
def sort_input_by_priority_list(self, input_types):
"""
Reorder the given input_types to follow a priority list. Inputs not in the
priority list should remain in their given order at the end of the list.
"""
# construct temporary id_priority_list with extra elements at the end
id_priority_list = self._expand_priority_order([x[0] for x in input_types])
input_types = sorted(input_types, key=lambda e: self._priority_order(id_priority_list, e[0]))
return input_types
def sort_output_by_priority_list(self, output_types):
"""
Reorder the given output_types to follow a priority list. Outputs not in the
priority list should remain in their given order at the end of the list.
"""
# construct temporary id_priority_list with extra elements at the end
id_priority_list = self._expand_priority_order(output_types)
output_types = sorted(output_types, key=lambda e: self._priority_order(id_priority_list, e))
return output_types
def _expand_priority_order(self, id_list):
"""
Expand the self.id_priority_list to also include elements in id_list that are not
in the priority list. These elements are added to the priority list in the order
that they appear in the id_list.
Example:
> self.id_priority_list = ['a', 'c']
> self._expand_priority_order(['a', 'd', 'e'])
['a', 'c', 'd', 'e']
"""
res = self.id_priority_list.copy()
for key in id_list:
if key not in self.id_priority_list:
res.append(key)
return res
@staticmethod
def _priority_order(id_priority_list, elem):
"""
Determine the priority order of an input_type following a id_priority_list.
This list, first defined in DataTransformMDB is used to reorder the input_types
so that their order matches the id types listed in id_priority_list. If an id
type is not in that list then the input_type will be placed at the end of the list
in arbitrary order.
"""
assert isinstance(id_priority_list, list)
# match id types with id priority
for index, id_elem in enumerate(id_priority_list):
if elem == id_elem:
return index
# the id type is not in id_priority_list so it will be placed last
return len(id_priority_list) + 1
class DataTransformEdge(object):
"""
DataTransformEdge. This class contains information needed to
transform one key to another.
"""
def __init__(self, label=None):
"""
Initialize the class
:param label: A label can be used for debugging purposes.
"""
self.prepared = False
self.label = label
self.init_state()
def edge_lookup(self, keylookup_obj, id_strct, debug=False):
# pylint: disable=E1102, R0201, W0613
"""
virtual method for edge lookup. Each edge class is
responsible for its own lookup procedures given a
keylookup_obj and an id_strct
:param keylookup_obj:
:param id_strct: - list of tuples (orig_id, current_id)
:return:
"""
yield NotImplemented("This method must be overridden by the base class.")
def init_state(self):
"""initialize the state of pickleable objects"""
self._state = {"logger": None}
@property
def logger(self):
"""getter for the logger property"""
if not self._state["logger"]:
self.prepare()
return self._state["logger"]
@logger.setter
def logger(self, value):
"""setter for the logger variable"""
self._state["logger"] = value
def setup_log(self):
"""setup the logger member variable"""
self.logger, _ = get_logger("datatransform")
def prepare(self, state=None):
# pylint: disable=W0102
"""Prepare class state objects (pickleable objects)"""
state = state or {}
if self.prepared:
return
if state:
# let's be explicit, _state takes what it wants
for k in self._state:
self._state[k] = state[k]
return
self.setup_log()
def unprepare(self):
"""
reset anything that's not picklable (so self can be pickled)
return what's been reset as a dict, so self can be restored
once pickled
"""
state = {
"logger": self._state["logger"],
}
for k in state:
self._state[k] = None
self.prepared = False
return state
class RegExEdge(DataTransformEdge):
"""
The RegExEdge allows an identifier to be transformed using a
regular expression. POSIX regular expressions are supported.
"""
def __init__(self, from_regex, to_regex, weight=1, label=None):
"""
:param from_regex: The first parameter of the regular expression substitution.
:type from_regex: str
:param to_regex: The second parameter of the regular expression substitution.
:type to_regex: str
:param weight: Weights are used to prefer one path over another. The path
with the lowest weight is preferred. The default weight is 1.
:type weight: int
"""
super(RegExEdge, self).__init__(label)
self.from_regex = from_regex
self.to_regex = to_regex
self.weight = weight
def edge_lookup(self, keylookup_obj, id_strct, debug=False):
"""
Transform identifiers using a regular expression substitution.
"""
res_id_strct = IDStruct()
for left, right in id_strct:
res_id_strct.add(left, re.sub(self.from_regex, self.to_regex, right))
return res_id_strct
def nested_lookup(doc, field):
"""
Performs a nested lookup of doc using a period (.) delimited
list of fields. This is a nested dictionary lookup.
:param doc: document to perform lookup on
:param field: period delimited list of fields
:return:
"""
value = doc
keys = field.split(".")
try:
for k in keys:
if isinstance(value, (list, tuple)):
# assuming we have a list of dict with k as one of the keys
stype = set([type(e) for e in value])
if not stype:
return None
assert len(stype) == 1 and stype == {dict}, "Expecting a list of dict, found types: %s" % stype
value = [e[k] for e in value if e.get(k)]
# can't go further ?
return value
else:
value = value[k]
except KeyError:
return None
return value
| __iadd__ | identifier_name |
datatransform.py | """
DataTransform Module
- IDStruct
- DataTransform (superclass)
"""
# pylint: disable=E0401, E0611
import re
from functools import wraps
from biothings.utils.common import is_str, iter_n
from biothings.utils.loggers import get_logger
from .histogram import Histogram
class IDStruct(object):
"""
IDStruct - id structure for use with the DataTransform classes. The basic idea
is to provide a structure that provides a list of (original_id, current_id)
pairs.
"""
def __init__(self, field=None, doc_lst=None):
"""
Initialize the structure
:param field: field for documents to use as an initial id (optional)
:param doc_lst: list of documents to use when building an initial list (optional)
"""
self.forward = {}
self.inverse = {}
self.debug = {}
if field and doc_lst:
self._init_strct(field, doc_lst)
def _init_strct(self, field, doc_lst):
|
def __iter__(self):
"""iterator overload function"""
for key in self.forward:
for val in self.forward[key]:
yield key, val
def add(self, left, right):
"""add a (original_id, current_id) pair to the list"""
if not left or not right:
return # identifiers cannot be None
if self.lookup(left, right):
return # tuple already in the list
# ensure it's hashable
if not isinstance(left, (list, tuple)):
left = [left]
if not isinstance(right, (list, tuple)):
right = [right]
# These two blocks collapse duplicates in a list of keys
if isinstance(left, list):
left = set(left)
if isinstance(right, list):
right = set(right)
for val in left:
if val not in self.forward.keys():
self.forward[val] = right
else:
self.forward[val] = self.forward[val] | right
for val in right:
if val not in self.inverse.keys():
self.inverse[val] = left
else:
self.inverse[val] = self.inverse[val] | left
def __iadd__(self, other):
"""object += additional, which combines lists"""
if not isinstance(other, IDStruct):
raise TypeError("other is not of type IDStruct")
for left, right in other:
self.add(left, right)
# retain debug information
self.transfer_debug(left, other)
return self
def __len__(self):
"""Return the number of keys (forward direction)"""
return len(self.forward.keys())
def __str__(self):
"""convert to a string, useful for debugging"""
lst = []
for key in self.forward:
for val in self.forward[key]:
lst.append((key, val))
return str(lst)
@property
def id_lst(self):
"""Build up a list of current ids"""
id_set = set()
for key in self.forward:
for val in self.forward[key]:
id_set.add(val)
return list(id_set)
def lookup(self, left, right):
"""Find if a (left, right) pair is already in the list"""
for val in self.find_left(left):
if right == val:
return True
return False
@staticmethod
def side(_id, where):
"""Find if an _id is a key in where"""
if isinstance(_id, list):
_id = tuple(_id)
return _id in where.keys()
def left(self, key):
"""Determine if the id (left, _) is registered"""
return self.side(key, self.forward)
@staticmethod
def find(where, ids):
"""Find all ids in dictionary where"""
if not ids:
return
if not isinstance(ids, (set, list, tuple)):
ids = [ids]
for key in ids:
if key in where:
for i in where[key]:
yield i
def find_left(self, ids):
"""Find left values given a list of ids"""
return self.find(self.forward, ids)
def right(self, key):
"""Determine if the id (_, right) is registered"""
return self.side(key, self.inverse)
def find_right(self, ids):
"""Find the first id founding by searching the (_, right) identifiers"""
return self.find(self.inverse, ids)
def set_debug(self, left, label, right):
"""Set debug (left, right) debug values for the structure"""
# lowercase left and right keys
if is_str(left):
left = left.lower()
if is_str(right):
right = right.lower()
# remove duplicates in the debug structure
# - duplicates in the structure itself are
# - handled elsewhere
if isinstance(right, list):
right = list(set(right))
# if there is only one element in the list, collapse
if len(right) == 1:
right = right.pop()
# capture the label if it is used
if label:
right = (label, right)
try:
self.debug[left] = self.debug[left] + [right]
except KeyError:
self.debug[left] = [left, right]
def get_debug(self, key):
"""Get debug information for a given key"""
# lowercase key if possible
if is_str(key):
key = key.lower()
# return debug information
if isinstance(key, list):
return "type(list)"
try:
return self.debug[key]
except KeyError:
return "not-available"
def import_debug(self, other):
"""
import debug information the entire IDStruct object
"""
for key in other.debug:
self.transfer_debug(key, other)
def transfer_debug(self, key, other):
"""
transfer debug information for one key in the IDStruct object
"""
# ensure lower case key
if is_str(key):
key = key.lower()
# transfer debug information
self.debug[key] = other.get_debug(key)
class DataTransform(object):
"""DataTransform class. This class is the public interface for
the DataTransform module. Much of the core logic is
in the subclass."""
# pylint: disable=R0902
# Constants
batch_size = 1000
DEFAULT_WEIGHT = 1
default_source = "_id"
debug = False
def __init__(
self,
input_types,
output_types,
id_priority_list=None,
skip_on_failure=False,
skip_w_regex=None,
skip_on_success=False,
idstruct_class=IDStruct,
copy_from_doc=False,
debug=False,
):
# pylint: disable=R0913, W0102
"""
Initialize the keylookup object and precompute paths from the
start key to all target keys.
The decorator is intended to be applied to the load_data function
of an uploader. The load_data function yields documents, which
are then post processed by call and the 'id' key conversion is
performed.
:param G: nx.DiGraph (networkx 2.1) configuration graph
:param collections: list of mongodb collection names
:param input_type: key type to start key lookup from
:param output_types: list of all output types to convert to
:param id_priority_list: A priority list of identifiers to to
sort input and output types by.
:type id_priority_list: list(str)
:param id_struct_class: IDStruct used to manager/fetch IDs from docs
:param copy_from_doc: if transform failed using the graph, try to get
value from the document itself when output_type == input_type.
No check is performed, it's a straight copy. If checks are needed
(eg. check that an ID referenced in the doc actually exists in
another collection, nodes with self-loops can be used, so
ID resolution will be forced to go through these loops to ensure
data exists)
"""
self.input_types = self._parse_input_types(input_types)
self.output_types = self._parse_output_types(output_types)
self.id_priority_list = id_priority_list or []
self.skip_on_failure = skip_on_failure
self.skip_on_success = skip_on_success
if skip_w_regex and not isinstance(skip_w_regex, str):
raise ValueError("skip_w_regex must be a string")
elif not skip_w_regex:
self.skip_w_regex = None
else:
self.skip_w_regex = re.compile(skip_w_regex)
self.idstruct_class = idstruct_class
self.copy_from_doc = copy_from_doc
self.histogram = Histogram()
# Setup logger and logging level
self.logger, _ = get_logger("datatransform")
self.debug = debug
def _parse_input_types(self, input_types):
"""
Parse the input_types argument
:return:
"""
res_input_types = []
if isinstance(input_types, str):
input_types = [input_types]
if isinstance(input_types, list):
for input_type in input_types:
if isinstance(input_type, (tuple, list)):
if not self._valid_input_type(input_type[0]):
raise ValueError("input_type '%s' is not a node in the key_lookup graph" % repr(input_type[0]))
res_input_types.append((input_type[0].lower(), input_type[1]))
elif isinstance(input_type, str):
if not self._valid_input_type(input_type.lower()):
raise ValueError("input_type '%s' is not a node in the key_lookup graph" % repr(input_type))
res_input_types.append((input_type, self.default_source))
else:
raise ValueError("Provided input_types is not of the correct type")
else:
raise ValueError("Provided input_types is not of the correct type")
return res_input_types
def _valid_input_type(self, input_type):
# pylint: disable=W0613, R0201
"""In the base class, all input_types and output_types are valid."""
return True
def _parse_output_types(self, output_types):
"""
Parse through output_types
:param output_types:
:return:
"""
if not isinstance(output_types, list):
raise ValueError("output_types should be of type list")
for output_type in output_types:
if not self._valid_output_type(output_type):
raise ValueError("output_type is not a node in the key_lookup graph")
return output_types
def _valid_output_type(self, output_type):
# pylint: disable=W0613, R0201
"""In the base class, all input_types and output_types are valid."""
return True
def __call__(self, func, debug=None):
"""
Perform the data transformation on all documents on call.
:param func: function to apply to
:param debug: Enable debugging information.
:type debug: bool
:param debug: Enable debugging information. When enabled, debugging information
will be retained in the 'dt_debug' field of each document. This parameter
can be either list of original id's to retain debugging information for or
a True, which will retain debugging information for all documents.
:type debug: bool or list(str)
:return:
"""
# additional handling for the debug option
if not debug:
self.debug = False
elif debug is True:
self.debug = True
self.logger.debug("DataTransform Debug Mode Enabled for all documents.")
elif isinstance(debug, list):
self.logger.debug("DataTransform Debug Mode: {}".format(debug))
self.debug = debug
@wraps(func)
def wrapped_f(*args):
"""This is a wrapped function which will be called by the decorator method."""
input_docs = func(*args)
output_doc_cnt = 0
# split input_docs into chunks of size self.batch_size
for batchiter in iter_n(input_docs, int(self.batch_size / len(self.input_types))):
output_docs = self.key_lookup_batch(batchiter)
for odoc in output_docs:
# print debug information if the original id is the in the debug list
if "dt_debug" in odoc:
if isinstance(self.debug, list) and odoc["dt_debug"]["orig_id"] in self.debug:
self.logger.debug("DataTransform Debug doc['dt_debug']: {}".format(odoc["dt_debug"]))
output_doc_cnt += 1
yield odoc
self.logger.info("wrapped_f Num. output_docs: {}".format(output_doc_cnt))
self.logger.info("DataTransform.histogram: {}".format(self.histogram))
return wrapped_f
def key_lookup_batch(self, batchiter):
"""
Core method for looking up all keys in batch (iterator)
:param batchiter:
:return:
"""
pass
def lookup_one(self, doc):
"""
KeyLookup on document. This method is called as a function call instead of a
decorator on a document iterator.
"""
# special handling for the debug option
self.debug = [doc["_id"]]
output_docs = self.key_lookup_batch([doc])
for odoc in output_docs:
# print debug information if available
if self.debug and "dt_debug" in odoc:
self.logger.debug("DataTransform Debug doc['dt_debug']: {}".format(odoc["dt_debug"]))
yield odoc
self.logger.info("DataTransform.histogram: {}".format(self.histogram))
@staticmethod
def _nested_lookup(doc, field):
"""
Performs a nested lookup of doc using a period (.) delimited
list of fields. This is a nested dictionary lookup.
:param doc: document to perform lookup on
:param field: period delimited list of fields
:return:
"""
value = doc
keys = field.split(".")
try:
for k in keys:
value = value[k]
except KeyError:
return None
return str(value)
@property
def id_priority_list(self):
"""Property method for getting id_priority_list"""
return self._id_priority_list
@id_priority_list.setter
def id_priority_list(self, value):
# pylint: disable=W0201
"""Property method for setting id_priority_list and
sorting input_types and output_types."""
self._id_priority_list = value
self.input_types = self.sort_input_by_priority_list(self.input_types)
self.output_types = self.sort_output_by_priority_list(self.output_types)
def sort_input_by_priority_list(self, input_types):
"""
Reorder the given input_types to follow a priority list. Inputs not in the
priority list should remain in their given order at the end of the list.
"""
# construct temporary id_priority_list with extra elements at the end
id_priority_list = self._expand_priority_order([x[0] for x in input_types])
input_types = sorted(input_types, key=lambda e: self._priority_order(id_priority_list, e[0]))
return input_types
def sort_output_by_priority_list(self, output_types):
"""
Reorder the given output_types to follow a priority list. Outputs not in the
priority list should remain in their given order at the end of the list.
"""
# construct temporary id_priority_list with extra elements at the end
id_priority_list = self._expand_priority_order(output_types)
output_types = sorted(output_types, key=lambda e: self._priority_order(id_priority_list, e))
return output_types
def _expand_priority_order(self, id_list):
"""
Expand the self.id_priority_list to also include elements in id_list that are not
in the priority list. These elements are added to the priority list in the order
that they appear in the id_list.
Example:
> self.id_priority_list = ['a', 'c']
> self._expand_priority_order(['a', 'd', 'e'])
['a', 'c', 'd', 'e']
"""
res = self.id_priority_list.copy()
for key in id_list:
if key not in self.id_priority_list:
res.append(key)
return res
@staticmethod
def _priority_order(id_priority_list, elem):
"""
Determine the priority order of an input_type following a id_priority_list.
This list, first defined in DataTransformMDB is used to reorder the input_types
so that their order matches the id types listed in id_priority_list. If an id
type is not in that list then the input_type will be placed at the end of the list
in arbitrary order.
"""
assert isinstance(id_priority_list, list)
# match id types with id priority
for index, id_elem in enumerate(id_priority_list):
if elem == id_elem:
return index
# the id type is not in id_priority_list so it will be placed last
return len(id_priority_list) + 1
class DataTransformEdge(object):
"""
DataTransformEdge. This class contains information needed to
transform one key to another.
"""
def __init__(self, label=None):
"""
Initialize the class
:param label: A label can be used for debugging purposes.
"""
self.prepared = False
self.label = label
self.init_state()
def edge_lookup(self, keylookup_obj, id_strct, debug=False):
# pylint: disable=E1102, R0201, W0613
"""
virtual method for edge lookup. Each edge class is
responsible for its own lookup procedures given a
keylookup_obj and an id_strct
:param keylookup_obj:
:param id_strct: - list of tuples (orig_id, current_id)
:return:
"""
yield NotImplemented("This method must be overridden by the base class.")
def init_state(self):
"""initialize the state of pickleable objects"""
self._state = {"logger": None}
@property
def logger(self):
"""getter for the logger property"""
if not self._state["logger"]:
self.prepare()
return self._state["logger"]
@logger.setter
def logger(self, value):
"""setter for the logger variable"""
self._state["logger"] = value
def setup_log(self):
"""setup the logger member variable"""
self.logger, _ = get_logger("datatransform")
def prepare(self, state=None):
# pylint: disable=W0102
"""Prepare class state objects (pickleable objects)"""
state = state or {}
if self.prepared:
return
if state:
# let's be explicit, _state takes what it wants
for k in self._state:
self._state[k] = state[k]
return
self.setup_log()
def unprepare(self):
"""
reset anything that's not picklable (so self can be pickled)
return what's been reset as a dict, so self can be restored
once pickled
"""
state = {
"logger": self._state["logger"],
}
for k in state:
self._state[k] = None
self.prepared = False
return state
class RegExEdge(DataTransformEdge):
"""
The RegExEdge allows an identifier to be transformed using a
regular expression. POSIX regular expressions are supported.
"""
def __init__(self, from_regex, to_regex, weight=1, label=None):
"""
:param from_regex: The first parameter of the regular expression substitution.
:type from_regex: str
:param to_regex: The second parameter of the regular expression substitution.
:type to_regex: str
:param weight: Weights are used to prefer one path over another. The path
with the lowest weight is preferred. The default weight is 1.
:type weight: int
"""
super(RegExEdge, self).__init__(label)
self.from_regex = from_regex
self.to_regex = to_regex
self.weight = weight
def edge_lookup(self, keylookup_obj, id_strct, debug=False):
"""
Transform identifiers using a regular expression substitution.
"""
res_id_strct = IDStruct()
for left, right in id_strct:
res_id_strct.add(left, re.sub(self.from_regex, self.to_regex, right))
return res_id_strct
def nested_lookup(doc, field):
"""
Performs a nested lookup of doc using a period (.) delimited
list of fields. This is a nested dictionary lookup.
:param doc: document to perform lookup on
:param field: period delimited list of fields
:return:
"""
value = doc
keys = field.split(".")
try:
for k in keys:
if isinstance(value, (list, tuple)):
# assuming we have a list of dict with k as one of the keys
stype = set([type(e) for e in value])
if not stype:
return None
assert len(stype) == 1 and stype == {dict}, "Expecting a list of dict, found types: %s" % stype
value = [e[k] for e in value if e.get(k)]
# can't go further ?
return value
else:
value = value[k]
except KeyError:
return None
return value
| """initialze _id_tuple_lst"""
for doc in doc_lst:
value = nested_lookup(doc, field)
if value:
self.add(value, value) | identifier_body |
macos.rs | #![cfg(target_os = "macos")]
use super::*;
use std::ffi::{OsStr, OsString};
use std::os::unix::ffi::{OsStrExt, OsStringExt};
impl From<u32> for LocalProcessStatus {
fn from(s: u32) -> Self {
match s {
1 => Self::Idle,
2 => Self::Run,
3 => Self::Sleep,
4 => Self::Stop,
5 => Self::Zombie,
_ => Self::Unknown,
}
}
}
impl LocalProcessInfo {
pub fn current_working_dir(pid: u32) -> Option<PathBuf> {
let mut pathinfo: libc::proc_vnodepathinfo = unsafe { std::mem::zeroed() };
let size = std::mem::size_of_val(&pathinfo) as libc::c_int;
let ret = unsafe {
libc::proc_pidinfo(
pid as _,
libc::PROC_PIDVNODEPATHINFO,
0,
&mut pathinfo as *mut _ as *mut _,
size,
)
};
if ret != size {
return None;
}
// Workaround a workaround for an old rustc version supported by libc;
// the type of vip_path should just be [c_char; MAXPATHLEN] but it
// is defined as a horrible nested array by the libc crate:
// `[[c_char; 32]; 32]`.
// Urgh. Let's re-cast it as the correct kind of slice.
let vip_path = unsafe {
std::slice::from_raw_parts(
pathinfo.pvi_cdir.vip_path.as_ptr() as *const u8,
libc::MAXPATHLEN as usize,
)
};
let nul = vip_path.iter().position(|&c| c == 0)?;
Some(OsStr::from_bytes(&vip_path[0..nul]).into())
}
pub fn executable_path(pid: u32) -> Option<PathBuf> {
let mut buffer: Vec<u8> = Vec::with_capacity(libc::PROC_PIDPATHINFO_MAXSIZE as _);
let x = unsafe {
libc::proc_pidpath(
pid as _,
buffer.as_mut_ptr() as *mut _,
libc::PROC_PIDPATHINFO_MAXSIZE as _,
)
};
if x <= 0 {
return None;
}
unsafe { buffer.set_len(x as usize) };
Some(OsString::from_vec(buffer).into())
}
pub fn with_root_pid(pid: u32) -> Option<Self> {
/// Enumerate all current process identifiers
fn all_pids() -> Vec<libc::pid_t> {
let num_pids = unsafe { libc::proc_listallpids(std::ptr::null_mut(), 0) };
if num_pids < 1 {
return vec![];
}
// Give a bit of padding to avoid looping if processes are spawning
// rapidly while we're trying to collect this info
const PADDING: usize = 32;
let mut pids: Vec<libc::pid_t> = Vec::with_capacity(num_pids as usize + PADDING);
loop {
let n = unsafe {
libc::proc_listallpids(
pids.as_mut_ptr() as *mut _,
(pids.capacity() * std::mem::size_of::<libc::pid_t>()) as _,
)
};
if n < 1 {
return vec![];
}
let n = n as usize;
if n > pids.capacity() {
pids.reserve(n + PADDING);
continue;
}
unsafe { pids.set_len(n) };
return pids;
}
}
/// Obtain info block for a pid.
/// Note that the process could have gone away since we first
/// observed the pid and the time we call this, so we must
/// be able to tolerate this failing.
fn info_for_pid(pid: libc::pid_t) -> Option<libc::proc_bsdinfo> {
let mut info: libc::proc_bsdinfo = unsafe { std::mem::zeroed() };
let wanted_size = std::mem::size_of::<libc::proc_bsdinfo>() as _;
let res = unsafe {
libc::proc_pidinfo(
pid,
libc::PROC_PIDTBSDINFO,
0,
&mut info as *mut _ as *mut _,
wanted_size,
)
};
if res == wanted_size {
Some(info)
} else {
None
}
}
fn cwd_for_pid(pid: libc::pid_t) -> PathBuf {
LocalProcessInfo::current_working_dir(pid as _).unwrap_or_else(PathBuf::new)
}
fn exe_and_args_for_pid_sysctl(pid: libc::pid_t) -> Option<(PathBuf, Vec<String>)> {
use libc::c_int;
let mut size = 64 * 1024;
let mut buf: Vec<u8> = Vec::with_capacity(size);
let mut mib = [libc::CTL_KERN, libc::KERN_PROCARGS2, pid as c_int];
let res = unsafe {
libc::sysctl(
mib.as_mut_ptr(),
mib.len() as _,
buf.as_mut_ptr() as *mut _,
&mut size,
std::ptr::null_mut(),
0,
)
};
if res == -1 {
return None;
}
if size < (std::mem::size_of::<c_int>() * 2) {
// Not big enough
return None;
}
unsafe { buf.set_len(size) };
parse_exe_and_argv_sysctl(buf)
}
fn exe_for_pid(pid: libc::pid_t) -> PathBuf {
LocalProcessInfo::executable_path(pid as _).unwrap_or_else(PathBuf::new)
}
let procs: Vec<_> = all_pids().into_iter().filter_map(info_for_pid).collect();
fn build_proc(info: &libc::proc_bsdinfo, procs: &[libc::proc_bsdinfo]) -> LocalProcessInfo {
let mut children = HashMap::new();
for kid in procs {
if kid.pbi_ppid == info.pbi_pid {
children.insert(kid.pbi_pid, build_proc(kid, procs));
}
}
let (executable, argv) = exe_and_args_for_pid_sysctl(info.pbi_pid as _)
.unwrap_or_else(|| (exe_for_pid(info.pbi_pid as _), vec![]));
let name = unsafe { std::ffi::CStr::from_ptr(info.pbi_comm.as_ptr() as _) };
let name = name.to_str().unwrap_or("").to_string();
LocalProcessInfo {
pid: info.pbi_pid,
ppid: info.pbi_ppid,
name,
executable,
cwd: cwd_for_pid(info.pbi_pid as _),
argv,
start_time: info.pbi_start_tvsec,
status: LocalProcessStatus::from(info.pbi_status),
children,
}
}
if let Some(info) = procs.iter().find(|info| info.pbi_pid == pid) {
Some(build_proc(info, &procs))
} else {
None
}
}
}
fn parse_exe_and_argv_sysctl(buf: Vec<u8>) -> Option<(PathBuf, Vec<String>)> |
#[cfg(test)]
mod tests {
use std::path::Path;
use super::parse_exe_and_argv_sysctl;
#[test]
fn test_trailing_zeros() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 0, 0, 0, 115, 108,
101, 101, 112, 0, 53, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_no_trailing_zeros() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to remove the trailing 0s between the exe_path and the argv
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 115, 108, 101, 101, 112,
0, 53, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_multiple_trailing_zeros() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to add trailing 0s between argv items
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 115, 108, 101, 101,
112, 0, 0, 0, 53, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_trailing_zeros_at_end() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to add zeroes to the end of the buffer
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 115, 108, 101, 101,
112, 0, 0, 0, 53, 0, 0, 0, 0, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_malformed() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to remove the last 0, making a malformed null-terminated string
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 115, 108, 101, 101,
112, 0, 0, 0, 53,
];
assert!(parse_exe_and_argv_sysctl(buf).is_none());
}
}
| {
use libc::c_int;
// The data in our buffer is laid out like this:
// argc - c_int
// exe_path - NUL terminated string
// argv[0] - NUL terminated string
// argv[1] - NUL terminated string
// ...
// argv[n] - NUL terminated string
// envp[0] - NUL terminated string
// ...
let mut ptr = &buf[0..buf.len()];
let argc: c_int = unsafe { std::ptr::read(ptr.as_ptr() as *const c_int) };
ptr = &ptr[std::mem::size_of::<c_int>()..];
fn consume_cstr(ptr: &mut &[u8]) -> Option<String> {
// Parse to the end of a null terminated string
let nul = ptr.iter().position(|&c| c == 0)?;
let s = String::from_utf8_lossy(&ptr[0..nul]).to_owned().to_string();
*ptr = ptr.get(nul + 1..)?;
// Find the position of the first non null byte. `.position()`
// will return None if we run off the end.
if let Some(not_nul) = ptr.iter().position(|&c| c != 0) {
// If there are no trailing nulls, not_nul will be 0
// and this call will be a noop
*ptr = ptr.get(not_nul..)?;
}
Some(s)
}
let exe_path = consume_cstr(&mut ptr)?.into();
let mut args = vec![];
for _ in 0..argc {
args.push(consume_cstr(&mut ptr)?);
}
Some((exe_path, args))
} | identifier_body |
macos.rs | #![cfg(target_os = "macos")]
use super::*;
use std::ffi::{OsStr, OsString};
use std::os::unix::ffi::{OsStrExt, OsStringExt};
impl From<u32> for LocalProcessStatus {
fn from(s: u32) -> Self {
match s {
1 => Self::Idle,
2 => Self::Run,
3 => Self::Sleep,
4 => Self::Stop,
5 => Self::Zombie,
_ => Self::Unknown,
}
}
}
impl LocalProcessInfo {
pub fn current_working_dir(pid: u32) -> Option<PathBuf> {
let mut pathinfo: libc::proc_vnodepathinfo = unsafe { std::mem::zeroed() };
let size = std::mem::size_of_val(&pathinfo) as libc::c_int;
let ret = unsafe {
libc::proc_pidinfo(
pid as _,
libc::PROC_PIDVNODEPATHINFO,
0,
&mut pathinfo as *mut _ as *mut _,
size,
)
};
if ret != size {
return None;
}
// Workaround a workaround for an old rustc version supported by libc;
// the type of vip_path should just be [c_char; MAXPATHLEN] but it
// is defined as a horrible nested array by the libc crate:
// `[[c_char; 32]; 32]`.
// Urgh. Let's re-cast it as the correct kind of slice.
let vip_path = unsafe {
std::slice::from_raw_parts(
pathinfo.pvi_cdir.vip_path.as_ptr() as *const u8,
libc::MAXPATHLEN as usize,
)
};
let nul = vip_path.iter().position(|&c| c == 0)?;
Some(OsStr::from_bytes(&vip_path[0..nul]).into())
}
pub fn executable_path(pid: u32) -> Option<PathBuf> {
let mut buffer: Vec<u8> = Vec::with_capacity(libc::PROC_PIDPATHINFO_MAXSIZE as _);
let x = unsafe {
libc::proc_pidpath(
pid as _,
buffer.as_mut_ptr() as *mut _,
libc::PROC_PIDPATHINFO_MAXSIZE as _,
)
};
if x <= 0 {
return None;
}
unsafe { buffer.set_len(x as usize) };
Some(OsString::from_vec(buffer).into())
}
pub fn with_root_pid(pid: u32) -> Option<Self> {
/// Enumerate all current process identifiers
fn all_pids() -> Vec<libc::pid_t> {
let num_pids = unsafe { libc::proc_listallpids(std::ptr::null_mut(), 0) };
if num_pids < 1 {
return vec![];
}
// Give a bit of padding to avoid looping if processes are spawning
// rapidly while we're trying to collect this info
const PADDING: usize = 32;
let mut pids: Vec<libc::pid_t> = Vec::with_capacity(num_pids as usize + PADDING);
loop {
let n = unsafe {
libc::proc_listallpids(
pids.as_mut_ptr() as *mut _,
(pids.capacity() * std::mem::size_of::<libc::pid_t>()) as _,
)
};
if n < 1 {
return vec![];
}
let n = n as usize;
if n > pids.capacity() {
pids.reserve(n + PADDING);
continue;
}
unsafe { pids.set_len(n) };
return pids;
}
}
/// Obtain info block for a pid.
/// Note that the process could have gone away since we first
/// observed the pid and the time we call this, so we must
/// be able to tolerate this failing.
fn info_for_pid(pid: libc::pid_t) -> Option<libc::proc_bsdinfo> {
let mut info: libc::proc_bsdinfo = unsafe { std::mem::zeroed() };
let wanted_size = std::mem::size_of::<libc::proc_bsdinfo>() as _;
let res = unsafe {
libc::proc_pidinfo(
pid,
libc::PROC_PIDTBSDINFO,
0,
&mut info as *mut _ as *mut _,
wanted_size,
)
};
if res == wanted_size {
Some(info)
} else {
None
}
}
fn cwd_for_pid(pid: libc::pid_t) -> PathBuf {
LocalProcessInfo::current_working_dir(pid as _).unwrap_or_else(PathBuf::new)
}
fn exe_and_args_for_pid_sysctl(pid: libc::pid_t) -> Option<(PathBuf, Vec<String>)> {
use libc::c_int;
let mut size = 64 * 1024;
let mut buf: Vec<u8> = Vec::with_capacity(size);
let mut mib = [libc::CTL_KERN, libc::KERN_PROCARGS2, pid as c_int];
let res = unsafe {
libc::sysctl(
mib.as_mut_ptr(),
mib.len() as _,
buf.as_mut_ptr() as *mut _,
&mut size,
std::ptr::null_mut(),
0,
)
};
if res == -1 {
return None;
}
if size < (std::mem::size_of::<c_int>() * 2) {
// Not big enough
return None;
}
unsafe { buf.set_len(size) };
| }
fn exe_for_pid(pid: libc::pid_t) -> PathBuf {
LocalProcessInfo::executable_path(pid as _).unwrap_or_else(PathBuf::new)
}
let procs: Vec<_> = all_pids().into_iter().filter_map(info_for_pid).collect();
fn build_proc(info: &libc::proc_bsdinfo, procs: &[libc::proc_bsdinfo]) -> LocalProcessInfo {
let mut children = HashMap::new();
for kid in procs {
if kid.pbi_ppid == info.pbi_pid {
children.insert(kid.pbi_pid, build_proc(kid, procs));
}
}
let (executable, argv) = exe_and_args_for_pid_sysctl(info.pbi_pid as _)
.unwrap_or_else(|| (exe_for_pid(info.pbi_pid as _), vec![]));
let name = unsafe { std::ffi::CStr::from_ptr(info.pbi_comm.as_ptr() as _) };
let name = name.to_str().unwrap_or("").to_string();
LocalProcessInfo {
pid: info.pbi_pid,
ppid: info.pbi_ppid,
name,
executable,
cwd: cwd_for_pid(info.pbi_pid as _),
argv,
start_time: info.pbi_start_tvsec,
status: LocalProcessStatus::from(info.pbi_status),
children,
}
}
if let Some(info) = procs.iter().find(|info| info.pbi_pid == pid) {
Some(build_proc(info, &procs))
} else {
None
}
}
}
fn parse_exe_and_argv_sysctl(buf: Vec<u8>) -> Option<(PathBuf, Vec<String>)> {
use libc::c_int;
// The data in our buffer is laid out like this:
// argc - c_int
// exe_path - NUL terminated string
// argv[0] - NUL terminated string
// argv[1] - NUL terminated string
// ...
// argv[n] - NUL terminated string
// envp[0] - NUL terminated string
// ...
let mut ptr = &buf[0..buf.len()];
let argc: c_int = unsafe { std::ptr::read(ptr.as_ptr() as *const c_int) };
ptr = &ptr[std::mem::size_of::<c_int>()..];
fn consume_cstr(ptr: &mut &[u8]) -> Option<String> {
// Parse to the end of a null terminated string
let nul = ptr.iter().position(|&c| c == 0)?;
let s = String::from_utf8_lossy(&ptr[0..nul]).to_owned().to_string();
*ptr = ptr.get(nul + 1..)?;
// Find the position of the first non null byte. `.position()`
// will return None if we run off the end.
if let Some(not_nul) = ptr.iter().position(|&c| c != 0) {
// If there are no trailing nulls, not_nul will be 0
// and this call will be a noop
*ptr = ptr.get(not_nul..)?;
}
Some(s)
}
let exe_path = consume_cstr(&mut ptr)?.into();
let mut args = vec![];
for _ in 0..argc {
args.push(consume_cstr(&mut ptr)?);
}
Some((exe_path, args))
}
#[cfg(test)]
mod tests {
use std::path::Path;
use super::parse_exe_and_argv_sysctl;
#[test]
fn test_trailing_zeros() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 0, 0, 0, 115, 108,
101, 101, 112, 0, 53, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_no_trailing_zeros() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to remove the trailing 0s between the exe_path and the argv
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 115, 108, 101, 101, 112,
0, 53, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_multiple_trailing_zeros() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to add trailing 0s between argv items
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 115, 108, 101, 101,
112, 0, 0, 0, 53, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_trailing_zeros_at_end() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to add zeroes to the end of the buffer
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 115, 108, 101, 101,
112, 0, 0, 0, 53, 0, 0, 0, 0, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_malformed() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to remove the last 0, making a malformed null-terminated string
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 115, 108, 101, 101,
112, 0, 0, 0, 53,
];
assert!(parse_exe_and_argv_sysctl(buf).is_none());
}
} | parse_exe_and_argv_sysctl(buf) | random_line_split |
macos.rs | #![cfg(target_os = "macos")]
use super::*;
use std::ffi::{OsStr, OsString};
use std::os::unix::ffi::{OsStrExt, OsStringExt};
impl From<u32> for LocalProcessStatus {
fn from(s: u32) -> Self {
match s {
1 => Self::Idle,
2 => Self::Run,
3 => Self::Sleep,
4 => Self::Stop,
5 => Self::Zombie,
_ => Self::Unknown,
}
}
}
impl LocalProcessInfo {
pub fn current_working_dir(pid: u32) -> Option<PathBuf> {
let mut pathinfo: libc::proc_vnodepathinfo = unsafe { std::mem::zeroed() };
let size = std::mem::size_of_val(&pathinfo) as libc::c_int;
let ret = unsafe {
libc::proc_pidinfo(
pid as _,
libc::PROC_PIDVNODEPATHINFO,
0,
&mut pathinfo as *mut _ as *mut _,
size,
)
};
if ret != size {
return None;
}
// Workaround a workaround for an old rustc version supported by libc;
// the type of vip_path should just be [c_char; MAXPATHLEN] but it
// is defined as a horrible nested array by the libc crate:
// `[[c_char; 32]; 32]`.
// Urgh. Let's re-cast it as the correct kind of slice.
let vip_path = unsafe {
std::slice::from_raw_parts(
pathinfo.pvi_cdir.vip_path.as_ptr() as *const u8,
libc::MAXPATHLEN as usize,
)
};
let nul = vip_path.iter().position(|&c| c == 0)?;
Some(OsStr::from_bytes(&vip_path[0..nul]).into())
}
pub fn executable_path(pid: u32) -> Option<PathBuf> {
let mut buffer: Vec<u8> = Vec::with_capacity(libc::PROC_PIDPATHINFO_MAXSIZE as _);
let x = unsafe {
libc::proc_pidpath(
pid as _,
buffer.as_mut_ptr() as *mut _,
libc::PROC_PIDPATHINFO_MAXSIZE as _,
)
};
if x <= 0 {
return None;
}
unsafe { buffer.set_len(x as usize) };
Some(OsString::from_vec(buffer).into())
}
pub fn with_root_pid(pid: u32) -> Option<Self> {
/// Enumerate all current process identifiers
fn all_pids() -> Vec<libc::pid_t> {
let num_pids = unsafe { libc::proc_listallpids(std::ptr::null_mut(), 0) };
if num_pids < 1 {
return vec![];
}
// Give a bit of padding to avoid looping if processes are spawning
// rapidly while we're trying to collect this info
const PADDING: usize = 32;
let mut pids: Vec<libc::pid_t> = Vec::with_capacity(num_pids as usize + PADDING);
loop {
let n = unsafe {
libc::proc_listallpids(
pids.as_mut_ptr() as *mut _,
(pids.capacity() * std::mem::size_of::<libc::pid_t>()) as _,
)
};
if n < 1 {
return vec![];
}
let n = n as usize;
if n > pids.capacity() {
pids.reserve(n + PADDING);
continue;
}
unsafe { pids.set_len(n) };
return pids;
}
}
/// Obtain info block for a pid.
/// Note that the process could have gone away since we first
/// observed the pid and the time we call this, so we must
/// be able to tolerate this failing.
fn info_for_pid(pid: libc::pid_t) -> Option<libc::proc_bsdinfo> {
let mut info: libc::proc_bsdinfo = unsafe { std::mem::zeroed() };
let wanted_size = std::mem::size_of::<libc::proc_bsdinfo>() as _;
let res = unsafe {
libc::proc_pidinfo(
pid,
libc::PROC_PIDTBSDINFO,
0,
&mut info as *mut _ as *mut _,
wanted_size,
)
};
if res == wanted_size {
Some(info)
} else {
None
}
}
fn cwd_for_pid(pid: libc::pid_t) -> PathBuf {
LocalProcessInfo::current_working_dir(pid as _).unwrap_or_else(PathBuf::new)
}
fn exe_and_args_for_pid_sysctl(pid: libc::pid_t) -> Option<(PathBuf, Vec<String>)> {
use libc::c_int;
let mut size = 64 * 1024;
let mut buf: Vec<u8> = Vec::with_capacity(size);
let mut mib = [libc::CTL_KERN, libc::KERN_PROCARGS2, pid as c_int];
let res = unsafe {
libc::sysctl(
mib.as_mut_ptr(),
mib.len() as _,
buf.as_mut_ptr() as *mut _,
&mut size,
std::ptr::null_mut(),
0,
)
};
if res == -1 {
return None;
}
if size < (std::mem::size_of::<c_int>() * 2) {
// Not big enough
return None;
}
unsafe { buf.set_len(size) };
parse_exe_and_argv_sysctl(buf)
}
fn exe_for_pid(pid: libc::pid_t) -> PathBuf {
LocalProcessInfo::executable_path(pid as _).unwrap_or_else(PathBuf::new)
}
let procs: Vec<_> = all_pids().into_iter().filter_map(info_for_pid).collect();
fn build_proc(info: &libc::proc_bsdinfo, procs: &[libc::proc_bsdinfo]) -> LocalProcessInfo {
let mut children = HashMap::new();
for kid in procs {
if kid.pbi_ppid == info.pbi_pid {
children.insert(kid.pbi_pid, build_proc(kid, procs));
}
}
let (executable, argv) = exe_and_args_for_pid_sysctl(info.pbi_pid as _)
.unwrap_or_else(|| (exe_for_pid(info.pbi_pid as _), vec![]));
let name = unsafe { std::ffi::CStr::from_ptr(info.pbi_comm.as_ptr() as _) };
let name = name.to_str().unwrap_or("").to_string();
LocalProcessInfo {
pid: info.pbi_pid,
ppid: info.pbi_ppid,
name,
executable,
cwd: cwd_for_pid(info.pbi_pid as _),
argv,
start_time: info.pbi_start_tvsec,
status: LocalProcessStatus::from(info.pbi_status),
children,
}
}
if let Some(info) = procs.iter().find(|info| info.pbi_pid == pid) {
Some(build_proc(info, &procs))
} else {
None
}
}
}
fn parse_exe_and_argv_sysctl(buf: Vec<u8>) -> Option<(PathBuf, Vec<String>)> {
use libc::c_int;
// The data in our buffer is laid out like this:
// argc - c_int
// exe_path - NUL terminated string
// argv[0] - NUL terminated string
// argv[1] - NUL terminated string
// ...
// argv[n] - NUL terminated string
// envp[0] - NUL terminated string
// ...
let mut ptr = &buf[0..buf.len()];
let argc: c_int = unsafe { std::ptr::read(ptr.as_ptr() as *const c_int) };
ptr = &ptr[std::mem::size_of::<c_int>()..];
fn | (ptr: &mut &[u8]) -> Option<String> {
// Parse to the end of a null terminated string
let nul = ptr.iter().position(|&c| c == 0)?;
let s = String::from_utf8_lossy(&ptr[0..nul]).to_owned().to_string();
*ptr = ptr.get(nul + 1..)?;
// Find the position of the first non null byte. `.position()`
// will return None if we run off the end.
if let Some(not_nul) = ptr.iter().position(|&c| c != 0) {
// If there are no trailing nulls, not_nul will be 0
// and this call will be a noop
*ptr = ptr.get(not_nul..)?;
}
Some(s)
}
let exe_path = consume_cstr(&mut ptr)?.into();
let mut args = vec![];
for _ in 0..argc {
args.push(consume_cstr(&mut ptr)?);
}
Some((exe_path, args))
}
#[cfg(test)]
mod tests {
use std::path::Path;
use super::parse_exe_and_argv_sysctl;
#[test]
fn test_trailing_zeros() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 0, 0, 0, 115, 108,
101, 101, 112, 0, 53, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_no_trailing_zeros() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to remove the trailing 0s between the exe_path and the argv
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 115, 108, 101, 101, 112,
0, 53, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_multiple_trailing_zeros() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to add trailing 0s between argv items
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 115, 108, 101, 101,
112, 0, 0, 0, 53, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_trailing_zeros_at_end() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to add zeroes to the end of the buffer
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 115, 108, 101, 101,
112, 0, 0, 0, 53, 0, 0, 0, 0, 0,
];
let (exe_path, argv) = parse_exe_and_argv_sysctl(buf).unwrap();
assert_eq!(exe_path, Path::new("/bin/sleep").to_path_buf());
assert_eq!(argv, vec!["sleep".to_string(), "5".to_string()]);
}
#[test]
fn test_malformed() {
// Example data generated from running 'sleep 5' on the commit author's local machine,
// then modified to remove the last 0, making a malformed null-terminated string
let buf = vec![
2, 0, 0, 0, 47, 98, 105, 110, 47, 115, 108, 101, 101, 112, 0, 0, 0, 115, 108, 101, 101,
112, 0, 0, 0, 53,
];
assert!(parse_exe_and_argv_sysctl(buf).is_none());
}
}
| consume_cstr | identifier_name |
wallet.rs | //! The wallet module.
//!
//! Since this wallet implementation is supposed to work on top of both a
//! Bitcoin Core node as an Elements or Liquid node, we avoid using the
//! specialized bitcoincore-rpc and liquid-rpc client interfaces, but use
//! general call methods so we can leverage the common parts of the raw
//! responses. This might make the code a but harder to read or error-prone
//! but it avoids having very big code duplication.
//!
#![allow(clippy::redundant_field_names)]
use hex;
use std::collections::HashMap;
use std::str::FromStr;
use std::time::{Duration, Instant};
use std::{cell, fmt};
use bitcoin::{util::bip32, Address, Network as BNetwork};
use bitcoin_hashes::hex::{FromHex, ToHex};
use bitcoin_hashes::sha256d;
use bitcoincore_rpc::{json as rpcjson, Client as RpcClient, RpcApi};
use serde_json::Value;
#[cfg(feature = "liquid")]
use elements;
use crate::coins;
use crate::constants::{SAT_PER_BIT, SAT_PER_BTC, SAT_PER_MBTC};
use crate::errors::{Error, OptionExt};
use crate::network::{Network, NetworkId};
use crate::util::{btc_to_isat, btc_to_usat, extend, f64_from_val, fmt_time, SECP};
use crate::wally;
const PER_PAGE: usize = 30;
const FEE_ESTIMATES_TTL: Duration = Duration::from_secs(240);
/// Meta-information about an address that we need to store.
///
/// Since we don't have a persistent database, we use the Core wallet to store
/// the information required for operating GDK. For addresses, it's important
/// to keep the information needed to re-derive the private key: an identifier
/// of the master private key (i.e. the fingerprint) and the derivation path.
///
/// GDK also allows storing memos on transaction. Because Core doesn't support
/// transaction labels but only address labels, we inject the tx memos inside
/// the address label of an (preferably the first) address used in that
/// transaction.
///
/// This struct is used to structure the data stored in a label. It is
/// serialized as JSON when stored in a label, so that new fields can easily
/// be added.
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
pub(crate) struct AddressMeta {
/// The fingerprint of the extended private key used to derive the
/// private key for this address.
#[serde(rename = "fp", skip_serializing_if = "Option::is_none")]
pub fingerprint: Option<bip32::Fingerprint>,
/// The derivation path from the extended private key identified
/// by the fingerprint field.
#[serde(skip_serializing_if = "Option::is_none")]
pub child: Option<bip32::ChildNumber>,
/// Since an address can be used in multiple transactions, we keep a map
/// from the txid to the memo for the transaction.
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
pub txmemo: HashMap<sha256d::Hash, String>,
}
impl AddressMeta {
/// Parse a label from Core.
pub fn from_label<S: AsRef<str>>(l: Option<S>) -> Result<AddressMeta, Error> {
match l {
Some(ref s) if s.as_ref().is_empty() => Ok(Default::default()),
Some(s) => Ok(serde_json::from_str(s.as_ref())?),
None => Ok(Default::default()),
}
}
/// Serialize to string to save into a label.
pub fn to_label(&self) -> Result<String, Error> {
Ok(serde_json::to_string(self)?)
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
struct PersistentWalletState {
#[serde(rename = "nec")]
next_external_child: bip32::ChildNumber,
#[serde(rename = "nic")]
next_internal_child: bip32::ChildNumber,
}
pub struct Wallet {
network: &'static Network,
rpc: RpcClient,
mnemonic: String,
// For the BIP32 keys, the network variable should be ignored and not used.
/// The BIP32 master extended private key.
master_xpriv: bip32::ExtendedPrivKey,
/// The BIP32 extended private key for external addresses.
external_xpriv: bip32::ExtendedPrivKey,
/// The BIP32 extended private key for internal (i.e. change) addresses.
internal_xpriv: bip32::ExtendedPrivKey,
/// The master blinding key.
#[cfg(feature = "liquid")]
master_blinding_key: [u8; 64],
next_external_child: cell::Cell<bip32::ChildNumber>,
next_internal_child: cell::Cell<bip32::ChildNumber>,
tip: Option<sha256d::Hash>,
last_tx: Option<sha256d::Hash>,
cached_fees: (Value, Instant),
}
impl Wallet {
/// Get the address to use to store persistent state.
fn persistent_state_address(
network: NetworkId,
master_xpriv: &bip32::ExtendedPrivKey,
) -> String {
let child = bip32::ChildNumber::from_hardened_idx(350).unwrap();
let child_xpriv = master_xpriv.derive_priv(&SECP, &[child]).unwrap();
let child_xpub = bip32::ExtendedPubKey::from_private(&SECP, &child_xpriv);
match network {
#[cfg(feature = "liquid")]
NetworkId::Elements(enet) => elements::Address::p2wpkh(
&child_xpub.public_key,
None,
coins::liq::address_params(enet),
)
.to_string(),
NetworkId::Bitcoin(bnet) => Address::p2wpkh(&child_xpub.public_key, bnet).to_string(),
#[cfg(not(feature = "liquid"))]
_ => unimplemented!(),
}
}
/// Store the persistent wallet state.
fn save_persistent_state(&self) -> Result<(), Error> {
let state = PersistentWalletState {
next_external_child: self.next_external_child.get(),
next_internal_child: self.next_internal_child.get(),
};
let store_addr = Wallet::persistent_state_address(self.network.id(), &self.master_xpriv);
// Generic call for liquid compat.
self.rpc.call("setlabel", &[store_addr.into(), serde_json::to_string(&state)?.into()])?;
Ok(())
}
/// Load the persistent wallet state from the node.
#[allow(clippy::match_wild_err_arm)]
fn load_persistent_state(
rpc: &bitcoincore_rpc::Client,
state_addr: &str,
) -> Result<PersistentWalletState, Error> {
let info: Value = rpc.call("getaddressinfo", &[state_addr.into()])?;
match info.get("label") {
None => Err(Error::WalletNotRegistered),
Some(&Value::String(ref label)) => {
Ok(match serde_json::from_str::<PersistentWalletState>(label) {
Err(_) => panic!(
"corrupt persistent wallet state label (address: {}): {}",
state_addr, label
),
Ok(s) => s,
})
}
Some(_) => unreachable!(),
}
}
/// Calculates the bip32 seeds from the mnemonic phrase.
/// In order are returned:
/// - the master xpriv
/// - the external address xpriv
/// - the internal address xpriv
fn calc_xkeys(
seed: &[u8],
) -> (bip32::ExtendedPrivKey, bip32::ExtendedPrivKey, bip32::ExtendedPrivKey) {
// Network isn't of importance here.
let master_xpriv =
bip32::ExtendedPrivKey::new_master(BNetwork::Bitcoin, &seed[..]).unwrap();
// Add BIP-44 derivations for external and internal addresses.
let external_xpriv = master_xpriv
.derive_priv(&SECP, &bip32::DerivationPath::from_str("m/44'/0'/0'/0'/0").unwrap())
.unwrap();
let internal_xpriv = master_xpriv
.derive_priv(&SECP, &bip32::DerivationPath::from_str("m/44'/0'/0'/0'/1").unwrap())
.unwrap();
(master_xpriv, external_xpriv, internal_xpriv)
}
/// Register a new [Wallet].
pub fn register(network: &'static Network, mnemonic: &str) -> Result<Wallet, Error> {
let seed = wally::bip39_mnemonic_to_seed(&mnemonic, "")?;
let (master_xpriv, external_xpriv, internal_xpriv) = Wallet::calc_xkeys(&seed);
let fp = hex::encode(master_xpriv.fingerprint(&SECP).as_bytes());
// create the wallet in Core
let tmp_rpc = network.connect(None)?;
match tmp_rpc.create_wallet(fp.as_str(), Some(true))?.warning {
None => {}
Some(ref s) if s.is_empty() => {}
Some(warning) => {
warn!("Received warning when creating wallet {} in Core: {}", fp, warning,)
}
}
let rpc = network.connect(Some(&fp))?;
// Check if the user was already registered.
let state_addr = Wallet::persistent_state_address(network.id(), &master_xpriv);
match Wallet::load_persistent_state(&rpc, &state_addr) {
Err(Error::WalletNotRegistered) => {} // good
Ok(_) => return Err(Error::WalletAlreadyRegistered),
Err(e) => {
warn!("Unexpected error while registering wallet: {}", e);
return Err(e);
}
}
let wallet = Wallet {
network: network,
rpc: rpc,
mnemonic: mnemonic.to_owned(),
master_xpriv: master_xpriv,
external_xpriv: external_xpriv,
internal_xpriv: internal_xpriv,
#[cfg(feature = "liquid")]
master_blinding_key: wally::asset_blinding_key_from_seed(&seed),
next_external_child: cell::Cell::new(bip32::ChildNumber::from_normal_idx(0).unwrap()),
next_internal_child: cell::Cell::new(bip32::ChildNumber::from_normal_idx(0).unwrap()),
tip: None,
last_tx: None,
cached_fees: (Value::Null, Instant::now() - FEE_ESTIMATES_TTL * 2),
};
wallet.save_persistent_state()?;
Ok(wallet)
}
/// Login to an existing [Wallet].
pub fn login(network: &'static Network, mnemonic: &str) -> Result<Wallet, Error> {
let seed = wally::bip39_mnemonic_to_seed(&mnemonic, "")?;
let (master_xpriv, external_xpriv, internal_xpriv) = Wallet::calc_xkeys(&seed);
let fp = hex::encode(master_xpriv.fingerprint(&SECP).as_bytes());
let tmp_rpc = network.connect(None)?;
tmp_rpc.load_wallet(&fp)?;
let rpc = network.connect(Some(&fp))?;
let state_addr = Wallet::persistent_state_address(network.id(), &master_xpriv);
let state = Wallet::load_persistent_state(&rpc, &state_addr)?;
Ok(Wallet {
network: network,
rpc: rpc,
mnemonic: mnemonic.to_owned(),
master_xpriv: master_xpriv,
external_xpriv: external_xpriv,
internal_xpriv: internal_xpriv,
#[cfg(feature = "liquid")]
master_blinding_key: wally::asset_blinding_key_from_seed(&seed),
next_external_child: cell::Cell::new(state.next_external_child),
next_internal_child: cell::Cell::new(state.next_internal_child),
tip: None,
last_tx: None,
cached_fees: (Value::Null, Instant::now() - FEE_ESTIMATES_TTL * 2),
})
}
pub fn fingerprint(&self) -> bip32::Fingerprint {
self.master_xpriv.fingerprint(&SECP)
}
pub fn logout(self) -> Result<(), Error> {
self.rpc.unload_wallet(None)?;
Ok(())
}
pub fn mnemonic(&self) -> String {
self.mnemonic.clone()
}
fn derive_private_key(
&self,
fp: bip32::Fingerprint,
child: bip32::ChildNumber,
) -> Result<secp256k1::SecretKey, Error> {
let xpriv = if fp == self.external_xpriv.fingerprint(&SECP) { | error!("Address is labeled with unknown master xpriv fingerprint: {:?}", fp);
return Err(Error::CorruptNodeData);
};
let privkey = xpriv.derive_priv(&SECP, &[child])?.private_key;
Ok(privkey.key)
}
pub fn updates(&mut self) -> Result<Vec<Value>, Error> {
let mut msgs = vec![];
// check for new blocks
let tip = self.rpc.get_best_block_hash()?;
if self.tip != Some(tip) {
let info: Value = self.rpc.call("getblock", &[tip.to_hex().into(), 1.into()])?;
msgs.push(json!({
"event": "block",
"block": {
"block_height": info["height"].as_u64().req()?,
"block_hash": tip.to_hex()
}
}));
self.tip = Some(tip);
}
// check for new transactions
// XXX does the app care about the transaction data in the event?
if let Some(last_tx) = self._get_transactions(1, 0)?.0.get(0) {
let txid = last_tx["txhash"].as_str().req()?;
let txid = sha256d::Hash::from_hex(txid)?;
if self.last_tx != Some(txid) {
self.last_tx = Some(txid);
msgs.push(json!({ "event": "transaction", "transaction": last_tx }));
}
}
// update fees once every FEE_ESTIMATES_TTL
if self.cached_fees.1.elapsed() >= FEE_ESTIMATES_TTL {
self.cached_fees = (self._make_fee_estimates()?, Instant::now());
msgs.push(json!({ "event": "fees", "fees": self.cached_fees.0 }));
}
// TODO:
// {"event":"subaccount","subaccount":{"bits":"701144.66","btc":"0.70114466","fiat":"0.7712591260000000622741556099981585311432","fiat_currency":"EUR","fiat_rate":"1.10000000000000008881784197001252","has_transactions":true,"mbtc":"701.14466","name":"","pointer":0,"receiving_id":"GA3MQKVp6pP7royXDuZcw55F2TXTgg","recovery_chain_code":"","recovery_pub_key":"","satoshi":70114466,"type":"2of2","ubtc":"701144.66"}}
// XXX use zmq?
Ok(msgs)
}
pub fn get_account(&self) -> Result<Value, Error> {
let has_transactions = self._get_transactions(1, 0)?.1;
extend(
json!({
"type": "core",
"pointer": 0,
"receiving_id": "",
"name": "RPC wallet",
"has_transactions": has_transactions,
}),
self._get_balance(0)?,
)
}
pub fn get_balance(&self, details: &Value) -> Result<Value, Error> {
let min_conf = details["num_confs"].as_u64().req()? as u32;
self._get_balance(min_conf)
}
fn _get_balance(&self, min_conf: u32) -> Result<Value, Error> {
//TODO(stevenroose) implement in rust-bitcoincore-rpc once bitcoin::Amount lands
let mut args = vec![Value::Null, json!(min_conf), json!(true)];
#[cfg(feature = "liquid")]
{
if let NetworkId::Elements(net) = self.network.id() {
args.push(coins::liq::asset_hex(net).into());
}
}
let balance: f64 = self.rpc.call("getbalance", &args)?;
Ok(self._convert_satoshi(btc_to_usat(balance)))
}
pub fn get_transactions(&self, details: &Value) -> Result<Value, Error> {
let page = details["page_id"].as_u64().req()? as usize;
let (txs, potentially_has_more) = self._get_transactions(PER_PAGE, PER_PAGE * page)?;
Ok(json!({
"list": txs,
"page_id": page,
"next_page_id": if potentially_has_more { Some(page+1) } else { None },
}))
}
fn _get_transactions(&self, limit: usize, start: usize) -> Result<(Vec<Value>, bool), Error> {
// fetch listtranssactions
let txdescs: Vec<Value> = self
.rpc
.call("listtransactions", &["*".into(), limit.into(), start.into(), true.into()])?;
let potentially_has_more = txdescs.len() == limit;
// fetch full transactions and convert to GDK format
let mut txs = Vec::new();
for desc in txdescs.into_iter() {
let txid = sha256d::Hash::from_hex(desc["txid"].as_str().req()?)?;
let blockhash = &desc["blockhash"];
let tx_hex: String = self.rpc.call(
"getrawtransaction",
&[txid.to_hex().into(), false.into(), blockhash.clone()],
)?;
txs.push(format_gdk_tx(&desc, &hex::decode(&tx_hex)?, self.network.id())?);
}
Ok((txs, potentially_has_more))
}
pub fn get_transaction(&self, txid: &str) -> Result<Value, Error> {
let txid = sha256d::Hash::from_hex(txid)?;
let desc: Value = self.rpc.call("gettransaction", &[txid.to_hex().into(), true.into()])?;
let raw_tx = hex::decode(desc["hex"].as_str().req()?)?;
format_gdk_tx(&desc, &raw_tx, self.network.id())
}
pub fn create_transaction(&self, details: &Value) -> Result<String, Error> {
debug!("create_transaction(): {:?}", details);
let unfunded_tx = match self.network.id() {
NetworkId::Bitcoin(..) => coins::btc::create_transaction(&self.rpc, details)?,
NetworkId::Elements(..) => coins::liq::create_transaction(&self.rpc, details)?,
};
debug!("create_transaction unfunded tx: {:?}", hex::encode(&unfunded_tx));
// TODO explicit handling for id_no_amount_specified id_fee_rate_is_below_minimum id_invalid_replacement_fee_rate
// id_send_all_requires_a_single_output
Ok(hex::encode(unfunded_tx))
}
pub fn sign_transaction(&self, details: &Value) -> Result<String, Error> {
debug!("sign_transaction(): {:?}", details);
let change_address = self.next_address(&self.internal_xpriv, &self.next_internal_child)?;
// If we don't have any inputs, we can fail early.
let unspent: Vec<Value> = self.rpc.call("listunspent", &[0.into()])?;
if unspent.is_empty() {
return Err(Error::NoUtxosFound);
}
debug!("list_unspent: {:?}", unspent);
let raw_tx = match self.network.id() {
NetworkId::Bitcoin(_) => {
coins::btc::sign_transaction(&self.rpc, details, &change_address, |fp, child| {
self.derive_private_key(*fp, *child)
})?
}
NetworkId::Elements(net) => coins::liq::sign_transaction(
&self.rpc,
net,
details,
&change_address,
|fp, child| self.derive_private_key(*fp, *child),
)?,
};
let hex_tx = hex::encode(&raw_tx);
//TODO(stevenroose) remove when confident in signing code
let ret: Vec<Value> = self.rpc.call("testmempoolaccept", &[vec![hex_tx.clone()].into()])?;
let accept = ret.into_iter().next().unwrap();
if !(accept["allowed"].as_bool().req()?) {
error!(
"sign_transaction(): signed tx is not valid: {}",
accept["reject-reason"].as_str().req()?
);
// TODO(stevenroose) should we return an error??
}
Ok(hex_tx)
}
pub fn send_transaction(&self, details: &Value) -> Result<String, Error> {
let tx_hex = details["hex"].as_str().req()?;
Ok(self.rpc.call::<String>("sendrawtransaction", &[tx_hex.into()])?)
}
pub fn send_raw_transaction(&self, tx_hex: &str) -> Result<String, Error> {
Ok(self.rpc.call::<String>("sendrawtransaction", &[tx_hex.into()])?)
}
/// Return the next address for the derivation and import it in Core.
fn next_address(
&self,
xpriv: &bip32::ExtendedPrivKey,
child: &cell::Cell<bip32::ChildNumber>,
) -> Result<String, Error> {
let child_xpriv = xpriv.derive_priv(&SECP, &[child.get()])?;
let child_xpub = bip32::ExtendedPubKey::from_private(&SECP, &child_xpriv);
let meta = AddressMeta {
fingerprint: Some(xpriv.fingerprint(&SECP)),
child: Some(child.get()),
..Default::default()
};
let address_str = match self.network.id() {
#[cfg(feature = "liquid")]
NetworkId::Elements(enet) => {
let mut addr = elements::Address::p2shwpkh(
&child_xpub.public_key,
None,
coins::liq::address_params(enet),
);
let blinding_key = wally::asset_blinding_key_to_ec_private_key(
&self.master_blinding_key,
&addr.script_pubkey(),
);
let blinding_pubkey = secp256k1::PublicKey::from_secret_key(&SECP, &blinding_key);
addr.blinding_pubkey = Some(blinding_pubkey);
// Store blinding privkey in the node.
let addr_str = addr.to_string();
coins::liq::store_blinding_key(&self.rpc, &addr_str, &blinding_key)?;
addr_str
}
NetworkId::Bitcoin(bnet) => Address::p2wpkh(&child_xpub.public_key, bnet).to_string(),
#[cfg(not(feature = "liquid"))]
_ => unimplemented!(),
};
// Since this is a newly generated address, rescanning is not required.
self.rpc.import_public_key(&child_xpub.public_key, Some(&meta.to_label()?), Some(false))?;
child.set(match child.get() {
bip32::ChildNumber::Normal {
index,
} => bip32::ChildNumber::from_normal_idx(index + 1)?,
_ => unreachable!(),
});
self.save_persistent_state()?;
Ok(address_str)
}
pub fn get_receive_address(&self, _details: &Value) -> Result<Value, Error> {
let address = self.next_address(&self.external_xpriv, &self.next_external_child)?;
// {
// "address": "2N2x4EgizS2w3DUiWYWW9pEf4sGYRfo6PAX",
// "address_type": "p2wsh",
// "branch": 1,
// "pointer": 13,
// "script": "52210338832debc5e15ce143d5cf9241147ac0019e7516d3d9569e04b0e18f3278718921025dfaa85d64963252604e1b139b40182bb859a9e2e1aa2904876c34e82158d85452ae",
// "script_type": 14,
// "subtype": null
// }
Ok(json!({
"address": address,
"address_type": "p2wpkh",
}))
}
pub fn get_fee_estimates(&self) -> Option<&Value> {
// will not be available before the first "tick", which should
// happen as soon as GA_connect initializes the wallet
if self.cached_fees.0.is_null() {
None
} else {
Some(&self.cached_fees.0)
}
}
pub fn _make_fee_estimates(&self) -> Result<Value, Error> {
let mempoolinfo: Value = self.rpc.call("getmempoolinfo", &[])?;
let minrelayfee = json!(btc_to_usat(mempoolinfo["minrelaytxfee"].as_f64().req()? / 1000.0));
let mut estimates: Vec<Value> = (2u16..25u16)
.map(|target| {
let est: rpcjson::EstimateSmartFeeResult =
self.rpc.call("estimatesmartfee", &[json!(target)])?;
Ok(est.feerate.unwrap_or_else(|| minrelayfee.clone()))
})
.collect::<Result<Vec<Value>, Error>>()?;
// prepend the estimate for 2 blocks as the estimate for 1 blocks
estimates.insert(0, estimates[0].clone());
// prepend the minrelayfee as the first item
estimates.insert(0, minrelayfee);
// the final format is: [ minrelayfee, est_for_2_blocks, est_for_2_blocks, est_for_3_blocks, ... ]
Ok(json!(estimates))
}
pub fn get_available_currencies(&self) -> Value {
// TODO
json!({ "all": [ "USD" ], "per_exchange": { "BITSTAMP": [ "USD" ] } })
}
pub fn exchange_rate(&self, _currency: &str) -> f64 {
// TODO
420.00
}
pub fn convert_amount(&self, details: &Value) -> Result<Value, Error> {
// XXX should convert_amonut support negative numbers?
let satoshi = details["satoshi"]
.as_u64()
.or_else(|| f64_from_val(&details["btc"]).map(btc_to_usat))
.or_else(|| f64_from_val(&details["fiat"]).map(|x| self._fiat_to_usat(x)))
.or_err("id_no_amount_specified")?;
Ok(self._convert_satoshi(satoshi))
}
pub fn set_tx_memo(&self, txid: &str, memo: &str) -> Result<(), Error> {
// we can't really set a tx memo, so we fake it by setting a memo on the address
let txid = sha256d::Hash::from_hex(txid)?;
let txdesc: Value =
self.rpc.call("gettransaction", &[txid.to_hex().into(), true.into()])?;
let details = txdesc["details"].as_array().req()?;
if details.is_empty() {
throw!("Tx info for {} does not contain any details", txid);
}
// We just need any usable address label. Let's just take the first
// and hope Core always orders them in the same way, so we can also
// efficiently find it back later. We explicitly tag this label with
// the txid of this tx, so that if an address gets assigned multiple
// transaction memos, they won't conflict.
let detail = &details[0];
let mut label = AddressMeta::from_label(detail["label"].as_str())?;
label.txmemo.insert(txid, memo.to_owned());
debug!("set_tx_memo() for {}, memo={}, address={}", txid, memo, detail["address"]);
self.rpc.call("setlabel", &[detail["address"].clone(), label.to_label()?.into()])?;
Ok(())
}
fn _convert_satoshi(&self, amount: u64) -> Value {
let currency = "USD"; // TODO
let exchange_rate = self.exchange_rate(currency);
let amount_f = amount as f64;
json!({
"satoshi": amount.to_string(),
"bits": (amount_f / SAT_PER_BIT).to_string(),
"ubtc": (amount_f / SAT_PER_BIT).to_string(), // XXX why twice? same as bits
"mbtc": (amount_f / SAT_PER_MBTC).to_string(),
"btc": (amount_f / SAT_PER_BTC).to_string(),
"fiat_rate": (exchange_rate).to_string(),
"fiat_currency": currency,
"fiat": (amount_f / SAT_PER_BTC * exchange_rate).to_string(),
})
}
fn _fiat_to_usat(&self, amount: f64) -> u64 {
btc_to_usat(amount / self.exchange_rate("USD"))
}
}
impl fmt::Debug for Wallet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Wallet {{ }}")
}
}
/// Finds a transction memo inside the transaction description field.
/// This method can be provided with a response from either
/// `gettransaction` or `listtransactions`.
/// It returns "" if none if found.
fn find_memo_in_desc(txid: sha256d::Hash, txdesc: &Value) -> Result<String, Error> {
// First we try the top-level label field from listtransactions.
if let Some(label) = txdesc["label"].as_str() {
let meta = AddressMeta::from_label(Some(label))?;
if let Some(memo) = meta.txmemo.get(&txid) {
return Ok(memo.to_owned());
}
}
// Then we iterate over the details array.
if let Some(details) = txdesc["details"].as_array() {
for detail in details {
let meta = AddressMeta::from_label(detail["label"].as_str())?;
if let Some(memo) = meta.txmemo.get(&txid) {
return Ok(memo.to_owned());
}
}
}
Ok(String::new())
}
fn format_gdk_tx(txdesc: &Value, raw_tx: &[u8], network: NetworkId) -> Result<Value, Error> {
let txid = sha256d::Hash::from_hex(txdesc["txid"].as_str().req()?)?;
//TODO(stevenroose) optimize with Amount
let amount = match network {
NetworkId::Elements(..) => btc_to_isat(match txdesc["amount"] {
serde_json::Value::Object(ref v) => v["bitcoin"].as_f64().req()?,
ref v => v.as_f64().req()?,
}),
NetworkId::Bitcoin(..) => btc_to_isat(txdesc["amount"].as_f64().req()?),
};
let fee = txdesc["fee"].as_f64().map_or(0, |f| btc_to_usat(f * -1.0));
let type_str = match txdesc["category"].as_str() {
// for listtransactions, read out the category field
Some(category) => match category {
"send" => "outgoing",
"receive" => "incoming",
"immature" => "incoming",
_ => throw!("invalid tx category"),
},
// gettransaction doesn't have a top-level category,
// figure it out from the amount instead.
None => {
if amount > 0 {
"incoming"
} else {
"outgoing"
}
}
};
let tx_props = match network {
NetworkId::Bitcoin(_) => coins::btc::tx_props(&raw_tx)?,
NetworkId::Elements(_) => coins::liq::tx_props(&raw_tx)?,
};
let vsize = tx_props["transaction_vsize"].as_u64().unwrap();
let ret = json!({
"block_height": 1,
"created_at": fmt_time(txdesc["time"].as_u64().req()?),
"type": type_str,
"memo": find_memo_in_desc(txid, &txdesc)?,
"txhash": txid.to_hex(),
"transaction": hex::encode(&raw_tx),
"satoshi": amount,
"rbf_optin": txdesc["bip125-replaceable"].as_str().req()? == "yes",
"cap_cpfp": false, // TODO
"can_rbf": false, // TODO
"has_payment_request": false, // TODO
"server_signed": false,
"user_signed": true,
"instant": false,
"fee": fee,
"fee_rate": (fee as f64)/(vsize as f64),
"addressees": [], // notice the extra "e" -- its intentional
"inputs": [], // tx.input.iter().map(format_gdk_input).collect(),
"outputs": [], //tx.output.iter().map(format_gdk_output).collect(),
});
Ok(extend(ret, tx_props)?)
} | self.external_xpriv
} else if fp == self.internal_xpriv.fingerprint(&SECP) {
self.internal_xpriv
} else { | random_line_split |
wallet.rs | //! The wallet module.
//!
//! Since this wallet implementation is supposed to work on top of both a
//! Bitcoin Core node as an Elements or Liquid node, we avoid using the
//! specialized bitcoincore-rpc and liquid-rpc client interfaces, but use
//! general call methods so we can leverage the common parts of the raw
//! responses. This might make the code a but harder to read or error-prone
//! but it avoids having very big code duplication.
//!
#![allow(clippy::redundant_field_names)]
use hex;
use std::collections::HashMap;
use std::str::FromStr;
use std::time::{Duration, Instant};
use std::{cell, fmt};
use bitcoin::{util::bip32, Address, Network as BNetwork};
use bitcoin_hashes::hex::{FromHex, ToHex};
use bitcoin_hashes::sha256d;
use bitcoincore_rpc::{json as rpcjson, Client as RpcClient, RpcApi};
use serde_json::Value;
#[cfg(feature = "liquid")]
use elements;
use crate::coins;
use crate::constants::{SAT_PER_BIT, SAT_PER_BTC, SAT_PER_MBTC};
use crate::errors::{Error, OptionExt};
use crate::network::{Network, NetworkId};
use crate::util::{btc_to_isat, btc_to_usat, extend, f64_from_val, fmt_time, SECP};
use crate::wally;
const PER_PAGE: usize = 30;
const FEE_ESTIMATES_TTL: Duration = Duration::from_secs(240);
/// Meta-information about an address that we need to store.
///
/// Since we don't have a persistent database, we use the Core wallet to store
/// the information required for operating GDK. For addresses, it's important
/// to keep the information needed to re-derive the private key: an identifier
/// of the master private key (i.e. the fingerprint) and the derivation path.
///
/// GDK also allows storing memos on transaction. Because Core doesn't support
/// transaction labels but only address labels, we inject the tx memos inside
/// the address label of an (preferably the first) address used in that
/// transaction.
///
/// This struct is used to structure the data stored in a label. It is
/// serialized as JSON when stored in a label, so that new fields can easily
/// be added.
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
pub(crate) struct AddressMeta {
/// The fingerprint of the extended private key used to derive the
/// private key for this address.
#[serde(rename = "fp", skip_serializing_if = "Option::is_none")]
pub fingerprint: Option<bip32::Fingerprint>,
/// The derivation path from the extended private key identified
/// by the fingerprint field.
#[serde(skip_serializing_if = "Option::is_none")]
pub child: Option<bip32::ChildNumber>,
/// Since an address can be used in multiple transactions, we keep a map
/// from the txid to the memo for the transaction.
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
pub txmemo: HashMap<sha256d::Hash, String>,
}
impl AddressMeta {
/// Parse a label from Core.
pub fn from_label<S: AsRef<str>>(l: Option<S>) -> Result<AddressMeta, Error> {
match l {
Some(ref s) if s.as_ref().is_empty() => Ok(Default::default()),
Some(s) => Ok(serde_json::from_str(s.as_ref())?),
None => Ok(Default::default()),
}
}
/// Serialize to string to save into a label.
pub fn to_label(&self) -> Result<String, Error> {
Ok(serde_json::to_string(self)?)
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
struct PersistentWalletState {
#[serde(rename = "nec")]
next_external_child: bip32::ChildNumber,
#[serde(rename = "nic")]
next_internal_child: bip32::ChildNumber,
}
pub struct Wallet {
network: &'static Network,
rpc: RpcClient,
mnemonic: String,
// For the BIP32 keys, the network variable should be ignored and not used.
/// The BIP32 master extended private key.
master_xpriv: bip32::ExtendedPrivKey,
/// The BIP32 extended private key for external addresses.
external_xpriv: bip32::ExtendedPrivKey,
/// The BIP32 extended private key for internal (i.e. change) addresses.
internal_xpriv: bip32::ExtendedPrivKey,
/// The master blinding key.
#[cfg(feature = "liquid")]
master_blinding_key: [u8; 64],
next_external_child: cell::Cell<bip32::ChildNumber>,
next_internal_child: cell::Cell<bip32::ChildNumber>,
tip: Option<sha256d::Hash>,
last_tx: Option<sha256d::Hash>,
cached_fees: (Value, Instant),
}
impl Wallet {
/// Get the address to use to store persistent state.
fn persistent_state_address(
network: NetworkId,
master_xpriv: &bip32::ExtendedPrivKey,
) -> String {
let child = bip32::ChildNumber::from_hardened_idx(350).unwrap();
let child_xpriv = master_xpriv.derive_priv(&SECP, &[child]).unwrap();
let child_xpub = bip32::ExtendedPubKey::from_private(&SECP, &child_xpriv);
match network {
#[cfg(feature = "liquid")]
NetworkId::Elements(enet) => elements::Address::p2wpkh(
&child_xpub.public_key,
None,
coins::liq::address_params(enet),
)
.to_string(),
NetworkId::Bitcoin(bnet) => Address::p2wpkh(&child_xpub.public_key, bnet).to_string(),
#[cfg(not(feature = "liquid"))]
_ => unimplemented!(),
}
}
/// Store the persistent wallet state.
fn save_persistent_state(&self) -> Result<(), Error> {
let state = PersistentWalletState {
next_external_child: self.next_external_child.get(),
next_internal_child: self.next_internal_child.get(),
};
let store_addr = Wallet::persistent_state_address(self.network.id(), &self.master_xpriv);
// Generic call for liquid compat.
self.rpc.call("setlabel", &[store_addr.into(), serde_json::to_string(&state)?.into()])?;
Ok(())
}
/// Load the persistent wallet state from the node.
#[allow(clippy::match_wild_err_arm)]
fn load_persistent_state(
rpc: &bitcoincore_rpc::Client,
state_addr: &str,
) -> Result<PersistentWalletState, Error> {
let info: Value = rpc.call("getaddressinfo", &[state_addr.into()])?;
match info.get("label") {
None => Err(Error::WalletNotRegistered),
Some(&Value::String(ref label)) => {
Ok(match serde_json::from_str::<PersistentWalletState>(label) {
Err(_) => panic!(
"corrupt persistent wallet state label (address: {}): {}",
state_addr, label
),
Ok(s) => s,
})
}
Some(_) => unreachable!(),
}
}
/// Calculates the bip32 seeds from the mnemonic phrase.
/// In order are returned:
/// - the master xpriv
/// - the external address xpriv
/// - the internal address xpriv
fn calc_xkeys(
seed: &[u8],
) -> (bip32::ExtendedPrivKey, bip32::ExtendedPrivKey, bip32::ExtendedPrivKey) {
// Network isn't of importance here.
let master_xpriv =
bip32::ExtendedPrivKey::new_master(BNetwork::Bitcoin, &seed[..]).unwrap();
// Add BIP-44 derivations for external and internal addresses.
let external_xpriv = master_xpriv
.derive_priv(&SECP, &bip32::DerivationPath::from_str("m/44'/0'/0'/0'/0").unwrap())
.unwrap();
let internal_xpriv = master_xpriv
.derive_priv(&SECP, &bip32::DerivationPath::from_str("m/44'/0'/0'/0'/1").unwrap())
.unwrap();
(master_xpriv, external_xpriv, internal_xpriv)
}
/// Register a new [Wallet].
pub fn register(network: &'static Network, mnemonic: &str) -> Result<Wallet, Error> {
let seed = wally::bip39_mnemonic_to_seed(&mnemonic, "")?;
let (master_xpriv, external_xpriv, internal_xpriv) = Wallet::calc_xkeys(&seed);
let fp = hex::encode(master_xpriv.fingerprint(&SECP).as_bytes());
// create the wallet in Core
let tmp_rpc = network.connect(None)?;
match tmp_rpc.create_wallet(fp.as_str(), Some(true))?.warning {
None => {}
Some(ref s) if s.is_empty() => {}
Some(warning) => {
warn!("Received warning when creating wallet {} in Core: {}", fp, warning,)
}
}
let rpc = network.connect(Some(&fp))?;
// Check if the user was already registered.
let state_addr = Wallet::persistent_state_address(network.id(), &master_xpriv);
match Wallet::load_persistent_state(&rpc, &state_addr) {
Err(Error::WalletNotRegistered) => {} // good
Ok(_) => return Err(Error::WalletAlreadyRegistered),
Err(e) => {
warn!("Unexpected error while registering wallet: {}", e);
return Err(e);
}
}
let wallet = Wallet {
network: network,
rpc: rpc,
mnemonic: mnemonic.to_owned(),
master_xpriv: master_xpriv,
external_xpriv: external_xpriv,
internal_xpriv: internal_xpriv,
#[cfg(feature = "liquid")]
master_blinding_key: wally::asset_blinding_key_from_seed(&seed),
next_external_child: cell::Cell::new(bip32::ChildNumber::from_normal_idx(0).unwrap()),
next_internal_child: cell::Cell::new(bip32::ChildNumber::from_normal_idx(0).unwrap()),
tip: None,
last_tx: None,
cached_fees: (Value::Null, Instant::now() - FEE_ESTIMATES_TTL * 2),
};
wallet.save_persistent_state()?;
Ok(wallet)
}
/// Login to an existing [Wallet].
pub fn login(network: &'static Network, mnemonic: &str) -> Result<Wallet, Error> {
let seed = wally::bip39_mnemonic_to_seed(&mnemonic, "")?;
let (master_xpriv, external_xpriv, internal_xpriv) = Wallet::calc_xkeys(&seed);
let fp = hex::encode(master_xpriv.fingerprint(&SECP).as_bytes());
let tmp_rpc = network.connect(None)?;
tmp_rpc.load_wallet(&fp)?;
let rpc = network.connect(Some(&fp))?;
let state_addr = Wallet::persistent_state_address(network.id(), &master_xpriv);
let state = Wallet::load_persistent_state(&rpc, &state_addr)?;
Ok(Wallet {
network: network,
rpc: rpc,
mnemonic: mnemonic.to_owned(),
master_xpriv: master_xpriv,
external_xpriv: external_xpriv,
internal_xpriv: internal_xpriv,
#[cfg(feature = "liquid")]
master_blinding_key: wally::asset_blinding_key_from_seed(&seed),
next_external_child: cell::Cell::new(state.next_external_child),
next_internal_child: cell::Cell::new(state.next_internal_child),
tip: None,
last_tx: None,
cached_fees: (Value::Null, Instant::now() - FEE_ESTIMATES_TTL * 2),
})
}
pub fn fingerprint(&self) -> bip32::Fingerprint {
self.master_xpriv.fingerprint(&SECP)
}
pub fn logout(self) -> Result<(), Error> {
self.rpc.unload_wallet(None)?;
Ok(())
}
pub fn mnemonic(&self) -> String {
self.mnemonic.clone()
}
fn derive_private_key(
&self,
fp: bip32::Fingerprint,
child: bip32::ChildNumber,
) -> Result<secp256k1::SecretKey, Error> {
let xpriv = if fp == self.external_xpriv.fingerprint(&SECP) {
self.external_xpriv
} else if fp == self.internal_xpriv.fingerprint(&SECP) {
self.internal_xpriv
} else {
error!("Address is labeled with unknown master xpriv fingerprint: {:?}", fp);
return Err(Error::CorruptNodeData);
};
let privkey = xpriv.derive_priv(&SECP, &[child])?.private_key;
Ok(privkey.key)
}
pub fn updates(&mut self) -> Result<Vec<Value>, Error> {
let mut msgs = vec![];
// check for new blocks
let tip = self.rpc.get_best_block_hash()?;
if self.tip != Some(tip) {
let info: Value = self.rpc.call("getblock", &[tip.to_hex().into(), 1.into()])?;
msgs.push(json!({
"event": "block",
"block": {
"block_height": info["height"].as_u64().req()?,
"block_hash": tip.to_hex()
}
}));
self.tip = Some(tip);
}
// check for new transactions
// XXX does the app care about the transaction data in the event?
if let Some(last_tx) = self._get_transactions(1, 0)?.0.get(0) {
let txid = last_tx["txhash"].as_str().req()?;
let txid = sha256d::Hash::from_hex(txid)?;
if self.last_tx != Some(txid) {
self.last_tx = Some(txid);
msgs.push(json!({ "event": "transaction", "transaction": last_tx }));
}
}
// update fees once every FEE_ESTIMATES_TTL
if self.cached_fees.1.elapsed() >= FEE_ESTIMATES_TTL {
self.cached_fees = (self._make_fee_estimates()?, Instant::now());
msgs.push(json!({ "event": "fees", "fees": self.cached_fees.0 }));
}
// TODO:
// {"event":"subaccount","subaccount":{"bits":"701144.66","btc":"0.70114466","fiat":"0.7712591260000000622741556099981585311432","fiat_currency":"EUR","fiat_rate":"1.10000000000000008881784197001252","has_transactions":true,"mbtc":"701.14466","name":"","pointer":0,"receiving_id":"GA3MQKVp6pP7royXDuZcw55F2TXTgg","recovery_chain_code":"","recovery_pub_key":"","satoshi":70114466,"type":"2of2","ubtc":"701144.66"}}
// XXX use zmq?
Ok(msgs)
}
pub fn get_account(&self) -> Result<Value, Error> {
let has_transactions = self._get_transactions(1, 0)?.1;
extend(
json!({
"type": "core",
"pointer": 0,
"receiving_id": "",
"name": "RPC wallet",
"has_transactions": has_transactions,
}),
self._get_balance(0)?,
)
}
pub fn get_balance(&self, details: &Value) -> Result<Value, Error> {
let min_conf = details["num_confs"].as_u64().req()? as u32;
self._get_balance(min_conf)
}
fn _get_balance(&self, min_conf: u32) -> Result<Value, Error> {
//TODO(stevenroose) implement in rust-bitcoincore-rpc once bitcoin::Amount lands
let mut args = vec![Value::Null, json!(min_conf), json!(true)];
#[cfg(feature = "liquid")]
{
if let NetworkId::Elements(net) = self.network.id() {
args.push(coins::liq::asset_hex(net).into());
}
}
let balance: f64 = self.rpc.call("getbalance", &args)?;
Ok(self._convert_satoshi(btc_to_usat(balance)))
}
pub fn get_transactions(&self, details: &Value) -> Result<Value, Error> {
let page = details["page_id"].as_u64().req()? as usize;
let (txs, potentially_has_more) = self._get_transactions(PER_PAGE, PER_PAGE * page)?;
Ok(json!({
"list": txs,
"page_id": page,
"next_page_id": if potentially_has_more { Some(page+1) } else { None },
}))
}
fn _get_transactions(&self, limit: usize, start: usize) -> Result<(Vec<Value>, bool), Error> {
// fetch listtranssactions
let txdescs: Vec<Value> = self
.rpc
.call("listtransactions", &["*".into(), limit.into(), start.into(), true.into()])?;
let potentially_has_more = txdescs.len() == limit;
// fetch full transactions and convert to GDK format
let mut txs = Vec::new();
for desc in txdescs.into_iter() {
let txid = sha256d::Hash::from_hex(desc["txid"].as_str().req()?)?;
let blockhash = &desc["blockhash"];
let tx_hex: String = self.rpc.call(
"getrawtransaction",
&[txid.to_hex().into(), false.into(), blockhash.clone()],
)?;
txs.push(format_gdk_tx(&desc, &hex::decode(&tx_hex)?, self.network.id())?);
}
Ok((txs, potentially_has_more))
}
pub fn get_transaction(&self, txid: &str) -> Result<Value, Error> {
let txid = sha256d::Hash::from_hex(txid)?;
let desc: Value = self.rpc.call("gettransaction", &[txid.to_hex().into(), true.into()])?;
let raw_tx = hex::decode(desc["hex"].as_str().req()?)?;
format_gdk_tx(&desc, &raw_tx, self.network.id())
}
pub fn create_transaction(&self, details: &Value) -> Result<String, Error> {
debug!("create_transaction(): {:?}", details);
let unfunded_tx = match self.network.id() {
NetworkId::Bitcoin(..) => coins::btc::create_transaction(&self.rpc, details)?,
NetworkId::Elements(..) => coins::liq::create_transaction(&self.rpc, details)?,
};
debug!("create_transaction unfunded tx: {:?}", hex::encode(&unfunded_tx));
// TODO explicit handling for id_no_amount_specified id_fee_rate_is_below_minimum id_invalid_replacement_fee_rate
// id_send_all_requires_a_single_output
Ok(hex::encode(unfunded_tx))
}
pub fn sign_transaction(&self, details: &Value) -> Result<String, Error> {
debug!("sign_transaction(): {:?}", details);
let change_address = self.next_address(&self.internal_xpriv, &self.next_internal_child)?;
// If we don't have any inputs, we can fail early.
let unspent: Vec<Value> = self.rpc.call("listunspent", &[0.into()])?;
if unspent.is_empty() {
return Err(Error::NoUtxosFound);
}
debug!("list_unspent: {:?}", unspent);
let raw_tx = match self.network.id() {
NetworkId::Bitcoin(_) => {
coins::btc::sign_transaction(&self.rpc, details, &change_address, |fp, child| {
self.derive_private_key(*fp, *child)
})?
}
NetworkId::Elements(net) => coins::liq::sign_transaction(
&self.rpc,
net,
details,
&change_address,
|fp, child| self.derive_private_key(*fp, *child),
)?,
};
let hex_tx = hex::encode(&raw_tx);
//TODO(stevenroose) remove when confident in signing code
let ret: Vec<Value> = self.rpc.call("testmempoolaccept", &[vec![hex_tx.clone()].into()])?;
let accept = ret.into_iter().next().unwrap();
if !(accept["allowed"].as_bool().req()?) {
error!(
"sign_transaction(): signed tx is not valid: {}",
accept["reject-reason"].as_str().req()?
);
// TODO(stevenroose) should we return an error??
}
Ok(hex_tx)
}
pub fn send_transaction(&self, details: &Value) -> Result<String, Error> {
let tx_hex = details["hex"].as_str().req()?;
Ok(self.rpc.call::<String>("sendrawtransaction", &[tx_hex.into()])?)
}
pub fn send_raw_transaction(&self, tx_hex: &str) -> Result<String, Error> {
Ok(self.rpc.call::<String>("sendrawtransaction", &[tx_hex.into()])?)
}
/// Return the next address for the derivation and import it in Core.
fn next_address(
&self,
xpriv: &bip32::ExtendedPrivKey,
child: &cell::Cell<bip32::ChildNumber>,
) -> Result<String, Error> {
let child_xpriv = xpriv.derive_priv(&SECP, &[child.get()])?;
let child_xpub = bip32::ExtendedPubKey::from_private(&SECP, &child_xpriv);
let meta = AddressMeta {
fingerprint: Some(xpriv.fingerprint(&SECP)),
child: Some(child.get()),
..Default::default()
};
let address_str = match self.network.id() {
#[cfg(feature = "liquid")]
NetworkId::Elements(enet) => {
let mut addr = elements::Address::p2shwpkh(
&child_xpub.public_key,
None,
coins::liq::address_params(enet),
);
let blinding_key = wally::asset_blinding_key_to_ec_private_key(
&self.master_blinding_key,
&addr.script_pubkey(),
);
let blinding_pubkey = secp256k1::PublicKey::from_secret_key(&SECP, &blinding_key);
addr.blinding_pubkey = Some(blinding_pubkey);
// Store blinding privkey in the node.
let addr_str = addr.to_string();
coins::liq::store_blinding_key(&self.rpc, &addr_str, &blinding_key)?;
addr_str
}
NetworkId::Bitcoin(bnet) => Address::p2wpkh(&child_xpub.public_key, bnet).to_string(),
#[cfg(not(feature = "liquid"))]
_ => unimplemented!(),
};
// Since this is a newly generated address, rescanning is not required.
self.rpc.import_public_key(&child_xpub.public_key, Some(&meta.to_label()?), Some(false))?;
child.set(match child.get() {
bip32::ChildNumber::Normal {
index,
} => bip32::ChildNumber::from_normal_idx(index + 1)?,
_ => unreachable!(),
});
self.save_persistent_state()?;
Ok(address_str)
}
pub fn get_receive_address(&self, _details: &Value) -> Result<Value, Error> {
let address = self.next_address(&self.external_xpriv, &self.next_external_child)?;
// {
// "address": "2N2x4EgizS2w3DUiWYWW9pEf4sGYRfo6PAX",
// "address_type": "p2wsh",
// "branch": 1,
// "pointer": 13,
// "script": "52210338832debc5e15ce143d5cf9241147ac0019e7516d3d9569e04b0e18f3278718921025dfaa85d64963252604e1b139b40182bb859a9e2e1aa2904876c34e82158d85452ae",
// "script_type": 14,
// "subtype": null
// }
Ok(json!({
"address": address,
"address_type": "p2wpkh",
}))
}
pub fn get_fee_estimates(&self) -> Option<&Value> {
// will not be available before the first "tick", which should
// happen as soon as GA_connect initializes the wallet
if self.cached_fees.0.is_null() {
None
} else {
Some(&self.cached_fees.0)
}
}
pub fn | (&self) -> Result<Value, Error> {
let mempoolinfo: Value = self.rpc.call("getmempoolinfo", &[])?;
let minrelayfee = json!(btc_to_usat(mempoolinfo["minrelaytxfee"].as_f64().req()? / 1000.0));
let mut estimates: Vec<Value> = (2u16..25u16)
.map(|target| {
let est: rpcjson::EstimateSmartFeeResult =
self.rpc.call("estimatesmartfee", &[json!(target)])?;
Ok(est.feerate.unwrap_or_else(|| minrelayfee.clone()))
})
.collect::<Result<Vec<Value>, Error>>()?;
// prepend the estimate for 2 blocks as the estimate for 1 blocks
estimates.insert(0, estimates[0].clone());
// prepend the minrelayfee as the first item
estimates.insert(0, minrelayfee);
// the final format is: [ minrelayfee, est_for_2_blocks, est_for_2_blocks, est_for_3_blocks, ... ]
Ok(json!(estimates))
}
pub fn get_available_currencies(&self) -> Value {
// TODO
json!({ "all": [ "USD" ], "per_exchange": { "BITSTAMP": [ "USD" ] } })
}
pub fn exchange_rate(&self, _currency: &str) -> f64 {
// TODO
420.00
}
pub fn convert_amount(&self, details: &Value) -> Result<Value, Error> {
// XXX should convert_amonut support negative numbers?
let satoshi = details["satoshi"]
.as_u64()
.or_else(|| f64_from_val(&details["btc"]).map(btc_to_usat))
.or_else(|| f64_from_val(&details["fiat"]).map(|x| self._fiat_to_usat(x)))
.or_err("id_no_amount_specified")?;
Ok(self._convert_satoshi(satoshi))
}
pub fn set_tx_memo(&self, txid: &str, memo: &str) -> Result<(), Error> {
// we can't really set a tx memo, so we fake it by setting a memo on the address
let txid = sha256d::Hash::from_hex(txid)?;
let txdesc: Value =
self.rpc.call("gettransaction", &[txid.to_hex().into(), true.into()])?;
let details = txdesc["details"].as_array().req()?;
if details.is_empty() {
throw!("Tx info for {} does not contain any details", txid);
}
// We just need any usable address label. Let's just take the first
// and hope Core always orders them in the same way, so we can also
// efficiently find it back later. We explicitly tag this label with
// the txid of this tx, so that if an address gets assigned multiple
// transaction memos, they won't conflict.
let detail = &details[0];
let mut label = AddressMeta::from_label(detail["label"].as_str())?;
label.txmemo.insert(txid, memo.to_owned());
debug!("set_tx_memo() for {}, memo={}, address={}", txid, memo, detail["address"]);
self.rpc.call("setlabel", &[detail["address"].clone(), label.to_label()?.into()])?;
Ok(())
}
fn _convert_satoshi(&self, amount: u64) -> Value {
let currency = "USD"; // TODO
let exchange_rate = self.exchange_rate(currency);
let amount_f = amount as f64;
json!({
"satoshi": amount.to_string(),
"bits": (amount_f / SAT_PER_BIT).to_string(),
"ubtc": (amount_f / SAT_PER_BIT).to_string(), // XXX why twice? same as bits
"mbtc": (amount_f / SAT_PER_MBTC).to_string(),
"btc": (amount_f / SAT_PER_BTC).to_string(),
"fiat_rate": (exchange_rate).to_string(),
"fiat_currency": currency,
"fiat": (amount_f / SAT_PER_BTC * exchange_rate).to_string(),
})
}
fn _fiat_to_usat(&self, amount: f64) -> u64 {
btc_to_usat(amount / self.exchange_rate("USD"))
}
}
impl fmt::Debug for Wallet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Wallet {{ }}")
}
}
/// Finds a transction memo inside the transaction description field.
/// This method can be provided with a response from either
/// `gettransaction` or `listtransactions`.
/// It returns "" if none if found.
fn find_memo_in_desc(txid: sha256d::Hash, txdesc: &Value) -> Result<String, Error> {
// First we try the top-level label field from listtransactions.
if let Some(label) = txdesc["label"].as_str() {
let meta = AddressMeta::from_label(Some(label))?;
if let Some(memo) = meta.txmemo.get(&txid) {
return Ok(memo.to_owned());
}
}
// Then we iterate over the details array.
if let Some(details) = txdesc["details"].as_array() {
for detail in details {
let meta = AddressMeta::from_label(detail["label"].as_str())?;
if let Some(memo) = meta.txmemo.get(&txid) {
return Ok(memo.to_owned());
}
}
}
Ok(String::new())
}
fn format_gdk_tx(txdesc: &Value, raw_tx: &[u8], network: NetworkId) -> Result<Value, Error> {
let txid = sha256d::Hash::from_hex(txdesc["txid"].as_str().req()?)?;
//TODO(stevenroose) optimize with Amount
let amount = match network {
NetworkId::Elements(..) => btc_to_isat(match txdesc["amount"] {
serde_json::Value::Object(ref v) => v["bitcoin"].as_f64().req()?,
ref v => v.as_f64().req()?,
}),
NetworkId::Bitcoin(..) => btc_to_isat(txdesc["amount"].as_f64().req()?),
};
let fee = txdesc["fee"].as_f64().map_or(0, |f| btc_to_usat(f * -1.0));
let type_str = match txdesc["category"].as_str() {
// for listtransactions, read out the category field
Some(category) => match category {
"send" => "outgoing",
"receive" => "incoming",
"immature" => "incoming",
_ => throw!("invalid tx category"),
},
// gettransaction doesn't have a top-level category,
// figure it out from the amount instead.
None => {
if amount > 0 {
"incoming"
} else {
"outgoing"
}
}
};
let tx_props = match network {
NetworkId::Bitcoin(_) => coins::btc::tx_props(&raw_tx)?,
NetworkId::Elements(_) => coins::liq::tx_props(&raw_tx)?,
};
let vsize = tx_props["transaction_vsize"].as_u64().unwrap();
let ret = json!({
"block_height": 1,
"created_at": fmt_time(txdesc["time"].as_u64().req()?),
"type": type_str,
"memo": find_memo_in_desc(txid, &txdesc)?,
"txhash": txid.to_hex(),
"transaction": hex::encode(&raw_tx),
"satoshi": amount,
"rbf_optin": txdesc["bip125-replaceable"].as_str().req()? == "yes",
"cap_cpfp": false, // TODO
"can_rbf": false, // TODO
"has_payment_request": false, // TODO
"server_signed": false,
"user_signed": true,
"instant": false,
"fee": fee,
"fee_rate": (fee as f64)/(vsize as f64),
"addressees": [], // notice the extra "e" -- its intentional
"inputs": [], // tx.input.iter().map(format_gdk_input).collect(),
"outputs": [], //tx.output.iter().map(format_gdk_output).collect(),
});
Ok(extend(ret, tx_props)?)
}
| _make_fee_estimates | identifier_name |
wallet.rs | //! The wallet module.
//!
//! Since this wallet implementation is supposed to work on top of both a
//! Bitcoin Core node as an Elements or Liquid node, we avoid using the
//! specialized bitcoincore-rpc and liquid-rpc client interfaces, but use
//! general call methods so we can leverage the common parts of the raw
//! responses. This might make the code a but harder to read or error-prone
//! but it avoids having very big code duplication.
//!
#![allow(clippy::redundant_field_names)]
use hex;
use std::collections::HashMap;
use std::str::FromStr;
use std::time::{Duration, Instant};
use std::{cell, fmt};
use bitcoin::{util::bip32, Address, Network as BNetwork};
use bitcoin_hashes::hex::{FromHex, ToHex};
use bitcoin_hashes::sha256d;
use bitcoincore_rpc::{json as rpcjson, Client as RpcClient, RpcApi};
use serde_json::Value;
#[cfg(feature = "liquid")]
use elements;
use crate::coins;
use crate::constants::{SAT_PER_BIT, SAT_PER_BTC, SAT_PER_MBTC};
use crate::errors::{Error, OptionExt};
use crate::network::{Network, NetworkId};
use crate::util::{btc_to_isat, btc_to_usat, extend, f64_from_val, fmt_time, SECP};
use crate::wally;
const PER_PAGE: usize = 30;
const FEE_ESTIMATES_TTL: Duration = Duration::from_secs(240);
/// Meta-information about an address that we need to store.
///
/// Since we don't have a persistent database, we use the Core wallet to store
/// the information required for operating GDK. For addresses, it's important
/// to keep the information needed to re-derive the private key: an identifier
/// of the master private key (i.e. the fingerprint) and the derivation path.
///
/// GDK also allows storing memos on transaction. Because Core doesn't support
/// transaction labels but only address labels, we inject the tx memos inside
/// the address label of an (preferably the first) address used in that
/// transaction.
///
/// This struct is used to structure the data stored in a label. It is
/// serialized as JSON when stored in a label, so that new fields can easily
/// be added.
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
pub(crate) struct AddressMeta {
/// The fingerprint of the extended private key used to derive the
/// private key for this address.
#[serde(rename = "fp", skip_serializing_if = "Option::is_none")]
pub fingerprint: Option<bip32::Fingerprint>,
/// The derivation path from the extended private key identified
/// by the fingerprint field.
#[serde(skip_serializing_if = "Option::is_none")]
pub child: Option<bip32::ChildNumber>,
/// Since an address can be used in multiple transactions, we keep a map
/// from the txid to the memo for the transaction.
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
pub txmemo: HashMap<sha256d::Hash, String>,
}
impl AddressMeta {
/// Parse a label from Core.
pub fn from_label<S: AsRef<str>>(l: Option<S>) -> Result<AddressMeta, Error> {
match l {
Some(ref s) if s.as_ref().is_empty() => Ok(Default::default()),
Some(s) => Ok(serde_json::from_str(s.as_ref())?),
None => Ok(Default::default()),
}
}
/// Serialize to string to save into a label.
pub fn to_label(&self) -> Result<String, Error> {
Ok(serde_json::to_string(self)?)
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
struct PersistentWalletState {
#[serde(rename = "nec")]
next_external_child: bip32::ChildNumber,
#[serde(rename = "nic")]
next_internal_child: bip32::ChildNumber,
}
pub struct Wallet {
network: &'static Network,
rpc: RpcClient,
mnemonic: String,
// For the BIP32 keys, the network variable should be ignored and not used.
/// The BIP32 master extended private key.
master_xpriv: bip32::ExtendedPrivKey,
/// The BIP32 extended private key for external addresses.
external_xpriv: bip32::ExtendedPrivKey,
/// The BIP32 extended private key for internal (i.e. change) addresses.
internal_xpriv: bip32::ExtendedPrivKey,
/// The master blinding key.
#[cfg(feature = "liquid")]
master_blinding_key: [u8; 64],
next_external_child: cell::Cell<bip32::ChildNumber>,
next_internal_child: cell::Cell<bip32::ChildNumber>,
tip: Option<sha256d::Hash>,
last_tx: Option<sha256d::Hash>,
cached_fees: (Value, Instant),
}
impl Wallet {
/// Get the address to use to store persistent state.
fn persistent_state_address(
network: NetworkId,
master_xpriv: &bip32::ExtendedPrivKey,
) -> String {
let child = bip32::ChildNumber::from_hardened_idx(350).unwrap();
let child_xpriv = master_xpriv.derive_priv(&SECP, &[child]).unwrap();
let child_xpub = bip32::ExtendedPubKey::from_private(&SECP, &child_xpriv);
match network {
#[cfg(feature = "liquid")]
NetworkId::Elements(enet) => elements::Address::p2wpkh(
&child_xpub.public_key,
None,
coins::liq::address_params(enet),
)
.to_string(),
NetworkId::Bitcoin(bnet) => Address::p2wpkh(&child_xpub.public_key, bnet).to_string(),
#[cfg(not(feature = "liquid"))]
_ => unimplemented!(),
}
}
/// Store the persistent wallet state.
fn save_persistent_state(&self) -> Result<(), Error> {
let state = PersistentWalletState {
next_external_child: self.next_external_child.get(),
next_internal_child: self.next_internal_child.get(),
};
let store_addr = Wallet::persistent_state_address(self.network.id(), &self.master_xpriv);
// Generic call for liquid compat.
self.rpc.call("setlabel", &[store_addr.into(), serde_json::to_string(&state)?.into()])?;
Ok(())
}
/// Load the persistent wallet state from the node.
#[allow(clippy::match_wild_err_arm)]
fn load_persistent_state(
rpc: &bitcoincore_rpc::Client,
state_addr: &str,
) -> Result<PersistentWalletState, Error> {
let info: Value = rpc.call("getaddressinfo", &[state_addr.into()])?;
match info.get("label") {
None => Err(Error::WalletNotRegistered),
Some(&Value::String(ref label)) => {
Ok(match serde_json::from_str::<PersistentWalletState>(label) {
Err(_) => panic!(
"corrupt persistent wallet state label (address: {}): {}",
state_addr, label
),
Ok(s) => s,
})
}
Some(_) => unreachable!(),
}
}
/// Calculates the bip32 seeds from the mnemonic phrase.
/// In order are returned:
/// - the master xpriv
/// - the external address xpriv
/// - the internal address xpriv
fn calc_xkeys(
seed: &[u8],
) -> (bip32::ExtendedPrivKey, bip32::ExtendedPrivKey, bip32::ExtendedPrivKey) {
// Network isn't of importance here.
let master_xpriv =
bip32::ExtendedPrivKey::new_master(BNetwork::Bitcoin, &seed[..]).unwrap();
// Add BIP-44 derivations for external and internal addresses.
let external_xpriv = master_xpriv
.derive_priv(&SECP, &bip32::DerivationPath::from_str("m/44'/0'/0'/0'/0").unwrap())
.unwrap();
let internal_xpriv = master_xpriv
.derive_priv(&SECP, &bip32::DerivationPath::from_str("m/44'/0'/0'/0'/1").unwrap())
.unwrap();
(master_xpriv, external_xpriv, internal_xpriv)
}
/// Register a new [Wallet].
pub fn register(network: &'static Network, mnemonic: &str) -> Result<Wallet, Error> {
let seed = wally::bip39_mnemonic_to_seed(&mnemonic, "")?;
let (master_xpriv, external_xpriv, internal_xpriv) = Wallet::calc_xkeys(&seed);
let fp = hex::encode(master_xpriv.fingerprint(&SECP).as_bytes());
// create the wallet in Core
let tmp_rpc = network.connect(None)?;
match tmp_rpc.create_wallet(fp.as_str(), Some(true))?.warning {
None => {}
Some(ref s) if s.is_empty() => {}
Some(warning) => {
warn!("Received warning when creating wallet {} in Core: {}", fp, warning,)
}
}
let rpc = network.connect(Some(&fp))?;
// Check if the user was already registered.
let state_addr = Wallet::persistent_state_address(network.id(), &master_xpriv);
match Wallet::load_persistent_state(&rpc, &state_addr) {
Err(Error::WalletNotRegistered) => {} // good
Ok(_) => return Err(Error::WalletAlreadyRegistered),
Err(e) => {
warn!("Unexpected error while registering wallet: {}", e);
return Err(e);
}
}
let wallet = Wallet {
network: network,
rpc: rpc,
mnemonic: mnemonic.to_owned(),
master_xpriv: master_xpriv,
external_xpriv: external_xpriv,
internal_xpriv: internal_xpriv,
#[cfg(feature = "liquid")]
master_blinding_key: wally::asset_blinding_key_from_seed(&seed),
next_external_child: cell::Cell::new(bip32::ChildNumber::from_normal_idx(0).unwrap()),
next_internal_child: cell::Cell::new(bip32::ChildNumber::from_normal_idx(0).unwrap()),
tip: None,
last_tx: None,
cached_fees: (Value::Null, Instant::now() - FEE_ESTIMATES_TTL * 2),
};
wallet.save_persistent_state()?;
Ok(wallet)
}
/// Login to an existing [Wallet].
pub fn login(network: &'static Network, mnemonic: &str) -> Result<Wallet, Error> {
let seed = wally::bip39_mnemonic_to_seed(&mnemonic, "")?;
let (master_xpriv, external_xpriv, internal_xpriv) = Wallet::calc_xkeys(&seed);
let fp = hex::encode(master_xpriv.fingerprint(&SECP).as_bytes());
let tmp_rpc = network.connect(None)?;
tmp_rpc.load_wallet(&fp)?;
let rpc = network.connect(Some(&fp))?;
let state_addr = Wallet::persistent_state_address(network.id(), &master_xpriv);
let state = Wallet::load_persistent_state(&rpc, &state_addr)?;
Ok(Wallet {
network: network,
rpc: rpc,
mnemonic: mnemonic.to_owned(),
master_xpriv: master_xpriv,
external_xpriv: external_xpriv,
internal_xpriv: internal_xpriv,
#[cfg(feature = "liquid")]
master_blinding_key: wally::asset_blinding_key_from_seed(&seed),
next_external_child: cell::Cell::new(state.next_external_child),
next_internal_child: cell::Cell::new(state.next_internal_child),
tip: None,
last_tx: None,
cached_fees: (Value::Null, Instant::now() - FEE_ESTIMATES_TTL * 2),
})
}
pub fn fingerprint(&self) -> bip32::Fingerprint {
self.master_xpriv.fingerprint(&SECP)
}
pub fn logout(self) -> Result<(), Error> {
self.rpc.unload_wallet(None)?;
Ok(())
}
pub fn mnemonic(&self) -> String {
self.mnemonic.clone()
}
fn derive_private_key(
&self,
fp: bip32::Fingerprint,
child: bip32::ChildNumber,
) -> Result<secp256k1::SecretKey, Error> {
let xpriv = if fp == self.external_xpriv.fingerprint(&SECP) {
self.external_xpriv
} else if fp == self.internal_xpriv.fingerprint(&SECP) {
self.internal_xpriv
} else {
error!("Address is labeled with unknown master xpriv fingerprint: {:?}", fp);
return Err(Error::CorruptNodeData);
};
let privkey = xpriv.derive_priv(&SECP, &[child])?.private_key;
Ok(privkey.key)
}
pub fn updates(&mut self) -> Result<Vec<Value>, Error> {
let mut msgs = vec![];
// check for new blocks
let tip = self.rpc.get_best_block_hash()?;
if self.tip != Some(tip) {
let info: Value = self.rpc.call("getblock", &[tip.to_hex().into(), 1.into()])?;
msgs.push(json!({
"event": "block",
"block": {
"block_height": info["height"].as_u64().req()?,
"block_hash": tip.to_hex()
}
}));
self.tip = Some(tip);
}
// check for new transactions
// XXX does the app care about the transaction data in the event?
if let Some(last_tx) = self._get_transactions(1, 0)?.0.get(0) {
let txid = last_tx["txhash"].as_str().req()?;
let txid = sha256d::Hash::from_hex(txid)?;
if self.last_tx != Some(txid) {
self.last_tx = Some(txid);
msgs.push(json!({ "event": "transaction", "transaction": last_tx }));
}
}
// update fees once every FEE_ESTIMATES_TTL
if self.cached_fees.1.elapsed() >= FEE_ESTIMATES_TTL {
self.cached_fees = (self._make_fee_estimates()?, Instant::now());
msgs.push(json!({ "event": "fees", "fees": self.cached_fees.0 }));
}
// TODO:
// {"event":"subaccount","subaccount":{"bits":"701144.66","btc":"0.70114466","fiat":"0.7712591260000000622741556099981585311432","fiat_currency":"EUR","fiat_rate":"1.10000000000000008881784197001252","has_transactions":true,"mbtc":"701.14466","name":"","pointer":0,"receiving_id":"GA3MQKVp6pP7royXDuZcw55F2TXTgg","recovery_chain_code":"","recovery_pub_key":"","satoshi":70114466,"type":"2of2","ubtc":"701144.66"}}
// XXX use zmq?
Ok(msgs)
}
pub fn get_account(&self) -> Result<Value, Error> {
let has_transactions = self._get_transactions(1, 0)?.1;
extend(
json!({
"type": "core",
"pointer": 0,
"receiving_id": "",
"name": "RPC wallet",
"has_transactions": has_transactions,
}),
self._get_balance(0)?,
)
}
pub fn get_balance(&self, details: &Value) -> Result<Value, Error> {
let min_conf = details["num_confs"].as_u64().req()? as u32;
self._get_balance(min_conf)
}
fn _get_balance(&self, min_conf: u32) -> Result<Value, Error> {
//TODO(stevenroose) implement in rust-bitcoincore-rpc once bitcoin::Amount lands
let mut args = vec![Value::Null, json!(min_conf), json!(true)];
#[cfg(feature = "liquid")]
{
if let NetworkId::Elements(net) = self.network.id() {
args.push(coins::liq::asset_hex(net).into());
}
}
let balance: f64 = self.rpc.call("getbalance", &args)?;
Ok(self._convert_satoshi(btc_to_usat(balance)))
}
pub fn get_transactions(&self, details: &Value) -> Result<Value, Error> {
let page = details["page_id"].as_u64().req()? as usize;
let (txs, potentially_has_more) = self._get_transactions(PER_PAGE, PER_PAGE * page)?;
Ok(json!({
"list": txs,
"page_id": page,
"next_page_id": if potentially_has_more { Some(page+1) } else { None },
}))
}
fn _get_transactions(&self, limit: usize, start: usize) -> Result<(Vec<Value>, bool), Error> {
// fetch listtranssactions
let txdescs: Vec<Value> = self
.rpc
.call("listtransactions", &["*".into(), limit.into(), start.into(), true.into()])?;
let potentially_has_more = txdescs.len() == limit;
// fetch full transactions and convert to GDK format
let mut txs = Vec::new();
for desc in txdescs.into_iter() {
let txid = sha256d::Hash::from_hex(desc["txid"].as_str().req()?)?;
let blockhash = &desc["blockhash"];
let tx_hex: String = self.rpc.call(
"getrawtransaction",
&[txid.to_hex().into(), false.into(), blockhash.clone()],
)?;
txs.push(format_gdk_tx(&desc, &hex::decode(&tx_hex)?, self.network.id())?);
}
Ok((txs, potentially_has_more))
}
pub fn get_transaction(&self, txid: &str) -> Result<Value, Error> {
let txid = sha256d::Hash::from_hex(txid)?;
let desc: Value = self.rpc.call("gettransaction", &[txid.to_hex().into(), true.into()])?;
let raw_tx = hex::decode(desc["hex"].as_str().req()?)?;
format_gdk_tx(&desc, &raw_tx, self.network.id())
}
pub fn create_transaction(&self, details: &Value) -> Result<String, Error> {
debug!("create_transaction(): {:?}", details);
let unfunded_tx = match self.network.id() {
NetworkId::Bitcoin(..) => coins::btc::create_transaction(&self.rpc, details)?,
NetworkId::Elements(..) => coins::liq::create_transaction(&self.rpc, details)?,
};
debug!("create_transaction unfunded tx: {:?}", hex::encode(&unfunded_tx));
// TODO explicit handling for id_no_amount_specified id_fee_rate_is_below_minimum id_invalid_replacement_fee_rate
// id_send_all_requires_a_single_output
Ok(hex::encode(unfunded_tx))
}
pub fn sign_transaction(&self, details: &Value) -> Result<String, Error> {
debug!("sign_transaction(): {:?}", details);
let change_address = self.next_address(&self.internal_xpriv, &self.next_internal_child)?;
// If we don't have any inputs, we can fail early.
let unspent: Vec<Value> = self.rpc.call("listunspent", &[0.into()])?;
if unspent.is_empty() {
return Err(Error::NoUtxosFound);
}
debug!("list_unspent: {:?}", unspent);
let raw_tx = match self.network.id() {
NetworkId::Bitcoin(_) => {
coins::btc::sign_transaction(&self.rpc, details, &change_address, |fp, child| {
self.derive_private_key(*fp, *child)
})?
}
NetworkId::Elements(net) => coins::liq::sign_transaction(
&self.rpc,
net,
details,
&change_address,
|fp, child| self.derive_private_key(*fp, *child),
)?,
};
let hex_tx = hex::encode(&raw_tx);
//TODO(stevenroose) remove when confident in signing code
let ret: Vec<Value> = self.rpc.call("testmempoolaccept", &[vec![hex_tx.clone()].into()])?;
let accept = ret.into_iter().next().unwrap();
if !(accept["allowed"].as_bool().req()?) {
error!(
"sign_transaction(): signed tx is not valid: {}",
accept["reject-reason"].as_str().req()?
);
// TODO(stevenroose) should we return an error??
}
Ok(hex_tx)
}
pub fn send_transaction(&self, details: &Value) -> Result<String, Error> {
let tx_hex = details["hex"].as_str().req()?;
Ok(self.rpc.call::<String>("sendrawtransaction", &[tx_hex.into()])?)
}
pub fn send_raw_transaction(&self, tx_hex: &str) -> Result<String, Error> {
Ok(self.rpc.call::<String>("sendrawtransaction", &[tx_hex.into()])?)
}
/// Return the next address for the derivation and import it in Core.
fn next_address(
&self,
xpriv: &bip32::ExtendedPrivKey,
child: &cell::Cell<bip32::ChildNumber>,
) -> Result<String, Error> {
let child_xpriv = xpriv.derive_priv(&SECP, &[child.get()])?;
let child_xpub = bip32::ExtendedPubKey::from_private(&SECP, &child_xpriv);
let meta = AddressMeta {
fingerprint: Some(xpriv.fingerprint(&SECP)),
child: Some(child.get()),
..Default::default()
};
let address_str = match self.network.id() {
#[cfg(feature = "liquid")]
NetworkId::Elements(enet) => {
let mut addr = elements::Address::p2shwpkh(
&child_xpub.public_key,
None,
coins::liq::address_params(enet),
);
let blinding_key = wally::asset_blinding_key_to_ec_private_key(
&self.master_blinding_key,
&addr.script_pubkey(),
);
let blinding_pubkey = secp256k1::PublicKey::from_secret_key(&SECP, &blinding_key);
addr.blinding_pubkey = Some(blinding_pubkey);
// Store blinding privkey in the node.
let addr_str = addr.to_string();
coins::liq::store_blinding_key(&self.rpc, &addr_str, &blinding_key)?;
addr_str
}
NetworkId::Bitcoin(bnet) => Address::p2wpkh(&child_xpub.public_key, bnet).to_string(),
#[cfg(not(feature = "liquid"))]
_ => unimplemented!(),
};
// Since this is a newly generated address, rescanning is not required.
self.rpc.import_public_key(&child_xpub.public_key, Some(&meta.to_label()?), Some(false))?;
child.set(match child.get() {
bip32::ChildNumber::Normal {
index,
} => bip32::ChildNumber::from_normal_idx(index + 1)?,
_ => unreachable!(),
});
self.save_persistent_state()?;
Ok(address_str)
}
pub fn get_receive_address(&self, _details: &Value) -> Result<Value, Error> {
let address = self.next_address(&self.external_xpriv, &self.next_external_child)?;
// {
// "address": "2N2x4EgizS2w3DUiWYWW9pEf4sGYRfo6PAX",
// "address_type": "p2wsh",
// "branch": 1,
// "pointer": 13,
// "script": "52210338832debc5e15ce143d5cf9241147ac0019e7516d3d9569e04b0e18f3278718921025dfaa85d64963252604e1b139b40182bb859a9e2e1aa2904876c34e82158d85452ae",
// "script_type": 14,
// "subtype": null
// }
Ok(json!({
"address": address,
"address_type": "p2wpkh",
}))
}
pub fn get_fee_estimates(&self) -> Option<&Value> {
// will not be available before the first "tick", which should
// happen as soon as GA_connect initializes the wallet
if self.cached_fees.0.is_null() | else {
Some(&self.cached_fees.0)
}
}
pub fn _make_fee_estimates(&self) -> Result<Value, Error> {
let mempoolinfo: Value = self.rpc.call("getmempoolinfo", &[])?;
let minrelayfee = json!(btc_to_usat(mempoolinfo["minrelaytxfee"].as_f64().req()? / 1000.0));
let mut estimates: Vec<Value> = (2u16..25u16)
.map(|target| {
let est: rpcjson::EstimateSmartFeeResult =
self.rpc.call("estimatesmartfee", &[json!(target)])?;
Ok(est.feerate.unwrap_or_else(|| minrelayfee.clone()))
})
.collect::<Result<Vec<Value>, Error>>()?;
// prepend the estimate for 2 blocks as the estimate for 1 blocks
estimates.insert(0, estimates[0].clone());
// prepend the minrelayfee as the first item
estimates.insert(0, minrelayfee);
// the final format is: [ minrelayfee, est_for_2_blocks, est_for_2_blocks, est_for_3_blocks, ... ]
Ok(json!(estimates))
}
pub fn get_available_currencies(&self) -> Value {
// TODO
json!({ "all": [ "USD" ], "per_exchange": { "BITSTAMP": [ "USD" ] } })
}
pub fn exchange_rate(&self, _currency: &str) -> f64 {
// TODO
420.00
}
pub fn convert_amount(&self, details: &Value) -> Result<Value, Error> {
// XXX should convert_amonut support negative numbers?
let satoshi = details["satoshi"]
.as_u64()
.or_else(|| f64_from_val(&details["btc"]).map(btc_to_usat))
.or_else(|| f64_from_val(&details["fiat"]).map(|x| self._fiat_to_usat(x)))
.or_err("id_no_amount_specified")?;
Ok(self._convert_satoshi(satoshi))
}
pub fn set_tx_memo(&self, txid: &str, memo: &str) -> Result<(), Error> {
// we can't really set a tx memo, so we fake it by setting a memo on the address
let txid = sha256d::Hash::from_hex(txid)?;
let txdesc: Value =
self.rpc.call("gettransaction", &[txid.to_hex().into(), true.into()])?;
let details = txdesc["details"].as_array().req()?;
if details.is_empty() {
throw!("Tx info for {} does not contain any details", txid);
}
// We just need any usable address label. Let's just take the first
// and hope Core always orders them in the same way, so we can also
// efficiently find it back later. We explicitly tag this label with
// the txid of this tx, so that if an address gets assigned multiple
// transaction memos, they won't conflict.
let detail = &details[0];
let mut label = AddressMeta::from_label(detail["label"].as_str())?;
label.txmemo.insert(txid, memo.to_owned());
debug!("set_tx_memo() for {}, memo={}, address={}", txid, memo, detail["address"]);
self.rpc.call("setlabel", &[detail["address"].clone(), label.to_label()?.into()])?;
Ok(())
}
fn _convert_satoshi(&self, amount: u64) -> Value {
let currency = "USD"; // TODO
let exchange_rate = self.exchange_rate(currency);
let amount_f = amount as f64;
json!({
"satoshi": amount.to_string(),
"bits": (amount_f / SAT_PER_BIT).to_string(),
"ubtc": (amount_f / SAT_PER_BIT).to_string(), // XXX why twice? same as bits
"mbtc": (amount_f / SAT_PER_MBTC).to_string(),
"btc": (amount_f / SAT_PER_BTC).to_string(),
"fiat_rate": (exchange_rate).to_string(),
"fiat_currency": currency,
"fiat": (amount_f / SAT_PER_BTC * exchange_rate).to_string(),
})
}
fn _fiat_to_usat(&self, amount: f64) -> u64 {
btc_to_usat(amount / self.exchange_rate("USD"))
}
}
impl fmt::Debug for Wallet {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Wallet {{ }}")
}
}
/// Finds a transction memo inside the transaction description field.
/// This method can be provided with a response from either
/// `gettransaction` or `listtransactions`.
/// It returns "" if none if found.
fn find_memo_in_desc(txid: sha256d::Hash, txdesc: &Value) -> Result<String, Error> {
// First we try the top-level label field from listtransactions.
if let Some(label) = txdesc["label"].as_str() {
let meta = AddressMeta::from_label(Some(label))?;
if let Some(memo) = meta.txmemo.get(&txid) {
return Ok(memo.to_owned());
}
}
// Then we iterate over the details array.
if let Some(details) = txdesc["details"].as_array() {
for detail in details {
let meta = AddressMeta::from_label(detail["label"].as_str())?;
if let Some(memo) = meta.txmemo.get(&txid) {
return Ok(memo.to_owned());
}
}
}
Ok(String::new())
}
fn format_gdk_tx(txdesc: &Value, raw_tx: &[u8], network: NetworkId) -> Result<Value, Error> {
let txid = sha256d::Hash::from_hex(txdesc["txid"].as_str().req()?)?;
//TODO(stevenroose) optimize with Amount
let amount = match network {
NetworkId::Elements(..) => btc_to_isat(match txdesc["amount"] {
serde_json::Value::Object(ref v) => v["bitcoin"].as_f64().req()?,
ref v => v.as_f64().req()?,
}),
NetworkId::Bitcoin(..) => btc_to_isat(txdesc["amount"].as_f64().req()?),
};
let fee = txdesc["fee"].as_f64().map_or(0, |f| btc_to_usat(f * -1.0));
let type_str = match txdesc["category"].as_str() {
// for listtransactions, read out the category field
Some(category) => match category {
"send" => "outgoing",
"receive" => "incoming",
"immature" => "incoming",
_ => throw!("invalid tx category"),
},
// gettransaction doesn't have a top-level category,
// figure it out from the amount instead.
None => {
if amount > 0 {
"incoming"
} else {
"outgoing"
}
}
};
let tx_props = match network {
NetworkId::Bitcoin(_) => coins::btc::tx_props(&raw_tx)?,
NetworkId::Elements(_) => coins::liq::tx_props(&raw_tx)?,
};
let vsize = tx_props["transaction_vsize"].as_u64().unwrap();
let ret = json!({
"block_height": 1,
"created_at": fmt_time(txdesc["time"].as_u64().req()?),
"type": type_str,
"memo": find_memo_in_desc(txid, &txdesc)?,
"txhash": txid.to_hex(),
"transaction": hex::encode(&raw_tx),
"satoshi": amount,
"rbf_optin": txdesc["bip125-replaceable"].as_str().req()? == "yes",
"cap_cpfp": false, // TODO
"can_rbf": false, // TODO
"has_payment_request": false, // TODO
"server_signed": false,
"user_signed": true,
"instant": false,
"fee": fee,
"fee_rate": (fee as f64)/(vsize as f64),
"addressees": [], // notice the extra "e" -- its intentional
"inputs": [], // tx.input.iter().map(format_gdk_input).collect(),
"outputs": [], //tx.output.iter().map(format_gdk_output).collect(),
});
Ok(extend(ret, tx_props)?)
}
| {
None
} | conditional_block |
wallet.rs | //! The wallet module.
//!
//! Since this wallet implementation is supposed to work on top of both a
//! Bitcoin Core node as an Elements or Liquid node, we avoid using the
//! specialized bitcoincore-rpc and liquid-rpc client interfaces, but use
//! general call methods so we can leverage the common parts of the raw
//! responses. This might make the code a but harder to read or error-prone
//! but it avoids having very big code duplication.
//!
#![allow(clippy::redundant_field_names)]
use hex;
use std::collections::HashMap;
use std::str::FromStr;
use std::time::{Duration, Instant};
use std::{cell, fmt};
use bitcoin::{util::bip32, Address, Network as BNetwork};
use bitcoin_hashes::hex::{FromHex, ToHex};
use bitcoin_hashes::sha256d;
use bitcoincore_rpc::{json as rpcjson, Client as RpcClient, RpcApi};
use serde_json::Value;
#[cfg(feature = "liquid")]
use elements;
use crate::coins;
use crate::constants::{SAT_PER_BIT, SAT_PER_BTC, SAT_PER_MBTC};
use crate::errors::{Error, OptionExt};
use crate::network::{Network, NetworkId};
use crate::util::{btc_to_isat, btc_to_usat, extend, f64_from_val, fmt_time, SECP};
use crate::wally;
const PER_PAGE: usize = 30;
const FEE_ESTIMATES_TTL: Duration = Duration::from_secs(240);
/// Meta-information about an address that we need to store.
///
/// Since we don't have a persistent database, we use the Core wallet to store
/// the information required for operating GDK. For addresses, it's important
/// to keep the information needed to re-derive the private key: an identifier
/// of the master private key (i.e. the fingerprint) and the derivation path.
///
/// GDK also allows storing memos on transaction. Because Core doesn't support
/// transaction labels but only address labels, we inject the tx memos inside
/// the address label of an (preferably the first) address used in that
/// transaction.
///
/// This struct is used to structure the data stored in a label. It is
/// serialized as JSON when stored in a label, so that new fields can easily
/// be added.
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
pub(crate) struct AddressMeta {
/// The fingerprint of the extended private key used to derive the
/// private key for this address.
#[serde(rename = "fp", skip_serializing_if = "Option::is_none")]
pub fingerprint: Option<bip32::Fingerprint>,
/// The derivation path from the extended private key identified
/// by the fingerprint field.
#[serde(skip_serializing_if = "Option::is_none")]
pub child: Option<bip32::ChildNumber>,
/// Since an address can be used in multiple transactions, we keep a map
/// from the txid to the memo for the transaction.
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
pub txmemo: HashMap<sha256d::Hash, String>,
}
impl AddressMeta {
/// Parse a label from Core.
pub fn from_label<S: AsRef<str>>(l: Option<S>) -> Result<AddressMeta, Error> {
match l {
Some(ref s) if s.as_ref().is_empty() => Ok(Default::default()),
Some(s) => Ok(serde_json::from_str(s.as_ref())?),
None => Ok(Default::default()),
}
}
/// Serialize to string to save into a label.
pub fn to_label(&self) -> Result<String, Error> {
Ok(serde_json::to_string(self)?)
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
struct PersistentWalletState {
#[serde(rename = "nec")]
next_external_child: bip32::ChildNumber,
#[serde(rename = "nic")]
next_internal_child: bip32::ChildNumber,
}
pub struct Wallet {
network: &'static Network,
rpc: RpcClient,
mnemonic: String,
// For the BIP32 keys, the network variable should be ignored and not used.
/// The BIP32 master extended private key.
master_xpriv: bip32::ExtendedPrivKey,
/// The BIP32 extended private key for external addresses.
external_xpriv: bip32::ExtendedPrivKey,
/// The BIP32 extended private key for internal (i.e. change) addresses.
internal_xpriv: bip32::ExtendedPrivKey,
/// The master blinding key.
#[cfg(feature = "liquid")]
master_blinding_key: [u8; 64],
next_external_child: cell::Cell<bip32::ChildNumber>,
next_internal_child: cell::Cell<bip32::ChildNumber>,
tip: Option<sha256d::Hash>,
last_tx: Option<sha256d::Hash>,
cached_fees: (Value, Instant),
}
impl Wallet {
/// Get the address to use to store persistent state.
fn persistent_state_address(
network: NetworkId,
master_xpriv: &bip32::ExtendedPrivKey,
) -> String {
let child = bip32::ChildNumber::from_hardened_idx(350).unwrap();
let child_xpriv = master_xpriv.derive_priv(&SECP, &[child]).unwrap();
let child_xpub = bip32::ExtendedPubKey::from_private(&SECP, &child_xpriv);
match network {
#[cfg(feature = "liquid")]
NetworkId::Elements(enet) => elements::Address::p2wpkh(
&child_xpub.public_key,
None,
coins::liq::address_params(enet),
)
.to_string(),
NetworkId::Bitcoin(bnet) => Address::p2wpkh(&child_xpub.public_key, bnet).to_string(),
#[cfg(not(feature = "liquid"))]
_ => unimplemented!(),
}
}
/// Store the persistent wallet state.
fn save_persistent_state(&self) -> Result<(), Error> {
let state = PersistentWalletState {
next_external_child: self.next_external_child.get(),
next_internal_child: self.next_internal_child.get(),
};
let store_addr = Wallet::persistent_state_address(self.network.id(), &self.master_xpriv);
// Generic call for liquid compat.
self.rpc.call("setlabel", &[store_addr.into(), serde_json::to_string(&state)?.into()])?;
Ok(())
}
/// Load the persistent wallet state from the node.
#[allow(clippy::match_wild_err_arm)]
fn load_persistent_state(
rpc: &bitcoincore_rpc::Client,
state_addr: &str,
) -> Result<PersistentWalletState, Error> {
let info: Value = rpc.call("getaddressinfo", &[state_addr.into()])?;
match info.get("label") {
None => Err(Error::WalletNotRegistered),
Some(&Value::String(ref label)) => {
Ok(match serde_json::from_str::<PersistentWalletState>(label) {
Err(_) => panic!(
"corrupt persistent wallet state label (address: {}): {}",
state_addr, label
),
Ok(s) => s,
})
}
Some(_) => unreachable!(),
}
}
/// Calculates the bip32 seeds from the mnemonic phrase.
/// In order are returned:
/// - the master xpriv
/// - the external address xpriv
/// - the internal address xpriv
fn calc_xkeys(
seed: &[u8],
) -> (bip32::ExtendedPrivKey, bip32::ExtendedPrivKey, bip32::ExtendedPrivKey) {
// Network isn't of importance here.
let master_xpriv =
bip32::ExtendedPrivKey::new_master(BNetwork::Bitcoin, &seed[..]).unwrap();
// Add BIP-44 derivations for external and internal addresses.
let external_xpriv = master_xpriv
.derive_priv(&SECP, &bip32::DerivationPath::from_str("m/44'/0'/0'/0'/0").unwrap())
.unwrap();
let internal_xpriv = master_xpriv
.derive_priv(&SECP, &bip32::DerivationPath::from_str("m/44'/0'/0'/0'/1").unwrap())
.unwrap();
(master_xpriv, external_xpriv, internal_xpriv)
}
/// Register a new [Wallet].
///
/// Derives the wallet keys from `mnemonic`, creates a Core wallet named
/// after the master key fingerprint, and stores the initial persistent
/// state in the node. Fails with [Error::WalletAlreadyRegistered] if
/// persistent state for this wallet already exists.
pub fn register(network: &'static Network, mnemonic: &str) -> Result<Wallet, Error> {
    let seed = wally::bip39_mnemonic_to_seed(&mnemonic, "")?;
    let (master_xpriv, external_xpriv, internal_xpriv) = Wallet::calc_xkeys(&seed);
    let fp = hex::encode(master_xpriv.fingerprint(&SECP).as_bytes());
    // create the wallet in Core
    let tmp_rpc = network.connect(None)?;
    match tmp_rpc.create_wallet(fp.as_str(), Some(true))?.warning {
        None => {}
        Some(ref s) if s.is_empty() => {}
        Some(warning) => {
            warn!("Received warning when creating wallet {} in Core: {}", fp, warning)
        }
    }
    let rpc = network.connect(Some(&fp))?;
    // Check if the user was already registered.
    let state_addr = Wallet::persistent_state_address(network.id(), &master_xpriv);
    match Wallet::load_persistent_state(&rpc, &state_addr) {
        Err(Error::WalletNotRegistered) => {} // good
        Ok(_) => return Err(Error::WalletAlreadyRegistered),
        Err(e) => {
            warn!("Unexpected error while registering wallet: {}", e);
            return Err(e);
        }
    }
    // Use field-init shorthand (idiomatic; clippy::redundant_field_names).
    let wallet = Wallet {
        network,
        rpc,
        mnemonic: mnemonic.to_owned(),
        master_xpriv,
        external_xpriv,
        internal_xpriv,
        #[cfg(feature = "liquid")]
        master_blinding_key: wally::asset_blinding_key_from_seed(&seed),
        next_external_child: cell::Cell::new(bip32::ChildNumber::from_normal_idx(0).unwrap()),
        next_internal_child: cell::Cell::new(bip32::ChildNumber::from_normal_idx(0).unwrap()),
        tip: None,
        last_tx: None,
        // Backdate the fee cache so the first `updates()` call refreshes it immediately.
        cached_fees: (Value::Null, Instant::now() - FEE_ESTIMATES_TTL * 2),
    };
    wallet.save_persistent_state()?;
    Ok(wallet)
}
/// Login to an existing [Wallet].
///
/// Loads the Core wallet named after the master key fingerprint and
/// restores the persisted next-child indexes from the node.
pub fn login(network: &'static Network, mnemonic: &str) -> Result<Wallet, Error> {
    let seed = wally::bip39_mnemonic_to_seed(&mnemonic, "")?;
    let (master_xpriv, external_xpriv, internal_xpriv) = Wallet::calc_xkeys(&seed);
    let fp = hex::encode(master_xpriv.fingerprint(&SECP).as_bytes());
    let tmp_rpc = network.connect(None)?;
    tmp_rpc.load_wallet(&fp)?;
    let rpc = network.connect(Some(&fp))?;
    let state_addr = Wallet::persistent_state_address(network.id(), &master_xpriv);
    let state = Wallet::load_persistent_state(&rpc, &state_addr)?;
    // Use field-init shorthand (idiomatic; clippy::redundant_field_names).
    Ok(Wallet {
        network,
        rpc,
        mnemonic: mnemonic.to_owned(),
        master_xpriv,
        external_xpriv,
        internal_xpriv,
        #[cfg(feature = "liquid")]
        master_blinding_key: wally::asset_blinding_key_from_seed(&seed),
        next_external_child: cell::Cell::new(state.next_external_child),
        next_internal_child: cell::Cell::new(state.next_internal_child),
        tip: None,
        last_tx: None,
        // Backdate the fee cache so the first `updates()` call refreshes it immediately.
        cached_fees: (Value::Null, Instant::now() - FEE_ESTIMATES_TTL * 2),
    })
}
/// The fingerprint of the wallet's master extended private key.
pub fn fingerprint(&self) -> bip32::Fingerprint {
    let master = &self.master_xpriv;
    master.fingerprint(&SECP)
}
/// Log out of the wallet, unloading the corresponding wallet in Core.
/// Consumes the wallet.
pub fn logout(self) -> Result<(), Error> {
    self.rpc.unload_wallet(None)?;
    Ok(())
}
/// The mnemonic phrase this wallet was created from.
pub fn mnemonic(&self) -> String {
    self.mnemonic.to_owned()
}
/// Recover the secret key for an address that was labeled with the
/// xpriv fingerprint `fp` and child number `child`.
///
/// Only the wallet's external and internal xprivs are recognized; any
/// other fingerprint means the node's label data is corrupt.
fn derive_private_key(
    &self,
    fp: bip32::Fingerprint,
    child: bip32::ChildNumber,
) -> Result<secp256k1::SecretKey, Error> {
    let xpriv = if self.external_xpriv.fingerprint(&SECP) == fp {
        self.external_xpriv
    } else if self.internal_xpriv.fingerprint(&SECP) == fp {
        self.internal_xpriv
    } else {
        error!("Address is labeled with unknown master xpriv fingerprint: {:?}", fp);
        return Err(Error::CorruptNodeData);
    };
    let derived = xpriv.derive_priv(&SECP, &[child])?;
    Ok(derived.private_key.key)
}
/// Poll the node for state changes since the last call and return the
/// resulting GDK notification messages.
///
/// Emits, when applicable:
/// - a "block" event when the chain tip changed,
/// - a "transaction" event when the most recent wallet tx changed,
/// - a "fees" event when the fee cache was refreshed (at most once
///   every FEE_ESTIMATES_TTL).
pub fn updates(&mut self) -> Result<Vec<Value>, Error> {
    let mut msgs = vec![];
    // check for new blocks
    let tip = self.rpc.get_best_block_hash()?;
    if self.tip != Some(tip) {
        // getblock with verbosity 1 returns the block info as JSON
        let info: Value = self.rpc.call("getblock", &[tip.to_hex().into(), 1.into()])?;
        msgs.push(json!({
            "event": "block",
            "block": {
                "block_height": info["height"].as_u64().req()?,
                "block_hash": tip.to_hex()
            }
        }));
        // Remember the tip so the event fires only once per new block.
        self.tip = Some(tip);
    }
    // check for new transactions
    // XXX does the app care about the transaction data in the event?
    if let Some(last_tx) = self._get_transactions(1, 0)?.0.get(0) {
        let txid = last_tx["txhash"].as_str().req()?;
        let txid = sha256d::Hash::from_hex(txid)?;
        if self.last_tx != Some(txid) {
            self.last_tx = Some(txid);
            msgs.push(json!({ "event": "transaction", "transaction": last_tx }));
        }
    }
    // update fees once every FEE_ESTIMATES_TTL
    if self.cached_fees.1.elapsed() >= FEE_ESTIMATES_TTL {
        self.cached_fees = (self._make_fee_estimates()?, Instant::now());
        msgs.push(json!({ "event": "fees", "fees": self.cached_fees.0 }));
    }
    // TODO:
    // {"event":"subaccount","subaccount":{"bits":"701144.66","btc":"0.70114466","fiat":"0.7712591260000000622741556099981585311432","fiat_currency":"EUR","fiat_rate":"1.10000000000000008881784197001252","has_transactions":true,"mbtc":"701.14466","name":"","pointer":0,"receiving_id":"GA3MQKVp6pP7royXDuZcw55F2TXTgg","recovery_chain_code":"","recovery_pub_key":"","satoshi":70114466,"type":"2of2","ubtc":"701144.66"}}
    // XXX use zmq?
    Ok(msgs)
}
/// Return the GDK account object for this wallet (a single "core"
/// account), including its balance.
pub fn get_account(&self) -> Result<Value, Error> {
    // Peek at the most recent transaction to see whether any exist at all.
    let (_, has_transactions) = self._get_transactions(1, 0)?;
    let account = json!({
        "type": "core",
        "pointer": 0,
        "receiving_id": "",
        "name": "RPC wallet",
        "has_transactions": has_transactions,
    });
    extend(account, self._get_balance(0)?)
}
/// Return the wallet balance, honoring the `num_confs` field of `details`
/// as the minimum number of confirmations.
pub fn get_balance(&self, details: &Value) -> Result<Value, Error> {
    self._get_balance(details["num_confs"].as_u64().req()? as u32)
}
/// Query Core for the balance with at least `min_conf` confirmations
/// and format it as a GDK amount object.
fn _get_balance(&self, min_conf: u32) -> Result<Value, Error> {
    //TODO(stevenroose) implement in rust-bitcoincore-rpc once bitcoin::Amount lands
    let mut args = vec![Value::Null, json!(min_conf), json!(true)];
    // On Liquid, restrict the query to the network's policy asset.
    #[cfg(feature = "liquid")]
    {
        if let NetworkId::Elements(net) = self.network.id() {
            args.push(coins::liq::asset_hex(net).into());
        }
    }
    let btc_balance: f64 = self.rpc.call("getbalance", &args)?;
    Ok(self._convert_satoshi(btc_to_usat(btc_balance)))
}
/// Return one page (PER_PAGE entries) of wallet transactions in GDK
/// format, with paging metadata.
pub fn get_transactions(&self, details: &Value) -> Result<Value, Error> {
    let page = details["page_id"].as_u64().req()? as usize;
    let (txs, potentially_has_more) = self._get_transactions(PER_PAGE, PER_PAGE * page)?;
    // Only advertise a next page when this one came back full.
    let next_page_id = if potentially_has_more { Some(page + 1) } else { None };
    Ok(json!({
        "list": txs,
        "page_id": page,
        "next_page_id": next_page_id,
    }))
}
/// Fetch up to `limit` wallet transactions starting at offset `start`,
/// converted to GDK format. The returned bool indicates whether more
/// transactions might exist beyond this page.
fn _get_transactions(&self, limit: usize, start: usize) -> Result<(Vec<Value>, bool), Error> {
    // Ask Core for the transaction listing.
    let txdescs: Vec<Value> = self
        .rpc
        .call("listtransactions", &["*".into(), limit.into(), start.into(), true.into()])?;
    // A full page suggests there may be more results after it.
    let potentially_has_more = txdescs.len() == limit;
    // Fetch each full raw transaction and convert it to the GDK format.
    let mut txs = Vec::with_capacity(txdescs.len());
    for desc in txdescs {
        let txid = sha256d::Hash::from_hex(desc["txid"].as_str().req()?)?;
        let blockhash = &desc["blockhash"];
        let tx_hex: String = self.rpc.call(
            "getrawtransaction",
            &[txid.to_hex().into(), false.into(), blockhash.clone()],
        )?;
        txs.push(format_gdk_tx(&desc, &hex::decode(&tx_hex)?, self.network.id())?);
    }
    Ok((txs, potentially_has_more))
}
/// Fetch a single wallet transaction by txid and return it in GDK format.
pub fn get_transaction(&self, txid: &str) -> Result<Value, Error> {
    let hash = sha256d::Hash::from_hex(txid)?;
    let args = [hash.to_hex().into(), true.into()];
    let desc: Value = self.rpc.call("gettransaction", &args)?;
    let raw_tx = hex::decode(desc["hex"].as_str().req()?)?;
    format_gdk_tx(&desc, &raw_tx, self.network.id())
}
pub fn create_transaction(&self, details: &Value) -> Result<String, Error> |
/// Sign the transaction described by `details`, returning the signed raw
/// transaction as hex.
///
/// Derives a fresh internal (change) address for the signing step and
/// sanity-checks the result with `testmempoolaccept` before returning.
pub fn sign_transaction(&self, details: &Value) -> Result<String, Error> {
    debug!("sign_transaction(): {:?}", details);
    let change_address = self.next_address(&self.internal_xpriv, &self.next_internal_child)?;
    // Fail early when the wallet has no spendable inputs at all.
    let unspent: Vec<Value> = self.rpc.call("listunspent", &[0.into()])?;
    if unspent.is_empty() {
        return Err(Error::NoUtxosFound);
    }
    debug!("list_unspent: {:?}", unspent);
    let raw_tx = match self.network.id() {
        NetworkId::Bitcoin(_) => coins::btc::sign_transaction(
            &self.rpc,
            details,
            &change_address,
            |fp, child| self.derive_private_key(*fp, *child),
        )?,
        NetworkId::Elements(net) => coins::liq::sign_transaction(
            &self.rpc,
            net,
            details,
            &change_address,
            |fp, child| self.derive_private_key(*fp, *child),
        )?,
    };
    let hex_tx = hex::encode(&raw_tx);
    //TODO(stevenroose) remove when confident in signing code
    let results: Vec<Value> =
        self.rpc.call("testmempoolaccept", &[vec![hex_tx.clone()].into()])?;
    let verdict = results.into_iter().next().unwrap();
    if !verdict["allowed"].as_bool().req()? {
        error!(
            "sign_transaction(): signed tx is not valid: {}",
            verdict["reject-reason"].as_str().req()?
        );
        // TODO(stevenroose) should we return an error??
    }
    Ok(hex_tx)
}
/// Broadcast the signed transaction in `details["hex"]`; returns the txid.
pub fn send_transaction(&self, details: &Value) -> Result<String, Error> {
    let tx_hex = details["hex"].as_str().req()?;
    Ok(self.rpc.call::<String>("sendrawtransaction", &[tx_hex.into()])?)
}
/// Broadcast a raw hex-encoded transaction; returns the txid reported by Core.
pub fn send_raw_transaction(&self, tx_hex: &str) -> Result<String, Error> {
    let txid: String = self.rpc.call("sendrawtransaction", &[tx_hex.into()])?;
    Ok(txid)
}
/// Return the next address for the derivation and import it in Core.
///
/// Derives the child address at `child.get()` from `xpriv`, imports the
/// public key into Core with a label recording the fingerprint and child
/// number (so `derive_private_key` can recover the key when signing),
/// then increments the child counter and persists the wallet state.
fn next_address(
    &self,
    xpriv: &bip32::ExtendedPrivKey,
    child: &cell::Cell<bip32::ChildNumber>,
) -> Result<String, Error> {
    let child_xpriv = xpriv.derive_priv(&SECP, &[child.get()])?;
    let child_xpub = bip32::ExtendedPubKey::from_private(&SECP, &child_xpriv);
    // The label ties the address back to its derivation path.
    let meta = AddressMeta {
        fingerprint: Some(xpriv.fingerprint(&SECP)),
        child: Some(child.get()),
        ..Default::default()
    };
    let address_str = match self.network.id() {
        #[cfg(feature = "liquid")]
        NetworkId::Elements(enet) => {
            let mut addr = elements::Address::p2shwpkh(
                &child_xpub.public_key,
                None,
                coins::liq::address_params(enet),
            );
            // Confidential address: derive a blinding key from the master
            // blinding key and this script, and attach its pubkey.
            let blinding_key = wally::asset_blinding_key_to_ec_private_key(
                &self.master_blinding_key,
                &addr.script_pubkey(),
            );
            let blinding_pubkey = secp256k1::PublicKey::from_secret_key(&SECP, &blinding_key);
            addr.blinding_pubkey = Some(blinding_pubkey);
            // Store blinding privkey in the node.
            let addr_str = addr.to_string();
            coins::liq::store_blinding_key(&self.rpc, &addr_str, &blinding_key)?;
            addr_str
        }
        NetworkId::Bitcoin(bnet) => Address::p2wpkh(&child_xpub.public_key, bnet).to_string(),
        #[cfg(not(feature = "liquid"))]
        _ => unimplemented!(),
    };
    // Since this is a newly generated address, rescanning is not required.
    self.rpc.import_public_key(&child_xpub.public_key, Some(&meta.to_label()?), Some(false))?;
    // Advance the counter to the next normal child index.
    child.set(match child.get() {
        bip32::ChildNumber::Normal {
            index,
        } => bip32::ChildNumber::from_normal_idx(index + 1)?,
        // Only normal (non-hardened) child numbers are ever stored here.
        _ => unreachable!(),
    });
    // Persist the updated child counters in the node.
    self.save_persistent_state()?;
    Ok(address_str)
}
/// Derive the next external (receive) address and return it in GDK format.
///
/// A full GDK response would also carry fields such as "branch",
/// "pointer", "script" and "script_type"; only the essentials are
/// provided here.
pub fn get_receive_address(&self, _details: &Value) -> Result<Value, Error> {
    let address = self.next_address(&self.external_xpriv, &self.next_external_child)?;
    Ok(json!({
        "address": address,
        "address_type": "p2wpkh",
    }))
}
pub fn get_fee_estimates(&self) -> Option<&Value> {
// will not be available before the first "tick", which should
// happen as soon as GA_connect initializes the wallet
if self.cached_fees.0.is_null() {
None
} else {
Some(&self.cached_fees.0)
}
}
/// Build the GDK fee estimate array by querying Core.
///
/// The result format is:
/// `[ minrelayfee, est_for_2_blocks, est_for_2_blocks, est_for_3_blocks, ... ]`
/// — the 1-block slot reuses the 2-block estimate.
pub fn _make_fee_estimates(&self) -> Result<Value, Error> {
    let mempoolinfo: Value = self.rpc.call("getmempoolinfo", &[])?;
    let minrelayfee = json!(btc_to_usat(mempoolinfo["minrelaytxfee"].as_f64().req()? / 1000.0));
    // Query estimates for confirmation targets of 2 through 24 blocks,
    // falling back to the minimum relay fee when Core has no estimate.
    let targets: Vec<Value> = (2u16..25u16)
        .map(|target| {
            let est: rpcjson::EstimateSmartFeeResult =
                self.rpc.call("estimatesmartfee", &[json!(target)])?;
            Ok(est.feerate.unwrap_or_else(|| minrelayfee.clone()))
        })
        .collect::<Result<Vec<Value>, Error>>()?;
    // Assemble: minrelayfee first, then the 2-block estimate doubling as
    // the 1-block estimate, then the rest.
    let mut estimates = Vec::with_capacity(targets.len() + 2);
    estimates.push(minrelayfee);
    estimates.push(targets[0].clone());
    estimates.extend(targets);
    Ok(json!(estimates))
}
/// The set of supported fiat currencies, grouped per exchange.
pub fn get_available_currencies(&self) -> Value {
    // TODO: query a real list instead of this hard-coded one
    json!({ "all": [ "USD" ], "per_exchange": { "BITSTAMP": [ "USD" ] } })
}
/// The BTC exchange rate for `_currency`.
pub fn exchange_rate(&self, _currency: &str) -> f64 {
    // TODO: fetch from a real rate provider; hard-coded placeholder for now
    420.00
}
/// Convert an amount given in `details` — as "satoshi", "btc" or "fiat",
/// tried in that order — into a GDK amount object with all denominations.
pub fn convert_amount(&self, details: &Value) -> Result<Value, Error> {
    // XXX should convert_amount support negative numbers?
    let from_btc = || f64_from_val(&details["btc"]).map(btc_to_usat);
    let from_fiat = || f64_from_val(&details["fiat"]).map(|x| self._fiat_to_usat(x));
    let satoshi = details["satoshi"]
        .as_u64()
        .or_else(from_btc)
        .or_else(from_fiat)
        .or_err("id_no_amount_specified")?;
    Ok(self._convert_satoshi(satoshi))
}
/// Attach a memo to a transaction.
///
/// Core has no per-transaction memo field, so the memo is faked by
/// storing it inside an address label of the transaction, keyed by txid.
pub fn set_tx_memo(&self, txid: &str, memo: &str) -> Result<(), Error> {
    let txid = sha256d::Hash::from_hex(txid)?;
    let tx_info: Value =
        self.rpc.call("gettransaction", &[txid.to_hex().into(), true.into()])?;
    let details = tx_info["details"].as_array().req()?;
    if details.is_empty() {
        throw!("Tx info for {} does not contain any details", txid);
    }
    // Any usable address label will do; take the first detail and hope
    // Core always orders them the same way, so it can be found back
    // efficiently later. The label entry is keyed by this txid, so an
    // address carrying multiple transaction memos won't conflict.
    let detail = &details[0];
    let mut label = AddressMeta::from_label(detail["label"].as_str())?;
    label.txmemo.insert(txid, memo.to_owned());
    debug!("set_tx_memo() for {}, memo={}, address={}", txid, memo, detail["address"]);
    self.rpc.call("setlabel", &[detail["address"].clone(), label.to_label()?.into()])?;
    Ok(())
}
/// Build the GDK amount object for `amount` satoshi, covering all
/// denominations plus the fiat conversion.
fn _convert_satoshi(&self, amount: u64) -> Value {
    let currency = "USD"; // TODO: support other fiat currencies
    let rate = self.exchange_rate(currency);
    let sats = amount as f64;
    json!({
        "satoshi": amount.to_string(),
        "bits": (sats / SAT_PER_BIT).to_string(),
        "ubtc": (sats / SAT_PER_BIT).to_string(), // same unit as "bits"
        "mbtc": (sats / SAT_PER_MBTC).to_string(),
        "btc": (sats / SAT_PER_BTC).to_string(),
        "fiat_rate": rate.to_string(),
        "fiat_currency": currency,
        "fiat": (sats / SAT_PER_BTC * rate).to_string(),
    })
}
/// Convert a fiat (USD) amount to satoshi using the current exchange rate.
fn _fiat_to_usat(&self, amount: f64) -> u64 {
    let btc = amount / self.exchange_rate("USD");
    btc_to_usat(btc)
}
}
impl fmt::Debug for Wallet {
    /// Deliberately omits the wallet's fields, which include the mnemonic
    /// and private key material.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("Wallet { }")
    }
}
/// Finds a transaction memo inside the transaction description field.
/// The description can be a response from either `gettransaction` or
/// `listtransactions`. Returns an empty string if no memo is found.
fn find_memo_in_desc(txid: sha256d::Hash, txdesc: &Value) -> Result<String, Error> {
    // The top-level label field (present in listtransactions output)
    // takes precedence.
    if let Some(label) = txdesc["label"].as_str() {
        if let Some(memo) = AddressMeta::from_label(Some(label))?.txmemo.get(&txid) {
            return Ok(memo.clone());
        }
    }
    // Otherwise check the label of every entry in the details array.
    for detail in txdesc["details"].as_array().into_iter().flatten() {
        let meta = AddressMeta::from_label(detail["label"].as_str())?;
        if let Some(memo) = meta.txmemo.get(&txid) {
            return Ok(memo.clone());
        }
    }
    Ok(String::new())
}
/// Convert a Core transaction description (from `gettransaction` or
/// `listtransactions`) plus the raw transaction bytes into the GDK
/// transaction JSON format.
fn format_gdk_tx(txdesc: &Value, raw_tx: &[u8], network: NetworkId) -> Result<Value, Error> {
    let txid = sha256d::Hash::from_hex(txdesc["txid"].as_str().req()?)?;
    //TODO(stevenroose) optimize with Amount
    let amount = match network {
        // On Elements the amount can be an object keyed per asset.
        NetworkId::Elements(..) => btc_to_isat(match txdesc["amount"] {
            serde_json::Value::Object(ref v) => v["bitcoin"].as_f64().req()?,
            ref v => v.as_f64().req()?,
        }),
        NetworkId::Bitcoin(..) => btc_to_isat(txdesc["amount"].as_f64().req()?),
    };
    // Fees come in as negative BTC; flip the sign and default to 0 when
    // the field is absent.
    let fee = txdesc["fee"].as_f64().map_or(0, |f| btc_to_usat(f * -1.0));
    let type_str = match txdesc["category"].as_str() {
        // for listtransactions, read out the category field
        Some(category) => match category {
            "send" => "outgoing",
            "receive" => "incoming",
            "immature" => "incoming",
            _ => throw!("invalid tx category"),
        },
        // gettransaction doesn't have a top-level category,
        // figure it out from the amount instead.
        None => {
            if amount > 0 {
                "incoming"
            } else {
                "outgoing"
            }
        }
    };
    // Coin-specific properties decoded from the raw transaction.
    let tx_props = match network {
        NetworkId::Bitcoin(_) => coins::btc::tx_props(&raw_tx)?,
        NetworkId::Elements(_) => coins::liq::tx_props(&raw_tx)?,
    };
    let vsize = tx_props["transaction_vsize"].as_u64().unwrap();
    let ret = json!({
        // NOTE(review): block_height is hard-coded to 1 — presumably a
        // placeholder; confirm whether the real height should go here.
        "block_height": 1,
        "created_at": fmt_time(txdesc["time"].as_u64().req()?),
        "type": type_str,
        "memo": find_memo_in_desc(txid, &txdesc)?,
        "txhash": txid.to_hex(),
        "transaction": hex::encode(&raw_tx),
        "satoshi": amount,
        "rbf_optin": txdesc["bip125-replaceable"].as_str().req()? == "yes",
        "cap_cpfp": false, // TODO
        "can_rbf": false, // TODO
        "has_payment_request": false, // TODO
        "server_signed": false,
        "user_signed": true,
        "instant": false,
        "fee": fee,
        "fee_rate": (fee as f64)/(vsize as f64),
        "addressees": [], // notice the extra "e" -- its intentional
        "inputs": [], // tx.input.iter().map(format_gdk_input).collect(),
        "outputs": [], //tx.output.iter().map(format_gdk_output).collect(),
    });
    // Merge the coin-specific tx properties into the base object.
    Ok(extend(ret, tx_props)?)
}
| {
debug!("create_transaction(): {:?}", details);
let unfunded_tx = match self.network.id() {
NetworkId::Bitcoin(..) => coins::btc::create_transaction(&self.rpc, details)?,
NetworkId::Elements(..) => coins::liq::create_transaction(&self.rpc, details)?,
};
debug!("create_transaction unfunded tx: {:?}", hex::encode(&unfunded_tx));
// TODO explicit handling for id_no_amount_specified id_fee_rate_is_below_minimum id_invalid_replacement_fee_rate
// id_send_all_requires_a_single_output
Ok(hex::encode(unfunded_tx))
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.