file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
Histo.js | /**
* @jsx React.DOM
*/
var React = require('react/addons');
var cx = React.addons.classSet;
var moment = require('moment');
var Router = require('react-router');
var Route = Router.Route;
var NotFoundRoute = Router.NotFoundRoute;
var DefaultRoute = Router.DefaultRoute;
var Link = Router.Link;
var RouteHandler = Router.RouteHandler;
var Row = require('react-bootstrap').Row;
var Accordion = require('react-bootstrap').Accordion;
var Panel = require('react-bootstrap').Panel;
var Grid = require('react-bootstrap').Grid;
var Label = require('react-bootstrap').Label;
var Col = require('react-bootstrap').Col;
var ModalTrigger = require('react-bootstrap').ModalTrigger;
var ButtonGroup = require('react-bootstrap').ButtonGroup;
var Button = require('react-bootstrap').Button;
var OverlayTrigger = require('react-bootstrap').OverlayTrigger;
var Tooltip = require('react-bootstrap').Tooltip;
var Modal = require('react-bootstrap').Modal;
var Badge = require('react-bootstrap').Badge;
var TabbedArea = require('react-bootstrap').TabbedArea;
var TabPane = require('react-bootstrap').TabPane;
var DropdownButton = require('react-bootstrap').DropdownButton;
var MenuItem = require('react-bootstrap').MenuItem;
var Table = require('react-bootstrap').Table;
var Promise = require('es6-promise').Promise;
var Utils = require('./Utils');
var $ = require('jquery');
var d3 = require('d3');
require('../libs/d3.tip.js');
var debug = require('debug')('Histo.js');
var startYear;
var Histo = React.createClass({
toggleGraph: function() {
if(this.state.chartType === 'pi') {
this.setState({chartType: 'real'});
} else {
this.setState({chartType: 'pi'});
}
},
getInitialState: function() {
var refval;
if(localStorage.getItem(this.props.title)) {
refval = localStorage.getItem(this.props.title);
} else {
localStorage.setItem(this.props.title, 50);
refval = localStorage.getItem(this.props.title);
}
return {
selected: false,
referenceValue: refval,
chartType: 'pi' // Can be 'pi' or 'real'
};
},
handleReferenceValueChange: function(e) {
debug(e.target.value);
localStorage.setItem(this.props.title, e.target.value);
this.setState({
referenceValue: e.target.value
});
},
handleHistoClick: function() {
var self = this;
self.props.handleSelection({
'title': self.props.title,
'selected': !self.state.selected
});
if(self.props.active) {
self.setState({selected: false});
} else {
self.setState({selected: true});
}
},
render: function() {
var self = this;
var title = this.props.title;
var chartType = this.state.chartType;
var values = this.props.values;
var period = this.props.period;
var referenceValue = this.state.referenceValue;
var line, lastValue;
var tooltipString = 'PI';
// if(values[values.length - 1].Value === 'NULL') {
// // Last value is NULL... do not draw this chart!
// return <div style={{display:'none'}} />;
// }
if(chartType === 'real') {
lastValue = Math.round(values[values.length - 1].Abs) || 0;
tooltipString = 'Aantal';
} else {
lastValue = Math.round(values[values.length - 1].Score) || 0;
tooltipString = 'PI';
}
// D3 configuration
var margin = {top: 20, right: 20, bottom: 60, left: 50},
width = 500 - margin.left - margin.right,
height = 200 - margin.top - margin.bottom;
var parseDate = d3.time.format("%m/%d/%Y").parse;
if(period) {
startYear = moment('2014').subtract(period, 'years');
} else {
startYear = moment('2014').subtract(5, 'years');
}
var x = d3.time.scale()
.range([0, width])
.domain([startYear.toDate(), moment('12-30-2014').toDate()]).clamp(true);
var y = d3.scale.linear()
.range([height, 1]);
var xAxis = d3.svg.axis()
.scale(x)
.tickFormat(d3.time.format("%m/%y"))
.tickPadding(12)
.orient("bottom");
var yAxis = d3.svg.axis()
.scale(y)
.orient("left");
if(chartType === 'real') {
line = d3.svg.line()
.defined(function(d) { return d.Abs != 'NULL'; })
.interpolate("cardinal")
.x(function(d) { return x(parseDate(d.Date)); })
.y(function(d) { return y(Number(d.Abs)); });
} else {
line = d3.svg.line()
.defined(function(d) { return d.Score != 'NULL'; })
.x(function(d) { return x(parseDate(d.Date)); })
.y(function(d) { return y(Number(d.Score)); });
}
d3.select('#'+title.replace(/ /g, '-')).select('svg').remove(); // This should not be necessary, break up d3 into smaller components?
var svg = d3.select('#'+title.replace(/ /g, '-')).append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
if(chartType === 'real') {
y.domain(d3.extent(values, function(d) { return Number(d.Abs); }));
} else {
y.domain([1, 10]);
}
var xaxis = svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + height + ")")
.call(xAxis);
xaxis.selectAll("text")
.style("text-anchor", "end")
.attr("dx", "-.8em")
.attr("dy", ".15em")
.attr("transform", function(d) {
return "rotate(-65)";
});
svg.append("g")
.attr("class", "y axis")
.call(yAxis)
.append("text")
.text(chartType === 'real' ? 'Aantal' : 'PI')
.attr("transform", "rotate(-90)")
.attr("y", 6)
.attr("dy", ".71em")
.style("text-anchor", "end");
try {
if(chartType === 'pi') {
var withoutNull = _.without(values, 'NULL');
svg.append('circle')
.attr('class', 'sparkcircle')
.attr('cx', x(parseDate(withoutNull[withoutNull.length - 1].Date)))
.attr('cy', y(Number(withoutNull[withoutNull.length - 1].Score)))
.attr('r', 5);
}
} catch(error) {
console.log(error);
}
var path = svg.append("path")
.datum(values)
.attr("class", "line")
.attr("d", line);
path.each(function(d) { d.totalLength = this.getTotalLength(); }); // Add total length per path, needed for animating over full length
if(chartType === 'pi') {
path
.attr("stroke-dasharray", function(d) {
return d.totalLength + " " + d.totalLength;
})
.attr("stroke-dashoffset", function(d) {
return d.totalLength;
})
.transition()
.duration(1000)
.ease("linear")
.attr("stroke-dashoffset", 0);
}
var bisectDate = d3.bisector(function(d) {
return parseDate(d.Date);
}).left;
var focus = svg.append("g")
.attr("class", "focus")
.style("display", "none");
focus.append("circle")
.attr("r", 4.5);
focus.append("text")
.attr("x", 9)
.attr("dy", ".35em");
svg.append("rect")
.attr("class", "overlay")
.attr("width", width)
.attr("height", height)
.on("mouseover", function() {
focus.style("display", null);
})
.on("mouseout", function() {
focus.style("display", "none");
})
.on("mousemove", mousemove);
function | () {
var x0 = x.invert(d3.mouse(this)[0]),
i = bisectDate(values, x0, 1),
d0 = values[i - 1],
d1 = values[i],
d = x0 - d0.Date > d1.Date - x0 ? d1 : d0;
if(chartType === 'pi') {
focus.attr("transform", "translate(" + x(parseDate(d.Date)) + "," + y(d.Score) + ")");
focus.select("text").text(d.Score);
} else {
focus.attr("transform", "translate(" + x(parseDate(d.Date)) + "," + y(d.Abs) + ")");
focus.select("text").text(d.Abs);
}
}
var max = d3.max(values, function(d) { return Number(d.Abs); });
if(self.state.referenceValue && chartType === 'real') {
svg.append('line')
.attr({
x1: xAxis.scale()(0),
y1: yAxis.scale()((self.state.referenceValue * max) / 100),
x2: 1000,
y2: yAxis.scale()((self.state.referenceValue * max) / 100)
})
.style("stroke-dasharray", "5,5")
.style("stroke", "#000");
}
var refval = ((self.state.referenceValue * max) / 100);
var alarmTxt = '';
var alarmClass = false;
if(refval > lastValue) {
alarmClass = true;
alarmTxt = 'Onder referentiewaarde'
} else {
alarmClass = false;
alarmTxt = 'Boven referentiewaarde'
}
self.props.setRefVal(refval);
var alarmClasses = cx({
'danger': alarmClass
});
var classesTitlebar = cx({
'panel-heading': true,
'rightarrowdiv': self.props.active,
'active': self.props.active
});
var classesContainer = cx({
'panel': true,
'panel-default': true,
'active': self.props.active
});
var classesToggleSlider = cx({
'hide': self.state.chartType === 'pi'
});
var bgCol = '#fff';
if(self.state.chartType === 'pi') {
bgCol = Utils.quantize(lastValue);
} else {
bgCol = '#fff';
}
var txtCol = (self.state.chartType === 'pi') ? '#fff' : '#000';
return (
<div className={classesContainer}
onClick={this.handleHistoClick}
ref="histoRoot">
<div style={{position:'relative'}} className={classesTitlebar}>
<OverlayTrigger placement="right" overlay={<Tooltip><strong>{tooltipString}</strong></Tooltip>}>
<Label onClick={this.toggleGraph}
style={{float:'right', fontSize:'1.1em', cursor: 'pointer', backgroundColor: bgCol, color: txtCol}}>
{lastValue} {(self.state.chartType === 'pi') ? '' : ' ('+alarmTxt+')'}
</Label>
</OverlayTrigger>
<span onClick={this.handleHistoClick}
style={{cursor:'pointer', fontWeight:'bold', fontSize:'1.1em'}}>
{title}
</span>
</div>
<div className="panel-body">
<div className="histoChart" ref="chart" id={title.replace(/ /g, '-')}>
<input onChange={this.handleReferenceValueChange}
className={classesToggleSlider}
type='range'
defaultValue={this.state.referenceValue}
style={{width:159,
position:'relative',
top:103,
left:437,
'-webkit-transform':'rotate(-90deg)',
'-moz-transform':'rotate(-90deg)',
transform:'rotate(-90deg)'}} />
</div>
</div>
</div>
);
}
});
module.exports = Histo; | mousemove | identifier_name |
ws.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The [`ws_server`](#ws_server) and [`ws_client`](#ws_client) connectors provide support for the `WebSocket` protocol specification.
//!
//! Tremor can expose a client or server connection.
//!
//! Text and binary frames can be used.
//!
//! ## `ws_server`
//!
//! This connector is a websocket server. It opens a TCP listening socket, and for each incoming connection it initiates the Websocket handshake. Then websocket frames can flow
//! and are processed with the given `preprocessors` and `codec` and sent to the `out` port of the connector.
//!
//! Each incoming connection creates a new stream of events. Events from a websocket connection bear the following metadata record at `$ws_server`:
//!
//! ```js
//! {
//! "tls": true, // whether or not TLS is configured
//! "peer": {
//! "host": "127.0.0.1", // ip of the connection peer
//! "port": 12345 // port of the connection peer
//! }
//! }
//! ```
//!
//! When a connection is established and events are received, it is possible to send events to any open connection. In order to achieve this, a pipeline needs to be connected to the `in` port of this connector and send events to it. There are multiple ways to target a certain connection with a specific event:
//!
//! * Send the event you just received from the `ws_server` right back to it. It will be able to track the the event to its websocket connection. You can even do this with an aggregate event coming from a select with a window. If an event is the result of events from multiple websocket connections, it will send the event back down to each websocket connection.
//! * Attach the same metadata you receive on the connection under `$ws_server` to the event you want to send to that connection.
//!
//! ### Configuration
//!
//! | Option | Description | Type | Required | Default value |
//! |------------------|-------------------------------------------------------------------------------------------------------------|------------------|----------|------------------------------------------------------------------------------|
//! | `url` | The host and port as url to listen on. | string | yes | |
//! | `tls` | Optional Transport Level Security configuration. See [TLS configuration](./index.md#server). | record | no | No TLS configured. |
//! | `backlog` | The maximum size of the queue of pending connections not yet `accept`ed. | positive integer | no | 128 |
//! | `socket_options` | See [TCP socket options](./index.md#tcp-socket-options). | record | no | See [TCP socket options defaults](./index#tcp-socket-options) |
//!
//! ### Examples
//!
//! An annotated example of a plain WS cient configuration leveraging defaults:
//!
//! ```tremor title="config.troy"
//! define connector in from ws_server
//! with
//! preprocessors = [
//! {
//! "name": "separate",
//! "config": {
//! "buffered": false
//! }
//! }
//! ],
//! codec = "json",
//! config = {
//! "url": "127.0.0.1:4242",
//! }
//! end;
//! ```
//!
//! An annotated example of a secure WS server configuration:
//!
//! ```tremor title="config.troy"
//! define connector ws_server from ws_server
//! with
//! preprocessors = ["separate"],
//! codec = "json",
//! config = {
//! "url": "0.0.0.0:65535",
//! "tls": {
//! # Security certificate for this service endpoint
//! "cert": "./before/localhost.cert",
//! # Security key
//! "key": "./before/localhost.key",
//! }
//! }
//! end;
//! ```
//!
//! ## `ws_client`
//!
//! This connector is a websocket client, that establishes one connection to the host and port configured in `url`. Events sent to the `in` port of this connector will be processed by the configured `codec` and `postprocessors` and turned into a text or binary frame, depending on the events boolean metadata value `$ws_server.binary`. If you want to sent a binary frame, you need to set:
//!
//! ```tremor
//! let $ws_server["binary"] = true;
//! ```
//!
//! If nothing is provided a text frame is sent.
//!
//! Data received on the open connection is processed frame by frame by the configured `preprocessors` and `codec` and sent as event via the `out` port of the connector. Each event contains a metadata record of the following form via `$ws_server`:
//!
//! ```js
//! {
//! "tls": false, // whether or not tls is enabled on the connection
//! "peer": {
//! "host": "192.168.0.1", // ip of the connection peer
//! "port": 56431 // port of the connection peer
//! }
//! }
//! ```
//!
//! ### Configuration
//!
//! | Option | Description | Type | Required | Default value |
//! |------------------|-------------------------------------------------------------------------------------------------------------|-------------------|----------|------------------------------------------------------------------------------|
//! | `url` | The URL to connect to in order to initiate the websocket connection. | string | yes | |
//! | `tls` | Optional Transport Level Security configuration. See [TLS configuration](./index.md#client). | record or boolean | no | No TLS configured. |
//! | `socket_options` | See [TCP socket options](./index.md#tcp-socket-options). | record | no | See [TCP socket options defaults](./index#tcp-socket-options) |
//!
//! ### Examples
//!
//! An annotated example of a non-tls plain WS cient configuration leveraging defaults:
//!
//! ```tremor title="config.troy"
//! define my_wsc out from ws_client
//! with
//! postprocessors = ["separate"],
//! codec = "json",
//! config = {
//! # Connect to port 4242 on the loopback device
//! "url": "ws://127.0.0.1:4242/"
//!
//! # Optional Transport Level Security configuration
//! # "tls" = { ... }
//!
//! # Optional tuning of the Nagle algorithm ( default: true )
//! # - By default no delay is preferred
//! # "no_delay" = false
//! }
//! end;
//! ```
//!
//! An annotated example of a secure WS client configuration with
//! reconnection quality of service configured:
//!
//! ```tremor title="config.troy"
//! define connector ws_client from ws_client
//! with
//! postprocessors = ["separate"],
//! codec = "json",
//! config = {
//! # Listen on all interfaces on TCP port 65535
//! "url": "wss://0.0.0.0:65535",
//!
//! # Prefer delay and enable the TCP Nagle algorithm
//! "no_delay": false,
//!
//! # Enable SSL/TLS
//! "tls": {
//! # CA certificate
//! "cafile": "./before/localhost.cert",
//! # Domain
//! "domain": "localhost",
//! }
//! },
//! # Reconnect starting at half a second, backoff by doubling, maximum of 3 tries before circuit breaking
//! reconnect = {
//! "retry": {
//! "interval_ms": 500,
//! "growth_rate": 2,
//! "max_retries": 3,
//! }
//! }
//! end;
//! ```
pub(crate) mod client;
pub(crate) mod server;
use crate::connectors::prelude::*;
use futures::prelude::*;
use futures::stream::{SplitSink, SplitStream};
use tokio::net::TcpStream;
use tokio_rustls::server::TlsStream;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::WebSocketStream;
pub(crate) struct WsDefaults;
impl Defaults for WsDefaults {
const SCHEME: &'static str = "ws";
const HOST: &'static str = "localhost";
const PORT: u16 = 80;
}
struct WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send,
Runtime: SinkRuntime,
{
stream: SplitStream<WebSocketStream<Stream>>,
// we keep this around for closing the writing part if the reader is done
sink_runtime: Option<Runtime>,
origin_uri: EventOriginUri,
meta: Value<'static>,
ctx: Ctx,
}
impl<Stream, Ctx, Runtime> WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send + Sync,
Runtime: SinkRuntime,
{
fn new(
stream: SplitStream<WebSocketStream<Stream>>,
sink_runtime: Option<Runtime>,
origin_uri: EventOriginUri,
meta: Value<'static>,
ctx: Ctx,
) -> Self {
Self {
stream,
sink_runtime,
origin_uri,
meta,
ctx,
}
}
}
#[async_trait::async_trait]
impl<Stream, Ctx, Runtime> StreamReader for WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send + Sync,
Runtime: SinkRuntime,
{
async fn quiesce(&mut self, stream: u64) -> Option<SourceReply> {
Some(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
})
}
async fn read(&mut self, stream: u64) -> Result<SourceReply> {
let mut is_binary = false;
match self.stream.next().await {
Some(Ok(message)) => {
let data = match message {
Message::Text(text) => text.into_bytes(),
Message::Binary(binary) => {
is_binary = true;
binary
} | let after_close = self.stream.next().await;
debug_assert!(
after_close.is_none(),
"WS reader not behaving as expected after receiving a close message"
);
return Ok(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
});
}
Message::Ping(_) | Message::Pong(_) | Message::Frame(_) => {
// ignore those, but don't let the source wait
return self.read(stream).await;
}
};
let mut meta = self.meta.clone();
if is_binary {
meta.insert("binary", Value::const_true())?;
};
Ok(SourceReply::Data {
origin_uri: self.origin_uri.clone(),
stream: Some(stream),
meta: Some(meta),
data,
port: None,
codec_overwrite: None,
})
}
Some(Err(_)) | None => Ok(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
}),
}
}
async fn on_done(&mut self, stream: u64) -> StreamDone {
// make the writer stop, otherwise the underlying socket will never be closed
if let Some(sink_runtime) = self.sink_runtime.as_mut() {
self.ctx.swallow_err(
sink_runtime.unregister_stream_writer(stream).await,
"Error unregistering stream",
);
}
StreamDone::StreamClosed
}
}
struct WsWriter<S>
where
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Sync + Send,
{
sink: SplitSink<WebSocketStream<S>, Message>,
}
impl WsWriter<TcpStream> {
fn new(sink: SplitSink<WebSocketStream<TcpStream>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<TlsStream<TcpStream>> {
fn new_tls_server(sink: SplitSink<WebSocketStream<TlsStream<TcpStream>>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<TcpStream> {
fn new_tungstenite_client(sink: SplitSink<WebSocketStream<TcpStream>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<tokio_rustls::client::TlsStream<TcpStream>> {
fn new_tls_client(
sink: SplitSink<WebSocketStream<tokio_rustls::client::TlsStream<TcpStream>>, Message>,
) -> Self {
Self { sink }
}
}
#[async_trait::async_trait]
impl<S> StreamWriter for WsWriter<S>
where
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Sync + Send + Unpin,
{
async fn write(&mut self, data: Vec<Vec<u8>>, meta: Option<&Value>) -> Result<()> {
for chunk in data {
if let Some(meta) = &meta {
// If metadata is set, check for a binary framing flag
if let Some(true) = meta.get_bool("binary") {
let message = Message::Binary(chunk);
self.sink.send(message).await?;
} else {
let message = std::str::from_utf8(&chunk)?;
let message = Message::Text(message.to_string());
self.sink.send(message).await?;
}
} else {
// No metadata, default to text ws framing
let message = std::str::from_utf8(&chunk)?;
let message = Message::Text(message.to_string());
self.sink.send(message).await?;
};
}
Ok(())
}
async fn on_done(&mut self, _stream: u64) -> Result<StreamDone> {
self.sink.close().await?;
Ok(StreamDone::StreamClosed)
}
} | Message::Close(_) => {
// read from the stream once again to drive the closing handshake | random_line_split |
ws.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The [`ws_server`](#ws_server) and [`ws_client`](#ws_client) connectors provide support for the `WebSocket` protocol specification.
//!
//! Tremor can expose a client or server connection.
//!
//! Text and binary frames can be used.
//!
//! ## `ws_server`
//!
//! This connector is a websocket server. It opens a TCP listening socket, and for each incoming connection it initiates the Websocket handshake. Then websocket frames can flow
//! and are processed with the given `preprocessors` and `codec` and sent to the `out` port of the connector.
//!
//! Each incoming connection creates a new stream of events. Events from a websocket connection bear the following metadata record at `$ws_server`:
//!
//! ```js
//! {
//! "tls": true, // whether or not TLS is configured
//! "peer": {
//! "host": "127.0.0.1", // ip of the connection peer
//! "port": 12345 // port of the connection peer
//! }
//! }
//! ```
//!
//! When a connection is established and events are received, it is possible to send events to any open connection. In order to achieve this, a pipeline needs to be connected to the `in` port of this connector and send events to it. There are multiple ways to target a certain connection with a specific event:
//!
//! * Send the event you just received from the `ws_server` right back to it. It will be able to track the the event to its websocket connection. You can even do this with an aggregate event coming from a select with a window. If an event is the result of events from multiple websocket connections, it will send the event back down to each websocket connection.
//! * Attach the same metadata you receive on the connection under `$ws_server` to the event you want to send to that connection.
//!
//! ### Configuration
//!
//! | Option | Description | Type | Required | Default value |
//! |------------------|-------------------------------------------------------------------------------------------------------------|------------------|----------|------------------------------------------------------------------------------|
//! | `url` | The host and port as url to listen on. | string | yes | |
//! | `tls` | Optional Transport Level Security configuration. See [TLS configuration](./index.md#server). | record | no | No TLS configured. |
//! | `backlog` | The maximum size of the queue of pending connections not yet `accept`ed. | positive integer | no | 128 |
//! | `socket_options` | See [TCP socket options](./index.md#tcp-socket-options). | record | no | See [TCP socket options defaults](./index#tcp-socket-options) |
//!
//! ### Examples
//!
//! An annotated example of a plain WS cient configuration leveraging defaults:
//!
//! ```tremor title="config.troy"
//! define connector in from ws_server
//! with
//! preprocessors = [
//! {
//! "name": "separate",
//! "config": {
//! "buffered": false
//! }
//! }
//! ],
//! codec = "json",
//! config = {
//! "url": "127.0.0.1:4242",
//! }
//! end;
//! ```
//!
//! An annotated example of a secure WS server configuration:
//!
//! ```tremor title="config.troy"
//! define connector ws_server from ws_server
//! with
//! preprocessors = ["separate"],
//! codec = "json",
//! config = {
//! "url": "0.0.0.0:65535",
//! "tls": {
//! # Security certificate for this service endpoint
//! "cert": "./before/localhost.cert",
//! # Security key
//! "key": "./before/localhost.key",
//! }
//! }
//! end;
//! ```
//!
//! ## `ws_client`
//!
//! This connector is a websocket client, that establishes one connection to the host and port configured in `url`. Events sent to the `in` port of this connector will be processed by the configured `codec` and `postprocessors` and turned into a text or binary frame, depending on the events boolean metadata value `$ws_server.binary`. If you want to sent a binary frame, you need to set:
//!
//! ```tremor
//! let $ws_server["binary"] = true;
//! ```
//!
//! If nothing is provided a text frame is sent.
//!
//! Data received on the open connection is processed frame by frame by the configured `preprocessors` and `codec` and sent as event via the `out` port of the connector. Each event contains a metadata record of the following form via `$ws_server`:
//!
//! ```js
//! {
//! "tls": false, // whether or not tls is enabled on the connection
//! "peer": {
//! "host": "192.168.0.1", // ip of the connection peer
//! "port": 56431 // port of the connection peer
//! }
//! }
//! ```
//!
//! ### Configuration
//!
//! | Option | Description | Type | Required | Default value |
//! |------------------|-------------------------------------------------------------------------------------------------------------|-------------------|----------|------------------------------------------------------------------------------|
//! | `url` | The URL to connect to in order to initiate the websocket connection. | string | yes | |
//! | `tls` | Optional Transport Level Security configuration. See [TLS configuration](./index.md#client). | record or boolean | no | No TLS configured. |
//! | `socket_options` | See [TCP socket options](./index.md#tcp-socket-options). | record | no | See [TCP socket options defaults](./index#tcp-socket-options) |
//!
//! ### Examples
//!
//! An annotated example of a non-tls plain WS cient configuration leveraging defaults:
//!
//! ```tremor title="config.troy"
//! define my_wsc out from ws_client
//! with
//! postprocessors = ["separate"],
//! codec = "json",
//! config = {
//! # Connect to port 4242 on the loopback device
//! "url": "ws://127.0.0.1:4242/"
//!
//! # Optional Transport Level Security configuration
//! # "tls" = { ... }
//!
//! # Optional tuning of the Nagle algorithm ( default: true )
//! # - By default no delay is preferred
//! # "no_delay" = false
//! }
//! end;
//! ```
//!
//! An annotated example of a secure WS client configuration with
//! reconnection quality of service configured:
//!
//! ```tremor title="config.troy"
//! define connector ws_client from ws_client
//! with
//! postprocessors = ["separate"],
//! codec = "json",
//! config = {
//! # Listen on all interfaces on TCP port 65535
//! "url": "wss://0.0.0.0:65535",
//!
//! # Prefer delay and enable the TCP Nagle algorithm
//! "no_delay": false,
//!
//! # Enable SSL/TLS
//! "tls": {
//! # CA certificate
//! "cafile": "./before/localhost.cert",
//! # Domain
//! "domain": "localhost",
//! }
//! },
//! # Reconnect starting at half a second, backoff by doubling, maximum of 3 tries before circuit breaking
//! reconnect = {
//! "retry": {
//! "interval_ms": 500,
//! "growth_rate": 2,
//! "max_retries": 3,
//! }
//! }
//! end;
//! ```
pub(crate) mod client;
pub(crate) mod server;
use crate::connectors::prelude::*;
use futures::prelude::*;
use futures::stream::{SplitSink, SplitStream};
use tokio::net::TcpStream;
use tokio_rustls::server::TlsStream;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::WebSocketStream;
pub(crate) struct WsDefaults;
impl Defaults for WsDefaults {
const SCHEME: &'static str = "ws";
const HOST: &'static str = "localhost";
const PORT: u16 = 80;
}
struct WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send,
Runtime: SinkRuntime,
{
stream: SplitStream<WebSocketStream<Stream>>,
// we keep this around for closing the writing part if the reader is done
sink_runtime: Option<Runtime>,
origin_uri: EventOriginUri,
meta: Value<'static>,
ctx: Ctx,
}
impl<Stream, Ctx, Runtime> WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send + Sync,
Runtime: SinkRuntime,
{
fn new(
stream: SplitStream<WebSocketStream<Stream>>,
sink_runtime: Option<Runtime>,
origin_uri: EventOriginUri,
meta: Value<'static>,
ctx: Ctx,
) -> Self {
Self {
stream,
sink_runtime,
origin_uri,
meta,
ctx,
}
}
}
#[async_trait::async_trait]
impl<Stream, Ctx, Runtime> StreamReader for WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send + Sync,
Runtime: SinkRuntime,
{
async fn quiesce(&mut self, stream: u64) -> Option<SourceReply> {
Some(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
})
}
async fn read(&mut self, stream: u64) -> Result<SourceReply> {
let mut is_binary = false;
match self.stream.next().await {
Some(Ok(message)) => {
let data = match message {
Message::Text(text) => text.into_bytes(),
Message::Binary(binary) => {
is_binary = true;
binary
}
Message::Close(_) => {
// read from the stream once again to drive the closing handshake
let after_close = self.stream.next().await;
debug_assert!(
after_close.is_none(),
"WS reader not behaving as expected after receiving a close message"
);
return Ok(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
});
}
Message::Ping(_) | Message::Pong(_) | Message::Frame(_) => {
// ignore those, but don't let the source wait
return self.read(stream).await;
}
};
let mut meta = self.meta.clone();
if is_binary | ;
Ok(SourceReply::Data {
origin_uri: self.origin_uri.clone(),
stream: Some(stream),
meta: Some(meta),
data,
port: None,
codec_overwrite: None,
})
}
Some(Err(_)) | None => Ok(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
}),
}
}
async fn on_done(&mut self, stream: u64) -> StreamDone {
// make the writer stop, otherwise the underlying socket will never be closed
if let Some(sink_runtime) = self.sink_runtime.as_mut() {
self.ctx.swallow_err(
sink_runtime.unregister_stream_writer(stream).await,
"Error unregistering stream",
);
}
StreamDone::StreamClosed
}
}
struct WsWriter<S>
where
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Sync + Send,
{
sink: SplitSink<WebSocketStream<S>, Message>,
}
impl WsWriter<TcpStream> {
fn new(sink: SplitSink<WebSocketStream<TcpStream>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<TlsStream<TcpStream>> {
fn new_tls_server(sink: SplitSink<WebSocketStream<TlsStream<TcpStream>>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<TcpStream> {
fn new_tungstenite_client(sink: SplitSink<WebSocketStream<TcpStream>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<tokio_rustls::client::TlsStream<TcpStream>> {
fn new_tls_client(
sink: SplitSink<WebSocketStream<tokio_rustls::client::TlsStream<TcpStream>>, Message>,
) -> Self {
Self { sink }
}
}
#[async_trait::async_trait]
impl<S> StreamWriter for WsWriter<S>
where
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Sync + Send + Unpin,
{
async fn write(&mut self, data: Vec<Vec<u8>>, meta: Option<&Value>) -> Result<()> {
for chunk in data {
if let Some(meta) = &meta {
// If metadata is set, check for a binary framing flag
if let Some(true) = meta.get_bool("binary") {
let message = Message::Binary(chunk);
self.sink.send(message).await?;
} else {
let message = std::str::from_utf8(&chunk)?;
let message = Message::Text(message.to_string());
self.sink.send(message).await?;
}
} else {
// No metadata, default to text ws framing
let message = std::str::from_utf8(&chunk)?;
let message = Message::Text(message.to_string());
self.sink.send(message).await?;
};
}
Ok(())
}
async fn on_done(&mut self, _stream: u64) -> Result<StreamDone> {
self.sink.close().await?;
Ok(StreamDone::StreamClosed)
}
}
| {
meta.insert("binary", Value::const_true())?;
} | conditional_block |
ws.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The [`ws_server`](#ws_server) and [`ws_client`](#ws_client) connectors provide support for the `WebSocket` protocol specification.
//!
//! Tremor can expose a client or server connection.
//!
//! Text and binary frames can be used.
//!
//! ## `ws_server`
//!
//! This connector is a websocket server. It opens a TCP listening socket, and for each incoming connection it initiates the Websocket handshake. Then websocket frames can flow
//! and are processed with the given `preprocessors` and `codec` and sent to the `out` port of the connector.
//!
//! Each incoming connection creates a new stream of events. Events from a websocket connection bear the following metadata record at `$ws_server`:
//!
//! ```js
//! {
//! "tls": true, // whether or not TLS is configured
//! "peer": {
//! "host": "127.0.0.1", // ip of the connection peer
//! "port": 12345 // port of the connection peer
//! }
//! }
//! ```
//!
//! When a connection is established and events are received, it is possible to send events to any open connection. In order to achieve this, a pipeline needs to be connected to the `in` port of this connector and send events to it. There are multiple ways to target a certain connection with a specific event:
//!
//! * Send the event you just received from the `ws_server` right back to it. It will be able to track the the event to its websocket connection. You can even do this with an aggregate event coming from a select with a window. If an event is the result of events from multiple websocket connections, it will send the event back down to each websocket connection.
//! * Attach the same metadata you receive on the connection under `$ws_server` to the event you want to send to that connection.
//!
//! ### Configuration
//!
//! | Option | Description | Type | Required | Default value |
//! |------------------|-------------------------------------------------------------------------------------------------------------|------------------|----------|------------------------------------------------------------------------------|
//! | `url` | The host and port as url to listen on. | string | yes | |
//! | `tls` | Optional Transport Level Security configuration. See [TLS configuration](./index.md#server). | record | no | No TLS configured. |
//! | `backlog` | The maximum size of the queue of pending connections not yet `accept`ed. | positive integer | no | 128 |
//! | `socket_options` | See [TCP socket options](./index.md#tcp-socket-options). | record | no | See [TCP socket options defaults](./index#tcp-socket-options) |
//!
//! ### Examples
//!
//! An annotated example of a plain WS cient configuration leveraging defaults:
//!
//! ```tremor title="config.troy"
//! define connector in from ws_server
//! with
//! preprocessors = [
//! {
//! "name": "separate",
//! "config": {
//! "buffered": false
//! }
//! }
//! ],
//! codec = "json",
//! config = {
//! "url": "127.0.0.1:4242",
//! }
//! end;
//! ```
//!
//! An annotated example of a secure WS server configuration:
//!
//! ```tremor title="config.troy"
//! define connector ws_server from ws_server
//! with
//! preprocessors = ["separate"],
//! codec = "json",
//! config = {
//! "url": "0.0.0.0:65535",
//! "tls": {
//! # Security certificate for this service endpoint
//! "cert": "./before/localhost.cert",
//! # Security key
//! "key": "./before/localhost.key",
//! }
//! }
//! end;
//! ```
//!
//! ## `ws_client`
//!
//! This connector is a websocket client, that establishes one connection to the host and port configured in `url`. Events sent to the `in` port of this connector will be processed by the configured `codec` and `postprocessors` and turned into a text or binary frame, depending on the events boolean metadata value `$ws_server.binary`. If you want to sent a binary frame, you need to set:
//!
//! ```tremor
//! let $ws_server["binary"] = true;
//! ```
//!
//! If nothing is provided a text frame is sent.
//!
//! Data received on the open connection is processed frame by frame by the configured `preprocessors` and `codec` and sent as event via the `out` port of the connector. Each event contains a metadata record of the following form via `$ws_server`:
//!
//! ```js
//! {
//! "tls": false, // whether or not tls is enabled on the connection
//! "peer": {
//! "host": "192.168.0.1", // ip of the connection peer
//! "port": 56431 // port of the connection peer
//! }
//! }
//! ```
//!
//! ### Configuration
//!
//! | Option | Description | Type | Required | Default value |
//! |------------------|-------------------------------------------------------------------------------------------------------------|-------------------|----------|------------------------------------------------------------------------------|
//! | `url` | The URL to connect to in order to initiate the websocket connection. | string | yes | |
//! | `tls` | Optional Transport Level Security configuration. See [TLS configuration](./index.md#client). | record or boolean | no | No TLS configured. |
//! | `socket_options` | See [TCP socket options](./index.md#tcp-socket-options). | record | no | See [TCP socket options defaults](./index#tcp-socket-options) |
//!
//! ### Examples
//!
//! An annotated example of a non-tls plain WS cient configuration leveraging defaults:
//!
//! ```tremor title="config.troy"
//! define my_wsc out from ws_client
//! with
//! postprocessors = ["separate"],
//! codec = "json",
//! config = {
//! # Connect to port 4242 on the loopback device
//! "url": "ws://127.0.0.1:4242/"
//!
//! # Optional Transport Level Security configuration
//! # "tls" = { ... }
//!
//! # Optional tuning of the Nagle algorithm ( default: true )
//! # - By default no delay is preferred
//! # "no_delay" = false
//! }
//! end;
//! ```
//!
//! An annotated example of a secure WS client configuration with
//! reconnection quality of service configured:
//!
//! ```tremor title="config.troy"
//! define connector ws_client from ws_client
//! with
//! postprocessors = ["separate"],
//! codec = "json",
//! config = {
//! # Listen on all interfaces on TCP port 65535
//! "url": "wss://0.0.0.0:65535",
//!
//! # Prefer delay and enable the TCP Nagle algorithm
//! "no_delay": false,
//!
//! # Enable SSL/TLS
//! "tls": {
//! # CA certificate
//! "cafile": "./before/localhost.cert",
//! # Domain
//! "domain": "localhost",
//! }
//! },
//! # Reconnect starting at half a second, backoff by doubling, maximum of 3 tries before circuit breaking
//! reconnect = {
//! "retry": {
//! "interval_ms": 500,
//! "growth_rate": 2,
//! "max_retries": 3,
//! }
//! }
//! end;
//! ```
pub(crate) mod client;
pub(crate) mod server;
use crate::connectors::prelude::*;
use futures::prelude::*;
use futures::stream::{SplitSink, SplitStream};
use tokio::net::TcpStream;
use tokio_rustls::server::TlsStream;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::WebSocketStream;
pub(crate) struct WsDefaults;
impl Defaults for WsDefaults {
const SCHEME: &'static str = "ws";
const HOST: &'static str = "localhost";
const PORT: u16 = 80;
}
struct WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send,
Runtime: SinkRuntime,
{
stream: SplitStream<WebSocketStream<Stream>>,
// we keep this around for closing the writing part if the reader is done
sink_runtime: Option<Runtime>,
origin_uri: EventOriginUri,
meta: Value<'static>,
ctx: Ctx,
}
impl<Stream, Ctx, Runtime> WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send + Sync,
Runtime: SinkRuntime,
{
fn new(
stream: SplitStream<WebSocketStream<Stream>>,
sink_runtime: Option<Runtime>,
origin_uri: EventOriginUri,
meta: Value<'static>,
ctx: Ctx,
) -> Self {
Self {
stream,
sink_runtime,
origin_uri,
meta,
ctx,
}
}
}
#[async_trait::async_trait]
impl<Stream, Ctx, Runtime> StreamReader for WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send + Sync,
Runtime: SinkRuntime,
{
async fn quiesce(&mut self, stream: u64) -> Option<SourceReply> {
Some(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
})
}
async fn read(&mut self, stream: u64) -> Result<SourceReply> {
let mut is_binary = false;
match self.stream.next().await {
Some(Ok(message)) => {
let data = match message {
Message::Text(text) => text.into_bytes(),
Message::Binary(binary) => {
is_binary = true;
binary
}
Message::Close(_) => {
// read from the stream once again to drive the closing handshake
let after_close = self.stream.next().await;
debug_assert!(
after_close.is_none(),
"WS reader not behaving as expected after receiving a close message"
);
return Ok(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
});
}
Message::Ping(_) | Message::Pong(_) | Message::Frame(_) => {
// ignore those, but don't let the source wait
return self.read(stream).await;
}
};
let mut meta = self.meta.clone();
if is_binary {
meta.insert("binary", Value::const_true())?;
};
Ok(SourceReply::Data {
origin_uri: self.origin_uri.clone(),
stream: Some(stream),
meta: Some(meta),
data,
port: None,
codec_overwrite: None,
})
}
Some(Err(_)) | None => Ok(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
}),
}
}
async fn on_done(&mut self, stream: u64) -> StreamDone {
// make the writer stop, otherwise the underlying socket will never be closed
if let Some(sink_runtime) = self.sink_runtime.as_mut() {
self.ctx.swallow_err(
sink_runtime.unregister_stream_writer(stream).await,
"Error unregistering stream",
);
}
StreamDone::StreamClosed
}
}
struct WsWriter<S>
where
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Sync + Send,
{
sink: SplitSink<WebSocketStream<S>, Message>,
}
impl WsWriter<TcpStream> {
fn | (sink: SplitSink<WebSocketStream<TcpStream>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<TlsStream<TcpStream>> {
fn new_tls_server(sink: SplitSink<WebSocketStream<TlsStream<TcpStream>>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<TcpStream> {
fn new_tungstenite_client(sink: SplitSink<WebSocketStream<TcpStream>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<tokio_rustls::client::TlsStream<TcpStream>> {
fn new_tls_client(
sink: SplitSink<WebSocketStream<tokio_rustls::client::TlsStream<TcpStream>>, Message>,
) -> Self {
Self { sink }
}
}
#[async_trait::async_trait]
impl<S> StreamWriter for WsWriter<S>
where
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Sync + Send + Unpin,
{
async fn write(&mut self, data: Vec<Vec<u8>>, meta: Option<&Value>) -> Result<()> {
for chunk in data {
if let Some(meta) = &meta {
// If metadata is set, check for a binary framing flag
if let Some(true) = meta.get_bool("binary") {
let message = Message::Binary(chunk);
self.sink.send(message).await?;
} else {
let message = std::str::from_utf8(&chunk)?;
let message = Message::Text(message.to_string());
self.sink.send(message).await?;
}
} else {
// No metadata, default to text ws framing
let message = std::str::from_utf8(&chunk)?;
let message = Message::Text(message.to_string());
self.sink.send(message).await?;
};
}
Ok(())
}
async fn on_done(&mut self, _stream: u64) -> Result<StreamDone> {
self.sink.close().await?;
Ok(StreamDone::StreamClosed)
}
}
| new | identifier_name |
ws.rs | // Copyright 2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The [`ws_server`](#ws_server) and [`ws_client`](#ws_client) connectors provide support for the `WebSocket` protocol specification.
//!
//! Tremor can expose a client or server connection.
//!
//! Text and binary frames can be used.
//!
//! ## `ws_server`
//!
//! This connector is a websocket server. It opens a TCP listening socket, and for each incoming connection it initiates the Websocket handshake. Then websocket frames can flow
//! and are processed with the given `preprocessors` and `codec` and sent to the `out` port of the connector.
//!
//! Each incoming connection creates a new stream of events. Events from a websocket connection bear the following metadata record at `$ws_server`:
//!
//! ```js
//! {
//! "tls": true, // whether or not TLS is configured
//! "peer": {
//! "host": "127.0.0.1", // ip of the connection peer
//! "port": 12345 // port of the connection peer
//! }
//! }
//! ```
//!
//! When a connection is established and events are received, it is possible to send events to any open connection. In order to achieve this, a pipeline needs to be connected to the `in` port of this connector and send events to it. There are multiple ways to target a certain connection with a specific event:
//!
//! * Send the event you just received from the `ws_server` right back to it. It will be able to track the the event to its websocket connection. You can even do this with an aggregate event coming from a select with a window. If an event is the result of events from multiple websocket connections, it will send the event back down to each websocket connection.
//! * Attach the same metadata you receive on the connection under `$ws_server` to the event you want to send to that connection.
//!
//! ### Configuration
//!
//! | Option | Description | Type | Required | Default value |
//! |------------------|-------------------------------------------------------------------------------------------------------------|------------------|----------|------------------------------------------------------------------------------|
//! | `url` | The host and port as url to listen on. | string | yes | |
//! | `tls` | Optional Transport Level Security configuration. See [TLS configuration](./index.md#server). | record | no | No TLS configured. |
//! | `backlog` | The maximum size of the queue of pending connections not yet `accept`ed. | positive integer | no | 128 |
//! | `socket_options` | See [TCP socket options](./index.md#tcp-socket-options). | record | no | See [TCP socket options defaults](./index#tcp-socket-options) |
//!
//! ### Examples
//!
//! An annotated example of a plain WS cient configuration leveraging defaults:
//!
//! ```tremor title="config.troy"
//! define connector in from ws_server
//! with
//! preprocessors = [
//! {
//! "name": "separate",
//! "config": {
//! "buffered": false
//! }
//! }
//! ],
//! codec = "json",
//! config = {
//! "url": "127.0.0.1:4242",
//! }
//! end;
//! ```
//!
//! An annotated example of a secure WS server configuration:
//!
//! ```tremor title="config.troy"
//! define connector ws_server from ws_server
//! with
//! preprocessors = ["separate"],
//! codec = "json",
//! config = {
//! "url": "0.0.0.0:65535",
//! "tls": {
//! # Security certificate for this service endpoint
//! "cert": "./before/localhost.cert",
//! # Security key
//! "key": "./before/localhost.key",
//! }
//! }
//! end;
//! ```
//!
//! ## `ws_client`
//!
//! This connector is a websocket client, that establishes one connection to the host and port configured in `url`. Events sent to the `in` port of this connector will be processed by the configured `codec` and `postprocessors` and turned into a text or binary frame, depending on the events boolean metadata value `$ws_server.binary`. If you want to sent a binary frame, you need to set:
//!
//! ```tremor
//! let $ws_server["binary"] = true;
//! ```
//!
//! If nothing is provided a text frame is sent.
//!
//! Data received on the open connection is processed frame by frame by the configured `preprocessors` and `codec` and sent as event via the `out` port of the connector. Each event contains a metadata record of the following form via `$ws_server`:
//!
//! ```js
//! {
//! "tls": false, // whether or not tls is enabled on the connection
//! "peer": {
//! "host": "192.168.0.1", // ip of the connection peer
//! "port": 56431 // port of the connection peer
//! }
//! }
//! ```
//!
//! ### Configuration
//!
//! | Option | Description | Type | Required | Default value |
//! |------------------|-------------------------------------------------------------------------------------------------------------|-------------------|----------|------------------------------------------------------------------------------|
//! | `url` | The URL to connect to in order to initiate the websocket connection. | string | yes | |
//! | `tls` | Optional Transport Level Security configuration. See [TLS configuration](./index.md#client). | record or boolean | no | No TLS configured. |
//! | `socket_options` | See [TCP socket options](./index.md#tcp-socket-options). | record | no | See [TCP socket options defaults](./index#tcp-socket-options) |
//!
//! ### Examples
//!
//! An annotated example of a non-tls plain WS cient configuration leveraging defaults:
//!
//! ```tremor title="config.troy"
//! define my_wsc out from ws_client
//! with
//! postprocessors = ["separate"],
//! codec = "json",
//! config = {
//! # Connect to port 4242 on the loopback device
//! "url": "ws://127.0.0.1:4242/"
//!
//! # Optional Transport Level Security configuration
//! # "tls" = { ... }
//!
//! # Optional tuning of the Nagle algorithm ( default: true )
//! # - By default no delay is preferred
//! # "no_delay" = false
//! }
//! end;
//! ```
//!
//! An annotated example of a secure WS client configuration with
//! reconnection quality of service configured:
//!
//! ```tremor title="config.troy"
//! define connector ws_client from ws_client
//! with
//! postprocessors = ["separate"],
//! codec = "json",
//! config = {
//! # Listen on all interfaces on TCP port 65535
//! "url": "wss://0.0.0.0:65535",
//!
//! # Prefer delay and enable the TCP Nagle algorithm
//! "no_delay": false,
//!
//! # Enable SSL/TLS
//! "tls": {
//! # CA certificate
//! "cafile": "./before/localhost.cert",
//! # Domain
//! "domain": "localhost",
//! }
//! },
//! # Reconnect starting at half a second, backoff by doubling, maximum of 3 tries before circuit breaking
//! reconnect = {
//! "retry": {
//! "interval_ms": 500,
//! "growth_rate": 2,
//! "max_retries": 3,
//! }
//! }
//! end;
//! ```
pub(crate) mod client;
pub(crate) mod server;
use crate::connectors::prelude::*;
use futures::prelude::*;
use futures::stream::{SplitSink, SplitStream};
use tokio::net::TcpStream;
use tokio_rustls::server::TlsStream;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::WebSocketStream;
pub(crate) struct WsDefaults;
impl Defaults for WsDefaults {
const SCHEME: &'static str = "ws";
const HOST: &'static str = "localhost";
const PORT: u16 = 80;
}
struct WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send,
Runtime: SinkRuntime,
{
stream: SplitStream<WebSocketStream<Stream>>,
// we keep this around for closing the writing part if the reader is done
sink_runtime: Option<Runtime>,
origin_uri: EventOriginUri,
meta: Value<'static>,
ctx: Ctx,
}
impl<Stream, Ctx, Runtime> WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send + Sync,
Runtime: SinkRuntime,
{
fn new(
stream: SplitStream<WebSocketStream<Stream>>,
sink_runtime: Option<Runtime>,
origin_uri: EventOriginUri,
meta: Value<'static>,
ctx: Ctx,
) -> Self {
Self {
stream,
sink_runtime,
origin_uri,
meta,
ctx,
}
}
}
#[async_trait::async_trait]
impl<Stream, Ctx, Runtime> StreamReader for WsReader<Stream, Ctx, Runtime>
where
Stream: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Sync + Unpin,
Ctx: Context + Send + Sync,
Runtime: SinkRuntime,
{
async fn quiesce(&mut self, stream: u64) -> Option<SourceReply> {
Some(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
})
}
async fn read(&mut self, stream: u64) -> Result<SourceReply> {
let mut is_binary = false;
match self.stream.next().await {
Some(Ok(message)) => {
let data = match message {
Message::Text(text) => text.into_bytes(),
Message::Binary(binary) => {
is_binary = true;
binary
}
Message::Close(_) => {
// read from the stream once again to drive the closing handshake
let after_close = self.stream.next().await;
debug_assert!(
after_close.is_none(),
"WS reader not behaving as expected after receiving a close message"
);
return Ok(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
});
}
Message::Ping(_) | Message::Pong(_) | Message::Frame(_) => {
// ignore those, but don't let the source wait
return self.read(stream).await;
}
};
let mut meta = self.meta.clone();
if is_binary {
meta.insert("binary", Value::const_true())?;
};
Ok(SourceReply::Data {
origin_uri: self.origin_uri.clone(),
stream: Some(stream),
meta: Some(meta),
data,
port: None,
codec_overwrite: None,
})
}
Some(Err(_)) | None => Ok(SourceReply::EndStream {
origin_uri: self.origin_uri.clone(),
stream,
meta: Some(self.meta.clone()),
}),
}
}
async fn on_done(&mut self, stream: u64) -> StreamDone {
// make the writer stop, otherwise the underlying socket will never be closed
if let Some(sink_runtime) = self.sink_runtime.as_mut() {
self.ctx.swallow_err(
sink_runtime.unregister_stream_writer(stream).await,
"Error unregistering stream",
);
}
StreamDone::StreamClosed
}
}
struct WsWriter<S>
where
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Sync + Send,
{
sink: SplitSink<WebSocketStream<S>, Message>,
}
impl WsWriter<TcpStream> {
fn new(sink: SplitSink<WebSocketStream<TcpStream>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<TlsStream<TcpStream>> {
fn new_tls_server(sink: SplitSink<WebSocketStream<TlsStream<TcpStream>>, Message>) -> Self |
}
impl WsWriter<TcpStream> {
fn new_tungstenite_client(sink: SplitSink<WebSocketStream<TcpStream>, Message>) -> Self {
Self { sink }
}
}
impl WsWriter<tokio_rustls::client::TlsStream<TcpStream>> {
fn new_tls_client(
sink: SplitSink<WebSocketStream<tokio_rustls::client::TlsStream<TcpStream>>, Message>,
) -> Self {
Self { sink }
}
}
#[async_trait::async_trait]
impl<S> StreamWriter for WsWriter<S>
where
S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Sync + Send + Unpin,
{
async fn write(&mut self, data: Vec<Vec<u8>>, meta: Option<&Value>) -> Result<()> {
for chunk in data {
if let Some(meta) = &meta {
// If metadata is set, check for a binary framing flag
if let Some(true) = meta.get_bool("binary") {
let message = Message::Binary(chunk);
self.sink.send(message).await?;
} else {
let message = std::str::from_utf8(&chunk)?;
let message = Message::Text(message.to_string());
self.sink.send(message).await?;
}
} else {
// No metadata, default to text ws framing
let message = std::str::from_utf8(&chunk)?;
let message = Message::Text(message.to_string());
self.sink.send(message).await?;
};
}
Ok(())
}
async fn on_done(&mut self, _stream: u64) -> Result<StreamDone> {
self.sink.close().await?;
Ok(StreamDone::StreamClosed)
}
}
| {
Self { sink }
} | identifier_body |
cabi.rs | use std::ptr;
use std::mem;
use std::slice;
use std::panic;
use std::ffi::{CStr, OsStr};
use std::borrow::Cow;
use std::os::raw::{c_int, c_uint, c_char};
use std::os::unix::ffi::OsStrExt;
use proguard::MappingView;
use sourcemap::Error as SourceMapError;
use errors::{Error, ErrorKind, Result};
use unified::{View, TokenMatch, Index, ViewOrIndex};
use memdb::DumpOptions;
fn resultbox<T>(val: T) -> Result<*mut T> {
Ok(Box::into_raw(Box::new(val)))
}
#[derive(Debug)]
#[repr(C)]
pub struct Token {
pub dst_line: c_uint,
pub dst_col: c_uint,
pub src_line: c_uint,
pub src_col: c_uint,
pub name: *const u8,
pub name_len: c_uint,
pub src: *const u8,
pub src_len: c_uint,
pub src_id: c_uint,
}
#[derive(Debug)]
#[repr(C)]
pub struct Str {
pub data: *const u8,
pub len: c_uint,
}
#[derive(Debug)]
#[repr(C)]
pub struct CError {
pub message: *const u8,
pub failed: c_int,
pub code: c_int,
}
fn get_error_code_from_kind(kind: &ErrorKind) -> c_int {
match *kind {
ErrorKind::SourceMap(SourceMapError::IndexedSourcemap) => 2,
ErrorKind::SourceMap(SourceMapError::BadJson(_)) => 3,
ErrorKind::SourceMap(SourceMapError::CannotFlatten(_)) => 4,
ErrorKind::UnsupportedMemDbVersion => 5,
ErrorKind::Io(_) => 6,
ErrorKind::TooManySources => 20,
ErrorKind::TooManyNames => 21,
ErrorKind::LocationOverflow => 22,
ErrorKind::AlreadyMemDb => 23,
_ => 1,
}
}
unsafe fn set_token<'a>(out: *mut Token, tm: &'a TokenMatch<'a>) {
(*out).dst_line = tm.dst_line;
(*out).dst_col = tm.dst_col;
(*out).src_line = tm.src_line;
(*out).src_col = tm.src_col;
(*out).name = match tm.name {
Some(name) => name.as_ptr(),
None => ptr::null()
};
(*out).name_len = tm.name.map(|x| x.as_bytes().len()).unwrap_or(0) as c_uint;
(*out).src = match tm.src {
Some(src) => src.as_ptr(),
None => ptr::null()
};
(*out).src_len = tm.src.map(|x| x.as_bytes().len()).unwrap_or(0) as c_uint;
(*out).src_id = tm.src_id;
}
unsafe fn notify_err(err: Error, err_out: *mut CError) {
if !err_out.is_null() {
let s = format!("{}\x00", err);
(*err_out).failed = 1;
(*err_out).message = Box::into_raw(s.into_boxed_str()) as *mut u8;
(*err_out).code = get_error_code_from_kind(err.kind());
}
}
unsafe fn landingpad<F: FnOnce() -> Result<T> + panic::UnwindSafe, T>(
f: F, err_out: *mut CError) -> T
{
match panic::catch_unwind(f) {
Ok(rv) => rv.map_err(|err| notify_err(err, err_out)).unwrap_or(mem::zeroed()),
Err(err) => {
use std::any::Any;
let err = &*err as &Any;
let msg = match err.downcast_ref::<&'static str>() {
Some(s) => *s,
None => {
match err.downcast_ref::<String>() {
Some(s) => &**s,
None => "Box<Any>",
}
}
};
notify_err(ErrorKind::InternalError(msg.to_string()).into(), err_out);
mem::zeroed()
}
}
}
macro_rules! export (
(
$name:ident($($aname:ident: $aty:ty),*) -> Result<$rv:ty> $body:block
) => (
#[no_mangle]
pub unsafe extern "C" fn $name($($aname: $aty,)* err_out: *mut CError) -> $rv
{
landingpad(|| $body, err_out)
}
);
(
$name:ident($($aname:ident: $aty:ty),*) $body:block
) => {
#[no_mangle]
pub unsafe extern "C" fn $name($($aname: $aty,)*)
{
// this silences panics and stuff
landingpad(|| { $body; Ok(0 as c_int)}, ptr::null_mut());
}
}
);
export!(lsm_init() {
fn silent_panic_handler(_pi: &panic::PanicInfo) {
// don't do anything here. This disables the default printing of
// panics to stderr which we really don't care about here.
}
panic::set_hook(Box::new(silent_panic_handler));
});
export!(lsm_view_from_json(bytes: *const u8, len: c_uint) -> Result<*mut View> {
resultbox(View::json_from_slice(slice::from_raw_parts(bytes, len as usize))?)
});
export!(lsm_view_from_memdb(
bytes: *const u8, len: c_uint) -> Result<*mut View>
{
// XXX: this currently copies because that's safer. Consider improving this?
resultbox(View::memdb_from_vec(slice::from_raw_parts(
bytes,
len as usize
).to_vec())?)
});
export!(lsm_view_from_memdb_file(path: *const c_char) -> Result<*mut View> {
resultbox(View::memdb_from_path(CStr::from_ptr(path).to_str()?)?)
});
export!(lsm_view_free(view: *mut View) {
if !view.is_null() {
Box::from_raw(view);
}
});
export!(lsm_view_get_token_count(view: *const View) -> Result<c_uint> {
Ok((*view).get_token_count() as c_uint)
});
export!(lsm_view_get_token(view: *const View, idx: c_uint, out: *mut Token) -> Result<c_int> {
Ok(match (*view).get_token(idx as u32) {
None => 0,
Some(tm) => {
set_token(out, &tm);
1
}
})
});
export!(lsm_view_lookup_token(
view: *const View, line: c_uint, col: c_uint, out: *mut Token) -> Result<c_int>
{
Ok(match (*view).lookup_token(line, col) {
None => 0,
Some(tm) => {
set_token(out, &tm);
1
}
})
});
export!(lsm_view_get_original_function_name(
view: *const View, line: c_uint, col: c_uint, minified_name: *const c_char,
minified_source: *const c_char, name_out: *mut *const c_char) -> Result<c_uint>
{
Ok(match (*view).get_original_function_name(
line as u32, col as u32, CStr::from_ptr(minified_name).to_str()?,
CStr::from_ptr(minified_source).to_str()?)
{
Some(name) => {
*name_out = name.as_ptr() as *const c_char;
name.len() as c_uint
}
None => 0
})
});
export!(lsm_view_get_source_count(view: *const View) -> Result<c_uint> {
Ok((*view).get_source_count() as c_uint)
});
export!(lsm_view_has_source_contents(view: *const View, src_id: c_uint) -> Result<c_int> {
Ok(if (*view).get_source_contents(src_id as u32).is_some() { 1 } else { 0 })
});
export!(lsm_view_get_source_contents(
view: *const View, src_id: c_uint, len_out: *mut c_uint,
must_free: *mut c_int) -> Result<*mut u8>
{
*must_free = 0;
Ok(match (*view).get_source_contents(src_id as u32) {
None => ptr::null_mut(),
Some(contents) => {
*len_out = contents.len() as c_uint;
match contents {
Cow::Borrowed(s) => s.as_ptr() as *mut u8,
Cow::Owned(val) => {
*must_free = 1;
Box::into_raw(val.into_boxed_str()) as *mut u8
}
}
}
})
});
export!(lsm_view_get_source_name(
view: *const View, src_id: c_uint, len_out: *mut c_uint) -> Result<*const u8>
{
Ok(match (*view).get_source(src_id as u32) {
None => ptr::null(),
Some(name) => {
*len_out = name.len() as c_uint;
name.as_ptr()
}
})
});
export!(lsm_view_dump_memdb(
view: *mut View, len_out: *mut c_uint, with_source_contents: c_int,
with_names: c_int) -> Result<*mut u8>
{
let memdb = (*view).dump_memdb(DumpOptions {
with_source_contents: with_source_contents != 0,
with_names: with_names != 0,
})?;
*len_out = memdb.len() as c_uint;
Ok(Box::into_raw(memdb.into_boxed_slice()) as *mut u8)
});
export!(lsm_buffer_free(buf: *mut u8) {
if !buf.is_null() {
Box::from_raw(buf);
}
});
export!(lsm_index_from_json(bytes: *const u8, len: c_uint) -> Result<*mut Index> {
resultbox(Index::json_from_slice(slice::from_raw_parts(
bytes,
len as usize
))?)
});
export!(lsm_index_free(idx: *mut Index) {
if !idx.is_null() {
Box::from_raw(idx);
}
});
export!(lsm_index_can_flatten(idx: *const Index) -> Result<c_int> {
Ok(if (*idx).can_flatten() { 1 } else { 0 })
});
export!(lsm_index_into_view(idx: *mut Index) -> Result<*mut View> {
resultbox(Box::from_raw(idx).into_view()?)
});
export!(lsm_view_or_index_from_json(
bytes: *const u8, len: c_uint, view_out: *mut *mut View,
idx_out: *mut *mut Index) -> Result<c_int> {
match ViewOrIndex::from_slice(slice::from_raw_parts(
bytes,
len as usize
))? {
ViewOrIndex::View(view) => {
*view_out = Box::into_raw(Box::new(view));
*idx_out = ptr::null_mut();
Ok(1)
}
ViewOrIndex::Index(idx) => {
*view_out = ptr::null_mut();
*idx_out = Box::into_raw(Box::new(idx));
Ok(2)
}
}
});
export!(lsm_proguard_mapping_from_bytes(bytes: *const u8, len: c_uint)
-> Result<*mut MappingView<'static>>
{
resultbox(MappingView::from_vec(slice::from_raw_parts(bytes, len as usize).to_vec())?)
});
export!(lsm_proguard_mapping_from_path(filename: *const c_char)
-> Result<*mut MappingView<'static>>
{
resultbox(MappingView::from_path(
OsStr::from_bytes(CStr::from_ptr(filename).to_bytes()))?)
});
export!(lsm_proguard_mapping_free(view: *mut MappingView) {
if !view.is_null() {
Box::from_raw(view);
}
});
export!(lsm_proguard_mapping_has_line_info(view: *const MappingView) -> Result<c_int> {
Ok(if (*view).has_line_info() {
1
} else {
0
})
});
export!(lsm_proguard_mapping_convert_dotted_path(
view: *const MappingView, path: *const c_char, lineno: c_int)
-> Result<*mut u8>
{
let path = CStr::from_ptr(path).to_str()?;
let mut iter = path.splitn(2, ':');
let cls_name = iter.next().unwrap_or("");
let meth_name = iter.next();
let s = if let Some(cls) = (*view).find_class(cls_name) {
let class_name = cls.class_name();
if let Some(meth_name) = meth_name {
let methods = cls.get_methods(meth_name, if lineno == 0 {
None
} else {
Some(lineno as u32)
});
if !methods.is_empty() {
format!("{}:{}\x00", class_name, methods[0].name())
} else {
format!("{}:{}\x00", class_name, meth_name)
}
} else {
format!("{}\x00", class_name)
} | format!("{}\x00", path)
};
Ok(Box::into_raw(s.into_boxed_str()) as *mut u8)
}); | } else { | random_line_split |
cabi.rs | use std::ptr;
use std::mem;
use std::slice;
use std::panic;
use std::ffi::{CStr, OsStr};
use std::borrow::Cow;
use std::os::raw::{c_int, c_uint, c_char};
use std::os::unix::ffi::OsStrExt;
use proguard::MappingView;
use sourcemap::Error as SourceMapError;
use errors::{Error, ErrorKind, Result};
use unified::{View, TokenMatch, Index, ViewOrIndex};
use memdb::DumpOptions;
fn resultbox<T>(val: T) -> Result<*mut T> {
Ok(Box::into_raw(Box::new(val)))
}
#[derive(Debug)]
#[repr(C)]
pub struct Token {
pub dst_line: c_uint,
pub dst_col: c_uint,
pub src_line: c_uint,
pub src_col: c_uint,
pub name: *const u8,
pub name_len: c_uint,
pub src: *const u8,
pub src_len: c_uint,
pub src_id: c_uint,
}
#[derive(Debug)]
#[repr(C)]
pub struct Str {
pub data: *const u8,
pub len: c_uint,
}
#[derive(Debug)]
#[repr(C)]
pub struct CError {
pub message: *const u8,
pub failed: c_int,
pub code: c_int,
}
fn get_error_code_from_kind(kind: &ErrorKind) -> c_int {
match *kind {
ErrorKind::SourceMap(SourceMapError::IndexedSourcemap) => 2,
ErrorKind::SourceMap(SourceMapError::BadJson(_)) => 3,
ErrorKind::SourceMap(SourceMapError::CannotFlatten(_)) => 4,
ErrorKind::UnsupportedMemDbVersion => 5,
ErrorKind::Io(_) => 6,
ErrorKind::TooManySources => 20,
ErrorKind::TooManyNames => 21,
ErrorKind::LocationOverflow => 22,
ErrorKind::AlreadyMemDb => 23,
_ => 1,
}
}
unsafe fn | <'a>(out: *mut Token, tm: &'a TokenMatch<'a>) {
(*out).dst_line = tm.dst_line;
(*out).dst_col = tm.dst_col;
(*out).src_line = tm.src_line;
(*out).src_col = tm.src_col;
(*out).name = match tm.name {
Some(name) => name.as_ptr(),
None => ptr::null()
};
(*out).name_len = tm.name.map(|x| x.as_bytes().len()).unwrap_or(0) as c_uint;
(*out).src = match tm.src {
Some(src) => src.as_ptr(),
None => ptr::null()
};
(*out).src_len = tm.src.map(|x| x.as_bytes().len()).unwrap_or(0) as c_uint;
(*out).src_id = tm.src_id;
}
unsafe fn notify_err(err: Error, err_out: *mut CError) {
if !err_out.is_null() {
let s = format!("{}\x00", err);
(*err_out).failed = 1;
(*err_out).message = Box::into_raw(s.into_boxed_str()) as *mut u8;
(*err_out).code = get_error_code_from_kind(err.kind());
}
}
unsafe fn landingpad<F: FnOnce() -> Result<T> + panic::UnwindSafe, T>(
f: F, err_out: *mut CError) -> T
{
match panic::catch_unwind(f) {
Ok(rv) => rv.map_err(|err| notify_err(err, err_out)).unwrap_or(mem::zeroed()),
Err(err) => {
use std::any::Any;
let err = &*err as &Any;
let msg = match err.downcast_ref::<&'static str>() {
Some(s) => *s,
None => {
match err.downcast_ref::<String>() {
Some(s) => &**s,
None => "Box<Any>",
}
}
};
notify_err(ErrorKind::InternalError(msg.to_string()).into(), err_out);
mem::zeroed()
}
}
}
macro_rules! export (
(
$name:ident($($aname:ident: $aty:ty),*) -> Result<$rv:ty> $body:block
) => (
#[no_mangle]
pub unsafe extern "C" fn $name($($aname: $aty,)* err_out: *mut CError) -> $rv
{
landingpad(|| $body, err_out)
}
);
(
$name:ident($($aname:ident: $aty:ty),*) $body:block
) => {
#[no_mangle]
pub unsafe extern "C" fn $name($($aname: $aty,)*)
{
// this silences panics and stuff
landingpad(|| { $body; Ok(0 as c_int)}, ptr::null_mut());
}
}
);
export!(lsm_init() {
fn silent_panic_handler(_pi: &panic::PanicInfo) {
// don't do anything here. This disables the default printing of
// panics to stderr which we really don't care about here.
}
panic::set_hook(Box::new(silent_panic_handler));
});
export!(lsm_view_from_json(bytes: *const u8, len: c_uint) -> Result<*mut View> {
resultbox(View::json_from_slice(slice::from_raw_parts(bytes, len as usize))?)
});
export!(lsm_view_from_memdb(
bytes: *const u8, len: c_uint) -> Result<*mut View>
{
// XXX: this currently copies because that's safer. Consider improving this?
resultbox(View::memdb_from_vec(slice::from_raw_parts(
bytes,
len as usize
).to_vec())?)
});
export!(lsm_view_from_memdb_file(path: *const c_char) -> Result<*mut View> {
resultbox(View::memdb_from_path(CStr::from_ptr(path).to_str()?)?)
});
export!(lsm_view_free(view: *mut View) {
if !view.is_null() {
Box::from_raw(view);
}
});
export!(lsm_view_get_token_count(view: *const View) -> Result<c_uint> {
Ok((*view).get_token_count() as c_uint)
});
export!(lsm_view_get_token(view: *const View, idx: c_uint, out: *mut Token) -> Result<c_int> {
Ok(match (*view).get_token(idx as u32) {
None => 0,
Some(tm) => {
set_token(out, &tm);
1
}
})
});
export!(lsm_view_lookup_token(
view: *const View, line: c_uint, col: c_uint, out: *mut Token) -> Result<c_int>
{
Ok(match (*view).lookup_token(line, col) {
None => 0,
Some(tm) => {
set_token(out, &tm);
1
}
})
});
export!(lsm_view_get_original_function_name(
view: *const View, line: c_uint, col: c_uint, minified_name: *const c_char,
minified_source: *const c_char, name_out: *mut *const c_char) -> Result<c_uint>
{
Ok(match (*view).get_original_function_name(
line as u32, col as u32, CStr::from_ptr(minified_name).to_str()?,
CStr::from_ptr(minified_source).to_str()?)
{
Some(name) => {
*name_out = name.as_ptr() as *const c_char;
name.len() as c_uint
}
None => 0
})
});
export!(lsm_view_get_source_count(view: *const View) -> Result<c_uint> {
Ok((*view).get_source_count() as c_uint)
});
export!(lsm_view_has_source_contents(view: *const View, src_id: c_uint) -> Result<c_int> {
Ok(if (*view).get_source_contents(src_id as u32).is_some() { 1 } else { 0 })
});
export!(lsm_view_get_source_contents(
view: *const View, src_id: c_uint, len_out: *mut c_uint,
must_free: *mut c_int) -> Result<*mut u8>
{
*must_free = 0;
Ok(match (*view).get_source_contents(src_id as u32) {
None => ptr::null_mut(),
Some(contents) => {
*len_out = contents.len() as c_uint;
match contents {
Cow::Borrowed(s) => s.as_ptr() as *mut u8,
Cow::Owned(val) => {
*must_free = 1;
Box::into_raw(val.into_boxed_str()) as *mut u8
}
}
}
})
});
export!(lsm_view_get_source_name(
view: *const View, src_id: c_uint, len_out: *mut c_uint) -> Result<*const u8>
{
Ok(match (*view).get_source(src_id as u32) {
None => ptr::null(),
Some(name) => {
*len_out = name.len() as c_uint;
name.as_ptr()
}
})
});
export!(lsm_view_dump_memdb(
view: *mut View, len_out: *mut c_uint, with_source_contents: c_int,
with_names: c_int) -> Result<*mut u8>
{
let memdb = (*view).dump_memdb(DumpOptions {
with_source_contents: with_source_contents != 0,
with_names: with_names != 0,
})?;
*len_out = memdb.len() as c_uint;
Ok(Box::into_raw(memdb.into_boxed_slice()) as *mut u8)
});
export!(lsm_buffer_free(buf: *mut u8) {
if !buf.is_null() {
Box::from_raw(buf);
}
});
export!(lsm_index_from_json(bytes: *const u8, len: c_uint) -> Result<*mut Index> {
resultbox(Index::json_from_slice(slice::from_raw_parts(
bytes,
len as usize
))?)
});
export!(lsm_index_free(idx: *mut Index) {
if !idx.is_null() {
Box::from_raw(idx);
}
});
export!(lsm_index_can_flatten(idx: *const Index) -> Result<c_int> {
Ok(if (*idx).can_flatten() { 1 } else { 0 })
});
export!(lsm_index_into_view(idx: *mut Index) -> Result<*mut View> {
resultbox(Box::from_raw(idx).into_view()?)
});
export!(lsm_view_or_index_from_json(
bytes: *const u8, len: c_uint, view_out: *mut *mut View,
idx_out: *mut *mut Index) -> Result<c_int> {
match ViewOrIndex::from_slice(slice::from_raw_parts(
bytes,
len as usize
))? {
ViewOrIndex::View(view) => {
*view_out = Box::into_raw(Box::new(view));
*idx_out = ptr::null_mut();
Ok(1)
}
ViewOrIndex::Index(idx) => {
*view_out = ptr::null_mut();
*idx_out = Box::into_raw(Box::new(idx));
Ok(2)
}
}
});
export!(lsm_proguard_mapping_from_bytes(bytes: *const u8, len: c_uint)
-> Result<*mut MappingView<'static>>
{
resultbox(MappingView::from_vec(slice::from_raw_parts(bytes, len as usize).to_vec())?)
});
export!(lsm_proguard_mapping_from_path(filename: *const c_char)
-> Result<*mut MappingView<'static>>
{
resultbox(MappingView::from_path(
OsStr::from_bytes(CStr::from_ptr(filename).to_bytes()))?)
});
export!(lsm_proguard_mapping_free(view: *mut MappingView) {
if !view.is_null() {
Box::from_raw(view);
}
});
export!(lsm_proguard_mapping_has_line_info(view: *const MappingView) -> Result<c_int> {
Ok(if (*view).has_line_info() {
1
} else {
0
})
});
export!(lsm_proguard_mapping_convert_dotted_path(
view: *const MappingView, path: *const c_char, lineno: c_int)
-> Result<*mut u8>
{
let path = CStr::from_ptr(path).to_str()?;
let mut iter = path.splitn(2, ':');
let cls_name = iter.next().unwrap_or("");
let meth_name = iter.next();
let s = if let Some(cls) = (*view).find_class(cls_name) {
let class_name = cls.class_name();
if let Some(meth_name) = meth_name {
let methods = cls.get_methods(meth_name, if lineno == 0 {
None
} else {
Some(lineno as u32)
});
if !methods.is_empty() {
format!("{}:{}\x00", class_name, methods[0].name())
} else {
format!("{}:{}\x00", class_name, meth_name)
}
} else {
format!("{}\x00", class_name)
}
} else {
format!("{}\x00", path)
};
Ok(Box::into_raw(s.into_boxed_str()) as *mut u8)
});
| set_token | identifier_name |
main.rs | /**
* Rust's module/package system is *very* fully-featured and rich.
* It's worth revisiting the rust book chapter, which is chock full of
* special-case details, synonyms, tricks and tips.
*
* https://doc.rust-lang.org/book/ch07-00-packages-crates-and-modules.html
*
*
* Here are a few of the highest-level details before we start:
*
* - Cargo expects 0 or 1 `src/main.rs` per project (i.e. per Cargo.toml)
* - Cargo expects 0 or 1 `src/lib.rs` per project (i.e. per Cargo.toml)
* - The `main.js` becomes an executable / binary
* - The `lib.js` becomes an exportable library
* - Additional top-level executable files can go into `src/bin/foo.rs`
* Each such `*.rs` file will become one (1) top-level binary
* - Other top-level files in `src/` are importable within the project,
* but do not become exportable automatically.
* - You can combine keywords in your lib.rs to re-export everything,
* but you only get that one anointed lib.rs file for that purpose.
*
* Until now we have never used the `mod` keyword, which has made it easy
* to do simple `hello world` style examples. That has meant that all the
* code in each demo file is in the same namespace. Everything is visible
* to everything else, which has made everything easy, hooray! But at the
* same time, *nothing* has been exportable outside of the single main.rs
* files, which means *none* of that code can ever be re-used, booo!
*
* The `mod` keyword is thus a double edged sword. As soon as you start
* using it:
*
* - you suddenly have to deal with public vs private and _access_ issues
* - entities can now be *re-used* by other files that import the module
*
* Obviously that's a necessary trade-off for anything non-trivial, so let's
* roll up our sleeves and get familiar with it!
*
* This file is our one (1) `main.rs` for the project, so it will be our
* one (1) executable. However, we will make reference to four (4) other
* _modules_ for this project, with differing access strategies.
*
* - `foo`, a module defined right here inline with this file
* But defining modules within `main.rs` is too trivial to be useful.
* - `spam`, a module defined in a sibling file (./spam.rs) all by itself
* This pattern is probably all you would need for small projects.
* - `sounds`, a module defined in a sibling file (./sounds.rs) but which
* also has with associated subdirectories. This example comes from the
* official Rust book, but I don't like the dual use of a sounds.rs file
* with a ./sounds/ directory. There is implicit magic here which I dislike.
* - `things`, a module defined in a sibling directory with an internal `mod.rs`
* file. This pattern comes from the Blandy & Orendorff book, and I like
* the fact that everything about it is explicit. This is the one I would
* use in my own projects, but you have to be familiar with all of them!
*
* Finally, we'll show the use of completely external modules inside `things`.
* It uses the *external* crate `rand`, the de facto standard way to generate
* random values. This library is not part of the rust core, but it *was* a
* part of the core long ago, and it is still maintained by the same devs who
* *do* maintain the rust core. So it's as anointed as you can get without
* being bundled with standard rust.
*
* All of those target modules are *somewhere* in the project, but none of them
* are part of the default public scope of this `main.rs` file. Therefore, we
* have to announce that we will be using each of them, and then provide the
* correct implementation target for each. Both steps are necessary! You must
* _declare_ that you're going to use a module, and then you must _implement_
* that module. The declarations take the same shape for all four modules,
* but their implementations are all different.
*
*/
// For inline examples *only*, the `declaration` and the `implementation`
// take place in the same location. That's what the word "inline" means!
mod foo {
// you are free to define as many nested submodules as you like
// but remember that *everything* is private by default!
// So we have to explicitly declare the submodules as public
pub mod bar {
// but eventually you will need a leaf node or why bother?
pub fn zug(path: &str) {
println!("I am Zug; hear me roar (via a {} path!)", path);
}
// this function demonstrates the use of `super::`
// this is the only way to reach *up* and *over*
pub fn qux() {
// without super::, qux cannot see up to blort
// blort("qux cannot see blort directly");
// compiler error > blort not found in this scope
// but this works:
super::blort("message from qux");
}
}
// this fn is part of foo, so it c
pub fn | (msg: &str) {
println!("Blort says: {}", msg);
}
}
// next, the series of *declarations*, each of which points to one of the
// module implementations discussed above. The declaration phase is easy to
// forget, because the implementations are all part of the project, and so
// their source files are very close by. But you are *required* to make an
// explicit declaration nontheless. If you throw in references to `crate::x::y`
// without having preceded them with one of these declarations (e.g. `mod x;`),
// the compiler will error out with a "failed to resolve" error message.
// (So remember!!): even though these modules are local to the project, you
// cannot make *undeclared* relative or absolute path references to them!!
// Declare that we are looking for a `spam` module as a peer of some kind
// Note that this differs from `use`, which would mean we were expecting
// Cargo to find the installed library in whatever cache directory it uses.
// NB: this means that a peer/sibling directory is _not_ automatically treated
// as a module by default! Only peers that you declare in this way are modules.
mod spam; // treat a spam peer (of some kind!) as a module
// in this case, it's a file: spam.rs
// and that file is self-contained, with no further path-based shenanigans
// Declare that we are looking for a `sound` module as a peer of some kind.
mod sound; // treat a sound peer (of some kind!) as a module
// ending in semicolon instead of braces tells the compiler to find this module
// In this case it is a `./sound.rs` file, which *happens* to include its own
// submodule in its own subdirectory. The peer file is *definitive*, but nested
// subdirectories are a *maybe*. I don't like maybe! Therefore, this pattern
// bothers me, and I much prefer the next and final one, in which we have a
// top-level directory matching the module name, plus an explicit barrel file.
// Declare that we are looking for a `things` module as a peer or some kind
mod things; // treat a things peer (of some kind!) as a module.
// in this case, it's a directory, which has an inner ./mod.js file, which
// acts as the one-and-only barrel file for that module. I like this approach
// _much_ better than the weird one used for sound, above. Everything here is
// explicit, and there is no compiler magic going on anywhere.
fn main() {
// module `foo` is the first and simplest example, since it is inline.
// we can get to the inline `foo` module two ways:
// Via an absolute path, starting with the language-level keyword `crate`
crate::foo::bar::zug("absolute");
// Or via *relative* path, where anything that is a peer of `main`
// can be used as the top of the path.
foo::bar::zug("relative");
// NB: you could also start with `super::` to back up one level
// There is no need for a `sub::`, because that's what you're doing with `::`!
foo::blort("message from main"); // call blort directly
foo::bar::qux(); // qux also calls blort, via super:: shenanigans
// module `spam` is the second-simplest example. It points to an all-in-one
// peer file whose contents are eerily similar to our inline foo, above.
crate::spam::eggs::toast("absolute");
spam::eggs::toast("relative");
// and here is the same super:: demo we did with foo, but for spam
spam::beans("message from main");
spam::eggs::ham();
// Then the `sound` module uses a weird pattern where there is
// both a `./sound.rs` peer file, and a `./sound/` peer directory.
// The weirdest part is that the `sound.rs` peer file is allowed to
// refer to the `instrument` file without specifying the true path:
// there's just an implicit automagic compiler leap where it knows to
// look for a ./sound/instrument.rs` file. This bothers me a lot!
crate::sound::instrument::clarinet("absolute");
// But once you get path that irritant, you can do the same absolute vs
// relative thing that we've demonstrated for everytone else.
sound::instrument::clarinet("relative");
// module `things` shows a more-scalable approach to modules
// There is no `things.rs`, but there *is* a ./things/ peer directory
// and that directory has a `mod.rs` file, which acts as the top level
// file for the module, much like `index.js` does in a node project.
crate::things::greet();
// use things via relative path
let stuff = things::assortment();
println!("An assortment of things: {:?}", stuff);
// accessing nested modules can get verbose!
let dog = things::animal::Animal::new("Rover");
println!("Rover says 'ruff ruff': {:?}", dog);
// use the `use` keyword to allow terser access
use crate::things::mineral::Mineral; // the final segment is now in scope as is
let coal = Mineral::new("Coal, ick!");
println!("Hi! I cause global warming!: {:?}", coal);
// the `as` option allows you to avoid namespace collisions if necessary
use crate::things::vegetable::Vegetable as Plant;
let oak = Plant::new("oak");
println!("From a tiny acorn did I grow: {:?}", oak);
}
// there are still plenty of other little details to review in the article
// in the main Rust book. This is a big topic, because it is _important_!
// For example, you can `pub use` to re-export under shorter names, and there
// are import syntaxes to condense multiple imports from sub-branches of the
// same overall module. And there's a wildcard glob '*' to import everything
// from a module, complete with the usual warnings about how that can be a bad
// thing, because it makes it much harder to trace relationships.
// TODO: go back and re-read the whole chapter, seriously!
| blort | identifier_name |
main.rs | /**
* Rust's module/package system is *very* fully-featured and rich.
* It's worth revisiting the rust book chapter, which is chock full of
* special-case details, synonyms, tricks and tips.
*
* https://doc.rust-lang.org/book/ch07-00-packages-crates-and-modules.html
*
*
* Here are a few of the highest-level details before we start:
*
* - Cargo expects 0 or 1 `src/main.rs` per project (i.e. per Cargo.toml)
* - Cargo expects 0 or 1 `src/lib.rs` per project (i.e. per Cargo.toml)
* - The `main.js` becomes an executable / binary
* - The `lib.js` becomes an exportable library
* - Additional top-level executable files can go into `src/bin/foo.rs`
* Each such `*.rs` file will become one (1) top-level binary
* - Other top-level files in `src/` are importable within the project,
* but do not become exportable automatically.
* - You can combine keywords in your lib.rs to re-export everything,
* but you only get that one anointed lib.rs file for that purpose.
*
* Until now we have never used the `mod` keyword, which has made it easy
* to do simple `hello world` style examples. That has meant that all the
* code in each demo file is in the same namespace. Everything is visible
* to everything else, which has made everything easy, hooray! But at the
* same time, *nothing* has been exportable outside of the single main.rs
* files, which means *none* of that code can ever be re-used, booo!
*
* The `mod` keyword is thus a double edged sword. As soon as you start
* using it:
*
* - you suddenly have to deal with public vs private and _access_ issues
* - entities can now be *re-used* by other files that import the module
*
* Obviously that's a necessary trade-off for anything non-trivial, so let's
* roll up our sleeves and get familiar with it!
*
* This file is our one (1) `main.rs` for the project, so it will be our
* one (1) executable. However, we will make reference to four (4) other
* _modules_ for this project, with differing access strategies.
*
* - `foo`, a module defined right here inline with this file
* But defining modules within `main.rs` is too trivial to be useful.
* - `spam`, a module defined in a sibling file (./spam.rs) all by itself
* This pattern is probably all you would need for small projects.
* - `sounds`, a module defined in a sibling file (./sounds.rs) but which
* also has with associated subdirectories. This example comes from the
* official Rust book, but I don't like the dual use of a sounds.rs file
* with a ./sounds/ directory. There is implicit magic here which I dislike.
* - `things`, a module defined in a sibling directory with an internal `mod.rs`
* file. This pattern comes from the Blandy & Orendorff book, and I like
* the fact that everything about it is explicit. This is the one I would
* use in my own projects, but you have to be familiar with all of them!
*
* Finally, we'll show the use of completely external modules inside `things`.
* It uses the *external* crate `rand`, the de facto standard way to generate
* random values. This library is not part of the rust core, but it *was* a
* part of the core long ago, and it is still maintained by the same devs who
* *do* maintain the rust core. So it's as anointed as you can get without
* being bundled with standard rust.
*
* All of those target modules are *somewhere* in the project, but none of them
* are part of the default public scope of this `main.rs` file. Therefore, we
* have to announce that we will be using each of them, and then provide the
* correct implementation target for each. Both steps are necessary! You must
* _declare_ that you're going to use a module, and then you must _implement_
* that module. The declarations take the same shape for all four modules,
* but their implementations are all different.
*
*/
// For inline examples *only*, the `declaration` and the `implementation`
// take place in the same location. That's what the word "inline" means!
mod foo {
// you are free to define as many nested submodules as you like
// but remember that *everything* is private by default!
// So we have to explicitly declare the submodules as public
pub mod bar {
// but eventually you will need a leaf node or why bother?
pub fn zug(path: &str) {
println!("I am Zug; hear me roar (via a {} path!)", path);
}
// this function demonstrates the use of `super::`
// this is the only way to reach *up* and *over*
pub fn qux() {
// without super::, qux cannot see up to blort
// blort("qux cannot see blort directly");
// compiler error > blort not found in this scope
// but this works:
super::blort("message from qux");
}
}
// this fn is part of foo, so it c
pub fn blort(msg: &str) |
}
// next, the series of *declarations*, each of which points to one of the
// module implementations discussed above. The declaration phase is easy to
// forget, because the implementations are all part of the project, and so
// their source files are very close by. But you are *required* to make an
// explicit declaration nontheless. If you throw in references to `crate::x::y`
// without having preceded them with one of these declarations (e.g. `mod x;`),
// the compiler will error out with a "failed to resolve" error message.
// (So remember!!): even though these modules are local to the project, you
// cannot make *undeclared* relative or absolute path references to them!!
// Declare that we are looking for a `spam` module as a peer of some kind
// Note that this differs from `use`, which would mean we were expecting
// Cargo to find the installed library in whatever cache directory it uses.
// NB: this means that a peer/sibling directory is _not_ automatically treated
// as a module by default! Only peers that you declare in this way are modules.
mod spam; // treat a spam peer (of some kind!) as a module
// in this case, it's a file: spam.rs
// and that file is self-contained, with no further path-based shenanigans
// Declare that we are looking for a `sound` module as a peer of some kind.
mod sound; // treat a sound peer (of some kind!) as a module
// ending in semicolon instead of braces tells the compiler to find this module
// In this case it is a `./sound.rs` file, which *happens* to include its own
// submodule in its own subdirectory. The peer file is *definitive*, but nested
// subdirectories are a *maybe*. I don't like maybe! Therefore, this pattern
// bothers me, and I much prefer the next and final one, in which we have a
// top-level directory matching the module name, plus an explicit barrel file.
// Declare that we are looking for a `things` module as a peer or some kind
mod things; // treat a things peer (of some kind!) as a module.
// in this case, it's a directory, which has an inner ./mod.js file, which
// acts as the one-and-only barrel file for that module. I like this approach
// _much_ better than the weird one used for sound, above. Everything here is
// explicit, and there is no compiler magic going on anywhere.
fn main() {
// module `foo` is the first and simplest example, since it is inline.
// we can get to the inline `foo` module two ways:
// Via an absolute path, starting with the language-level keyword `crate`
crate::foo::bar::zug("absolute");
// Or via *relative* path, where anything that is a peer of `main`
// can be used as the top of the path.
foo::bar::zug("relative");
// NB: you could also start with `super::` to back up one level
// There is no need for a `sub::`, because that's what you're doing with `::`!
foo::blort("message from main"); // call blort directly
foo::bar::qux(); // qux also calls blort, via super:: shenanigans
// module `spam` is the second-simplest example. It points to an all-in-one
// peer file whose contents are eerily similar to our inline foo, above.
crate::spam::eggs::toast("absolute");
spam::eggs::toast("relative");
// and here is the same super:: demo we did with foo, but for spam
spam::beans("message from main");
spam::eggs::ham();
// Then the `sound` module uses a weird pattern where there is
// both a `./sound.rs` peer file, and a `./sound/` peer directory.
// The weirdest part is that the `sound.rs` peer file is allowed to
// refer to the `instrument` file without specifying the true path:
// there's just an implicit automagic compiler leap where it knows to
// look for a ./sound/instrument.rs` file. This bothers me a lot!
crate::sound::instrument::clarinet("absolute");
// But once you get path that irritant, you can do the same absolute vs
// relative thing that we've demonstrated for everytone else.
sound::instrument::clarinet("relative");
// module `things` shows a more-scalable approach to modules
// There is no `things.rs`, but there *is* a ./things/ peer directory
// and that directory has a `mod.rs` file, which acts as the top level
// file for the module, much like `index.js` does in a node project.
crate::things::greet();
// use things via relative path
let stuff = things::assortment();
println!("An assortment of things: {:?}", stuff);
// accessing nested modules can get verbose!
let dog = things::animal::Animal::new("Rover");
println!("Rover says 'ruff ruff': {:?}", dog);
// use the `use` keyword to allow terser access
use crate::things::mineral::Mineral; // the final segment is now in scope as is
let coal = Mineral::new("Coal, ick!");
println!("Hi! I cause global warming!: {:?}", coal);
// the `as` option allows you to avoid namespace collisions if necessary
use crate::things::vegetable::Vegetable as Plant;
let oak = Plant::new("oak");
println!("From a tiny acorn did I grow: {:?}", oak);
}
// there are still plenty of other little details to review in the article
// in the main Rust book. This is a big topic, because it is _important_!
// For example, you can `pub use` to re-export under shorter names, and there
// are import syntaxes to condense multiple imports from sub-branches of the
// same overall module. And there's a wildcard glob '*' to import everything
// from a module, complete with the usual warnings about how that can be a bad
// thing, because it makes it much harder to trace relationships.
// TODO: go back and re-read the whole chapter, seriously!
| {
println!("Blort says: {}", msg);
} | identifier_body |
main.rs | /**
* Rust's module/package system is *very* fully-featured and rich.
* It's worth revisiting the rust book chapter, which is chock full of
* special-case details, synonyms, tricks and tips.
*
* https://doc.rust-lang.org/book/ch07-00-packages-crates-and-modules.html
*
*
* Here are a few of the highest-level details before we start:
*
* - Cargo expects 0 or 1 `src/main.rs` per project (i.e. per Cargo.toml)
* - Cargo expects 0 or 1 `src/lib.rs` per project (i.e. per Cargo.toml)
* - The `main.js` becomes an executable / binary
* - The `lib.js` becomes an exportable library
* - Additional top-level executable files can go into `src/bin/foo.rs`
* Each such `*.rs` file will become one (1) top-level binary
* - Other top-level files in `src/` are importable within the project,
* but do not become exportable automatically.
* - You can combine keywords in your lib.rs to re-export everything,
* but you only get that one anointed lib.rs file for that purpose.
*
* Until now we have never used the `mod` keyword, which has made it easy
* to do simple `hello world` style examples. That has meant that all the
* code in each demo file is in the same namespace. Everything is visible
* to everything else, which has made everything easy, hooray! But at the
* same time, *nothing* has been exportable outside of the single main.rs
* files, which means *none* of that code can ever be re-used, booo!
*
* The `mod` keyword is thus a double edged sword. As soon as you start
* using it:
*
* - you suddenly have to deal with public vs private and _access_ issues
* - entities can now be *re-used* by other files that import the module
*
* Obviously that's a necessary trade-off for anything non-trivial, so let's
* roll up our sleeves and get familiar with it!
*
* This file is our one (1) `main.rs` for the project, so it will be our
* one (1) executable. However, we will make reference to four (4) other
* _modules_ for this project, with differing access strategies.
*
* - `foo`, a module defined right here inline with this file
* But defining modules within `main.rs` is too trivial to be useful.
* - `spam`, a module defined in a sibling file (./spam.rs) all by itself
* This pattern is probably all you would need for small projects.
* - `sounds`, a module defined in a sibling file (./sounds.rs) but which
* also has with associated subdirectories. This example comes from the
* official Rust book, but I don't like the dual use of a sounds.rs file
* with a ./sounds/ directory. There is implicit magic here which I dislike.
* - `things`, a module defined in a sibling directory with an internal `mod.rs`
* file. This pattern comes from the Blandy & Orendorff book, and I like
* the fact that everything about it is explicit. This is the one I would
* use in my own projects, but you have to be familiar with all of them!
*
* Finally, we'll show the use of completely external modules inside `things`.
* It uses the *external* crate `rand`, the de facto standard way to generate
* random values. This library is not part of the rust core, but it *was* a
* part of the core long ago, and it is still maintained by the same devs who
* *do* maintain the rust core. So it's as anointed as you can get without
* being bundled with standard rust.
*
* All of those target modules are *somewhere* in the project, but none of them
* are part of the default public scope of this `main.rs` file. Therefore, we
* have to announce that we will be using each of them, and then provide the
* correct implementation target for each. Both steps are necessary! You must
* _declare_ that you're going to use a module, and then you must _implement_
* that module. The declarations take the same shape for all four modules,
* but their implementations are all different.
*
*/
// For inline examples *only*, the `declaration` and the `implementation`
// take place in the same location. That's what the word "inline" means!
mod foo {
// you are free to define as many nested submodules as you like
// but remember that *everything* is private by default!
// So we have to explicitly declare the submodules as public
pub mod bar {
// but eventually you will need a leaf node or why bother?
pub fn zug(path: &str) {
println!("I am Zug; hear me roar (via a {} path!)", path);
}
// this function demonstrates the use of `super::`
// this is the only way to reach *up* and *over*
pub fn qux() {
// without super::, qux cannot see up to blort
// blort("qux cannot see blort directly");
// compiler error > blort not found in this scope
// but this works:
super::blort("message from qux");
}
}
// this fn is part of foo, so it c
pub fn blort(msg: &str) { | // module implementations discussed above. The declaration phase is easy to
// forget, because the implementations are all part of the project, and so
// their source files are very close by. But you are *required* to make an
// explicit declaration nontheless. If you throw in references to `crate::x::y`
// without having preceded them with one of these declarations (e.g. `mod x;`),
// the compiler will error out with a "failed to resolve" error message.
// (So remember!!): even though these modules are local to the project, you
// cannot make *undeclared* relative or absolute path references to them!!
// Declare that we are looking for a `spam` module as a peer of some kind
// Note that this differs from `use`, which would mean we were expecting
// Cargo to find the installed library in whatever cache directory it uses.
// NB: this means that a peer/sibling directory is _not_ automatically treated
// as a module by default! Only peers that you declare in this way are modules.
mod spam; // treat a spam peer (of some kind!) as a module
// in this case, it's a file: spam.rs
// and that file is self-contained, with no further path-based shenanigans
// Declare that we are looking for a `sound` module as a peer of some kind.
mod sound; // treat a sound peer (of some kind!) as a module
// ending in semicolon instead of braces tells the compiler to find this module
// In this case it is a `./sound.rs` file, which *happens* to include its own
// submodule in its own subdirectory. The peer file is *definitive*, but nested
// subdirectories are a *maybe*. I don't like maybe! Therefore, this pattern
// bothers me, and I much prefer the next and final one, in which we have a
// top-level directory matching the module name, plus an explicit barrel file.
// Declare that we are looking for a `things` module as a peer or some kind
mod things; // treat a things peer (of some kind!) as a module.
// in this case, it's a directory, which has an inner ./mod.js file, which
// acts as the one-and-only barrel file for that module. I like this approach
// _much_ better than the weird one used for sound, above. Everything here is
// explicit, and there is no compiler magic going on anywhere.
fn main() {
// module `foo` is the first and simplest example, since it is inline.
// we can get to the inline `foo` module two ways:
// Via an absolute path, starting with the language-level keyword `crate`
crate::foo::bar::zug("absolute");
// Or via *relative* path, where anything that is a peer of `main`
// can be used as the top of the path.
foo::bar::zug("relative");
// NB: you could also start with `super::` to back up one level
// There is no need for a `sub::`, because that's what you're doing with `::`!
foo::blort("message from main"); // call blort directly
foo::bar::qux(); // qux also calls blort, via super:: shenanigans
// module `spam` is the second-simplest example. It points to an all-in-one
// peer file whose contents are eerily similar to our inline foo, above.
crate::spam::eggs::toast("absolute");
spam::eggs::toast("relative");
// and here is the same super:: demo we did with foo, but for spam
spam::beans("message from main");
spam::eggs::ham();
// Then the `sound` module uses a weird pattern where there is
// both a `./sound.rs` peer file, and a `./sound/` peer directory.
// The weirdest part is that the `sound.rs` peer file is allowed to
// refer to the `instrument` file without specifying the true path:
// there's just an implicit automagic compiler leap where it knows to
// look for a ./sound/instrument.rs` file. This bothers me a lot!
crate::sound::instrument::clarinet("absolute");
// But once you get path that irritant, you can do the same absolute vs
// relative thing that we've demonstrated for everytone else.
sound::instrument::clarinet("relative");
// module `things` shows a more-scalable approach to modules
// There is no `things.rs`, but there *is* a ./things/ peer directory
// and that directory has a `mod.rs` file, which acts as the top level
// file for the module, much like `index.js` does in a node project.
crate::things::greet();
// use things via relative path
let stuff = things::assortment();
println!("An assortment of things: {:?}", stuff);
// accessing nested modules can get verbose!
let dog = things::animal::Animal::new("Rover");
println!("Rover says 'ruff ruff': {:?}", dog);
// use the `use` keyword to allow terser access
use crate::things::mineral::Mineral; // the final segment is now in scope as is
let coal = Mineral::new("Coal, ick!");
println!("Hi! I cause global warming!: {:?}", coal);
// the `as` option allows you to avoid namespace collisions if necessary
use crate::things::vegetable::Vegetable as Plant;
let oak = Plant::new("oak");
println!("From a tiny acorn did I grow: {:?}", oak);
}
// there are still plenty of other little details to review in the article
// in the main Rust book. This is a big topic, because it is _important_!
// For example, you can `pub use` to re-export under shorter names, and there
// are import syntaxes to condense multiple imports from sub-branches of the
// same overall module. And there's a wildcard glob '*' to import everything
// from a module, complete with the usual warnings about how that can be a bad
// thing, because it makes it much harder to trace relationships.
// TODO: go back and re-read the whole chapter, seriously! | println!("Blort says: {}", msg);
}
}
// next, the series of *declarations*, each of which points to one of the | random_line_split |
validate.rs | use crate::{
config::{self, Config, ConfigDiff},
topology::{self, builder::Pieces},
};
use colored::*;
use exitcode::ExitCode;
use std::collections::HashMap;
use std::{fmt, fs::remove_dir_all, path::PathBuf};
use structopt::StructOpt;
const TEMPORARY_DIRECTORY: &str = "validate_tmp";
#[derive(StructOpt, Debug)]
#[structopt(rename_all = "kebab-case")]
pub struct Opts {
/// Disables environment checks. That includes component checks and health checks.
#[structopt(long)]
no_environment: bool,
/// Fail validation on warnings that are probably a mistake in the configuration
/// or are recommended to be fixed.
#[structopt(short, long)]
deny_warnings: bool,
/// Vector config files in TOML format to validate.
#[structopt(
name = "config-toml",
long,
env = "VECTOR_CONFIG_TOML",
use_delimiter(true)
)]
paths_toml: Vec<PathBuf>,
/// Vector config files in JSON format to validate.
#[structopt(
name = "config-json",
long,
env = "VECTOR_CONFIG_JSON",
use_delimiter(true)
)]
paths_json: Vec<PathBuf>,
/// Vector config files in YAML format to validate.
#[structopt(
name = "config-yaml",
long,
env = "VECTOR_CONFIG_YAML",
use_delimiter(true)
)]
paths_yaml: Vec<PathBuf>,
/// Any number of Vector config files to validate.
/// Format is detected from the file name.
/// If none are specified the default config path `/etc/vector/vector.toml`
/// will be targeted.
#[structopt(env = "VECTOR_CONFIG", use_delimiter(true))]
paths: Vec<PathBuf>,
/// Read configuration from files in one or more directories.
/// File format is detected from the file name.
///
/// Files not ending in .toml, .json, .yaml, or .yml will be ignored.
#[structopt(
name = "config-dir",
short = "C",
long,
env = "VECTOR_CONFIG_DIR",
use_delimiter(true)
)]
pub config_dirs: Vec<PathBuf>,
}
impl Opts {
fn paths_with_formats(&self) -> Vec<config::ConfigPath> |
}
/// Performs topology, component, and health checks.
pub async fn validate(opts: &Opts, color: bool) -> ExitCode {
let mut fmt = Formatter::new(color);
let mut validated = true;
let mut config = match validate_config(opts, &mut fmt) {
Some(config) => config,
None => return exitcode::CONFIG,
};
if !opts.no_environment {
if let Some(tmp_directory) = create_tmp_directory(&mut config, &mut fmt) {
validated &= validate_environment(opts, &config, &mut fmt).await;
remove_tmp_directory(tmp_directory);
} else {
validated = false;
}
}
if validated {
fmt.validated();
exitcode::OK
} else {
exitcode::CONFIG
}
}
fn validate_config(opts: &Opts, fmt: &mut Formatter) -> Option<Config> {
// Prepare paths
let paths = opts.paths_with_formats();
let paths = if let Some(paths) = config::process_paths(&paths) {
paths
} else {
fmt.error("No config file paths");
return None;
};
// Load
let paths_list: Vec<_> = paths.iter().map(<&PathBuf>::from).collect();
let mut report_error = |errors| {
fmt.title(format!("Failed to load {:?}", &paths_list));
fmt.sub_error(errors);
};
config::init_log_schema(&paths, true)
.map_err(&mut report_error)
.ok()?;
let (builder, load_warnings) = config::load_builder_from_paths(&paths)
.map_err(&mut report_error)
.ok()?;
// Build
let (config, build_warnings) = builder
.build_with_warnings()
.map_err(&mut report_error)
.ok()?;
// Warnings
let warnings = load_warnings
.into_iter()
.chain(build_warnings)
.collect::<Vec<_>>();
if !warnings.is_empty() {
if opts.deny_warnings {
report_error(warnings);
return None;
}
fmt.title(format!("Loaded with warnings {:?}", &paths_list));
fmt.sub_warning(warnings);
} else {
fmt.success(format!("Loaded {:?}", &paths_list));
}
Some(config)
}
async fn validate_environment(opts: &Opts, config: &Config, fmt: &mut Formatter) -> bool {
let diff = ConfigDiff::initial(config);
let mut pieces = if let Some(pieces) = validate_components(config, &diff, fmt).await {
pieces
} else {
return false;
};
validate_healthchecks(opts, config, &diff, &mut pieces, fmt).await
}
async fn validate_components(
config: &Config,
diff: &ConfigDiff,
fmt: &mut Formatter,
) -> Option<Pieces> {
match topology::builder::build_pieces(config, diff, HashMap::new()).await {
Ok(pieces) => {
fmt.success("Component configuration");
Some(pieces)
}
Err(errors) => {
fmt.title("Component errors");
fmt.sub_error(errors);
None
}
}
}
async fn validate_healthchecks(
opts: &Opts,
config: &Config,
diff: &ConfigDiff,
pieces: &mut Pieces,
fmt: &mut Formatter,
) -> bool {
if !config.healthchecks.enabled {
fmt.warning("Health checks are disabled");
return !opts.deny_warnings;
}
let healthchecks = topology::take_healthchecks(diff, pieces);
// We are running health checks in serial so it's easier for the users
// to parse which errors/warnings/etc. belong to which healthcheck.
let mut validated = true;
for (name, healthcheck) in healthchecks {
let mut failed = |error| {
validated = false;
fmt.error(error);
};
match tokio::spawn(healthcheck).await {
Ok(Ok(_)) => {
if config
.sinks
.get(&name)
.expect("Sink not present")
.healthcheck()
.enabled
{
fmt.success(format!("Health check `{}`", name.as_str()));
} else {
fmt.warning(format!("Health check disabled for `{}`", name));
validated &= !opts.deny_warnings;
}
}
Ok(Err(())) => failed(format!("Health check for `{}` failed", name.as_str())),
Err(error) if error.is_cancelled() => failed(format!(
"Health check for `{}` was cancelled",
name.as_str()
)),
Err(_) => failed(format!("Health check for `{}` panicked", name.as_str())),
}
}
validated
}
/// For data directory that we write to:
/// 1. Create a tmp directory in it.
/// 2. Change config to point to that tmp directory.
fn create_tmp_directory(config: &mut Config, fmt: &mut Formatter) -> Option<PathBuf> {
match config
.global
.resolve_and_make_data_subdir(None, TEMPORARY_DIRECTORY)
{
Ok(path) => {
config.global.data_dir = Some(path.clone());
Some(path)
}
Err(error) => {
fmt.error(format!("{}", error));
None
}
}
}
fn remove_tmp_directory(path: PathBuf) {
if let Err(error) = remove_dir_all(&path) {
error!(message = "Failed to remove temporary directory.", path = ?path, %error);
}
}
struct Formatter {
/// Width of largest printed line
max_line_width: usize,
/// Can empty line be printed
print_space: bool,
color: bool,
// Intros
error_intro: String,
warning_intro: String,
success_intro: String,
}
impl Formatter {
fn new(color: bool) -> Self {
Self {
max_line_width: 0,
print_space: false,
error_intro: if color {
format!("{}", "x".red())
} else {
"x".to_owned()
},
warning_intro: if color {
format!("{}", "~".yellow())
} else {
"~".to_owned()
},
success_intro: if color {
format!("{}", "√".green())
} else {
"√".to_owned()
},
color,
}
}
/// Final confirmation that validation process was successful.
fn validated(&self) {
println!("{:-^width$}", "", width = self.max_line_width);
if self.color {
// Coloring needs to be used directly so that print
// infrastructure correctly determines length of the
// "Validated". Otherwise, ansi escape coloring is
// calculated into the length.
println!(
"{:>width$}",
"Validated".green(),
width = self.max_line_width
);
} else {
println!("{:>width$}", "Validated", width = self.max_line_width)
}
}
/// Standalone line
fn success(&mut self, msg: impl AsRef<str>) {
self.print(format!("{} {}\n", self.success_intro, msg.as_ref()))
}
/// Standalone line
fn warning(&mut self, warning: impl AsRef<str>) {
self.print(format!("{} {}\n", self.warning_intro, warning.as_ref()))
}
/// Standalone line
fn error(&mut self, error: impl AsRef<str>) {
self.print(format!("{} {}\n", self.error_intro, error.as_ref()))
}
/// Marks sub
fn title(&mut self, title: impl AsRef<str>) {
self.space();
self.print(format!(
"{}\n{:-<width$}\n",
title.as_ref(),
"",
width = title.as_ref().len()
))
}
/// A list of warnings that go with a title.
fn sub_warning<I: IntoIterator>(&mut self, warnings: I)
where
I::Item: fmt::Display,
{
self.sub(self.warning_intro.clone(), warnings)
}
/// A list of errors that go with a title.
fn sub_error<I: IntoIterator>(&mut self, errors: I)
where
I::Item: fmt::Display,
{
self.sub(self.error_intro.clone(), errors)
}
fn sub<I: IntoIterator>(&mut self, intro: impl AsRef<str>, msgs: I)
where
I::Item: fmt::Display,
{
for msg in msgs {
self.print(format!("{} {}\n", intro.as_ref(), msg));
}
self.space();
}
/// Prints empty space if necessary.
fn space(&mut self) {
if self.print_space {
self.print_space = false;
println!();
}
}
fn print(&mut self, print: impl AsRef<str>) {
let width = print
.as_ref()
.lines()
.map(|line| {
String::from_utf8_lossy(&strip_ansi_escapes::strip(line).unwrap())
.chars()
.count()
})
.max()
.unwrap_or(0);
self.max_line_width = width.max(self.max_line_width);
self.print_space = true;
print!("{}", print.as_ref())
}
}
| {
config::merge_path_lists(vec![
(&self.paths, None),
(&self.paths_toml, Some(config::Format::Toml)),
(&self.paths_json, Some(config::Format::Json)),
(&self.paths_yaml, Some(config::Format::Yaml)),
])
.map(|(path, hint)| config::ConfigPath::File(path, hint))
.chain(
self.config_dirs
.iter()
.map(|dir| config::ConfigPath::Dir(dir.to_path_buf())),
)
.collect()
} | identifier_body |
validate.rs | use crate::{
config::{self, Config, ConfigDiff},
topology::{self, builder::Pieces},
};
use colored::*;
use exitcode::ExitCode;
use std::collections::HashMap;
use std::{fmt, fs::remove_dir_all, path::PathBuf};
use structopt::StructOpt;
const TEMPORARY_DIRECTORY: &str = "validate_tmp";
#[derive(StructOpt, Debug)]
#[structopt(rename_all = "kebab-case")]
pub struct Opts {
/// Disables environment checks. That includes component checks and health checks.
#[structopt(long)]
no_environment: bool,
/// Fail validation on warnings that are probably a mistake in the configuration
/// or are recommended to be fixed.
#[structopt(short, long)]
deny_warnings: bool,
/// Vector config files in TOML format to validate.
#[structopt(
name = "config-toml",
long,
env = "VECTOR_CONFIG_TOML",
use_delimiter(true)
)]
paths_toml: Vec<PathBuf>,
/// Vector config files in JSON format to validate.
#[structopt(
name = "config-json",
long,
env = "VECTOR_CONFIG_JSON",
use_delimiter(true)
)]
paths_json: Vec<PathBuf>,
/// Vector config files in YAML format to validate.
#[structopt(
name = "config-yaml",
long,
env = "VECTOR_CONFIG_YAML",
use_delimiter(true)
)]
paths_yaml: Vec<PathBuf>,
/// Any number of Vector config files to validate.
/// Format is detected from the file name.
/// If none are specified the default config path `/etc/vector/vector.toml`
/// will be targeted.
#[structopt(env = "VECTOR_CONFIG", use_delimiter(true))]
paths: Vec<PathBuf>,
/// Read configuration from files in one or more directories.
/// File format is detected from the file name.
///
/// Files not ending in .toml, .json, .yaml, or .yml will be ignored.
#[structopt(
name = "config-dir",
short = "C",
long,
env = "VECTOR_CONFIG_DIR",
use_delimiter(true)
)]
pub config_dirs: Vec<PathBuf>,
}
impl Opts {
fn paths_with_formats(&self) -> Vec<config::ConfigPath> {
config::merge_path_lists(vec![
(&self.paths, None),
(&self.paths_toml, Some(config::Format::Toml)),
(&self.paths_json, Some(config::Format::Json)),
(&self.paths_yaml, Some(config::Format::Yaml)),
])
.map(|(path, hint)| config::ConfigPath::File(path, hint))
.chain(
self.config_dirs
.iter()
.map(|dir| config::ConfigPath::Dir(dir.to_path_buf())),
)
.collect()
}
}
/// Performs topology, component, and health checks.
pub async fn validate(opts: &Opts, color: bool) -> ExitCode {
let mut fmt = Formatter::new(color);
let mut validated = true;
let mut config = match validate_config(opts, &mut fmt) {
Some(config) => config,
None => return exitcode::CONFIG,
};
if !opts.no_environment {
if let Some(tmp_directory) = create_tmp_directory(&mut config, &mut fmt) {
validated &= validate_environment(opts, &config, &mut fmt).await;
remove_tmp_directory(tmp_directory);
} else {
validated = false;
}
}
if validated {
fmt.validated();
exitcode::OK
} else {
exitcode::CONFIG
}
}
fn validate_config(opts: &Opts, fmt: &mut Formatter) -> Option<Config> {
// Prepare paths
let paths = opts.paths_with_formats();
let paths = if let Some(paths) = config::process_paths(&paths) {
paths
} else {
fmt.error("No config file paths");
return None;
};
// Load
let paths_list: Vec<_> = paths.iter().map(<&PathBuf>::from).collect();
let mut report_error = |errors| {
fmt.title(format!("Failed to load {:?}", &paths_list));
fmt.sub_error(errors);
};
config::init_log_schema(&paths, true)
.map_err(&mut report_error)
.ok()?;
let (builder, load_warnings) = config::load_builder_from_paths(&paths)
.map_err(&mut report_error)
.ok()?;
// Build
let (config, build_warnings) = builder
.build_with_warnings()
.map_err(&mut report_error)
.ok()?;
// Warnings
let warnings = load_warnings
.into_iter()
.chain(build_warnings)
.collect::<Vec<_>>();
if !warnings.is_empty() {
if opts.deny_warnings {
report_error(warnings);
return None;
}
fmt.title(format!("Loaded with warnings {:?}", &paths_list));
fmt.sub_warning(warnings);
} else {
fmt.success(format!("Loaded {:?}", &paths_list));
}
Some(config)
}
async fn validate_environment(opts: &Opts, config: &Config, fmt: &mut Formatter) -> bool {
let diff = ConfigDiff::initial(config);
let mut pieces = if let Some(pieces) = validate_components(config, &diff, fmt).await {
pieces
} else {
return false;
};
validate_healthchecks(opts, config, &diff, &mut pieces, fmt).await
}
async fn validate_components(
config: &Config,
diff: &ConfigDiff,
fmt: &mut Formatter,
) -> Option<Pieces> {
match topology::builder::build_pieces(config, diff, HashMap::new()).await {
Ok(pieces) => {
fmt.success("Component configuration");
Some(pieces)
}
Err(errors) => {
fmt.title("Component errors");
fmt.sub_error(errors);
None
}
}
}
async fn validate_healthchecks(
opts: &Opts,
config: &Config,
diff: &ConfigDiff,
pieces: &mut Pieces,
fmt: &mut Formatter,
) -> bool {
if !config.healthchecks.enabled {
fmt.warning("Health checks are disabled");
return !opts.deny_warnings;
}
let healthchecks = topology::take_healthchecks(diff, pieces);
// We are running health checks in serial so it's easier for the users
// to parse which errors/warnings/etc. belong to which healthcheck.
let mut validated = true;
for (name, healthcheck) in healthchecks {
let mut failed = |error| {
validated = false;
fmt.error(error);
};
match tokio::spawn(healthcheck).await {
Ok(Ok(_)) => {
if config
.sinks
.get(&name)
.expect("Sink not present")
.healthcheck()
.enabled
{
fmt.success(format!("Health check `{}`", name.as_str()));
} else {
fmt.warning(format!("Health check disabled for `{}`", name));
validated &= !opts.deny_warnings;
}
}
Ok(Err(())) => failed(format!("Health check for `{}` failed", name.as_str())),
Err(error) if error.is_cancelled() => failed(format!(
"Health check for `{}` was cancelled",
name.as_str()
)),
Err(_) => failed(format!("Health check for `{}` panicked", name.as_str())),
}
}
validated
}
/// For data directory that we write to:
/// 1. Create a tmp directory in it.
/// 2. Change config to point to that tmp directory.
fn create_tmp_directory(config: &mut Config, fmt: &mut Formatter) -> Option<PathBuf> {
match config
.global
.resolve_and_make_data_subdir(None, TEMPORARY_DIRECTORY)
{
Ok(path) => {
config.global.data_dir = Some(path.clone());
Some(path)
}
Err(error) => {
fmt.error(format!("{}", error));
None
}
}
}
fn remove_tmp_directory(path: PathBuf) {
if let Err(error) = remove_dir_all(&path) {
error!(message = "Failed to remove temporary directory.", path = ?path, %error);
}
}
struct Formatter {
/// Width of largest printed line
max_line_width: usize,
/// Can empty line be printed
print_space: bool,
color: bool,
// Intros
error_intro: String,
warning_intro: String,
success_intro: String,
}
impl Formatter {
fn new(color: bool) -> Self {
Self {
max_line_width: 0,
print_space: false,
error_intro: if color {
format!("{}", "x".red())
} else {
"x".to_owned()
},
warning_intro: if color {
format!("{}", "~".yellow())
} else {
"~".to_owned()
},
success_intro: if color {
format!("{}", "√".green())
} else {
"√".to_owned()
},
color,
}
}
/// Final confirmation that validation process was successful.
fn validated(&self) {
println!("{:-^width$}", "", width = self.max_line_width);
if self.color {
// Coloring needs to be used directly so that print
// infrastructure correctly determines length of the
// "Validated". Otherwise, ansi escape coloring is
// calculated into the length.
println!(
"{:>width$}",
"Validated".green(),
width = self.max_line_width
);
} else {
println!("{:>width$}", "Validated", width = self.max_line_width)
}
}
/// Standalone line
fn success(&mut self, msg: impl AsRef<str>) {
self.print(format!("{} {}\n", self.success_intro, msg.as_ref()))
}
/// Standalone line
fn warning(&mut self, warning: impl AsRef<str>) {
self.print(format!("{} {}\n", self.warning_intro, warning.as_ref()))
}
/// Standalone line
fn error(&mut self, error: impl AsRef<str>) {
self.print(format!("{} {}\n", self.error_intro, error.as_ref()))
}
/// Marks sub
fn title(&mut self, title: impl AsRef<str>) {
self.space();
self.print(format!(
"{}\n{:-<width$}\n",
title.as_ref(),
"",
width = title.as_ref().len()
))
}
/// A list of warnings that go with a title.
fn sub_warning<I: IntoIterator>(&mut self, warnings: I)
where
I::Item: fmt::Display,
{
self.sub(self.warning_intro.clone(), warnings)
}
/// A list of errors that go with a title.
fn sub_error<I: IntoIterator>(&mut self, errors: I)
where
I::Item: fmt::Display,
{
self.sub(self.error_intro.clone(), errors)
}
fn sub<I: IntoIterator>(&mut self, intro: impl AsRef<str>, msgs: I)
where
I::Item: fmt::Display,
{
for msg in msgs {
self.print(format!("{} {}\n", intro.as_ref(), msg));
}
self.space();
}
/// Prints empty space if necessary.
fn spac | t self) {
if self.print_space {
self.print_space = false;
println!();
}
}
fn print(&mut self, print: impl AsRef<str>) {
let width = print
.as_ref()
.lines()
.map(|line| {
String::from_utf8_lossy(&strip_ansi_escapes::strip(line).unwrap())
.chars()
.count()
})
.max()
.unwrap_or(0);
self.max_line_width = width.max(self.max_line_width);
self.print_space = true;
print!("{}", print.as_ref())
}
}
| e(&mu | identifier_name |
validate.rs | use crate::{
config::{self, Config, ConfigDiff},
topology::{self, builder::Pieces},
};
use colored::*;
use exitcode::ExitCode;
use std::collections::HashMap;
use std::{fmt, fs::remove_dir_all, path::PathBuf};
use structopt::StructOpt;
const TEMPORARY_DIRECTORY: &str = "validate_tmp";
#[derive(StructOpt, Debug)]
#[structopt(rename_all = "kebab-case")]
pub struct Opts {
/// Disables environment checks. That includes component checks and health checks.
#[structopt(long)]
no_environment: bool,
/// Fail validation on warnings that are probably a mistake in the configuration
/// or are recommended to be fixed.
#[structopt(short, long)]
deny_warnings: bool,
/// Vector config files in TOML format to validate.
#[structopt(
name = "config-toml",
long,
env = "VECTOR_CONFIG_TOML",
use_delimiter(true)
)]
paths_toml: Vec<PathBuf>,
/// Vector config files in JSON format to validate.
#[structopt(
name = "config-json",
long,
env = "VECTOR_CONFIG_JSON",
use_delimiter(true)
)]
paths_json: Vec<PathBuf>,
/// Vector config files in YAML format to validate.
#[structopt(
name = "config-yaml",
long,
env = "VECTOR_CONFIG_YAML",
use_delimiter(true)
)]
paths_yaml: Vec<PathBuf>,
/// Any number of Vector config files to validate.
/// Format is detected from the file name.
/// If none are specified the default config path `/etc/vector/vector.toml`
/// will be targeted.
#[structopt(env = "VECTOR_CONFIG", use_delimiter(true))]
paths: Vec<PathBuf>,
/// Read configuration from files in one or more directories.
/// File format is detected from the file name.
///
/// Files not ending in .toml, .json, .yaml, or .yml will be ignored.
#[structopt(
name = "config-dir",
short = "C",
long,
env = "VECTOR_CONFIG_DIR",
use_delimiter(true)
)]
pub config_dirs: Vec<PathBuf>,
}
impl Opts {
fn paths_with_formats(&self) -> Vec<config::ConfigPath> {
config::merge_path_lists(vec![
(&self.paths, None),
(&self.paths_toml, Some(config::Format::Toml)),
(&self.paths_json, Some(config::Format::Json)),
(&self.paths_yaml, Some(config::Format::Yaml)),
])
.map(|(path, hint)| config::ConfigPath::File(path, hint))
.chain(
self.config_dirs
.iter()
.map(|dir| config::ConfigPath::Dir(dir.to_path_buf())),
)
.collect()
}
}
/// Performs topology, component, and health checks.
pub async fn validate(opts: &Opts, color: bool) -> ExitCode {
let mut fmt = Formatter::new(color);
let mut validated = true;
let mut config = match validate_config(opts, &mut fmt) {
Some(config) => config,
None => return exitcode::CONFIG,
};
if !opts.no_environment {
if let Some(tmp_directory) = create_tmp_directory(&mut config, &mut fmt) {
validated &= validate_environment(opts, &config, &mut fmt).await;
remove_tmp_directory(tmp_directory);
} else {
validated = false;
}
}
if validated {
fmt.validated();
exitcode::OK
} else {
exitcode::CONFIG
}
}
fn validate_config(opts: &Opts, fmt: &mut Formatter) -> Option<Config> {
// Prepare paths
let paths = opts.paths_with_formats();
let paths = if let Some(paths) = config::process_paths(&paths) {
paths
} else {
fmt.error("No config file paths");
return None;
};
// Load
let paths_list: Vec<_> = paths.iter().map(<&PathBuf>::from).collect();
let mut report_error = |errors| {
fmt.title(format!("Failed to load {:?}", &paths_list));
fmt.sub_error(errors);
};
config::init_log_schema(&paths, true)
.map_err(&mut report_error)
.ok()?;
let (builder, load_warnings) = config::load_builder_from_paths(&paths)
.map_err(&mut report_error)
.ok()?;
// Build
let (config, build_warnings) = builder
.build_with_warnings()
.map_err(&mut report_error)
.ok()?;
// Warnings
let warnings = load_warnings
.into_iter()
.chain(build_warnings)
.collect::<Vec<_>>();
if !warnings.is_empty() {
if opts.deny_warnings {
report_error(warnings);
return None;
}
fmt.title(format!("Loaded with warnings {:?}", &paths_list));
fmt.sub_warning(warnings);
} else {
fmt.success(format!("Loaded {:?}", &paths_list));
}
Some(config)
}
async fn validate_environment(opts: &Opts, config: &Config, fmt: &mut Formatter) -> bool {
let diff = ConfigDiff::initial(config);
let mut pieces = if let Some(pieces) = validate_components(config, &diff, fmt).await {
pieces
} else {
return false;
};
validate_healthchecks(opts, config, &diff, &mut pieces, fmt).await
}
async fn validate_components(
config: &Config,
diff: &ConfigDiff,
fmt: &mut Formatter,
) -> Option<Pieces> {
match topology::builder::build_pieces(config, diff, HashMap::new()).await {
Ok(pieces) => {
fmt.success("Component configuration");
Some(pieces)
}
Err(errors) => {
fmt.title("Component errors");
fmt.sub_error(errors);
None
}
}
}
async fn validate_healthchecks(
opts: &Opts,
config: &Config,
diff: &ConfigDiff,
pieces: &mut Pieces,
fmt: &mut Formatter,
) -> bool {
if !config.healthchecks.enabled {
fmt.warning("Health checks are disabled");
return !opts.deny_warnings;
}
let healthchecks = topology::take_healthchecks(diff, pieces);
// We are running health checks in serial so it's easier for the users
// to parse which errors/warnings/etc. belong to which healthcheck.
let mut validated = true;
for (name, healthcheck) in healthchecks {
let mut failed = |error| {
validated = false;
fmt.error(error);
};
match tokio::spawn(healthcheck).await {
Ok(Ok(_)) => {
if config
.sinks
.get(&name)
.expect("Sink not present")
.healthcheck()
.enabled
{
fmt.success(format!("Health check `{}`", name.as_str()));
} else {
fmt.warning(format!("Health check disabled for `{}`", name));
validated &= !opts.deny_warnings;
}
}
Ok(Err(())) => failed(format!("Health check for `{}` failed", name.as_str())),
Err(error) if error.is_cancelled() => failed(format!(
"Health check for `{}` was cancelled",
name.as_str()
)),
Err(_) => failed(format!("Health check for `{}` panicked", name.as_str())),
}
}
validated
}
/// For data directory that we write to:
/// 1. Create a tmp directory in it.
/// 2. Change config to point to that tmp directory.
fn create_tmp_directory(config: &mut Config, fmt: &mut Formatter) -> Option<PathBuf> {
match config
.global
.resolve_and_make_data_subdir(None, TEMPORARY_DIRECTORY)
{
Ok(path) => {
config.global.data_dir = Some(path.clone());
Some(path)
}
Err(error) => {
fmt.error(format!("{}", error));
None
}
}
}
fn remove_tmp_directory(path: PathBuf) {
if let Err(error) = remove_dir_all(&path) {
error!(message = "Failed to remove temporary directory.", path = ?path, %error);
}
}
struct Formatter {
/// Width of largest printed line
max_line_width: usize,
/// Can empty line be printed
print_space: bool,
color: bool,
// Intros
error_intro: String,
warning_intro: String,
success_intro: String,
}
impl Formatter {
fn new(color: bool) -> Self {
Self {
max_line_width: 0,
print_space: false,
error_intro: if color {
format!("{}", "x".red())
} else {
"x".to_owned()
},
warning_intro: if color {
format!("{}", "~".yellow())
} else {
"~".to_owned()
},
success_intro: if color {
format!("{}", "√".green())
} else {
"√".to_owned()
},
color,
}
}
/// Final confirmation that validation process was successful.
fn validated(&self) {
println!("{:-^width$}", "", width = self.max_line_width);
if self.color {
// Coloring needs to be used directly so that print
// infrastructure correctly determines length of the
// "Validated". Otherwise, ansi escape coloring is
// calculated into the length.
println!(
"{:>width$}",
"Validated".green(),
width = self.max_line_width
); | }
/// Standalone line
fn success(&mut self, msg: impl AsRef<str>) {
self.print(format!("{} {}\n", self.success_intro, msg.as_ref()))
}
/// Standalone line
fn warning(&mut self, warning: impl AsRef<str>) {
self.print(format!("{} {}\n", self.warning_intro, warning.as_ref()))
}
/// Standalone line
fn error(&mut self, error: impl AsRef<str>) {
self.print(format!("{} {}\n", self.error_intro, error.as_ref()))
}
/// Marks sub
fn title(&mut self, title: impl AsRef<str>) {
self.space();
self.print(format!(
"{}\n{:-<width$}\n",
title.as_ref(),
"",
width = title.as_ref().len()
))
}
/// A list of warnings that go with a title.
fn sub_warning<I: IntoIterator>(&mut self, warnings: I)
where
I::Item: fmt::Display,
{
self.sub(self.warning_intro.clone(), warnings)
}
/// A list of errors that go with a title.
fn sub_error<I: IntoIterator>(&mut self, errors: I)
where
I::Item: fmt::Display,
{
self.sub(self.error_intro.clone(), errors)
}
fn sub<I: IntoIterator>(&mut self, intro: impl AsRef<str>, msgs: I)
where
I::Item: fmt::Display,
{
for msg in msgs {
self.print(format!("{} {}\n", intro.as_ref(), msg));
}
self.space();
}
/// Prints empty space if necessary.
fn space(&mut self) {
if self.print_space {
self.print_space = false;
println!();
}
}
fn print(&mut self, print: impl AsRef<str>) {
let width = print
.as_ref()
.lines()
.map(|line| {
String::from_utf8_lossy(&strip_ansi_escapes::strip(line).unwrap())
.chars()
.count()
})
.max()
.unwrap_or(0);
self.max_line_width = width.max(self.max_line_width);
self.print_space = true;
print!("{}", print.as_ref())
}
} | } else {
println!("{:>width$}", "Validated", width = self.max_line_width)
} | random_line_split |
mod.rs | #![allow(clippy::pub_enum_variant_names)]
use std::collections::HashMap;
use serde::{Serialize, Serializer};
extern crate snowflake;
pub use std::sync::Arc;
use crate::ast::*;
use crate::externals::{External, ArgumentType, EXTERNALS};
mod intexp;
mod opexp;
mod recordexp;
mod seqexp;
mod assignexp;
mod ifexp;
mod whileexp;
mod forexp;
mod letexp;
mod arrayexp;
mod varexp;
mod nilexp;
mod unitexp;
mod stringexp;
mod callexp;
mod breakexp;
#[derive(Debug, PartialEq, Clone, Serialize)]
/// Write permissions for an int value
pub enum R {
/// Read-only
RO,
/// Read-write
RW
}
/// Unique identifier for Records and Arrays
pub type TypeId = snowflake::ProcessUniqueId;
/// Generate new type id for a Record or Array
pub fn newtypeid() -> TypeId {
snowflake::ProcessUniqueId::new()
}
/// Types in the Tiger language
#[derive(Debug, Clone)]
pub enum TigerType {
/// as in `()`
TUnit,
/// as in `nil`
TNil,
/// as in `3`
TInt(R),
/// as in `"perro`
TString,
/// as in `arrtype1 [10] of 0`
TArray(Arc<TigerType>, TypeId),
/// as in `{name : string, address : string, id : int, age : int}`
TRecord(Vec<(Symbol, RecordFieldType, i32)>, TypeId),
/// Type synonym
Internal(String),
/// This struct still has not been typed yet. The parser gives this type to all nodes in the AST
Untyped,
}
#[derive(Debug, Clone)]
pub enum RecordFieldType {
Record(TypeId),
Type(Arc::<TigerType>)
}
impl PartialEq for RecordFieldType {
fn eq(&self, other: &Self) -> bool {
use RecordFieldType::*;
match (self, other) {
(Record(id1), Record(id2)) => id1 == id2,
(Record(..), Type(t2)) => if let TigerType::TNil = **t2 { true } else { false },
(Type(t1), Record(..)) => if let TigerType::TNil = **t1 { true } else { false },
(Type(t1), Type(t2)) => t1 == t2,
}
}
}
impl Serialize for TigerType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
TigerType::TUnit => {
serializer.serialize_str("Unit")
}
TigerType::TNil => {
serializer.serialize_str("Nil")
}
TigerType::TString => {
serializer.serialize_str("String")
}
TigerType::TInt(..) => {
serializer.serialize_str("Int")
}
TigerType::TArray(..) => {
serializer.serialize_str("Array")
}
TigerType::TRecord(..) => {
serializer.serialize_str("Record")
}
TigerType::Internal(..) => {
serializer.serialize_str("Internal")
}
TigerType::Untyped => {
serializer.serialize_str("Untyped")
}
}
}
}
/// Converts an internal type to the logical type
pub fn tipo_real(t: Arc<TigerType>, tenv: &TypeEnviroment) -> Arc<TigerType> {
match &*t {
TigerType::Internal(s) => match tenv.get(s) {
Some(tipo) => Arc::clone(&tipo),
None => panic!("Undefined")
},
_ => t
}
}
/// Returns true iif the type is an Int
pub fn es_int(t: &TigerType) -> bool {
if let TigerType::TInt(_) = *t {
true
} else { false }
}
/// An entry in our `TypeEnviroment` table.
#[derive(Clone, Debug)]
pub enum EnvEntry {
/// A declared varaible
Var {
/// The type of the variable
ty: Arc<TigerType>,
},
/// A declared function
Func {
/// The types of the arguments of the function
formals: Vec<Arc<TigerType>>,
/// The type of the return value of the function
result: Arc<TigerType>,
}
}
/// A table where we store the types that are declared as this point in typechecking.
///
/// When a type is used in a declaration, we look in this table and raise a `TypeError` if it's not found.
type TypeEnviroment = HashMap<Symbol, Arc<TigerType>>;
/// A table where we store the values that are declared as this point in typechecking.
///
/// When a variable or function is used somewhere in the code, we check this table and raise `TypeError` if it's not found.
type ValueEnviroment = HashMap<Symbol, EnvEntry>;
/// Generate a `TypeEnv` that contains integers and strings
fn initial_type_env() -> TypeEnviroment {
vec![
(Symbol::from("int"), Arc::new(TigerType::TInt(R::RW))),
(Symbol::from("string"), Arc::new(TigerType::TString))
]
.into_iter()
.collect()
}
impl From<ArgumentType> for TigerType {
fn from(arg: ArgumentType) -> Self {
match arg {
ArgumentType::String => TigerType::TString,
ArgumentType::Int => TigerType::TInt(R::RO)
}
}
}
fn initial_value_env() -> ValueEnviroment {
EXTERNALS
.iter()
.filter(|External {is_runtime, ..}| !is_runtime)
.map(|External {name, arguments, return_value, ..}|
((*name).to_string(), EnvEntry::Func {
formals: arguments
.iter()
.map(|arg| Arc::new(TigerType::from(*arg)))
.collect(),
result: if let Some(rt) = return_value {
Arc::new(TigerType::from(*rt))
} else {
Arc::new(TigerType::TUnit)
}
}))
.collect()
}
/// Errors that the typechecker can fail with.
#[derive(Debug, Clone, Serialize)]
pub enum TypeError {
/// Using variable that was not declared.
UndeclaredSimpleVar(Pos),
/// Using function that was not declared.
UndeclaredFunction(Pos),
/// Using type that was not declared.
UndeclaredType(Pos),
/// Using a field from a record that was not declared
UndeclaredField(Pos),
/// Tried to use an array or record as a simple variable
NotSimpleVar(Pos),
/// Tried to do a function call on a variable
NotFunctionVar(Pos),
/// Tried to access a record field on something other than a record
NotRecordType(Pos),
/// Tried to index something other than an array
NotArrayType(Pos),
/// Called a function with too many arguments
TooManyArguments(Pos),
/// Called a function with too few arguments
TooFewArguments(Pos),
/// Expected a different type
TypeMismatch(Pos),
/// An if-then-else with different types for each branch
ThenElseTypeMismatch(Pos),
/// Assigning to an Int with `R::RO`
ReadOnlyAssignment(Pos),
/// The bodies of for, while or if whithout else statements should type to Unit
NonUnitBody(Pos),
/// Type mismatch in function call argument
InvalidCallArgument(Pos),
/// A definition is not defining values for all record fields.
MissingRecordField(Pos),
/// The sizes of array definitions should type to Int
NonIntegerSize(Pos),
/// All conditionals should type to Int
NonIntegerCondition(Pos),
/// The range boundaries of for expressions should type to Int
NonIntegerForRange(Pos),
/// Integer operation over non integer operands
NonIntegerOperand(Pos),
/// The subscript of a field varaible should type to Int
NonIntegerSubscript(Pos),
/// Type declarations form an illicit cycle
TypeCycle(Pos),
/// Something is declared twice in the same block
DuplicatedDeclarations(Pos),
/// You can only assign nil to variables with explicit type
UnconstrainedNilInitialization(Pos),
/// All tiger programs should return something of Int type.
NonIntegerProgram(Pos)
}
impl PartialEq for TigerType {
fn eq(&self, other: &Self) -> bool {
use TigerType::*;
match (self, other) {
(TUnit, TUnit)
| (TString, TString)
| (TRecord(_, _), TNil)
| (TNil, TRecord(_, _))
| (TInt(_),TInt(_)) => true,
(TRecord(_, uid1), TRecord(_, uid2 ))
| (TArray(_, uid1), TArray(_, uid2)) => uid1 == uid2,
(Internal(s), Internal(t)) => s == t,
(Internal(_), _) => panic!("Estamos comparando un Internal"),
(_, Internal(_)) => panic!("Estamos comparando un Internal"),
(_, _) => false,
}
}
}
/// Rebuild an `AST` with the correct types given the context in the enviroments or return a `TypeError`
fn type_exp(ast : AST, type_env : &TypeEnviroment, value_env: &ValueEnviroment) -> Result<AST, TypeError> {
let AST {node, ..} = *
match node {
Exp::Var(..) => varexp::typecheck(ast, type_env, value_env),
Exp::Unit => unitexp::typecheck(ast, type_env, value_env),
Exp::Nil => nilexp::typecheck(ast, type_env, value_env),
Exp::Int(..) => intexp::typecheck(ast, type_env,&value_env),
Exp::String(..) => stringexp::typecheck(ast, type_env, value_env),
Exp::Call{..} => callexp::typecheck(ast, type_env, value_env),
Exp::Op{..} => opexp::typecheck(ast,&type_env, value_env),
Exp::Assign{..} => assignexp::typecheck(ast, type_env, value_env),
Exp::Record{..} => recordexp::typecheck(ast, type_env, value_env),
Exp::Seq(..) => seqexp::typecheck(ast, type_env, value_env),
Exp::If{..} => ifexp::typecheck(ast, type_env, value_env),
Exp::While{..} => whileexp::typecheck(ast, type_env, value_env),
Exp::For{..} => forexp::typecheck(ast, type_env, value_env),
Exp::Let{..} => letexp::typecheck(ast, type_env, value_env),
Exp::Break => breakexp::typecheck(ast, type_env, value_env),
Exp::Array{..} => arrayexp::typecheck(ast, type_env, value_env),
}
}
/// Typecheck the program
pub fn | (ast : AST) -> Result<AST, TypeError> {
let typed_ast = type_exp(ast, &initial_type_env(), &initial_value_env())?;
if *typed_ast.typ == TigerType::TInt(R::RW) {
Ok(typed_ast)
} else {
Err(TypeError::NonIntegerProgram(typed_ast.pos))
}
} | typecheck | identifier_name |
mod.rs | #![allow(clippy::pub_enum_variant_names)]
use std::collections::HashMap;
use serde::{Serialize, Serializer};
extern crate snowflake;
pub use std::sync::Arc;
use crate::ast::*;
use crate::externals::{External, ArgumentType, EXTERNALS};
mod intexp;
mod opexp;
mod recordexp;
mod seqexp;
mod assignexp;
mod ifexp;
mod whileexp;
mod forexp;
mod letexp;
mod arrayexp;
mod varexp;
mod nilexp;
mod unitexp;
mod stringexp;
mod callexp;
mod breakexp;
#[derive(Debug, PartialEq, Clone, Serialize)]
/// Write permissions for an int value
pub enum R {
/// Read-only
RO,
/// Read-write
RW
}
/// Unique identifier for Records and Arrays
pub type TypeId = snowflake::ProcessUniqueId;
/// Generate new type id for a Record or Array
pub fn newtypeid() -> TypeId {
snowflake::ProcessUniqueId::new()
}
/// Types in the Tiger language
#[derive(Debug, Clone)]
pub enum TigerType {
/// as in `()`
TUnit,
/// as in `nil`
TNil,
/// as in `3`
TInt(R),
/// as in `"perro`
TString,
/// as in `arrtype1 [10] of 0`
TArray(Arc<TigerType>, TypeId),
/// as in `{name : string, address : string, id : int, age : int}`
TRecord(Vec<(Symbol, RecordFieldType, i32)>, TypeId),
/// Type synonym
Internal(String),
/// This struct still has not been typed yet. The parser gives this type to all nodes in the AST
Untyped,
}
#[derive(Debug, Clone)]
pub enum RecordFieldType {
Record(TypeId),
Type(Arc::<TigerType>)
}
impl PartialEq for RecordFieldType {
fn eq(&self, other: &Self) -> bool {
use RecordFieldType::*;
match (self, other) {
(Record(id1), Record(id2)) => id1 == id2,
(Record(..), Type(t2)) => if let TigerType::TNil = **t2 { true } else { false },
(Type(t1), Record(..)) => if let TigerType::TNil = **t1 { true } else { false },
(Type(t1), Type(t2)) => t1 == t2,
}
}
}
impl Serialize for TigerType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
TigerType::TUnit => {
serializer.serialize_str("Unit")
}
TigerType::TNil => {
serializer.serialize_str("Nil")
}
TigerType::TString => {
serializer.serialize_str("String")
}
TigerType::TInt(..) => {
serializer.serialize_str("Int")
}
TigerType::TArray(..) => {
serializer.serialize_str("Array")
}
TigerType::TRecord(..) => {
serializer.serialize_str("Record")
}
TigerType::Internal(..) => {
serializer.serialize_str("Internal")
}
TigerType::Untyped => {
serializer.serialize_str("Untyped")
}
}
}
}
/// Converts an internal type to the logical type
pub fn tipo_real(t: Arc<TigerType>, tenv: &TypeEnviroment) -> Arc<TigerType> {
match &*t {
TigerType::Internal(s) => match tenv.get(s) {
Some(tipo) => Arc::clone(&tipo),
None => panic!("Undefined")
},
_ => t
}
} | } else { false }
}
/// An entry in our `TypeEnviroment` table.
#[derive(Clone, Debug)]
pub enum EnvEntry {
/// A declared varaible
Var {
/// The type of the variable
ty: Arc<TigerType>,
},
/// A declared function
Func {
/// The types of the arguments of the function
formals: Vec<Arc<TigerType>>,
/// The type of the return value of the function
result: Arc<TigerType>,
}
}
/// A table where we store the types that are declared as this point in typechecking.
///
/// When a type is used in a declaration, we look in this table and raise a `TypeError` if it's not found.
type TypeEnviroment = HashMap<Symbol, Arc<TigerType>>;
/// A table where we store the values that are declared as this point in typechecking.
///
/// When a variable or function is used somewhere in the code, we check this table and raise `TypeError` if it's not found.
type ValueEnviroment = HashMap<Symbol, EnvEntry>;
/// Generate a `TypeEnv` that contains integers and strings
fn initial_type_env() -> TypeEnviroment {
vec![
(Symbol::from("int"), Arc::new(TigerType::TInt(R::RW))),
(Symbol::from("string"), Arc::new(TigerType::TString))
]
.into_iter()
.collect()
}
impl From<ArgumentType> for TigerType {
fn from(arg: ArgumentType) -> Self {
match arg {
ArgumentType::String => TigerType::TString,
ArgumentType::Int => TigerType::TInt(R::RO)
}
}
}
fn initial_value_env() -> ValueEnviroment {
EXTERNALS
.iter()
.filter(|External {is_runtime, ..}| !is_runtime)
.map(|External {name, arguments, return_value, ..}|
((*name).to_string(), EnvEntry::Func {
formals: arguments
.iter()
.map(|arg| Arc::new(TigerType::from(*arg)))
.collect(),
result: if let Some(rt) = return_value {
Arc::new(TigerType::from(*rt))
} else {
Arc::new(TigerType::TUnit)
}
}))
.collect()
}
/// Errors that the typechecker can fail with.
#[derive(Debug, Clone, Serialize)]
pub enum TypeError {
/// Using variable that was not declared.
UndeclaredSimpleVar(Pos),
/// Using function that was not declared.
UndeclaredFunction(Pos),
/// Using type that was not declared.
UndeclaredType(Pos),
/// Using a field from a record that was not declared
UndeclaredField(Pos),
/// Tried to use an array or record as a simple variable
NotSimpleVar(Pos),
/// Tried to do a function call on a variable
NotFunctionVar(Pos),
/// Tried to access a record field on something other than a record
NotRecordType(Pos),
/// Tried to index something other than an array
NotArrayType(Pos),
/// Called a function with too many arguments
TooManyArguments(Pos),
/// Called a function with too few arguments
TooFewArguments(Pos),
/// Expected a different type
TypeMismatch(Pos),
/// An if-then-else with different types for each branch
ThenElseTypeMismatch(Pos),
/// Assigning to an Int with `R::RO`
ReadOnlyAssignment(Pos),
/// The bodies of for, while or if whithout else statements should type to Unit
NonUnitBody(Pos),
/// Type mismatch in function call argument
InvalidCallArgument(Pos),
/// A definition is not defining values for all record fields.
MissingRecordField(Pos),
/// The sizes of array definitions should type to Int
NonIntegerSize(Pos),
/// All conditionals should type to Int
NonIntegerCondition(Pos),
/// The range boundaries of for expressions should type to Int
NonIntegerForRange(Pos),
/// Integer operation over non integer operands
NonIntegerOperand(Pos),
/// The subscript of a field varaible should type to Int
NonIntegerSubscript(Pos),
/// Type declarations form an illicit cycle
TypeCycle(Pos),
/// Something is declared twice in the same block
DuplicatedDeclarations(Pos),
/// You can only assign nil to variables with explicit type
UnconstrainedNilInitialization(Pos),
/// All tiger programs should return something of Int type.
NonIntegerProgram(Pos)
}
impl PartialEq for TigerType {
fn eq(&self, other: &Self) -> bool {
use TigerType::*;
match (self, other) {
(TUnit, TUnit)
| (TString, TString)
| (TRecord(_, _), TNil)
| (TNil, TRecord(_, _))
| (TInt(_),TInt(_)) => true,
(TRecord(_, uid1), TRecord(_, uid2 ))
| (TArray(_, uid1), TArray(_, uid2)) => uid1 == uid2,
(Internal(s), Internal(t)) => s == t,
(Internal(_), _) => panic!("Estamos comparando un Internal"),
(_, Internal(_)) => panic!("Estamos comparando un Internal"),
(_, _) => false,
}
}
}
/// Rebuild an `AST` with the correct types given the context in the enviroments or return a `TypeError`
fn type_exp(ast : AST, type_env : &TypeEnviroment, value_env: &ValueEnviroment) -> Result<AST, TypeError> {
let AST {node, ..} = *
match node {
Exp::Var(..) => varexp::typecheck(ast, type_env, value_env),
Exp::Unit => unitexp::typecheck(ast, type_env, value_env),
Exp::Nil => nilexp::typecheck(ast, type_env, value_env),
Exp::Int(..) => intexp::typecheck(ast, type_env,&value_env),
Exp::String(..) => stringexp::typecheck(ast, type_env, value_env),
Exp::Call{..} => callexp::typecheck(ast, type_env, value_env),
Exp::Op{..} => opexp::typecheck(ast,&type_env, value_env),
Exp::Assign{..} => assignexp::typecheck(ast, type_env, value_env),
Exp::Record{..} => recordexp::typecheck(ast, type_env, value_env),
Exp::Seq(..) => seqexp::typecheck(ast, type_env, value_env),
Exp::If{..} => ifexp::typecheck(ast, type_env, value_env),
Exp::While{..} => whileexp::typecheck(ast, type_env, value_env),
Exp::For{..} => forexp::typecheck(ast, type_env, value_env),
Exp::Let{..} => letexp::typecheck(ast, type_env, value_env),
Exp::Break => breakexp::typecheck(ast, type_env, value_env),
Exp::Array{..} => arrayexp::typecheck(ast, type_env, value_env),
}
}
/// Typecheck the program
pub fn typecheck(ast : AST) -> Result<AST, TypeError> {
let typed_ast = type_exp(ast, &initial_type_env(), &initial_value_env())?;
if *typed_ast.typ == TigerType::TInt(R::RW) {
Ok(typed_ast)
} else {
Err(TypeError::NonIntegerProgram(typed_ast.pos))
}
} |
/// Returns true iif the type is an Int
pub fn es_int(t: &TigerType) -> bool {
if let TigerType::TInt(_) = *t {
true | random_line_split |
models.py | from enum import Enum
from typing import List, Dict, Union, Tuple
class UserAgents:
def __init__(self, head: str, version: List[str]):
self.head = head
self.version = version
self.index = -1
def get_next_user_agent(self):
self.index = (self.index + 1) % len(self.version)
return '{head} {version}'.format(head=self.head, version=self.version[self.index])
class Language(Enum):
English = 'en_US'
Spanish = 'es_ES'
SimplifiedChinese = 'zh_CN'
TraditionalChinese_ = 'zh_TW'
German = 'de_DE'
Portuguese = 'pt_BR'
Korean = 'ko_KR'
Hebrew = 'he_IL'
Arabic = 'ar_AE'
Hindi = 'hi_IN'
Tamil = 'ta_IN'
Telugu = 'te_IN'
Kannada = 'kn_IN'
Malayalam = 'ml_IN'
Italian = 'it_IT'
Swedish = 'sv_SE'
French = 'fr_FR'
Japanese = 'ja_JP'
Dutch = 'nl_NL'
Polish = 'pl_PL'
Turkish = 'tr_TR'
EnglishAustralia = 'en_AU'
EnglishCanada = 'en_CA'
EnglishSingapore = 'en_SG'
EnglishSpain = 'en_ES'
EnglishUnitedArabEmirates = 'en_AE'
EnglishUnitedKingdom = 'en_GB'
SpanishMexico = 'es_MX'
SpanishUnitedStates = 'es_US'
class Currency(Enum):
ArabEmiratesDirham = "AED"
ArgentinePeso = "ARS"
AustralianDollar = "AUD"
AzerbaijanNewManat = "AZN"
BahamasDollar = "BSD"
BarbadianDollar = "BBD"
BermudaDollar = "BMD"
BrazilianReal = "BRL"
BruneianDollar = "BND"
BulgariaLev = "BGN"
CanadianDollar = "CAD"
CaymanianDollar = "KYD"
ChileanPeso = "CLP"
ChineseYuanRenminbi = "CNY"
ColombianPeso = "COP"
CostaRicanColon = "CRC"
CzechKoruna = "CZK"
DanishKrone = "DKK"
DominicanRepublicPeso = "DOP"
EgyptianPound = "EGP"
Euro = "EUR"
GhanaianCedi = "GHS"
GuatemalanQuetzal = "GTQ"
HongKongDollar = "HKD"
HungarianForint = "HUF"
IndianRupee = "INR"
IndonesianRupiah = "IDR"
IsraeliShekel = "ILS"
JamaicanDollar = "JMD"
JapaneseYen = "JPY"
KazakhstanTenge = "KZT"
KenyanShilling = "KES"
LebanesePound = "LBP"
MalaysianRinggit = "MYR"
MauritianRupee = "MUR"
MexicoPeso = "MXN"
MoroccanDirham = "MAD"
NamibiaDollar = "NAD"
NewZealandDollar = "NZD"
NigerianNaira = "NGN"
NorwegianKrone = "NOK"
PakistaniRupee = "PKR"
PanamanianBalboa = "PAB"
PeruvianSol = "PEN"
PhilippinePeso = "PHP"
PolishZloty = "PLN"
Pounds = "GBP"
QatariRiyal = "QAR"
RomanianLei = "RON"
RussianRuble = "RUB"
SaudiArabianRiyal = "SAR"
SingaporeDollar = "SGD"
SouthKoreanWon = "KRW"
SriLankanRupee = "LKR"
SwedishKrona = "SEK"
SwissFranc = "CHF"
TaiwanNewDollar = "TWD"
TanzaniaShilling = "TZS"
ThaiBaht = "THB"
TrinidadianDollar = "TTD"
TurkishLira = "TRY"
USDollar = "USD"
class Country(Enum):
Australia = "com.au"
Brazil = "com.br"
Canada = "ca"
ChinaMainland = "cn"
France = "fr"
Germany = "de"
India = "in"
Italy = "it"
Japan = "co.jp"
Mexico = "com.mx"
Netherlands = "nl"
Poland = "pl"
SaudiArabia = "sa"
Singapore = "sg"
Spain = "es"
Sweden = "se"
Turkey = "com.tr"
UnitedArabEmirates = "ae"
UnitedKingdom = "co.uk"
UnitedStates = "com"
def lang_and_currency(self) -> Tuple[Language, Currency]:
return {
Country.Australia: (Language.EnglishAustralia, Currency.AustralianDollar),
Country.Brazil: (Language.Portuguese, Currency.BrazilianReal),
Country.Canada: (Language.EnglishCanada, Currency.CanadianDollar),
Country.ChinaMainland: (Language.SimplifiedChinese, Currency.ChineseYuanRenminbi),
Country.France: (Language.French, Currency.Euro),
Country.Germany: (Language.German, Currency.Euro),
Country.India: (Language.Hindi, Currency.IndianRupee),
Country.Italy: (Language.Italian, Currency.Euro),
Country.Japan: (Language.Japanese, Currency.JapaneseYen),
Country.Mexico: (Language.SpanishMexico, Currency.MexicoPeso),
Country.Netherlands: (Language.Dutch, Currency.Euro),
Country.Poland: (Language.Polish, Currency.PolishZloty),
Country.SaudiArabia: (Language.Arabic, Currency.SaudiArabianRiyal),
Country.Singapore: (Language.EnglishSingapore, Currency.SingaporeDollar),
Country.Spain: (Language.Spanish, Currency.Euro),
Country.Sweden: (Language.Swedish, Currency.SwedishKrona),
Country.Turkey: (Language.Turkish, Currency.TurkishLira),
Country.UnitedArabEmirates: (Language.EnglishUnitedArabEmirates, Currency.ArabEmiratesDirham),
Country.UnitedKingdom: (Language.EnglishUnitedKingdom, Currency.Pounds),
Country.UnitedStates: (Language.English, Currency.USDollar)
}[self]
class Offer:
def __init__(self, price: Union[float, None], currency: str, rating: float, condition: str, ships_from: str,
sold_by: str, sold_by_url: str):
self.price = price
self.currency = currency
self.approx_review = rating
self.condition = condition
self.ships_from = ships_from
self.sold_by = sold_by
self.sold_by_url = sold_by_url
def __repr__(self):
return ('Offer(price={}, currency={}, approx_review={}, condition={}, '
'ships_from={}, sold_by={}, sold_by_url={})').format(self.price, repr(self.currency),
self.approx_review, repr(self.condition),
repr(self.ships_from), repr(self.sold_by),
repr(self.sold_by_url))
class OfferList:
def __init__(self, product_name: str, offer_count: int, offers: List[Offer], settings: Dict[str, bool]):
self.product_name = product_name
self.offer_count = offer_count
self.offers = offers
self.page = settings['page']
self.settings = settings
def __repr__(self):
offers_repr_length = 100
offers_repr = repr(self.offers)
print_offers = offers_repr[:offers_repr_length]
if offers_repr[offers_repr_length:]:
print_offers += '...'
return 'OfferList(product_name={}, offer_count={}, ' \
'offers={}, page={}, settings={})'.format(repr(self.product_name), self.offer_count,
print_offers, self.page, repr(self.settings)[:30] + '...')
class Review:
def __init__(self, reviewer: str, reviewer_url: str, review_url: str, title: str, rating: int, helpful: int,
body: str):
self.reviewer = reviewer
self.reviewer_url = reviewer_url
self.review_url = review_url
self.title = title
self.rating = rating
self.helpful = helpful
self.body = body
def | (self):
body_repr_length = 100
body_repr = repr(self.body)
print_body = body_repr[:body_repr_length]
if body_repr[body_repr_length:]:
print_body += '...'
return 'Review(reviewer={}, reviewer_url={}, review_url={}, title={}, rating={}, helpful={}, body={})'.format(
repr(self.reviewer), repr(self.reviewer_url), repr(self.review_url),
repr(self.title), self.rating, self.helpful, print_body)
class ReviewList:
def __init__(self, reviews: List[Review], asin: str, country: Country, settings: Dict, last_page=False):
self.reviews = reviews
self.asin = asin
self.country = country
self.settings = settings
self.page = settings['pageNumber']
self.last_page = last_page
def __repr__(self):
reviews_repr_length = 100
reviews_repr = repr(self.reviews)
print_reviews = reviews_repr[:reviews_repr_length]
if reviews_repr[reviews_repr_length:]:
print_reviews += '...'
return 'ReviewList(reviews={}, asin={}, country={}, page={}, last_page={})'.format(print_reviews,
repr(self.asin),
self.country,
self.page, self.last_page)
class ReviewParameter:
class SortBy(Enum):
Helpful = 'helpful'
"""Sort by helpful. default"""
Recent = 'recent'
"""Sort by recent"""
class ReviewerType(Enum):
AllReviews = 'all_reviews'
"""Show all reviews. default"""
AVPOnlyReviews = 'avp_only_reviews'
"""Show only verified purchase reviews"""
class FormatType(Enum):
AllFormats = 'all_formats'
"""Show reviews for all format. default"""
CurrentFormat = 'current_format'
"""Show reviews for only current format"""
class MediaType(Enum):
AllContents = 'all_contents'
"""Show reviews with text, image or video. default"""
MediaReviewsOnly = 'media_reviews_only'
"""Show reviews with image or video"""
class FilterByStar(Enum):
AllStars = 'all_stars'
"""Show all reviews. default"""
FiveStar = 'five_star'
"""Show reviews with 5 star"""
FourStar = 'four_star'
"""Show reviews with 4 star"""
ThreeStar = 'three_star'
"""Show reviews with 3 star"""
TwoStar = 'two_star'
"""Show reviews with 2 star"""
OneStar = 'one_star'
"""Show reviews with 1 star"""
Positive = 'positive'
"""Show positive reviews. Maybe 5 and 4 stars."""
Critical = 'critical'
"""Show critical reviews. Maybe 3, 2 and 1 stars."""
class ReviewSettings:
def __init__(self,
sort_by: ReviewParameter.SortBy = ReviewParameter.SortBy.Helpful,
reviewer_type: ReviewParameter.SortBy = ReviewParameter.ReviewerType.AllReviews,
format_type: ReviewParameter.FormatType = ReviewParameter.FormatType.AllFormats,
media_type: ReviewParameter.MediaType = ReviewParameter.MediaType.AllContents,
filter_by_star: ReviewParameter.FilterByStar = ReviewParameter.FilterByStar.AllStars,
page_number: int = 1, filter_by_language: str = ''):
pass
| __repr__ | identifier_name |
models.py | from enum import Enum
from typing import List, Dict, Union, Tuple
class UserAgents:
def __init__(self, head: str, version: List[str]):
self.head = head
self.version = version
self.index = -1
def get_next_user_agent(self):
self.index = (self.index + 1) % len(self.version)
return '{head} {version}'.format(head=self.head, version=self.version[self.index])
class Language(Enum):
English = 'en_US'
Spanish = 'es_ES'
SimplifiedChinese = 'zh_CN'
TraditionalChinese_ = 'zh_TW'
German = 'de_DE'
Portuguese = 'pt_BR'
Korean = 'ko_KR'
Hebrew = 'he_IL'
Arabic = 'ar_AE'
Hindi = 'hi_IN'
Tamil = 'ta_IN'
Telugu = 'te_IN'
Kannada = 'kn_IN'
Malayalam = 'ml_IN'
Italian = 'it_IT'
Swedish = 'sv_SE'
French = 'fr_FR'
Japanese = 'ja_JP'
Dutch = 'nl_NL'
Polish = 'pl_PL'
Turkish = 'tr_TR'
EnglishAustralia = 'en_AU'
EnglishCanada = 'en_CA'
EnglishSingapore = 'en_SG'
EnglishSpain = 'en_ES'
EnglishUnitedArabEmirates = 'en_AE'
EnglishUnitedKingdom = 'en_GB'
SpanishMexico = 'es_MX'
SpanishUnitedStates = 'es_US'
class Currency(Enum):
ArabEmiratesDirham = "AED"
ArgentinePeso = "ARS"
AustralianDollar = "AUD"
AzerbaijanNewManat = "AZN"
BahamasDollar = "BSD"
BarbadianDollar = "BBD"
BermudaDollar = "BMD"
BrazilianReal = "BRL"
BruneianDollar = "BND"
BulgariaLev = "BGN"
CanadianDollar = "CAD"
CaymanianDollar = "KYD"
ChileanPeso = "CLP"
ChineseYuanRenminbi = "CNY"
ColombianPeso = "COP"
CostaRicanColon = "CRC"
CzechKoruna = "CZK"
DanishKrone = "DKK"
DominicanRepublicPeso = "DOP"
EgyptianPound = "EGP"
Euro = "EUR"
GhanaianCedi = "GHS"
GuatemalanQuetzal = "GTQ"
HongKongDollar = "HKD"
HungarianForint = "HUF"
IndianRupee = "INR"
IndonesianRupiah = "IDR"
IsraeliShekel = "ILS"
JamaicanDollar = "JMD"
JapaneseYen = "JPY"
KazakhstanTenge = "KZT"
KenyanShilling = "KES"
LebanesePound = "LBP"
MalaysianRinggit = "MYR"
MauritianRupee = "MUR"
MexicoPeso = "MXN"
MoroccanDirham = "MAD"
NamibiaDollar = "NAD"
NewZealandDollar = "NZD"
NigerianNaira = "NGN"
NorwegianKrone = "NOK"
PakistaniRupee = "PKR"
PanamanianBalboa = "PAB"
PeruvianSol = "PEN"
PhilippinePeso = "PHP"
PolishZloty = "PLN"
Pounds = "GBP"
QatariRiyal = "QAR"
RomanianLei = "RON"
RussianRuble = "RUB"
SaudiArabianRiyal = "SAR"
SingaporeDollar = "SGD"
SouthKoreanWon = "KRW"
SriLankanRupee = "LKR"
SwedishKrona = "SEK"
SwissFranc = "CHF"
TaiwanNewDollar = "TWD"
TanzaniaShilling = "TZS"
ThaiBaht = "THB"
TrinidadianDollar = "TTD"
TurkishLira = "TRY"
USDollar = "USD"
class Country(Enum):
Australia = "com.au"
Brazil = "com.br"
Canada = "ca"
ChinaMainland = "cn"
France = "fr"
Germany = "de"
India = "in"
Italy = "it"
Japan = "co.jp"
Mexico = "com.mx"
Netherlands = "nl"
Poland = "pl"
SaudiArabia = "sa"
Singapore = "sg"
Spain = "es"
Sweden = "se"
Turkey = "com.tr"
UnitedArabEmirates = "ae"
UnitedKingdom = "co.uk"
UnitedStates = "com"
def lang_and_currency(self) -> Tuple[Language, Currency]:
return {
Country.Australia: (Language.EnglishAustralia, Currency.AustralianDollar),
Country.Brazil: (Language.Portuguese, Currency.BrazilianReal),
Country.Canada: (Language.EnglishCanada, Currency.CanadianDollar),
Country.ChinaMainland: (Language.SimplifiedChinese, Currency.ChineseYuanRenminbi),
Country.France: (Language.French, Currency.Euro),
Country.Germany: (Language.German, Currency.Euro),
Country.India: (Language.Hindi, Currency.IndianRupee),
Country.Italy: (Language.Italian, Currency.Euro),
Country.Japan: (Language.Japanese, Currency.JapaneseYen),
Country.Mexico: (Language.SpanishMexico, Currency.MexicoPeso),
Country.Netherlands: (Language.Dutch, Currency.Euro),
Country.Poland: (Language.Polish, Currency.PolishZloty),
Country.SaudiArabia: (Language.Arabic, Currency.SaudiArabianRiyal),
Country.Singapore: (Language.EnglishSingapore, Currency.SingaporeDollar),
Country.Spain: (Language.Spanish, Currency.Euro),
Country.Sweden: (Language.Swedish, Currency.SwedishKrona),
Country.Turkey: (Language.Turkish, Currency.TurkishLira),
Country.UnitedArabEmirates: (Language.EnglishUnitedArabEmirates, Currency.ArabEmiratesDirham),
Country.UnitedKingdom: (Language.EnglishUnitedKingdom, Currency.Pounds),
Country.UnitedStates: (Language.English, Currency.USDollar)
}[self]
class Offer:
def __init__(self, price: Union[float, None], currency: str, rating: float, condition: str, ships_from: str,
sold_by: str, sold_by_url: str):
self.price = price
self.currency = currency
self.approx_review = rating
self.condition = condition
self.ships_from = ships_from
self.sold_by = sold_by
self.sold_by_url = sold_by_url
def __repr__(self):
return ('Offer(price={}, currency={}, approx_review={}, condition={}, '
'ships_from={}, sold_by={}, sold_by_url={})').format(self.price, repr(self.currency),
self.approx_review, repr(self.condition),
repr(self.ships_from), repr(self.sold_by),
repr(self.sold_by_url))
class OfferList:
def __init__(self, product_name: str, offer_count: int, offers: List[Offer], settings: Dict[str, bool]):
self.product_name = product_name
self.offer_count = offer_count
self.offers = offers
self.page = settings['page']
self.settings = settings
def __repr__(self):
offers_repr_length = 100
offers_repr = repr(self.offers)
print_offers = offers_repr[:offers_repr_length]
if offers_repr[offers_repr_length:]:
print_offers += '...'
return 'OfferList(product_name={}, offer_count={}, ' \
'offers={}, page={}, settings={})'.format(repr(self.product_name), self.offer_count,
print_offers, self.page, repr(self.settings)[:30] + '...')
class Review:
def __init__(self, reviewer: str, reviewer_url: str, review_url: str, title: str, rating: int, helpful: int,
body: str):
self.reviewer = reviewer
self.reviewer_url = reviewer_url
self.review_url = review_url
self.title = title
self.rating = rating
self.helpful = helpful
self.body = body
def __repr__(self):
body_repr_length = 100
body_repr = repr(self.body)
print_body = body_repr[:body_repr_length]
if body_repr[body_repr_length:]:
print_body += '...'
return 'Review(reviewer={}, reviewer_url={}, review_url={}, title={}, rating={}, helpful={}, body={})'.format(
repr(self.reviewer), repr(self.reviewer_url), repr(self.review_url),
repr(self.title), self.rating, self.helpful, print_body)
class ReviewList:
def __init__(self, reviews: List[Review], asin: str, country: Country, settings: Dict, last_page=False):
self.reviews = reviews
self.asin = asin
self.country = country
self.settings = settings
self.page = settings['pageNumber']
self.last_page = last_page
def __repr__(self):
reviews_repr_length = 100
reviews_repr = repr(self.reviews)
print_reviews = reviews_repr[:reviews_repr_length]
if reviews_repr[reviews_repr_length:]:
|
return 'ReviewList(reviews={}, asin={}, country={}, page={}, last_page={})'.format(print_reviews,
repr(self.asin),
self.country,
self.page, self.last_page)
class ReviewParameter:
class SortBy(Enum):
Helpful = 'helpful'
"""Sort by helpful. default"""
Recent = 'recent'
"""Sort by recent"""
class ReviewerType(Enum):
AllReviews = 'all_reviews'
"""Show all reviews. default"""
AVPOnlyReviews = 'avp_only_reviews'
"""Show only verified purchase reviews"""
class FormatType(Enum):
AllFormats = 'all_formats'
"""Show reviews for all format. default"""
CurrentFormat = 'current_format'
"""Show reviews for only current format"""
class MediaType(Enum):
AllContents = 'all_contents'
"""Show reviews with text, image or video. default"""
MediaReviewsOnly = 'media_reviews_only'
"""Show reviews with image or video"""
class FilterByStar(Enum):
AllStars = 'all_stars'
"""Show all reviews. default"""
FiveStar = 'five_star'
"""Show reviews with 5 star"""
FourStar = 'four_star'
"""Show reviews with 4 star"""
ThreeStar = 'three_star'
"""Show reviews with 3 star"""
TwoStar = 'two_star'
"""Show reviews with 2 star"""
OneStar = 'one_star'
"""Show reviews with 1 star"""
Positive = 'positive'
"""Show positive reviews. Maybe 5 and 4 stars."""
Critical = 'critical'
"""Show critical reviews. Maybe 3, 2 and 1 stars."""
class ReviewSettings:
def __init__(self,
sort_by: ReviewParameter.SortBy = ReviewParameter.SortBy.Helpful,
reviewer_type: ReviewParameter.SortBy = ReviewParameter.ReviewerType.AllReviews,
format_type: ReviewParameter.FormatType = ReviewParameter.FormatType.AllFormats,
media_type: ReviewParameter.MediaType = ReviewParameter.MediaType.AllContents,
filter_by_star: ReviewParameter.FilterByStar = ReviewParameter.FilterByStar.AllStars,
page_number: int = 1, filter_by_language: str = ''):
pass
| print_reviews += '...' | conditional_block |
models.py | from enum import Enum
from typing import List, Dict, Union, Tuple
class UserAgents:
def __init__(self, head: str, version: List[str]):
self.head = head
self.version = version
self.index = -1
def get_next_user_agent(self):
self.index = (self.index + 1) % len(self.version)
return '{head} {version}'.format(head=self.head, version=self.version[self.index])
class Language(Enum):
English = 'en_US'
Spanish = 'es_ES'
SimplifiedChinese = 'zh_CN'
TraditionalChinese_ = 'zh_TW'
German = 'de_DE'
Portuguese = 'pt_BR'
Korean = 'ko_KR'
Hebrew = 'he_IL'
Arabic = 'ar_AE'
Hindi = 'hi_IN'
Tamil = 'ta_IN'
Telugu = 'te_IN'
Kannada = 'kn_IN'
Malayalam = 'ml_IN'
Italian = 'it_IT'
Swedish = 'sv_SE'
French = 'fr_FR'
Japanese = 'ja_JP'
Dutch = 'nl_NL'
Polish = 'pl_PL'
Turkish = 'tr_TR'
EnglishAustralia = 'en_AU'
EnglishCanada = 'en_CA'
EnglishSingapore = 'en_SG'
EnglishSpain = 'en_ES'
EnglishUnitedArabEmirates = 'en_AE'
EnglishUnitedKingdom = 'en_GB'
SpanishMexico = 'es_MX'
SpanishUnitedStates = 'es_US'
class Currency(Enum):
ArabEmiratesDirham = "AED"
ArgentinePeso = "ARS"
AustralianDollar = "AUD"
AzerbaijanNewManat = "AZN"
BahamasDollar = "BSD"
BarbadianDollar = "BBD"
BermudaDollar = "BMD"
BrazilianReal = "BRL"
BruneianDollar = "BND"
BulgariaLev = "BGN"
CanadianDollar = "CAD"
CaymanianDollar = "KYD"
ChileanPeso = "CLP"
ChineseYuanRenminbi = "CNY"
ColombianPeso = "COP"
CostaRicanColon = "CRC"
CzechKoruna = "CZK"
DanishKrone = "DKK"
DominicanRepublicPeso = "DOP"
EgyptianPound = "EGP"
Euro = "EUR"
GhanaianCedi = "GHS"
GuatemalanQuetzal = "GTQ"
HongKongDollar = "HKD"
HungarianForint = "HUF"
IndianRupee = "INR"
IndonesianRupiah = "IDR"
IsraeliShekel = "ILS"
JamaicanDollar = "JMD"
JapaneseYen = "JPY"
KazakhstanTenge = "KZT"
KenyanShilling = "KES"
LebanesePound = "LBP"
MalaysianRinggit = "MYR"
MauritianRupee = "MUR"
MexicoPeso = "MXN"
MoroccanDirham = "MAD"
NamibiaDollar = "NAD"
NewZealandDollar = "NZD"
NigerianNaira = "NGN"
NorwegianKrone = "NOK"
PakistaniRupee = "PKR"
PanamanianBalboa = "PAB"
PeruvianSol = "PEN"
PhilippinePeso = "PHP"
PolishZloty = "PLN"
Pounds = "GBP"
QatariRiyal = "QAR"
RomanianLei = "RON"
RussianRuble = "RUB"
SaudiArabianRiyal = "SAR"
SingaporeDollar = "SGD"
SouthKoreanWon = "KRW"
SriLankanRupee = "LKR"
SwedishKrona = "SEK"
SwissFranc = "CHF"
TaiwanNewDollar = "TWD"
TanzaniaShilling = "TZS"
ThaiBaht = "THB"
TrinidadianDollar = "TTD"
TurkishLira = "TRY"
USDollar = "USD"
class Country(Enum):
Australia = "com.au"
Brazil = "com.br"
Canada = "ca"
ChinaMainland = "cn"
France = "fr"
Germany = "de"
India = "in"
Italy = "it"
Japan = "co.jp"
Mexico = "com.mx"
Netherlands = "nl"
Poland = "pl"
SaudiArabia = "sa"
Singapore = "sg"
Spain = "es"
Sweden = "se"
Turkey = "com.tr"
UnitedArabEmirates = "ae"
UnitedKingdom = "co.uk"
UnitedStates = "com"
def lang_and_currency(self) -> Tuple[Language, Currency]:
return {
Country.Australia: (Language.EnglishAustralia, Currency.AustralianDollar),
Country.Brazil: (Language.Portuguese, Currency.BrazilianReal),
Country.Canada: (Language.EnglishCanada, Currency.CanadianDollar),
Country.ChinaMainland: (Language.SimplifiedChinese, Currency.ChineseYuanRenminbi),
Country.France: (Language.French, Currency.Euro),
Country.Germany: (Language.German, Currency.Euro),
Country.India: (Language.Hindi, Currency.IndianRupee),
Country.Italy: (Language.Italian, Currency.Euro),
Country.Japan: (Language.Japanese, Currency.JapaneseYen),
Country.Mexico: (Language.SpanishMexico, Currency.MexicoPeso),
Country.Netherlands: (Language.Dutch, Currency.Euro),
Country.Poland: (Language.Polish, Currency.PolishZloty),
Country.SaudiArabia: (Language.Arabic, Currency.SaudiArabianRiyal),
Country.Singapore: (Language.EnglishSingapore, Currency.SingaporeDollar),
Country.Spain: (Language.Spanish, Currency.Euro),
Country.Sweden: (Language.Swedish, Currency.SwedishKrona),
Country.Turkey: (Language.Turkish, Currency.TurkishLira),
Country.UnitedArabEmirates: (Language.EnglishUnitedArabEmirates, Currency.ArabEmiratesDirham),
Country.UnitedKingdom: (Language.EnglishUnitedKingdom, Currency.Pounds),
Country.UnitedStates: (Language.English, Currency.USDollar)
}[self]
class Offer:
def __init__(self, price: Union[float, None], currency: str, rating: float, condition: str, ships_from: str,
sold_by: str, sold_by_url: str):
self.price = price
self.currency = currency
self.approx_review = rating
self.condition = condition
self.ships_from = ships_from
self.sold_by = sold_by
self.sold_by_url = sold_by_url
def __repr__(self):
return ('Offer(price={}, currency={}, approx_review={}, condition={}, '
'ships_from={}, sold_by={}, sold_by_url={})').format(self.price, repr(self.currency),
self.approx_review, repr(self.condition),
repr(self.ships_from), repr(self.sold_by),
repr(self.sold_by_url))
class OfferList:
def __init__(self, product_name: str, offer_count: int, offers: List[Offer], settings: Dict[str, bool]):
self.product_name = product_name
self.offer_count = offer_count
self.offers = offers
self.page = settings['page']
self.settings = settings
def __repr__(self):
offers_repr_length = 100
offers_repr = repr(self.offers)
print_offers = offers_repr[:offers_repr_length]
if offers_repr[offers_repr_length:]:
print_offers += '...'
return 'OfferList(product_name={}, offer_count={}, ' \
'offers={}, page={}, settings={})'.format(repr(self.product_name), self.offer_count,
print_offers, self.page, repr(self.settings)[:30] + '...')
class Review:
def __init__(self, reviewer: str, reviewer_url: str, review_url: str, title: str, rating: int, helpful: int,
body: str):
self.reviewer = reviewer
self.reviewer_url = reviewer_url
self.review_url = review_url
self.title = title
self.rating = rating
self.helpful = helpful
self.body = body
def __repr__(self):
body_repr_length = 100
body_repr = repr(self.body)
print_body = body_repr[:body_repr_length]
if body_repr[body_repr_length:]:
print_body += '...'
return 'Review(reviewer={}, reviewer_url={}, review_url={}, title={}, rating={}, helpful={}, body={})'.format(
repr(self.reviewer), repr(self.reviewer_url), repr(self.review_url),
repr(self.title), self.rating, self.helpful, print_body)
class ReviewList:
def __init__(self, reviews: List[Review], asin: str, country: Country, settings: Dict, last_page=False):
self.reviews = reviews
self.asin = asin
self.country = country
self.settings = settings
self.page = settings['pageNumber']
self.last_page = last_page
def __repr__(self):
reviews_repr_length = 100
reviews_repr = repr(self.reviews)
print_reviews = reviews_repr[:reviews_repr_length]
if reviews_repr[reviews_repr_length:]:
print_reviews += '...'
return 'ReviewList(reviews={}, asin={}, country={}, page={}, last_page={})'.format(print_reviews,
repr(self.asin),
self.country,
self.page, self.last_page)
class ReviewParameter:
class SortBy(Enum):
Helpful = 'helpful'
"""Sort by helpful. default"""
Recent = 'recent'
"""Sort by recent"""
class ReviewerType(Enum):
AllReviews = 'all_reviews'
"""Show all reviews. default"""
AVPOnlyReviews = 'avp_only_reviews'
"""Show only verified purchase reviews"""
class FormatType(Enum):
AllFormats = 'all_formats'
"""Show reviews for all format. default"""
CurrentFormat = 'current_format'
"""Show reviews for only current format"""
class MediaType(Enum):
AllContents = 'all_contents'
"""Show reviews with text, image or video. default"""
MediaReviewsOnly = 'media_reviews_only'
"""Show reviews with image or video"""
class FilterByStar(Enum):
AllStars = 'all_stars'
"""Show all reviews. default"""
FiveStar = 'five_star'
"""Show reviews with 5 star"""
FourStar = 'four_star'
"""Show reviews with 4 star"""
ThreeStar = 'three_star'
"""Show reviews with 3 star"""
TwoStar = 'two_star'
"""Show reviews with 2 star"""
OneStar = 'one_star'
"""Show reviews with 1 star"""
Positive = 'positive'
"""Show positive reviews. Maybe 5 and 4 stars."""
Critical = 'critical'
"""Show critical reviews. Maybe 3, 2 and 1 stars.""" | def __init__(self,
sort_by: ReviewParameter.SortBy = ReviewParameter.SortBy.Helpful,
reviewer_type: ReviewParameter.SortBy = ReviewParameter.ReviewerType.AllReviews,
format_type: ReviewParameter.FormatType = ReviewParameter.FormatType.AllFormats,
media_type: ReviewParameter.MediaType = ReviewParameter.MediaType.AllContents,
filter_by_star: ReviewParameter.FilterByStar = ReviewParameter.FilterByStar.AllStars,
page_number: int = 1, filter_by_language: str = ''):
pass |
class ReviewSettings: | random_line_split |
models.py | from enum import Enum
from typing import List, Dict, Union, Tuple
class UserAgents:
def __init__(self, head: str, version: List[str]):
self.head = head
self.version = version
self.index = -1
def get_next_user_agent(self):
self.index = (self.index + 1) % len(self.version)
return '{head} {version}'.format(head=self.head, version=self.version[self.index])
class Language(Enum):
English = 'en_US'
Spanish = 'es_ES'
SimplifiedChinese = 'zh_CN'
TraditionalChinese_ = 'zh_TW'
German = 'de_DE'
Portuguese = 'pt_BR'
Korean = 'ko_KR'
Hebrew = 'he_IL'
Arabic = 'ar_AE'
Hindi = 'hi_IN'
Tamil = 'ta_IN'
Telugu = 'te_IN'
Kannada = 'kn_IN'
Malayalam = 'ml_IN'
Italian = 'it_IT'
Swedish = 'sv_SE'
French = 'fr_FR'
Japanese = 'ja_JP'
Dutch = 'nl_NL'
Polish = 'pl_PL'
Turkish = 'tr_TR'
EnglishAustralia = 'en_AU'
EnglishCanada = 'en_CA'
EnglishSingapore = 'en_SG'
EnglishSpain = 'en_ES'
EnglishUnitedArabEmirates = 'en_AE'
EnglishUnitedKingdom = 'en_GB'
SpanishMexico = 'es_MX'
SpanishUnitedStates = 'es_US'
class Currency(Enum):
ArabEmiratesDirham = "AED"
ArgentinePeso = "ARS"
AustralianDollar = "AUD"
AzerbaijanNewManat = "AZN"
BahamasDollar = "BSD"
BarbadianDollar = "BBD"
BermudaDollar = "BMD"
BrazilianReal = "BRL"
BruneianDollar = "BND"
BulgariaLev = "BGN"
CanadianDollar = "CAD"
CaymanianDollar = "KYD"
ChileanPeso = "CLP"
ChineseYuanRenminbi = "CNY"
ColombianPeso = "COP"
CostaRicanColon = "CRC"
CzechKoruna = "CZK"
DanishKrone = "DKK"
DominicanRepublicPeso = "DOP"
EgyptianPound = "EGP"
Euro = "EUR"
GhanaianCedi = "GHS"
GuatemalanQuetzal = "GTQ"
HongKongDollar = "HKD"
HungarianForint = "HUF"
IndianRupee = "INR"
IndonesianRupiah = "IDR"
IsraeliShekel = "ILS"
JamaicanDollar = "JMD"
JapaneseYen = "JPY"
KazakhstanTenge = "KZT"
KenyanShilling = "KES"
LebanesePound = "LBP"
MalaysianRinggit = "MYR"
MauritianRupee = "MUR"
MexicoPeso = "MXN"
MoroccanDirham = "MAD"
NamibiaDollar = "NAD"
NewZealandDollar = "NZD"
NigerianNaira = "NGN"
NorwegianKrone = "NOK"
PakistaniRupee = "PKR"
PanamanianBalboa = "PAB"
PeruvianSol = "PEN"
PhilippinePeso = "PHP"
PolishZloty = "PLN"
Pounds = "GBP"
QatariRiyal = "QAR"
RomanianLei = "RON"
RussianRuble = "RUB"
SaudiArabianRiyal = "SAR"
SingaporeDollar = "SGD"
SouthKoreanWon = "KRW"
SriLankanRupee = "LKR"
SwedishKrona = "SEK"
SwissFranc = "CHF"
TaiwanNewDollar = "TWD"
TanzaniaShilling = "TZS"
ThaiBaht = "THB"
TrinidadianDollar = "TTD"
TurkishLira = "TRY"
USDollar = "USD"
class Country(Enum):
Australia = "com.au"
Brazil = "com.br"
Canada = "ca"
ChinaMainland = "cn"
France = "fr"
Germany = "de"
India = "in"
Italy = "it"
Japan = "co.jp"
Mexico = "com.mx"
Netherlands = "nl"
Poland = "pl"
SaudiArabia = "sa"
Singapore = "sg"
Spain = "es"
Sweden = "se"
Turkey = "com.tr"
UnitedArabEmirates = "ae"
UnitedKingdom = "co.uk"
UnitedStates = "com"
def lang_and_currency(self) -> Tuple[Language, Currency]:
return {
Country.Australia: (Language.EnglishAustralia, Currency.AustralianDollar),
Country.Brazil: (Language.Portuguese, Currency.BrazilianReal),
Country.Canada: (Language.EnglishCanada, Currency.CanadianDollar),
Country.ChinaMainland: (Language.SimplifiedChinese, Currency.ChineseYuanRenminbi),
Country.France: (Language.French, Currency.Euro),
Country.Germany: (Language.German, Currency.Euro),
Country.India: (Language.Hindi, Currency.IndianRupee),
Country.Italy: (Language.Italian, Currency.Euro),
Country.Japan: (Language.Japanese, Currency.JapaneseYen),
Country.Mexico: (Language.SpanishMexico, Currency.MexicoPeso),
Country.Netherlands: (Language.Dutch, Currency.Euro),
Country.Poland: (Language.Polish, Currency.PolishZloty),
Country.SaudiArabia: (Language.Arabic, Currency.SaudiArabianRiyal),
Country.Singapore: (Language.EnglishSingapore, Currency.SingaporeDollar),
Country.Spain: (Language.Spanish, Currency.Euro),
Country.Sweden: (Language.Swedish, Currency.SwedishKrona),
Country.Turkey: (Language.Turkish, Currency.TurkishLira),
Country.UnitedArabEmirates: (Language.EnglishUnitedArabEmirates, Currency.ArabEmiratesDirham),
Country.UnitedKingdom: (Language.EnglishUnitedKingdom, Currency.Pounds),
Country.UnitedStates: (Language.English, Currency.USDollar)
}[self]
class Offer:
def __init__(self, price: Union[float, None], currency: str, rating: float, condition: str, ships_from: str,
sold_by: str, sold_by_url: str):
self.price = price
self.currency = currency
self.approx_review = rating
self.condition = condition
self.ships_from = ships_from
self.sold_by = sold_by
self.sold_by_url = sold_by_url
def __repr__(self):
return ('Offer(price={}, currency={}, approx_review={}, condition={}, '
'ships_from={}, sold_by={}, sold_by_url={})').format(self.price, repr(self.currency),
self.approx_review, repr(self.condition),
repr(self.ships_from), repr(self.sold_by),
repr(self.sold_by_url))
class OfferList:
|
class Review:
def __init__(self, reviewer: str, reviewer_url: str, review_url: str, title: str, rating: int, helpful: int,
body: str):
self.reviewer = reviewer
self.reviewer_url = reviewer_url
self.review_url = review_url
self.title = title
self.rating = rating
self.helpful = helpful
self.body = body
def __repr__(self):
body_repr_length = 100
body_repr = repr(self.body)
print_body = body_repr[:body_repr_length]
if body_repr[body_repr_length:]:
print_body += '...'
return 'Review(reviewer={}, reviewer_url={}, review_url={}, title={}, rating={}, helpful={}, body={})'.format(
repr(self.reviewer), repr(self.reviewer_url), repr(self.review_url),
repr(self.title), self.rating, self.helpful, print_body)
class ReviewList:
def __init__(self, reviews: List[Review], asin: str, country: Country, settings: Dict, last_page=False):
self.reviews = reviews
self.asin = asin
self.country = country
self.settings = settings
self.page = settings['pageNumber']
self.last_page = last_page
def __repr__(self):
reviews_repr_length = 100
reviews_repr = repr(self.reviews)
print_reviews = reviews_repr[:reviews_repr_length]
if reviews_repr[reviews_repr_length:]:
print_reviews += '...'
return 'ReviewList(reviews={}, asin={}, country={}, page={}, last_page={})'.format(print_reviews,
repr(self.asin),
self.country,
self.page, self.last_page)
class ReviewParameter:
class SortBy(Enum):
Helpful = 'helpful'
"""Sort by helpful. default"""
Recent = 'recent'
"""Sort by recent"""
class ReviewerType(Enum):
AllReviews = 'all_reviews'
"""Show all reviews. default"""
AVPOnlyReviews = 'avp_only_reviews'
"""Show only verified purchase reviews"""
class FormatType(Enum):
AllFormats = 'all_formats'
"""Show reviews for all format. default"""
CurrentFormat = 'current_format'
"""Show reviews for only current format"""
class MediaType(Enum):
AllContents = 'all_contents'
"""Show reviews with text, image or video. default"""
MediaReviewsOnly = 'media_reviews_only'
"""Show reviews with image or video"""
class FilterByStar(Enum):
AllStars = 'all_stars'
"""Show all reviews. default"""
FiveStar = 'five_star'
"""Show reviews with 5 star"""
FourStar = 'four_star'
"""Show reviews with 4 star"""
ThreeStar = 'three_star'
"""Show reviews with 3 star"""
TwoStar = 'two_star'
"""Show reviews with 2 star"""
OneStar = 'one_star'
"""Show reviews with 1 star"""
Positive = 'positive'
"""Show positive reviews. Maybe 5 and 4 stars."""
Critical = 'critical'
"""Show critical reviews. Maybe 3, 2 and 1 stars."""
class ReviewSettings:
def __init__(self,
sort_by: ReviewParameter.SortBy = ReviewParameter.SortBy.Helpful,
reviewer_type: ReviewParameter.SortBy = ReviewParameter.ReviewerType.AllReviews,
format_type: ReviewParameter.FormatType = ReviewParameter.FormatType.AllFormats,
media_type: ReviewParameter.MediaType = ReviewParameter.MediaType.AllContents,
filter_by_star: ReviewParameter.FilterByStar = ReviewParameter.FilterByStar.AllStars,
page_number: int = 1, filter_by_language: str = ''):
pass
| def __init__(self, product_name: str, offer_count: int, offers: List[Offer], settings: Dict[str, bool]):
self.product_name = product_name
self.offer_count = offer_count
self.offers = offers
self.page = settings['page']
self.settings = settings
def __repr__(self):
offers_repr_length = 100
offers_repr = repr(self.offers)
print_offers = offers_repr[:offers_repr_length]
if offers_repr[offers_repr_length:]:
print_offers += '...'
return 'OfferList(product_name={}, offer_count={}, ' \
'offers={}, page={}, settings={})'.format(repr(self.product_name), self.offer_count,
print_offers, self.page, repr(self.settings)[:30] + '...') | identifier_body |
training.go | // training is a package for managing MXNet training jobs.
package trainer
import (
"fmt"
"reflect"
"github.com/deepinsight/mxnet-operator/pkg/spec"
"github.com/deepinsight/mxnet-operator/pkg/util"
"github.com/deepinsight/mxnet-operator/pkg/util/k8sutil"
"github.com/deepinsight/mxnet-operator/pkg/util/retryutil"
log "github.com/golang/glog"
"math"
"sync"
"time"
"github.com/deepinsight/mxnet-operator/pkg/garbagecollection"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
)
const (
NAMESPACE string = "default"
)
var (
reconcileInterval = 8 * time.Second
)
type jobEventType string
const (
eventDeleteJob jobEventType = "Delete"
eventModifyJob jobEventType = "Modify"
)
type jobEvent struct {
typ jobEventType
// TODO(jlewi): Rename cluster to job.
cluster *spec.MxJob
}
// TODO(jlewi): We should switch a New pattern and make trainingJob private so we can
// ensure correctness on creation.
type TrainingJob struct {
job *spec.MxJob
KubeCli kubernetes.Interface
Replicas []*MXReplicaSet
mxJobClient k8sutil.MxJobClient
// in memory state of the job.
// status is the source of truth after job struct is materialized. Changes to the status to be persisted
// should be made here.
status spec.MxJobStatus
memberCounter int
// eventCh is used to provide Kubernetes events for a particular cluster that need to be handled.
eventCh chan *jobEvent
// stopCh is a channel used to communicate that the cluster needs to be stopped.
stopCh chan struct{}
gc *garbagecollection.GC
}
func initJob(kubeCli kubernetes.Interface, mxJobClient k8sutil.MxJobClient, job *spec.MxJob, stopC <-chan struct{}, wg *sync.WaitGroup) (*TrainingJob, error) {
j := &TrainingJob{
KubeCli: kubeCli,
mxJobClient: mxJobClient,
Replicas: make([]*MXReplicaSet, 0),
job: job,
eventCh: make(chan *jobEvent, 100),
stopCh: make(chan struct{}),
status: job.Status.Copy(),
gc: garbagecollection.New(kubeCli, mxJobClient, job.Metadata.Namespace),
}
return j, nil
}
func NewJob(kubeCli kubernetes.Interface, mxJobClient k8sutil.MxJobClient, mxjob *spec.MxJob, stopC <-chan struct{}, wg *sync.WaitGroup, config *spec.ControllerConfig) (*TrainingJob, error) {
j, err := initJob(kubeCli, mxJobClient, mxjob, stopC, wg)
if err != nil {
return nil, err
}
// Increment the wait group which the controller uses to monitor the job processing.
wg.Add(1)
go func() {
defer wg.Done()
if err := j.setup(config); err != nil {
log.Errorf("MxJob failed to setup: %v", err)
if j.status.Phase != spec.MxJobPhaseFailed {
j.status.SetReason(err.Error())
j.status.SetPhase(spec.MxJobPhaseFailed)
if err := j.updateTPRStatus(); err != nil {
log.Errorf("failed to update cluster phase (%v): %v", spec.MxJobPhaseFailed, err)
}
}
return
}
j.run(stopC)
}()
return j, nil
}
// createResources creates all the replicas
func (j *TrainingJob) createResources() error {
for _, r := range j.Replicas {
if err := r.Create(); err != nil {
return err
}
}
return nil
}
// deleteResources deletes the replicas
func (j *TrainingJob) deleteResources() error {
for _, r := range j.Replicas {
if err := r.Delete(); err != nil {
return err
}
}
return nil
}
// TODO(jlewi): We can probably delete this.
//func replicaSetStatusToProto(r *MXReplicaSet, status *MXReplicaSetStatus) *tpb.MXReplicaSetStatus {
//
// p := &tpb.MXReplicaSetStatus{
// State: status.State.Enum(),
// // Type: r.Spec.MxReplicaTypeProcess.Type,
// ReplicaStates: make([]*tpb.MXReplicaSetStatus_ReplicaStates, 0),
// }
//
// for state, count := range status.ReplicasStates {
// p.ReplicaStates = append(p.ReplicaStates, &tpb.MXReplicaSetStatus_ReplicaStates{
// State: state.Enum(),
// NumReplicas: proto.Int(count),
// })
// }
// return p
//}
func (j *TrainingJob) GetStatus() (spec.State, []*spec.MxReplicaStatus, error) {
state := spec.StateUnknown
replicaStatuses := make([]*spec.MxReplicaStatus, 0)
// The state for each replica.
// TODO(jlewi): We will need to modify this code if we want to allow multiples of a given type of replica.
replicaSetStates := make(map[spec.MxReplicaType]spec.ReplicaState)
for _, r := range j.Replicas {
rStatus, err := r.GetStatus()
if err != nil {
log.Errorf("GetStatus() for %v returned error; %v", r.Spec.MxReplicaType, err)
}
replicaSetStates[r.Spec.MxReplicaType] = rStatus.State
replicaStatuses = append(replicaStatuses, &rStatus)
// If any replicas are failed mark job as failed.
if rStatus.State == spec.ReplicaStateFailed {
state = spec.StateFailed
}
}
if j.job.Spec.JobMode == spec.LocalJob {
if v, ok := replicaSetStates[spec.WORKER]; ok && v == spec.ReplicaStateSucceeded {
state = spec.StateSucceeded
return state, replicaStatuses, nil
}
} else if j.job.Spec.JobMode == spec.DistJob {
if v, ok := replicaSetStates[spec.SCHEDULER]; ok && v == spec.ReplicaStateSucceeded {
state = spec.StateSucceeded
return state, replicaStatuses, nil
}
}
for _, s := range replicaSetStates {
if s == spec.ReplicaStateFailed {
state = spec.StateFailed
return state, replicaStatuses, nil
}
}
state = spec.StateRunning
return state, replicaStatuses, nil
}
// isRetryableTerminationState returns true if a container terminated in a state
// that we consider retryable.
func isRetryableTerminationState(s *v1.ContainerStateTerminated) bool {
// TODO(jlewi): Need to match logic in
// https://cs.corp.google.com/piper///depot/google3/cloud/ml/beta/job/training_job_state_util.cc?l=88
if s.Reason == "OOMKilled" {
// If the user's process causes an OOM and Docker kills the container,
// the termination reason of ContainerState will be specified to
// 'OOMKilled'. In this case, we can't assume this to be a retryable error.
//
// This check should happen before checking the termination log, since
// if the container terminated with an OOM, the termination log may not
// be written.
return false
}
if s.Message == "" {
// launcher.sh should produce a termination log message. So if Kubernetes
// doesn't report a termmination message then we can infer that
// launcher.sh didn't exit cleanly. For example, the container might
// have failed to start. We consider this a retryable error regardless
// of the actual exit code.
return true
}
// TODO(jlewi): Should we use the exit code reported in the termination
// log message and not the ExitCode reported by the container.
if s.ExitCode >= 0 && s.ExitCode <= 127 {
// For the exit_code in [0, 127]:
// 0 means success,
// 1 - 127 corresponds to permanent user errors.
// We don't want to retry for both cases.
// More info about exit status can be found in:
// https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html
return false
}
// For the remaining cases that exit_code from workers that doesn't
// fall into [0, 127]. They can be:
// 137 corresponds to SIGKILL,
// 143 corresponds to SIGTERM,
// other values that have undefined behavior.
// We treat them as internal errors for now and all the internal errors
// will be retired.
return true
}
func (j *TrainingJob) masterName() string {
return fmt.Sprintf("master-%v-0", j.job.Spec.RuntimeId)
}
// setup the training job.
func (j *TrainingJob) setup(config *spec.ControllerConfig) error {
if j.job == nil {
return fmt.Errorf("job.Spec can't be nil")
}
err := j.job.Spec.SetDefaults()
if err != nil |
err = j.job.Spec.Validate()
if err != nil {
return fmt.Errorf("invalid job spec: %v", err)
}
for _, t := range j.job.Spec.ReplicaSpecs {
r, err := NewMXReplicaSet(j.KubeCli, *t, j)
if err != nil {
return err
}
j.Replicas = append(j.Replicas, r)
}
if err := j.job.Spec.ConfigureAccelerators(config.Accelerators); err != nil {
return fmt.Errorf("ConfigureAccelerators(...) error; %v", err)
}
if j.job.Spec.RuntimeId == "" {
j.job.Spec.RuntimeId = util.RandString(4)
}
var shouldCreateCluster bool
switch j.status.Phase {
case spec.MxJobPhaseNone:
shouldCreateCluster = true
//case spec.MxJobPhaseCreating:
// return errCreatedCluster
case spec.MxJobPhaseRunning:
shouldCreateCluster = false
case spec.MxJobPhaseFailed:
shouldCreateCluster = false
default:
return fmt.Errorf("unexpected MxJob phase: %s", j.status.Phase)
}
if shouldCreateCluster {
return j.triggerCreatePhase()
}
return nil
}
// triggerCreatePhase sets the phase to MxJobPhaseCreating additional resource creation happens in TrainingJob.run
// TODO(jlewi): Need to reconcile this function copied from the etcd core operator OS code with the pattern
// for the MX job. What exactly do we want to do during the Create job phase? Right now the create method
// is called on each invocation of reconcile in run to ensure all the required resources exist. Maybe there's
// a better way?
func (j *TrainingJob) triggerCreatePhase() error {
j.status.SetPhase(spec.MxJobPhaseCreating)
if err := j.updateTPRStatus(); err != nil {
return fmt.Errorf("cluster create: failed to update MxJob phase (%v): %v", spec.MxJobPhaseCreating, err)
}
log.Infof("Creating job: %v with Spec (%#v), Status (%#v)", j.job.Metadata.Name, j.job.Spec, j.job.Status)
// TODO(jlewi): I think this collects all the existing resources that have the labels indicating
// they should be owned by this job. Do they get deleted?
j.gc.CollectJob(j.job.Metadata.Name, j.job.Metadata.UID)
return nil
}
func (j *TrainingJob) Delete() {
// Delete doesn't actually delete any resources. It just sends an event which will be processed by the run
// method.
j.send(&jobEvent{typ: eventDeleteJob})
}
// TODO(jlewi): This delete function was copied from the etcd-operator. Need to figure out what the right thing to
// do is. Should we be calling deleteReplicas here?
func (j *TrainingJob) delete() {
j.gc.CollectJob(j.job.Metadata.Name, garbagecollection.NullUID)
}
// TODO(jlewi): This is sending a clusterEvent to the channel. I think these are events
// coming from the cluster code and not k8s events.
func (j *TrainingJob) send(ev *jobEvent) {
select {
case j.eventCh <- ev:
l, ecap := len(j.eventCh), cap(j.eventCh)
if l > int(float64(ecap)*0.8) {
log.Warningf("eventCh buffer is almost full [%d/%d]", l, ecap)
}
case <-j.stopCh:
}
}
// Update sends an update event for the job.
func (j *TrainingJob) Update(newJob *spec.MxJob) {
j.send(&jobEvent{
typ: eventModifyJob,
cluster: newJob,
})
}
// updateTPRStatus updates the job status based on TraingingJob.status.
func (j *TrainingJob) updateTPRStatus() error {
// If the status hasn't changed then there's no reason to update the TPR.
if reflect.DeepEqual(j.job.Status, j.status) {
return nil
}
newJob := j.job
newJob.Status = j.status
newJob, err := j.mxJobClient.Update(j.job.Metadata.Namespace, newJob)
if err != nil {
return err
}
j.job = newJob
return nil
}
func (j *TrainingJob) run(stopC <-chan struct{}) {
// TODO(jlewi): What does the run function do?
clusterFailed := false
defer func() {
if clusterFailed {
j.reportFailedStatus()
log.Infof("Deleting the failed MxJob")
j.delete()
}
close(j.stopCh)
}()
for {
select {
case <-stopC:
return
case event := <-j.eventCh:
switch event.typ {
// TODO(jlewi): We need handle a modify event.
//case eventModifyCluster:
// if isSpecEqual(event.cluster.Spec, j.job.Spec) {
// break
// }
case eventDeleteJob:
// TODO(jlewi): Delete is what should cause us to delete the Pods.
// we shouldn't delete the pods when the jobs finish because leaving the pods
// allows us to get the logs from the pods after the job finishes.
//
log.Infof("MxJob is deleted by the user")
// TODO(jlewi): This logic is probably insufficient.
if j.job.Status.Phase != spec.MxJobPhaseCleanUp {
j.status.SetPhase(spec.MxJobPhaseCleanUp)
}
if cErr := j.deleteResources(); cErr != nil {
log.Errorf("trainingJob.deleteResources() error; %v", cErr)
}
// j.status.SetPhase(spec.MxJobPhaseDone)
// Return from run because we want to stop reconciling the object.
return
}
case <-time.After(reconcileInterval):
// TODO(jlewi): Can we determine from the TPR status whether we should
// Create the resources or not? We need to ensure the resources exist so for
// now we always call Create.
if j.job.Status.Phase == spec.MxJobPhaseCreating {
// We call Create to make sure all the resources exist and are running.
if cErr := j.createResources(); cErr != nil {
log.Errorf("trainingJobCreateReplicas() error; %v", cErr)
} else {
// Update the phase to running.
j.status.SetPhase(spec.MxJobPhaseRunning)
if err := j.updateTPRStatus(); err != nil {
log.Warningf("failed to update TPR status: %v", err)
}
log.Infof("start running...")
}
}
state, replicaStatuses, err := j.GetStatus()
j.status.ReplicaStatuses = replicaStatuses
if err != nil {
log.Errorf("GetStatus() for job %v returned error: %v", j.job.Metadata.Name, err)
}
// TODO(jlewi): We should update the Phase if we detect the job is done.
if state == spec.StateFailed {
log.Errorf("Master failed Job: %v.", j.job.Metadata.Name)
j.status.SetPhase(spec.MxJobPhaseDone)
j.status.SetState(spec.StateFailed)
} else if state == spec.StateSucceeded {
log.Infof("Master succeeded Job: %v.", j.job.Metadata.Name)
j.status.SetPhase(spec.MxJobPhaseDone)
j.status.SetState(spec.StateSucceeded)
} else {
log.V(1).Infof("Job %v status=%v", j.job.Metadata.Name, util.Pformat(j.status))
}
// If the phase changed we should update the TPR.
if err := j.updateTPRStatus(); err != nil {
log.Warningf("Job %v, failed to update TPR status error: %v", j.job.Metadata.Name, err)
}
if j.job.Status.Phase == spec.MxJobPhaseCleanUp {
if cErr := j.deleteResources(); cErr != nil {
log.Errorf("Job %v trainingJob.Delete() error; %v", j.job.Metadata.Name, cErr)
}
// j.status.SetPhase(spec.MxJobPhaseDone)
// Return from run because we want to stop reconciling the object.
return
}
}
}
}
//func isSpecEqual(s1, s2 spec.MxJobSpec) bool {
// // TODO(jlewi): Need to implement this function.
// return false
// //if s1.Size != s2.Size || s1.Paused != s2.Paused || s1.Version != s2.Version {
// // return false
// //}
// //return isBackupPolicyEqual(s1.Backup, s2.Backup)
//}
// TODO(jlewi): We probably need to update this function.
func (j *TrainingJob) reportFailedStatus() {
retryInterval := 5 * time.Second
f := func() (bool, error) {
j.status.SetPhase(spec.MxJobPhaseFailed)
err := j.updateTPRStatus()
if err == nil || k8sutil.IsKubernetesResourceNotFoundError(err) {
return true, nil
}
if !apierrors.IsConflict(err) {
log.Warningf("retry report status in %v: fail to update: %v", retryInterval, err)
return false, nil
}
cl, err := j.mxJobClient.Get(j.job.Metadata.Namespace, j.job.Metadata.Name)
if err != nil {
// Update (PUT) will return conflict even if object is deleted since we have UID set in object.
// Because it will check UID first and return something like:
// "Precondition failed: UID in precondition: 0xc42712c0f0, UID in object meta: ".
if k8sutil.IsKubernetesResourceNotFoundError(err) {
return true, nil
}
log.Warningf("retry report status in %v: fail to get latest version: %v", retryInterval, err)
return false, nil
}
j.job = cl
return false, nil
}
retryutil.Retry(retryInterval, math.MaxInt64, f)
}
func (j *TrainingJob) name() string {
return j.job.Metadata.GetName()
}
| {
return fmt.Errorf("there was a problem setting defaults for job spec: %v", err)
} | conditional_block |
training.go | // training is a package for managing MXNet training jobs.
package trainer
import (
"fmt"
"reflect"
"github.com/deepinsight/mxnet-operator/pkg/spec"
"github.com/deepinsight/mxnet-operator/pkg/util"
"github.com/deepinsight/mxnet-operator/pkg/util/k8sutil"
"github.com/deepinsight/mxnet-operator/pkg/util/retryutil"
log "github.com/golang/glog"
"math"
"sync"
"time"
"github.com/deepinsight/mxnet-operator/pkg/garbagecollection"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
)
const (
NAMESPACE string = "default"
)
var (
reconcileInterval = 8 * time.Second
)
type jobEventType string
const (
eventDeleteJob jobEventType = "Delete"
eventModifyJob jobEventType = "Modify"
)
type jobEvent struct {
typ jobEventType
// TODO(jlewi): Rename cluster to job.
cluster *spec.MxJob
}
// TODO(jlewi): We should switch a New pattern and make trainingJob private so we can
// ensure correctness on creation.
type TrainingJob struct {
job *spec.MxJob
KubeCli kubernetes.Interface
Replicas []*MXReplicaSet
mxJobClient k8sutil.MxJobClient
// in memory state of the job.
// status is the source of truth after job struct is materialized. Changes to the status to be persisted
// should be made here.
status spec.MxJobStatus
memberCounter int
// eventCh is used to provide Kubernetes events for a particular cluster that need to be handled.
eventCh chan *jobEvent
// stopCh is a channel used to communicate that the cluster needs to be stopped.
stopCh chan struct{}
gc *garbagecollection.GC
}
func initJob(kubeCli kubernetes.Interface, mxJobClient k8sutil.MxJobClient, job *spec.MxJob, stopC <-chan struct{}, wg *sync.WaitGroup) (*TrainingJob, error) {
j := &TrainingJob{
KubeCli: kubeCli,
mxJobClient: mxJobClient,
Replicas: make([]*MXReplicaSet, 0),
job: job,
eventCh: make(chan *jobEvent, 100),
stopCh: make(chan struct{}),
status: job.Status.Copy(),
gc: garbagecollection.New(kubeCli, mxJobClient, job.Metadata.Namespace),
}
return j, nil
}
func NewJob(kubeCli kubernetes.Interface, mxJobClient k8sutil.MxJobClient, mxjob *spec.MxJob, stopC <-chan struct{}, wg *sync.WaitGroup, config *spec.ControllerConfig) (*TrainingJob, error) {
j, err := initJob(kubeCli, mxJobClient, mxjob, stopC, wg)
if err != nil {
return nil, err
}
// Increment the wait group which the controller uses to monitor the job processing.
wg.Add(1)
go func() {
defer wg.Done()
if err := j.setup(config); err != nil {
log.Errorf("MxJob failed to setup: %v", err)
if j.status.Phase != spec.MxJobPhaseFailed {
j.status.SetReason(err.Error())
j.status.SetPhase(spec.MxJobPhaseFailed)
if err := j.updateTPRStatus(); err != nil {
log.Errorf("failed to update cluster phase (%v): %v", spec.MxJobPhaseFailed, err)
}
}
return
}
j.run(stopC)
}()
return j, nil
}
// createResources creates all the replicas
func (j *TrainingJob) createResources() error {
for _, r := range j.Replicas {
if err := r.Create(); err != nil {
return err
}
}
return nil
}
// deleteResources deletes the replicas
func (j *TrainingJob) deleteResources() error {
for _, r := range j.Replicas {
if err := r.Delete(); err != nil {
return err
}
}
return nil
}
// TODO(jlewi): We can probably delete this.
//func replicaSetStatusToProto(r *MXReplicaSet, status *MXReplicaSetStatus) *tpb.MXReplicaSetStatus {
//
// p := &tpb.MXReplicaSetStatus{
// State: status.State.Enum(),
// // Type: r.Spec.MxReplicaTypeProcess.Type,
// ReplicaStates: make([]*tpb.MXReplicaSetStatus_ReplicaStates, 0),
// }
//
// for state, count := range status.ReplicasStates {
// p.ReplicaStates = append(p.ReplicaStates, &tpb.MXReplicaSetStatus_ReplicaStates{
// State: state.Enum(),
// NumReplicas: proto.Int(count),
// })
// }
// return p
//}
func (j *TrainingJob) GetStatus() (spec.State, []*spec.MxReplicaStatus, error) {
state := spec.StateUnknown
replicaStatuses := make([]*spec.MxReplicaStatus, 0)
// The state for each replica.
// TODO(jlewi): We will need to modify this code if we want to allow multiples of a given type of replica.
replicaSetStates := make(map[spec.MxReplicaType]spec.ReplicaState)
for _, r := range j.Replicas {
rStatus, err := r.GetStatus()
if err != nil {
log.Errorf("GetStatus() for %v returned error; %v", r.Spec.MxReplicaType, err)
}
replicaSetStates[r.Spec.MxReplicaType] = rStatus.State
replicaStatuses = append(replicaStatuses, &rStatus)
// If any replicas are failed mark job as failed.
if rStatus.State == spec.ReplicaStateFailed {
state = spec.StateFailed
}
}
if j.job.Spec.JobMode == spec.LocalJob {
if v, ok := replicaSetStates[spec.WORKER]; ok && v == spec.ReplicaStateSucceeded {
state = spec.StateSucceeded
return state, replicaStatuses, nil
}
} else if j.job.Spec.JobMode == spec.DistJob {
if v, ok := replicaSetStates[spec.SCHEDULER]; ok && v == spec.ReplicaStateSucceeded {
state = spec.StateSucceeded
return state, replicaStatuses, nil
}
}
for _, s := range replicaSetStates {
if s == spec.ReplicaStateFailed {
state = spec.StateFailed
return state, replicaStatuses, nil
}
}
state = spec.StateRunning
return state, replicaStatuses, nil
}
// isRetryableTerminationState returns true if a container terminated in a state
// that we consider retryable.
func isRetryableTerminationState(s *v1.ContainerStateTerminated) bool {
// TODO(jlewi): Need to match logic in
// https://cs.corp.google.com/piper///depot/google3/cloud/ml/beta/job/training_job_state_util.cc?l=88
if s.Reason == "OOMKilled" {
// If the user's process causes an OOM and Docker kills the container,
// the termination reason of ContainerState will be specified to
// 'OOMKilled'. In this case, we can't assume this to be a retryable error.
//
// This check should happen before checking the termination log, since
// if the container terminated with an OOM, the termination log may not
// be written.
return false
}
if s.Message == "" {
// launcher.sh should produce a termination log message. So if Kubernetes
// doesn't report a termmination message then we can infer that
// launcher.sh didn't exit cleanly. For example, the container might
// have failed to start. We consider this a retryable error regardless
// of the actual exit code.
return true
}
// TODO(jlewi): Should we use the exit code reported in the termination
// log message and not the ExitCode reported by the container.
if s.ExitCode >= 0 && s.ExitCode <= 127 {
// For the exit_code in [0, 127]:
// 0 means success,
// 1 - 127 corresponds to permanent user errors.
// We don't want to retry for both cases.
// More info about exit status can be found in:
// https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html
return false
}
// For the remaining cases that exit_code from workers that doesn't
// fall into [0, 127]. They can be:
// 137 corresponds to SIGKILL,
// 143 corresponds to SIGTERM,
// other values that have undefined behavior.
// We treat them as internal errors for now and all the internal errors
// will be retired.
return true
}
func (j *TrainingJob) masterName() string {
return fmt.Sprintf("master-%v-0", j.job.Spec.RuntimeId)
}
// setup the training job.
func (j *TrainingJob) setup(config *spec.ControllerConfig) error {
if j.job == nil {
return fmt.Errorf("job.Spec can't be nil")
}
err := j.job.Spec.SetDefaults()
if err != nil {
return fmt.Errorf("there was a problem setting defaults for job spec: %v", err)
}
err = j.job.Spec.Validate()
if err != nil {
return fmt.Errorf("invalid job spec: %v", err)
}
for _, t := range j.job.Spec.ReplicaSpecs {
r, err := NewMXReplicaSet(j.KubeCli, *t, j)
if err != nil {
return err
}
j.Replicas = append(j.Replicas, r)
}
if err := j.job.Spec.ConfigureAccelerators(config.Accelerators); err != nil {
return fmt.Errorf("ConfigureAccelerators(...) error; %v", err)
}
if j.job.Spec.RuntimeId == "" {
j.job.Spec.RuntimeId = util.RandString(4)
}
var shouldCreateCluster bool
switch j.status.Phase {
case spec.MxJobPhaseNone:
shouldCreateCluster = true
//case spec.MxJobPhaseCreating:
// return errCreatedCluster
case spec.MxJobPhaseRunning:
shouldCreateCluster = false
case spec.MxJobPhaseFailed:
shouldCreateCluster = false
default:
return fmt.Errorf("unexpected MxJob phase: %s", j.status.Phase)
}
if shouldCreateCluster {
return j.triggerCreatePhase()
}
return nil
}
// triggerCreatePhase sets the phase to MxJobPhaseCreating additional resource creation happens in TrainingJob.run
// TODO(jlewi): Need to reconcile this function copied from the etcd core operator OS code with the pattern
// for the MX job. What exactly do we want to do during the Create job phase? Right now the create method
// is called on each invocation of reconcile in run to ensure all the required resources exist. Maybe there's
// a better way?
func (j *TrainingJob) triggerCreatePhase() error {
j.status.SetPhase(spec.MxJobPhaseCreating)
if err := j.updateTPRStatus(); err != nil {
return fmt.Errorf("cluster create: failed to update MxJob phase (%v): %v", spec.MxJobPhaseCreating, err)
}
log.Infof("Creating job: %v with Spec (%#v), Status (%#v)", j.job.Metadata.Name, j.job.Spec, j.job.Status)
// TODO(jlewi): I think this collects all the existing resources that have the labels indicating
// they should be owned by this job. Do they get deleted?
j.gc.CollectJob(j.job.Metadata.Name, j.job.Metadata.UID)
return nil
}
func (j *TrainingJob) Delete() {
// Delete doesn't actually delete any resources. It just sends an event which will be processed by the run
// method.
j.send(&jobEvent{typ: eventDeleteJob})
}
// TODO(jlewi): This delete function was copied from the etcd-operator. Need to figure out what the right thing to
// do is. Should we be calling deleteReplicas here?
func (j *TrainingJob) delete() {
j.gc.CollectJob(j.job.Metadata.Name, garbagecollection.NullUID)
}
// TODO(jlewi): This is sending a clusterEvent to the channel. I think these are events
// coming from the cluster code and not k8s events.
func (j *TrainingJob) send(ev *jobEvent) {
select {
case j.eventCh <- ev:
l, ecap := len(j.eventCh), cap(j.eventCh)
if l > int(float64(ecap)*0.8) {
log.Warningf("eventCh buffer is almost full [%d/%d]", l, ecap)
}
case <-j.stopCh:
}
}
// Update sends an update event for the job.
func (j *TrainingJob) Update(newJob *spec.MxJob) {
j.send(&jobEvent{
typ: eventModifyJob,
cluster: newJob,
})
}
// updateTPRStatus updates the job status based on TraingingJob.status.
func (j *TrainingJob) updateTPRStatus() error {
// If the status hasn't changed then there's no reason to update the TPR.
if reflect.DeepEqual(j.job.Status, j.status) {
return nil
}
newJob := j.job
newJob.Status = j.status
newJob, err := j.mxJobClient.Update(j.job.Metadata.Namespace, newJob)
if err != nil {
return err
}
j.job = newJob
return nil
}
func (j *TrainingJob) run(stopC <-chan struct{}) {
// TODO(jlewi): What does the run function do?
clusterFailed := false
defer func() {
if clusterFailed {
j.reportFailedStatus()
log.Infof("Deleting the failed MxJob")
j.delete()
}
close(j.stopCh)
}()
for {
select {
case <-stopC:
return
case event := <-j.eventCh:
switch event.typ {
// TODO(jlewi): We need handle a modify event.
//case eventModifyCluster:
// if isSpecEqual(event.cluster.Spec, j.job.Spec) {
// break
// }
case eventDeleteJob:
// TODO(jlewi): Delete is what should cause us to delete the Pods.
// we shouldn't delete the pods when the jobs finish because leaving the pods
// allows us to get the logs from the pods after the job finishes.
//
log.Infof("MxJob is deleted by the user")
// TODO(jlewi): This logic is probably insufficient.
if j.job.Status.Phase != spec.MxJobPhaseCleanUp {
j.status.SetPhase(spec.MxJobPhaseCleanUp)
}
if cErr := j.deleteResources(); cErr != nil {
log.Errorf("trainingJob.deleteResources() error; %v", cErr)
}
// j.status.SetPhase(spec.MxJobPhaseDone)
// Return from run because we want to stop reconciling the object.
return
}
case <-time.After(reconcileInterval):
// TODO(jlewi): Can we determine from the TPR status whether we should
// Create the resources or not? We need to ensure the resources exist so for
// now we always call Create.
if j.job.Status.Phase == spec.MxJobPhaseCreating {
// We call Create to make sure all the resources exist and are running.
if cErr := j.createResources(); cErr != nil {
log.Errorf("trainingJobCreateReplicas() error; %v", cErr)
} else {
// Update the phase to running.
j.status.SetPhase(spec.MxJobPhaseRunning)
if err := j.updateTPRStatus(); err != nil {
log.Warningf("failed to update TPR status: %v", err)
}
log.Infof("start running...")
}
}
state, replicaStatuses, err := j.GetStatus()
j.status.ReplicaStatuses = replicaStatuses
if err != nil {
log.Errorf("GetStatus() for job %v returned error: %v", j.job.Metadata.Name, err)
}
// TODO(jlewi): We should update the Phase if we detect the job is done.
if state == spec.StateFailed {
log.Errorf("Master failed Job: %v.", j.job.Metadata.Name)
j.status.SetPhase(spec.MxJobPhaseDone)
j.status.SetState(spec.StateFailed)
} else if state == spec.StateSucceeded {
log.Infof("Master succeeded Job: %v.", j.job.Metadata.Name)
j.status.SetPhase(spec.MxJobPhaseDone)
j.status.SetState(spec.StateSucceeded)
} else {
log.V(1).Infof("Job %v status=%v", j.job.Metadata.Name, util.Pformat(j.status))
}
// If the phase changed we should update the TPR.
if err := j.updateTPRStatus(); err != nil {
log.Warningf("Job %v, failed to update TPR status error: %v", j.job.Metadata.Name, err)
}
if j.job.Status.Phase == spec.MxJobPhaseCleanUp {
if cErr := j.deleteResources(); cErr != nil {
log.Errorf("Job %v trainingJob.Delete() error; %v", j.job.Metadata.Name, cErr)
}
// j.status.SetPhase(spec.MxJobPhaseDone)
// Return from run because we want to stop reconciling the object.
return
}
}
}
}
//func isSpecEqual(s1, s2 spec.MxJobSpec) bool {
// // TODO(jlewi): Need to implement this function.
// return false
// //if s1.Size != s2.Size || s1.Paused != s2.Paused || s1.Version != s2.Version {
// // return false
// //}
// //return isBackupPolicyEqual(s1.Backup, s2.Backup)
//}
// TODO(jlewi): We probably need to update this function.
func (j *TrainingJob) reportFailedStatus() {
retryInterval := 5 * time.Second
f := func() (bool, error) {
j.status.SetPhase(spec.MxJobPhaseFailed)
err := j.updateTPRStatus()
if err == nil || k8sutil.IsKubernetesResourceNotFoundError(err) {
return true, nil
}
if !apierrors.IsConflict(err) {
log.Warningf("retry report status in %v: fail to update: %v", retryInterval, err)
return false, nil
}
cl, err := j.mxJobClient.Get(j.job.Metadata.Namespace, j.job.Metadata.Name)
if err != nil {
// Update (PUT) will return conflict even if object is deleted since we have UID set in object. | }
log.Warningf("retry report status in %v: fail to get latest version: %v", retryInterval, err)
return false, nil
}
j.job = cl
return false, nil
}
retryutil.Retry(retryInterval, math.MaxInt64, f)
}
func (j *TrainingJob) name() string {
return j.job.Metadata.GetName()
} | // Because it will check UID first and return something like:
// "Precondition failed: UID in precondition: 0xc42712c0f0, UID in object meta: ".
if k8sutil.IsKubernetesResourceNotFoundError(err) {
return true, nil | random_line_split |
training.go | // training is a package for managing MXNet training jobs.
package trainer
import (
"fmt"
"reflect"
"github.com/deepinsight/mxnet-operator/pkg/spec"
"github.com/deepinsight/mxnet-operator/pkg/util"
"github.com/deepinsight/mxnet-operator/pkg/util/k8sutil"
"github.com/deepinsight/mxnet-operator/pkg/util/retryutil"
log "github.com/golang/glog"
"math"
"sync"
"time"
"github.com/deepinsight/mxnet-operator/pkg/garbagecollection"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
)
const (
NAMESPACE string = "default"
)
var (
reconcileInterval = 8 * time.Second
)
type jobEventType string
const (
eventDeleteJob jobEventType = "Delete"
eventModifyJob jobEventType = "Modify"
)
type jobEvent struct {
typ jobEventType
// TODO(jlewi): Rename cluster to job.
cluster *spec.MxJob
}
// TODO(jlewi): We should switch a New pattern and make trainingJob private so we can
// ensure correctness on creation.
type TrainingJob struct {
job *spec.MxJob
KubeCli kubernetes.Interface
Replicas []*MXReplicaSet
mxJobClient k8sutil.MxJobClient
// in memory state of the job.
// status is the source of truth after job struct is materialized. Changes to the status to be persisted
// should be made here.
status spec.MxJobStatus
memberCounter int
// eventCh is used to provide Kubernetes events for a particular cluster that need to be handled.
eventCh chan *jobEvent
// stopCh is a channel used to communicate that the cluster needs to be stopped.
stopCh chan struct{}
gc *garbagecollection.GC
}
func initJob(kubeCli kubernetes.Interface, mxJobClient k8sutil.MxJobClient, job *spec.MxJob, stopC <-chan struct{}, wg *sync.WaitGroup) (*TrainingJob, error) {
j := &TrainingJob{
KubeCli: kubeCli,
mxJobClient: mxJobClient,
Replicas: make([]*MXReplicaSet, 0),
job: job,
eventCh: make(chan *jobEvent, 100),
stopCh: make(chan struct{}),
status: job.Status.Copy(),
gc: garbagecollection.New(kubeCli, mxJobClient, job.Metadata.Namespace),
}
return j, nil
}
func NewJob(kubeCli kubernetes.Interface, mxJobClient k8sutil.MxJobClient, mxjob *spec.MxJob, stopC <-chan struct{}, wg *sync.WaitGroup, config *spec.ControllerConfig) (*TrainingJob, error) {
j, err := initJob(kubeCli, mxJobClient, mxjob, stopC, wg)
if err != nil {
return nil, err
}
// Increment the wait group which the controller uses to monitor the job processing.
wg.Add(1)
go func() {
defer wg.Done()
if err := j.setup(config); err != nil {
log.Errorf("MxJob failed to setup: %v", err)
if j.status.Phase != spec.MxJobPhaseFailed {
j.status.SetReason(err.Error())
j.status.SetPhase(spec.MxJobPhaseFailed)
if err := j.updateTPRStatus(); err != nil {
log.Errorf("failed to update cluster phase (%v): %v", spec.MxJobPhaseFailed, err)
}
}
return
}
j.run(stopC)
}()
return j, nil
}
// createResources creates all the replicas
func (j *TrainingJob) createResources() error {
for _, r := range j.Replicas {
if err := r.Create(); err != nil {
return err
}
}
return nil
}
// deleteResources deletes the replicas
func (j *TrainingJob) deleteResources() error {
for _, r := range j.Replicas {
if err := r.Delete(); err != nil {
return err
}
}
return nil
}
// TODO(jlewi): We can probably delete this.
//func replicaSetStatusToProto(r *MXReplicaSet, status *MXReplicaSetStatus) *tpb.MXReplicaSetStatus {
//
// p := &tpb.MXReplicaSetStatus{
// State: status.State.Enum(),
// // Type: r.Spec.MxReplicaTypeProcess.Type,
// ReplicaStates: make([]*tpb.MXReplicaSetStatus_ReplicaStates, 0),
// }
//
// for state, count := range status.ReplicasStates {
// p.ReplicaStates = append(p.ReplicaStates, &tpb.MXReplicaSetStatus_ReplicaStates{
// State: state.Enum(),
// NumReplicas: proto.Int(count),
// })
// }
// return p
//}
func (j *TrainingJob) GetStatus() (spec.State, []*spec.MxReplicaStatus, error) |
// isRetryableTerminationState returns true if a container terminated in a state
// that we consider retryable.
func isRetryableTerminationState(s *v1.ContainerStateTerminated) bool {
// TODO(jlewi): Need to match logic in
// https://cs.corp.google.com/piper///depot/google3/cloud/ml/beta/job/training_job_state_util.cc?l=88
if s.Reason == "OOMKilled" {
// If the user's process causes an OOM and Docker kills the container,
// the termination reason of ContainerState will be specified to
// 'OOMKilled'. In this case, we can't assume this to be a retryable error.
//
// This check should happen before checking the termination log, since
// if the container terminated with an OOM, the termination log may not
// be written.
return false
}
if s.Message == "" {
// launcher.sh should produce a termination log message. So if Kubernetes
// doesn't report a termmination message then we can infer that
// launcher.sh didn't exit cleanly. For example, the container might
// have failed to start. We consider this a retryable error regardless
// of the actual exit code.
return true
}
// TODO(jlewi): Should we use the exit code reported in the termination
// log message and not the ExitCode reported by the container.
if s.ExitCode >= 0 && s.ExitCode <= 127 {
// For the exit_code in [0, 127]:
// 0 means success,
// 1 - 127 corresponds to permanent user errors.
// We don't want to retry for both cases.
// More info about exit status can be found in:
// https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html
return false
}
// For the remaining cases that exit_code from workers that doesn't
// fall into [0, 127]. They can be:
// 137 corresponds to SIGKILL,
// 143 corresponds to SIGTERM,
// other values that have undefined behavior.
// We treat them as internal errors for now and all the internal errors
// will be retired.
return true
}
func (j *TrainingJob) masterName() string {
return fmt.Sprintf("master-%v-0", j.job.Spec.RuntimeId)
}
// setup the training job.
func (j *TrainingJob) setup(config *spec.ControllerConfig) error {
if j.job == nil {
return fmt.Errorf("job.Spec can't be nil")
}
err := j.job.Spec.SetDefaults()
if err != nil {
return fmt.Errorf("there was a problem setting defaults for job spec: %v", err)
}
err = j.job.Spec.Validate()
if err != nil {
return fmt.Errorf("invalid job spec: %v", err)
}
for _, t := range j.job.Spec.ReplicaSpecs {
r, err := NewMXReplicaSet(j.KubeCli, *t, j)
if err != nil {
return err
}
j.Replicas = append(j.Replicas, r)
}
if err := j.job.Spec.ConfigureAccelerators(config.Accelerators); err != nil {
return fmt.Errorf("ConfigureAccelerators(...) error; %v", err)
}
if j.job.Spec.RuntimeId == "" {
j.job.Spec.RuntimeId = util.RandString(4)
}
var shouldCreateCluster bool
switch j.status.Phase {
case spec.MxJobPhaseNone:
shouldCreateCluster = true
//case spec.MxJobPhaseCreating:
// return errCreatedCluster
case spec.MxJobPhaseRunning:
shouldCreateCluster = false
case spec.MxJobPhaseFailed:
shouldCreateCluster = false
default:
return fmt.Errorf("unexpected MxJob phase: %s", j.status.Phase)
}
if shouldCreateCluster {
return j.triggerCreatePhase()
}
return nil
}
// triggerCreatePhase sets the phase to MxJobPhaseCreating additional resource creation happens in TrainingJob.run
// TODO(jlewi): Need to reconcile this function copied from the etcd core operator OS code with the pattern
// for the MX job. What exactly do we want to do during the Create job phase? Right now the create method
// is called on each invocation of reconcile in run to ensure all the required resources exist. Maybe there's
// a better way?
func (j *TrainingJob) triggerCreatePhase() error {
j.status.SetPhase(spec.MxJobPhaseCreating)
if err := j.updateTPRStatus(); err != nil {
return fmt.Errorf("cluster create: failed to update MxJob phase (%v): %v", spec.MxJobPhaseCreating, err)
}
log.Infof("Creating job: %v with Spec (%#v), Status (%#v)", j.job.Metadata.Name, j.job.Spec, j.job.Status)
// TODO(jlewi): I think this collects all the existing resources that have the labels indicating
// they should be owned by this job. Do they get deleted?
j.gc.CollectJob(j.job.Metadata.Name, j.job.Metadata.UID)
return nil
}
func (j *TrainingJob) Delete() {
// Delete doesn't actually delete any resources. It just sends an event which will be processed by the run
// method.
j.send(&jobEvent{typ: eventDeleteJob})
}
// TODO(jlewi): This delete function was copied from the etcd-operator. Need to figure out what the right thing to
// do is. Should we be calling deleteReplicas here?
func (j *TrainingJob) delete() {
j.gc.CollectJob(j.job.Metadata.Name, garbagecollection.NullUID)
}
// TODO(jlewi): This is sending a clusterEvent to the channel. I think these are events
// coming from the cluster code and not k8s events.
func (j *TrainingJob) send(ev *jobEvent) {
select {
case j.eventCh <- ev:
l, ecap := len(j.eventCh), cap(j.eventCh)
if l > int(float64(ecap)*0.8) {
log.Warningf("eventCh buffer is almost full [%d/%d]", l, ecap)
}
case <-j.stopCh:
}
}
// Update sends an update event for the job.
func (j *TrainingJob) Update(newJob *spec.MxJob) {
j.send(&jobEvent{
typ: eventModifyJob,
cluster: newJob,
})
}
// updateTPRStatus updates the job status based on TraingingJob.status.
func (j *TrainingJob) updateTPRStatus() error {
// If the status hasn't changed then there's no reason to update the TPR.
if reflect.DeepEqual(j.job.Status, j.status) {
return nil
}
newJob := j.job
newJob.Status = j.status
newJob, err := j.mxJobClient.Update(j.job.Metadata.Namespace, newJob)
if err != nil {
return err
}
j.job = newJob
return nil
}
func (j *TrainingJob) run(stopC <-chan struct{}) {
// TODO(jlewi): What does the run function do?
clusterFailed := false
defer func() {
if clusterFailed {
j.reportFailedStatus()
log.Infof("Deleting the failed MxJob")
j.delete()
}
close(j.stopCh)
}()
for {
select {
case <-stopC:
return
case event := <-j.eventCh:
switch event.typ {
// TODO(jlewi): We need handle a modify event.
//case eventModifyCluster:
// if isSpecEqual(event.cluster.Spec, j.job.Spec) {
// break
// }
case eventDeleteJob:
// TODO(jlewi): Delete is what should cause us to delete the Pods.
// we shouldn't delete the pods when the jobs finish because leaving the pods
// allows us to get the logs from the pods after the job finishes.
//
log.Infof("MxJob is deleted by the user")
// TODO(jlewi): This logic is probably insufficient.
if j.job.Status.Phase != spec.MxJobPhaseCleanUp {
j.status.SetPhase(spec.MxJobPhaseCleanUp)
}
if cErr := j.deleteResources(); cErr != nil {
log.Errorf("trainingJob.deleteResources() error; %v", cErr)
}
// j.status.SetPhase(spec.MxJobPhaseDone)
// Return from run because we want to stop reconciling the object.
return
}
case <-time.After(reconcileInterval):
// TODO(jlewi): Can we determine from the TPR status whether we should
// Create the resources or not? We need to ensure the resources exist so for
// now we always call Create.
if j.job.Status.Phase == spec.MxJobPhaseCreating {
// We call Create to make sure all the resources exist and are running.
if cErr := j.createResources(); cErr != nil {
log.Errorf("trainingJobCreateReplicas() error; %v", cErr)
} else {
// Update the phase to running.
j.status.SetPhase(spec.MxJobPhaseRunning)
if err := j.updateTPRStatus(); err != nil {
log.Warningf("failed to update TPR status: %v", err)
}
log.Infof("start running...")
}
}
state, replicaStatuses, err := j.GetStatus()
j.status.ReplicaStatuses = replicaStatuses
if err != nil {
log.Errorf("GetStatus() for job %v returned error: %v", j.job.Metadata.Name, err)
}
// TODO(jlewi): We should update the Phase if we detect the job is done.
if state == spec.StateFailed {
log.Errorf("Master failed Job: %v.", j.job.Metadata.Name)
j.status.SetPhase(spec.MxJobPhaseDone)
j.status.SetState(spec.StateFailed)
} else if state == spec.StateSucceeded {
log.Infof("Master succeeded Job: %v.", j.job.Metadata.Name)
j.status.SetPhase(spec.MxJobPhaseDone)
j.status.SetState(spec.StateSucceeded)
} else {
log.V(1).Infof("Job %v status=%v", j.job.Metadata.Name, util.Pformat(j.status))
}
// If the phase changed we should update the TPR.
if err := j.updateTPRStatus(); err != nil {
log.Warningf("Job %v, failed to update TPR status error: %v", j.job.Metadata.Name, err)
}
if j.job.Status.Phase == spec.MxJobPhaseCleanUp {
if cErr := j.deleteResources(); cErr != nil {
log.Errorf("Job %v trainingJob.Delete() error; %v", j.job.Metadata.Name, cErr)
}
// j.status.SetPhase(spec.MxJobPhaseDone)
// Return from run because we want to stop reconciling the object.
return
}
}
}
}
//func isSpecEqual(s1, s2 spec.MxJobSpec) bool {
// // TODO(jlewi): Need to implement this function.
// return false
// //if s1.Size != s2.Size || s1.Paused != s2.Paused || s1.Version != s2.Version {
// // return false
// //}
// //return isBackupPolicyEqual(s1.Backup, s2.Backup)
//}
// TODO(jlewi): We probably need to update this function.
func (j *TrainingJob) reportFailedStatus() {
retryInterval := 5 * time.Second
f := func() (bool, error) {
j.status.SetPhase(spec.MxJobPhaseFailed)
err := j.updateTPRStatus()
if err == nil || k8sutil.IsKubernetesResourceNotFoundError(err) {
return true, nil
}
if !apierrors.IsConflict(err) {
log.Warningf("retry report status in %v: fail to update: %v", retryInterval, err)
return false, nil
}
cl, err := j.mxJobClient.Get(j.job.Metadata.Namespace, j.job.Metadata.Name)
if err != nil {
// Update (PUT) will return conflict even if object is deleted since we have UID set in object.
// Because it will check UID first and return something like:
// "Precondition failed: UID in precondition: 0xc42712c0f0, UID in object meta: ".
if k8sutil.IsKubernetesResourceNotFoundError(err) {
return true, nil
}
log.Warningf("retry report status in %v: fail to get latest version: %v", retryInterval, err)
return false, nil
}
j.job = cl
return false, nil
}
retryutil.Retry(retryInterval, math.MaxInt64, f)
}
func (j *TrainingJob) name() string {
return j.job.Metadata.GetName()
}
| {
state := spec.StateUnknown
replicaStatuses := make([]*spec.MxReplicaStatus, 0)
// The state for each replica.
// TODO(jlewi): We will need to modify this code if we want to allow multiples of a given type of replica.
replicaSetStates := make(map[spec.MxReplicaType]spec.ReplicaState)
for _, r := range j.Replicas {
rStatus, err := r.GetStatus()
if err != nil {
log.Errorf("GetStatus() for %v returned error; %v", r.Spec.MxReplicaType, err)
}
replicaSetStates[r.Spec.MxReplicaType] = rStatus.State
replicaStatuses = append(replicaStatuses, &rStatus)
// If any replicas are failed mark job as failed.
if rStatus.State == spec.ReplicaStateFailed {
state = spec.StateFailed
}
}
if j.job.Spec.JobMode == spec.LocalJob {
if v, ok := replicaSetStates[spec.WORKER]; ok && v == spec.ReplicaStateSucceeded {
state = spec.StateSucceeded
return state, replicaStatuses, nil
}
} else if j.job.Spec.JobMode == spec.DistJob {
if v, ok := replicaSetStates[spec.SCHEDULER]; ok && v == spec.ReplicaStateSucceeded {
state = spec.StateSucceeded
return state, replicaStatuses, nil
}
}
for _, s := range replicaSetStates {
if s == spec.ReplicaStateFailed {
state = spec.StateFailed
return state, replicaStatuses, nil
}
}
state = spec.StateRunning
return state, replicaStatuses, nil
} | identifier_body |
training.go | // training is a package for managing MXNet training jobs.
package trainer
import (
"fmt"
"reflect"
"github.com/deepinsight/mxnet-operator/pkg/spec"
"github.com/deepinsight/mxnet-operator/pkg/util"
"github.com/deepinsight/mxnet-operator/pkg/util/k8sutil"
"github.com/deepinsight/mxnet-operator/pkg/util/retryutil"
log "github.com/golang/glog"
"math"
"sync"
"time"
"github.com/deepinsight/mxnet-operator/pkg/garbagecollection"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
)
const (
NAMESPACE string = "default"
)
var (
reconcileInterval = 8 * time.Second
)
type jobEventType string
const (
eventDeleteJob jobEventType = "Delete"
eventModifyJob jobEventType = "Modify"
)
type jobEvent struct {
typ jobEventType
// TODO(jlewi): Rename cluster to job.
cluster *spec.MxJob
}
// TODO(jlewi): We should switch a New pattern and make trainingJob private so we can
// ensure correctness on creation.
type TrainingJob struct {
job *spec.MxJob
KubeCli kubernetes.Interface
Replicas []*MXReplicaSet
mxJobClient k8sutil.MxJobClient
// in memory state of the job.
// status is the source of truth after job struct is materialized. Changes to the status to be persisted
// should be made here.
status spec.MxJobStatus
memberCounter int
// eventCh is used to provide Kubernetes events for a particular cluster that need to be handled.
eventCh chan *jobEvent
// stopCh is a channel used to communicate that the cluster needs to be stopped.
stopCh chan struct{}
gc *garbagecollection.GC
}
func initJob(kubeCli kubernetes.Interface, mxJobClient k8sutil.MxJobClient, job *spec.MxJob, stopC <-chan struct{}, wg *sync.WaitGroup) (*TrainingJob, error) {
j := &TrainingJob{
KubeCli: kubeCli,
mxJobClient: mxJobClient,
Replicas: make([]*MXReplicaSet, 0),
job: job,
eventCh: make(chan *jobEvent, 100),
stopCh: make(chan struct{}),
status: job.Status.Copy(),
gc: garbagecollection.New(kubeCli, mxJobClient, job.Metadata.Namespace),
}
return j, nil
}
func NewJob(kubeCli kubernetes.Interface, mxJobClient k8sutil.MxJobClient, mxjob *spec.MxJob, stopC <-chan struct{}, wg *sync.WaitGroup, config *spec.ControllerConfig) (*TrainingJob, error) {
j, err := initJob(kubeCli, mxJobClient, mxjob, stopC, wg)
if err != nil {
return nil, err
}
// Increment the wait group which the controller uses to monitor the job processing.
wg.Add(1)
go func() {
defer wg.Done()
if err := j.setup(config); err != nil {
log.Errorf("MxJob failed to setup: %v", err)
if j.status.Phase != spec.MxJobPhaseFailed {
j.status.SetReason(err.Error())
j.status.SetPhase(spec.MxJobPhaseFailed)
if err := j.updateTPRStatus(); err != nil {
log.Errorf("failed to update cluster phase (%v): %v", spec.MxJobPhaseFailed, err)
}
}
return
}
j.run(stopC)
}()
return j, nil
}
// createResources creates all the replicas
func (j *TrainingJob) createResources() error {
for _, r := range j.Replicas {
if err := r.Create(); err != nil {
return err
}
}
return nil
}
// deleteResources deletes the replicas
func (j *TrainingJob) deleteResources() error {
for _, r := range j.Replicas {
if err := r.Delete(); err != nil {
return err
}
}
return nil
}
// TODO(jlewi): We can probably delete this.
//func replicaSetStatusToProto(r *MXReplicaSet, status *MXReplicaSetStatus) *tpb.MXReplicaSetStatus {
//
// p := &tpb.MXReplicaSetStatus{
// State: status.State.Enum(),
// // Type: r.Spec.MxReplicaTypeProcess.Type,
// ReplicaStates: make([]*tpb.MXReplicaSetStatus_ReplicaStates, 0),
// }
//
// for state, count := range status.ReplicasStates {
// p.ReplicaStates = append(p.ReplicaStates, &tpb.MXReplicaSetStatus_ReplicaStates{
// State: state.Enum(),
// NumReplicas: proto.Int(count),
// })
// }
// return p
//}
func (j *TrainingJob) GetStatus() (spec.State, []*spec.MxReplicaStatus, error) {
state := spec.StateUnknown
replicaStatuses := make([]*spec.MxReplicaStatus, 0)
// The state for each replica.
// TODO(jlewi): We will need to modify this code if we want to allow multiples of a given type of replica.
replicaSetStates := make(map[spec.MxReplicaType]spec.ReplicaState)
for _, r := range j.Replicas {
rStatus, err := r.GetStatus()
if err != nil {
log.Errorf("GetStatus() for %v returned error; %v", r.Spec.MxReplicaType, err)
}
replicaSetStates[r.Spec.MxReplicaType] = rStatus.State
replicaStatuses = append(replicaStatuses, &rStatus)
// If any replicas are failed mark job as failed.
if rStatus.State == spec.ReplicaStateFailed {
state = spec.StateFailed
}
}
if j.job.Spec.JobMode == spec.LocalJob {
if v, ok := replicaSetStates[spec.WORKER]; ok && v == spec.ReplicaStateSucceeded {
state = spec.StateSucceeded
return state, replicaStatuses, nil
}
} else if j.job.Spec.JobMode == spec.DistJob {
if v, ok := replicaSetStates[spec.SCHEDULER]; ok && v == spec.ReplicaStateSucceeded {
state = spec.StateSucceeded
return state, replicaStatuses, nil
}
}
for _, s := range replicaSetStates {
if s == spec.ReplicaStateFailed {
state = spec.StateFailed
return state, replicaStatuses, nil
}
}
state = spec.StateRunning
return state, replicaStatuses, nil
}
// isRetryableTerminationState returns true if a container terminated in a state
// that we consider retryable.
func isRetryableTerminationState(s *v1.ContainerStateTerminated) bool {
// TODO(jlewi): Need to match logic in
// https://cs.corp.google.com/piper///depot/google3/cloud/ml/beta/job/training_job_state_util.cc?l=88
if s.Reason == "OOMKilled" {
// If the user's process causes an OOM and Docker kills the container,
// the termination reason of ContainerState will be specified to
// 'OOMKilled'. In this case, we can't assume this to be a retryable error.
//
// This check should happen before checking the termination log, since
// if the container terminated with an OOM, the termination log may not
// be written.
return false
}
if s.Message == "" {
// launcher.sh should produce a termination log message. So if Kubernetes
// doesn't report a termmination message then we can infer that
// launcher.sh didn't exit cleanly. For example, the container might
// have failed to start. We consider this a retryable error regardless
// of the actual exit code.
return true
}
// TODO(jlewi): Should we use the exit code reported in the termination
// log message and not the ExitCode reported by the container.
if s.ExitCode >= 0 && s.ExitCode <= 127 {
// For the exit_code in [0, 127]:
// 0 means success,
// 1 - 127 corresponds to permanent user errors.
// We don't want to retry for both cases.
// More info about exit status can be found in:
// https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html
return false
}
// For the remaining cases that exit_code from workers that doesn't
// fall into [0, 127]. They can be:
// 137 corresponds to SIGKILL,
// 143 corresponds to SIGTERM,
// other values that have undefined behavior.
// We treat them as internal errors for now and all the internal errors
// will be retired.
return true
}
func (j *TrainingJob) | () string {
return fmt.Sprintf("master-%v-0", j.job.Spec.RuntimeId)
}
// setup the training job.
func (j *TrainingJob) setup(config *spec.ControllerConfig) error {
if j.job == nil {
return fmt.Errorf("job.Spec can't be nil")
}
err := j.job.Spec.SetDefaults()
if err != nil {
return fmt.Errorf("there was a problem setting defaults for job spec: %v", err)
}
err = j.job.Spec.Validate()
if err != nil {
return fmt.Errorf("invalid job spec: %v", err)
}
for _, t := range j.job.Spec.ReplicaSpecs {
r, err := NewMXReplicaSet(j.KubeCli, *t, j)
if err != nil {
return err
}
j.Replicas = append(j.Replicas, r)
}
if err := j.job.Spec.ConfigureAccelerators(config.Accelerators); err != nil {
return fmt.Errorf("ConfigureAccelerators(...) error; %v", err)
}
if j.job.Spec.RuntimeId == "" {
j.job.Spec.RuntimeId = util.RandString(4)
}
var shouldCreateCluster bool
switch j.status.Phase {
case spec.MxJobPhaseNone:
shouldCreateCluster = true
//case spec.MxJobPhaseCreating:
// return errCreatedCluster
case spec.MxJobPhaseRunning:
shouldCreateCluster = false
case spec.MxJobPhaseFailed:
shouldCreateCluster = false
default:
return fmt.Errorf("unexpected MxJob phase: %s", j.status.Phase)
}
if shouldCreateCluster {
return j.triggerCreatePhase()
}
return nil
}
// triggerCreatePhase sets the phase to MxJobPhaseCreating additional resource creation happens in TrainingJob.run
// TODO(jlewi): Need to reconcile this function copied from the etcd core operator OS code with the pattern
// for the MX job. What exactly do we want to do during the Create job phase? Right now the create method
// is called on each invocation of reconcile in run to ensure all the required resources exist. Maybe there's
// a better way?
func (j *TrainingJob) triggerCreatePhase() error {
j.status.SetPhase(spec.MxJobPhaseCreating)
if err := j.updateTPRStatus(); err != nil {
return fmt.Errorf("cluster create: failed to update MxJob phase (%v): %v", spec.MxJobPhaseCreating, err)
}
log.Infof("Creating job: %v with Spec (%#v), Status (%#v)", j.job.Metadata.Name, j.job.Spec, j.job.Status)
// TODO(jlewi): I think this collects all the existing resources that have the labels indicating
// they should be owned by this job. Do they get deleted?
j.gc.CollectJob(j.job.Metadata.Name, j.job.Metadata.UID)
return nil
}
func (j *TrainingJob) Delete() {
// Delete doesn't actually delete any resources. It just sends an event which will be processed by the run
// method.
j.send(&jobEvent{typ: eventDeleteJob})
}
// TODO(jlewi): This delete function was copied from the etcd-operator. Need to figure out what the right thing to
// do is. Should we be calling deleteReplicas here?
func (j *TrainingJob) delete() {
j.gc.CollectJob(j.job.Metadata.Name, garbagecollection.NullUID)
}
// TODO(jlewi): This is sending a clusterEvent to the channel. I think these are events
// coming from the cluster code and not k8s events.
func (j *TrainingJob) send(ev *jobEvent) {
select {
case j.eventCh <- ev:
l, ecap := len(j.eventCh), cap(j.eventCh)
if l > int(float64(ecap)*0.8) {
log.Warningf("eventCh buffer is almost full [%d/%d]", l, ecap)
}
case <-j.stopCh:
}
}
// Update sends an update event for the job.
func (j *TrainingJob) Update(newJob *spec.MxJob) {
j.send(&jobEvent{
typ: eventModifyJob,
cluster: newJob,
})
}
// updateTPRStatus updates the job status based on TraingingJob.status.
func (j *TrainingJob) updateTPRStatus() error {
// If the status hasn't changed then there's no reason to update the TPR.
if reflect.DeepEqual(j.job.Status, j.status) {
return nil
}
newJob := j.job
newJob.Status = j.status
newJob, err := j.mxJobClient.Update(j.job.Metadata.Namespace, newJob)
if err != nil {
return err
}
j.job = newJob
return nil
}
func (j *TrainingJob) run(stopC <-chan struct{}) {
// TODO(jlewi): What does the run function do?
clusterFailed := false
defer func() {
if clusterFailed {
j.reportFailedStatus()
log.Infof("Deleting the failed MxJob")
j.delete()
}
close(j.stopCh)
}()
for {
select {
case <-stopC:
return
case event := <-j.eventCh:
switch event.typ {
// TODO(jlewi): We need handle a modify event.
//case eventModifyCluster:
// if isSpecEqual(event.cluster.Spec, j.job.Spec) {
// break
// }
case eventDeleteJob:
// TODO(jlewi): Delete is what should cause us to delete the Pods.
// we shouldn't delete the pods when the jobs finish because leaving the pods
// allows us to get the logs from the pods after the job finishes.
//
log.Infof("MxJob is deleted by the user")
// TODO(jlewi): This logic is probably insufficient.
if j.job.Status.Phase != spec.MxJobPhaseCleanUp {
j.status.SetPhase(spec.MxJobPhaseCleanUp)
}
if cErr := j.deleteResources(); cErr != nil {
log.Errorf("trainingJob.deleteResources() error; %v", cErr)
}
// j.status.SetPhase(spec.MxJobPhaseDone)
// Return from run because we want to stop reconciling the object.
return
}
case <-time.After(reconcileInterval):
// TODO(jlewi): Can we determine from the TPR status whether we should
// Create the resources or not? We need to ensure the resources exist so for
// now we always call Create.
if j.job.Status.Phase == spec.MxJobPhaseCreating {
// We call Create to make sure all the resources exist and are running.
if cErr := j.createResources(); cErr != nil {
log.Errorf("trainingJobCreateReplicas() error; %v", cErr)
} else {
// Update the phase to running.
j.status.SetPhase(spec.MxJobPhaseRunning)
if err := j.updateTPRStatus(); err != nil {
log.Warningf("failed to update TPR status: %v", err)
}
log.Infof("start running...")
}
}
state, replicaStatuses, err := j.GetStatus()
j.status.ReplicaStatuses = replicaStatuses
if err != nil {
log.Errorf("GetStatus() for job %v returned error: %v", j.job.Metadata.Name, err)
}
// TODO(jlewi): We should update the Phase if we detect the job is done.
if state == spec.StateFailed {
log.Errorf("Master failed Job: %v.", j.job.Metadata.Name)
j.status.SetPhase(spec.MxJobPhaseDone)
j.status.SetState(spec.StateFailed)
} else if state == spec.StateSucceeded {
log.Infof("Master succeeded Job: %v.", j.job.Metadata.Name)
j.status.SetPhase(spec.MxJobPhaseDone)
j.status.SetState(spec.StateSucceeded)
} else {
log.V(1).Infof("Job %v status=%v", j.job.Metadata.Name, util.Pformat(j.status))
}
// If the phase changed we should update the TPR.
if err := j.updateTPRStatus(); err != nil {
log.Warningf("Job %v, failed to update TPR status error: %v", j.job.Metadata.Name, err)
}
if j.job.Status.Phase == spec.MxJobPhaseCleanUp {
if cErr := j.deleteResources(); cErr != nil {
log.Errorf("Job %v trainingJob.Delete() error; %v", j.job.Metadata.Name, cErr)
}
// j.status.SetPhase(spec.MxJobPhaseDone)
// Return from run because we want to stop reconciling the object.
return
}
}
}
}
//func isSpecEqual(s1, s2 spec.MxJobSpec) bool {
// // TODO(jlewi): Need to implement this function.
// return false
// //if s1.Size != s2.Size || s1.Paused != s2.Paused || s1.Version != s2.Version {
// // return false
// //}
// //return isBackupPolicyEqual(s1.Backup, s2.Backup)
//}
// TODO(jlewi): We probably need to update this function.
func (j *TrainingJob) reportFailedStatus() {
retryInterval := 5 * time.Second
f := func() (bool, error) {
j.status.SetPhase(spec.MxJobPhaseFailed)
err := j.updateTPRStatus()
if err == nil || k8sutil.IsKubernetesResourceNotFoundError(err) {
return true, nil
}
if !apierrors.IsConflict(err) {
log.Warningf("retry report status in %v: fail to update: %v", retryInterval, err)
return false, nil
}
cl, err := j.mxJobClient.Get(j.job.Metadata.Namespace, j.job.Metadata.Name)
if err != nil {
// Update (PUT) will return conflict even if object is deleted since we have UID set in object.
// Because it will check UID first and return something like:
// "Precondition failed: UID in precondition: 0xc42712c0f0, UID in object meta: ".
if k8sutil.IsKubernetesResourceNotFoundError(err) {
return true, nil
}
log.Warningf("retry report status in %v: fail to get latest version: %v", retryInterval, err)
return false, nil
}
j.job = cl
return false, nil
}
retryutil.Retry(retryInterval, math.MaxInt64, f)
}
func (j *TrainingJob) name() string {
return j.job.Metadata.GetName()
}
| masterName | identifier_name |
model_pki_patch_role_response.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//
// Code generated with OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package schema
// PkiPatchRoleResponse struct for PkiPatchRoleResponse
type PkiPatchRoleResponse struct {
// If set, clients can request certificates for any domain, regardless of allowed_domains restrictions. See the documentation for more information.
AllowAnyName bool `json:"allow_any_name,omitempty"`
// If set, clients can request certificates for the base domains themselves, e.g. \"example.com\" of domains listed in allowed_domains. This is a separate option as in some cases this can be considered a security threat. See the documentation for more information.
AllowBareDomains bool `json:"allow_bare_domains,omitempty"`
// If set, domains specified in allowed_domains can include shell-style glob patterns, e.g. \"ftp*.example.com\". See the documentation for more information.
AllowGlobDomains bool `json:"allow_glob_domains,omitempty"`
// If set, IP Subject Alternative Names are allowed. Any valid IP is accepted and No authorization checking is performed.
AllowIpSans bool `json:"allow_ip_sans,omitempty"`
// Whether to allow \"localhost\" and \"localdomain\" as a valid common name in a request, independent of allowed_domains value.
AllowLocalhost bool `json:"allow_localhost,omitempty"`
// If set, clients can request certificates for subdomains of domains listed in allowed_domains, including wildcard subdomains. See the documentation for more information.
AllowSubdomains bool `json:"allow_subdomains,omitempty"`
// Whether to allow \"localhost\" and \"localdomain\" as a valid common name in a request, independent of allowed_domains value.
AllowTokenDisplayname bool `json:"allow_token_displayname,omitempty"`
// If set, allows certificates with wildcards in the common name to be issued, conforming to RFC 6125's Section 6.4.3; e.g., \"*.example.net\" or \"b*z.example.net\". See the documentation for more information.
AllowWildcardCertificates bool `json:"allow_wildcard_certificates,omitempty"`
// Specifies the domains this role is allowed to issue certificates for. This is used with the allow_bare_domains, allow_subdomains, and allow_glob_domains to determine matches for the common name, DNS-typed SAN entries, and Email-typed SAN entries of certificates. See the documentation for more information. This parameter accepts a comma-separated string or list of domains.
AllowedDomains []string `json:"allowed_domains,omitempty"`
// If set, Allowed domains can be specified using identity template policies. Non-templated domains are also permitted.
AllowedDomainsTemplate bool `json:"allowed_domains_template,omitempty"`
// If set, an array of allowed other names to put in SANs. These values support globbing and must be in the format <oid>;<type>:<value>. Currently only \"utf8\" is a valid type. All values, including globbing values, must use this syntax, with the exception being a single \"*\" which allows any OID and any value (but type must still be utf8).
AllowedOtherSans []string `json:"allowed_other_sans,omitempty"`
// If set, an array of allowed serial numbers to put in Subject. These values support globbing.
AllowedSerialNumbers []string `json:"allowed_serial_numbers,omitempty"`
// If set, an array of allowed URIs for URI Subject Alternative Names. Any valid URI is accepted, these values support globbing.
AllowedUriSans []string `json:"allowed_uri_sans,omitempty"`
// If set, Allowed URI SANs can be specified using identity template policies. Non-templated URI SANs are also permitted.
AllowedUriSansTemplate bool `json:"allowed_uri_sans_template,omitempty"`
// If set, an array of allowed user-ids to put in user system login name specified here: https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1
AllowedUserIds []string `json:"allowed_user_ids,omitempty"`
// Mark Basic Constraints valid when issuing non-CA certificates.
BasicConstraintsValidForNonCa bool `json:"basic_constraints_valid_for_non_ca,omitempty"`
// If set, certificates are flagged for client auth use. Defaults to true. See also RFC 5280 Section 4.2.1.12.
ClientFlag bool `json:"client_flag,omitempty"`
// List of allowed validations to run against the Common Name field. Values can include 'email' to validate the CN is a email address, 'hostname' to validate the CN is a valid hostname (potentially including wildcards). When multiple validations are specified, these take OR semantics (either email OR hostname are allowed). The special value 'disabled' allows disabling all CN name validations, allowing for arbitrary non-Hostname, non-Email address CNs.
CnValidations []string `json:"cn_validations,omitempty"`
// If set, certificates are flagged for code signing use. Defaults to false. See also RFC 5280 Section 4.2.1.12.
CodeSigningFlag bool `json:"code_signing_flag,omitempty"`
// If set, Country will be set to this value in certificates issued by this role.
Country []string `json:"country,omitempty"`
// If set, certificates are flagged for email protection use. Defaults to false. See also RFC 5280 Section 4.2.1.12.
EmailProtectionFlag bool `json:"email_protection_flag,omitempty"`
// If set, only valid host names are allowed for CN and DNS SANs, and the host part of email addresses. Defaults to true.
EnforceHostnames bool `json:"enforce_hostnames,omitempty"`
// A comma-separated string or list of extended key usages. Valid values can be found at https://golang.org/pkg/crypto/x509/#ExtKeyUsage -- simply drop the \"ExtKeyUsage\" part of the name. To remove all key usages from being set, set this value to an empty list. See also RFC 5280 Section 4.2.1.12.
ExtKeyUsage []string `json:"ext_key_usage,omitempty"`
// A comma-separated string or list of extended key usage oids.
ExtKeyUsageOids []string `json:"ext_key_usage_oids,omitempty"`
// If set, certificates issued/signed against this role will have Vault leases attached to them. Defaults to \"false\". Certificates can be added to the CRL by \"vault revoke <lease_id>\" when certificates are associated with leases. It can also be done using the \"pki/revoke\" endpoint. However, when lease generation is disabled, invoking \"pki/revoke\" would be the only way to add the certificates to the CRL. When large number of certificates are generated with long lifetimes, it is recommended that lease generation be disabled, as large amount of leases adversely affect the startup time of Vault.
GenerateLease bool `json:"generate_lease,omitempty"`
// Reference to the issuer used to sign requests serviced by this role.
IssuerRef string `json:"issuer_ref,omitempty"`
// The number of bits to use. Allowed values are 0 (universal default); with rsa key_type: 2048 (default), 3072, or 4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with ed25519.
KeyBits int32 `json:"key_bits,omitempty"`
// The type of key to use; defaults to RSA. \"rsa\" \"ec\", \"ed25519\" and \"any\" are the only valid values.
KeyType string `json:"key_type,omitempty"`
// A comma-separated string or list of key usages (not extended key usages). Valid values can be found at https://golang.org/pkg/crypto/x509/#KeyUsage -- simply drop the \"KeyUsage\" part of the name. To remove all key usages from being set, set this value to an empty list. See also RFC 5280 Section 4.2.1.3.
KeyUsage []string `json:"key_usage,omitempty"`
// If set, Locality will be set to this value in certificates issued by this role.
Locality []string `json:"locality,omitempty"`
// The maximum allowed lease duration. If not set, defaults to the system maximum lease TTL.
MaxTtl int32 `json:"max_ttl,omitempty"`
// If set, certificates issued/signed against this role will not be stored in the storage backend. This can improve performance when issuing large numbers of certificates. However, certificates issued in this way cannot be enumerated or revoked, so this option is recommended only for certificates that are non-sensitive, or extremely short-lived. This option implies a value of \"false\" for \"generate_lease\".
NoStore bool `json:"no_store,omitempty"`
// Set the not after field of the certificate with specified date value. The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ.
NotAfter string `json:"not_after,omitempty"`
// The duration before now which the certificate needs to be backdated by.
NotBeforeDuration int32 `json:"not_before_duration,omitempty"`
// If set, O (Organization) will be set to this value in certificates issued by this role.
Organization []string `json:"organization,omitempty"`
// If set, OU (OrganizationalUnit) will be set to this value in certificates issued by this role.
Ou []string `json:"ou,omitempty"`
// A comma-separated string or list of policy OIDs, or a JSON list of qualified policy information, which must include an oid, and may include a notice and/or cps url, using the form [{\"oid\"=\"1.3.6.1.4.1.7.8\",\"notice\"=\"I am a user Notice\"}, {\"oid\"=\"1.3.6.1.4.1.44947.1.2.4 \",\"cps\"=\"https://example.com\"}].
PolicyIdentifiers []string `json:"policy_identifiers,omitempty"`
// If set, Postal Code will be set to this value in certificates issued by this role.
PostalCode []string `json:"postal_code,omitempty"`
// If set, Province will be set to this value in certificates issued by this role.
Province []string `json:"province,omitempty"`
// If set to false, makes the 'common_name' field optional while generating a certificate.
RequireCn bool `json:"require_cn,omitempty"`
// If set, certificates are flagged for server auth use. Defaults to true. See also RFC 5280 Section 4.2.1.12.
ServerFlag bool `json:"server_flag,omitempty"`
// The number of bits to use in the signature algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for SHA-2-512. Defaults to 0 to automatically detect based on key length (SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).
SignatureBits int32 `json:"signature_bits,omitempty"`
// If set, Street Address will be set to this value in certificates issued by this role.
StreetAddress []string `json:"street_address,omitempty"`
// The lease duration (validity period of the certificate) if no specific lease duration is requested. The lease duration controls the expiration of certificates issued by this backend. Defaults to the system default value or the value of max_ttl, whichever is shorter.
Ttl int32 `json:"ttl,omitempty"`
// If set, when used with a signing profile, the common name in the CSR will be used. This does *not* include any requested Subject Alternative Names; use use_csr_sans for that. Defaults to true.
UseCsrCommonName bool `json:"use_csr_common_name,omitempty"`
// If set, when used with a signing profile, the SANs in the CSR will be used. This does *not* include the Common Name (cn); use use_csr_common_name for that. Defaults to true.
UseCsrSans bool `json:"use_csr_sans,omitempty"`
// Whether or not to use PSS signatures when using a RSA key-type issuer. Defaults to false.
UsePss bool `json:"use_pss,omitempty"`
}
// NewPkiPatchRoleResponseWithDefaults instantiates a new PkiPatchRoleResponse object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func | () *PkiPatchRoleResponse {
var this PkiPatchRoleResponse
this.ServerFlag = true
return &this
}
| NewPkiPatchRoleResponseWithDefaults | identifier_name |
model_pki_patch_role_response.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//
// Code generated with OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package schema
// PkiPatchRoleResponse struct for PkiPatchRoleResponse
type PkiPatchRoleResponse struct {
// If set, clients can request certificates for any domain, regardless of allowed_domains restrictions. See the documentation for more information.
AllowAnyName bool `json:"allow_any_name,omitempty"`
// If set, clients can request certificates for the base domains themselves, e.g. \"example.com\" of domains listed in allowed_domains. This is a separate option as in some cases this can be considered a security threat. See the documentation for more information.
AllowBareDomains bool `json:"allow_bare_domains,omitempty"`
// If set, domains specified in allowed_domains can include shell-style glob patterns, e.g. \"ftp*.example.com\". See the documentation for more information.
AllowGlobDomains bool `json:"allow_glob_domains,omitempty"`
// If set, IP Subject Alternative Names are allowed. Any valid IP is accepted and No authorization checking is performed.
AllowIpSans bool `json:"allow_ip_sans,omitempty"`
// Whether to allow \"localhost\" and \"localdomain\" as a valid common name in a request, independent of allowed_domains value.
AllowLocalhost bool `json:"allow_localhost,omitempty"`
// If set, clients can request certificates for subdomains of domains listed in allowed_domains, including wildcard subdomains. See the documentation for more information.
AllowSubdomains bool `json:"allow_subdomains,omitempty"`
// Whether to allow \"localhost\" and \"localdomain\" as a valid common name in a request, independent of allowed_domains value.
AllowTokenDisplayname bool `json:"allow_token_displayname,omitempty"`
// If set, allows certificates with wildcards in the common name to be issued, conforming to RFC 6125's Section 6.4.3; e.g., \"*.example.net\" or \"b*z.example.net\". See the documentation for more information.
AllowWildcardCertificates bool `json:"allow_wildcard_certificates,omitempty"`
// Specifies the domains this role is allowed to issue certificates for. This is used with the allow_bare_domains, allow_subdomains, and allow_glob_domains to determine matches for the common name, DNS-typed SAN entries, and Email-typed SAN entries of certificates. See the documentation for more information. This parameter accepts a comma-separated string or list of domains.
AllowedDomains []string `json:"allowed_domains,omitempty"`
// If set, Allowed domains can be specified using identity template policies. Non-templated domains are also permitted.
AllowedDomainsTemplate bool `json:"allowed_domains_template,omitempty"`
// If set, an array of allowed other names to put in SANs. These values support globbing and must be in the format <oid>;<type>:<value>. Currently only \"utf8\" is a valid type. All values, including globbing values, must use this syntax, with the exception being a single \"*\" which allows any OID and any value (but type must still be utf8).
AllowedOtherSans []string `json:"allowed_other_sans,omitempty"`
// If set, an array of allowed serial numbers to put in Subject. These values support globbing.
AllowedSerialNumbers []string `json:"allowed_serial_numbers,omitempty"`
// If set, an array of allowed URIs for URI Subject Alternative Names. Any valid URI is accepted, these values support globbing.
AllowedUriSans []string `json:"allowed_uri_sans,omitempty"`
// If set, Allowed URI SANs can be specified using identity template policies. Non-templated URI SANs are also permitted.
AllowedUriSansTemplate bool `json:"allowed_uri_sans_template,omitempty"`
// If set, an array of allowed user-ids to put in user system login name specified here: https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1
AllowedUserIds []string `json:"allowed_user_ids,omitempty"`
// Mark Basic Constraints valid when issuing non-CA certificates.
BasicConstraintsValidForNonCa bool `json:"basic_constraints_valid_for_non_ca,omitempty"`
// If set, certificates are flagged for client auth use. Defaults to true. See also RFC 5280 Section 4.2.1.12.
ClientFlag bool `json:"client_flag,omitempty"`
// List of allowed validations to run against the Common Name field. Values can include 'email' to validate the CN is a email address, 'hostname' to validate the CN is a valid hostname (potentially including wildcards). When multiple validations are specified, these take OR semantics (either email OR hostname are allowed). The special value 'disabled' allows disabling all CN name validations, allowing for arbitrary non-Hostname, non-Email address CNs.
CnValidations []string `json:"cn_validations,omitempty"`
// If set, certificates are flagged for code signing use. Defaults to false. See also RFC 5280 Section 4.2.1.12.
CodeSigningFlag bool `json:"code_signing_flag,omitempty"`
// If set, Country will be set to this value in certificates issued by this role.
Country []string `json:"country,omitempty"`
// If set, certificates are flagged for email protection use. Defaults to false. See also RFC 5280 Section 4.2.1.12.
EmailProtectionFlag bool `json:"email_protection_flag,omitempty"`
// If set, only valid host names are allowed for CN and DNS SANs, and the host part of email addresses. Defaults to true.
EnforceHostnames bool `json:"enforce_hostnames,omitempty"`
// A comma-separated string or list of extended key usages. Valid values can be found at https://golang.org/pkg/crypto/x509/#ExtKeyUsage -- simply drop the \"ExtKeyUsage\" part of the name. To remove all key usages from being set, set this value to an empty list. See also RFC 5280 Section 4.2.1.12.
ExtKeyUsage []string `json:"ext_key_usage,omitempty"`
// A comma-separated string or list of extended key usage oids.
ExtKeyUsageOids []string `json:"ext_key_usage_oids,omitempty"`
// If set, certificates issued/signed against this role will have Vault leases attached to them. Defaults to \"false\". Certificates can be added to the CRL by \"vault revoke <lease_id>\" when certificates are associated with leases. It can also be done using the \"pki/revoke\" endpoint. However, when lease generation is disabled, invoking \"pki/revoke\" would be the only way to add the certificates to the CRL. When large number of certificates are generated with long lifetimes, it is recommended that lease generation be disabled, as large amount of leases adversely affect the startup time of Vault.
GenerateLease bool `json:"generate_lease,omitempty"`
// Reference to the issuer used to sign requests serviced by this role.
IssuerRef string `json:"issuer_ref,omitempty"`
// The number of bits to use. Allowed values are 0 (universal default); with rsa key_type: 2048 (default), 3072, or 4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with ed25519.
KeyBits int32 `json:"key_bits,omitempty"`
// The type of key to use; defaults to RSA. \"rsa\" \"ec\", \"ed25519\" and \"any\" are the only valid values.
KeyType string `json:"key_type,omitempty"`
// A comma-separated string or list of key usages (not extended key usages). Valid values can be found at https://golang.org/pkg/crypto/x509/#KeyUsage -- simply drop the \"KeyUsage\" part of the name. To remove all key usages from being set, set this value to an empty list. See also RFC 5280 Section 4.2.1.3.
KeyUsage []string `json:"key_usage,omitempty"`
// If set, Locality will be set to this value in certificates issued by this role.
Locality []string `json:"locality,omitempty"`
// The maximum allowed lease duration. If not set, defaults to the system maximum lease TTL.
MaxTtl int32 `json:"max_ttl,omitempty"`
// If set, certificates issued/signed against this role will not be stored in the storage backend. This can improve performance when issuing large numbers of certificates. However, certificates issued in this way cannot be enumerated or revoked, so this option is recommended only for certificates that are non-sensitive, or extremely short-lived. This option implies a value of \"false\" for \"generate_lease\".
NoStore bool `json:"no_store,omitempty"`
// Set the not after field of the certificate with specified date value. The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ.
NotAfter string `json:"not_after,omitempty"`
// The duration before now which the certificate needs to be backdated by.
NotBeforeDuration int32 `json:"not_before_duration,omitempty"`
// If set, O (Organization) will be set to this value in certificates issued by this role.
Organization []string `json:"organization,omitempty"`
// If set, OU (OrganizationalUnit) will be set to this value in certificates issued by this role.
Ou []string `json:"ou,omitempty"`
// A comma-separated string or list of policy OIDs, or a JSON list of qualified policy information, which must include an oid, and may include a notice and/or cps url, using the form [{\"oid\"=\"1.3.6.1.4.1.7.8\",\"notice\"=\"I am a user Notice\"}, {\"oid\"=\"1.3.6.1.4.1.44947.1.2.4 \",\"cps\"=\"https://example.com\"}].
PolicyIdentifiers []string `json:"policy_identifiers,omitempty"`
// If set, Postal Code will be set to this value in certificates issued by this role.
PostalCode []string `json:"postal_code,omitempty"`
// If set, Province will be set to this value in certificates issued by this role.
Province []string `json:"province,omitempty"`
// If set to false, makes the 'common_name' field optional while generating a certificate.
RequireCn bool `json:"require_cn,omitempty"`
// If set, certificates are flagged for server auth use. Defaults to true. See also RFC 5280 Section 4.2.1.12.
ServerFlag bool `json:"server_flag,omitempty"`
// The number of bits to use in the signature algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for SHA-2-512. Defaults to 0 to automatically detect based on key length (SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).
SignatureBits int32 `json:"signature_bits,omitempty"`
// If set, Street Address will be set to this value in certificates issued by this role.
StreetAddress []string `json:"street_address,omitempty"`
// The lease duration (validity period of the certificate) if no specific lease duration is requested. The lease duration controls the expiration of certificates issued by this backend. Defaults to the system default value or the value of max_ttl, whichever is shorter.
Ttl int32 `json:"ttl,omitempty"`
// If set, when used with a signing profile, the common name in the CSR will be used. This does *not* include any requested Subject Alternative Names; use use_csr_sans for that. Defaults to true.
UseCsrCommonName bool `json:"use_csr_common_name,omitempty"`
// If set, when used with a signing profile, the SANs in the CSR will be used. This does *not* include the Common Name (cn); use use_csr_common_name for that. Defaults to true.
UseCsrSans bool `json:"use_csr_sans,omitempty"`
// Whether or not to use PSS signatures when using a RSA key-type issuer. Defaults to false.
UsePss bool `json:"use_pss,omitempty"`
}
// NewPkiPatchRoleResponseWithDefaults instantiates a new PkiPatchRoleResponse object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewPkiPatchRoleResponseWithDefaults() *PkiPatchRoleResponse | {
var this PkiPatchRoleResponse
this.ServerFlag = true
return &this
} | identifier_body | |
model_pki_patch_role_response.go | // Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//
// Code generated with OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package schema
// PkiPatchRoleResponse struct for PkiPatchRoleResponse
type PkiPatchRoleResponse struct {
// If set, clients can request certificates for any domain, regardless of allowed_domains restrictions. See the documentation for more information.
AllowAnyName bool `json:"allow_any_name,omitempty"`
// If set, clients can request certificates for the base domains themselves, e.g. \"example.com\" of domains listed in allowed_domains. This is a separate option as in some cases this can be considered a security threat. See the documentation for more information.
AllowBareDomains bool `json:"allow_bare_domains,omitempty"`
// If set, domains specified in allowed_domains can include shell-style glob patterns, e.g. \"ftp*.example.com\". See the documentation for more information.
AllowGlobDomains bool `json:"allow_glob_domains,omitempty"`
// If set, IP Subject Alternative Names are allowed. Any valid IP is accepted and No authorization checking is performed.
AllowIpSans bool `json:"allow_ip_sans,omitempty"`
// Whether to allow \"localhost\" and \"localdomain\" as a valid common name in a request, independent of allowed_domains value.
AllowLocalhost bool `json:"allow_localhost,omitempty"`
// If set, clients can request certificates for subdomains of domains listed in allowed_domains, including wildcard subdomains. See the documentation for more information.
AllowSubdomains bool `json:"allow_subdomains,omitempty"`
// Whether to allow \"localhost\" and \"localdomain\" as a valid common name in a request, independent of allowed_domains value.
AllowTokenDisplayname bool `json:"allow_token_displayname,omitempty"`
// If set, allows certificates with wildcards in the common name to be issued, conforming to RFC 6125's Section 6.4.3; e.g., \"*.example.net\" or \"b*z.example.net\". See the documentation for more information.
AllowWildcardCertificates bool `json:"allow_wildcard_certificates,omitempty"`
// Specifies the domains this role is allowed to issue certificates for. This is used with the allow_bare_domains, allow_subdomains, and allow_glob_domains to determine matches for the common name, DNS-typed SAN entries, and Email-typed SAN entries of certificates. See the documentation for more information. This parameter accepts a comma-separated string or list of domains.
AllowedDomains []string `json:"allowed_domains,omitempty"`
// If set, Allowed domains can be specified using identity template policies. Non-templated domains are also permitted.
AllowedDomainsTemplate bool `json:"allowed_domains_template,omitempty"`
// If set, an array of allowed other names to put in SANs. These values support globbing and must be in the format <oid>;<type>:<value>. Currently only \"utf8\" is a valid type. All values, including globbing values, must use this syntax, with the exception being a single \"*\" which allows any OID and any value (but type must still be utf8).
AllowedOtherSans []string `json:"allowed_other_sans,omitempty"`
// If set, an array of allowed serial numbers to put in Subject. These values support globbing.
AllowedSerialNumbers []string `json:"allowed_serial_numbers,omitempty"`
// If set, an array of allowed URIs for URI Subject Alternative Names. Any valid URI is accepted, these values support globbing.
AllowedUriSans []string `json:"allowed_uri_sans,omitempty"`
// If set, Allowed URI SANs can be specified using identity template policies. Non-templated URI SANs are also permitted.
AllowedUriSansTemplate bool `json:"allowed_uri_sans_template,omitempty"`
// If set, an array of allowed user-ids to put in user system login name specified here: https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1
AllowedUserIds []string `json:"allowed_user_ids,omitempty"` | ClientFlag bool `json:"client_flag,omitempty"`
// List of allowed validations to run against the Common Name field. Values can include 'email' to validate the CN is a email address, 'hostname' to validate the CN is a valid hostname (potentially including wildcards). When multiple validations are specified, these take OR semantics (either email OR hostname are allowed). The special value 'disabled' allows disabling all CN name validations, allowing for arbitrary non-Hostname, non-Email address CNs.
CnValidations []string `json:"cn_validations,omitempty"`
// If set, certificates are flagged for code signing use. Defaults to false. See also RFC 5280 Section 4.2.1.12.
CodeSigningFlag bool `json:"code_signing_flag,omitempty"`
// If set, Country will be set to this value in certificates issued by this role.
Country []string `json:"country,omitempty"`
// If set, certificates are flagged for email protection use. Defaults to false. See also RFC 5280 Section 4.2.1.12.
EmailProtectionFlag bool `json:"email_protection_flag,omitempty"`
// If set, only valid host names are allowed for CN and DNS SANs, and the host part of email addresses. Defaults to true.
EnforceHostnames bool `json:"enforce_hostnames,omitempty"`
// A comma-separated string or list of extended key usages. Valid values can be found at https://golang.org/pkg/crypto/x509/#ExtKeyUsage -- simply drop the \"ExtKeyUsage\" part of the name. To remove all key usages from being set, set this value to an empty list. See also RFC 5280 Section 4.2.1.12.
ExtKeyUsage []string `json:"ext_key_usage,omitempty"`
// A comma-separated string or list of extended key usage oids.
ExtKeyUsageOids []string `json:"ext_key_usage_oids,omitempty"`
// If set, certificates issued/signed against this role will have Vault leases attached to them. Defaults to \"false\". Certificates can be added to the CRL by \"vault revoke <lease_id>\" when certificates are associated with leases. It can also be done using the \"pki/revoke\" endpoint. However, when lease generation is disabled, invoking \"pki/revoke\" would be the only way to add the certificates to the CRL. When large number of certificates are generated with long lifetimes, it is recommended that lease generation be disabled, as large amount of leases adversely affect the startup time of Vault.
GenerateLease bool `json:"generate_lease,omitempty"`
// Reference to the issuer used to sign requests serviced by this role.
IssuerRef string `json:"issuer_ref,omitempty"`
// The number of bits to use. Allowed values are 0 (universal default); with rsa key_type: 2048 (default), 3072, or 4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with ed25519.
KeyBits int32 `json:"key_bits,omitempty"`
// The type of key to use; defaults to RSA. \"rsa\" \"ec\", \"ed25519\" and \"any\" are the only valid values.
KeyType string `json:"key_type,omitempty"`
// A comma-separated string or list of key usages (not extended key usages). Valid values can be found at https://golang.org/pkg/crypto/x509/#KeyUsage -- simply drop the \"KeyUsage\" part of the name. To remove all key usages from being set, set this value to an empty list. See also RFC 5280 Section 4.2.1.3.
KeyUsage []string `json:"key_usage,omitempty"`
// If set, Locality will be set to this value in certificates issued by this role.
Locality []string `json:"locality,omitempty"`
// The maximum allowed lease duration. If not set, defaults to the system maximum lease TTL.
MaxTtl int32 `json:"max_ttl,omitempty"`
// If set, certificates issued/signed against this role will not be stored in the storage backend. This can improve performance when issuing large numbers of certificates. However, certificates issued in this way cannot be enumerated or revoked, so this option is recommended only for certificates that are non-sensitive, or extremely short-lived. This option implies a value of \"false\" for \"generate_lease\".
NoStore bool `json:"no_store,omitempty"`
// Set the not after field of the certificate with specified date value. The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ.
NotAfter string `json:"not_after,omitempty"`
// The duration before now which the certificate needs to be backdated by.
NotBeforeDuration int32 `json:"not_before_duration,omitempty"`
// If set, O (Organization) will be set to this value in certificates issued by this role.
Organization []string `json:"organization,omitempty"`
// If set, OU (OrganizationalUnit) will be set to this value in certificates issued by this role.
Ou []string `json:"ou,omitempty"`
// A comma-separated string or list of policy OIDs, or a JSON list of qualified policy information, which must include an oid, and may include a notice and/or cps url, using the form [{\"oid\"=\"1.3.6.1.4.1.7.8\",\"notice\"=\"I am a user Notice\"}, {\"oid\"=\"1.3.6.1.4.1.44947.1.2.4 \",\"cps\"=\"https://example.com\"}].
PolicyIdentifiers []string `json:"policy_identifiers,omitempty"`
// If set, Postal Code will be set to this value in certificates issued by this role.
PostalCode []string `json:"postal_code,omitempty"`
// If set, Province will be set to this value in certificates issued by this role.
Province []string `json:"province,omitempty"`
// If set to false, makes the 'common_name' field optional while generating a certificate.
RequireCn bool `json:"require_cn,omitempty"`
// If set, certificates are flagged for server auth use. Defaults to true. See also RFC 5280 Section 4.2.1.12.
ServerFlag bool `json:"server_flag,omitempty"`
// The number of bits to use in the signature algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for SHA-2-512. Defaults to 0 to automatically detect based on key length (SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).
SignatureBits int32 `json:"signature_bits,omitempty"`
// If set, Street Address will be set to this value in certificates issued by this role.
StreetAddress []string `json:"street_address,omitempty"`
// The lease duration (validity period of the certificate) if no specific lease duration is requested. The lease duration controls the expiration of certificates issued by this backend. Defaults to the system default value or the value of max_ttl, whichever is shorter.
Ttl int32 `json:"ttl,omitempty"`
// If set, when used with a signing profile, the common name in the CSR will be used. This does *not* include any requested Subject Alternative Names; use use_csr_sans for that. Defaults to true.
UseCsrCommonName bool `json:"use_csr_common_name,omitempty"`
// If set, when used with a signing profile, the SANs in the CSR will be used. This does *not* include the Common Name (cn); use use_csr_common_name for that. Defaults to true.
UseCsrSans bool `json:"use_csr_sans,omitempty"`
// Whether or not to use PSS signatures when using a RSA key-type issuer. Defaults to false.
UsePss bool `json:"use_pss,omitempty"`
}
// NewPkiPatchRoleResponseWithDefaults instantiates a new PkiPatchRoleResponse object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewPkiPatchRoleResponseWithDefaults() *PkiPatchRoleResponse {
var this PkiPatchRoleResponse
this.ServerFlag = true
return &this
} |
// Mark Basic Constraints valid when issuing non-CA certificates.
BasicConstraintsValidForNonCa bool `json:"basic_constraints_valid_for_non_ca,omitempty"`
// If set, certificates are flagged for client auth use. Defaults to true. See also RFC 5280 Section 4.2.1.12. | random_line_split |
api_test.go | /*
Copyright 2017 Continusec Pty Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"bytes"
"fmt"
"log"
"math/rand"
"strings"
"testing"
"time"
"github.com/continusec/verifiabledatastructures/merkle"
"github.com/continusec/verifiabledatastructures/mutator/batch"
"github.com/continusec/verifiabledatastructures/mutator/instant"
"github.com/continusec/verifiabledatastructures/oracle/policy"
"github.com/continusec/verifiabledatastructures/pb"
"github.com/continusec/verifiabledatastructures/server/grpc"
"github.com/continusec/verifiabledatastructures/server/httprest"
"github.com/continusec/verifiabledatastructures/storage/badger"
"github.com/continusec/verifiabledatastructures/storage/bolt"
"github.com/continusec/verifiabledatastructures/storage/memory"
"github.com/continusec/verifiabledatastructures/verifiable"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func testMap(ctx context.Context, t *testing.T, service pb.VerifiableDataStructuresServiceServer) |
func testLog(ctx context.Context, t *testing.T, service pb.VerifiableDataStructuresServiceServer) {
account := (&verifiable.Client{
Service: service,
}).Account("999", "secret")
log := account.VerifiableLog("smoketest")
treeRoot, err := log.TreeHead(ctx, 0)
if !(treeRoot == nil || (treeRoot.TreeSize == 0 && len(treeRoot.RootHash) == 0)) {
t.Fatal("Expecting log to not exist.")
}
aer, err := log.Add(ctx, &pb.LeafData{LeafInput: []byte("foo")})
if err != nil {
t.Fatal("Failed adding item", err)
}
lh := aer.LeafHash()
if !bytes.Equal(lh, merkle.LeafHash([]byte("foo"))) {
t.Fatal("Failed adding item")
}
for treeRoot == nil || treeRoot.TreeSize < 1 {
treeRoot, err = log.TreeHead(ctx, verifiable.Head)
if err != nil {
t.Fatal("Failure getting root hash")
}
}
if !bytes.Equal(treeRoot.RootHash, merkle.LeafHash([]byte("foo"))) {
t.Fatal("Failed calculating tree root")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("fooz")})
if err != nil {
t.Fatal("Failed adding item")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
if err != nil {
t.Fatal("Failed adding item")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("baz")})
if err != nil {
t.Fatal("Failed adding item")
}
p, err := log.Add(ctx, &pb.LeafData{LeafInput: []byte("smez")})
if err != nil {
t.Fatal("Failed adding item")
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 5 {
t.Fatal("Failure getting root hash")
}
entries := make([]*pb.LeafData, treeRoot.TreeSize)
for i := int64(0); i < treeRoot.TreeSize; i++ {
entries[i], err = log.Entry(ctx, i)
if err != nil {
t.Fatal("Failure getting entry")
}
}
if !verifyRootHash(entries, treeRoot.RootHash) {
t.Fatal("Failure verifying root hash")
}
for i := 0; i < 200; i++ {
p, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte(fmt.Sprintf("foo %d", rand.Int()))})
if err != nil {
t.Fatal("Failed adding item")
}
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 205 {
t.Fatal("Failure getting root hash")
}
cnt := 0
for entry := range log.Entries(context.Background(), 0, treeRoot.TreeSize) {
err = log.VerifyInclusion(ctx, treeRoot, merkle.LeafHash(entry.LeafInput))
if err != nil {
t.Fatal("Failure verifiying inclusion")
}
cnt++
}
if cnt != 205 {
t.Fatal("Failed to get all entries")
}
th3, err := log.TreeHead(ctx, 3)
if err != nil {
t.Fatal("Failure getting root hash")
}
th7, err := log.TreeHead(ctx, 7)
if err != nil {
t.Fatal("Failure getting root hash")
}
err = log.VerifyConsistency(ctx, th3, th7)
if err != nil {
t.Fatal("Failure to generate consistency between 3 and 7")
}
rootHashes := generateRootHashes(context.Background(), log.Entries(context.Background(), 0, treeRoot.TreeSize))
i := 0
var last []byte
for rh := range rootHashes {
last = rh
i++
}
if i != 205 {
t.Fatal("Wrong i")
}
if !bytes.Equal(treeRoot.RootHash, last) {
t.Fatal("Failed calculating tree root")
}
for i := 0; i < 200; i++ {
p, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte(fmt.Sprintf("foo %d", rand.Int()))})
if err != nil {
t.Fatal("Failed adding item")
}
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 405 {
t.Fatal("Failure getting root hash")
}
rootHashes = generateRootHashes(context.Background(), log.Entries(context.Background(), 0, treeRoot.TreeSize))
i = 0
for rh := range rootHashes {
last = rh
i++
}
if i != 405 {
t.Fatal("Wrong i")
}
if !bytes.Equal(treeRoot.RootHash, last) {
t.Fatal("Failed calculating tree root")
}
}
func createCleanEmptyBatchMutatorService() pb.VerifiableDataStructuresServiceServer {
db := &memory.TransientStorage{}
return (&verifiable.Service{
AccessPolicy: policy.Open,
Mutator: (&batch.Mutator{
Writer: db,
BatchSize: 1000,
BufferSize: 100000,
Timeout: time.Millisecond * 10,
}).MustCreate(),
Reader: db,
}).MustCreate()
}
func createCleanEmptyService() pb.VerifiableDataStructuresServiceServer {
db := &memory.TransientStorage{}
return (&verifiable.Service{
AccessPolicy: policy.Open,
Mutator: &instant.Mutator{Writer: db},
Reader: db,
}).MustCreate()
}
func expectErr(t *testing.T, exp, err error) {
if exp != err {
t.Fatalf("Wanted %s, got %s", exp, err)
}
}
func expectErrCode(t *testing.T, c codes.Code, err error) {
if err == nil {
t.Fatalf("Wanting bad code")
}
s, ok := status.FromError(err)
if !ok {
t.Fatalf("Bad error type")
}
if s.Code() != c {
t.Fatalf("Bad error ")
}
}
func TestPermissions(t *testing.T) {
db := &memory.TransientStorage{}
c := &verifiable.Client{Service: (&verifiable.Service{
Mutator: &instant.Mutator{Writer: db},
Reader: db,
AccessPolicy: &policy.Static{
Policy: []*pb.ResourceAccount{
{
Id: "0",
Policy: []*pb.AccessPolicy{
{
NameMatch: "foo",
Permissions: []pb.Permission{pb.Permission_PERM_ALL_PERMISSIONS},
ApiKey: "secret",
AllowedFields: []string{"*"},
},
{
NameMatch: "f*",
Permissions: []pb.Permission{pb.Permission_PERM_LOG_READ_ENTRY},
ApiKey: "*",
AllowedFields: []string{"name"},
},
},
},
},
},
}).MustCreate()}
var err error
var v *pb.LeafData
ctx := context.TODO()
_, err = c.Account("0", "secr3t").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("0", "secret").VerifiableLog("fofo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("1", "secret").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("0", "secret").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErr(t, nil, err)
v, err = verifiable.CreateRedactableJSONLeafData([]byte("{\"name\":\"adam\",\"dob\":\"100000\"}"))
expectErr(t, nil, err)
p, err := c.Account("0", "secret").VerifiableLog("foo").Add(ctx, v)
expectErr(t, nil, err)
_, err = p.Wait(ctx)
expectErr(t, nil, err)
// Test less fields
resp, err := c.Account("0", "").VerifiableLog("foo").Entry(ctx, 1)
expectErr(t, nil, err)
st := string(resp.ExtraData)
if !strings.Contains(st, "\"dob\":\"***REDACTED***") {
t.Fatal("Expected redacted")
}
if !strings.Contains(st, "adam") {
t.Fatal("Expected name")
}
if strings.Contains(st, "100000") {
t.Fatal("Value should not appear (unless incredibly unlucky with random generator)")
}
// Test more fields
resp, err = c.Account("0", "secret").VerifiableLog("foo").Entry(ctx, 1)
expectErr(t, nil, err)
st = string(resp.ExtraData)
if strings.Contains(st, "\"dob\":\"***REDACTED***") {
t.Fatal("Not expected redacted")
}
if !strings.Contains(st, "adam") {
t.Fatal("Expected name")
}
if !strings.Contains(st, "100000") {
t.Fatal("Value should appear")
}
}
func runSmokeTests(c pb.VerifiableDataStructuresServiceServer, t *testing.T) {
// First wrap it in REST
go httprest.StartServer(&pb.ServerConfig{
InsecureServerForTesting: true,
RestListenBind: ":8092",
}, c)
time.Sleep(50 * time.Millisecond)
// Get client to it
restClient := (&httprest.Client{
BaseURL: "http://localhost:8092",
}).MustDial()
// Now wrap that in a gRPC server
go grpc.StartServer(&pb.ServerConfig{
InsecureServerForTesting: true,
GrpcListenBind: ":8081",
GrpcListenProtocol: "tcp4",
}, restClient)
time.Sleep(50 * time.Millisecond)
// And grab a client for that
d := (&grpc.Client{
Address: "localhost:8081",
NoGrpcSecurity: true,
}).MustDial()
testLog(context.TODO(), t, d)
testMap(context.TODO(), t, d)
}
func TestClientComms(t *testing.T) {
runSmokeTests(createCleanEmptyService(), t)
}
func TestBatchMutator(t *testing.T) {
d := createCleanEmptyBatchMutatorService()
testLog(context.TODO(), t, d)
testMap(context.TODO(), t, d)
}
// GenerateRootHashes is a utility function that emits a channel of root hashes
// given a channel of input values. This is useful for some unit tests.
func generateRootHashes(ctx context.Context, input <-chan *pb.LeafData) <-chan []byte {
rv := make(chan []byte)
go func() {
defer close(rv)
index := 0
stack := make([][]byte, 0)
for {
select {
case <-ctx.Done():
return
case b, ok := <-input:
if !ok {
return
}
stack = append(stack, merkle.LeafHash(b.GetLeafInput()))
}
for j := index; (j & 1) == 1; j >>= 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
rh := stack[len(stack)-1]
for j := len(stack) - 2; j >= 0; j-- {
rh = merkle.NodeHash(stack[j], rh)
}
select {
case <-ctx.Done():
return
case rv <- rh:
index++
}
}
}()
return rv
}
func verifyRootHash(entries []*pb.LeafData, answer []byte) bool {
stack := make([][]byte, 0)
for i, b := range entries {
stack = append(stack, merkle.LeafHash(b.LeafInput))
for j := i; (j & 1) == 1; j >>= 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
}
for len(stack) > 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
return bytes.Equal(stack[0], answer)
}
// TestObjectsMeetReq simply includes objects that might compile, but not comply with APIs
// since possibly we didn't test them properly otherwise.
func TestObjectsMeetReq(t *testing.T) {
var kr verifiable.StorageReader
var kw verifiable.StorageWriter
var m verifiable.MutatorService
var o verifiable.AuthorizationOracle
kr = &memory.TransientStorage{}
kw = &memory.TransientStorage{}
kr = &bolt.Storage{}
kw = &bolt.Storage{}
kr = &badger.Storage{}
kw = &badger.Storage{}
m = &instant.Mutator{}
m = (&batch.Mutator{}).MustCreate()
o = policy.Open
o = &policy.Static{}
log.Println(kr, kw, m, o) // "use" these so that go compiler will be quiet
}
| {
account := (&verifiable.Client{
Service: service,
}).Account("999", "secret")
vmap := account.VerifiableMap("testmap")
numToDo := 1000
var lastP verifiable.MapUpdatePromise
var err error
for i := 0; i < numToDo; i++ {
lastP, err = vmap.Set(ctx, []byte(fmt.Sprintf("foo%d", i)), &pb.LeafData{LeafInput: []byte(fmt.Sprintf("fooval%d", i))})
if err != nil {
t.Fatal(err)
}
}
_, err = lastP.Wait(ctx)
if err != nil {
t.Fatal(err)
}
ms, err := vmap.VerifiedLatestMapState(ctx, nil)
if err != nil {
t.Fatal(err)
}
// Make sure we don't break on non-existent entries
for i := 0; i < numToDo; i++ {
entry, err := vmap.VerifiedGet(ctx, []byte(fmt.Sprintf("baz%d", i)), ms)
if err != nil {
t.Fatal(err)
}
dd := entry.GetLeafInput()
if len(dd) != 0 {
t.Fatal(string(dd))
}
}
for i := 0; i < numToDo; i++ {
entry, err := vmap.VerifiedGet(ctx, []byte(fmt.Sprintf("foo%d", i)), ms)
if err != nil {
t.Fatal(err)
}
dd := entry.GetLeafInput()
if string(dd) != fmt.Sprintf("fooval%d", i) {
t.Fatal(string(dd))
}
}
} | identifier_body |
api_test.go | /*
Copyright 2017 Continusec Pty Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"bytes"
"fmt"
"log"
"math/rand"
"strings"
"testing"
"time"
"github.com/continusec/verifiabledatastructures/merkle"
"github.com/continusec/verifiabledatastructures/mutator/batch"
"github.com/continusec/verifiabledatastructures/mutator/instant"
"github.com/continusec/verifiabledatastructures/oracle/policy"
"github.com/continusec/verifiabledatastructures/pb"
"github.com/continusec/verifiabledatastructures/server/grpc"
"github.com/continusec/verifiabledatastructures/server/httprest"
"github.com/continusec/verifiabledatastructures/storage/badger"
"github.com/continusec/verifiabledatastructures/storage/bolt"
"github.com/continusec/verifiabledatastructures/storage/memory"
"github.com/continusec/verifiabledatastructures/verifiable"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func | (ctx context.Context, t *testing.T, service pb.VerifiableDataStructuresServiceServer) {
account := (&verifiable.Client{
Service: service,
}).Account("999", "secret")
vmap := account.VerifiableMap("testmap")
numToDo := 1000
var lastP verifiable.MapUpdatePromise
var err error
for i := 0; i < numToDo; i++ {
lastP, err = vmap.Set(ctx, []byte(fmt.Sprintf("foo%d", i)), &pb.LeafData{LeafInput: []byte(fmt.Sprintf("fooval%d", i))})
if err != nil {
t.Fatal(err)
}
}
_, err = lastP.Wait(ctx)
if err != nil {
t.Fatal(err)
}
ms, err := vmap.VerifiedLatestMapState(ctx, nil)
if err != nil {
t.Fatal(err)
}
// Make sure we don't break on non-existent entries
for i := 0; i < numToDo; i++ {
entry, err := vmap.VerifiedGet(ctx, []byte(fmt.Sprintf("baz%d", i)), ms)
if err != nil {
t.Fatal(err)
}
dd := entry.GetLeafInput()
if len(dd) != 0 {
t.Fatal(string(dd))
}
}
for i := 0; i < numToDo; i++ {
entry, err := vmap.VerifiedGet(ctx, []byte(fmt.Sprintf("foo%d", i)), ms)
if err != nil {
t.Fatal(err)
}
dd := entry.GetLeafInput()
if string(dd) != fmt.Sprintf("fooval%d", i) {
t.Fatal(string(dd))
}
}
}
func testLog(ctx context.Context, t *testing.T, service pb.VerifiableDataStructuresServiceServer) {
account := (&verifiable.Client{
Service: service,
}).Account("999", "secret")
log := account.VerifiableLog("smoketest")
treeRoot, err := log.TreeHead(ctx, 0)
if !(treeRoot == nil || (treeRoot.TreeSize == 0 && len(treeRoot.RootHash) == 0)) {
t.Fatal("Expecting log to not exist.")
}
aer, err := log.Add(ctx, &pb.LeafData{LeafInput: []byte("foo")})
if err != nil {
t.Fatal("Failed adding item", err)
}
lh := aer.LeafHash()
if !bytes.Equal(lh, merkle.LeafHash([]byte("foo"))) {
t.Fatal("Failed adding item")
}
for treeRoot == nil || treeRoot.TreeSize < 1 {
treeRoot, err = log.TreeHead(ctx, verifiable.Head)
if err != nil {
t.Fatal("Failure getting root hash")
}
}
if !bytes.Equal(treeRoot.RootHash, merkle.LeafHash([]byte("foo"))) {
t.Fatal("Failed calculating tree root")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("fooz")})
if err != nil {
t.Fatal("Failed adding item")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
if err != nil {
t.Fatal("Failed adding item")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("baz")})
if err != nil {
t.Fatal("Failed adding item")
}
p, err := log.Add(ctx, &pb.LeafData{LeafInput: []byte("smez")})
if err != nil {
t.Fatal("Failed adding item")
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 5 {
t.Fatal("Failure getting root hash")
}
entries := make([]*pb.LeafData, treeRoot.TreeSize)
for i := int64(0); i < treeRoot.TreeSize; i++ {
entries[i], err = log.Entry(ctx, i)
if err != nil {
t.Fatal("Failure getting entry")
}
}
if !verifyRootHash(entries, treeRoot.RootHash) {
t.Fatal("Failure verifying root hash")
}
for i := 0; i < 200; i++ {
p, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte(fmt.Sprintf("foo %d", rand.Int()))})
if err != nil {
t.Fatal("Failed adding item")
}
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 205 {
t.Fatal("Failure getting root hash")
}
cnt := 0
for entry := range log.Entries(context.Background(), 0, treeRoot.TreeSize) {
err = log.VerifyInclusion(ctx, treeRoot, merkle.LeafHash(entry.LeafInput))
if err != nil {
t.Fatal("Failure verifiying inclusion")
}
cnt++
}
if cnt != 205 {
t.Fatal("Failed to get all entries")
}
th3, err := log.TreeHead(ctx, 3)
if err != nil {
t.Fatal("Failure getting root hash")
}
th7, err := log.TreeHead(ctx, 7)
if err != nil {
t.Fatal("Failure getting root hash")
}
err = log.VerifyConsistency(ctx, th3, th7)
if err != nil {
t.Fatal("Failure to generate consistency between 3 and 7")
}
rootHashes := generateRootHashes(context.Background(), log.Entries(context.Background(), 0, treeRoot.TreeSize))
i := 0
var last []byte
for rh := range rootHashes {
last = rh
i++
}
if i != 205 {
t.Fatal("Wrong i")
}
if !bytes.Equal(treeRoot.RootHash, last) {
t.Fatal("Failed calculating tree root")
}
for i := 0; i < 200; i++ {
p, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte(fmt.Sprintf("foo %d", rand.Int()))})
if err != nil {
t.Fatal("Failed adding item")
}
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 405 {
t.Fatal("Failure getting root hash")
}
rootHashes = generateRootHashes(context.Background(), log.Entries(context.Background(), 0, treeRoot.TreeSize))
i = 0
for rh := range rootHashes {
last = rh
i++
}
if i != 405 {
t.Fatal("Wrong i")
}
if !bytes.Equal(treeRoot.RootHash, last) {
t.Fatal("Failed calculating tree root")
}
}
func createCleanEmptyBatchMutatorService() pb.VerifiableDataStructuresServiceServer {
db := &memory.TransientStorage{}
return (&verifiable.Service{
AccessPolicy: policy.Open,
Mutator: (&batch.Mutator{
Writer: db,
BatchSize: 1000,
BufferSize: 100000,
Timeout: time.Millisecond * 10,
}).MustCreate(),
Reader: db,
}).MustCreate()
}
func createCleanEmptyService() pb.VerifiableDataStructuresServiceServer {
db := &memory.TransientStorage{}
return (&verifiable.Service{
AccessPolicy: policy.Open,
Mutator: &instant.Mutator{Writer: db},
Reader: db,
}).MustCreate()
}
func expectErr(t *testing.T, exp, err error) {
if exp != err {
t.Fatalf("Wanted %s, got %s", exp, err)
}
}
func expectErrCode(t *testing.T, c codes.Code, err error) {
if err == nil {
t.Fatalf("Wanting bad code")
}
s, ok := status.FromError(err)
if !ok {
t.Fatalf("Bad error type")
}
if s.Code() != c {
t.Fatalf("Bad error ")
}
}
func TestPermissions(t *testing.T) {
db := &memory.TransientStorage{}
c := &verifiable.Client{Service: (&verifiable.Service{
Mutator: &instant.Mutator{Writer: db},
Reader: db,
AccessPolicy: &policy.Static{
Policy: []*pb.ResourceAccount{
{
Id: "0",
Policy: []*pb.AccessPolicy{
{
NameMatch: "foo",
Permissions: []pb.Permission{pb.Permission_PERM_ALL_PERMISSIONS},
ApiKey: "secret",
AllowedFields: []string{"*"},
},
{
NameMatch: "f*",
Permissions: []pb.Permission{pb.Permission_PERM_LOG_READ_ENTRY},
ApiKey: "*",
AllowedFields: []string{"name"},
},
},
},
},
},
}).MustCreate()}
var err error
var v *pb.LeafData
ctx := context.TODO()
_, err = c.Account("0", "secr3t").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("0", "secret").VerifiableLog("fofo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("1", "secret").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("0", "secret").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErr(t, nil, err)
v, err = verifiable.CreateRedactableJSONLeafData([]byte("{\"name\":\"adam\",\"dob\":\"100000\"}"))
expectErr(t, nil, err)
p, err := c.Account("0", "secret").VerifiableLog("foo").Add(ctx, v)
expectErr(t, nil, err)
_, err = p.Wait(ctx)
expectErr(t, nil, err)
// Test less fields
resp, err := c.Account("0", "").VerifiableLog("foo").Entry(ctx, 1)
expectErr(t, nil, err)
st := string(resp.ExtraData)
if !strings.Contains(st, "\"dob\":\"***REDACTED***") {
t.Fatal("Expected redacted")
}
if !strings.Contains(st, "adam") {
t.Fatal("Expected name")
}
if strings.Contains(st, "100000") {
t.Fatal("Value should not appear (unless incredibly unlucky with random generator)")
}
// Test more fields
resp, err = c.Account("0", "secret").VerifiableLog("foo").Entry(ctx, 1)
expectErr(t, nil, err)
st = string(resp.ExtraData)
if strings.Contains(st, "\"dob\":\"***REDACTED***") {
t.Fatal("Not expected redacted")
}
if !strings.Contains(st, "adam") {
t.Fatal("Expected name")
}
if !strings.Contains(st, "100000") {
t.Fatal("Value should appear")
}
}
func runSmokeTests(c pb.VerifiableDataStructuresServiceServer, t *testing.T) {
// First wrap it in REST
go httprest.StartServer(&pb.ServerConfig{
InsecureServerForTesting: true,
RestListenBind: ":8092",
}, c)
time.Sleep(50 * time.Millisecond)
// Get client to it
restClient := (&httprest.Client{
BaseURL: "http://localhost:8092",
}).MustDial()
// Now wrap that in a gRPC server
go grpc.StartServer(&pb.ServerConfig{
InsecureServerForTesting: true,
GrpcListenBind: ":8081",
GrpcListenProtocol: "tcp4",
}, restClient)
time.Sleep(50 * time.Millisecond)
// And grab a client for that
d := (&grpc.Client{
Address: "localhost:8081",
NoGrpcSecurity: true,
}).MustDial()
testLog(context.TODO(), t, d)
testMap(context.TODO(), t, d)
}
func TestClientComms(t *testing.T) {
runSmokeTests(createCleanEmptyService(), t)
}
func TestBatchMutator(t *testing.T) {
d := createCleanEmptyBatchMutatorService()
testLog(context.TODO(), t, d)
testMap(context.TODO(), t, d)
}
// GenerateRootHashes is a utility function that emits a channel of root hashes
// given a channel of input values. This is useful for some unit tests.
func generateRootHashes(ctx context.Context, input <-chan *pb.LeafData) <-chan []byte {
rv := make(chan []byte)
go func() {
defer close(rv)
index := 0
stack := make([][]byte, 0)
for {
select {
case <-ctx.Done():
return
case b, ok := <-input:
if !ok {
return
}
stack = append(stack, merkle.LeafHash(b.GetLeafInput()))
}
for j := index; (j & 1) == 1; j >>= 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
rh := stack[len(stack)-1]
for j := len(stack) - 2; j >= 0; j-- {
rh = merkle.NodeHash(stack[j], rh)
}
select {
case <-ctx.Done():
return
case rv <- rh:
index++
}
}
}()
return rv
}
func verifyRootHash(entries []*pb.LeafData, answer []byte) bool {
stack := make([][]byte, 0)
for i, b := range entries {
stack = append(stack, merkle.LeafHash(b.LeafInput))
for j := i; (j & 1) == 1; j >>= 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
}
for len(stack) > 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
return bytes.Equal(stack[0], answer)
}
// TestObjectsMeetReq simply includes objects that might compile, but not comply with APIs
// since possibly we didn't test them properly otherwise.
func TestObjectsMeetReq(t *testing.T) {
var kr verifiable.StorageReader
var kw verifiable.StorageWriter
var m verifiable.MutatorService
var o verifiable.AuthorizationOracle
kr = &memory.TransientStorage{}
kw = &memory.TransientStorage{}
kr = &bolt.Storage{}
kw = &bolt.Storage{}
kr = &badger.Storage{}
kw = &badger.Storage{}
m = &instant.Mutator{}
m = (&batch.Mutator{}).MustCreate()
o = policy.Open
o = &policy.Static{}
log.Println(kr, kw, m, o) // "use" these so that go compiler will be quiet
}
| testMap | identifier_name |
api_test.go | /*
Copyright 2017 Continusec Pty Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"bytes"
"fmt"
"log"
"math/rand"
"strings"
"testing"
"time"
"github.com/continusec/verifiabledatastructures/merkle"
"github.com/continusec/verifiabledatastructures/mutator/batch"
"github.com/continusec/verifiabledatastructures/mutator/instant"
"github.com/continusec/verifiabledatastructures/oracle/policy"
"github.com/continusec/verifiabledatastructures/pb"
"github.com/continusec/verifiabledatastructures/server/grpc"
"github.com/continusec/verifiabledatastructures/server/httprest"
"github.com/continusec/verifiabledatastructures/storage/badger"
"github.com/continusec/verifiabledatastructures/storage/bolt"
"github.com/continusec/verifiabledatastructures/storage/memory"
"github.com/continusec/verifiabledatastructures/verifiable"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func testMap(ctx context.Context, t *testing.T, service pb.VerifiableDataStructuresServiceServer) {
account := (&verifiable.Client{
Service: service,
}).Account("999", "secret")
vmap := account.VerifiableMap("testmap")
numToDo := 1000
var lastP verifiable.MapUpdatePromise
var err error
for i := 0; i < numToDo; i++ {
lastP, err = vmap.Set(ctx, []byte(fmt.Sprintf("foo%d", i)), &pb.LeafData{LeafInput: []byte(fmt.Sprintf("fooval%d", i))})
if err != nil {
t.Fatal(err)
}
}
_, err = lastP.Wait(ctx)
if err != nil {
t.Fatal(err)
}
ms, err := vmap.VerifiedLatestMapState(ctx, nil)
if err != nil {
t.Fatal(err)
}
// Make sure we don't break on non-existent entries
for i := 0; i < numToDo; i++ {
entry, err := vmap.VerifiedGet(ctx, []byte(fmt.Sprintf("baz%d", i)), ms)
if err != nil {
t.Fatal(err)
}
dd := entry.GetLeafInput()
if len(dd) != 0 {
t.Fatal(string(dd))
}
}
for i := 0; i < numToDo; i++ {
entry, err := vmap.VerifiedGet(ctx, []byte(fmt.Sprintf("foo%d", i)), ms)
if err != nil {
t.Fatal(err)
}
dd := entry.GetLeafInput()
if string(dd) != fmt.Sprintf("fooval%d", i) {
t.Fatal(string(dd))
}
}
}
func testLog(ctx context.Context, t *testing.T, service pb.VerifiableDataStructuresServiceServer) {
account := (&verifiable.Client{
Service: service,
}).Account("999", "secret")
log := account.VerifiableLog("smoketest")
treeRoot, err := log.TreeHead(ctx, 0)
if !(treeRoot == nil || (treeRoot.TreeSize == 0 && len(treeRoot.RootHash) == 0)) {
t.Fatal("Expecting log to not exist.")
}
aer, err := log.Add(ctx, &pb.LeafData{LeafInput: []byte("foo")})
if err != nil {
t.Fatal("Failed adding item", err)
}
lh := aer.LeafHash()
if !bytes.Equal(lh, merkle.LeafHash([]byte("foo"))) {
t.Fatal("Failed adding item")
}
for treeRoot == nil || treeRoot.TreeSize < 1 {
treeRoot, err = log.TreeHead(ctx, verifiable.Head)
if err != nil {
t.Fatal("Failure getting root hash")
}
}
if !bytes.Equal(treeRoot.RootHash, merkle.LeafHash([]byte("foo"))) {
t.Fatal("Failed calculating tree root")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("fooz")})
if err != nil {
t.Fatal("Failed adding item")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
if err != nil {
t.Fatal("Failed adding item")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("baz")})
if err != nil {
t.Fatal("Failed adding item")
}
p, err := log.Add(ctx, &pb.LeafData{LeafInput: []byte("smez")})
if err != nil {
t.Fatal("Failed adding item")
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 5 {
t.Fatal("Failure getting root hash")
}
entries := make([]*pb.LeafData, treeRoot.TreeSize)
for i := int64(0); i < treeRoot.TreeSize; i++ {
entries[i], err = log.Entry(ctx, i)
if err != nil {
t.Fatal("Failure getting entry")
}
}
if !verifyRootHash(entries, treeRoot.RootHash) {
t.Fatal("Failure verifying root hash")
}
for i := 0; i < 200; i++ {
p, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte(fmt.Sprintf("foo %d", rand.Int()))})
if err != nil {
t.Fatal("Failed adding item")
}
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 205 {
t.Fatal("Failure getting root hash")
}
cnt := 0
for entry := range log.Entries(context.Background(), 0, treeRoot.TreeSize) {
err = log.VerifyInclusion(ctx, treeRoot, merkle.LeafHash(entry.LeafInput))
if err != nil {
t.Fatal("Failure verifiying inclusion")
}
cnt++
}
if cnt != 205 {
t.Fatal("Failed to get all entries")
}
th3, err := log.TreeHead(ctx, 3)
if err != nil {
t.Fatal("Failure getting root hash")
}
th7, err := log.TreeHead(ctx, 7)
if err != nil {
t.Fatal("Failure getting root hash")
}
err = log.VerifyConsistency(ctx, th3, th7)
if err != nil {
t.Fatal("Failure to generate consistency between 3 and 7")
}
rootHashes := generateRootHashes(context.Background(), log.Entries(context.Background(), 0, treeRoot.TreeSize))
i := 0
var last []byte
for rh := range rootHashes {
last = rh
i++
}
if i != 205 {
t.Fatal("Wrong i")
}
if !bytes.Equal(treeRoot.RootHash, last) {
t.Fatal("Failed calculating tree root")
}
for i := 0; i < 200; i++ {
p, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte(fmt.Sprintf("foo %d", rand.Int()))})
if err != nil {
t.Fatal("Failed adding item")
}
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 405 {
t.Fatal("Failure getting root hash")
}
rootHashes = generateRootHashes(context.Background(), log.Entries(context.Background(), 0, treeRoot.TreeSize))
i = 0
for rh := range rootHashes {
last = rh
i++
}
if i != 405 {
t.Fatal("Wrong i")
}
if !bytes.Equal(treeRoot.RootHash, last) {
t.Fatal("Failed calculating tree root")
}
}
func createCleanEmptyBatchMutatorService() pb.VerifiableDataStructuresServiceServer {
db := &memory.TransientStorage{}
return (&verifiable.Service{
AccessPolicy: policy.Open,
Mutator: (&batch.Mutator{
Writer: db,
BatchSize: 1000,
BufferSize: 100000,
Timeout: time.Millisecond * 10,
}).MustCreate(),
Reader: db,
}).MustCreate()
}
func createCleanEmptyService() pb.VerifiableDataStructuresServiceServer {
db := &memory.TransientStorage{}
return (&verifiable.Service{
AccessPolicy: policy.Open,
Mutator: &instant.Mutator{Writer: db},
Reader: db,
}).MustCreate()
}
func expectErr(t *testing.T, exp, err error) {
if exp != err {
t.Fatalf("Wanted %s, got %s", exp, err)
}
}
func expectErrCode(t *testing.T, c codes.Code, err error) {
if err == nil {
t.Fatalf("Wanting bad code")
}
s, ok := status.FromError(err)
if !ok {
t.Fatalf("Bad error type")
}
if s.Code() != c {
t.Fatalf("Bad error ")
}
}
func TestPermissions(t *testing.T) {
db := &memory.TransientStorage{}
c := &verifiable.Client{Service: (&verifiable.Service{
Mutator: &instant.Mutator{Writer: db},
Reader: db,
AccessPolicy: &policy.Static{
Policy: []*pb.ResourceAccount{
{
Id: "0",
Policy: []*pb.AccessPolicy{
{
NameMatch: "foo",
Permissions: []pb.Permission{pb.Permission_PERM_ALL_PERMISSIONS},
ApiKey: "secret",
AllowedFields: []string{"*"},
},
{
NameMatch: "f*",
Permissions: []pb.Permission{pb.Permission_PERM_LOG_READ_ENTRY},
ApiKey: "*",
AllowedFields: []string{"name"},
},
},
},
},
},
}).MustCreate()}
var err error
var v *pb.LeafData
ctx := context.TODO()
_, err = c.Account("0", "secr3t").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("0", "secret").VerifiableLog("fofo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("1", "secret").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("0", "secret").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErr(t, nil, err)
v, err = verifiable.CreateRedactableJSONLeafData([]byte("{\"name\":\"adam\",\"dob\":\"100000\"}"))
expectErr(t, nil, err)
p, err := c.Account("0", "secret").VerifiableLog("foo").Add(ctx, v)
expectErr(t, nil, err)
_, err = p.Wait(ctx)
expectErr(t, nil, err)
// Test less fields
resp, err := c.Account("0", "").VerifiableLog("foo").Entry(ctx, 1)
expectErr(t, nil, err)
st := string(resp.ExtraData)
if !strings.Contains(st, "\"dob\":\"***REDACTED***") {
t.Fatal("Expected redacted")
}
if !strings.Contains(st, "adam") {
t.Fatal("Expected name")
}
if strings.Contains(st, "100000") {
t.Fatal("Value should not appear (unless incredibly unlucky with random generator)")
}
// Test more fields
resp, err = c.Account("0", "secret").VerifiableLog("foo").Entry(ctx, 1)
expectErr(t, nil, err)
st = string(resp.ExtraData)
if strings.Contains(st, "\"dob\":\"***REDACTED***") {
t.Fatal("Not expected redacted")
}
if !strings.Contains(st, "adam") {
t.Fatal("Expected name")
}
if !strings.Contains(st, "100000") {
t.Fatal("Value should appear")
}
}
func runSmokeTests(c pb.VerifiableDataStructuresServiceServer, t *testing.T) {
// First wrap it in REST
go httprest.StartServer(&pb.ServerConfig{
InsecureServerForTesting: true,
RestListenBind: ":8092",
}, c)
time.Sleep(50 * time.Millisecond)
// Get client to it
restClient := (&httprest.Client{
BaseURL: "http://localhost:8092",
}).MustDial()
// Now wrap that in a gRPC server
go grpc.StartServer(&pb.ServerConfig{
InsecureServerForTesting: true,
GrpcListenBind: ":8081",
GrpcListenProtocol: "tcp4",
}, restClient)
time.Sleep(50 * time.Millisecond)
// And grab a client for that
d := (&grpc.Client{
Address: "localhost:8081",
NoGrpcSecurity: true,
}).MustDial()
testLog(context.TODO(), t, d)
testMap(context.TODO(), t, d)
}
func TestClientComms(t *testing.T) {
runSmokeTests(createCleanEmptyService(), t)
}
func TestBatchMutator(t *testing.T) {
d := createCleanEmptyBatchMutatorService()
testLog(context.TODO(), t, d)
testMap(context.TODO(), t, d)
}
// GenerateRootHashes is a utility function that emits a channel of root hashes
// given a channel of input values. This is useful for some unit tests.
func generateRootHashes(ctx context.Context, input <-chan *pb.LeafData) <-chan []byte {
rv := make(chan []byte)
go func() {
defer close(rv)
index := 0
stack := make([][]byte, 0)
for {
select {
case <-ctx.Done():
return
case b, ok := <-input:
if !ok {
return
}
stack = append(stack, merkle.LeafHash(b.GetLeafInput()))
}
for j := index; (j & 1) == 1; j >>= 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
rh := stack[len(stack)-1]
for j := len(stack) - 2; j >= 0; j-- {
rh = merkle.NodeHash(stack[j], rh)
}
select {
case <-ctx.Done():
return
case rv <- rh:
index++
}
}
}()
return rv
}
func verifyRootHash(entries []*pb.LeafData, answer []byte) bool {
stack := make([][]byte, 0)
for i, b := range entries {
stack = append(stack, merkle.LeafHash(b.LeafInput))
for j := i; (j & 1) == 1; j >>= 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
}
for len(stack) > 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
return bytes.Equal(stack[0], answer)
}
// TestObjectsMeetReq simply includes objects that might compile, but not comply with APIs
// since possibly we didn't test them properly otherwise.
func TestObjectsMeetReq(t *testing.T) {
var kr verifiable.StorageReader
var kw verifiable.StorageWriter
var m verifiable.MutatorService
var o verifiable.AuthorizationOracle
kr = &memory.TransientStorage{} | kw = &memory.TransientStorage{}
kr = &bolt.Storage{}
kw = &bolt.Storage{}
kr = &badger.Storage{}
kw = &badger.Storage{}
m = &instant.Mutator{}
m = (&batch.Mutator{}).MustCreate()
o = policy.Open
o = &policy.Static{}
log.Println(kr, kw, m, o) // "use" these so that go compiler will be quiet
} | random_line_split | |
api_test.go | /*
Copyright 2017 Continusec Pty Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"bytes"
"fmt"
"log"
"math/rand"
"strings"
"testing"
"time"
"github.com/continusec/verifiabledatastructures/merkle"
"github.com/continusec/verifiabledatastructures/mutator/batch"
"github.com/continusec/verifiabledatastructures/mutator/instant"
"github.com/continusec/verifiabledatastructures/oracle/policy"
"github.com/continusec/verifiabledatastructures/pb"
"github.com/continusec/verifiabledatastructures/server/grpc"
"github.com/continusec/verifiabledatastructures/server/httprest"
"github.com/continusec/verifiabledatastructures/storage/badger"
"github.com/continusec/verifiabledatastructures/storage/bolt"
"github.com/continusec/verifiabledatastructures/storage/memory"
"github.com/continusec/verifiabledatastructures/verifiable"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func testMap(ctx context.Context, t *testing.T, service pb.VerifiableDataStructuresServiceServer) {
account := (&verifiable.Client{
Service: service,
}).Account("999", "secret")
vmap := account.VerifiableMap("testmap")
numToDo := 1000
var lastP verifiable.MapUpdatePromise
var err error
for i := 0; i < numToDo; i++ {
lastP, err = vmap.Set(ctx, []byte(fmt.Sprintf("foo%d", i)), &pb.LeafData{LeafInput: []byte(fmt.Sprintf("fooval%d", i))})
if err != nil {
t.Fatal(err)
}
}
_, err = lastP.Wait(ctx)
if err != nil {
t.Fatal(err)
}
ms, err := vmap.VerifiedLatestMapState(ctx, nil)
if err != nil {
t.Fatal(err)
}
// Make sure we don't break on non-existent entries
for i := 0; i < numToDo; i++ {
entry, err := vmap.VerifiedGet(ctx, []byte(fmt.Sprintf("baz%d", i)), ms)
if err != nil {
t.Fatal(err)
}
dd := entry.GetLeafInput()
if len(dd) != 0 {
t.Fatal(string(dd))
}
}
for i := 0; i < numToDo; i++ {
entry, err := vmap.VerifiedGet(ctx, []byte(fmt.Sprintf("foo%d", i)), ms)
if err != nil {
t.Fatal(err)
}
dd := entry.GetLeafInput()
if string(dd) != fmt.Sprintf("fooval%d", i) {
t.Fatal(string(dd))
}
}
}
func testLog(ctx context.Context, t *testing.T, service pb.VerifiableDataStructuresServiceServer) {
account := (&verifiable.Client{
Service: service,
}).Account("999", "secret")
log := account.VerifiableLog("smoketest")
treeRoot, err := log.TreeHead(ctx, 0)
if !(treeRoot == nil || (treeRoot.TreeSize == 0 && len(treeRoot.RootHash) == 0)) {
t.Fatal("Expecting log to not exist.")
}
aer, err := log.Add(ctx, &pb.LeafData{LeafInput: []byte("foo")})
if err != nil {
t.Fatal("Failed adding item", err)
}
lh := aer.LeafHash()
if !bytes.Equal(lh, merkle.LeafHash([]byte("foo"))) {
t.Fatal("Failed adding item")
}
for treeRoot == nil || treeRoot.TreeSize < 1 {
treeRoot, err = log.TreeHead(ctx, verifiable.Head)
if err != nil {
t.Fatal("Failure getting root hash")
}
}
if !bytes.Equal(treeRoot.RootHash, merkle.LeafHash([]byte("foo"))) {
t.Fatal("Failed calculating tree root")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("fooz")})
if err != nil {
t.Fatal("Failed adding item")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
if err != nil {
t.Fatal("Failed adding item")
}
_, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte("baz")})
if err != nil {
t.Fatal("Failed adding item")
}
p, err := log.Add(ctx, &pb.LeafData{LeafInput: []byte("smez")})
if err != nil {
t.Fatal("Failed adding item")
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 5 {
t.Fatal("Failure getting root hash")
}
entries := make([]*pb.LeafData, treeRoot.TreeSize)
for i := int64(0); i < treeRoot.TreeSize; i++ {
entries[i], err = log.Entry(ctx, i)
if err != nil {
t.Fatal("Failure getting entry")
}
}
if !verifyRootHash(entries, treeRoot.RootHash) {
t.Fatal("Failure verifying root hash")
}
for i := 0; i < 200; i++ {
p, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte(fmt.Sprintf("foo %d", rand.Int()))})
if err != nil {
t.Fatal("Failed adding item")
}
}
treeRoot, err = p.Wait(ctx)
if err != nil |
if treeRoot.TreeSize != 205 {
t.Fatal("Failure getting root hash")
}
cnt := 0
for entry := range log.Entries(context.Background(), 0, treeRoot.TreeSize) {
err = log.VerifyInclusion(ctx, treeRoot, merkle.LeafHash(entry.LeafInput))
if err != nil {
t.Fatal("Failure verifiying inclusion")
}
cnt++
}
if cnt != 205 {
t.Fatal("Failed to get all entries")
}
th3, err := log.TreeHead(ctx, 3)
if err != nil {
t.Fatal("Failure getting root hash")
}
th7, err := log.TreeHead(ctx, 7)
if err != nil {
t.Fatal("Failure getting root hash")
}
err = log.VerifyConsistency(ctx, th3, th7)
if err != nil {
t.Fatal("Failure to generate consistency between 3 and 7")
}
rootHashes := generateRootHashes(context.Background(), log.Entries(context.Background(), 0, treeRoot.TreeSize))
i := 0
var last []byte
for rh := range rootHashes {
last = rh
i++
}
if i != 205 {
t.Fatal("Wrong i")
}
if !bytes.Equal(treeRoot.RootHash, last) {
t.Fatal("Failed calculating tree root")
}
for i := 0; i < 200; i++ {
p, err = log.Add(ctx, &pb.LeafData{LeafInput: []byte(fmt.Sprintf("foo %d", rand.Int()))})
if err != nil {
t.Fatal("Failed adding item")
}
}
treeRoot, err = p.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if treeRoot.TreeSize != 405 {
t.Fatal("Failure getting root hash")
}
rootHashes = generateRootHashes(context.Background(), log.Entries(context.Background(), 0, treeRoot.TreeSize))
i = 0
for rh := range rootHashes {
last = rh
i++
}
if i != 405 {
t.Fatal("Wrong i")
}
if !bytes.Equal(treeRoot.RootHash, last) {
t.Fatal("Failed calculating tree root")
}
}
func createCleanEmptyBatchMutatorService() pb.VerifiableDataStructuresServiceServer {
db := &memory.TransientStorage{}
return (&verifiable.Service{
AccessPolicy: policy.Open,
Mutator: (&batch.Mutator{
Writer: db,
BatchSize: 1000,
BufferSize: 100000,
Timeout: time.Millisecond * 10,
}).MustCreate(),
Reader: db,
}).MustCreate()
}
func createCleanEmptyService() pb.VerifiableDataStructuresServiceServer {
db := &memory.TransientStorage{}
return (&verifiable.Service{
AccessPolicy: policy.Open,
Mutator: &instant.Mutator{Writer: db},
Reader: db,
}).MustCreate()
}
func expectErr(t *testing.T, exp, err error) {
if exp != err {
t.Fatalf("Wanted %s, got %s", exp, err)
}
}
func expectErrCode(t *testing.T, c codes.Code, err error) {
if err == nil {
t.Fatalf("Wanting bad code")
}
s, ok := status.FromError(err)
if !ok {
t.Fatalf("Bad error type")
}
if s.Code() != c {
t.Fatalf("Bad error ")
}
}
func TestPermissions(t *testing.T) {
db := &memory.TransientStorage{}
c := &verifiable.Client{Service: (&verifiable.Service{
Mutator: &instant.Mutator{Writer: db},
Reader: db,
AccessPolicy: &policy.Static{
Policy: []*pb.ResourceAccount{
{
Id: "0",
Policy: []*pb.AccessPolicy{
{
NameMatch: "foo",
Permissions: []pb.Permission{pb.Permission_PERM_ALL_PERMISSIONS},
ApiKey: "secret",
AllowedFields: []string{"*"},
},
{
NameMatch: "f*",
Permissions: []pb.Permission{pb.Permission_PERM_LOG_READ_ENTRY},
ApiKey: "*",
AllowedFields: []string{"name"},
},
},
},
},
},
}).MustCreate()}
var err error
var v *pb.LeafData
ctx := context.TODO()
_, err = c.Account("0", "secr3t").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("0", "secret").VerifiableLog("fofo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("1", "secret").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErrCode(t, codes.PermissionDenied, err)
_, err = c.Account("0", "secret").VerifiableLog("foo").Add(ctx, &pb.LeafData{LeafInput: []byte("bar")})
expectErr(t, nil, err)
v, err = verifiable.CreateRedactableJSONLeafData([]byte("{\"name\":\"adam\",\"dob\":\"100000\"}"))
expectErr(t, nil, err)
p, err := c.Account("0", "secret").VerifiableLog("foo").Add(ctx, v)
expectErr(t, nil, err)
_, err = p.Wait(ctx)
expectErr(t, nil, err)
// Test less fields
resp, err := c.Account("0", "").VerifiableLog("foo").Entry(ctx, 1)
expectErr(t, nil, err)
st := string(resp.ExtraData)
if !strings.Contains(st, "\"dob\":\"***REDACTED***") {
t.Fatal("Expected redacted")
}
if !strings.Contains(st, "adam") {
t.Fatal("Expected name")
}
if strings.Contains(st, "100000") {
t.Fatal("Value should not appear (unless incredibly unlucky with random generator)")
}
// Test more fields
resp, err = c.Account("0", "secret").VerifiableLog("foo").Entry(ctx, 1)
expectErr(t, nil, err)
st = string(resp.ExtraData)
if strings.Contains(st, "\"dob\":\"***REDACTED***") {
t.Fatal("Not expected redacted")
}
if !strings.Contains(st, "adam") {
t.Fatal("Expected name")
}
if !strings.Contains(st, "100000") {
t.Fatal("Value should appear")
}
}
func runSmokeTests(c pb.VerifiableDataStructuresServiceServer, t *testing.T) {
// First wrap it in REST
go httprest.StartServer(&pb.ServerConfig{
InsecureServerForTesting: true,
RestListenBind: ":8092",
}, c)
time.Sleep(50 * time.Millisecond)
// Get client to it
restClient := (&httprest.Client{
BaseURL: "http://localhost:8092",
}).MustDial()
// Now wrap that in a gRPC server
go grpc.StartServer(&pb.ServerConfig{
InsecureServerForTesting: true,
GrpcListenBind: ":8081",
GrpcListenProtocol: "tcp4",
}, restClient)
time.Sleep(50 * time.Millisecond)
// And grab a client for that
d := (&grpc.Client{
Address: "localhost:8081",
NoGrpcSecurity: true,
}).MustDial()
testLog(context.TODO(), t, d)
testMap(context.TODO(), t, d)
}
func TestClientComms(t *testing.T) {
runSmokeTests(createCleanEmptyService(), t)
}
func TestBatchMutator(t *testing.T) {
d := createCleanEmptyBatchMutatorService()
testLog(context.TODO(), t, d)
testMap(context.TODO(), t, d)
}
// GenerateRootHashes is a utility function that emits a channel of root hashes
// given a channel of input values. This is useful for some unit tests.
func generateRootHashes(ctx context.Context, input <-chan *pb.LeafData) <-chan []byte {
rv := make(chan []byte)
go func() {
defer close(rv)
index := 0
stack := make([][]byte, 0)
for {
select {
case <-ctx.Done():
return
case b, ok := <-input:
if !ok {
return
}
stack = append(stack, merkle.LeafHash(b.GetLeafInput()))
}
for j := index; (j & 1) == 1; j >>= 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
rh := stack[len(stack)-1]
for j := len(stack) - 2; j >= 0; j-- {
rh = merkle.NodeHash(stack[j], rh)
}
select {
case <-ctx.Done():
return
case rv <- rh:
index++
}
}
}()
return rv
}
func verifyRootHash(entries []*pb.LeafData, answer []byte) bool {
stack := make([][]byte, 0)
for i, b := range entries {
stack = append(stack, merkle.LeafHash(b.LeafInput))
for j := i; (j & 1) == 1; j >>= 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
}
for len(stack) > 1 {
stack = append(stack[:len(stack)-2], merkle.NodeHash(stack[len(stack)-2], stack[len(stack)-1]))
}
return bytes.Equal(stack[0], answer)
}
// TestObjectsMeetReq simply includes objects that might compile, but not comply with APIs
// since possibly we didn't test them properly otherwise.
func TestObjectsMeetReq(t *testing.T) {
var kr verifiable.StorageReader
var kw verifiable.StorageWriter
var m verifiable.MutatorService
var o verifiable.AuthorizationOracle
kr = &memory.TransientStorage{}
kw = &memory.TransientStorage{}
kr = &bolt.Storage{}
kw = &bolt.Storage{}
kr = &badger.Storage{}
kw = &badger.Storage{}
m = &instant.Mutator{}
m = (&batch.Mutator{}).MustCreate()
o = policy.Open
o = &policy.Static{}
log.Println(kr, kw, m, o) // "use" these so that go compiler will be quiet
}
| {
t.Fatal(err)
} | conditional_block |
client.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package packet
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"os"
"strconv"
"strings"
"text/template"
metal "github.com/equinix-labs/metal-go/metal/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
infrav1 "sigs.k8s.io/cluster-api-provider-packet/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-packet/pkg/cloud/packet/scope"
"sigs.k8s.io/cluster-api-provider-packet/version"
)
const (
apiTokenVarName = "PACKET_API_KEY" //nolint:gosec
ipxeOS = "custom_ipxe"
envVarLocalASN = "METAL_LOCAL_ASN"
envVarBGPPass = "METAL_BGP_PASS" //nolint:gosec
DefaultLocalASN = 65000
legacyDebugVar = "PACKNGO_DEBUG" // For backwards compatibility with packngo
)
var (
clientName = "CAPP-v1beta1"
clientUAFormat = "cluster-api-provider-packet/%s %s"
ErrControlPlanEndpointNotFound = errors.New("control plane not found")
ErrElasticIPQuotaExceeded = errors.New("could not create an Elastic IP due to quota limits on the account, please contact Equinix Metal support")
ErrInvalidIP = errors.New("invalid IP")
ErrMissingEnvVar = errors.New("missing required env var")
ErrInvalidRequest = errors.New("invalid request")
)
type Client struct {
*metal.APIClient
}
// NewClient creates a new Client for the given Packet credentials
func NewClient(packetAPIKey string) *Client |
func GetClient() (*Client, error) {
token := os.Getenv(apiTokenVarName)
if token == "" {
return nil, fmt.Errorf("%w: %s", ErrMissingEnvVar, apiTokenVarName)
}
return NewClient(token), nil
}
func (p *Client) GetDevice(ctx context.Context, deviceID string) (*metal.Device, *http.Response, error) {
dev, resp, err := p.DevicesApi.FindDeviceById(ctx, deviceID).Execute()
return dev, resp, err
}
type CreateDeviceRequest struct {
ExtraTags []string
MachineScope *scope.MachineScope
ControlPlaneEndpoint string
}
func (p *Client) NewDevice(ctx context.Context, req CreateDeviceRequest) (*metal.Device, error) {
packetMachineSpec := req.MachineScope.PacketMachine.Spec
packetClusterSpec := req.MachineScope.PacketCluster.Spec
if packetMachineSpec.IPXEUrl != "" {
// Error if pxe url and OS conflict
if packetMachineSpec.OS != ipxeOS {
return nil, fmt.Errorf("os should be set to custom_pxe when using pxe urls: %w", ErrInvalidRequest)
}
}
userDataRaw, err := req.MachineScope.GetRawBootstrapData(ctx)
if err != nil {
return nil, fmt.Errorf("unable to retrieve bootstrap data from secret: %w", err)
}
stringWriter := &strings.Builder{}
userData := string(userDataRaw)
userDataValues := map[string]interface{}{
"kubernetesVersion": pointer.StringPtrDerefOr(req.MachineScope.Machine.Spec.Version, ""),
}
tags := make([]string, 0, len(packetMachineSpec.Tags)+len(req.ExtraTags))
copy(tags, packetMachineSpec.Tags)
tags = append(tags, req.ExtraTags...)
tmpl, err := template.New("user-data").Parse(userData)
if err != nil {
return nil, fmt.Errorf("error parsing userdata template: %w", err)
}
if req.MachineScope.IsControlPlane() {
// control plane machines should get the API key injected
userDataValues["apiKey"] = p.APIClient.GetConfig().DefaultHeader["X-Auth-Token"]
if req.ControlPlaneEndpoint != "" {
userDataValues["controlPlaneEndpoint"] = req.ControlPlaneEndpoint
}
tags = append(tags, infrav1.ControlPlaneTag)
} else {
tags = append(tags, infrav1.WorkerTag)
}
if err := tmpl.Execute(stringWriter, userDataValues); err != nil {
return nil, fmt.Errorf("error executing userdata template: %w", err)
}
userData = stringWriter.String()
// If Metro or Facility are specified at the Machine level, we ignore the
// values set at the Cluster level
facility := packetClusterSpec.Facility
metro := packetClusterSpec.Metro
if packetMachineSpec.Facility != "" || packetMachineSpec.Metro != "" {
metro = packetMachineSpec.Metro
facility = packetMachineSpec.Facility
}
hostname := req.MachineScope.Name()
serverCreateOpts := metal.CreateDeviceRequest{}
if facility != "" {
serverCreateOpts.DeviceCreateInFacilityInput = &metal.DeviceCreateInFacilityInput{
Hostname: &hostname,
Facility: []string{facility},
BillingCycle: &req.MachineScope.PacketMachine.Spec.BillingCycle,
Plan: req.MachineScope.PacketMachine.Spec.MachineType,
OperatingSystem: req.MachineScope.PacketMachine.Spec.OS,
IpxeScriptUrl: &req.MachineScope.PacketMachine.Spec.IPXEUrl,
Tags: tags,
Userdata: &userData,
}
} else {
serverCreateOpts.DeviceCreateInMetroInput = &metal.DeviceCreateInMetroInput{
Hostname: &hostname,
Metro: metro,
BillingCycle: &req.MachineScope.PacketMachine.Spec.BillingCycle,
Plan: req.MachineScope.PacketMachine.Spec.MachineType,
OperatingSystem: req.MachineScope.PacketMachine.Spec.OS,
IpxeScriptUrl: &req.MachineScope.PacketMachine.Spec.IPXEUrl,
Tags: tags,
Userdata: &userData,
}
}
reservationIDs := strings.Split(packetMachineSpec.HardwareReservationID, ",")
// If there are no reservationIDs to process, go ahead and return early
if len(reservationIDs) <= 1 {
apiRequest := p.DevicesApi.CreateDevice(ctx, req.MachineScope.PacketCluster.Spec.ProjectID)
dev, _, err := apiRequest.CreateDeviceRequest(serverCreateOpts).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
return dev, err
}
// Do a naive loop through the list of reservationIDs, continuing if we hit any error
// TODO: if we can determine how to differentiate a failure based on the reservation
// being in use vs other errors, then we can make this a bit smarter in the future.
var lastErr error
for _, resID := range reservationIDs {
reservationID := resID
serverCreateOpts.DeviceCreateInFacilityInput.HardwareReservationId = &reservationID
apiRequest := p.DevicesApi.CreateDevice(ctx, req.MachineScope.PacketCluster.Spec.ProjectID)
dev, _, err := apiRequest.CreateDeviceRequest(serverCreateOpts).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
lastErr = err
continue
}
return dev, nil
}
return nil, lastErr
}
func (p *Client) GetDeviceAddresses(device *metal.Device) []corev1.NodeAddress {
addrs := make([]corev1.NodeAddress, 0)
for _, addr := range device.IpAddresses {
addrType := corev1.NodeInternalIP
if addr.GetPublic() {
addrType = corev1.NodeExternalIP
}
a := corev1.NodeAddress{
Type: addrType,
Address: addr.GetAddress(),
}
addrs = append(addrs, a)
}
return addrs
}
func (p *Client) GetDeviceByTags(ctx context.Context, project string, tags []string) (*metal.Device, error) {
devices, _, err := p.DevicesApi.FindProjectDevices(ctx, project).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return nil, fmt.Errorf("error retrieving devices: %w", err)
}
// returns the first one that matches all of the tags
for _, device := range devices.Devices {
if ItemsInList(device.Tags, tags) {
return &device, nil
}
}
return nil, nil
}
// CreateIP reserves an IP via Packet API. The request fails straight if no IP are available for the specified project.
// This prevent the cluster to become ready.
func (p *Client) CreateIP(ctx context.Context, namespace, clusterName, projectID, facility, metro string) (net.IP, error) {
failOnApprovalRequired := true
req := metal.IPReservationRequestInput{
Type: "public_ipv4",
Quantity: 1,
Facility: &facility,
Metro: &metro,
FailOnApprovalRequired: &failOnApprovalRequired,
Tags: []string{generateElasticIPIdentifier(clusterName)},
}
apiRequest := p.IPAddressesApi.RequestIPReservation(ctx, projectID)
r, resp, err := apiRequest.RequestIPReservationRequest(metal.RequestIPReservationRequest{
IPReservationRequestInput: &req,
}).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return nil, err
}
if resp.StatusCode == http.StatusUnprocessableEntity {
return nil, ErrElasticIPQuotaExceeded
}
rawIP := r.IPReservation.GetAddress()
ip := net.ParseIP(rawIP)
if ip == nil {
return nil, fmt.Errorf("failed to parse IP: %s, %w", rawIP, ErrInvalidIP)
}
return ip, nil
}
// enableBGP enable bgp on the project
func (p *Client) EnableProjectBGP(ctx context.Context, projectID string) error {
// first check if it is enabled before trying to create it
bgpConfig, _, err := p.BGPApi.FindBgpConfigByProject(ctx, projectID).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
// if we already have a config, just return
// we need some extra handling logic because the API always returns 200, even if
// not BGP config is in place.
// We treat it as valid config already exists only if ALL of the above is true:
// - no error
// - bgpConfig struct exists
// - bgpConfig struct has non-blank ID
// - bgpConfig struct does not have Status=="disabled"
if err != nil {
return err
} else if bgpConfig != nil && bgpConfig.GetId() != "" && strings.ToLower(bgpConfig.GetStatus()) != "disabled" {
return nil
}
// get the local ASN
localASN := os.Getenv(envVarLocalASN)
var outLocalASN int
switch {
case localASN != "":
localASNNo, err := strconv.Atoi(localASN)
if err != nil {
return fmt.Errorf("env var %s must be a number, was %s: %w", envVarLocalASN, localASN, err)
}
outLocalASN = localASNNo
default:
outLocalASN = DefaultLocalASN
}
var outBGPPass string
bgpPass := os.Getenv(envVarBGPPass)
if bgpPass != "" {
outBGPPass = bgpPass
}
// we did not have a valid one, so create it
useCase := "kubernetes-load-balancer"
apiRequest := p.BGPApi.RequestBgpConfig(ctx, projectID)
_, err = apiRequest.BgpConfigRequestInput(metal.BgpConfigRequestInput{
Asn: int32(outLocalASN),
Md5: &outBGPPass,
DeploymentType: "local",
UseCase: &useCase,
}).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
return err
}
// ensureNodeBGPEnabled check if the node has bgp enabled, and set it if it does not
func (p *Client) EnsureNodeBGPEnabled(ctx context.Context, id string) error {
// fortunately, this is idempotent, so just create
addressFamily := "ipv4"
req := metal.BGPSessionInput{
AddressFamily: &addressFamily,
}
_, response, err := p.DevicesApi.CreateBgpSession(ctx, id).BGPSessionInput(req).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
// if we already had one, then we can ignore the error
// this really should be a 409, but 422 is what is returned
if response != nil && response.StatusCode == 422 && strings.Contains(fmt.Sprintf("%s", err), "already has session") {
err = nil
}
return err
}
func (p *Client) GetIPByClusterIdentifier(ctx context.Context, namespace, name, projectID string) (*metal.IPReservation, error) {
var err error
var ipReservation *metal.IPReservation
eipIdentifier := generateElasticIPIdentifier(name)
reservedIPs, _, err := p.IPAddressesApi.FindIPReservations(ctx, projectID).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return ipReservation, err
}
for _, reservedIPWrapper := range reservedIPs.IpAddresses {
ipReservation = reservedIPWrapper.IPReservation
if ipReservation != nil {
for _, tag := range ipReservation.Tags {
if tag == eipIdentifier {
return ipReservation, nil
}
}
}
}
return ipReservation, ErrControlPlanEndpointNotFound
}
func generateElasticIPIdentifier(name string) string {
return fmt.Sprintf("cluster-api-provider-packet:cluster-id:%s", name)
}
// This function provides backwards compatibility for the packngo
// debug environment variable while allowing us to introduce a new
// debug variable in the future that is not tied to packngo
func checkEnvForDebug() bool {
return os.Getenv(legacyDebugVar) != ""
}
| {
token := strings.TrimSpace(packetAPIKey)
if token != "" {
configuration := metal.NewConfiguration()
configuration.Debug = checkEnvForDebug()
configuration.AddDefaultHeader("X-Auth-Token", token)
configuration.AddDefaultHeader("X-Consumer-Token", clientName)
configuration.UserAgent = fmt.Sprintf(clientUAFormat, version.Get(), configuration.UserAgent)
metalClient := &Client{metal.NewAPIClient(configuration)}
return metalClient
}
return nil
} | identifier_body |
client.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package packet
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"os"
"strconv"
"strings"
"text/template"
metal "github.com/equinix-labs/metal-go/metal/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
infrav1 "sigs.k8s.io/cluster-api-provider-packet/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-packet/pkg/cloud/packet/scope"
"sigs.k8s.io/cluster-api-provider-packet/version"
)
const (
apiTokenVarName = "PACKET_API_KEY" //nolint:gosec
ipxeOS = "custom_ipxe"
envVarLocalASN = "METAL_LOCAL_ASN"
envVarBGPPass = "METAL_BGP_PASS" //nolint:gosec
DefaultLocalASN = 65000
legacyDebugVar = "PACKNGO_DEBUG" // For backwards compatibility with packngo
)
var (
clientName = "CAPP-v1beta1"
clientUAFormat = "cluster-api-provider-packet/%s %s"
ErrControlPlanEndpointNotFound = errors.New("control plane not found")
ErrElasticIPQuotaExceeded = errors.New("could not create an Elastic IP due to quota limits on the account, please contact Equinix Metal support")
ErrInvalidIP = errors.New("invalid IP")
ErrMissingEnvVar = errors.New("missing required env var")
ErrInvalidRequest = errors.New("invalid request")
)
type Client struct {
*metal.APIClient
}
// NewClient creates a new Client for the given Packet credentials
func NewClient(packetAPIKey string) *Client {
token := strings.TrimSpace(packetAPIKey)
if token != "" {
configuration := metal.NewConfiguration()
configuration.Debug = checkEnvForDebug()
configuration.AddDefaultHeader("X-Auth-Token", token)
configuration.AddDefaultHeader("X-Consumer-Token", clientName)
configuration.UserAgent = fmt.Sprintf(clientUAFormat, version.Get(), configuration.UserAgent)
metalClient := &Client{metal.NewAPIClient(configuration)}
return metalClient
}
return nil
}
func GetClient() (*Client, error) {
token := os.Getenv(apiTokenVarName)
if token == "" {
return nil, fmt.Errorf("%w: %s", ErrMissingEnvVar, apiTokenVarName)
}
return NewClient(token), nil
}
func (p *Client) GetDevice(ctx context.Context, deviceID string) (*metal.Device, *http.Response, error) {
dev, resp, err := p.DevicesApi.FindDeviceById(ctx, deviceID).Execute()
return dev, resp, err
}
type CreateDeviceRequest struct {
ExtraTags []string
MachineScope *scope.MachineScope
ControlPlaneEndpoint string
}
func (p *Client) NewDevice(ctx context.Context, req CreateDeviceRequest) (*metal.Device, error) {
packetMachineSpec := req.MachineScope.PacketMachine.Spec
packetClusterSpec := req.MachineScope.PacketCluster.Spec
if packetMachineSpec.IPXEUrl != "" {
// Error if pxe url and OS conflict
if packetMachineSpec.OS != ipxeOS {
return nil, fmt.Errorf("os should be set to custom_pxe when using pxe urls: %w", ErrInvalidRequest)
}
}
userDataRaw, err := req.MachineScope.GetRawBootstrapData(ctx)
if err != nil |
stringWriter := &strings.Builder{}
userData := string(userDataRaw)
userDataValues := map[string]interface{}{
"kubernetesVersion": pointer.StringPtrDerefOr(req.MachineScope.Machine.Spec.Version, ""),
}
tags := make([]string, 0, len(packetMachineSpec.Tags)+len(req.ExtraTags))
copy(tags, packetMachineSpec.Tags)
tags = append(tags, req.ExtraTags...)
tmpl, err := template.New("user-data").Parse(userData)
if err != nil {
return nil, fmt.Errorf("error parsing userdata template: %w", err)
}
if req.MachineScope.IsControlPlane() {
// control plane machines should get the API key injected
userDataValues["apiKey"] = p.APIClient.GetConfig().DefaultHeader["X-Auth-Token"]
if req.ControlPlaneEndpoint != "" {
userDataValues["controlPlaneEndpoint"] = req.ControlPlaneEndpoint
}
tags = append(tags, infrav1.ControlPlaneTag)
} else {
tags = append(tags, infrav1.WorkerTag)
}
if err := tmpl.Execute(stringWriter, userDataValues); err != nil {
return nil, fmt.Errorf("error executing userdata template: %w", err)
}
userData = stringWriter.String()
// If Metro or Facility are specified at the Machine level, we ignore the
// values set at the Cluster level
facility := packetClusterSpec.Facility
metro := packetClusterSpec.Metro
if packetMachineSpec.Facility != "" || packetMachineSpec.Metro != "" {
metro = packetMachineSpec.Metro
facility = packetMachineSpec.Facility
}
hostname := req.MachineScope.Name()
serverCreateOpts := metal.CreateDeviceRequest{}
if facility != "" {
serverCreateOpts.DeviceCreateInFacilityInput = &metal.DeviceCreateInFacilityInput{
Hostname: &hostname,
Facility: []string{facility},
BillingCycle: &req.MachineScope.PacketMachine.Spec.BillingCycle,
Plan: req.MachineScope.PacketMachine.Spec.MachineType,
OperatingSystem: req.MachineScope.PacketMachine.Spec.OS,
IpxeScriptUrl: &req.MachineScope.PacketMachine.Spec.IPXEUrl,
Tags: tags,
Userdata: &userData,
}
} else {
serverCreateOpts.DeviceCreateInMetroInput = &metal.DeviceCreateInMetroInput{
Hostname: &hostname,
Metro: metro,
BillingCycle: &req.MachineScope.PacketMachine.Spec.BillingCycle,
Plan: req.MachineScope.PacketMachine.Spec.MachineType,
OperatingSystem: req.MachineScope.PacketMachine.Spec.OS,
IpxeScriptUrl: &req.MachineScope.PacketMachine.Spec.IPXEUrl,
Tags: tags,
Userdata: &userData,
}
}
reservationIDs := strings.Split(packetMachineSpec.HardwareReservationID, ",")
// If there are no reservationIDs to process, go ahead and return early
if len(reservationIDs) <= 1 {
apiRequest := p.DevicesApi.CreateDevice(ctx, req.MachineScope.PacketCluster.Spec.ProjectID)
dev, _, err := apiRequest.CreateDeviceRequest(serverCreateOpts).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
return dev, err
}
// Do a naive loop through the list of reservationIDs, continuing if we hit any error
// TODO: if we can determine how to differentiate a failure based on the reservation
// being in use vs other errors, then we can make this a bit smarter in the future.
var lastErr error
for _, resID := range reservationIDs {
reservationID := resID
serverCreateOpts.DeviceCreateInFacilityInput.HardwareReservationId = &reservationID
apiRequest := p.DevicesApi.CreateDevice(ctx, req.MachineScope.PacketCluster.Spec.ProjectID)
dev, _, err := apiRequest.CreateDeviceRequest(serverCreateOpts).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
lastErr = err
continue
}
return dev, nil
}
return nil, lastErr
}
func (p *Client) GetDeviceAddresses(device *metal.Device) []corev1.NodeAddress {
addrs := make([]corev1.NodeAddress, 0)
for _, addr := range device.IpAddresses {
addrType := corev1.NodeInternalIP
if addr.GetPublic() {
addrType = corev1.NodeExternalIP
}
a := corev1.NodeAddress{
Type: addrType,
Address: addr.GetAddress(),
}
addrs = append(addrs, a)
}
return addrs
}
func (p *Client) GetDeviceByTags(ctx context.Context, project string, tags []string) (*metal.Device, error) {
devices, _, err := p.DevicesApi.FindProjectDevices(ctx, project).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return nil, fmt.Errorf("error retrieving devices: %w", err)
}
// returns the first one that matches all of the tags
for _, device := range devices.Devices {
if ItemsInList(device.Tags, tags) {
return &device, nil
}
}
return nil, nil
}
// CreateIP reserves an IP via Packet API. The request fails straight if no IP are available for the specified project.
// This prevent the cluster to become ready.
func (p *Client) CreateIP(ctx context.Context, namespace, clusterName, projectID, facility, metro string) (net.IP, error) {
failOnApprovalRequired := true
req := metal.IPReservationRequestInput{
Type: "public_ipv4",
Quantity: 1,
Facility: &facility,
Metro: &metro,
FailOnApprovalRequired: &failOnApprovalRequired,
Tags: []string{generateElasticIPIdentifier(clusterName)},
}
apiRequest := p.IPAddressesApi.RequestIPReservation(ctx, projectID)
r, resp, err := apiRequest.RequestIPReservationRequest(metal.RequestIPReservationRequest{
IPReservationRequestInput: &req,
}).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return nil, err
}
if resp.StatusCode == http.StatusUnprocessableEntity {
return nil, ErrElasticIPQuotaExceeded
}
rawIP := r.IPReservation.GetAddress()
ip := net.ParseIP(rawIP)
if ip == nil {
return nil, fmt.Errorf("failed to parse IP: %s, %w", rawIP, ErrInvalidIP)
}
return ip, nil
}
// enableBGP enable bgp on the project
func (p *Client) EnableProjectBGP(ctx context.Context, projectID string) error {
// first check if it is enabled before trying to create it
bgpConfig, _, err := p.BGPApi.FindBgpConfigByProject(ctx, projectID).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
// if we already have a config, just return
// we need some extra handling logic because the API always returns 200, even if
// not BGP config is in place.
// We treat it as valid config already exists only if ALL of the above is true:
// - no error
// - bgpConfig struct exists
// - bgpConfig struct has non-blank ID
// - bgpConfig struct does not have Status=="disabled"
if err != nil {
return err
} else if bgpConfig != nil && bgpConfig.GetId() != "" && strings.ToLower(bgpConfig.GetStatus()) != "disabled" {
return nil
}
// get the local ASN
localASN := os.Getenv(envVarLocalASN)
var outLocalASN int
switch {
case localASN != "":
localASNNo, err := strconv.Atoi(localASN)
if err != nil {
return fmt.Errorf("env var %s must be a number, was %s: %w", envVarLocalASN, localASN, err)
}
outLocalASN = localASNNo
default:
outLocalASN = DefaultLocalASN
}
var outBGPPass string
bgpPass := os.Getenv(envVarBGPPass)
if bgpPass != "" {
outBGPPass = bgpPass
}
// we did not have a valid one, so create it
useCase := "kubernetes-load-balancer"
apiRequest := p.BGPApi.RequestBgpConfig(ctx, projectID)
_, err = apiRequest.BgpConfigRequestInput(metal.BgpConfigRequestInput{
Asn: int32(outLocalASN),
Md5: &outBGPPass,
DeploymentType: "local",
UseCase: &useCase,
}).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
return err
}
// ensureNodeBGPEnabled check if the node has bgp enabled, and set it if it does not
func (p *Client) EnsureNodeBGPEnabled(ctx context.Context, id string) error {
// fortunately, this is idempotent, so just create
addressFamily := "ipv4"
req := metal.BGPSessionInput{
AddressFamily: &addressFamily,
}
_, response, err := p.DevicesApi.CreateBgpSession(ctx, id).BGPSessionInput(req).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
// if we already had one, then we can ignore the error
// this really should be a 409, but 422 is what is returned
if response != nil && response.StatusCode == 422 && strings.Contains(fmt.Sprintf("%s", err), "already has session") {
err = nil
}
return err
}
func (p *Client) GetIPByClusterIdentifier(ctx context.Context, namespace, name, projectID string) (*metal.IPReservation, error) {
var err error
var ipReservation *metal.IPReservation
eipIdentifier := generateElasticIPIdentifier(name)
reservedIPs, _, err := p.IPAddressesApi.FindIPReservations(ctx, projectID).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return ipReservation, err
}
for _, reservedIPWrapper := range reservedIPs.IpAddresses {
ipReservation = reservedIPWrapper.IPReservation
if ipReservation != nil {
for _, tag := range ipReservation.Tags {
if tag == eipIdentifier {
return ipReservation, nil
}
}
}
}
return ipReservation, ErrControlPlanEndpointNotFound
}
func generateElasticIPIdentifier(name string) string {
return fmt.Sprintf("cluster-api-provider-packet:cluster-id:%s", name)
}
// This function provides backwards compatibility for the packngo
// debug environment variable while allowing us to introduce a new
// debug variable in the future that is not tied to packngo
func checkEnvForDebug() bool {
return os.Getenv(legacyDebugVar) != ""
}
| {
return nil, fmt.Errorf("unable to retrieve bootstrap data from secret: %w", err)
} | conditional_block |
client.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package packet
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"os"
"strconv"
"strings"
"text/template"
metal "github.com/equinix-labs/metal-go/metal/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
infrav1 "sigs.k8s.io/cluster-api-provider-packet/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-packet/pkg/cloud/packet/scope"
"sigs.k8s.io/cluster-api-provider-packet/version"
)
const (
apiTokenVarName = "PACKET_API_KEY" //nolint:gosec
ipxeOS = "custom_ipxe"
envVarLocalASN = "METAL_LOCAL_ASN"
envVarBGPPass = "METAL_BGP_PASS" //nolint:gosec
DefaultLocalASN = 65000
legacyDebugVar = "PACKNGO_DEBUG" // For backwards compatibility with packngo
)
var (
clientName = "CAPP-v1beta1"
clientUAFormat = "cluster-api-provider-packet/%s %s"
ErrControlPlanEndpointNotFound = errors.New("control plane not found")
ErrElasticIPQuotaExceeded = errors.New("could not create an Elastic IP due to quota limits on the account, please contact Equinix Metal support")
ErrInvalidIP = errors.New("invalid IP")
ErrMissingEnvVar = errors.New("missing required env var")
ErrInvalidRequest = errors.New("invalid request")
)
type Client struct {
*metal.APIClient
}
// NewClient creates a new Client for the given Packet credentials
func NewClient(packetAPIKey string) *Client {
token := strings.TrimSpace(packetAPIKey)
if token != "" {
configuration := metal.NewConfiguration()
configuration.Debug = checkEnvForDebug()
configuration.AddDefaultHeader("X-Auth-Token", token)
configuration.AddDefaultHeader("X-Consumer-Token", clientName)
configuration.UserAgent = fmt.Sprintf(clientUAFormat, version.Get(), configuration.UserAgent)
metalClient := &Client{metal.NewAPIClient(configuration)}
return metalClient
}
return nil
}
func | () (*Client, error) {
token := os.Getenv(apiTokenVarName)
if token == "" {
return nil, fmt.Errorf("%w: %s", ErrMissingEnvVar, apiTokenVarName)
}
return NewClient(token), nil
}
func (p *Client) GetDevice(ctx context.Context, deviceID string) (*metal.Device, *http.Response, error) {
dev, resp, err := p.DevicesApi.FindDeviceById(ctx, deviceID).Execute()
return dev, resp, err
}
type CreateDeviceRequest struct {
ExtraTags []string
MachineScope *scope.MachineScope
ControlPlaneEndpoint string
}
func (p *Client) NewDevice(ctx context.Context, req CreateDeviceRequest) (*metal.Device, error) {
packetMachineSpec := req.MachineScope.PacketMachine.Spec
packetClusterSpec := req.MachineScope.PacketCluster.Spec
if packetMachineSpec.IPXEUrl != "" {
// Error if pxe url and OS conflict
if packetMachineSpec.OS != ipxeOS {
return nil, fmt.Errorf("os should be set to custom_pxe when using pxe urls: %w", ErrInvalidRequest)
}
}
userDataRaw, err := req.MachineScope.GetRawBootstrapData(ctx)
if err != nil {
return nil, fmt.Errorf("unable to retrieve bootstrap data from secret: %w", err)
}
stringWriter := &strings.Builder{}
userData := string(userDataRaw)
userDataValues := map[string]interface{}{
"kubernetesVersion": pointer.StringPtrDerefOr(req.MachineScope.Machine.Spec.Version, ""),
}
tags := make([]string, 0, len(packetMachineSpec.Tags)+len(req.ExtraTags))
copy(tags, packetMachineSpec.Tags)
tags = append(tags, req.ExtraTags...)
tmpl, err := template.New("user-data").Parse(userData)
if err != nil {
return nil, fmt.Errorf("error parsing userdata template: %w", err)
}
if req.MachineScope.IsControlPlane() {
// control plane machines should get the API key injected
userDataValues["apiKey"] = p.APIClient.GetConfig().DefaultHeader["X-Auth-Token"]
if req.ControlPlaneEndpoint != "" {
userDataValues["controlPlaneEndpoint"] = req.ControlPlaneEndpoint
}
tags = append(tags, infrav1.ControlPlaneTag)
} else {
tags = append(tags, infrav1.WorkerTag)
}
if err := tmpl.Execute(stringWriter, userDataValues); err != nil {
return nil, fmt.Errorf("error executing userdata template: %w", err)
}
userData = stringWriter.String()
// If Metro or Facility are specified at the Machine level, we ignore the
// values set at the Cluster level
facility := packetClusterSpec.Facility
metro := packetClusterSpec.Metro
if packetMachineSpec.Facility != "" || packetMachineSpec.Metro != "" {
metro = packetMachineSpec.Metro
facility = packetMachineSpec.Facility
}
hostname := req.MachineScope.Name()
serverCreateOpts := metal.CreateDeviceRequest{}
if facility != "" {
serverCreateOpts.DeviceCreateInFacilityInput = &metal.DeviceCreateInFacilityInput{
Hostname: &hostname,
Facility: []string{facility},
BillingCycle: &req.MachineScope.PacketMachine.Spec.BillingCycle,
Plan: req.MachineScope.PacketMachine.Spec.MachineType,
OperatingSystem: req.MachineScope.PacketMachine.Spec.OS,
IpxeScriptUrl: &req.MachineScope.PacketMachine.Spec.IPXEUrl,
Tags: tags,
Userdata: &userData,
}
} else {
serverCreateOpts.DeviceCreateInMetroInput = &metal.DeviceCreateInMetroInput{
Hostname: &hostname,
Metro: metro,
BillingCycle: &req.MachineScope.PacketMachine.Spec.BillingCycle,
Plan: req.MachineScope.PacketMachine.Spec.MachineType,
OperatingSystem: req.MachineScope.PacketMachine.Spec.OS,
IpxeScriptUrl: &req.MachineScope.PacketMachine.Spec.IPXEUrl,
Tags: tags,
Userdata: &userData,
}
}
reservationIDs := strings.Split(packetMachineSpec.HardwareReservationID, ",")
// If there are no reservationIDs to process, go ahead and return early
if len(reservationIDs) <= 1 {
apiRequest := p.DevicesApi.CreateDevice(ctx, req.MachineScope.PacketCluster.Spec.ProjectID)
dev, _, err := apiRequest.CreateDeviceRequest(serverCreateOpts).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
return dev, err
}
// Do a naive loop through the list of reservationIDs, continuing if we hit any error
// TODO: if we can determine how to differentiate a failure based on the reservation
// being in use vs other errors, then we can make this a bit smarter in the future.
var lastErr error
for _, resID := range reservationIDs {
reservationID := resID
serverCreateOpts.DeviceCreateInFacilityInput.HardwareReservationId = &reservationID
apiRequest := p.DevicesApi.CreateDevice(ctx, req.MachineScope.PacketCluster.Spec.ProjectID)
dev, _, err := apiRequest.CreateDeviceRequest(serverCreateOpts).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
lastErr = err
continue
}
return dev, nil
}
return nil, lastErr
}
func (p *Client) GetDeviceAddresses(device *metal.Device) []corev1.NodeAddress {
addrs := make([]corev1.NodeAddress, 0)
for _, addr := range device.IpAddresses {
addrType := corev1.NodeInternalIP
if addr.GetPublic() {
addrType = corev1.NodeExternalIP
}
a := corev1.NodeAddress{
Type: addrType,
Address: addr.GetAddress(),
}
addrs = append(addrs, a)
}
return addrs
}
func (p *Client) GetDeviceByTags(ctx context.Context, project string, tags []string) (*metal.Device, error) {
devices, _, err := p.DevicesApi.FindProjectDevices(ctx, project).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return nil, fmt.Errorf("error retrieving devices: %w", err)
}
// returns the first one that matches all of the tags
for _, device := range devices.Devices {
if ItemsInList(device.Tags, tags) {
return &device, nil
}
}
return nil, nil
}
// CreateIP reserves an IP via Packet API. The request fails straight if no IP are available for the specified project.
// This prevent the cluster to become ready.
func (p *Client) CreateIP(ctx context.Context, namespace, clusterName, projectID, facility, metro string) (net.IP, error) {
failOnApprovalRequired := true
req := metal.IPReservationRequestInput{
Type: "public_ipv4",
Quantity: 1,
Facility: &facility,
Metro: &metro,
FailOnApprovalRequired: &failOnApprovalRequired,
Tags: []string{generateElasticIPIdentifier(clusterName)},
}
apiRequest := p.IPAddressesApi.RequestIPReservation(ctx, projectID)
r, resp, err := apiRequest.RequestIPReservationRequest(metal.RequestIPReservationRequest{
IPReservationRequestInput: &req,
}).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return nil, err
}
if resp.StatusCode == http.StatusUnprocessableEntity {
return nil, ErrElasticIPQuotaExceeded
}
rawIP := r.IPReservation.GetAddress()
ip := net.ParseIP(rawIP)
if ip == nil {
return nil, fmt.Errorf("failed to parse IP: %s, %w", rawIP, ErrInvalidIP)
}
return ip, nil
}
// enableBGP enable bgp on the project
func (p *Client) EnableProjectBGP(ctx context.Context, projectID string) error {
// first check if it is enabled before trying to create it
bgpConfig, _, err := p.BGPApi.FindBgpConfigByProject(ctx, projectID).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
// if we already have a config, just return
// we need some extra handling logic because the API always returns 200, even if
// not BGP config is in place.
// We treat it as valid config already exists only if ALL of the above is true:
// - no error
// - bgpConfig struct exists
// - bgpConfig struct has non-blank ID
// - bgpConfig struct does not have Status=="disabled"
if err != nil {
return err
} else if bgpConfig != nil && bgpConfig.GetId() != "" && strings.ToLower(bgpConfig.GetStatus()) != "disabled" {
return nil
}
// get the local ASN
localASN := os.Getenv(envVarLocalASN)
var outLocalASN int
switch {
case localASN != "":
localASNNo, err := strconv.Atoi(localASN)
if err != nil {
return fmt.Errorf("env var %s must be a number, was %s: %w", envVarLocalASN, localASN, err)
}
outLocalASN = localASNNo
default:
outLocalASN = DefaultLocalASN
}
var outBGPPass string
bgpPass := os.Getenv(envVarBGPPass)
if bgpPass != "" {
outBGPPass = bgpPass
}
// we did not have a valid one, so create it
useCase := "kubernetes-load-balancer"
apiRequest := p.BGPApi.RequestBgpConfig(ctx, projectID)
_, err = apiRequest.BgpConfigRequestInput(metal.BgpConfigRequestInput{
Asn: int32(outLocalASN),
Md5: &outBGPPass,
DeploymentType: "local",
UseCase: &useCase,
}).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
return err
}
// ensureNodeBGPEnabled check if the node has bgp enabled, and set it if it does not
func (p *Client) EnsureNodeBGPEnabled(ctx context.Context, id string) error {
// fortunately, this is idempotent, so just create
addressFamily := "ipv4"
req := metal.BGPSessionInput{
AddressFamily: &addressFamily,
}
_, response, err := p.DevicesApi.CreateBgpSession(ctx, id).BGPSessionInput(req).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
// if we already had one, then we can ignore the error
// this really should be a 409, but 422 is what is returned
if response != nil && response.StatusCode == 422 && strings.Contains(fmt.Sprintf("%s", err), "already has session") {
err = nil
}
return err
}
func (p *Client) GetIPByClusterIdentifier(ctx context.Context, namespace, name, projectID string) (*metal.IPReservation, error) {
var err error
var ipReservation *metal.IPReservation
eipIdentifier := generateElasticIPIdentifier(name)
reservedIPs, _, err := p.IPAddressesApi.FindIPReservations(ctx, projectID).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return ipReservation, err
}
for _, reservedIPWrapper := range reservedIPs.IpAddresses {
ipReservation = reservedIPWrapper.IPReservation
if ipReservation != nil {
for _, tag := range ipReservation.Tags {
if tag == eipIdentifier {
return ipReservation, nil
}
}
}
}
return ipReservation, ErrControlPlanEndpointNotFound
}
func generateElasticIPIdentifier(name string) string {
return fmt.Sprintf("cluster-api-provider-packet:cluster-id:%s", name)
}
// This function provides backwards compatibility for the packngo
// debug environment variable while allowing us to introduce a new
// debug variable in the future that is not tied to packngo
func checkEnvForDebug() bool {
return os.Getenv(legacyDebugVar) != ""
}
| GetClient | identifier_name |
client.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package packet
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"os"
"strconv"
"strings"
"text/template"
metal "github.com/equinix-labs/metal-go/metal/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
infrav1 "sigs.k8s.io/cluster-api-provider-packet/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-packet/pkg/cloud/packet/scope"
"sigs.k8s.io/cluster-api-provider-packet/version"
)
const (
apiTokenVarName = "PACKET_API_KEY" //nolint:gosec
ipxeOS = "custom_ipxe"
envVarLocalASN = "METAL_LOCAL_ASN"
envVarBGPPass = "METAL_BGP_PASS" //nolint:gosec
DefaultLocalASN = 65000
legacyDebugVar = "PACKNGO_DEBUG" // For backwards compatibility with packngo
)
var (
clientName = "CAPP-v1beta1"
clientUAFormat = "cluster-api-provider-packet/%s %s"
ErrControlPlanEndpointNotFound = errors.New("control plane not found")
ErrElasticIPQuotaExceeded = errors.New("could not create an Elastic IP due to quota limits on the account, please contact Equinix Metal support")
ErrInvalidIP = errors.New("invalid IP")
ErrMissingEnvVar = errors.New("missing required env var")
ErrInvalidRequest = errors.New("invalid request")
)
type Client struct {
*metal.APIClient
}
// NewClient creates a new Client for the given Packet credentials
func NewClient(packetAPIKey string) *Client {
token := strings.TrimSpace(packetAPIKey)
if token != "" {
configuration := metal.NewConfiguration()
configuration.Debug = checkEnvForDebug()
configuration.AddDefaultHeader("X-Auth-Token", token)
configuration.AddDefaultHeader("X-Consumer-Token", clientName)
configuration.UserAgent = fmt.Sprintf(clientUAFormat, version.Get(), configuration.UserAgent)
metalClient := &Client{metal.NewAPIClient(configuration)}
return metalClient
}
return nil
}
func GetClient() (*Client, error) {
token := os.Getenv(apiTokenVarName)
if token == "" {
return nil, fmt.Errorf("%w: %s", ErrMissingEnvVar, apiTokenVarName)
}
return NewClient(token), nil
}
func (p *Client) GetDevice(ctx context.Context, deviceID string) (*metal.Device, *http.Response, error) {
dev, resp, err := p.DevicesApi.FindDeviceById(ctx, deviceID).Execute()
return dev, resp, err
}
type CreateDeviceRequest struct {
ExtraTags []string
MachineScope *scope.MachineScope
ControlPlaneEndpoint string
}
func (p *Client) NewDevice(ctx context.Context, req CreateDeviceRequest) (*metal.Device, error) {
packetMachineSpec := req.MachineScope.PacketMachine.Spec
packetClusterSpec := req.MachineScope.PacketCluster.Spec
if packetMachineSpec.IPXEUrl != "" {
// Error if pxe url and OS conflict
if packetMachineSpec.OS != ipxeOS {
return nil, fmt.Errorf("os should be set to custom_pxe when using pxe urls: %w", ErrInvalidRequest)
}
}
userDataRaw, err := req.MachineScope.GetRawBootstrapData(ctx)
if err != nil {
return nil, fmt.Errorf("unable to retrieve bootstrap data from secret: %w", err)
}
stringWriter := &strings.Builder{}
userData := string(userDataRaw)
userDataValues := map[string]interface{}{
"kubernetesVersion": pointer.StringPtrDerefOr(req.MachineScope.Machine.Spec.Version, ""),
}
tags := make([]string, 0, len(packetMachineSpec.Tags)+len(req.ExtraTags))
copy(tags, packetMachineSpec.Tags)
tags = append(tags, req.ExtraTags...)
tmpl, err := template.New("user-data").Parse(userData)
if err != nil {
return nil, fmt.Errorf("error parsing userdata template: %w", err)
}
if req.MachineScope.IsControlPlane() {
// control plane machines should get the API key injected
userDataValues["apiKey"] = p.APIClient.GetConfig().DefaultHeader["X-Auth-Token"]
if req.ControlPlaneEndpoint != "" {
userDataValues["controlPlaneEndpoint"] = req.ControlPlaneEndpoint
}
tags = append(tags, infrav1.ControlPlaneTag)
} else {
tags = append(tags, infrav1.WorkerTag)
}
if err := tmpl.Execute(stringWriter, userDataValues); err != nil {
return nil, fmt.Errorf("error executing userdata template: %w", err)
}
userData = stringWriter.String()
// If Metro or Facility are specified at the Machine level, we ignore the
// values set at the Cluster level
facility := packetClusterSpec.Facility
metro := packetClusterSpec.Metro
if packetMachineSpec.Facility != "" || packetMachineSpec.Metro != "" {
metro = packetMachineSpec.Metro
facility = packetMachineSpec.Facility
}
hostname := req.MachineScope.Name()
| Facility: []string{facility},
BillingCycle: &req.MachineScope.PacketMachine.Spec.BillingCycle,
Plan: req.MachineScope.PacketMachine.Spec.MachineType,
OperatingSystem: req.MachineScope.PacketMachine.Spec.OS,
IpxeScriptUrl: &req.MachineScope.PacketMachine.Spec.IPXEUrl,
Tags: tags,
Userdata: &userData,
}
} else {
serverCreateOpts.DeviceCreateInMetroInput = &metal.DeviceCreateInMetroInput{
Hostname: &hostname,
Metro: metro,
BillingCycle: &req.MachineScope.PacketMachine.Spec.BillingCycle,
Plan: req.MachineScope.PacketMachine.Spec.MachineType,
OperatingSystem: req.MachineScope.PacketMachine.Spec.OS,
IpxeScriptUrl: &req.MachineScope.PacketMachine.Spec.IPXEUrl,
Tags: tags,
Userdata: &userData,
}
}
reservationIDs := strings.Split(packetMachineSpec.HardwareReservationID, ",")
// If there are no reservationIDs to process, go ahead and return early
if len(reservationIDs) <= 1 {
apiRequest := p.DevicesApi.CreateDevice(ctx, req.MachineScope.PacketCluster.Spec.ProjectID)
dev, _, err := apiRequest.CreateDeviceRequest(serverCreateOpts).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
return dev, err
}
// Do a naive loop through the list of reservationIDs, continuing if we hit any error
// TODO: if we can determine how to differentiate a failure based on the reservation
// being in use vs other errors, then we can make this a bit smarter in the future.
var lastErr error
for _, resID := range reservationIDs {
reservationID := resID
serverCreateOpts.DeviceCreateInFacilityInput.HardwareReservationId = &reservationID
apiRequest := p.DevicesApi.CreateDevice(ctx, req.MachineScope.PacketCluster.Spec.ProjectID)
dev, _, err := apiRequest.CreateDeviceRequest(serverCreateOpts).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
lastErr = err
continue
}
return dev, nil
}
return nil, lastErr
}
func (p *Client) GetDeviceAddresses(device *metal.Device) []corev1.NodeAddress {
addrs := make([]corev1.NodeAddress, 0)
for _, addr := range device.IpAddresses {
addrType := corev1.NodeInternalIP
if addr.GetPublic() {
addrType = corev1.NodeExternalIP
}
a := corev1.NodeAddress{
Type: addrType,
Address: addr.GetAddress(),
}
addrs = append(addrs, a)
}
return addrs
}
func (p *Client) GetDeviceByTags(ctx context.Context, project string, tags []string) (*metal.Device, error) {
devices, _, err := p.DevicesApi.FindProjectDevices(ctx, project).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return nil, fmt.Errorf("error retrieving devices: %w", err)
}
// returns the first one that matches all of the tags
for _, device := range devices.Devices {
if ItemsInList(device.Tags, tags) {
return &device, nil
}
}
return nil, nil
}
// CreateIP reserves an IP via Packet API. The request fails straight if no IP are available for the specified project.
// This prevent the cluster to become ready.
func (p *Client) CreateIP(ctx context.Context, namespace, clusterName, projectID, facility, metro string) (net.IP, error) {
failOnApprovalRequired := true
req := metal.IPReservationRequestInput{
Type: "public_ipv4",
Quantity: 1,
Facility: &facility,
Metro: &metro,
FailOnApprovalRequired: &failOnApprovalRequired,
Tags: []string{generateElasticIPIdentifier(clusterName)},
}
apiRequest := p.IPAddressesApi.RequestIPReservation(ctx, projectID)
r, resp, err := apiRequest.RequestIPReservationRequest(metal.RequestIPReservationRequest{
IPReservationRequestInput: &req,
}).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return nil, err
}
if resp.StatusCode == http.StatusUnprocessableEntity {
return nil, ErrElasticIPQuotaExceeded
}
rawIP := r.IPReservation.GetAddress()
ip := net.ParseIP(rawIP)
if ip == nil {
return nil, fmt.Errorf("failed to parse IP: %s, %w", rawIP, ErrInvalidIP)
}
return ip, nil
}
// enableBGP enable bgp on the project
func (p *Client) EnableProjectBGP(ctx context.Context, projectID string) error {
// first check if it is enabled before trying to create it
bgpConfig, _, err := p.BGPApi.FindBgpConfigByProject(ctx, projectID).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
// if we already have a config, just return
// we need some extra handling logic because the API always returns 200, even if
// not BGP config is in place.
// We treat it as valid config already exists only if ALL of the above is true:
// - no error
// - bgpConfig struct exists
// - bgpConfig struct has non-blank ID
// - bgpConfig struct does not have Status=="disabled"
if err != nil {
return err
} else if bgpConfig != nil && bgpConfig.GetId() != "" && strings.ToLower(bgpConfig.GetStatus()) != "disabled" {
return nil
}
// get the local ASN
localASN := os.Getenv(envVarLocalASN)
var outLocalASN int
switch {
case localASN != "":
localASNNo, err := strconv.Atoi(localASN)
if err != nil {
return fmt.Errorf("env var %s must be a number, was %s: %w", envVarLocalASN, localASN, err)
}
outLocalASN = localASNNo
default:
outLocalASN = DefaultLocalASN
}
var outBGPPass string
bgpPass := os.Getenv(envVarBGPPass)
if bgpPass != "" {
outBGPPass = bgpPass
}
// we did not have a valid one, so create it
useCase := "kubernetes-load-balancer"
apiRequest := p.BGPApi.RequestBgpConfig(ctx, projectID)
_, err = apiRequest.BgpConfigRequestInput(metal.BgpConfigRequestInput{
Asn: int32(outLocalASN),
Md5: &outBGPPass,
DeploymentType: "local",
UseCase: &useCase,
}).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
return err
}
// ensureNodeBGPEnabled check if the node has bgp enabled, and set it if it does not
func (p *Client) EnsureNodeBGPEnabled(ctx context.Context, id string) error {
// fortunately, this is idempotent, so just create
addressFamily := "ipv4"
req := metal.BGPSessionInput{
AddressFamily: &addressFamily,
}
_, response, err := p.DevicesApi.CreateBgpSession(ctx, id).BGPSessionInput(req).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
// if we already had one, then we can ignore the error
// this really should be a 409, but 422 is what is returned
if response != nil && response.StatusCode == 422 && strings.Contains(fmt.Sprintf("%s", err), "already has session") {
err = nil
}
return err
}
func (p *Client) GetIPByClusterIdentifier(ctx context.Context, namespace, name, projectID string) (*metal.IPReservation, error) {
var err error
var ipReservation *metal.IPReservation
eipIdentifier := generateElasticIPIdentifier(name)
reservedIPs, _, err := p.IPAddressesApi.FindIPReservations(ctx, projectID).Execute() //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
if err != nil {
return ipReservation, err
}
for _, reservedIPWrapper := range reservedIPs.IpAddresses {
ipReservation = reservedIPWrapper.IPReservation
if ipReservation != nil {
for _, tag := range ipReservation.Tags {
if tag == eipIdentifier {
return ipReservation, nil
}
}
}
}
return ipReservation, ErrControlPlanEndpointNotFound
}
func generateElasticIPIdentifier(name string) string {
return fmt.Sprintf("cluster-api-provider-packet:cluster-id:%s", name)
}
// This function provides backwards compatibility for the packngo
// debug environment variable while allowing us to introduce a new
// debug variable in the future that is not tied to packngo
func checkEnvForDebug() bool {
return os.Getenv(legacyDebugVar) != ""
} | serverCreateOpts := metal.CreateDeviceRequest{}
if facility != "" {
serverCreateOpts.DeviceCreateInFacilityInput = &metal.DeviceCreateInFacilityInput{
Hostname: &hostname, | random_line_split |
main.rs | use bulletproofs::r1cs::{
ConstraintSystem,
LinearCombination,
Prover,
R1CSError,
Variable,
Verifier,
};
use bulletproofs::{
BulletproofGens,
PedersenGens,
};
use curve25519_dalek::scalar::Scalar;
use merlin::Transcript;
use rand::Rng;
use std::u64;
struct TaxBrackets(Vec<(u64, u64)>);
fn negate_bit<T>(x: T) -> LinearCombination
where T: Into<LinearCombination>
{
LinearCombination::from(Variable::One()) - x
}
fn scalar_to_bits_le<CS: ConstraintSystem>(
cs: &mut CS,
n_bits: usize,
var: LinearCombination
) -> Result<Vec<Variable>, R1CSError> {
// This is a helper function that caches the evaluation of the input variable so that it
// doesn't get recomputed and verified for each bit allocation.
let mut cache_evaluation = {
let get_bit = |scalar: &Scalar, i: usize| (scalar.as_bytes()[i >> 3] >> (i & 7)) & 1;
let local_var = var.clone();
let mut val_cache = None;
move |eval: &dyn Fn(&LinearCombination) -> Scalar, i: usize| -> Result<u8, R1CSError> {
if val_cache.is_none() {
let val = eval(&local_var);
let valid = (n_bits..256).any(|i| get_bit(&val, i) == 0);
val_cache = Some(
if valid {
Ok(val)
} else {
Err(R1CSError::GadgetError {
description: format!("Value is not represented in {} bits", n_bits)
})
}
);
}
val_cache.as_ref()
.expect("the value must have been computed and cached by the block above")
.as_ref()
.map(|scalar| get_bit(scalar, i))
.map_err(|e| e.clone())
}
};
let bit_vars = (0..n_bits)
.map(|i| {
let (lhs, rhs, out) = cs.allocate(|eval| {
let bit = cache_evaluation(eval, i)?;
Ok((bit.into(), (1 - bit).into(), Scalar::zero()))
})?;
// Enforce that lhs variable represents a bit.
// b (1 - b) = 0
cs.constrain(LinearCombination::default() + rhs + lhs - Variable::One());
cs.constrain(out.into());
Ok(lhs)
})
.collect::<Result<Vec<_>, _>>()?;
let two_powers = (0..n_bits).map(|i| {
let mut two_power_repr = [0u8; 32];
two_power_repr[i >> 3] |= 1 << (i & 7);
Scalar::from_bits(two_power_repr)
});
let bit_sum = bit_vars.iter()
.cloned()
.zip(two_powers)
.collect::<LinearCombination>();
// Enforce that var is equal to the inner product of the bits with powers of two.
cs.constrain(var - bit_sum);
Ok(bit_vars)
}
fn lt_gate<CS: ConstraintSystem>(
cs: &mut CS,
n_bits: usize,
lhs: LinearCombination,
rhs: LinearCombination
) -> Result<LinearCombination, R1CSError> {
let lhs_bits = scalar_to_bits_le(cs, n_bits, lhs)?;
let rhs_bits = scalar_to_bits_le(cs, n_bits, rhs)?;
let zero = LinearCombination::default();
// Iterate through bits from most significant to least, comparing each pair.
let (lt, _) = lhs_bits.into_iter().zip(rhs_bits.into_iter())
.rev()
.fold((zero.clone(), zero.clone()), |(lt, gt), (l_bit, r_bit)| {
// lt and gt are boolean LinearCombinations that are 1 if lhs < rhs or lhs > rhs
// respectively after the first i most significant bits.
// Invariant: lt & gt will never both be 1, so (lt || gt) = (lt + gt).
// eq = !(lt || gt)
let eq = negate_bit(lt.clone() + gt.clone());
// Whether left bit i is < or > right bit i.
// bit_lt = !l_bit && r_bit = (1 - l_bit) * r_bit
// bit_gt = l_bit && !r_bit = l_bit * (1 - r_bit)
let (_, _, bit_lt) = cs.multiply(negate_bit(l_bit), r_bit.into());
let (_, _, bit_gt) = cs.multiply(l_bit.into(), negate_bit(r_bit));
// new_lt = lt + eq && bit_lt
// -> lt_diff = new_lt - lt = eq * bit_lt
// new_gt = gt + eq && bit_gt
// -> gt_diff = new_gt - gt = eq * bit_gt
let (_, _, lt_diff) = cs.multiply(eq.clone(), bit_lt.into());
let (_, _, gt_diff) = cs.multiply(eq.clone(), bit_gt.into());
(lt + lt_diff, gt + gt_diff)
});
Ok(lt)
}
fn synthesize<CS: ConstraintSystem>(
cs: &mut CS,
brackets: &TaxBrackets,
values: &[Variable],
expected: &Variable
) -> Result<(), R1CSError> {
// Compute Σ values.
let total = values.iter()
.map(|val| (val.clone(), Scalar::one()))
.collect::<LinearCombination>();
let mut last_cutoff = Scalar::zero();
let mut cumulative = LinearCombination::default();
for (cutoff, rate) in brackets.0.iter() {
let next_cutoff = Scalar::from(*cutoff);
let rate_scalar = Scalar::from(*rate);
let gt_last = lt_gate(cs, 64, last_cutoff.into(), total.clone())?;
let gt_next = lt_gate(cs, 64, next_cutoff.into(), total.clone())?;
let (_, _, between_last_next) = cs.multiply(gt_last.clone(), negate_bit(gt_next.clone()));
let (_, _, between_value) = cs.multiply(
total.clone() - last_cutoff,
LinearCombination::from(between_last_next) * rate_scalar
);
let (_, _, exceeds_value) = cs.multiply(
LinearCombination::from(next_cutoff - last_cutoff),
gt_next * rate_scalar
);
cumulative = cumulative + between_value + exceeds_value;
last_cutoff = next_cutoff;
}
cumulative = cumulative - expected.clone();
cs.constrain(cumulative);
Ok(())
}
fn compute_taxes(brackets: &TaxBrackets, total: u64) -> u64 {
(0..brackets.0.len())
.map(|i| {
let last_cutoff = if i == 0 { 0u64 } else { brackets.0[i-1].0 };
let (next_cutoff, rate) = brackets.0[i];
let amount = if total > next_cutoff {
next_cutoff - last_cutoff
} else if total > last_cutoff {
total - last_cutoff
} else {
0
};
amount * rate
})
.fold(0, |sum, v| sum + v)
}
fn main() {
let brackets = TaxBrackets(vec![
(952500, 10),
(3870000, 12),
(8250000, 22),
(15750000, 24),
(20000000, 32),
(50000000, 35),
(u64::MAX, 37),
]);
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(8192, 1);
let mut prover_transcript = Transcript::new(b"zk taxes");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let mut rng = rand::thread_rng();
let income_amounts = (0..4)
// Multiply by 100 cents to ensure there is no rounding necessary.
.map(|_| rng.gen_range(0, 100000) * 100)
.collect::<Vec<_>>();
let total_income = income_amounts.iter().fold(0, |sum, v| sum + v);
let total_tax = compute_taxes(&brackets, total_income);
println!("Total: {}, Taxes: {}", total_income, total_tax);
let inputs = income_amounts.iter()
.map(|value| (Scalar::from(*value), Scalar::random(&mut rng)))
.collect::<Vec<_>>();
let output_v = Scalar::from(total_tax);
let output_r = Scalar::random(&mut rng);
let (input_pts, input_vars) = inputs.iter()
.map(|(v, r)| prover.commit(*v, *r))
.unzip::<_, _, Vec<_>, Vec<_>>();
let (output_pt, output_var) = prover.commit(output_v, output_r);
synthesize(&mut prover, &brackets, &input_vars, &output_var).unwrap();
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"zk taxes");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let input_vars = input_pts.iter()
.map(|pt| verifier.commit(*pt))
.collect::<Vec<_>>();
let output_var = verifier.commit(output_pt);
synthesize(&mut verifier, &brackets, &input_vars, &output_var).unwrap();
assert!(verifier.verify(&proof).is_ok());
println!("Success!");
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_bits_gadget() {
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(128, 1);
let mut rng = rand::thread_rng();
for _ in 0..100 {
let x = rng.gen::<u64>();
let mut prover_transcript = Transcript::new(b"test");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let (in_pt, in_var) = prover.commit(x.into(), Scalar::random(&mut rng));
let (out_pts, out_vars) = (0..64)
.map(|i| {
prover.commit(((x >> i) & 1).into(), Scalar::random(&mut rng))
})
.unzip::<_, _, Vec<_>, Vec<_>>();
let result = scalar_to_bits_le(&mut prover, 64, in_var.into()).unwrap();
for (wire1, wire2) in result.into_iter().zip(out_vars.into_iter()) {
prover.constrain(wire1 - wire2);
}
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"test");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let in_var = verifier.commit(in_pt);
let out_vars = out_pts.into_iter()
.map(|out_pt| verifier.commit(out_pt))
.collect::<Vec<_>>();
let result = scalar_to_bits_le(&mut verifier, 64, in_var.into()).unwrap();
for (wire1, wire2) in result.into_iter().zip(out_vars.into_iter()) {
verifier.constrain(wire1 - wire2);
}
assert!(verifier.verify(&proof).is_ok());
}
}
#[test]
fn test_lt_gadget() {
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(512, 1);
let mut rng = rand::thread_rng();
for _ in 0..100 {
let x1 = rng.gen::<u64>();
let x2 = rng.gen::<u64>();
let expected_out = if x1 < x2 { 1u64 } else { |
let mut prover_transcript = Transcript::new(b"test");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let (in1_pt, in1_var) = prover.commit(x1.into(), Scalar::random(&mut rng));
let (in2_pt, in2_var) = prover.commit(x2.into(), Scalar::random(&mut rng));
let (out_pt, out_var) = prover.commit(expected_out.into(), Scalar::random(&mut rng));
let result = lt_gate(
&mut prover,
64,
in1_var.into(),
in2_var.into()
).unwrap();
prover.constrain(result - out_var);
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"test");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let in1_var = verifier.commit(in1_pt);
let in2_var = verifier.commit(in2_pt);
let out_var = verifier.commit(out_pt);
let result = lt_gate(
&mut verifier,
64,
in1_var.into(),
in2_var.into()
).unwrap();
verifier.constrain(result - out_var);
assert!(verifier.verify(&proof).is_ok());
}
}
}
| 0u64 }; | conditional_block |
main.rs | use bulletproofs::r1cs::{
ConstraintSystem,
LinearCombination,
Prover,
R1CSError,
Variable,
Verifier,
};
use bulletproofs::{
BulletproofGens,
PedersenGens,
};
use curve25519_dalek::scalar::Scalar;
use merlin::Transcript;
use rand::Rng;
use std::u64;
struct TaxBrackets(Vec<(u64, u64)>);
fn negate_bit<T>(x: T) -> LinearCombination
where T: Into<LinearCombination>
{
LinearCombination::from(Variable::One()) - x
}
fn scalar_to_bits_le<CS: ConstraintSystem>(
cs: &mut CS,
n_bits: usize,
var: LinearCombination
) -> Result<Vec<Variable>, R1CSError> {
// This is a helper function that caches the evaluation of the input variable so that it
// doesn't get recomputed and verified for each bit allocation.
let mut cache_evaluation = {
let get_bit = |scalar: &Scalar, i: usize| (scalar.as_bytes()[i >> 3] >> (i & 7)) & 1;
let local_var = var.clone();
let mut val_cache = None;
move |eval: &dyn Fn(&LinearCombination) -> Scalar, i: usize| -> Result<u8, R1CSError> {
if val_cache.is_none() {
let val = eval(&local_var);
let valid = (n_bits..256).any(|i| get_bit(&val, i) == 0);
val_cache = Some(
if valid {
Ok(val)
} else {
Err(R1CSError::GadgetError {
description: format!("Value is not represented in {} bits", n_bits)
})
}
);
}
val_cache.as_ref()
.expect("the value must have been computed and cached by the block above")
.as_ref()
.map(|scalar| get_bit(scalar, i))
.map_err(|e| e.clone())
}
};
let bit_vars = (0..n_bits)
.map(|i| {
let (lhs, rhs, out) = cs.allocate(|eval| {
let bit = cache_evaluation(eval, i)?;
Ok((bit.into(), (1 - bit).into(), Scalar::zero()))
})?;
// Enforce that lhs variable represents a bit.
// b (1 - b) = 0
cs.constrain(LinearCombination::default() + rhs + lhs - Variable::One());
cs.constrain(out.into());
Ok(lhs)
})
.collect::<Result<Vec<_>, _>>()?;
let two_powers = (0..n_bits).map(|i| {
let mut two_power_repr = [0u8; 32];
two_power_repr[i >> 3] |= 1 << (i & 7);
Scalar::from_bits(two_power_repr)
});
let bit_sum = bit_vars.iter()
.cloned()
.zip(two_powers)
.collect::<LinearCombination>();
// Enforce that var is equal to the inner product of the bits with powers of two.
cs.constrain(var - bit_sum);
Ok(bit_vars)
}
fn lt_gate<CS: ConstraintSystem>(
cs: &mut CS,
n_bits: usize,
lhs: LinearCombination,
rhs: LinearCombination
) -> Result<LinearCombination, R1CSError> {
let lhs_bits = scalar_to_bits_le(cs, n_bits, lhs)?;
let rhs_bits = scalar_to_bits_le(cs, n_bits, rhs)?;
let zero = LinearCombination::default();
// Iterate through bits from most significant to least, comparing each pair.
let (lt, _) = lhs_bits.into_iter().zip(rhs_bits.into_iter())
.rev()
.fold((zero.clone(), zero.clone()), |(lt, gt), (l_bit, r_bit)| {
// lt and gt are boolean LinearCombinations that are 1 if lhs < rhs or lhs > rhs
// respectively after the first i most significant bits.
// Invariant: lt & gt will never both be 1, so (lt || gt) = (lt + gt).
// eq = !(lt || gt)
let eq = negate_bit(lt.clone() + gt.clone());
// Whether left bit i is < or > right bit i.
// bit_lt = !l_bit && r_bit = (1 - l_bit) * r_bit
// bit_gt = l_bit && !r_bit = l_bit * (1 - r_bit)
let (_, _, bit_lt) = cs.multiply(negate_bit(l_bit), r_bit.into());
let (_, _, bit_gt) = cs.multiply(l_bit.into(), negate_bit(r_bit));
// new_lt = lt + eq && bit_lt
// -> lt_diff = new_lt - lt = eq * bit_lt
// new_gt = gt + eq && bit_gt
// -> gt_diff = new_gt - gt = eq * bit_gt
let (_, _, lt_diff) = cs.multiply(eq.clone(), bit_lt.into());
let (_, _, gt_diff) = cs.multiply(eq.clone(), bit_gt.into());
(lt + lt_diff, gt + gt_diff)
});
Ok(lt)
}
fn synthesize<CS: ConstraintSystem>(
cs: &mut CS,
brackets: &TaxBrackets,
values: &[Variable],
expected: &Variable
) -> Result<(), R1CSError> {
// Compute Σ values.
let total = values.iter()
.map(|val| (val.clone(), Scalar::one()))
.collect::<LinearCombination>();
let mut last_cutoff = Scalar::zero();
let mut cumulative = LinearCombination::default();
for (cutoff, rate) in brackets.0.iter() {
let next_cutoff = Scalar::from(*cutoff);
let rate_scalar = Scalar::from(*rate);
let gt_last = lt_gate(cs, 64, last_cutoff.into(), total.clone())?;
let gt_next = lt_gate(cs, 64, next_cutoff.into(), total.clone())?;
let (_, _, between_last_next) = cs.multiply(gt_last.clone(), negate_bit(gt_next.clone()));
let (_, _, between_value) = cs.multiply(
total.clone() - last_cutoff,
LinearCombination::from(between_last_next) * rate_scalar
);
let (_, _, exceeds_value) = cs.multiply(
LinearCombination::from(next_cutoff - last_cutoff),
gt_next * rate_scalar
);
cumulative = cumulative + between_value + exceeds_value;
last_cutoff = next_cutoff;
}
cumulative = cumulative - expected.clone();
cs.constrain(cumulative);
Ok(())
}
fn c | brackets: &TaxBrackets, total: u64) -> u64 {
(0..brackets.0.len())
.map(|i| {
let last_cutoff = if i == 0 { 0u64 } else { brackets.0[i-1].0 };
let (next_cutoff, rate) = brackets.0[i];
let amount = if total > next_cutoff {
next_cutoff - last_cutoff
} else if total > last_cutoff {
total - last_cutoff
} else {
0
};
amount * rate
})
.fold(0, |sum, v| sum + v)
}
fn main() {
let brackets = TaxBrackets(vec![
(952500, 10),
(3870000, 12),
(8250000, 22),
(15750000, 24),
(20000000, 32),
(50000000, 35),
(u64::MAX, 37),
]);
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(8192, 1);
let mut prover_transcript = Transcript::new(b"zk taxes");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let mut rng = rand::thread_rng();
let income_amounts = (0..4)
// Multiply by 100 cents to ensure there is no rounding necessary.
.map(|_| rng.gen_range(0, 100000) * 100)
.collect::<Vec<_>>();
let total_income = income_amounts.iter().fold(0, |sum, v| sum + v);
let total_tax = compute_taxes(&brackets, total_income);
println!("Total: {}, Taxes: {}", total_income, total_tax);
let inputs = income_amounts.iter()
.map(|value| (Scalar::from(*value), Scalar::random(&mut rng)))
.collect::<Vec<_>>();
let output_v = Scalar::from(total_tax);
let output_r = Scalar::random(&mut rng);
let (input_pts, input_vars) = inputs.iter()
.map(|(v, r)| prover.commit(*v, *r))
.unzip::<_, _, Vec<_>, Vec<_>>();
let (output_pt, output_var) = prover.commit(output_v, output_r);
synthesize(&mut prover, &brackets, &input_vars, &output_var).unwrap();
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"zk taxes");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let input_vars = input_pts.iter()
.map(|pt| verifier.commit(*pt))
.collect::<Vec<_>>();
let output_var = verifier.commit(output_pt);
synthesize(&mut verifier, &brackets, &input_vars, &output_var).unwrap();
assert!(verifier.verify(&proof).is_ok());
println!("Success!");
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_bits_gadget() {
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(128, 1);
let mut rng = rand::thread_rng();
for _ in 0..100 {
let x = rng.gen::<u64>();
let mut prover_transcript = Transcript::new(b"test");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let (in_pt, in_var) = prover.commit(x.into(), Scalar::random(&mut rng));
let (out_pts, out_vars) = (0..64)
.map(|i| {
prover.commit(((x >> i) & 1).into(), Scalar::random(&mut rng))
})
.unzip::<_, _, Vec<_>, Vec<_>>();
let result = scalar_to_bits_le(&mut prover, 64, in_var.into()).unwrap();
for (wire1, wire2) in result.into_iter().zip(out_vars.into_iter()) {
prover.constrain(wire1 - wire2);
}
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"test");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let in_var = verifier.commit(in_pt);
let out_vars = out_pts.into_iter()
.map(|out_pt| verifier.commit(out_pt))
.collect::<Vec<_>>();
let result = scalar_to_bits_le(&mut verifier, 64, in_var.into()).unwrap();
for (wire1, wire2) in result.into_iter().zip(out_vars.into_iter()) {
verifier.constrain(wire1 - wire2);
}
assert!(verifier.verify(&proof).is_ok());
}
}
#[test]
fn test_lt_gadget() {
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(512, 1);
let mut rng = rand::thread_rng();
for _ in 0..100 {
let x1 = rng.gen::<u64>();
let x2 = rng.gen::<u64>();
let expected_out = if x1 < x2 { 1u64 } else { 0u64 };
let mut prover_transcript = Transcript::new(b"test");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let (in1_pt, in1_var) = prover.commit(x1.into(), Scalar::random(&mut rng));
let (in2_pt, in2_var) = prover.commit(x2.into(), Scalar::random(&mut rng));
let (out_pt, out_var) = prover.commit(expected_out.into(), Scalar::random(&mut rng));
let result = lt_gate(
&mut prover,
64,
in1_var.into(),
in2_var.into()
).unwrap();
prover.constrain(result - out_var);
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"test");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let in1_var = verifier.commit(in1_pt);
let in2_var = verifier.commit(in2_pt);
let out_var = verifier.commit(out_pt);
let result = lt_gate(
&mut verifier,
64,
in1_var.into(),
in2_var.into()
).unwrap();
verifier.constrain(result - out_var);
assert!(verifier.verify(&proof).is_ok());
}
}
}
| ompute_taxes( | identifier_name |
main.rs | use bulletproofs::r1cs::{
ConstraintSystem,
LinearCombination,
Prover,
R1CSError,
Variable,
Verifier,
};
use bulletproofs::{
BulletproofGens,
PedersenGens,
};
use curve25519_dalek::scalar::Scalar;
use merlin::Transcript;
use rand::Rng;
use std::u64;
struct TaxBrackets(Vec<(u64, u64)>);
fn negate_bit<T>(x: T) -> LinearCombination
where T: Into<LinearCombination>
{
LinearCombination::from(Variable::One()) - x
}
fn scalar_to_bits_le<CS: ConstraintSystem>(
cs: &mut CS,
n_bits: usize,
var: LinearCombination
) -> Result<Vec<Variable>, R1CSError> {
// This is a helper function that caches the evaluation of the input variable so that it
// doesn't get recomputed and verified for each bit allocation.
let mut cache_evaluation = {
let get_bit = |scalar: &Scalar, i: usize| (scalar.as_bytes()[i >> 3] >> (i & 7)) & 1;
let local_var = var.clone();
let mut val_cache = None;
move |eval: &dyn Fn(&LinearCombination) -> Scalar, i: usize| -> Result<u8, R1CSError> {
if val_cache.is_none() {
let val = eval(&local_var);
let valid = (n_bits..256).any(|i| get_bit(&val, i) == 0);
val_cache = Some(
if valid {
Ok(val)
} else {
Err(R1CSError::GadgetError {
description: format!("Value is not represented in {} bits", n_bits)
})
}
);
}
val_cache.as_ref()
.expect("the value must have been computed and cached by the block above")
.as_ref()
.map(|scalar| get_bit(scalar, i))
.map_err(|e| e.clone())
}
};
let bit_vars = (0..n_bits)
.map(|i| {
let (lhs, rhs, out) = cs.allocate(|eval| {
let bit = cache_evaluation(eval, i)?;
Ok((bit.into(), (1 - bit).into(), Scalar::zero()))
})?;
// Enforce that lhs variable represents a bit.
// b (1 - b) = 0
cs.constrain(LinearCombination::default() + rhs + lhs - Variable::One());
cs.constrain(out.into());
Ok(lhs)
})
.collect::<Result<Vec<_>, _>>()?;
let two_powers = (0..n_bits).map(|i| {
let mut two_power_repr = [0u8; 32];
two_power_repr[i >> 3] |= 1 << (i & 7);
Scalar::from_bits(two_power_repr)
});
let bit_sum = bit_vars.iter()
.cloned()
.zip(two_powers)
.collect::<LinearCombination>();
// Enforce that var is equal to the inner product of the bits with powers of two.
cs.constrain(var - bit_sum);
Ok(bit_vars)
}
fn lt_gate<CS: ConstraintSystem>(
cs: &mut CS,
n_bits: usize,
lhs: LinearCombination, | ) -> Result<LinearCombination, R1CSError> {
let lhs_bits = scalar_to_bits_le(cs, n_bits, lhs)?;
let rhs_bits = scalar_to_bits_le(cs, n_bits, rhs)?;
let zero = LinearCombination::default();
// Iterate through bits from most significant to least, comparing each pair.
let (lt, _) = lhs_bits.into_iter().zip(rhs_bits.into_iter())
.rev()
.fold((zero.clone(), zero.clone()), |(lt, gt), (l_bit, r_bit)| {
// lt and gt are boolean LinearCombinations that are 1 if lhs < rhs or lhs > rhs
// respectively after the first i most significant bits.
// Invariant: lt & gt will never both be 1, so (lt || gt) = (lt + gt).
// eq = !(lt || gt)
let eq = negate_bit(lt.clone() + gt.clone());
// Whether left bit i is < or > right bit i.
// bit_lt = !l_bit && r_bit = (1 - l_bit) * r_bit
// bit_gt = l_bit && !r_bit = l_bit * (1 - r_bit)
let (_, _, bit_lt) = cs.multiply(negate_bit(l_bit), r_bit.into());
let (_, _, bit_gt) = cs.multiply(l_bit.into(), negate_bit(r_bit));
// new_lt = lt + eq && bit_lt
// -> lt_diff = new_lt - lt = eq * bit_lt
// new_gt = gt + eq && bit_gt
// -> gt_diff = new_gt - gt = eq * bit_gt
let (_, _, lt_diff) = cs.multiply(eq.clone(), bit_lt.into());
let (_, _, gt_diff) = cs.multiply(eq.clone(), bit_gt.into());
(lt + lt_diff, gt + gt_diff)
});
Ok(lt)
}
fn synthesize<CS: ConstraintSystem>(
cs: &mut CS,
brackets: &TaxBrackets,
values: &[Variable],
expected: &Variable
) -> Result<(), R1CSError> {
// Compute Σ values.
let total = values.iter()
.map(|val| (val.clone(), Scalar::one()))
.collect::<LinearCombination>();
let mut last_cutoff = Scalar::zero();
let mut cumulative = LinearCombination::default();
for (cutoff, rate) in brackets.0.iter() {
let next_cutoff = Scalar::from(*cutoff);
let rate_scalar = Scalar::from(*rate);
let gt_last = lt_gate(cs, 64, last_cutoff.into(), total.clone())?;
let gt_next = lt_gate(cs, 64, next_cutoff.into(), total.clone())?;
let (_, _, between_last_next) = cs.multiply(gt_last.clone(), negate_bit(gt_next.clone()));
let (_, _, between_value) = cs.multiply(
total.clone() - last_cutoff,
LinearCombination::from(between_last_next) * rate_scalar
);
let (_, _, exceeds_value) = cs.multiply(
LinearCombination::from(next_cutoff - last_cutoff),
gt_next * rate_scalar
);
cumulative = cumulative + between_value + exceeds_value;
last_cutoff = next_cutoff;
}
cumulative = cumulative - expected.clone();
cs.constrain(cumulative);
Ok(())
}
fn compute_taxes(brackets: &TaxBrackets, total: u64) -> u64 {
(0..brackets.0.len())
.map(|i| {
let last_cutoff = if i == 0 { 0u64 } else { brackets.0[i-1].0 };
let (next_cutoff, rate) = brackets.0[i];
let amount = if total > next_cutoff {
next_cutoff - last_cutoff
} else if total > last_cutoff {
total - last_cutoff
} else {
0
};
amount * rate
})
.fold(0, |sum, v| sum + v)
}
fn main() {
let brackets = TaxBrackets(vec![
(952500, 10),
(3870000, 12),
(8250000, 22),
(15750000, 24),
(20000000, 32),
(50000000, 35),
(u64::MAX, 37),
]);
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(8192, 1);
let mut prover_transcript = Transcript::new(b"zk taxes");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let mut rng = rand::thread_rng();
let income_amounts = (0..4)
// Multiply by 100 cents to ensure there is no rounding necessary.
.map(|_| rng.gen_range(0, 100000) * 100)
.collect::<Vec<_>>();
let total_income = income_amounts.iter().fold(0, |sum, v| sum + v);
let total_tax = compute_taxes(&brackets, total_income);
println!("Total: {}, Taxes: {}", total_income, total_tax);
let inputs = income_amounts.iter()
.map(|value| (Scalar::from(*value), Scalar::random(&mut rng)))
.collect::<Vec<_>>();
let output_v = Scalar::from(total_tax);
let output_r = Scalar::random(&mut rng);
let (input_pts, input_vars) = inputs.iter()
.map(|(v, r)| prover.commit(*v, *r))
.unzip::<_, _, Vec<_>, Vec<_>>();
let (output_pt, output_var) = prover.commit(output_v, output_r);
synthesize(&mut prover, &brackets, &input_vars, &output_var).unwrap();
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"zk taxes");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let input_vars = input_pts.iter()
.map(|pt| verifier.commit(*pt))
.collect::<Vec<_>>();
let output_var = verifier.commit(output_pt);
synthesize(&mut verifier, &brackets, &input_vars, &output_var).unwrap();
assert!(verifier.verify(&proof).is_ok());
println!("Success!");
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_bits_gadget() {
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(128, 1);
let mut rng = rand::thread_rng();
for _ in 0..100 {
let x = rng.gen::<u64>();
let mut prover_transcript = Transcript::new(b"test");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let (in_pt, in_var) = prover.commit(x.into(), Scalar::random(&mut rng));
let (out_pts, out_vars) = (0..64)
.map(|i| {
prover.commit(((x >> i) & 1).into(), Scalar::random(&mut rng))
})
.unzip::<_, _, Vec<_>, Vec<_>>();
let result = scalar_to_bits_le(&mut prover, 64, in_var.into()).unwrap();
for (wire1, wire2) in result.into_iter().zip(out_vars.into_iter()) {
prover.constrain(wire1 - wire2);
}
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"test");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let in_var = verifier.commit(in_pt);
let out_vars = out_pts.into_iter()
.map(|out_pt| verifier.commit(out_pt))
.collect::<Vec<_>>();
let result = scalar_to_bits_le(&mut verifier, 64, in_var.into()).unwrap();
for (wire1, wire2) in result.into_iter().zip(out_vars.into_iter()) {
verifier.constrain(wire1 - wire2);
}
assert!(verifier.verify(&proof).is_ok());
}
}
#[test]
fn test_lt_gadget() {
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(512, 1);
let mut rng = rand::thread_rng();
for _ in 0..100 {
let x1 = rng.gen::<u64>();
let x2 = rng.gen::<u64>();
let expected_out = if x1 < x2 { 1u64 } else { 0u64 };
let mut prover_transcript = Transcript::new(b"test");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let (in1_pt, in1_var) = prover.commit(x1.into(), Scalar::random(&mut rng));
let (in2_pt, in2_var) = prover.commit(x2.into(), Scalar::random(&mut rng));
let (out_pt, out_var) = prover.commit(expected_out.into(), Scalar::random(&mut rng));
let result = lt_gate(
&mut prover,
64,
in1_var.into(),
in2_var.into()
).unwrap();
prover.constrain(result - out_var);
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"test");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let in1_var = verifier.commit(in1_pt);
let in2_var = verifier.commit(in2_pt);
let out_var = verifier.commit(out_pt);
let result = lt_gate(
&mut verifier,
64,
in1_var.into(),
in2_var.into()
).unwrap();
verifier.constrain(result - out_var);
assert!(verifier.verify(&proof).is_ok());
}
}
} | rhs: LinearCombination | random_line_split |
main.rs | use bulletproofs::r1cs::{
ConstraintSystem,
LinearCombination,
Prover,
R1CSError,
Variable,
Verifier,
};
use bulletproofs::{
BulletproofGens,
PedersenGens,
};
use curve25519_dalek::scalar::Scalar;
use merlin::Transcript;
use rand::Rng;
use std::u64;
struct TaxBrackets(Vec<(u64, u64)>);
fn negate_bit<T>(x: T) -> LinearCombination
where T: Into<LinearCombination>
{
LinearCombination::from(Variable::One()) - x
}
fn scalar_to_bits_le<CS: ConstraintSystem>(
cs: &mut CS,
n_bits: usize,
var: LinearCombination
) -> Result<Vec<Variable>, R1CSError> {
// This is a helper function that caches the evaluation of the input variable so that it
// doesn't get recomputed and verified for each bit allocation.
let mut cache_evaluation = {
let get_bit = |scalar: &Scalar, i: usize| (scalar.as_bytes()[i >> 3] >> (i & 7)) & 1;
let local_var = var.clone();
let mut val_cache = None;
move |eval: &dyn Fn(&LinearCombination) -> Scalar, i: usize| -> Result<u8, R1CSError> {
if val_cache.is_none() {
let val = eval(&local_var);
let valid = (n_bits..256).any(|i| get_bit(&val, i) == 0);
val_cache = Some(
if valid {
Ok(val)
} else {
Err(R1CSError::GadgetError {
description: format!("Value is not represented in {} bits", n_bits)
})
}
);
}
val_cache.as_ref()
.expect("the value must have been computed and cached by the block above")
.as_ref()
.map(|scalar| get_bit(scalar, i))
.map_err(|e| e.clone())
}
};
let bit_vars = (0..n_bits)
.map(|i| {
let (lhs, rhs, out) = cs.allocate(|eval| {
let bit = cache_evaluation(eval, i)?;
Ok((bit.into(), (1 - bit).into(), Scalar::zero()))
})?;
// Enforce that lhs variable represents a bit.
// b (1 - b) = 0
cs.constrain(LinearCombination::default() + rhs + lhs - Variable::One());
cs.constrain(out.into());
Ok(lhs)
})
.collect::<Result<Vec<_>, _>>()?;
let two_powers = (0..n_bits).map(|i| {
let mut two_power_repr = [0u8; 32];
two_power_repr[i >> 3] |= 1 << (i & 7);
Scalar::from_bits(two_power_repr)
});
let bit_sum = bit_vars.iter()
.cloned()
.zip(two_powers)
.collect::<LinearCombination>();
// Enforce that var is equal to the inner product of the bits with powers of two.
cs.constrain(var - bit_sum);
Ok(bit_vars)
}
fn lt_gate<CS: ConstraintSystem>(
cs: &mut CS,
n_bits: usize,
lhs: LinearCombination,
rhs: LinearCombination
) -> Result<LinearCombination, R1CSError> |
fn synthesize<CS: ConstraintSystem>(
cs: &mut CS,
brackets: &TaxBrackets,
values: &[Variable],
expected: &Variable
) -> Result<(), R1CSError> {
// Compute Σ values.
let total = values.iter()
.map(|val| (val.clone(), Scalar::one()))
.collect::<LinearCombination>();
let mut last_cutoff = Scalar::zero();
let mut cumulative = LinearCombination::default();
for (cutoff, rate) in brackets.0.iter() {
let next_cutoff = Scalar::from(*cutoff);
let rate_scalar = Scalar::from(*rate);
let gt_last = lt_gate(cs, 64, last_cutoff.into(), total.clone())?;
let gt_next = lt_gate(cs, 64, next_cutoff.into(), total.clone())?;
let (_, _, between_last_next) = cs.multiply(gt_last.clone(), negate_bit(gt_next.clone()));
let (_, _, between_value) = cs.multiply(
total.clone() - last_cutoff,
LinearCombination::from(between_last_next) * rate_scalar
);
let (_, _, exceeds_value) = cs.multiply(
LinearCombination::from(next_cutoff - last_cutoff),
gt_next * rate_scalar
);
cumulative = cumulative + between_value + exceeds_value;
last_cutoff = next_cutoff;
}
cumulative = cumulative - expected.clone();
cs.constrain(cumulative);
Ok(())
}
fn compute_taxes(brackets: &TaxBrackets, total: u64) -> u64 {
(0..brackets.0.len())
.map(|i| {
let last_cutoff = if i == 0 { 0u64 } else { brackets.0[i-1].0 };
let (next_cutoff, rate) = brackets.0[i];
let amount = if total > next_cutoff {
next_cutoff - last_cutoff
} else if total > last_cutoff {
total - last_cutoff
} else {
0
};
amount * rate
})
.fold(0, |sum, v| sum + v)
}
fn main() {
let brackets = TaxBrackets(vec![
(952500, 10),
(3870000, 12),
(8250000, 22),
(15750000, 24),
(20000000, 32),
(50000000, 35),
(u64::MAX, 37),
]);
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(8192, 1);
let mut prover_transcript = Transcript::new(b"zk taxes");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let mut rng = rand::thread_rng();
let income_amounts = (0..4)
// Multiply by 100 cents to ensure there is no rounding necessary.
.map(|_| rng.gen_range(0, 100000) * 100)
.collect::<Vec<_>>();
let total_income = income_amounts.iter().fold(0, |sum, v| sum + v);
let total_tax = compute_taxes(&brackets, total_income);
println!("Total: {}, Taxes: {}", total_income, total_tax);
let inputs = income_amounts.iter()
.map(|value| (Scalar::from(*value), Scalar::random(&mut rng)))
.collect::<Vec<_>>();
let output_v = Scalar::from(total_tax);
let output_r = Scalar::random(&mut rng);
let (input_pts, input_vars) = inputs.iter()
.map(|(v, r)| prover.commit(*v, *r))
.unzip::<_, _, Vec<_>, Vec<_>>();
let (output_pt, output_var) = prover.commit(output_v, output_r);
synthesize(&mut prover, &brackets, &input_vars, &output_var).unwrap();
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"zk taxes");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let input_vars = input_pts.iter()
.map(|pt| verifier.commit(*pt))
.collect::<Vec<_>>();
let output_var = verifier.commit(output_pt);
synthesize(&mut verifier, &brackets, &input_vars, &output_var).unwrap();
assert!(verifier.verify(&proof).is_ok());
println!("Success!");
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_bits_gadget() {
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(128, 1);
let mut rng = rand::thread_rng();
for _ in 0..100 {
let x = rng.gen::<u64>();
let mut prover_transcript = Transcript::new(b"test");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let (in_pt, in_var) = prover.commit(x.into(), Scalar::random(&mut rng));
let (out_pts, out_vars) = (0..64)
.map(|i| {
prover.commit(((x >> i) & 1).into(), Scalar::random(&mut rng))
})
.unzip::<_, _, Vec<_>, Vec<_>>();
let result = scalar_to_bits_le(&mut prover, 64, in_var.into()).unwrap();
for (wire1, wire2) in result.into_iter().zip(out_vars.into_iter()) {
prover.constrain(wire1 - wire2);
}
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"test");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let in_var = verifier.commit(in_pt);
let out_vars = out_pts.into_iter()
.map(|out_pt| verifier.commit(out_pt))
.collect::<Vec<_>>();
let result = scalar_to_bits_le(&mut verifier, 64, in_var.into()).unwrap();
for (wire1, wire2) in result.into_iter().zip(out_vars.into_iter()) {
verifier.constrain(wire1 - wire2);
}
assert!(verifier.verify(&proof).is_ok());
}
}
#[test]
fn test_lt_gadget() {
let pc_gens = PedersenGens::default();
let bp_gens = BulletproofGens::new(512, 1);
let mut rng = rand::thread_rng();
for _ in 0..100 {
let x1 = rng.gen::<u64>();
let x2 = rng.gen::<u64>();
let expected_out = if x1 < x2 { 1u64 } else { 0u64 };
let mut prover_transcript = Transcript::new(b"test");
let mut prover = Prover::new(
&bp_gens,
&pc_gens,
&mut prover_transcript,
);
let (in1_pt, in1_var) = prover.commit(x1.into(), Scalar::random(&mut rng));
let (in2_pt, in2_var) = prover.commit(x2.into(), Scalar::random(&mut rng));
let (out_pt, out_var) = prover.commit(expected_out.into(), Scalar::random(&mut rng));
let result = lt_gate(
&mut prover,
64,
in1_var.into(),
in2_var.into()
).unwrap();
prover.constrain(result - out_var);
let proof = prover.prove().unwrap();
let mut verifier_transcript = Transcript::new(b"test");
let mut verifier = Verifier::new(
&bp_gens,
&pc_gens,
&mut verifier_transcript,
);
let in1_var = verifier.commit(in1_pt);
let in2_var = verifier.commit(in2_pt);
let out_var = verifier.commit(out_pt);
let result = lt_gate(
&mut verifier,
64,
in1_var.into(),
in2_var.into()
).unwrap();
verifier.constrain(result - out_var);
assert!(verifier.verify(&proof).is_ok());
}
}
}
| {
let lhs_bits = scalar_to_bits_le(cs, n_bits, lhs)?;
let rhs_bits = scalar_to_bits_le(cs, n_bits, rhs)?;
let zero = LinearCombination::default();
// Iterate through bits from most significant to least, comparing each pair.
let (lt, _) = lhs_bits.into_iter().zip(rhs_bits.into_iter())
.rev()
.fold((zero.clone(), zero.clone()), |(lt, gt), (l_bit, r_bit)| {
// lt and gt are boolean LinearCombinations that are 1 if lhs < rhs or lhs > rhs
// respectively after the first i most significant bits.
// Invariant: lt & gt will never both be 1, so (lt || gt) = (lt + gt).
// eq = !(lt || gt)
let eq = negate_bit(lt.clone() + gt.clone());
// Whether left bit i is < or > right bit i.
// bit_lt = !l_bit && r_bit = (1 - l_bit) * r_bit
// bit_gt = l_bit && !r_bit = l_bit * (1 - r_bit)
let (_, _, bit_lt) = cs.multiply(negate_bit(l_bit), r_bit.into());
let (_, _, bit_gt) = cs.multiply(l_bit.into(), negate_bit(r_bit));
// new_lt = lt + eq && bit_lt
// -> lt_diff = new_lt - lt = eq * bit_lt
// new_gt = gt + eq && bit_gt
// -> gt_diff = new_gt - gt = eq * bit_gt
let (_, _, lt_diff) = cs.multiply(eq.clone(), bit_lt.into());
let (_, _, gt_diff) = cs.multiply(eq.clone(), bit_gt.into());
(lt + lt_diff, gt + gt_diff)
});
Ok(lt)
} | identifier_body |
infer.py | # Copyright 2020 Tomas Hodan (hodantom@cmp.felk.cvut.cz).
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
"""A script for inference/visualization.
Example:
python infer.py --model=ycbv-bop20-xc65-f64
"""
import os
import os.path
import time
import numpy as np
import cv2
import tensorflow as tf
import pyprogressivex
import bop_renderer
from bop_toolkit_lib import dataset_params
from bop_toolkit_lib import inout
from bop_toolkit_lib import transform
from bop_toolkit_lib import visualization
from epos_lib import common
from epos_lib import config
from epos_lib import corresp
from epos_lib import datagen
from epos_lib import misc
from epos_lib import model
from epos_lib import vis
# Flags (other common flags are defined in epos_lib/common.py; the flag values
# can be defined on the command line or in params.yml in the model folder).
# ------------------------------------------------------------------------------
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'master', '',
'BNS name of the tensorflow server')
flags.DEFINE_boolean(
'cpu_only', False,
'Whether to run the inference on CPU only.')
flags.DEFINE_string(
'task_type', common.LOCALIZATION, # LOCALIZATION, DETECTION
'Type of the 6D object pose estimation task.')
flags.DEFINE_list(
'infer_tfrecord_names', None,
'Names of tfrecord files (without suffix) used for inference.')
flags.DEFINE_integer(
'infer_max_height_before_crop', '480',
'Maximum image height before cropping (the image is downscaled if larger).')
flags.DEFINE_list(
'infer_crop_size', '640,480',
'Image size [height, width] during inference.')
flags.DEFINE_string(
'checkpoint_name', None,
'Name of the checkpoint to evaluate (e.g. "model.ckpt-1000000"). The latest '
'available checkpoint is used if None.')
flags.DEFINE_boolean(
'project_to_surface', False,
'Whether to project the predicted 3D locations to the object model.')
flags.DEFINE_boolean(
'save_estimates', True,
'Whether to save pose estimates in format expected by the BOP Challenge.')
flags.DEFINE_boolean(
'save_corresp', False,
'Whether to save established correspondences to text files.')
flags.DEFINE_string(
'infer_name', None,
'Name of the inference used in the filename of the saved estimates.')
# Pose fitting parameters.
flags.DEFINE_string(
'fitting_method', common.PROGRESSIVE_X, # PROGRESSIVE_X, OPENCV_RANSAC
'Pose fitting method.')
flags.DEFINE_float(
'inlier_thresh', 4.0,
'Tau_r in the CVPR 2020 paper. Inlier threshold [px] on the '
'reprojection error.')
flags.DEFINE_float(
'neighbour_max_dist', 20.0,
'Tau_d in the CVPR 2020 paper.')
flags.DEFINE_float(
'min_hypothesis_quality', 0.5,
'Tau_q in the CVPR 2020 paper')
flags.DEFINE_float(
'required_progx_confidence', 0.5,
'The required confidence used to calculate the number of Prog-X iterations.')
flags.DEFINE_float(
'required_ransac_confidence', 1.0,
'The required confidence used to calculate the number of RANSAC iterations.')
flags.DEFINE_float(
'min_triangle_area', 0.0,
'Tau_t in the CVPR 2020 paper.')
flags.DEFINE_boolean(
'use_prosac', False,
'Whether to use the PROSAC sampler.')
flags.DEFINE_integer(
'max_model_number_for_pearl', 5,
'Maximum number of instances to optimize by PEARL. PEARL is turned off if '
'there are more instances to find.')
flags.DEFINE_float(
'spatial_coherence_weight', 0.1,
'Weight of the spatial coherence in Graph-Cut RANSAC.')
flags.DEFINE_float(
'scaling_from_millimeters', 0.1,
'Scaling factor of 3D coordinates when constructing the neighborhood graph. '
'0.1 will convert mm to cm. See the CVPR 2020 paper for details.')
flags.DEFINE_float(
'max_tanimoto_similarity', 0.9,
'See the Progressive-X paper.')
flags.DEFINE_integer(
'max_correspondences', None,
'Maximum number of correspondences to use for fitting. Not applied if None.')
flags.DEFINE_integer(
'max_instances_to_fit', None,
'Maximum number of instances to fit. Not applied if None.')
flags.DEFINE_integer(
'max_fitting_iterations', 400,
'The maximum number of fitting iterations.')
# Visualization parameters.
flags.DEFINE_boolean(
'vis', False,
'Global switch for visualizations.')
flags.DEFINE_boolean(
'vis_gt_poses', True,
'Whether to visualize the GT poses.')
flags.DEFINE_boolean(
'vis_pred_poses', True,
'Whether to visualize the predicted poses.')
flags.DEFINE_boolean(
'vis_gt_obj_labels', True,
'Whether to visualize the GT object labels.')
flags.DEFINE_boolean(
'vis_pred_obj_labels', True,
'Whether to visualize the predicted object labels.')
flags.DEFINE_boolean(
'vis_pred_obj_confs', False,
'Whether to visualize the predicted object confidences.')
flags.DEFINE_boolean(
'vis_gt_frag_fields', False,
'Whether to visualize the GT fragment fields.')
flags.DEFINE_boolean(
'vis_pred_frag_fields', False,
'Whether to visualize the predicted fragment fields.')
# ------------------------------------------------------------------------------
def visualize(
    samples, predictions, pred_poses, im_ind, crop_size, output_scale,
    model_store, renderer, vis_dir):
  """Visualizes estimates from one image.

  Composes a grid of tiles (input RGB, GT/predicted poses, GT/predicted
  object labels and, optionally, per-object confidence maps) and saves it to
  vis_dir. Fragment-field visualizations are written by the vis module as
  separate files.

  Args:
    samples: Dictionary with input data.
    predictions: Dictionary with predictions.
    pred_poses: Predicted poses.
    im_ind: Image index.
    crop_size: Image crop size (width, height).
    output_scale: Scale of the model output w.r.t. the input (output / input).
    model_store: Store for 3D object models of class ObjectModelStore.
    renderer: Renderer of class bop_renderer.Renderer().
    vis_dir: Directory where the visualizations will be saved.
  """
  tf.logging.info('Visualization for: {}'.format(
      samples[common.IMAGE_PATH][0].decode('utf8')))

  # Size of a visualization grid tile.
  tile_size = (300, 225)

  # Extension of the saved visualizations ('jpg', 'png', etc.).
  vis_ext = 'jpg'

  # Font settings.
  font_size = 10
  font_color = (0.8, 0.8, 0.8)

  # Intrinsics.
  # Note: the previously computed scaled version of K (K * output_scale) was
  # never used in this function and has been removed as dead code.
  K = samples[common.K][0]

  # Tiles for the grid visualization.
  tiles = []

  # Size of the output fields (width, height), scaled from the input crop.
  output_size = (
      int(output_scale * crop_size[0]), int(output_scale * crop_size[1]))

  # Prefix of the visualization names.
  vis_prefix = '{:06d}'.format(im_ind)

  # Input RGB image.
  rgb = np.squeeze(samples[common.IMAGE][0])
  vis_rgb = visualization.write_text_on_image(
      misc.resize_image_py(rgb, tile_size).astype(np.uint8),
      [{'name': '', 'val': 'input', 'fmt': ':s'}],
      size=font_size, color=font_color)
  tiles.append(vis_rgb)

  # Visualize the ground-truth poses.
  if FLAGS.vis_gt_poses:
    gt_poses = []
    for gt_id, obj_id in enumerate(samples[common.GT_OBJ_IDS][0]):
      q = samples[common.GT_OBJ_QUATS][0][gt_id]
      R = transform.quaternion_matrix(q)[:3, :3]
      t = samples[common.GT_OBJ_TRANS][0][gt_id].reshape((3, 1))
      gt_poses.append({'obj_id': obj_id, 'R': R, 't': t})
    vis_gt_poses = vis.visualize_object_poses(rgb, K, gt_poses, renderer)
    vis_gt_poses = visualization.write_text_on_image(
        misc.resize_image_py(vis_gt_poses, tile_size),
        [{'name': '', 'val': 'gt poses', 'fmt': ':s'}],
        size=font_size, color=font_color)
    tiles.append(vis_gt_poses)

  # Visualize the estimated poses.
  if FLAGS.vis_pred_poses:
    vis_pred_poses = vis.visualize_object_poses(rgb, K, pred_poses, renderer)
    vis_pred_poses = visualization.write_text_on_image(
        misc.resize_image_py(vis_pred_poses, tile_size),
        [{'name': '', 'val': 'pred poses', 'fmt': ':s'}],
        size=font_size, color=font_color)
    tiles.append(vis_pred_poses)

  # Ground-truth object labels (only if present in the samples).
  if FLAGS.vis_gt_obj_labels and common.GT_OBJ_LABEL in samples:
    obj_labels = np.squeeze(samples[common.GT_OBJ_LABEL][0])
    obj_labels = obj_labels[:crop_size[1], :crop_size[0]]
    obj_labels = vis.colorize_label_map(obj_labels)
    obj_labels = visualization.write_text_on_image(
        misc.resize_image_py(obj_labels.astype(np.uint8), tile_size),
        [{'name': '', 'val': 'gt obj labels', 'fmt': ':s'}],
        size=font_size, color=font_color)
    tiles.append(obj_labels)

  # Predicted object labels.
  if FLAGS.vis_pred_obj_labels:
    obj_labels = np.squeeze(predictions[common.PRED_OBJ_LABEL][0])
    obj_labels = obj_labels[:crop_size[1], :crop_size[0]]
    obj_labels = vis.colorize_label_map(obj_labels)
    obj_labels = visualization.write_text_on_image(
        misc.resize_image_py(obj_labels.astype(np.uint8), tile_size),
        [{'name': '', 'val': 'predicted obj labels', 'fmt': ':s'}],
        size=font_size, color=font_color)
    tiles.append(obj_labels)

  # Predicted object confidences (one grayscale tile per object label).
  if FLAGS.vis_pred_obj_confs:
    num_obj_labels = predictions[common.PRED_OBJ_CONF].shape[-1]
    for obj_label in range(num_obj_labels):
      obj_confs = misc.resize_image_py(np.array(
          predictions[common.PRED_OBJ_CONF][0, :, :, obj_label]), tile_size)
      obj_confs = (255.0 * obj_confs).astype(np.uint8)
      obj_confs = np.dstack([obj_confs, obj_confs, obj_confs])  # To RGB.
      obj_confs = visualization.write_text_on_image(
          obj_confs, [{'name': 'cls', 'val': obj_label, 'fmt': ':d'}],
          size=font_size, color=font_color)
      tiles.append(obj_confs)

  # Visualization of ground-truth fragment fields (saved as separate files).
  if FLAGS.vis_gt_frag_fields and common.GT_OBJ_IDS in samples:
    vis.visualize_gt_frag(
        gt_obj_ids=samples[common.GT_OBJ_IDS][0],
        gt_obj_masks=samples[common.GT_OBJ_MASKS][0],
        gt_frag_labels=samples[common.GT_FRAG_LABEL][0],
        gt_frag_weights=samples[common.GT_FRAG_WEIGHT][0],
        gt_frag_coords=samples[common.GT_FRAG_LOC][0],
        output_size=output_size,
        model_store=model_store,
        vis_prefix=vis_prefix,
        vis_dir=vis_dir)

  # Visualization of predicted fragment fields (saved as separate files).
  if FLAGS.vis_pred_frag_fields:
    vis.visualize_pred_frag(
        frag_confs=predictions[common.PRED_FRAG_CONF][0],
        frag_coords=predictions[common.PRED_FRAG_LOC][0],
        output_size=output_size,
        model_store=model_store,
        vis_prefix=vis_prefix,
        vis_dir=vis_dir,
        vis_ext=vis_ext)

  # Build and save a visualization grid.
  grid = vis.build_grid(tiles, tile_size)
  grid_vis_path = os.path.join(
      vis_dir, '{}_grid.{}'.format(vis_prefix, vis_ext))
  inout.save_im(grid_vis_path, grid)
def save_correspondences(
    scene_id, im_id, im_ind, obj_id, image_path, K, obj_pred, pred_time,
    infer_name, obj_gt_poses, infer_dir):
  """Saves predicted 2D-3D correspondences of one object to a text file.

  The file contains meta information (image path, scene/image/object ID and
  prediction time), the 3x3 intrinsic matrix, the ground-truth poses, and the
  predicted correspondences sorted by confidence in descending order.
  """
  lines = ['# Corr format: u v x y z px_id frag_id conf conf_obj conf_frag']

  # Meta information.
  lines.append('{}'.format(image_path))
  lines.append('{} {} {} {}'.format(scene_id, im_id, obj_id, pred_time))

  # Intrinsics (one row of K per line).
  for i in range(3):
    lines.append('{} {} {}'.format(K[i, 0], K[i, 1], K[i, 2]))

  # Ground-truth poses: count, then three [R | t] rows per pose.
  lines.append('{}'.format(len(obj_gt_poses)))
  for pose in obj_gt_poses:
    for i in range(3):
      lines.append('{} {} {} {}'.format(
          pose['R'][i, 0], pose['R'][i, 1], pose['R'][i, 2],
          pose['t'][i, 0]))

  # Sort the predicted correspondences by confidence (descending).
  order = np.argsort(obj_pred['conf'])[::-1]
  px_id = obj_pred['px_id'][order]
  frag_id = obj_pred['frag_id'][order]
  coord_2d = obj_pred['coord_2d'][order]
  coord_3d = obj_pred['coord_3d'][order]
  conf = obj_pred['conf'][order]
  conf_obj = obj_pred['conf_obj'][order]
  conf_frag = obj_pred['conf_frag'][order]

  # Predicted correspondences: count, then one correspondence per line.
  num_corrs = len(coord_2d)
  lines.append('{}'.format(num_corrs))
  for i in range(num_corrs):
    lines.append('{} {} {} {} {} {} {} {} {} {}'.format(
        coord_2d[i, 0], coord_2d[i, 1],
        coord_3d[i, 0], coord_3d[i, 1], coord_3d[i, 2],
        px_id[i], frag_id[i], conf[i], conf_obj[i], conf_frag[i]))

  # Compose the output path and save the correspondences.
  corr_suffix = '' if infer_name is None else '_' + infer_name
  corr_path = os.path.join(
      infer_dir, 'corr{}'.format(corr_suffix),
      '{:06d}_corr_{:02d}.txt'.format(im_ind, obj_id))
  tf.gfile.MakeDirs(os.path.dirname(corr_path))
  with open(corr_path, 'w') as f:
    f.write('\n'.join(lines) + '\n')
def process_image(
    sess, samples, predictions, im_ind, crop_size, output_scale, model_store,
    renderer, task_type, infer_name, infer_dir, vis_dir):
  """Estimates object poses from one image.

  Args:
    sess: TensorFlow session.
    samples: Dictionary with input data.
    predictions: Dictionary with predictions.
    im_ind: Index of the current image.
    crop_size: Image crop size (width, height).
    output_scale: Scale of the model output w.r.t. the input (output / input).
    model_store: Store for 3D object models of class ObjectModelStore.
    renderer: Renderer of class bop_renderer.Renderer().
    task_type: 6D object pose estimation task (common.LOCALIZATION or
      common.DETECTION).
    infer_name: Name of the current inference.
    infer_dir: Folder for inference results.
    vis_dir: Folder for visualizations.

  Returns:
    Tuple (poses, run_times), where poses is a list of dicts with keys
    scene_id, im_id, obj_id, R, t, score and time, and run_times maps stage
    names ('prediction', 'establish_corr', 'fitting', 'total') to durations
    in seconds.
  """
  # Dictionary for run times.
  run_times = {}

  # Prediction.
  time_start = time.time()
  (samples, predictions) = sess.run([samples, predictions])
  run_times['prediction'] = time.time() - time_start

  # Scene and image ID's.
  scene_id = samples[common.SCENE_ID][0]
  im_id = samples[common.IM_ID][0]

  # Intrinsic parameters.
  K = samples[common.K][0]

  # GT poses are available only in the localization task.
  if task_type == common.LOCALIZATION:
    gt_poses = []
    gt_obj_ids = samples[common.GT_OBJ_IDS][0]
    for gt_id in range(len(gt_obj_ids)):
      R = transform.quaternion_matrix(
          samples[common.GT_OBJ_QUATS][0][gt_id])[:3, :3]
      t = samples[common.GT_OBJ_TRANS][0][gt_id].reshape((3, 1))
      gt_poses.append({'obj_id': gt_obj_ids[gt_id], 'R': R, 't': t})
  else:
    gt_poses = None

  # Establish many-to-many 2D-3D correspondences.
  time_start = time.time()
  corr = corresp.establish_many_to_many(
      obj_confs=predictions[common.PRED_OBJ_CONF][0],
      frag_confs=predictions[common.PRED_FRAG_CONF][0],
      frag_coords=predictions[common.PRED_FRAG_LOC][0],
      # Guard against gt_poses being None in the detection task; the previous
      # unconditional comprehension would raise a TypeError in that case.
      gt_obj_ids=(
          [x['obj_id'] for x in gt_poses] if gt_poses is not None else None),
      model_store=model_store,
      output_scale=output_scale,
      min_obj_conf=FLAGS.corr_min_obj_conf,
      min_frag_rel_conf=FLAGS.corr_min_frag_rel_conf,
      project_to_surface=FLAGS.project_to_surface,
      only_annotated_objs=(task_type == common.LOCALIZATION))
  run_times['establish_corr'] = time.time() - time_start

  # PnP-RANSAC to estimate 6D object poses from the correspondences.
  time_start = time.time()
  poses = []
  for obj_id, obj_corr in corr.items():
    # tf.logging.info(
    #   'Image path: {}, obj: {}'.format(samples[common.IMAGE_PATH][0], obj_id))

    # Number of established correspondences.
    num_corrs = obj_corr['coord_2d'].shape[0]

    # Skip the fitting if there are too few correspondences.
    min_required_corrs = 6
    if num_corrs < min_required_corrs:
      continue

    # The correspondences need to be sorted for PROSAC.
    if FLAGS.use_prosac:
      sorted_inds = np.argsort(obj_corr['conf'])[::-1]
      for key in obj_corr.keys():
        obj_corr[key] = obj_corr[key][sorted_inds]

    # Select correspondences with the highest confidence.
    if FLAGS.max_correspondences is not None \
        and num_corrs > FLAGS.max_correspondences:
      # Sort the correspondences only if they have not been sorted for PROSAC.
      if FLAGS.use_prosac:
        keep_inds = np.arange(num_corrs)
      else:
        keep_inds = np.argsort(obj_corr['conf'])[::-1]
      keep_inds = keep_inds[:FLAGS.max_correspondences]
      for key in obj_corr.keys():
        obj_corr[key] = obj_corr[key][keep_inds]

    # Save the established correspondences (for analysis).
    if FLAGS.save_corresp:
      obj_gt_poses = []
      if gt_poses is not None:
        obj_gt_poses = [x for x in gt_poses if x['obj_id'] == obj_id]
      pred_time = float(np.sum(list(run_times.values())))
      image_path = samples[common.IMAGE_PATH][0].decode('utf-8')
      save_correspondences(
          scene_id, im_id, im_ind, obj_id, image_path, K, obj_corr, pred_time,
          infer_name, obj_gt_poses, infer_dir)

    # Make sure the coordinates are saved continuously in memory.
    coord_2d = np.ascontiguousarray(obj_corr['coord_2d'].astype(np.float64))
    coord_3d = np.ascontiguousarray(obj_corr['coord_3d'].astype(np.float64))

    if FLAGS.fitting_method == common.PROGRESSIVE_X:
      # If num_instances == 1, then only GC-RANSAC is applied. If > 1, then
      # Progressive-X is applied and up to num_instances poses are returned.
      # If num_instances == -1, then Progressive-X is applied and all found
      # poses are returned.
      if task_type == common.LOCALIZATION:
        num_instances = len([x for x in gt_poses if x['obj_id'] == obj_id])
      else:
        num_instances = -1
      # NOTE(review): min(-1, k) == -1, so max_instances_to_fit has no effect
      # in the detection task -- confirm this is intended.
      if FLAGS.max_instances_to_fit is not None:
        num_instances = min(num_instances, FLAGS.max_instances_to_fit)
      pose_ests, inlier_indices, pose_qualities = pyprogressivex.find6DPoses(
          x1y1=coord_2d,
          x2y2z2=coord_3d,
          K=K,
          threshold=FLAGS.inlier_thresh,
          neighborhood_ball_radius=FLAGS.neighbour_max_dist,
          spatial_coherence_weight=FLAGS.spatial_coherence_weight,
          scaling_from_millimeters=FLAGS.scaling_from_millimeters,
          max_tanimoto_similarity=FLAGS.max_tanimoto_similarity,
          max_iters=FLAGS.max_fitting_iterations,
          conf=FLAGS.required_progx_confidence,
          proposal_engine_conf=FLAGS.required_ransac_confidence,
          min_coverage=FLAGS.min_hypothesis_quality,
          min_triangle_area=FLAGS.min_triangle_area,
          min_point_number=6,
          max_model_number=num_instances,
          max_model_number_for_optimization=FLAGS.max_model_number_for_pearl,
          use_prosac=FLAGS.use_prosac,
          log=False)
      pose_est_success = pose_ests is not None
      if pose_est_success:
        # Each estimated pose occupies three consecutive rows ([R | t]).
        for i in range(int(pose_ests.shape[0] / 3)):
          j = i * 3
          R_est = pose_ests[j:(j + 3), :3]
          t_est = pose_ests[j:(j + 3), 3].reshape((3, 1))
          poses.append({
              'scene_id': scene_id,
              'im_id': im_id,
              'obj_id': obj_id,
              'R': R_est,
              't': t_est,
              'score': pose_qualities[i],
          })

    elif FLAGS.fitting_method == common.OPENCV_RANSAC:
      # This integration of OpenCV-RANSAC can estimate pose of only one object
      # instance. Note that in Table 3 of the EPOS CVPR'20 paper, the scores
      # for OpenCV-RANSAC were obtained with integrating cv2.solvePnPRansac
      # in the Progressive-X scheme (as the other methods in that table).
      pose_est_success, r_est, t_est, inliers = cv2.solvePnPRansac(
          objectPoints=coord_3d,
          imagePoints=coord_2d,
          cameraMatrix=K,
          distCoeffs=None,
          iterationsCount=FLAGS.max_fitting_iterations,
          reprojectionError=FLAGS.inlier_thresh,
          confidence=0.99,  # FLAGS.required_ransac_confidence
          flags=cv2.SOLVEPNP_EPNP)
      if pose_est_success:
        poses.append({
            'scene_id': scene_id,
            'im_id': im_id,
            'obj_id': obj_id,
            'R': cv2.Rodrigues(r_est)[0],
            't': t_est,
            'score': 0.0,  # TODO: Define the score.
        })

    else:
      raise ValueError(
          'Unknown pose fitting method ({}).'.format(FLAGS.fitting_method))

  run_times['fitting'] = time.time() - time_start
  run_times['total'] = np.sum(list(run_times.values()))

  # Add the total time to each pose.
  for pose in poses:
    pose['time'] = run_times['total']

  # Visualization.
  if FLAGS.vis:
    visualize(
        samples=samples,
        predictions=predictions,
        pred_poses=poses,
        im_ind=im_ind,
        crop_size=crop_size,
        output_scale=output_scale,
        model_store=model_store,
        renderer=renderer,
        vis_dir=vis_dir)

  return poses, run_times
def main(unused_argv):
  """Runs 6D object pose inference over a dataset and saves the results.

  Loads flags/checkpoint from the model folder, builds the inference graph,
  iterates over all images via a MonitoredSession, estimates poses with
  process_image and optionally saves them in the BOP format.
  """
  tf.logging.set_verbosity(tf.logging.INFO)

  # Model folder.
  model_dir = os.path.join(config.TF_MODELS_PATH, FLAGS.model)

  # Update flags with parameters loaded from the model folder.
  common.update_flags(os.path.join(model_dir, common.PARAMS_FILENAME))

  # Print the flag values.
  common.print_flags()

  # Folder from which the latest model checkpoint will be loaded.
  checkpoint_dir = os.path.join(model_dir, 'train')

  # Folder for the inference output.
  infer_dir = os.path.join(model_dir, 'infer')
  tf.gfile.MakeDirs(infer_dir)

  # Folder for the visualization output.
  vis_dir = os.path.join(model_dir, 'vis')
  tf.gfile.MakeDirs(vis_dir)

  # TFRecord files used for training.
  tfrecord_names = FLAGS.infer_tfrecord_names
  if not isinstance(FLAGS.infer_tfrecord_names, list):
    tfrecord_names = [FLAGS.infer_tfrecord_names]

  # Stride of the final output.
  if FLAGS.upsample_logits:
    # The stride is 1 if the logits are upsampled to the input resolution.
    output_stride = 1
  else:
    assert (len(FLAGS.decoder_output_stride) == 1)
    output_stride = FLAGS.decoder_output_stride[0]

  with tf.Graph().as_default():

    # Whether original GT annotations are needed (for the localization task
    # or for visualizing the GT poses).
    return_gt_orig = np.any([
        FLAGS.task_type == common.LOCALIZATION,
        FLAGS.vis_gt_poses])

    # NOTE(review): these are prediction-visualization flags although the
    # variable is named return_gt_maps -- confirm this is intended.
    return_gt_maps = np.any([
        FLAGS.vis_pred_obj_labels,
        FLAGS.vis_pred_obj_confs,
        FLAGS.vis_pred_frag_fields])

    # Dataset provider.
    dataset = datagen.Dataset(
        dataset_name=FLAGS.dataset,
        tfrecord_names=tfrecord_names,
        model_dir=model_dir,
        model_variant=FLAGS.model_variant,
        batch_size=1,
        max_height_before_crop=FLAGS.infer_max_height_before_crop,
        crop_size=list(map(int, FLAGS.infer_crop_size)),
        num_frags=FLAGS.num_frags,
        min_visib_fract=None,
        gt_knn_frags=1,
        output_stride=output_stride,
        is_training=False,
        return_gt_orig=return_gt_orig,
        return_gt_maps=return_gt_maps,
        should_shuffle=False,
        should_repeat=False,
        prepare_for_projection=FLAGS.project_to_surface,
        data_augmentations=None)

    # Initialize a renderer for visualization (only if poses are visualized).
    renderer = None
    if FLAGS.vis_gt_poses or FLAGS.vis_pred_poses:
      tf.logging.info('Initializing renderer for visualization...')
      renderer = bop_renderer.Renderer()
      renderer.init(dataset.crop_size[0], dataset.crop_size[1])
      model_type_vis = 'eval'
      dp_model = dataset_params.get_model_params(
          config.BOP_PATH, dataset.dataset_name, model_type=model_type_vis)
      for obj_id in dp_model['obj_ids']:
        path = dp_model['model_tpath'].format(obj_id=obj_id)
        renderer.add_object(obj_id, path)
      tf.logging.info('Renderer initialized.')

    # Inputs.
    samples = dataset.get_one_shot_iterator().get_next()

    # A map from output type to the number of associated channels.
    outputs_to_num_channels = common.get_outputs_to_num_channels(
        dataset.num_objs, dataset.model_store.num_frags)

    # Options of the neural network model.
    model_options = common.ModelOptions(
        outputs_to_num_channels=outputs_to_num_channels,
        crop_size=list(map(int, FLAGS.infer_crop_size)),
        atrous_rates=FLAGS.atrous_rates,
        encoder_output_stride=FLAGS.encoder_output_stride)

    # Construct the inference graph.
    predictions = model.predict(
        images=samples[common.IMAGE],
        model_options=model_options,
        upsample_logits=FLAGS.upsample_logits,
        image_pyramid=FLAGS.image_pyramid,
        num_objs=dataset.num_objs,
        num_frags=dataset.num_frags,
        frag_cls_agnostic=FLAGS.frag_cls_agnostic,
        frag_loc_agnostic=FLAGS.frag_loc_agnostic)

    # Global step.
    tf.train.get_or_create_global_step()

    # Get path to the model checkpoint.
    if FLAGS.checkpoint_name is None:
      checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
    else:
      checkpoint_path = os.path.join(checkpoint_dir, FLAGS.checkpoint_name)

    time_str = time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime())
    tf.logging.info('Starting inference at: {}'.format(time_str))
    tf.logging.info('Inference with model: {}'.format(checkpoint_path))

    # Scaffold for initialization.
    scaffold = tf.train.Scaffold(
        init_op=tf.global_variables_initializer(),
        saver=tf.train.Saver(var_list=misc.get_variable_dict()))

    # TensorFlow configuration.
    if FLAGS.cpu_only:
      tf_config = tf.ConfigProto(device_count={'GPU': 0})
    else:
      tf_config = tf.ConfigProto()
      # tf_config.gpu_options.allow_growth = True  # Only necessary GPU memory.
      tf_config.gpu_options.allow_growth = False

    # Nodes that can use multiple threads to parallelize their execution will
    # schedule the individual pieces into this pool.
    tf_config.intra_op_parallelism_threads = 10

    # All ready nodes are scheduled in this pool.
    tf_config.inter_op_parallelism_threads = 10

    poses_all = []
    first_im_poses_num = 0

    session_creator = tf.train.ChiefSessionCreator(
        config=tf_config,
        scaffold=scaffold,
        master=FLAGS.master,
        checkpoint_filename_with_path=checkpoint_path)
    with tf.train.MonitoredSession(
        session_creator=session_creator, hooks=None) as sess:

      im_ind = 0
      while not sess.should_stop():

        # Estimate object poses for the current image.
        poses, run_times = process_image(
            sess=sess,
            samples=samples,
            predictions=predictions,
            im_ind=im_ind,
            crop_size=dataset.crop_size,
            output_scale=(1.0 / output_stride),
            model_store=dataset.model_store,
            renderer=renderer,
            task_type=FLAGS.task_type,
            infer_name=FLAGS.infer_name,
            infer_dir=infer_dir,
            vis_dir=vis_dir)

        # Note that the first image takes longer time (because of TF init).
        tf.logging.info(
            'Image: {}, prediction: {:.3f}, establish_corr: {:.3f}, '
            'fitting: {:.3f}, total time: {:.3f}'.format(
                im_ind, run_times['prediction'], run_times['establish_corr'],
                run_times['fitting'], run_times['total']))

        poses_all += poses
        if im_ind == 0:
          first_im_poses_num = len(poses)
        im_ind += 1

    # Set the time of pose estimates from the first image to the average time.
    # Tensorflow takes a long time on the first image (because of init).
    time_avg = 0.0
    for pose in poses_all:
      time_avg += pose['time']
    if len(poses_all) > 0:
      time_avg /= float((len(poses_all)))
    for i in range(first_im_poses_num):
      poses_all[i]['time'] = time_avg

    # Save the estimated poses in the BOP format:
    # https://bop.felk.cvut.cz/challenges/bop-challenge-2020/#formatofresults
    if FLAGS.save_estimates:
      suffix = ''
      if FLAGS.infer_name is not None:
        suffix = '_{}'.format(FLAGS.infer_name)
      poses_path = os.path.join(
          infer_dir, 'estimated-poses{}.csv'.format(suffix))
      tf.logging.info('Saving estimated poses to: {}'.format(poses_path))
      inout.save_bop_results(poses_path, poses_all, version='bop19')

    time_str = time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime())
    tf.logging.info('Finished inference at: {}'.format(time_str))
| process_image | identifier_name |
infer.py | # Copyright 2020 Tomas Hodan (hodantom@cmp.felk.cvut.cz).
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
"""A script for inference/visualization.
Example:
python infer.py --model=ycbv-bop20-xc65-f64
"""
import os
import os.path
import time
import numpy as np
import cv2
import tensorflow as tf
import pyprogressivex
import bop_renderer
from bop_toolkit_lib import dataset_params
from bop_toolkit_lib import inout
from bop_toolkit_lib import transform
from bop_toolkit_lib import visualization
from epos_lib import common
from epos_lib import config
from epos_lib import corresp
from epos_lib import datagen
from epos_lib import misc
from epos_lib import model
from epos_lib import vis
# Flags (other common flags are defined in epos_lib/common.py; the flag values
# can be defined on the command line or in params.yml in the model folder).
# ------------------------------------------------------------------------------
flags = tf.app.flags
FLAGS = flags.FLAGS

flags.DEFINE_string(
    'master', '',
    'BNS name of the tensorflow server')

flags.DEFINE_boolean(
    'cpu_only', False,
    'Whether to run the inference on CPU only.')

flags.DEFINE_string(
    'task_type', common.LOCALIZATION,  # LOCALIZATION, DETECTION
    'Type of the 6D object pose estimation task.')

flags.DEFINE_list(
    'infer_tfrecord_names', None,
    'Names of tfrecord files (without suffix) used for inference.')

# NOTE(review): the default is the string '480' for an integer flag; the flag
# parser accepts it, but an int literal 480 would be cleaner -- confirm.
flags.DEFINE_integer(
    'infer_max_height_before_crop', '480',
    'Maximum image height before cropping (the image is downscaled if larger).')

flags.DEFINE_list(
    'infer_crop_size', '640,480',
    'Image size [height, width] during inference.')

flags.DEFINE_string(
    'checkpoint_name', None,
    'Name of the checkpoint to evaluate (e.g. "model.ckpt-1000000"). The latest '
    'available checkpoint is used if None.')

flags.DEFINE_boolean(
    'project_to_surface', False,
    'Whether to project the predicted 3D locations to the object model.')

flags.DEFINE_boolean(
    'save_estimates', True,
    'Whether to save pose estimates in format expected by the BOP Challenge.')

flags.DEFINE_boolean(
    'save_corresp', False,
    'Whether to save established correspondences to text files.')

flags.DEFINE_string(
    'infer_name', None,
    'Name of the inference used in the filename of the saved estimates.')

# Pose fitting parameters.
flags.DEFINE_string(
    'fitting_method', common.PROGRESSIVE_X,  # PROGRESSIVE_X, OPENCV_RANSAC
    'Pose fitting method.')

flags.DEFINE_float(
    'inlier_thresh', 4.0,
    'Tau_r in the CVPR 2020 paper. Inlier threshold [px] on the '
    'reprojection error.')

flags.DEFINE_float(
    'neighbour_max_dist', 20.0,
    'Tau_d in the CVPR 2020 paper.')

flags.DEFINE_float(
    'min_hypothesis_quality', 0.5,
    'Tau_q in the CVPR 2020 paper')

flags.DEFINE_float(
    'required_progx_confidence', 0.5,
    'The required confidence used to calculate the number of Prog-X iterations.')

flags.DEFINE_float(
    'required_ransac_confidence', 1.0,
    'The required confidence used to calculate the number of RANSAC iterations.')

flags.DEFINE_float(
    'min_triangle_area', 0.0,
    'Tau_t in the CVPR 2020 paper.')

flags.DEFINE_boolean(
    'use_prosac', False,
    'Whether to use the PROSAC sampler.')

flags.DEFINE_integer(
    'max_model_number_for_pearl', 5,
    'Maximum number of instances to optimize by PEARL. PEARL is turned off if '
    'there are more instances to find.')

flags.DEFINE_float(
    'spatial_coherence_weight', 0.1,
    'Weight of the spatial coherence in Graph-Cut RANSAC.')

flags.DEFINE_float(
    'scaling_from_millimeters', 0.1,
    'Scaling factor of 3D coordinates when constructing the neighborhood graph. '
    '0.1 will convert mm to cm. See the CVPR 2020 paper for details.')

flags.DEFINE_float(
    'max_tanimoto_similarity', 0.9,
    'See the Progressive-X paper.')

flags.DEFINE_integer(
    'max_correspondences', None,
    'Maximum number of correspondences to use for fitting. Not applied if None.')

flags.DEFINE_integer(
    'max_instances_to_fit', None,
    'Maximum number of instances to fit. Not applied if None.')

flags.DEFINE_integer(
    'max_fitting_iterations', 400,
    'The maximum number of fitting iterations.')

# Visualization parameters.
flags.DEFINE_boolean(
    'vis', False,
    'Global switch for visualizations.')

flags.DEFINE_boolean(
    'vis_gt_poses', True,
    'Whether to visualize the GT poses.')

flags.DEFINE_boolean(
    'vis_pred_poses', True,
    'Whether to visualize the predicted poses.')

flags.DEFINE_boolean(
    'vis_gt_obj_labels', True,
    'Whether to visualize the GT object labels.')

flags.DEFINE_boolean(
    'vis_pred_obj_labels', True,
    'Whether to visualize the predicted object labels.')

flags.DEFINE_boolean(
    'vis_pred_obj_confs', False,
    'Whether to visualize the predicted object confidences.')

flags.DEFINE_boolean(
    'vis_gt_frag_fields', False,
    'Whether to visualize the GT fragment fields.')

flags.DEFINE_boolean(
    'vis_pred_frag_fields', False,
    'Whether to visualize the predicted fragment fields.')
# ------------------------------------------------------------------------------
def visualize(
    samples, predictions, pred_poses, im_ind, crop_size, output_scale,
    model_store, renderer, vis_dir):
  """Visualizes estimates from one image.

  Composes a grid of tiles (input RGB, GT/predicted poses and labels,
  optional confidence maps) and saves it to vis_dir.

  Args:
    samples: Dictionary with input data.
    predictions: Dictionary with predictions.
    pred_poses: Predicted poses.
    im_ind: Image index.
    crop_size: Image crop size (width, height).
    output_scale: Scale of the model output w.r.t. the input (output / input).
    model_store: Store for 3D object models of class ObjectModelStore.
    renderer: Renderer of class bop_renderer.Renderer().
    vis_dir: Directory where the visualizations will be saved.
  """
  tf.logging.info('Visualization for: {}'.format(
      samples[common.IMAGE_PATH][0].decode('utf8')))

  # Size of a visualization grid tile.
  tile_size = (300, 225)

  # Extension of the saved visualizations ('jpg', 'png', etc.).
  vis_ext = 'jpg'

  # Font settings.
  font_size = 10
  font_color = (0.8, 0.8, 0.8)

  # Intrinsics.
  K = samples[common.K][0]
  # NOTE(review): output_K is computed but not used anywhere in this
  # function -- looks like dead code; confirm before removing.
  output_K = K * output_scale
  output_K[2, 2] = 1.0

  # Tiles for the grid visualization.
  tiles = []

  # Size of the output fields (width, height), scaled from the input crop.
  output_size = \
      int(output_scale * crop_size[0]), int(output_scale * crop_size[1])

  # Prefix of the visualization names.
  vis_prefix = '{:06d}'.format(im_ind)

  # Input RGB image.
  rgb = np.squeeze(samples[common.IMAGE][0])
  vis_rgb = visualization.write_text_on_image(
      misc.resize_image_py(rgb, tile_size).astype(np.uint8),
      [{'name': '', 'val': 'input', 'fmt': ':s'}],
      size=font_size, color=font_color)
  tiles.append(vis_rgb)

  # Visualize the ground-truth poses.
  if FLAGS.vis_gt_poses:
    gt_poses = []
    for gt_id, obj_id in enumerate(samples[common.GT_OBJ_IDS][0]):
      q = samples[common.GT_OBJ_QUATS][0][gt_id]
      R = transform.quaternion_matrix(q)[:3, :3]
      t = samples[common.GT_OBJ_TRANS][0][gt_id].reshape((3, 1))
      gt_poses.append({'obj_id': obj_id, 'R': R, 't': t})
    vis_gt_poses = vis.visualize_object_poses(rgb, K, gt_poses, renderer)
    vis_gt_poses = visualization.write_text_on_image(
        misc.resize_image_py(vis_gt_poses, tile_size),
        [{'name': '', 'val': 'gt poses', 'fmt': ':s'}],
        size=font_size, color=font_color)
    tiles.append(vis_gt_poses)

  # Visualize the estimated poses.
  if FLAGS.vis_pred_poses:
    vis_pred_poses = vis.visualize_object_poses(rgb, K, pred_poses, renderer)
    vis_pred_poses = visualization.write_text_on_image(
        misc.resize_image_py(vis_pred_poses, tile_size),
        [{'name': '', 'val': 'pred poses', 'fmt': ':s'}],
        size=font_size, color=font_color)
    tiles.append(vis_pred_poses)

  # Ground-truth object labels (cropped to the valid image region).
  if FLAGS.vis_gt_obj_labels and common.GT_OBJ_LABEL in samples:
    obj_labels = np.squeeze(samples[common.GT_OBJ_LABEL][0])
    obj_labels = obj_labels[:crop_size[1], :crop_size[0]]
    obj_labels = vis.colorize_label_map(obj_labels)
    obj_labels = visualization.write_text_on_image(
        misc.resize_image_py(obj_labels.astype(np.uint8), tile_size),
        [{'name': '', 'val': 'gt obj labels', 'fmt': ':s'}],
        size=font_size, color=font_color)
    tiles.append(obj_labels)

  # Predicted object labels.
  if FLAGS.vis_pred_obj_labels:
    obj_labels = np.squeeze(predictions[common.PRED_OBJ_LABEL][0])
    obj_labels = obj_labels[:crop_size[1], :crop_size[0]]
    obj_labels = vis.colorize_label_map(obj_labels)
    obj_labels = visualization.write_text_on_image(
        misc.resize_image_py(obj_labels.astype(np.uint8), tile_size),
        [{'name': '', 'val': 'predicted obj labels', 'fmt': ':s'}],
        size=font_size, color=font_color)
    tiles.append(obj_labels)

  # Predicted object confidences (one grayscale tile per object label).
  if FLAGS.vis_pred_obj_confs:
    num_obj_labels = predictions[common.PRED_OBJ_CONF].shape[-1]
    for obj_label in range(num_obj_labels):
      obj_confs = misc.resize_image_py(np.array(
          predictions[common.PRED_OBJ_CONF][0, :, :, obj_label]), tile_size)
      obj_confs = (255.0 * obj_confs).astype(np.uint8)
      obj_confs = np.dstack([obj_confs, obj_confs, obj_confs])  # To RGB.
      obj_confs = visualization.write_text_on_image(
          obj_confs, [{'name': 'cls', 'val': obj_label, 'fmt': ':d'}],
          size=font_size, color=font_color)
      tiles.append(obj_confs)

  # Visualization of ground-truth fragment fields (saved as separate files).
  if FLAGS.vis_gt_frag_fields and common.GT_OBJ_IDS in samples:
    vis.visualize_gt_frag(
        gt_obj_ids=samples[common.GT_OBJ_IDS][0],
        gt_obj_masks=samples[common.GT_OBJ_MASKS][0],
        gt_frag_labels=samples[common.GT_FRAG_LABEL][0],
        gt_frag_weights=samples[common.GT_FRAG_WEIGHT][0],
        gt_frag_coords=samples[common.GT_FRAG_LOC][0],
        output_size=output_size,
        model_store=model_store,
        vis_prefix=vis_prefix,
        vis_dir=vis_dir)

  # Visualization of predicted fragment fields (saved as separate files).
  if FLAGS.vis_pred_frag_fields:
    vis.visualize_pred_frag(
        frag_confs=predictions[common.PRED_FRAG_CONF][0],
        frag_coords=predictions[common.PRED_FRAG_LOC][0],
        output_size=output_size,
        model_store=model_store,
        vis_prefix=vis_prefix,
        vis_dir=vis_dir,
        vis_ext=vis_ext)

  # Build and save a visualization grid.
  grid = vis.build_grid(tiles, tile_size)
  grid_vis_path = os.path.join(
      vis_dir, '{}_grid.{}'.format(vis_prefix, vis_ext))
  inout.save_im(grid_vis_path, grid)
def save_correspondences(
    scene_id, im_id, im_ind, obj_id, image_path, K, obj_pred, pred_time,
    infer_name, obj_gt_poses, infer_dir):
  """Writes predicted 2D-3D correspondences of one object to a text file.

  Output layout: a header comment, meta information, the 3x3 intrinsic
  matrix, the ground-truth poses, then the correspondences sorted by
  confidence in descending order.
  """
  out_lines = [
      '# Corr format: u v x y z px_id frag_id conf conf_obj conf_frag',
      '{}'.format(image_path),
      '{} {} {} {}'.format(scene_id, im_id, obj_id, pred_time),
  ]

  # Intrinsics, one row of K per line.
  for row in range(3):
    out_lines.append('{} {} {}'.format(K[row, 0], K[row, 1], K[row, 2]))

  # Ground-truth poses: count followed by three [R | t] rows per pose.
  out_lines.append('{}'.format(len(obj_gt_poses)))
  for pose in obj_gt_poses:
    for row in range(3):
      out_lines.append('{} {} {} {}'.format(
          pose['R'][row, 0], pose['R'][row, 1], pose['R'][row, 2],
          pose['t'][row, 0]))

  # Order all correspondence fields by descending confidence.
  sort_order = np.argsort(obj_pred['conf'])[::-1]
  px_id = obj_pred['px_id'][sort_order]
  frag_id = obj_pred['frag_id'][sort_order]
  coord_2d = obj_pred['coord_2d'][sort_order]
  coord_3d = obj_pred['coord_3d'][sort_order]
  conf = obj_pred['conf'][sort_order]
  conf_obj = obj_pred['conf_obj'][sort_order]
  conf_frag = obj_pred['conf_frag'][sort_order]

  # Correspondences: count followed by one correspondence per line.
  pred_corr_num = len(coord_2d)
  out_lines.append('{}'.format(pred_corr_num))
  for idx in range(pred_corr_num):
    out_lines.append('{} {} {} {} {} {} {} {} {} {}'.format(
        coord_2d[idx, 0], coord_2d[idx, 1],
        coord_3d[idx, 0], coord_3d[idx, 1], coord_3d[idx, 2],
        px_id[idx], frag_id[idx], conf[idx], conf_obj[idx], conf_frag[idx]))

  # Destination path; the folder name carries the inference-name suffix.
  suffix = '_' + infer_name if infer_name is not None else ''
  corr_path = os.path.join(
      infer_dir, 'corr{}'.format(suffix),
      '{:06d}_corr_{:02d}.txt'.format(im_ind, obj_id))
  tf.gfile.MakeDirs(os.path.dirname(corr_path))
  with open(corr_path, 'w') as f:
    f.write('\n'.join(out_lines) + '\n')
def process_image(
sess, samples, predictions, im_ind, crop_size, output_scale, model_store,
renderer, task_type, infer_name, infer_dir, vis_dir):
"""Estimates object poses from one image.
Args:
sess: TensorFlow session.
samples: Dictionary with input data.
predictions: Dictionary with predictions.
im_ind: Index of the current image.
crop_size: Image crop size (width, height).
output_scale: Scale of the model output w.r.t. the input (output / input).
model_store: Store for 3D object models of class ObjectModelStore.
renderer: Renderer of class bop_renderer.Renderer().
task_type: 6D object pose estimation task (common.LOCALIZATION or
common.DETECTION).
infer_name: Name of the current inference.
infer_dir: Folder for inference results.
vis_dir: Folder for visualizations.
"""
# Dictionary for run times.
run_times = {}
# Prediction.
time_start = time.time()
(samples, predictions) = sess.run([samples, predictions])
run_times['prediction'] = time.time() - time_start
# Scene and image ID's.
scene_id = samples[common.SCENE_ID][0]
im_id = samples[common.IM_ID][0]
# Intrinsic parameters.
K = samples[common.K][0]
if task_type == common.LOCALIZATION:
gt_poses = []
gt_obj_ids = samples[common.GT_OBJ_IDS][0]
for gt_id in range(len(gt_obj_ids)):
R = transform.quaternion_matrix(
samples[common.GT_OBJ_QUATS][0][gt_id])[:3, :3]
t = samples[common.GT_OBJ_TRANS][0][gt_id].reshape((3, 1))
gt_poses.append({'obj_id': gt_obj_ids[gt_id], 'R': R, 't': t})
else:
gt_poses = None
# Establish many-to-many 2D-3D correspondences.
time_start = time.time()
corr = corresp.establish_many_to_many(
obj_confs=predictions[common.PRED_OBJ_CONF][0],
frag_confs=predictions[common.PRED_FRAG_CONF][0],
frag_coords=predictions[common.PRED_FRAG_LOC][0],
gt_obj_ids=[x['obj_id'] for x in gt_poses],
model_store=model_store,
output_scale=output_scale,
min_obj_conf=FLAGS.corr_min_obj_conf,
min_frag_rel_conf=FLAGS.corr_min_frag_rel_conf,
project_to_surface=FLAGS.project_to_surface,
only_annotated_objs=(task_type == common.LOCALIZATION))
run_times['establish_corr'] = time.time() - time_start
# PnP-RANSAC to estimate 6D object poses from the correspondences.
time_start = time.time()
poses = []
for obj_id, obj_corr in corr.items():
# tf.logging.info(
# 'Image path: {}, obj: {}'.format(samples[common.IMAGE_PATH][0], obj_id))
# Number of established correspondences.
num_corrs = obj_corr['coord_2d'].shape[0]
# Skip the fitting if there are too few correspondences.
min_required_corrs = 6
if num_corrs < min_required_corrs:
continue
# The correspondences need to be sorted for PROSAC.
if FLAGS.use_prosac:
sorted_inds = np.argsort(obj_corr['conf'])[::-1]
for key in obj_corr.keys():
|
# Select correspondences with the highest confidence.
if FLAGS.max_correspondences is not None \
and num_corrs > FLAGS.max_correspondences:
# Sort the correspondences only if they have not been sorted for PROSAC.
if FLAGS.use_prosac:
keep_inds = np.arange(num_corrs)
else:
keep_inds = np.argsort(obj_corr['conf'])[::-1]
keep_inds = keep_inds[:FLAGS.max_correspondences]
for key in obj_corr.keys():
obj_corr[key] = obj_corr[key][keep_inds]
# Save the established correspondences (for analysis).
if FLAGS.save_corresp:
obj_gt_poses = []
if gt_poses is not None:
obj_gt_poses = [x for x in gt_poses if x['obj_id'] == obj_id]
pred_time = float(np.sum(list(run_times.values())))
image_path = samples[common.IMAGE_PATH][0].decode('utf-8')
save_correspondences(
scene_id, im_id, im_ind, obj_id, image_path, K, obj_corr, pred_time,
infer_name, obj_gt_poses, infer_dir)
# Make sure the coordinates are saved continuously in memory.
coord_2d = np.ascontiguousarray(obj_corr['coord_2d'].astype(np.float64))
coord_3d = np.ascontiguousarray(obj_corr['coord_3d'].astype(np.float64))
if FLAGS.fitting_method == common.PROGRESSIVE_X:
# If num_instances == 1, then only GC-RANSAC is applied. If > 1, then
# Progressive-X is applied and up to num_instances poses are returned.
# If num_instances == -1, then Progressive-X is applied and all found
# poses are returned.
if task_type == common.LOCALIZATION:
num_instances = len([x for x in gt_poses if x['obj_id'] == obj_id])
else:
num_instances = -1
if FLAGS.max_instances_to_fit is not None:
num_instances = min(num_instances, FLAGS.max_instances_to_fit)
pose_ests, inlier_indices, pose_qualities = pyprogressivex.find6DPoses(
x1y1=coord_2d,
x2y2z2=coord_3d,
K=K,
threshold=FLAGS.inlier_thresh,
neighborhood_ball_radius=FLAGS.neighbour_max_dist,
spatial_coherence_weight=FLAGS.spatial_coherence_weight,
scaling_from_millimeters=FLAGS.scaling_from_millimeters,
max_tanimoto_similarity=FLAGS.max_tanimoto_similarity,
max_iters=FLAGS.max_fitting_iterations,
conf=FLAGS.required_progx_confidence,
proposal_engine_conf=FLAGS.required_ransac_confidence,
min_coverage=FLAGS.min_hypothesis_quality,
min_triangle_area=FLAGS.min_triangle_area,
min_point_number=6,
max_model_number=num_instances,
max_model_number_for_optimization=FLAGS.max_model_number_for_pearl,
use_prosac=FLAGS.use_prosac,
log=False)
pose_est_success = pose_ests is not None
if pose_est_success:
for i in range(int(pose_ests.shape[0] / 3)):
j = i * 3
R_est = pose_ests[j:(j + 3), :3]
t_est = pose_ests[j:(j + 3), 3].reshape((3, 1))
poses.append({
'scene_id': scene_id,
'im_id': im_id,
'obj_id': obj_id,
'R': R_est,
't': t_est,
'score': pose_qualities[i],
})
elif FLAGS.fitting_method == common.OPENCV_RANSAC:
# This integration of OpenCV-RANSAC can estimate pose of only one object
# instance. Note that in Table 3 of the EPOS CVPR'20 paper, the scores
# for OpenCV-RANSAC were obtained with integrating cv2.solvePnPRansac
# in the Progressive-X scheme (as the other methods in that table).
pose_est_success, r_est, t_est, inliers = cv2.solvePnPRansac(
objectPoints=coord_3d,
imagePoints=coord_2d,
cameraMatrix=K,
distCoeffs=None,
iterationsCount=FLAGS.max_fitting_iterations,
reprojectionError=FLAGS.inlier_thresh,
confidence=0.99, # FLAGS.required_ransac_confidence
flags=cv2.SOLVEPNP_EPNP)
if pose_est_success:
poses.append({
'scene_id': scene_id,
'im_id': im_id,
'obj_id': obj_id,
'R': cv2.Rodrigues(r_est)[0],
't': t_est,
'score': 0.0, # TODO: Define the score.
})
else:
raise ValueError(
'Unknown pose fitting method ({}).'.format(FLAGS.fitting_method))
run_times['fitting'] = time.time() - time_start
run_times['total'] = np.sum(list(run_times.values()))
# Add the total time to each pose.
for pose in poses:
pose['time'] = run_times['total']
# Visualization.
if FLAGS.vis:
visualize(
samples=samples,
predictions=predictions,
pred_poses=poses,
im_ind=im_ind,
crop_size=crop_size,
output_scale=output_scale,
model_store=model_store,
renderer=renderer,
vis_dir=vis_dir)
return poses, run_times
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Model folder.
model_dir = os.path.join(config.TF_MODELS_PATH, FLAGS.model)
# Update flags with parameters loaded from the model folder.
common.update_flags(os.path.join(model_dir, common.PARAMS_FILENAME))
# Print the flag values.
common.print_flags()
# Folder from which the latest model checkpoint will be loaded.
checkpoint_dir = os.path.join(model_dir, 'train')
# Folder for the inference output.
infer_dir = os.path.join(model_dir, 'infer')
tf.gfile.MakeDirs(infer_dir)
# Folder for the visualization output.
vis_dir = os.path.join(model_dir, 'vis')
tf.gfile.MakeDirs(vis_dir)
# TFRecord files used for training.
tfrecord_names = FLAGS.infer_tfrecord_names
if not isinstance(FLAGS.infer_tfrecord_names, list):
tfrecord_names = [FLAGS.infer_tfrecord_names]
# Stride of the final output.
if FLAGS.upsample_logits:
# The stride is 1 if the logits are upsampled to the input resolution.
output_stride = 1
else:
assert (len(FLAGS.decoder_output_stride) == 1)
output_stride = FLAGS.decoder_output_stride[0]
with tf.Graph().as_default():
return_gt_orig = np.any([
FLAGS.task_type == common.LOCALIZATION,
FLAGS.vis_gt_poses])
return_gt_maps = np.any([
FLAGS.vis_pred_obj_labels,
FLAGS.vis_pred_obj_confs,
FLAGS.vis_pred_frag_fields])
# Dataset provider.
dataset = datagen.Dataset(
dataset_name=FLAGS.dataset,
tfrecord_names=tfrecord_names,
model_dir=model_dir,
model_variant=FLAGS.model_variant,
batch_size=1,
max_height_before_crop=FLAGS.infer_max_height_before_crop,
crop_size=list(map(int, FLAGS.infer_crop_size)),
num_frags=FLAGS.num_frags,
min_visib_fract=None,
gt_knn_frags=1,
output_stride=output_stride,
is_training=False,
return_gt_orig=return_gt_orig,
return_gt_maps=return_gt_maps,
should_shuffle=False,
should_repeat=False,
prepare_for_projection=FLAGS.project_to_surface,
data_augmentations=None)
# Initialize a renderer for visualization.
renderer = None
if FLAGS.vis_gt_poses or FLAGS.vis_pred_poses:
tf.logging.info('Initializing renderer for visualization...')
renderer = bop_renderer.Renderer()
renderer.init(dataset.crop_size[0], dataset.crop_size[1])
model_type_vis = 'eval'
dp_model = dataset_params.get_model_params(
config.BOP_PATH, dataset.dataset_name, model_type=model_type_vis)
for obj_id in dp_model['obj_ids']:
path = dp_model['model_tpath'].format(obj_id=obj_id)
renderer.add_object(obj_id, path)
tf.logging.info('Renderer initialized.')
# Inputs.
samples = dataset.get_one_shot_iterator().get_next()
# A map from output type to the number of associated channels.
outputs_to_num_channels = common.get_outputs_to_num_channels(
dataset.num_objs, dataset.model_store.num_frags)
# Options of the neural network model.
model_options = common.ModelOptions(
outputs_to_num_channels=outputs_to_num_channels,
crop_size=list(map(int, FLAGS.infer_crop_size)),
atrous_rates=FLAGS.atrous_rates,
encoder_output_stride=FLAGS.encoder_output_stride)
# Construct the inference graph.
predictions = model.predict(
images=samples[common.IMAGE],
model_options=model_options,
upsample_logits=FLAGS.upsample_logits,
image_pyramid=FLAGS.image_pyramid,
num_objs=dataset.num_objs,
num_frags=dataset.num_frags,
frag_cls_agnostic=FLAGS.frag_cls_agnostic,
frag_loc_agnostic=FLAGS.frag_loc_agnostic)
# Global step.
tf.train.get_or_create_global_step()
# Get path to the model checkpoint.
if FLAGS.checkpoint_name is None:
checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
else:
checkpoint_path = os.path.join(checkpoint_dir, FLAGS.checkpoint_name)
time_str = time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime())
tf.logging.info('Starting inference at: {}'.format(time_str))
tf.logging.info('Inference with model: {}'.format(checkpoint_path))
# Scaffold for initialization.
scaffold = tf.train.Scaffold(
init_op=tf.global_variables_initializer(),
saver=tf.train.Saver(var_list=misc.get_variable_dict()))
# TensorFlow configuration.
if FLAGS.cpu_only:
tf_config = tf.ConfigProto(device_count={'GPU': 0})
else:
tf_config = tf.ConfigProto()
# tf_config.gpu_options.allow_growth = True # Only necessary GPU memory.
tf_config.gpu_options.allow_growth = False
# Nodes that can use multiple threads to parallelize their execution will
# schedule the individual pieces into this pool.
tf_config.intra_op_parallelism_threads = 10
# All ready nodes are scheduled in this pool.
tf_config.inter_op_parallelism_threads = 10
poses_all = []
first_im_poses_num = 0
session_creator = tf.train.ChiefSessionCreator(
config=tf_config,
scaffold=scaffold,
master=FLAGS.master,
checkpoint_filename_with_path=checkpoint_path)
with tf.train.MonitoredSession(
session_creator=session_creator, hooks=None) as sess:
im_ind = 0
while not sess.should_stop():
# Estimate object poses for the current image.
poses, run_times = process_image(
sess=sess,
samples=samples,
predictions=predictions,
im_ind=im_ind,
crop_size=dataset.crop_size,
output_scale=(1.0 / output_stride),
model_store=dataset.model_store,
renderer=renderer,
task_type=FLAGS.task_type,
infer_name=FLAGS.infer_name,
infer_dir=infer_dir,
vis_dir=vis_dir)
# Note that the first image takes longer time (because of TF init).
tf.logging.info(
'Image: {}, prediction: {:.3f}, establish_corr: {:.3f}, '
'fitting: {:.3f}, total time: {:.3f}'.format(
im_ind, run_times['prediction'], run_times['establish_corr'],
run_times['fitting'], run_times['total']))
poses_all += poses
if im_ind == 0:
first_im_poses_num = len(poses)
im_ind += 1
# Set the time of pose estimates from the first image to the average time.
# Tensorflow takes a long time on the first image (because of init).
time_avg = 0.0
for pose in poses_all:
time_avg += pose['time']
if len(poses_all) > 0:
time_avg /= float((len(poses_all)))
for i in range(first_im_poses_num):
poses_all[i]['time'] = time_avg
# Save the estimated poses in the BOP format:
# https://bop.felk.cvut.cz/challenges/bop-challenge-2020/#formatofresults
if FLAGS.save_estimates:
suffix = ''
if FLAGS.infer_name is not None:
suffix = '_{}'.format(FLAGS.infer_name)
poses_path = os.path.join(
infer_dir, 'estimated-poses{}.csv'.format(suffix))
tf.logging.info('Saving estimated poses to: {}'.format(poses_path))
inout.save_bop_results(poses_path, poses_all, version='bop19')
time_str = time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime())
tf.logging.info('Finished inference at: {}'.format(time_str))
if __name__ == '__main__':
tf.app.run()
| obj_corr[key] = obj_corr[key][sorted_inds] | conditional_block |
infer.py | # Copyright 2020 Tomas Hodan (hodantom@cmp.felk.cvut.cz).
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
"""A script for inference/visualization.
Example:
python infer.py --model=ycbv-bop20-xc65-f64
"""
import os
import os.path
import time
import numpy as np
import cv2
import tensorflow as tf
import pyprogressivex
import bop_renderer
from bop_toolkit_lib import dataset_params
from bop_toolkit_lib import inout
from bop_toolkit_lib import transform
from bop_toolkit_lib import visualization
from epos_lib import common
from epos_lib import config
from epos_lib import corresp
from epos_lib import datagen
from epos_lib import misc
from epos_lib import model
from epos_lib import vis
# Flags (other common flags are defined in epos_lib/common.py; the flag values
# can be defined on the command line or in params.yml in the model folder).
# ------------------------------------------------------------------------------
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'master', '',
'BNS name of the tensorflow server')
flags.DEFINE_boolean(
'cpu_only', False,
'Whether to run the inference on CPU only.')
flags.DEFINE_string(
'task_type', common.LOCALIZATION, # LOCALIZATION, DETECTION
'Type of the 6D object pose estimation task.')
flags.DEFINE_list(
'infer_tfrecord_names', None,
'Names of tfrecord files (without suffix) used for inference.')
flags.DEFINE_integer(
'infer_max_height_before_crop', '480',
'Maximum image height before cropping (the image is downscaled if larger).')
flags.DEFINE_list(
'infer_crop_size', '640,480',
'Image size [height, width] during inference.')
flags.DEFINE_string(
'checkpoint_name', None,
'Name of the checkpoint to evaluate (e.g. "model.ckpt-1000000"). The latest '
'available checkpoint is used if None.')
flags.DEFINE_boolean(
'project_to_surface', False,
'Whether to project the predicted 3D locations to the object model.')
flags.DEFINE_boolean(
'save_estimates', True,
'Whether to save pose estimates in format expected by the BOP Challenge.')
flags.DEFINE_boolean(
'save_corresp', False,
'Whether to save established correspondences to text files.')
flags.DEFINE_string(
'infer_name', None,
'Name of the inference used in the filename of the saved estimates.')
# Pose fitting parameters.
flags.DEFINE_string(
'fitting_method', common.PROGRESSIVE_X, # PROGRESSIVE_X, OPENCV_RANSAC
'Pose fitting method.')
flags.DEFINE_float(
'inlier_thresh', 4.0,
'Tau_r in the CVPR 2020 paper. Inlier threshold [px] on the '
'reprojection error.')
flags.DEFINE_float(
'neighbour_max_dist', 20.0,
'Tau_d in the CVPR 2020 paper.')
flags.DEFINE_float(
'min_hypothesis_quality', 0.5,
'Tau_q in the CVPR 2020 paper')
flags.DEFINE_float(
'required_progx_confidence', 0.5,
'The required confidence used to calculate the number of Prog-X iterations.')
flags.DEFINE_float(
'required_ransac_confidence', 1.0,
'The required confidence used to calculate the number of RANSAC iterations.')
flags.DEFINE_float(
'min_triangle_area', 0.0,
'Tau_t in the CVPR 2020 paper.')
flags.DEFINE_boolean(
'use_prosac', False,
'Whether to use the PROSAC sampler.')
flags.DEFINE_integer(
'max_model_number_for_pearl', 5,
'Maximum number of instances to optimize by PEARL. PEARL is turned off if '
'there are more instances to find.')
flags.DEFINE_float(
'spatial_coherence_weight', 0.1,
'Weight of the spatial coherence in Graph-Cut RANSAC.')
flags.DEFINE_float(
'scaling_from_millimeters', 0.1,
'Scaling factor of 3D coordinates when constructing the neighborhood graph. '
'0.1 will convert mm to cm. See the CVPR 2020 paper for details.')
flags.DEFINE_float(
'max_tanimoto_similarity', 0.9,
'See the Progressive-X paper.')
flags.DEFINE_integer(
'max_correspondences', None,
'Maximum number of correspondences to use for fitting. Not applied if None.')
flags.DEFINE_integer(
'max_instances_to_fit', None,
'Maximum number of instances to fit. Not applied if None.')
flags.DEFINE_integer(
'max_fitting_iterations', 400,
'The maximum number of fitting iterations.')
# Visualization parameters.
flags.DEFINE_boolean(
'vis', False,
'Global switch for visualizations.')
flags.DEFINE_boolean(
'vis_gt_poses', True,
'Whether to visualize the GT poses.')
flags.DEFINE_boolean(
'vis_pred_poses', True,
'Whether to visualize the predicted poses.')
flags.DEFINE_boolean(
'vis_gt_obj_labels', True,
'Whether to visualize the GT object labels.')
flags.DEFINE_boolean(
'vis_pred_obj_labels', True,
'Whether to visualize the predicted object labels.')
flags.DEFINE_boolean(
'vis_pred_obj_confs', False,
'Whether to visualize the predicted object confidences.')
flags.DEFINE_boolean(
'vis_gt_frag_fields', False,
'Whether to visualize the GT fragment fields.')
flags.DEFINE_boolean(
'vis_pred_frag_fields', False,
'Whether to visualize the predicted fragment fields.')
# ------------------------------------------------------------------------------
def visualize(
samples, predictions, pred_poses, im_ind, crop_size, output_scale,
model_store, renderer, vis_dir):
"""Visualizes estimates from one image.
Args:
samples: Dictionary with input data.
predictions: Dictionary with predictions.
pred_poses: Predicted poses.
im_ind: Image index.
crop_size: Image crop size (width, height).
output_scale: Scale of the model output w.r.t. the input (output / input).
model_store: Store for 3D object models of class ObjectModelStore.
renderer: Renderer of class bop_renderer.Renderer().
vis_dir: Directory where the visualizations will be saved.
"""
tf.logging.info('Visualization for: {}'.format(
samples[common.IMAGE_PATH][0].decode('utf8')))
# Size of a visualization grid tile.
tile_size = (300, 225)
# Extension of the saved visualizations ('jpg', 'png', etc.).
vis_ext = 'jpg'
# Font settings.
font_size = 10
font_color = (0.8, 0.8, 0.8)
# Intrinsics.
K = samples[common.K][0]
output_K = K * output_scale
output_K[2, 2] = 1.0
# Tiles for the grid visualization.
tiles = []
# Size of the output fields.
output_size =\
int(output_scale * crop_size[0]), int(output_scale * crop_size[1])
# Prefix of the visualization names.
vis_prefix = '{:06d}'.format(im_ind)
# Input RGB image.
rgb = np.squeeze(samples[common.IMAGE][0])
vis_rgb = visualization.write_text_on_image(
misc.resize_image_py(rgb, tile_size).astype(np.uint8),
[{'name': '', 'val': 'input', 'fmt': ':s'}],
size=font_size, color=font_color)
tiles.append(vis_rgb)
# Visualize the ground-truth poses.
if FLAGS.vis_gt_poses:
gt_poses = []
for gt_id, obj_id in enumerate(samples[common.GT_OBJ_IDS][0]):
q = samples[common.GT_OBJ_QUATS][0][gt_id]
R = transform.quaternion_matrix(q)[:3, :3]
t = samples[common.GT_OBJ_TRANS][0][gt_id].reshape((3, 1))
gt_poses.append({'obj_id': obj_id, 'R': R, 't': t})
vis_gt_poses = vis.visualize_object_poses(rgb, K, gt_poses, renderer)
vis_gt_poses = visualization.write_text_on_image(
misc.resize_image_py(vis_gt_poses, tile_size),
[{'name': '', 'val': 'gt poses', 'fmt': ':s'}],
size=font_size, color=font_color)
tiles.append(vis_gt_poses)
# Visualize the estimated poses.
if FLAGS.vis_pred_poses:
vis_pred_poses = vis.visualize_object_poses(rgb, K, pred_poses, renderer)
vis_pred_poses = visualization.write_text_on_image(
misc.resize_image_py(vis_pred_poses, tile_size),
[{'name': '', 'val': 'pred poses', 'fmt': ':s'}],
size=font_size, color=font_color)
tiles.append(vis_pred_poses)
# Ground-truth object labels.
if FLAGS.vis_gt_obj_labels and common.GT_OBJ_LABEL in samples:
obj_labels = np.squeeze(samples[common.GT_OBJ_LABEL][0])
obj_labels = obj_labels[:crop_size[1], :crop_size[0]]
obj_labels = vis.colorize_label_map(obj_labels)
obj_labels = visualization.write_text_on_image(
misc.resize_image_py(obj_labels.astype(np.uint8), tile_size),
[{'name': '', 'val': 'gt obj labels', 'fmt': ':s'}],
size=font_size, color=font_color)
tiles.append(obj_labels)
# Predicted object labels.
if FLAGS.vis_pred_obj_labels:
obj_labels = np.squeeze(predictions[common.PRED_OBJ_LABEL][0])
obj_labels = obj_labels[:crop_size[1], :crop_size[0]]
obj_labels = vis.colorize_label_map(obj_labels)
obj_labels = visualization.write_text_on_image(
misc.resize_image_py(obj_labels.astype(np.uint8), tile_size),
[{'name': '', 'val': 'predicted obj labels', 'fmt': ':s'}],
size=font_size, color=font_color)
tiles.append(obj_labels)
# Predicted object confidences.
if FLAGS.vis_pred_obj_confs:
num_obj_labels = predictions[common.PRED_OBJ_CONF].shape[-1]
for obj_label in range(num_obj_labels):
obj_confs = misc.resize_image_py(np.array(
predictions[common.PRED_OBJ_CONF][0, :, :, obj_label]), tile_size)
obj_confs = (255.0 * obj_confs).astype(np.uint8)
obj_confs = np.dstack([obj_confs, obj_confs, obj_confs]) # To RGB.
obj_confs = visualization.write_text_on_image(
obj_confs, [{'name': 'cls', 'val': obj_label, 'fmt': ':d'}],
size=font_size, color=font_color)
tiles.append(obj_confs)
# Visualization of ground-truth fragment fields.
if FLAGS.vis_gt_frag_fields and common.GT_OBJ_IDS in samples:
vis.visualize_gt_frag(
gt_obj_ids=samples[common.GT_OBJ_IDS][0],
gt_obj_masks=samples[common.GT_OBJ_MASKS][0],
gt_frag_labels=samples[common.GT_FRAG_LABEL][0],
gt_frag_weights=samples[common.GT_FRAG_WEIGHT][0],
gt_frag_coords=samples[common.GT_FRAG_LOC][0],
output_size=output_size,
model_store=model_store,
vis_prefix=vis_prefix,
vis_dir=vis_dir)
# Visualization of predicted fragment fields.
if FLAGS.vis_pred_frag_fields:
vis.visualize_pred_frag(
frag_confs=predictions[common.PRED_FRAG_CONF][0],
frag_coords=predictions[common.PRED_FRAG_LOC][0],
output_size=output_size,
model_store=model_store,
vis_prefix=vis_prefix,
vis_dir=vis_dir,
vis_ext=vis_ext)
# Build and save a visualization grid.
grid = vis.build_grid(tiles, tile_size)
grid_vis_path = os.path.join(
vis_dir, '{}_grid.{}'.format(vis_prefix, vis_ext))
inout.save_im(grid_vis_path, grid)
def save_correspondences(
scene_id, im_id, im_ind, obj_id, image_path, K, obj_pred, pred_time,
infer_name, obj_gt_poses, infer_dir):
# Add meta information.
txt = '# Corr format: u v x y z px_id frag_id conf conf_obj conf_frag\n'
txt += '{}\n'.format(image_path)
txt += '{} {} {} {}\n'.format(scene_id, im_id, obj_id, pred_time)
# Add intrinsics.
for i in range(3):
txt += '{} {} {}\n'.format(K[i, 0], K[i, 1], K[i, 2])
# Add ground-truth poses.
txt += '{}\n'.format(len(obj_gt_poses))
for pose in obj_gt_poses:
for i in range(3):
txt += '{} {} {} {}\n'.format(
pose['R'][i, 0], pose['R'][i, 1], pose['R'][i, 2], pose['t'][i, 0])
# Sort the predicted correspondences by confidence.
sort_inds = np.argsort(obj_pred['conf'])[::-1]
px_id = obj_pred['px_id'][sort_inds]
frag_id = obj_pred['frag_id'][sort_inds]
coord_2d = obj_pred['coord_2d'][sort_inds]
coord_3d = obj_pred['coord_3d'][sort_inds]
conf = obj_pred['conf'][sort_inds]
conf_obj = obj_pred['conf_obj'][sort_inds]
conf_frag = obj_pred['conf_frag'][sort_inds]
# Add the predicted correspondences.
pred_corr_num = len(coord_2d)
txt += '{}\n'.format(pred_corr_num)
for i in range(pred_corr_num):
txt += '{} {} {} {} {} {} {} {} {} {}\n'.format(
coord_2d[i, 0], coord_2d[i, 1],
coord_3d[i, 0], coord_3d[i, 1], coord_3d[i, 2],
px_id[i], frag_id[i], conf[i], conf_obj[i], conf_frag[i])
# Save the correspondences into a file.
corr_suffix = infer_name
if corr_suffix is None:
corr_suffix = ''
else:
corr_suffix = '_' + corr_suffix
corr_path = os.path.join(
infer_dir, 'corr{}'.format(corr_suffix),
'{:06d}_corr_{:02d}.txt'.format(im_ind, obj_id))
tf.gfile.MakeDirs(os.path.dirname(corr_path))
with open(corr_path, 'w') as f:
f.write(txt)
def process_image(
sess, samples, predictions, im_ind, crop_size, output_scale, model_store,
renderer, task_type, infer_name, infer_dir, vis_dir):
"""Estimates object poses from one image.
Args:
sess: TensorFlow session.
samples: Dictionary with input data.
predictions: Dictionary with predictions.
im_ind: Index of the current image.
crop_size: Image crop size (width, height).
output_scale: Scale of the model output w.r.t. the input (output / input).
model_store: Store for 3D object models of class ObjectModelStore.
renderer: Renderer of class bop_renderer.Renderer().
task_type: 6D object pose estimation task (common.LOCALIZATION or
common.DETECTION).
infer_name: Name of the current inference.
infer_dir: Folder for inference results.
vis_dir: Folder for visualizations.
"""
# Dictionary for run times.
run_times = {}
# Prediction.
time_start = time.time()
(samples, predictions) = sess.run([samples, predictions])
run_times['prediction'] = time.time() - time_start
# Scene and image ID's.
scene_id = samples[common.SCENE_ID][0]
im_id = samples[common.IM_ID][0]
# Intrinsic parameters.
K = samples[common.K][0]
if task_type == common.LOCALIZATION:
gt_poses = []
gt_obj_ids = samples[common.GT_OBJ_IDS][0]
for gt_id in range(len(gt_obj_ids)):
R = transform.quaternion_matrix(
samples[common.GT_OBJ_QUATS][0][gt_id])[:3, :3]
t = samples[common.GT_OBJ_TRANS][0][gt_id].reshape((3, 1))
gt_poses.append({'obj_id': gt_obj_ids[gt_id], 'R': R, 't': t})
else:
gt_poses = None
# Establish many-to-many 2D-3D correspondences.
time_start = time.time()
corr = corresp.establish_many_to_many(
obj_confs=predictions[common.PRED_OBJ_CONF][0],
frag_confs=predictions[common.PRED_FRAG_CONF][0],
frag_coords=predictions[common.PRED_FRAG_LOC][0],
gt_obj_ids=[x['obj_id'] for x in gt_poses],
model_store=model_store,
output_scale=output_scale,
min_obj_conf=FLAGS.corr_min_obj_conf,
min_frag_rel_conf=FLAGS.corr_min_frag_rel_conf,
project_to_surface=FLAGS.project_to_surface,
only_annotated_objs=(task_type == common.LOCALIZATION))
run_times['establish_corr'] = time.time() - time_start
# PnP-RANSAC to estimate 6D object poses from the correspondences.
time_start = time.time()
poses = []
for obj_id, obj_corr in corr.items():
# tf.logging.info(
# 'Image path: {}, obj: {}'.format(samples[common.IMAGE_PATH][0], obj_id))
# Number of established correspondences.
num_corrs = obj_corr['coord_2d'].shape[0]
# Skip the fitting if there are too few correspondences.
min_required_corrs = 6
if num_corrs < min_required_corrs:
continue
# The correspondences need to be sorted for PROSAC.
if FLAGS.use_prosac:
sorted_inds = np.argsort(obj_corr['conf'])[::-1]
for key in obj_corr.keys():
obj_corr[key] = obj_corr[key][sorted_inds]
# Select correspondences with the highest confidence.
if FLAGS.max_correspondences is not None \
and num_corrs > FLAGS.max_correspondences:
# Sort the correspondences only if they have not been sorted for PROSAC.
if FLAGS.use_prosac:
keep_inds = np.arange(num_corrs)
else:
keep_inds = np.argsort(obj_corr['conf'])[::-1]
keep_inds = keep_inds[:FLAGS.max_correspondences]
for key in obj_corr.keys():
obj_corr[key] = obj_corr[key][keep_inds]
# Save the established correspondences (for analysis).
if FLAGS.save_corresp:
obj_gt_poses = []
if gt_poses is not None:
obj_gt_poses = [x for x in gt_poses if x['obj_id'] == obj_id]
pred_time = float(np.sum(list(run_times.values())))
image_path = samples[common.IMAGE_PATH][0].decode('utf-8')
save_correspondences(
scene_id, im_id, im_ind, obj_id, image_path, K, obj_corr, pred_time,
infer_name, obj_gt_poses, infer_dir)
# Make sure the coordinates are saved continuously in memory.
coord_2d = np.ascontiguousarray(obj_corr['coord_2d'].astype(np.float64))
coord_3d = np.ascontiguousarray(obj_corr['coord_3d'].astype(np.float64))
if FLAGS.fitting_method == common.PROGRESSIVE_X:
# If num_instances == 1, then only GC-RANSAC is applied. If > 1, then
# Progressive-X is applied and up to num_instances poses are returned.
# If num_instances == -1, then Progressive-X is applied and all found
# poses are returned.
if task_type == common.LOCALIZATION:
num_instances = len([x for x in gt_poses if x['obj_id'] == obj_id])
else:
num_instances = -1
if FLAGS.max_instances_to_fit is not None:
num_instances = min(num_instances, FLAGS.max_instances_to_fit)
pose_ests, inlier_indices, pose_qualities = pyprogressivex.find6DPoses(
x1y1=coord_2d,
x2y2z2=coord_3d,
K=K,
threshold=FLAGS.inlier_thresh,
neighborhood_ball_radius=FLAGS.neighbour_max_dist,
spatial_coherence_weight=FLAGS.spatial_coherence_weight,
scaling_from_millimeters=FLAGS.scaling_from_millimeters,
max_tanimoto_similarity=FLAGS.max_tanimoto_similarity,
max_iters=FLAGS.max_fitting_iterations,
conf=FLAGS.required_progx_confidence,
proposal_engine_conf=FLAGS.required_ransac_confidence,
min_coverage=FLAGS.min_hypothesis_quality,
min_triangle_area=FLAGS.min_triangle_area,
min_point_number=6,
max_model_number=num_instances,
max_model_number_for_optimization=FLAGS.max_model_number_for_pearl,
use_prosac=FLAGS.use_prosac,
log=False)
pose_est_success = pose_ests is not None
if pose_est_success:
for i in range(int(pose_ests.shape[0] / 3)):
j = i * 3
R_est = pose_ests[j:(j + 3), :3]
t_est = pose_ests[j:(j + 3), 3].reshape((3, 1))
poses.append({
'scene_id': scene_id,
'im_id': im_id,
'obj_id': obj_id,
'R': R_est,
't': t_est,
'score': pose_qualities[i],
})
elif FLAGS.fitting_method == common.OPENCV_RANSAC:
# This integration of OpenCV-RANSAC can estimate pose of only one object
# instance. Note that in Table 3 of the EPOS CVPR'20 paper, the scores
# for OpenCV-RANSAC were obtained with integrating cv2.solvePnPRansac
# in the Progressive-X scheme (as the other methods in that table).
pose_est_success, r_est, t_est, inliers = cv2.solvePnPRansac(
objectPoints=coord_3d,
imagePoints=coord_2d,
cameraMatrix=K,
distCoeffs=None,
iterationsCount=FLAGS.max_fitting_iterations,
reprojectionError=FLAGS.inlier_thresh,
confidence=0.99, # FLAGS.required_ransac_confidence
flags=cv2.SOLVEPNP_EPNP)
if pose_est_success:
poses.append({
'scene_id': scene_id,
'im_id': im_id,
'obj_id': obj_id,
'R': cv2.Rodrigues(r_est)[0],
't': t_est,
'score': 0.0, # TODO: Define the score.
})
else:
raise ValueError(
'Unknown pose fitting method ({}).'.format(FLAGS.fitting_method))
run_times['fitting'] = time.time() - time_start
run_times['total'] = np.sum(list(run_times.values()))
# Add the total time to each pose.
for pose in poses:
pose['time'] = run_times['total']
# Visualization.
if FLAGS.vis:
visualize(
samples=samples,
predictions=predictions,
pred_poses=poses,
im_ind=im_ind,
crop_size=crop_size,
output_scale=output_scale, | renderer=renderer,
vis_dir=vis_dir)
return poses, run_times
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Model folder.
model_dir = os.path.join(config.TF_MODELS_PATH, FLAGS.model)
# Update flags with parameters loaded from the model folder.
common.update_flags(os.path.join(model_dir, common.PARAMS_FILENAME))
# Print the flag values.
common.print_flags()
# Folder from which the latest model checkpoint will be loaded.
checkpoint_dir = os.path.join(model_dir, 'train')
# Folder for the inference output.
infer_dir = os.path.join(model_dir, 'infer')
tf.gfile.MakeDirs(infer_dir)
# Folder for the visualization output.
vis_dir = os.path.join(model_dir, 'vis')
tf.gfile.MakeDirs(vis_dir)
# TFRecord files used for training.
tfrecord_names = FLAGS.infer_tfrecord_names
if not isinstance(FLAGS.infer_tfrecord_names, list):
tfrecord_names = [FLAGS.infer_tfrecord_names]
# Stride of the final output.
if FLAGS.upsample_logits:
# The stride is 1 if the logits are upsampled to the input resolution.
output_stride = 1
else:
assert (len(FLAGS.decoder_output_stride) == 1)
output_stride = FLAGS.decoder_output_stride[0]
with tf.Graph().as_default():
return_gt_orig = np.any([
FLAGS.task_type == common.LOCALIZATION,
FLAGS.vis_gt_poses])
return_gt_maps = np.any([
FLAGS.vis_pred_obj_labels,
FLAGS.vis_pred_obj_confs,
FLAGS.vis_pred_frag_fields])
# Dataset provider.
dataset = datagen.Dataset(
dataset_name=FLAGS.dataset,
tfrecord_names=tfrecord_names,
model_dir=model_dir,
model_variant=FLAGS.model_variant,
batch_size=1,
max_height_before_crop=FLAGS.infer_max_height_before_crop,
crop_size=list(map(int, FLAGS.infer_crop_size)),
num_frags=FLAGS.num_frags,
min_visib_fract=None,
gt_knn_frags=1,
output_stride=output_stride,
is_training=False,
return_gt_orig=return_gt_orig,
return_gt_maps=return_gt_maps,
should_shuffle=False,
should_repeat=False,
prepare_for_projection=FLAGS.project_to_surface,
data_augmentations=None)
# Initialize a renderer for visualization.
renderer = None
if FLAGS.vis_gt_poses or FLAGS.vis_pred_poses:
tf.logging.info('Initializing renderer for visualization...')
renderer = bop_renderer.Renderer()
renderer.init(dataset.crop_size[0], dataset.crop_size[1])
model_type_vis = 'eval'
dp_model = dataset_params.get_model_params(
config.BOP_PATH, dataset.dataset_name, model_type=model_type_vis)
for obj_id in dp_model['obj_ids']:
path = dp_model['model_tpath'].format(obj_id=obj_id)
renderer.add_object(obj_id, path)
tf.logging.info('Renderer initialized.')
# Inputs.
samples = dataset.get_one_shot_iterator().get_next()
# A map from output type to the number of associated channels.
outputs_to_num_channels = common.get_outputs_to_num_channels(
dataset.num_objs, dataset.model_store.num_frags)
# Options of the neural network model.
model_options = common.ModelOptions(
outputs_to_num_channels=outputs_to_num_channels,
crop_size=list(map(int, FLAGS.infer_crop_size)),
atrous_rates=FLAGS.atrous_rates,
encoder_output_stride=FLAGS.encoder_output_stride)
# Construct the inference graph.
predictions = model.predict(
images=samples[common.IMAGE],
model_options=model_options,
upsample_logits=FLAGS.upsample_logits,
image_pyramid=FLAGS.image_pyramid,
num_objs=dataset.num_objs,
num_frags=dataset.num_frags,
frag_cls_agnostic=FLAGS.frag_cls_agnostic,
frag_loc_agnostic=FLAGS.frag_loc_agnostic)
# Global step.
tf.train.get_or_create_global_step()
# Get path to the model checkpoint.
if FLAGS.checkpoint_name is None:
checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
else:
checkpoint_path = os.path.join(checkpoint_dir, FLAGS.checkpoint_name)
time_str = time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime())
tf.logging.info('Starting inference at: {}'.format(time_str))
tf.logging.info('Inference with model: {}'.format(checkpoint_path))
# Scaffold for initialization.
scaffold = tf.train.Scaffold(
init_op=tf.global_variables_initializer(),
saver=tf.train.Saver(var_list=misc.get_variable_dict()))
# TensorFlow configuration.
if FLAGS.cpu_only:
tf_config = tf.ConfigProto(device_count={'GPU': 0})
else:
tf_config = tf.ConfigProto()
# tf_config.gpu_options.allow_growth = True # Only necessary GPU memory.
tf_config.gpu_options.allow_growth = False
# Nodes that can use multiple threads to parallelize their execution will
# schedule the individual pieces into this pool.
tf_config.intra_op_parallelism_threads = 10
# All ready nodes are scheduled in this pool.
tf_config.inter_op_parallelism_threads = 10
poses_all = []
first_im_poses_num = 0
session_creator = tf.train.ChiefSessionCreator(
config=tf_config,
scaffold=scaffold,
master=FLAGS.master,
checkpoint_filename_with_path=checkpoint_path)
with tf.train.MonitoredSession(
session_creator=session_creator, hooks=None) as sess:
im_ind = 0
while not sess.should_stop():
# Estimate object poses for the current image.
poses, run_times = process_image(
sess=sess,
samples=samples,
predictions=predictions,
im_ind=im_ind,
crop_size=dataset.crop_size,
output_scale=(1.0 / output_stride),
model_store=dataset.model_store,
renderer=renderer,
task_type=FLAGS.task_type,
infer_name=FLAGS.infer_name,
infer_dir=infer_dir,
vis_dir=vis_dir)
# Note that the first image takes longer time (because of TF init).
tf.logging.info(
'Image: {}, prediction: {:.3f}, establish_corr: {:.3f}, '
'fitting: {:.3f}, total time: {:.3f}'.format(
im_ind, run_times['prediction'], run_times['establish_corr'],
run_times['fitting'], run_times['total']))
poses_all += poses
if im_ind == 0:
first_im_poses_num = len(poses)
im_ind += 1
# Set the time of pose estimates from the first image to the average time.
# Tensorflow takes a long time on the first image (because of init).
time_avg = 0.0
for pose in poses_all:
time_avg += pose['time']
if len(poses_all) > 0:
time_avg /= float((len(poses_all)))
for i in range(first_im_poses_num):
poses_all[i]['time'] = time_avg
# Save the estimated poses in the BOP format:
# https://bop.felk.cvut.cz/challenges/bop-challenge-2020/#formatofresults
if FLAGS.save_estimates:
suffix = ''
if FLAGS.infer_name is not None:
suffix = '_{}'.format(FLAGS.infer_name)
poses_path = os.path.join(
infer_dir, 'estimated-poses{}.csv'.format(suffix))
tf.logging.info('Saving estimated poses to: {}'.format(poses_path))
inout.save_bop_results(poses_path, poses_all, version='bop19')
time_str = time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime())
tf.logging.info('Finished inference at: {}'.format(time_str))
if __name__ == '__main__':
tf.app.run() | model_store=model_store, | random_line_split |
infer.py | # Copyright 2020 Tomas Hodan (hodantom@cmp.felk.cvut.cz).
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
"""A script for inference/visualization.
Example:
python infer.py --model=ycbv-bop20-xc65-f64
"""
import os
import os.path
import time
import numpy as np
import cv2
import tensorflow as tf
import pyprogressivex
import bop_renderer
from bop_toolkit_lib import dataset_params
from bop_toolkit_lib import inout
from bop_toolkit_lib import transform
from bop_toolkit_lib import visualization
from epos_lib import common
from epos_lib import config
from epos_lib import corresp
from epos_lib import datagen
from epos_lib import misc
from epos_lib import model
from epos_lib import vis
# Flags (other common flags are defined in epos_lib/common.py; the flag values
# can be defined on the command line or in params.yml in the model folder).
# ------------------------------------------------------------------------------
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'master', '',
'BNS name of the tensorflow server')
flags.DEFINE_boolean(
'cpu_only', False,
'Whether to run the inference on CPU only.')
flags.DEFINE_string(
'task_type', common.LOCALIZATION, # LOCALIZATION, DETECTION
'Type of the 6D object pose estimation task.')
flags.DEFINE_list(
'infer_tfrecord_names', None,
'Names of tfrecord files (without suffix) used for inference.')
flags.DEFINE_integer(
'infer_max_height_before_crop', '480',
'Maximum image height before cropping (the image is downscaled if larger).')
flags.DEFINE_list(
'infer_crop_size', '640,480',
'Image size [height, width] during inference.')
flags.DEFINE_string(
'checkpoint_name', None,
'Name of the checkpoint to evaluate (e.g. "model.ckpt-1000000"). The latest '
'available checkpoint is used if None.')
flags.DEFINE_boolean(
'project_to_surface', False,
'Whether to project the predicted 3D locations to the object model.')
flags.DEFINE_boolean(
'save_estimates', True,
'Whether to save pose estimates in format expected by the BOP Challenge.')
flags.DEFINE_boolean(
'save_corresp', False,
'Whether to save established correspondences to text files.')
flags.DEFINE_string(
'infer_name', None,
'Name of the inference used in the filename of the saved estimates.')
# Pose fitting parameters.
flags.DEFINE_string(
'fitting_method', common.PROGRESSIVE_X, # PROGRESSIVE_X, OPENCV_RANSAC
'Pose fitting method.')
flags.DEFINE_float(
'inlier_thresh', 4.0,
'Tau_r in the CVPR 2020 paper. Inlier threshold [px] on the '
'reprojection error.')
flags.DEFINE_float(
'neighbour_max_dist', 20.0,
'Tau_d in the CVPR 2020 paper.')
flags.DEFINE_float(
'min_hypothesis_quality', 0.5,
'Tau_q in the CVPR 2020 paper')
flags.DEFINE_float(
'required_progx_confidence', 0.5,
'The required confidence used to calculate the number of Prog-X iterations.')
flags.DEFINE_float(
'required_ransac_confidence', 1.0,
'The required confidence used to calculate the number of RANSAC iterations.')
flags.DEFINE_float(
'min_triangle_area', 0.0,
'Tau_t in the CVPR 2020 paper.')
flags.DEFINE_boolean(
'use_prosac', False,
'Whether to use the PROSAC sampler.')
flags.DEFINE_integer(
'max_model_number_for_pearl', 5,
'Maximum number of instances to optimize by PEARL. PEARL is turned off if '
'there are more instances to find.')
flags.DEFINE_float(
'spatial_coherence_weight', 0.1,
'Weight of the spatial coherence in Graph-Cut RANSAC.')
flags.DEFINE_float(
'scaling_from_millimeters', 0.1,
'Scaling factor of 3D coordinates when constructing the neighborhood graph. '
'0.1 will convert mm to cm. See the CVPR 2020 paper for details.')
flags.DEFINE_float(
'max_tanimoto_similarity', 0.9,
'See the Progressive-X paper.')
flags.DEFINE_integer(
'max_correspondences', None,
'Maximum number of correspondences to use for fitting. Not applied if None.')
flags.DEFINE_integer(
'max_instances_to_fit', None,
'Maximum number of instances to fit. Not applied if None.')
flags.DEFINE_integer(
'max_fitting_iterations', 400,
'The maximum number of fitting iterations.')
# Visualization parameters.
flags.DEFINE_boolean(
'vis', False,
'Global switch for visualizations.')
flags.DEFINE_boolean(
'vis_gt_poses', True,
'Whether to visualize the GT poses.')
flags.DEFINE_boolean(
'vis_pred_poses', True,
'Whether to visualize the predicted poses.')
flags.DEFINE_boolean(
'vis_gt_obj_labels', True,
'Whether to visualize the GT object labels.')
flags.DEFINE_boolean(
'vis_pred_obj_labels', True,
'Whether to visualize the predicted object labels.')
flags.DEFINE_boolean(
'vis_pred_obj_confs', False,
'Whether to visualize the predicted object confidences.')
flags.DEFINE_boolean(
'vis_gt_frag_fields', False,
'Whether to visualize the GT fragment fields.')
flags.DEFINE_boolean(
'vis_pred_frag_fields', False,
'Whether to visualize the predicted fragment fields.')
# ------------------------------------------------------------------------------
def visualize(
samples, predictions, pred_poses, im_ind, crop_size, output_scale,
model_store, renderer, vis_dir):
"""Visualizes estimates from one image.
Args:
samples: Dictionary with input data.
predictions: Dictionary with predictions.
pred_poses: Predicted poses.
im_ind: Image index.
crop_size: Image crop size (width, height).
output_scale: Scale of the model output w.r.t. the input (output / input).
model_store: Store for 3D object models of class ObjectModelStore.
renderer: Renderer of class bop_renderer.Renderer().
vis_dir: Directory where the visualizations will be saved.
"""
tf.logging.info('Visualization for: {}'.format(
samples[common.IMAGE_PATH][0].decode('utf8')))
# Size of a visualization grid tile.
tile_size = (300, 225)
# Extension of the saved visualizations ('jpg', 'png', etc.).
vis_ext = 'jpg'
# Font settings.
font_size = 10
font_color = (0.8, 0.8, 0.8)
# Intrinsics.
K = samples[common.K][0]
output_K = K * output_scale
output_K[2, 2] = 1.0
# Tiles for the grid visualization.
tiles = []
# Size of the output fields.
output_size =\
int(output_scale * crop_size[0]), int(output_scale * crop_size[1])
# Prefix of the visualization names.
vis_prefix = '{:06d}'.format(im_ind)
# Input RGB image.
rgb = np.squeeze(samples[common.IMAGE][0])
vis_rgb = visualization.write_text_on_image(
misc.resize_image_py(rgb, tile_size).astype(np.uint8),
[{'name': '', 'val': 'input', 'fmt': ':s'}],
size=font_size, color=font_color)
tiles.append(vis_rgb)
# Visualize the ground-truth poses.
if FLAGS.vis_gt_poses:
gt_poses = []
for gt_id, obj_id in enumerate(samples[common.GT_OBJ_IDS][0]):
q = samples[common.GT_OBJ_QUATS][0][gt_id]
R = transform.quaternion_matrix(q)[:3, :3]
t = samples[common.GT_OBJ_TRANS][0][gt_id].reshape((3, 1))
gt_poses.append({'obj_id': obj_id, 'R': R, 't': t})
vis_gt_poses = vis.visualize_object_poses(rgb, K, gt_poses, renderer)
vis_gt_poses = visualization.write_text_on_image(
misc.resize_image_py(vis_gt_poses, tile_size),
[{'name': '', 'val': 'gt poses', 'fmt': ':s'}],
size=font_size, color=font_color)
tiles.append(vis_gt_poses)
# Visualize the estimated poses.
if FLAGS.vis_pred_poses:
vis_pred_poses = vis.visualize_object_poses(rgb, K, pred_poses, renderer)
vis_pred_poses = visualization.write_text_on_image(
misc.resize_image_py(vis_pred_poses, tile_size),
[{'name': '', 'val': 'pred poses', 'fmt': ':s'}],
size=font_size, color=font_color)
tiles.append(vis_pred_poses)
# Ground-truth object labels.
if FLAGS.vis_gt_obj_labels and common.GT_OBJ_LABEL in samples:
obj_labels = np.squeeze(samples[common.GT_OBJ_LABEL][0])
obj_labels = obj_labels[:crop_size[1], :crop_size[0]]
obj_labels = vis.colorize_label_map(obj_labels)
obj_labels = visualization.write_text_on_image(
misc.resize_image_py(obj_labels.astype(np.uint8), tile_size),
[{'name': '', 'val': 'gt obj labels', 'fmt': ':s'}],
size=font_size, color=font_color)
tiles.append(obj_labels)
# Predicted object labels.
if FLAGS.vis_pred_obj_labels:
obj_labels = np.squeeze(predictions[common.PRED_OBJ_LABEL][0])
obj_labels = obj_labels[:crop_size[1], :crop_size[0]]
obj_labels = vis.colorize_label_map(obj_labels)
obj_labels = visualization.write_text_on_image(
misc.resize_image_py(obj_labels.astype(np.uint8), tile_size),
[{'name': '', 'val': 'predicted obj labels', 'fmt': ':s'}],
size=font_size, color=font_color)
tiles.append(obj_labels)
# Predicted object confidences.
if FLAGS.vis_pred_obj_confs:
num_obj_labels = predictions[common.PRED_OBJ_CONF].shape[-1]
for obj_label in range(num_obj_labels):
obj_confs = misc.resize_image_py(np.array(
predictions[common.PRED_OBJ_CONF][0, :, :, obj_label]), tile_size)
obj_confs = (255.0 * obj_confs).astype(np.uint8)
obj_confs = np.dstack([obj_confs, obj_confs, obj_confs]) # To RGB.
obj_confs = visualization.write_text_on_image(
obj_confs, [{'name': 'cls', 'val': obj_label, 'fmt': ':d'}],
size=font_size, color=font_color)
tiles.append(obj_confs)
# Visualization of ground-truth fragment fields.
if FLAGS.vis_gt_frag_fields and common.GT_OBJ_IDS in samples:
vis.visualize_gt_frag(
gt_obj_ids=samples[common.GT_OBJ_IDS][0],
gt_obj_masks=samples[common.GT_OBJ_MASKS][0],
gt_frag_labels=samples[common.GT_FRAG_LABEL][0],
gt_frag_weights=samples[common.GT_FRAG_WEIGHT][0],
gt_frag_coords=samples[common.GT_FRAG_LOC][0],
output_size=output_size,
model_store=model_store,
vis_prefix=vis_prefix,
vis_dir=vis_dir)
# Visualization of predicted fragment fields.
if FLAGS.vis_pred_frag_fields:
vis.visualize_pred_frag(
frag_confs=predictions[common.PRED_FRAG_CONF][0],
frag_coords=predictions[common.PRED_FRAG_LOC][0],
output_size=output_size,
model_store=model_store,
vis_prefix=vis_prefix,
vis_dir=vis_dir,
vis_ext=vis_ext)
# Build and save a visualization grid.
grid = vis.build_grid(tiles, tile_size)
grid_vis_path = os.path.join(
vis_dir, '{}_grid.{}'.format(vis_prefix, vis_ext))
inout.save_im(grid_vis_path, grid)
def save_correspondences(
scene_id, im_id, im_ind, obj_id, image_path, K, obj_pred, pred_time,
infer_name, obj_gt_poses, infer_dir):
# Add meta information.
|
def process_image(
sess, samples, predictions, im_ind, crop_size, output_scale, model_store,
renderer, task_type, infer_name, infer_dir, vis_dir):
"""Estimates object poses from one image.
Args:
sess: TensorFlow session.
samples: Dictionary with input data.
predictions: Dictionary with predictions.
im_ind: Index of the current image.
crop_size: Image crop size (width, height).
output_scale: Scale of the model output w.r.t. the input (output / input).
model_store: Store for 3D object models of class ObjectModelStore.
renderer: Renderer of class bop_renderer.Renderer().
task_type: 6D object pose estimation task (common.LOCALIZATION or
common.DETECTION).
infer_name: Name of the current inference.
infer_dir: Folder for inference results.
vis_dir: Folder for visualizations.
"""
# Dictionary for run times.
run_times = {}
# Prediction.
time_start = time.time()
(samples, predictions) = sess.run([samples, predictions])
run_times['prediction'] = time.time() - time_start
# Scene and image ID's.
scene_id = samples[common.SCENE_ID][0]
im_id = samples[common.IM_ID][0]
# Intrinsic parameters.
K = samples[common.K][0]
if task_type == common.LOCALIZATION:
gt_poses = []
gt_obj_ids = samples[common.GT_OBJ_IDS][0]
for gt_id in range(len(gt_obj_ids)):
R = transform.quaternion_matrix(
samples[common.GT_OBJ_QUATS][0][gt_id])[:3, :3]
t = samples[common.GT_OBJ_TRANS][0][gt_id].reshape((3, 1))
gt_poses.append({'obj_id': gt_obj_ids[gt_id], 'R': R, 't': t})
else:
gt_poses = None
# Establish many-to-many 2D-3D correspondences.
time_start = time.time()
corr = corresp.establish_many_to_many(
obj_confs=predictions[common.PRED_OBJ_CONF][0],
frag_confs=predictions[common.PRED_FRAG_CONF][0],
frag_coords=predictions[common.PRED_FRAG_LOC][0],
gt_obj_ids=[x['obj_id'] for x in gt_poses],
model_store=model_store,
output_scale=output_scale,
min_obj_conf=FLAGS.corr_min_obj_conf,
min_frag_rel_conf=FLAGS.corr_min_frag_rel_conf,
project_to_surface=FLAGS.project_to_surface,
only_annotated_objs=(task_type == common.LOCALIZATION))
run_times['establish_corr'] = time.time() - time_start
# PnP-RANSAC to estimate 6D object poses from the correspondences.
time_start = time.time()
poses = []
for obj_id, obj_corr in corr.items():
# tf.logging.info(
# 'Image path: {}, obj: {}'.format(samples[common.IMAGE_PATH][0], obj_id))
# Number of established correspondences.
num_corrs = obj_corr['coord_2d'].shape[0]
# Skip the fitting if there are too few correspondences.
min_required_corrs = 6
if num_corrs < min_required_corrs:
continue
# The correspondences need to be sorted for PROSAC.
if FLAGS.use_prosac:
sorted_inds = np.argsort(obj_corr['conf'])[::-1]
for key in obj_corr.keys():
obj_corr[key] = obj_corr[key][sorted_inds]
# Select correspondences with the highest confidence.
if FLAGS.max_correspondences is not None \
and num_corrs > FLAGS.max_correspondences:
# Sort the correspondences only if they have not been sorted for PROSAC.
if FLAGS.use_prosac:
keep_inds = np.arange(num_corrs)
else:
keep_inds = np.argsort(obj_corr['conf'])[::-1]
keep_inds = keep_inds[:FLAGS.max_correspondences]
for key in obj_corr.keys():
obj_corr[key] = obj_corr[key][keep_inds]
# Save the established correspondences (for analysis).
if FLAGS.save_corresp:
obj_gt_poses = []
if gt_poses is not None:
obj_gt_poses = [x for x in gt_poses if x['obj_id'] == obj_id]
pred_time = float(np.sum(list(run_times.values())))
image_path = samples[common.IMAGE_PATH][0].decode('utf-8')
save_correspondences(
scene_id, im_id, im_ind, obj_id, image_path, K, obj_corr, pred_time,
infer_name, obj_gt_poses, infer_dir)
# Make sure the coordinates are saved continuously in memory.
coord_2d = np.ascontiguousarray(obj_corr['coord_2d'].astype(np.float64))
coord_3d = np.ascontiguousarray(obj_corr['coord_3d'].astype(np.float64))
if FLAGS.fitting_method == common.PROGRESSIVE_X:
# If num_instances == 1, then only GC-RANSAC is applied. If > 1, then
# Progressive-X is applied and up to num_instances poses are returned.
# If num_instances == -1, then Progressive-X is applied and all found
# poses are returned.
if task_type == common.LOCALIZATION:
num_instances = len([x for x in gt_poses if x['obj_id'] == obj_id])
else:
num_instances = -1
if FLAGS.max_instances_to_fit is not None:
num_instances = min(num_instances, FLAGS.max_instances_to_fit)
pose_ests, inlier_indices, pose_qualities = pyprogressivex.find6DPoses(
x1y1=coord_2d,
x2y2z2=coord_3d,
K=K,
threshold=FLAGS.inlier_thresh,
neighborhood_ball_radius=FLAGS.neighbour_max_dist,
spatial_coherence_weight=FLAGS.spatial_coherence_weight,
scaling_from_millimeters=FLAGS.scaling_from_millimeters,
max_tanimoto_similarity=FLAGS.max_tanimoto_similarity,
max_iters=FLAGS.max_fitting_iterations,
conf=FLAGS.required_progx_confidence,
proposal_engine_conf=FLAGS.required_ransac_confidence,
min_coverage=FLAGS.min_hypothesis_quality,
min_triangle_area=FLAGS.min_triangle_area,
min_point_number=6,
max_model_number=num_instances,
max_model_number_for_optimization=FLAGS.max_model_number_for_pearl,
use_prosac=FLAGS.use_prosac,
log=False)
pose_est_success = pose_ests is not None
if pose_est_success:
for i in range(int(pose_ests.shape[0] / 3)):
j = i * 3
R_est = pose_ests[j:(j + 3), :3]
t_est = pose_ests[j:(j + 3), 3].reshape((3, 1))
poses.append({
'scene_id': scene_id,
'im_id': im_id,
'obj_id': obj_id,
'R': R_est,
't': t_est,
'score': pose_qualities[i],
})
elif FLAGS.fitting_method == common.OPENCV_RANSAC:
# This integration of OpenCV-RANSAC can estimate pose of only one object
# instance. Note that in Table 3 of the EPOS CVPR'20 paper, the scores
# for OpenCV-RANSAC were obtained with integrating cv2.solvePnPRansac
# in the Progressive-X scheme (as the other methods in that table).
pose_est_success, r_est, t_est, inliers = cv2.solvePnPRansac(
objectPoints=coord_3d,
imagePoints=coord_2d,
cameraMatrix=K,
distCoeffs=None,
iterationsCount=FLAGS.max_fitting_iterations,
reprojectionError=FLAGS.inlier_thresh,
confidence=0.99, # FLAGS.required_ransac_confidence
flags=cv2.SOLVEPNP_EPNP)
if pose_est_success:
poses.append({
'scene_id': scene_id,
'im_id': im_id,
'obj_id': obj_id,
'R': cv2.Rodrigues(r_est)[0],
't': t_est,
'score': 0.0, # TODO: Define the score.
})
else:
raise ValueError(
'Unknown pose fitting method ({}).'.format(FLAGS.fitting_method))
run_times['fitting'] = time.time() - time_start
run_times['total'] = np.sum(list(run_times.values()))
# Add the total time to each pose.
for pose in poses:
pose['time'] = run_times['total']
# Visualization.
if FLAGS.vis:
visualize(
samples=samples,
predictions=predictions,
pred_poses=poses,
im_ind=im_ind,
crop_size=crop_size,
output_scale=output_scale,
model_store=model_store,
renderer=renderer,
vis_dir=vis_dir)
return poses, run_times
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Model folder.
model_dir = os.path.join(config.TF_MODELS_PATH, FLAGS.model)
# Update flags with parameters loaded from the model folder.
common.update_flags(os.path.join(model_dir, common.PARAMS_FILENAME))
# Print the flag values.
common.print_flags()
# Folder from which the latest model checkpoint will be loaded.
checkpoint_dir = os.path.join(model_dir, 'train')
# Folder for the inference output.
infer_dir = os.path.join(model_dir, 'infer')
tf.gfile.MakeDirs(infer_dir)
# Folder for the visualization output.
vis_dir = os.path.join(model_dir, 'vis')
tf.gfile.MakeDirs(vis_dir)
# TFRecord files used for training.
tfrecord_names = FLAGS.infer_tfrecord_names
if not isinstance(FLAGS.infer_tfrecord_names, list):
tfrecord_names = [FLAGS.infer_tfrecord_names]
# Stride of the final output.
if FLAGS.upsample_logits:
# The stride is 1 if the logits are upsampled to the input resolution.
output_stride = 1
else:
assert (len(FLAGS.decoder_output_stride) == 1)
output_stride = FLAGS.decoder_output_stride[0]
with tf.Graph().as_default():
return_gt_orig = np.any([
FLAGS.task_type == common.LOCALIZATION,
FLAGS.vis_gt_poses])
return_gt_maps = np.any([
FLAGS.vis_pred_obj_labels,
FLAGS.vis_pred_obj_confs,
FLAGS.vis_pred_frag_fields])
# Dataset provider.
dataset = datagen.Dataset(
dataset_name=FLAGS.dataset,
tfrecord_names=tfrecord_names,
model_dir=model_dir,
model_variant=FLAGS.model_variant,
batch_size=1,
max_height_before_crop=FLAGS.infer_max_height_before_crop,
crop_size=list(map(int, FLAGS.infer_crop_size)),
num_frags=FLAGS.num_frags,
min_visib_fract=None,
gt_knn_frags=1,
output_stride=output_stride,
is_training=False,
return_gt_orig=return_gt_orig,
return_gt_maps=return_gt_maps,
should_shuffle=False,
should_repeat=False,
prepare_for_projection=FLAGS.project_to_surface,
data_augmentations=None)
# Initialize a renderer for visualization.
renderer = None
if FLAGS.vis_gt_poses or FLAGS.vis_pred_poses:
tf.logging.info('Initializing renderer for visualization...')
renderer = bop_renderer.Renderer()
renderer.init(dataset.crop_size[0], dataset.crop_size[1])
model_type_vis = 'eval'
dp_model = dataset_params.get_model_params(
config.BOP_PATH, dataset.dataset_name, model_type=model_type_vis)
for obj_id in dp_model['obj_ids']:
path = dp_model['model_tpath'].format(obj_id=obj_id)
renderer.add_object(obj_id, path)
tf.logging.info('Renderer initialized.')
# Inputs.
samples = dataset.get_one_shot_iterator().get_next()
# A map from output type to the number of associated channels.
outputs_to_num_channels = common.get_outputs_to_num_channels(
dataset.num_objs, dataset.model_store.num_frags)
# Options of the neural network model.
model_options = common.ModelOptions(
outputs_to_num_channels=outputs_to_num_channels,
crop_size=list(map(int, FLAGS.infer_crop_size)),
atrous_rates=FLAGS.atrous_rates,
encoder_output_stride=FLAGS.encoder_output_stride)
# Construct the inference graph.
predictions = model.predict(
images=samples[common.IMAGE],
model_options=model_options,
upsample_logits=FLAGS.upsample_logits,
image_pyramid=FLAGS.image_pyramid,
num_objs=dataset.num_objs,
num_frags=dataset.num_frags,
frag_cls_agnostic=FLAGS.frag_cls_agnostic,
frag_loc_agnostic=FLAGS.frag_loc_agnostic)
# Global step.
tf.train.get_or_create_global_step()
# Get path to the model checkpoint.
if FLAGS.checkpoint_name is None:
checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
else:
checkpoint_path = os.path.join(checkpoint_dir, FLAGS.checkpoint_name)
time_str = time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime())
tf.logging.info('Starting inference at: {}'.format(time_str))
tf.logging.info('Inference with model: {}'.format(checkpoint_path))
# Scaffold for initialization.
scaffold = tf.train.Scaffold(
init_op=tf.global_variables_initializer(),
saver=tf.train.Saver(var_list=misc.get_variable_dict()))
# TensorFlow configuration.
if FLAGS.cpu_only:
tf_config = tf.ConfigProto(device_count={'GPU': 0})
else:
tf_config = tf.ConfigProto()
# tf_config.gpu_options.allow_growth = True # Only necessary GPU memory.
tf_config.gpu_options.allow_growth = False
# Nodes that can use multiple threads to parallelize their execution will
# schedule the individual pieces into this pool.
tf_config.intra_op_parallelism_threads = 10
# All ready nodes are scheduled in this pool.
tf_config.inter_op_parallelism_threads = 10
poses_all = []
first_im_poses_num = 0
session_creator = tf.train.ChiefSessionCreator(
config=tf_config,
scaffold=scaffold,
master=FLAGS.master,
checkpoint_filename_with_path=checkpoint_path)
with tf.train.MonitoredSession(
session_creator=session_creator, hooks=None) as sess:
im_ind = 0
while not sess.should_stop():
# Estimate object poses for the current image.
poses, run_times = process_image(
sess=sess,
samples=samples,
predictions=predictions,
im_ind=im_ind,
crop_size=dataset.crop_size,
output_scale=(1.0 / output_stride),
model_store=dataset.model_store,
renderer=renderer,
task_type=FLAGS.task_type,
infer_name=FLAGS.infer_name,
infer_dir=infer_dir,
vis_dir=vis_dir)
# Note that the first image takes longer time (because of TF init).
tf.logging.info(
'Image: {}, prediction: {:.3f}, establish_corr: {:.3f}, '
'fitting: {:.3f}, total time: {:.3f}'.format(
im_ind, run_times['prediction'], run_times['establish_corr'],
run_times['fitting'], run_times['total']))
poses_all += poses
if im_ind == 0:
first_im_poses_num = len(poses)
im_ind += 1
# Set the time of pose estimates from the first image to the average time.
# Tensorflow takes a long time on the first image (because of init).
time_avg = 0.0
for pose in poses_all:
time_avg += pose['time']
if len(poses_all) > 0:
time_avg /= float((len(poses_all)))
for i in range(first_im_poses_num):
poses_all[i]['time'] = time_avg
# Save the estimated poses in the BOP format:
# https://bop.felk.cvut.cz/challenges/bop-challenge-2020/#formatofresults
if FLAGS.save_estimates:
suffix = ''
if FLAGS.infer_name is not None:
suffix = '_{}'.format(FLAGS.infer_name)
poses_path = os.path.join(
infer_dir, 'estimated-poses{}.csv'.format(suffix))
tf.logging.info('Saving estimated poses to: {}'.format(poses_path))
inout.save_bop_results(poses_path, poses_all, version='bop19')
time_str = time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime())
tf.logging.info('Finished inference at: {}'.format(time_str))
if __name__ == '__main__':
tf.app.run()
| txt = '# Corr format: u v x y z px_id frag_id conf conf_obj conf_frag\n'
txt += '{}\n'.format(image_path)
txt += '{} {} {} {}\n'.format(scene_id, im_id, obj_id, pred_time)
# Add intrinsics.
for i in range(3):
txt += '{} {} {}\n'.format(K[i, 0], K[i, 1], K[i, 2])
# Add ground-truth poses.
txt += '{}\n'.format(len(obj_gt_poses))
for pose in obj_gt_poses:
for i in range(3):
txt += '{} {} {} {}\n'.format(
pose['R'][i, 0], pose['R'][i, 1], pose['R'][i, 2], pose['t'][i, 0])
# Sort the predicted correspondences by confidence.
sort_inds = np.argsort(obj_pred['conf'])[::-1]
px_id = obj_pred['px_id'][sort_inds]
frag_id = obj_pred['frag_id'][sort_inds]
coord_2d = obj_pred['coord_2d'][sort_inds]
coord_3d = obj_pred['coord_3d'][sort_inds]
conf = obj_pred['conf'][sort_inds]
conf_obj = obj_pred['conf_obj'][sort_inds]
conf_frag = obj_pred['conf_frag'][sort_inds]
# Add the predicted correspondences.
pred_corr_num = len(coord_2d)
txt += '{}\n'.format(pred_corr_num)
for i in range(pred_corr_num):
txt += '{} {} {} {} {} {} {} {} {} {}\n'.format(
coord_2d[i, 0], coord_2d[i, 1],
coord_3d[i, 0], coord_3d[i, 1], coord_3d[i, 2],
px_id[i], frag_id[i], conf[i], conf_obj[i], conf_frag[i])
# Save the correspondences into a file.
corr_suffix = infer_name
if corr_suffix is None:
corr_suffix = ''
else:
corr_suffix = '_' + corr_suffix
corr_path = os.path.join(
infer_dir, 'corr{}'.format(corr_suffix),
'{:06d}_corr_{:02d}.txt'.format(im_ind, obj_id))
tf.gfile.MakeDirs(os.path.dirname(corr_path))
with open(corr_path, 'w') as f:
f.write(txt) | identifier_body |
main.rs | extern crate rand;
extern crate palette;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
extern crate clap;
use std::thread;
use std::sync::mpsc;
extern crate rayon;
use rayon::prelude::*;
mod kohonen_neuron;
//use kohonen_neuron::rgb_vector_neuron;
mod kohonen;
use kohonen::Kohonen;
mod sphere_of_influence;
/** note: the energy coefficient should be from [0, 1] and should take into account both
* distance from the BMU and color disparity
*/
pub fn get_within_radius<T>(net: &Kohonen<T>, pos: (usize, usize), radius: i32) ->
std::vec::Vec<(usize, usize, f64)>
where T: kohonen_neuron::KohonenNeuron {
let mut rv = Vec::new();
let (r, c) = pos;
let bmu = &net[r][c];
for r2 in 0..net.rows {
for c2 in 0..net.cols {
let comp1 = (r as f64) - (r2 as f64);
let comp2 = (c as f64) - (c2 as f64);
let distance = ((comp1 * comp1) + (comp2 * comp2)).sqrt();
if distance < (radius as f64) {
let color_dist = bmu.distance(&net[r2][c2]);
let energy = (distance / radius as f64) * (1.0 - color_dist);
rv.push((r2, c2, energy))
}
}
}
rv
}
pub fn get_neuron_neighbors<T>(net: &Kohonen<T>, pos: (usize, usize)) -> [(usize, usize); 8]
where T: kohonen_neuron::KohonenNeuron {
let (r, c) = pos;
let rows = net.rows;
let cols = net.cols;
assert_eq!(rows, cols);
let prev = |x| {
if x as i32 - 1 < 0 {
rows - 1
} else {
x - 1
}
};
let next = |x| (x + 1) % rows;
[ (prev(r), prev(c)), (prev(r), c), (prev(r), next(c)),
(r, prev(c)), (r, next(c)),
(next(r), prev(c)), (next(r), c), (next(r), next(c))
]
}
/**
* @returns a vector of triples consisting of (row, col, energy coefficient from [0, 1])
*/
pub fn get_within_radius_fluid<T>(
net: &Kohonen<T>,
pos: (usize, usize),
radius: i32,
bucket_decay: f64)
-> std::vec::Vec<(usize, usize, f64)>
where T: kohonen_neuron::KohonenNeuron {
use std::collections::{HashSet, HashMap};
fn fluid_collect<T: kohonen_neuron::KohonenNeuron>(
net: &Kohonen<T>,
pos: (usize, usize),
range: i32,
pow_exp: f64)
-> Vec<(usize, usize, f64)> {
let (ro, co) = pos;
// use variant of Dijkstra's algorithm to produce the shortest-path tree, then
// prune that tree
let mut unvisited_nodes = HashSet::new();
for r in 0..net.rows {
for c in 0..net.cols {
unvisited_nodes.insert((r, c));
}
}
let inf = 0.0;
let mut energies: HashMap<(usize, usize), f64> = unvisited_nodes.clone().into_iter()
.map(|cur_pos| if cur_pos != pos {
(cur_pos, inf)
} else {
let (cur_r, cur_c) = cur_pos;
(pos, (1.0 - net[ro][co].distance(&net[cur_r][cur_c]).powf(pow_exp)))
})
.collect();
let mut current = pos;
while unvisited_nodes.len() > 0 {
let neighbours = get_neuron_neighbors(net, current);
let unvisited_neighbours: Vec<(usize, usize)> =
neighbours.iter()
.filter(|neighbour| unvisited_nodes.contains(*neighbour))
.map(|pos| *pos)
.collect();
let current_dist = *energies.get(¤t).unwrap();
{
let _res: Vec<(usize, usize)> =
unvisited_neighbours.clone().into_iter().map(
|(r, c)| {
let decay = 1.0 - 1.0 / range as f64;
let new_dist = (1.0 - net[ro][co].distance(&net[r][c]).powf(pow_exp)) * current_dist * decay;
let old_dist = *energies.get(&(r, c)).unwrap();
if new_dist > old_dist {
energies.remove(&(r, c));
energies.insert((r, c), new_dist);
};
(r, c)
})
.collect();
};
let old_len = unvisited_nodes.len();
unvisited_nodes.remove(¤t);
assert!(old_len > unvisited_nodes.len());
if unvisited_nodes.len() > 0 {
let old_cur = current;
current =
unvisited_nodes.clone().into_iter().fold(
None,
|acc, cand|
match acc {
None =>
Some(cand),
Some(pos) =>
if energies.get(&cand) > energies.get(&pos) {
Some(cand)
} else {
acc
},
})
.unwrap();
assert!(old_cur != current);
};
}
energies.into_iter()
.filter(|(_pos, energy)| range as f64 * energy >= 1.0)
.map(|((r, c), energy)| (r, c, /*range as f64 * */energy))
.collect()
}
let collected = fluid_collect(net, pos, radius, bucket_decay);
collected
.into_iter()
/*.map(|(r, c, local_range)| {
(r, c, radius - local_range)
})*/
.collect()
}
pub fn feed_sample<T>(
net: &mut Kohonen<T>,
sample: &T,
rate: f64,
radius: i32,
associate: sphere_of_influence::AssociationKind)
-> ()
where T: kohonen_neuron::KohonenNeuron {
let (r, c, bmu_dist) = kohonen::get_bmu(net, sample);
let bmu_pos = (r, c);
let items =
match associate {
sphere_of_influence::AssociationKind::Bucket(bucket_decay) =>
get_within_radius_fluid(net, (r, c), radius, bucket_decay),
sphere_of_influence::AssociationKind::Euclidean =>
get_within_radius(net, (r, c), radius),
};
let mut displaced = 0.0;
for i in 0..items.len() {
let (r, c, item_dist) = items[i];
let dist = item_dist as f64 / radius as f64;
let weight = (1.0 - dist).sqrt() * rate;
let old = &net[r][c].clone();
let _ = (&mut net[r][c]).shift(&sample, weight);
displaced = displaced + old.distance(&net[r][c]);
if (r, c) == bmu_pos {
//println!("\tweighting with {} at the BMU.", weight);
} else {
//println!("\tweighting with {} as {:?}.", weight, (r, c));
}
}
println!("\tDisplaced total of {} from {} items on a BMU of distance {}.",
displaced,
items.len(),
bmu_dist);
std::io::stdout().flush().unwrap();
thread::yield_now();
()
}
pub fn train<T>(
net: Kohonen<T>,
samples: &Vec<T>,
rate: f64,
radius: i32,
associate: sphere_of_influence::AssociationKind)
-> Kohonen<T>
where T: kohonen_neuron::KohonenNeuron + Send + Sync + Clone + 'static,
Kohonen<T>: Send + Sync {
let mut descs = Vec::new();
for i in 0..samples.len() {
descs.push((net.clone(), samples[i].clone()));
//feed_sample(net, &samples[i], rate, radius);
}
let nets: Vec<Kohonen<T>> =
descs
.par_iter()
.map(|(my_net, sample)| {
let associate = associate.clone();
let mut net = my_net.clone();
feed_sample(&mut net, &sample, rate, radius, associate);
net
})
.collect();
std::io::stdout().flush().unwrap();
kohonen::combine(net, nets)
}
pub fn | <T>(
net: &Kohonen<T>,
samples: &std::vec::Vec<T>,
its: u32,
associate: sphere_of_influence::AssociationKind)
-> Kohonen<T>
where T: kohonen_neuron::KohonenNeuron + Send + Sync + 'static {
let mut rv = net.clone();
let width = net.cols as f64;
// training with a large fixed radius for a bit should help things get into
// the right general places
/*for _i in 0..(its / 2) {
let radius = width / 2.0;
let rate = 0.5;
rv = train(rv.clone(), samples, rate, radius as i32, associate.clone());
}
let its = its / 2 + (its % 2);*/
let time_constant = (its + 1) as f64 / width.ln();
for i in 0..its {
let radius = width * (0.0 - (i as f64 + 1.0) / time_constant).exp();
//let radius = width / 2.0;
let rate = (0.0 - (i as f64 + 1.0) / time_constant).exp().sqrt();
//let rate = 0.75;
println!("Radius: {radius}, rate: {rate}", radius=radius, rate=rate);
std::io::stdout().flush().unwrap();
let net2 = rv.clone();
rv = train(net2, samples, rate, radius.ceil() as i32, associate.clone())
}
rv
}
pub fn show<T: kohonen_neuron::KohonenNeuron>(net: &Kohonen<T>, path: &str) {
let rows = net.rows;
let cols = net.cols;
let path = Path::new(path);
let mut os = match File::create(&path) {
Err(why) => panic!("couldn't make file pls halp: {}", why),
Ok(file) => file,
};
let _ = os.write_all("P6\n".as_bytes());
let _ = os.write_all((cols as u64).to_string().as_bytes());
let _ = os.write_all(" ".as_bytes());
let _ = os.write_all((rows as u64).to_string().as_bytes());
let _ = os.write_all("\n255\n".as_bytes());
for r in 0..rows {
for c in 0..cols {
let (r, g, b) = net[r][c].get_rgb();
let _ = os.write_all(&[r, g, b]);
}
}
}
/*pub fn show_csv<T: kohonen_neuron::KohonenNeuron>(net: &kohonen<T>, path: &str) {
}*/
fn main() {
use clap::{Arg, App};
let matches =
App::new("kohonen")
.version("0.1.0")
.about("A Kohonen SOFM")
.author("Fuck off")
.arg(Arg::with_name("iterations")
.short("its")
.long("iterations")
.help("How many training iterations to do")
.takes_value(true))
.arg(Arg::with_name("dim")
.short("dim")
.long("dimension")
.help("The size of the network")
.takes_value(true))
.arg(Arg::with_name("associate")
.short("a")
.long("associate")
.help("The association method")
.default_value("bucket")
.possible_values(&["bucket", "euclidean"])
.takes_value(true))
.arg(Arg::with_name("bucket decay")
.long("bucket-decay")
.help( "Exponentially affects how much energy it takes to overcome a higher \
difference. Lower values will keep spheres of influence small and \
tight, while higher ones (above 1.0) will allow greater spread.")
.default_value("0.7")
.takes_value(true))
.arg(Arg::with_name("colour model")
.long("colour-model")
.help("The color model to use.")
.default_value("hsl")
.possible_values(&["hsl", "rgb"])
.takes_value(true))
.arg(Arg::with_name("centroids")
.long("centroids")
.help("A list of centroids")
.takes_value(true))
.get_matches();
let net_dim = str::parse::<u32>(matches.value_of("dim").unwrap()).unwrap();
let train_its = str::parse::<u32>(matches.value_of("iterations").unwrap()).unwrap();
let associate = sphere_of_influence::from_str(matches.value_of("associate").unwrap()).unwrap();
let bucket_decay = str::parse::<f64>(matches.value_of("bucket decay").unwrap()).unwrap();
let associate = match associate {
sphere_of_influence::AssociationKind::Bucket(_) =>
sphere_of_influence::AssociationKind::Bucket(bucket_decay),
_ => associate
};
println!(
"Building a Kohonen net of {dim}x{dim} and training it for {its} iterations.",
dim=net_dim, its=train_its);
let colors: Vec<[f64; 3]> = vec![
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[0.8, 0.8, 0.0],
[0.0, 0.8, 0.8],
[0.8, 0.0, 0.8],
[0.4, 0.4, 0.4],
[1.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[1.0, 0.66, 0.75],
];
match matches.value_of("colour model").unwrap() {
"hsl" => {
let mut net = kohonen::new(net_dim as usize);
let old_net = net.clone();
let colors = colors
.into_iter()
.map(|[r, g, b]|
palette::Hsl::from(palette::Srgb::new(r as f32, g as f32, b as f32)))
.rev()
.collect();
net = iter_train(&net, &colors, train_its, associate);
println!("Overall displacement: {}", kohonen::disp(&old_net, &net));
let file = format!("./map_{its}its.ppm", its=train_its);
show(&net, &file)
},
"rgb" => {
let mut net = kohonen::new(net_dim as usize);
let old_net = net.clone();
net = iter_train(&net, &colors, train_its, associate);
println!("Overall displacement: {}", kohonen::disp(&old_net, &net));
let file = format!("./map_{its}its.ppm", its=train_its);
show(&net, &file)
},
_ => ()
};
}
| iter_train | identifier_name |
main.rs | extern crate rand;
extern crate palette;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
extern crate clap;
use std::thread;
use std::sync::mpsc;
extern crate rayon;
use rayon::prelude::*;
mod kohonen_neuron;
//use kohonen_neuron::rgb_vector_neuron;
mod kohonen;
use kohonen::Kohonen;
mod sphere_of_influence;
/** note: the energy coefficient should be from [0, 1] and should take into account both
* distance from the BMU and color disparity
*/
pub fn get_within_radius<T>(net: &Kohonen<T>, pos: (usize, usize), radius: i32) ->
std::vec::Vec<(usize, usize, f64)>
where T: kohonen_neuron::KohonenNeuron {
let mut rv = Vec::new();
let (r, c) = pos;
let bmu = &net[r][c];
for r2 in 0..net.rows {
for c2 in 0..net.cols {
let comp1 = (r as f64) - (r2 as f64);
let comp2 = (c as f64) - (c2 as f64);
let distance = ((comp1 * comp1) + (comp2 * comp2)).sqrt();
if distance < (radius as f64) {
let color_dist = bmu.distance(&net[r2][c2]);
let energy = (distance / radius as f64) * (1.0 - color_dist);
rv.push((r2, c2, energy))
}
}
}
rv
}
pub fn get_neuron_neighbors<T>(net: &Kohonen<T>, pos: (usize, usize)) -> [(usize, usize); 8]
where T: kohonen_neuron::KohonenNeuron {
let (r, c) = pos;
let rows = net.rows;
let cols = net.cols;
assert_eq!(rows, cols);
let prev = |x| {
if x as i32 - 1 < 0 {
rows - 1
} else {
x - 1
}
};
let next = |x| (x + 1) % rows;
[ (prev(r), prev(c)), (prev(r), c), (prev(r), next(c)),
(r, prev(c)), (r, next(c)),
(next(r), prev(c)), (next(r), c), (next(r), next(c))
]
}
/**
* @returns a vector of triples consisting of (row, col, energy coefficient from [0, 1])
*/
pub fn get_within_radius_fluid<T>(
net: &Kohonen<T>,
pos: (usize, usize),
radius: i32,
bucket_decay: f64)
-> std::vec::Vec<(usize, usize, f64)>
where T: kohonen_neuron::KohonenNeuron {
use std::collections::{HashSet, HashMap};
fn fluid_collect<T: kohonen_neuron::KohonenNeuron>(
net: &Kohonen<T>,
pos: (usize, usize),
range: i32,
pow_exp: f64)
-> Vec<(usize, usize, f64)> {
let (ro, co) = pos;
// use variant of Dijkstra's algorithm to produce the shortest-path tree, then
// prune that tree
let mut unvisited_nodes = HashSet::new();
for r in 0..net.rows {
for c in 0..net.cols {
unvisited_nodes.insert((r, c));
}
}
let inf = 0.0;
let mut energies: HashMap<(usize, usize), f64> = unvisited_nodes.clone().into_iter()
.map(|cur_pos| if cur_pos != pos {
(cur_pos, inf)
} else {
let (cur_r, cur_c) = cur_pos;
(pos, (1.0 - net[ro][co].distance(&net[cur_r][cur_c]).powf(pow_exp)))
})
.collect();
let mut current = pos;
while unvisited_nodes.len() > 0 {
let neighbours = get_neuron_neighbors(net, current);
let unvisited_neighbours: Vec<(usize, usize)> =
neighbours.iter()
.filter(|neighbour| unvisited_nodes.contains(*neighbour))
.map(|pos| *pos)
.collect();
let current_dist = *energies.get(¤t).unwrap();
{
let _res: Vec<(usize, usize)> =
unvisited_neighbours.clone().into_iter().map(
|(r, c)| {
let decay = 1.0 - 1.0 / range as f64;
let new_dist = (1.0 - net[ro][co].distance(&net[r][c]).powf(pow_exp)) * current_dist * decay;
let old_dist = *energies.get(&(r, c)).unwrap();
if new_dist > old_dist {
energies.remove(&(r, c));
energies.insert((r, c), new_dist);
};
(r, c)
})
.collect();
};
let old_len = unvisited_nodes.len();
unvisited_nodes.remove(¤t);
assert!(old_len > unvisited_nodes.len());
if unvisited_nodes.len() > 0 {
let old_cur = current;
current =
unvisited_nodes.clone().into_iter().fold(
None,
|acc, cand|
match acc {
None =>
Some(cand),
Some(pos) =>
if energies.get(&cand) > energies.get(&pos) {
Some(cand)
} else {
acc
},
})
.unwrap();
assert!(old_cur != current);
};
}
energies.into_iter()
.filter(|(_pos, energy)| range as f64 * energy >= 1.0)
.map(|((r, c), energy)| (r, c, /*range as f64 * */energy))
.collect()
}
let collected = fluid_collect(net, pos, radius, bucket_decay);
collected
.into_iter()
/*.map(|(r, c, local_range)| {
(r, c, radius - local_range)
})*/
.collect()
}
pub fn feed_sample<T>(
net: &mut Kohonen<T>,
sample: &T,
rate: f64,
radius: i32,
associate: sphere_of_influence::AssociationKind)
-> ()
where T: kohonen_neuron::KohonenNeuron {
let (r, c, bmu_dist) = kohonen::get_bmu(net, sample);
let bmu_pos = (r, c);
let items =
match associate {
sphere_of_influence::AssociationKind::Bucket(bucket_decay) =>
get_within_radius_fluid(net, (r, c), radius, bucket_decay),
sphere_of_influence::AssociationKind::Euclidean =>
get_within_radius(net, (r, c), radius),
};
let mut displaced = 0.0;
for i in 0..items.len() {
let (r, c, item_dist) = items[i];
let dist = item_dist as f64 / radius as f64;
let weight = (1.0 - dist).sqrt() * rate;
let old = &net[r][c].clone();
let _ = (&mut net[r][c]).shift(&sample, weight);
displaced = displaced + old.distance(&net[r][c]);
if (r, c) == bmu_pos {
//println!("\tweighting with {} at the BMU.", weight);
} else {
//println!("\tweighting with {} as {:?}.", weight, (r, c));
}
}
println!("\tDisplaced total of {} from {} items on a BMU of distance {}.",
displaced,
items.len(),
bmu_dist);
std::io::stdout().flush().unwrap();
thread::yield_now();
()
}
pub fn train<T>(
net: Kohonen<T>,
samples: &Vec<T>,
rate: f64,
radius: i32,
associate: sphere_of_influence::AssociationKind)
-> Kohonen<T>
where T: kohonen_neuron::KohonenNeuron + Send + Sync + Clone + 'static, | }
let nets: Vec<Kohonen<T>> =
descs
.par_iter()
.map(|(my_net, sample)| {
let associate = associate.clone();
let mut net = my_net.clone();
feed_sample(&mut net, &sample, rate, radius, associate);
net
})
.collect();
std::io::stdout().flush().unwrap();
kohonen::combine(net, nets)
}
pub fn iter_train<T>(
net: &Kohonen<T>,
samples: &std::vec::Vec<T>,
its: u32,
associate: sphere_of_influence::AssociationKind)
-> Kohonen<T>
where T: kohonen_neuron::KohonenNeuron + Send + Sync + 'static {
let mut rv = net.clone();
let width = net.cols as f64;
// training with a large fixed radius for a bit should help things get into
// the right general places
/*for _i in 0..(its / 2) {
let radius = width / 2.0;
let rate = 0.5;
rv = train(rv.clone(), samples, rate, radius as i32, associate.clone());
}
let its = its / 2 + (its % 2);*/
let time_constant = (its + 1) as f64 / width.ln();
for i in 0..its {
let radius = width * (0.0 - (i as f64 + 1.0) / time_constant).exp();
//let radius = width / 2.0;
let rate = (0.0 - (i as f64 + 1.0) / time_constant).exp().sqrt();
//let rate = 0.75;
println!("Radius: {radius}, rate: {rate}", radius=radius, rate=rate);
std::io::stdout().flush().unwrap();
let net2 = rv.clone();
rv = train(net2, samples, rate, radius.ceil() as i32, associate.clone())
}
rv
}
pub fn show<T: kohonen_neuron::KohonenNeuron>(net: &Kohonen<T>, path: &str) {
let rows = net.rows;
let cols = net.cols;
let path = Path::new(path);
let mut os = match File::create(&path) {
Err(why) => panic!("couldn't make file pls halp: {}", why),
Ok(file) => file,
};
let _ = os.write_all("P6\n".as_bytes());
let _ = os.write_all((cols as u64).to_string().as_bytes());
let _ = os.write_all(" ".as_bytes());
let _ = os.write_all((rows as u64).to_string().as_bytes());
let _ = os.write_all("\n255\n".as_bytes());
for r in 0..rows {
for c in 0..cols {
let (r, g, b) = net[r][c].get_rgb();
let _ = os.write_all(&[r, g, b]);
}
}
}
/*pub fn show_csv<T: kohonen_neuron::KohonenNeuron>(net: &kohonen<T>, path: &str) {
}*/
fn main() {
use clap::{Arg, App};
let matches =
App::new("kohonen")
.version("0.1.0")
.about("A Kohonen SOFM")
.author("Fuck off")
.arg(Arg::with_name("iterations")
.short("its")
.long("iterations")
.help("How many training iterations to do")
.takes_value(true))
.arg(Arg::with_name("dim")
.short("dim")
.long("dimension")
.help("The size of the network")
.takes_value(true))
.arg(Arg::with_name("associate")
.short("a")
.long("associate")
.help("The association method")
.default_value("bucket")
.possible_values(&["bucket", "euclidean"])
.takes_value(true))
.arg(Arg::with_name("bucket decay")
.long("bucket-decay")
.help( "Exponentially affects how much energy it takes to overcome a higher \
difference. Lower values will keep spheres of influence small and \
tight, while higher ones (above 1.0) will allow greater spread.")
.default_value("0.7")
.takes_value(true))
.arg(Arg::with_name("colour model")
.long("colour-model")
.help("The color model to use.")
.default_value("hsl")
.possible_values(&["hsl", "rgb"])
.takes_value(true))
.arg(Arg::with_name("centroids")
.long("centroids")
.help("A list of centroids")
.takes_value(true))
.get_matches();
let net_dim = str::parse::<u32>(matches.value_of("dim").unwrap()).unwrap();
let train_its = str::parse::<u32>(matches.value_of("iterations").unwrap()).unwrap();
let associate = sphere_of_influence::from_str(matches.value_of("associate").unwrap()).unwrap();
let bucket_decay = str::parse::<f64>(matches.value_of("bucket decay").unwrap()).unwrap();
let associate = match associate {
sphere_of_influence::AssociationKind::Bucket(_) =>
sphere_of_influence::AssociationKind::Bucket(bucket_decay),
_ => associate
};
println!(
"Building a Kohonen net of {dim}x{dim} and training it for {its} iterations.",
dim=net_dim, its=train_its);
let colors: Vec<[f64; 3]> = vec![
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[0.8, 0.8, 0.0],
[0.0, 0.8, 0.8],
[0.8, 0.0, 0.8],
[0.4, 0.4, 0.4],
[1.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[1.0, 0.66, 0.75],
];
match matches.value_of("colour model").unwrap() {
"hsl" => {
let mut net = kohonen::new(net_dim as usize);
let old_net = net.clone();
let colors = colors
.into_iter()
.map(|[r, g, b]|
palette::Hsl::from(palette::Srgb::new(r as f32, g as f32, b as f32)))
.rev()
.collect();
net = iter_train(&net, &colors, train_its, associate);
println!("Overall displacement: {}", kohonen::disp(&old_net, &net));
let file = format!("./map_{its}its.ppm", its=train_its);
show(&net, &file)
},
"rgb" => {
let mut net = kohonen::new(net_dim as usize);
let old_net = net.clone();
net = iter_train(&net, &colors, train_its, associate);
println!("Overall displacement: {}", kohonen::disp(&old_net, &net));
let file = format!("./map_{its}its.ppm", its=train_its);
show(&net, &file)
},
_ => ()
};
} | Kohonen<T>: Send + Sync {
let mut descs = Vec::new();
for i in 0..samples.len() {
descs.push((net.clone(), samples[i].clone()));
//feed_sample(net, &samples[i], rate, radius); | random_line_split |
main.rs | extern crate rand;
extern crate palette;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
extern crate clap;
use std::thread;
use std::sync::mpsc;
extern crate rayon;
use rayon::prelude::*;
mod kohonen_neuron;
//use kohonen_neuron::rgb_vector_neuron;
mod kohonen;
use kohonen::Kohonen;
mod sphere_of_influence;
/** note: the energy coefficient should be from [0, 1] and should take into account both
* distance from the BMU and color disparity
*/
pub fn get_within_radius<T>(net: &Kohonen<T>, pos: (usize, usize), radius: i32) ->
std::vec::Vec<(usize, usize, f64)>
where T: kohonen_neuron::KohonenNeuron {
let mut rv = Vec::new();
let (r, c) = pos;
let bmu = &net[r][c];
for r2 in 0..net.rows {
for c2 in 0..net.cols {
let comp1 = (r as f64) - (r2 as f64);
let comp2 = (c as f64) - (c2 as f64);
let distance = ((comp1 * comp1) + (comp2 * comp2)).sqrt();
if distance < (radius as f64) {
let color_dist = bmu.distance(&net[r2][c2]);
let energy = (distance / radius as f64) * (1.0 - color_dist);
rv.push((r2, c2, energy))
}
}
}
rv
}
pub fn get_neuron_neighbors<T>(net: &Kohonen<T>, pos: (usize, usize)) -> [(usize, usize); 8]
where T: kohonen_neuron::KohonenNeuron {
let (r, c) = pos;
let rows = net.rows;
let cols = net.cols;
assert_eq!(rows, cols);
let prev = |x| {
if x as i32 - 1 < 0 {
rows - 1
} else {
x - 1
}
};
let next = |x| (x + 1) % rows;
[ (prev(r), prev(c)), (prev(r), c), (prev(r), next(c)),
(r, prev(c)), (r, next(c)),
(next(r), prev(c)), (next(r), c), (next(r), next(c))
]
}
/**
* @returns a vector of triples consisting of (row, col, energy coefficient from [0, 1])
*/
pub fn get_within_radius_fluid<T>(
net: &Kohonen<T>,
pos: (usize, usize),
radius: i32,
bucket_decay: f64)
-> std::vec::Vec<(usize, usize, f64)>
where T: kohonen_neuron::KohonenNeuron {
use std::collections::{HashSet, HashMap};
fn fluid_collect<T: kohonen_neuron::KohonenNeuron>(
net: &Kohonen<T>,
pos: (usize, usize),
range: i32,
pow_exp: f64)
-> Vec<(usize, usize, f64)> {
let (ro, co) = pos;
// use variant of Dijkstra's algorithm to produce the shortest-path tree, then
// prune that tree
let mut unvisited_nodes = HashSet::new();
for r in 0..net.rows {
for c in 0..net.cols {
unvisited_nodes.insert((r, c));
}
}
let inf = 0.0;
let mut energies: HashMap<(usize, usize), f64> = unvisited_nodes.clone().into_iter()
.map(|cur_pos| if cur_pos != pos {
(cur_pos, inf)
} else {
let (cur_r, cur_c) = cur_pos;
(pos, (1.0 - net[ro][co].distance(&net[cur_r][cur_c]).powf(pow_exp)))
})
.collect();
let mut current = pos;
while unvisited_nodes.len() > 0 {
let neighbours = get_neuron_neighbors(net, current);
let unvisited_neighbours: Vec<(usize, usize)> =
neighbours.iter()
.filter(|neighbour| unvisited_nodes.contains(*neighbour))
.map(|pos| *pos)
.collect();
let current_dist = *energies.get(¤t).unwrap();
{
let _res: Vec<(usize, usize)> =
unvisited_neighbours.clone().into_iter().map(
|(r, c)| {
let decay = 1.0 - 1.0 / range as f64;
let new_dist = (1.0 - net[ro][co].distance(&net[r][c]).powf(pow_exp)) * current_dist * decay;
let old_dist = *energies.get(&(r, c)).unwrap();
if new_dist > old_dist {
energies.remove(&(r, c));
energies.insert((r, c), new_dist);
};
(r, c)
})
.collect();
};
let old_len = unvisited_nodes.len();
unvisited_nodes.remove(¤t);
assert!(old_len > unvisited_nodes.len());
if unvisited_nodes.len() > 0 {
let old_cur = current;
current =
unvisited_nodes.clone().into_iter().fold(
None,
|acc, cand|
match acc {
None =>
Some(cand),
Some(pos) =>
if energies.get(&cand) > energies.get(&pos) {
Some(cand)
} else {
acc
},
})
.unwrap();
assert!(old_cur != current);
};
}
energies.into_iter()
.filter(|(_pos, energy)| range as f64 * energy >= 1.0)
.map(|((r, c), energy)| (r, c, /*range as f64 * */energy))
.collect()
}
let collected = fluid_collect(net, pos, radius, bucket_decay);
collected
.into_iter()
/*.map(|(r, c, local_range)| {
(r, c, radius - local_range)
})*/
.collect()
}
pub fn feed_sample<T>(
net: &mut Kohonen<T>,
sample: &T,
rate: f64,
radius: i32,
associate: sphere_of_influence::AssociationKind)
-> ()
where T: kohonen_neuron::KohonenNeuron {
let (r, c, bmu_dist) = kohonen::get_bmu(net, sample);
let bmu_pos = (r, c);
let items =
match associate {
sphere_of_influence::AssociationKind::Bucket(bucket_decay) =>
get_within_radius_fluid(net, (r, c), radius, bucket_decay),
sphere_of_influence::AssociationKind::Euclidean =>
get_within_radius(net, (r, c), radius),
};
let mut displaced = 0.0;
for i in 0..items.len() {
let (r, c, item_dist) = items[i];
let dist = item_dist as f64 / radius as f64;
let weight = (1.0 - dist).sqrt() * rate;
let old = &net[r][c].clone();
let _ = (&mut net[r][c]).shift(&sample, weight);
displaced = displaced + old.distance(&net[r][c]);
if (r, c) == bmu_pos {
//println!("\tweighting with {} at the BMU.", weight);
} else {
//println!("\tweighting with {} as {:?}.", weight, (r, c));
}
}
println!("\tDisplaced total of {} from {} items on a BMU of distance {}.",
displaced,
items.len(),
bmu_dist);
std::io::stdout().flush().unwrap();
thread::yield_now();
()
}
pub fn train<T>(
net: Kohonen<T>,
samples: &Vec<T>,
rate: f64,
radius: i32,
associate: sphere_of_influence::AssociationKind)
-> Kohonen<T>
where T: kohonen_neuron::KohonenNeuron + Send + Sync + Clone + 'static,
Kohonen<T>: Send + Sync {
let mut descs = Vec::new();
for i in 0..samples.len() {
descs.push((net.clone(), samples[i].clone()));
//feed_sample(net, &samples[i], rate, radius);
}
let nets: Vec<Kohonen<T>> =
descs
.par_iter()
.map(|(my_net, sample)| {
let associate = associate.clone();
let mut net = my_net.clone();
feed_sample(&mut net, &sample, rate, radius, associate);
net
})
.collect();
std::io::stdout().flush().unwrap();
kohonen::combine(net, nets)
}
pub fn iter_train<T>(
net: &Kohonen<T>,
samples: &std::vec::Vec<T>,
its: u32,
associate: sphere_of_influence::AssociationKind)
-> Kohonen<T>
where T: kohonen_neuron::KohonenNeuron + Send + Sync + 'static |
pub fn show<T: kohonen_neuron::KohonenNeuron>(net: &Kohonen<T>, path: &str) {
let rows = net.rows;
let cols = net.cols;
let path = Path::new(path);
let mut os = match File::create(&path) {
Err(why) => panic!("couldn't make file pls halp: {}", why),
Ok(file) => file,
};
let _ = os.write_all("P6\n".as_bytes());
let _ = os.write_all((cols as u64).to_string().as_bytes());
let _ = os.write_all(" ".as_bytes());
let _ = os.write_all((rows as u64).to_string().as_bytes());
let _ = os.write_all("\n255\n".as_bytes());
for r in 0..rows {
for c in 0..cols {
let (r, g, b) = net[r][c].get_rgb();
let _ = os.write_all(&[r, g, b]);
}
}
}
/*pub fn show_csv<T: kohonen_neuron::KohonenNeuron>(net: &kohonen<T>, path: &str) {
}*/
fn main() {
use clap::{Arg, App};
let matches =
App::new("kohonen")
.version("0.1.0")
.about("A Kohonen SOFM")
.author("Fuck off")
.arg(Arg::with_name("iterations")
.short("its")
.long("iterations")
.help("How many training iterations to do")
.takes_value(true))
.arg(Arg::with_name("dim")
.short("dim")
.long("dimension")
.help("The size of the network")
.takes_value(true))
.arg(Arg::with_name("associate")
.short("a")
.long("associate")
.help("The association method")
.default_value("bucket")
.possible_values(&["bucket", "euclidean"])
.takes_value(true))
.arg(Arg::with_name("bucket decay")
.long("bucket-decay")
.help( "Exponentially affects how much energy it takes to overcome a higher \
difference. Lower values will keep spheres of influence small and \
tight, while higher ones (above 1.0) will allow greater spread.")
.default_value("0.7")
.takes_value(true))
.arg(Arg::with_name("colour model")
.long("colour-model")
.help("The color model to use.")
.default_value("hsl")
.possible_values(&["hsl", "rgb"])
.takes_value(true))
.arg(Arg::with_name("centroids")
.long("centroids")
.help("A list of centroids")
.takes_value(true))
.get_matches();
let net_dim = str::parse::<u32>(matches.value_of("dim").unwrap()).unwrap();
let train_its = str::parse::<u32>(matches.value_of("iterations").unwrap()).unwrap();
let associate = sphere_of_influence::from_str(matches.value_of("associate").unwrap()).unwrap();
let bucket_decay = str::parse::<f64>(matches.value_of("bucket decay").unwrap()).unwrap();
let associate = match associate {
sphere_of_influence::AssociationKind::Bucket(_) =>
sphere_of_influence::AssociationKind::Bucket(bucket_decay),
_ => associate
};
println!(
"Building a Kohonen net of {dim}x{dim} and training it for {its} iterations.",
dim=net_dim, its=train_its);
let colors: Vec<[f64; 3]> = vec![
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[0.8, 0.8, 0.0],
[0.0, 0.8, 0.8],
[0.8, 0.0, 0.8],
[0.4, 0.4, 0.4],
[1.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[1.0, 0.66, 0.75],
];
match matches.value_of("colour model").unwrap() {
"hsl" => {
let mut net = kohonen::new(net_dim as usize);
let old_net = net.clone();
let colors = colors
.into_iter()
.map(|[r, g, b]|
palette::Hsl::from(palette::Srgb::new(r as f32, g as f32, b as f32)))
.rev()
.collect();
net = iter_train(&net, &colors, train_its, associate);
println!("Overall displacement: {}", kohonen::disp(&old_net, &net));
let file = format!("./map_{its}its.ppm", its=train_its);
show(&net, &file)
},
"rgb" => {
let mut net = kohonen::new(net_dim as usize);
let old_net = net.clone();
net = iter_train(&net, &colors, train_its, associate);
println!("Overall displacement: {}", kohonen::disp(&old_net, &net));
let file = format!("./map_{its}its.ppm", its=train_its);
show(&net, &file)
},
_ => ()
};
}
| {
let mut rv = net.clone();
let width = net.cols as f64;
// training with a large fixed radius for a bit should help things get into
// the right general places
/*for _i in 0..(its / 2) {
let radius = width / 2.0;
let rate = 0.5;
rv = train(rv.clone(), samples, rate, radius as i32, associate.clone());
}
let its = its / 2 + (its % 2);*/
let time_constant = (its + 1) as f64 / width.ln();
for i in 0..its {
let radius = width * (0.0 - (i as f64 + 1.0) / time_constant).exp();
//let radius = width / 2.0;
let rate = (0.0 - (i as f64 + 1.0) / time_constant).exp().sqrt();
//let rate = 0.75;
println!("Radius: {radius}, rate: {rate}", radius=radius, rate=rate);
std::io::stdout().flush().unwrap();
let net2 = rv.clone();
rv = train(net2, samples, rate, radius.ceil() as i32, associate.clone())
}
rv
} | identifier_body |
dis2py.py | import re
from ast import literal_eval
from dataclasses import dataclass
from . import operations
COMPREHENSION = 1
GEN_EXPR = 1 << 2
RAW_JUMPS = 1 << 3
@dataclass
class Instruction:
line_num: int
offset: int
opname: str
arg: int
argval: object
def get_code_obj_name(s):
match = re.match(r"<code object <?(.*?)>? at (0x[0-9a-f]+).*>", s)
return match.group(1) + "_" + match.group(2)
def dis_to_instructions(disasm):
""" converts output of dis.dis into list of instructions"""
line_num = None
instructions = []
for line in disasm.split("\n"):
match = re.search(
r"( ?(?P<line_num>\d+)[ >]+)?(?P<offset>\d+) (?P<opname>[A-Z_]+)(?:\s+(?P<arg>\d+)(?: \((?P<argval>.+)\))?)?",
line
)
if match is not None:
if match["line_num"]:
line_num = int(match["line_num"])
offset = int(match["offset"])
opname = match["opname"]
if match["arg"] is not None:
arg = int(match["arg"])
else:
arg = None
if opname == "EXTENDED_ARG":
continue
argval = match["argval"]
instructions.append(Instruction(line_num, offset, opname, arg, argval))
return instructions
def is_store(instruction):
return instruction.opname in ("STORE_FAST", "STORE_NAME", "STORE_GLOBAL", "STORE_DEREF")
def is_identifier(s: str):
return str.isidentifier(s) and s not in ("True", "False", "None")
def instructions_to_asts(instructions, flags=0):
""" converts list of instruction into an AST"""
is_comp = flags & COMPREHENSION
is_genexpr = flags & GEN_EXPR
raw_jumps = flags & RAW_JUMPS
temp_name = "__temp" # name of temporary list/set/etc for comprehensions
indent = 0
arg_names = []
var_names = []
# list of all future changes in indentation (caused by loops,if,etc). format is (offset,change)
indent_changes = []
ast = []
instruction = None
def push(operation):
if raw_jumps:
ast.append((indent,operation,instruction.offset))
else:
ast.append((indent, operation))
def pop():
return ast.pop()[1]
def pop_n(n):
nonlocal ast
if n > 0: # ast[:-0] would be the empty list and ast[-0:] would be every element in ast
if raw_jumps:
ret = [x for _, x,_ in ast[-n:]]
else:
ret = [x for _, x in ast[-n:]]
ast = ast[:-n]
else:
ret = []
return ret
def peek(i=1):
return ast[-i][1]
def dedent_jump_to(offset):
for instruction2 in instructions:
if instruction2.opname == "JUMP_ABSOLUTE" and instruction2.arg == offset:
indent_changes.append((instruction2.offset + 2, -1))
break
def push_invalid(instruction):
push(operations.Invalid(instruction.opname, instruction.arg, instruction.argval))
i = 0
while i < len(instructions):
instruction = instructions[i]
opname = instruction.opname
if indent_changes:
to_remove = []
for indent_change in indent_changes:
if indent_change[0] == instruction.offset:
indent += indent_change[1]
to_remove.append(indent_change)
for indent_change in to_remove:
indent_changes.remove(indent_change)
if opname in ("LOAD_METHOD", "LOAD_ATTR"):
push(operations.Attribute(pop(), instruction.argval))
elif opname.startswith("LOAD"):
var_name = instruction.argval
if var_name.startswith(".") and (is_comp or is_genexpr):
var_name = "__" + var_name[1:]
if is_identifier(var_name):
if opname != "LOAD_GLOBAL" and var_name not in var_names:
arg_names.append(var_name)
var_names.append(var_name)
push(operations.Value(var_name))
elif is_store(instruction):
var_name = instruction.argval
if is_identifier(var_name):
var_names.append(var_name)
push(operations.Assign(var_name, pop()))
elif opname == "YIELD_VALUE":
push(operations.Yield(pop()))
elif opname == "RETURN_VALUE":
if is_comp:
push(operations.Return(operations.Value(temp_name)))
else:
push(operations.Return(pop()))
elif opname == "BUILD_MAP":
count = int(instruction.arg)
args = pop_n(2 * count)
push(operations.BuildMap(args))
elif opname == "BUILD_SLICE":
if instruction.arg == 2:
stop = pop()
start = pop()
push(operations.Slice(start, stop))
else:
step = pop()
stop = pop()
start = pop()
push(operations.Slice(start, stop, step))
elif opname.startswith("BUILD"):
# used to create lists, sets and tuples
operation = opname[len("BUILD_"):]
count = int(instruction.arg)
args = pop_n(count)
push(operations.build_operation(operation)(args))
elif opname == "GET_ITER":
push(operations.Iter(pop()))
elif opname == "FOR_ITER":
iterator = pop()
if isinstance(iterator, operations.Iter):
iterator = iterator.val
assign_op = instructions[i + 1] # get next instruction
i += 1
if is_store(assign_op):
index = assign_op.argval
var_names.append(index)
push(operations.ForLoop([index], iterator))
indent += 1
#detect end of loop
loop_end = int(instruction.argval[len("to "):])
indent_changes.append((loop_end, -1))
elif assign_op.opname == "UNPACK_SEQUENCE":
# loops like for i,j in zip(x,y)
num_vals = assign_op.arg
assign_ops = instructions[i + 1:i + num_vals + 1]
i += num_vals #skip all stores
indicies = []
for op in assign_ops:
var_name = op.argval
var_names.append(var_name)
indicies.append(var_name)
push(operations.ForLoop(indicies, iterator))
indent += 1
#detect end of loop
loop_end = int(instruction.argval[len("to "):])
indent_changes.append((loop_end, -1))
else:
push_invalid(instruction)
elif opname.startswith("POP_JUMP"): # if statements and while loops
val = pop()
if opname.endswith("TRUE"):
val = operations.unary_operation("not")(val)
jump_target = int(instruction.arg)
if raw_jumps:
val=val.val if opname.endswith("TRUE") else operations.unary_operation("not")(val)
push(operations.Jump(jump_target,val))
else:
if jump_target > instruction.offset:
indent_changes.append((jump_target, -1))
for instruction2 in instructions:
if instruction2.offset == jump_target - 2:
is_while = False
if instruction2.opname == "JUMP_ABSOLUTE" and instruction2.arg < instruction.offset:
for instruction3 in instructions:
if instruction3.offset > instruction.offset:
break
if instruction3.offset >= instruction2.arg and (
instruction3.opname.startswith("POP_JUMP") or
instruction3.opname == "FOR_ITER"
):
#either a if statement that is last statement in a loop or a while loop
is_while = instruction3.offset == instruction.offset
break
if is_while:
#instruction before jump target jumps above us and no POP_JUMPs between;
# this is a while loop
push(operations.WhileLoop(val))
if not is_while: # this is a normal if
if opname == "POP_JUMP_IF_TRUE" and instruction2.opname == "POP_JUMP_IF_FALSE":
#TODO: fix if statement with "or" operators
pass
if ast and isinstance(peek(), operations.Else):
pop()
indent -= 1
push(operations.Elif(val))
else:
push(operations.If(val))
break
else:
# this is a if statement that is the last statement in a for loop,
# so it jumps directly to the top of the for loop, so we dedent the JUMP_ABSOLUTE again
dedent_jump_to(jump_target)
push(operations.If(val))
indent += 1
elif opname == "JUMP_ABSOLUTE":
# used for many things, including continue, break, and jumping to the top of a loop
#TODO: continue in while loops
jump_target = int(instruction.arg)
if raw_jumps:
push(operations.Jump(jump_target))
else:
for instruction2 in instructions:
if instruction2.offset == jump_target:
if instruction2.opname == "FOR_ITER":
loop_end = int(instruction2.argval[len("to "):]) - 2
if loop_end != instruction.offset: # this isn't the end of the loop, but its still jumping, so this is a "continue"
if not isinstance(peek(), operations.Break):
push(operations.Continue())
#otherwise this is a normal jump to the top of the loop, so do nothing
else:
for instruction3 in instructions:
if (instruction3.opname == "FOR_ITER" and int(
instruction3.argval[len("to "):]
) == instruction2.offset) or (
instruction3.opname.startswith("POP_JUMP") and
instruction3.arg == instruction2.offset
):
#there is a loop also jumping to the same spot, so this is a "break"
push(operations.Break())
break
break
elif opname == "JUMP_FORWARD":
# used to jump over the else statement from the if statement's branch
jump_target = int(instruction.argval[len("to "):])
if raw_jumps:
push(operations.Jump(jump_target))
else:
indent -= 1
push(operations.Else())
indent += 2
indent_changes.append((jump_target, -1))
elif opname == "IMPORT_NAME":
fromlist = pop()
level = int(pop().val)
if level == 0: #absolute import
next_op = instructions[i + 1]
if is_store(next_op):
i += 1
alias = next_op.argval if next_op.argval != instruction.argval else None
push(operations.Import(instruction.argval, alias))
elif next_op.opname == "IMPORT_FROM":
names = []
i += 1
while next_op.opname == "IMPORT_FROM":
i += 1
assign_op = instructions[i]
names.append(assign_op.argval)
i += 1
next_op = instructions[i]
i -= 1
push(operations.FromImport(instruction.argval, names))
elif next_op.opname == "IMPORT_STAR":
i += 1
push(operations.FromImport(instruction.argval, [operations.Value("*")]))
else:
push_invalid(instruction)
else: #TODO:relative import
push_invalid(instruction)
elif opname == "RAISE_VARARGS":
argc = instruction.arg
if argc == 0:
push(operations.Raise())
elif argc == 1:
push(operations.Raise(pop()))
else:
push(operations.Raise(pop(), pop()))
elif opname in ("CALL_FUNCTION", "CALL_METHOD"):
argc = int(instruction.arg)
args = pop_n(argc)
func = pop()
push(operations.FunctionCall(func, args))
elif opname == "CALL_FUNCTION_KW":
# top of stack is a tuple of kwarg names pushed by LOAD_CONST
kwarg_names = literal_eval(pop().val)
kwargs = {}
for name in kwarg_names:
kwargs[name] = pop()
argc = int(instruction.arg) - len(kwargs)
args = pop_n(argc)
func = pop()
push(operations.FunctionCall(func, args, kwargs))
elif opname == "CALL_FUNCTION_EX":
if instruction.arg & 1: #lowest bit set
kwargs = pop()
args = pop()
func = pop()
push(
operations.FunctionCall(
func, [operations.UnpackSeq(args),
operations.UnpackDict(kwargs)]
)
)
else:
args = pop()
func = pop()
push(operations.FunctionCall(func, [operations.UnpackSeq(args)]))
elif opname == "MAKE_FUNCTION": # list comps, lambdas and nested functions
#TODO: handle the other flags
flags = instruction.arg
pop() # qualified name
code_obj = pop()
func_name = get_code_obj_name(code_obj.val)
if flags & 8:
closure_vars = pop().args
push(operations.Closure(func_name, closure_vars))
else:
push(operations.Value(func_name))
elif opname in ("LIST_APPEND", "SET_ADD"): #used in comprehensions
func = opname[opname.index("_") + 1:].lower()
if is_comp: | )
)
else:
push_invalid(instruction)
elif opname == "MAP_ADD": #used in dict comprehensions
if is_comp:
key = pop()
val = pop()
push(operations.SubscriptAssign(key, operations.Value(temp_name), val))
else:
push_invalid(instruction)
elif opname == "UNPACK_SEQUENCE":
push(operations.UnpackSeq(pop()))
elif opname == "UNPACK_EX": # unpacking assignment
num_vals_before = instruction.arg & 0xff
num_vals_after = (instruction.arg >> 8) & 0xff #high byte
num_vals = num_vals_before + num_vals_after
assign_ops = []
for j in range(num_vals_before):
assign_ops.append(instructions[i + j + 1])
j += 1
assign_op = instructions[i + j + 1]
if is_store(assign_op): #list unpack
num_vals += 1
assign_op.argval = "*" + assign_op.argval
assign_ops.append(assign_op)
j += 1
for j in range(j, j + num_vals_after):
assign_ops.append(instructions[i + j + 1])
i += num_vals #skip all stores
names = []
for op in assign_ops:
var_name = op.argval
var_names.append(var_name)
names.append(var_name)
push(operations.Assign(operations.build_operation("tuple")(names), pop()))
elif opname == "COMPARE_OP":
right = pop()
left = pop()
push(operations.Comparison(instruction.argval, left, right))
elif opname == "BINARY_SUBSCR":
if isinstance(peek(), operations.Slice):
slice_ = pop()
val = pop()
push(operations.SubscriptSlice(val, slice_.start, slice_.stop, slice_.step))
else:
subscript = pop()
val = pop()
push(operations.Subscript(val, subscript))
elif opname == "STORE_SUBSCR":
push(operations.SubscriptAssign(pop(), pop(), pop()))
elif opname.startswith("UNARY"):
operation = opname[len("UNARY_"):]
push(operations.unary_operation(operation)(pop()))
elif opname.startswith("BINARY"):
operation = opname[len("BINARY_"):]
right = pop()
left = pop()
push(operations.binary_operation(operation)(left, right))
elif opname.startswith("INPLACE"):
operation = opname[len("INPLACE_"):]
right = pop()
left = pop()
if is_store(instructions[i + 1]):
i += 1
push(operations.inplace_operation(operation)(left, right))
else:
push_invalid(instruction)
elif opname not in ("NOP", "POP_TOP"):
push_invalid(instruction)
if i == 0 and is_comp: #give the temporary for list comps a name
push(operations.Assign(operations.Value(temp_name), pop()))
i += 1
return (ast, arg_names)
def asts_to_code(asts, flags=0,tab_char="\t"):
""" converts an ast into python code"""
if flags& RAW_JUMPS:
max_offset_len = len(str(asts[-1][2]))
return "\n".join(str(offset).ljust(max_offset_len," ") + tab_char * (indent + 1) + str(ast) for indent, ast, offset in asts)
else:
return "\n".join(tab_char * indent + str(ast) for indent, ast in asts)
def decompile(disasm, flags=0, tab_char="\t"):
instructions = dis_to_instructions(disasm)
asts, arg_names = instructions_to_asts(instructions, flags)
return asts_to_code(asts, flags,tab_char), arg_names
def split_funcs(disasm):
""" splits out comprehensions from the main func or functions from the module"""
start_positions = [0]
end_positions = []
names = []
if not disasm.startswith("Disassembly"):
names.append("main")
for match in re.finditer(r"Disassembly of (.+):", disasm):
end_positions.append(match.start())
start_positions.append(match.end())
name = match.group(1)
if name.startswith("<"):
names.append(get_code_obj_name(name))
else:
names.append(name)
end_positions.append(len(disasm))
if disasm.startswith("Disassembly"):
start_positions.pop(0)
end_positions.pop(0)
for start, end, name in zip(start_positions, end_positions, names):
yield (name, disasm[start:end])
def get_flags(name):
if name.startswith("genexpr"):
return GEN_EXPR
elif "comp" in name:
return COMPREHENSION
else:
return 0
def decompile_all(disasm,flags=0,tab_char="\t"):
disasm = re.sub(r"^#.*\n?", "", disasm, re.MULTILINE).strip() # ignore comments
for name, func in split_funcs(disasm):
yield name, *decompile(func, get_flags(name)|flags, tab_char)
def pretty_decompile(disasm,flags=0,tab_char="\t"):
ret = []
for name, code, arg_names in decompile_all(disasm, flags, tab_char):
ret.append(
f"def {name}({','.join(arg_names)}):\n" +
"\n".join(tab_char + line for line in code.split("\n"))
)
return "\n".join(ret) | push(
operations.FunctionCall(
operations.Attribute(operations.Value(temp_name), operations.Value(func)),
[pop()] | random_line_split |
dis2py.py | import re
from ast import literal_eval
from dataclasses import dataclass
from . import operations
COMPREHENSION = 1
GEN_EXPR = 1 << 2
RAW_JUMPS = 1 << 3
@dataclass
class Instruction:
line_num: int
offset: int
opname: str
arg: int
argval: object
def get_code_obj_name(s):
match = re.match(r"<code object <?(.*?)>? at (0x[0-9a-f]+).*>", s)
return match.group(1) + "_" + match.group(2)
def dis_to_instructions(disasm):
""" converts output of dis.dis into list of instructions"""
line_num = None
instructions = []
for line in disasm.split("\n"):
match = re.search(
r"( ?(?P<line_num>\d+)[ >]+)?(?P<offset>\d+) (?P<opname>[A-Z_]+)(?:\s+(?P<arg>\d+)(?: \((?P<argval>.+)\))?)?",
line
)
if match is not None:
if match["line_num"]:
line_num = int(match["line_num"])
offset = int(match["offset"])
opname = match["opname"]
if match["arg"] is not None:
arg = int(match["arg"])
else:
arg = None
if opname == "EXTENDED_ARG":
continue
argval = match["argval"]
instructions.append(Instruction(line_num, offset, opname, arg, argval))
return instructions
def is_store(instruction):
return instruction.opname in ("STORE_FAST", "STORE_NAME", "STORE_GLOBAL", "STORE_DEREF")
def is_identifier(s: str):
return str.isidentifier(s) and s not in ("True", "False", "None")
def instructions_to_asts(instructions, flags=0):
""" converts list of instruction into an AST"""
is_comp = flags & COMPREHENSION
is_genexpr = flags & GEN_EXPR
raw_jumps = flags & RAW_JUMPS
temp_name = "__temp" # name of temporary list/set/etc for comprehensions
indent = 0
arg_names = []
var_names = []
# list of all future changes in indentation (caused by loops,if,etc). format is (offset,change)
indent_changes = []
ast = []
instruction = None
def push(operation):
if raw_jumps:
ast.append((indent,operation,instruction.offset))
else:
ast.append((indent, operation))
def pop():
return ast.pop()[1]
def pop_n(n):
nonlocal ast
if n > 0: # ast[:-0] would be the empty list and ast[-0:] would be every element in ast
if raw_jumps:
ret = [x for _, x,_ in ast[-n:]]
else:
ret = [x for _, x in ast[-n:]]
ast = ast[:-n]
else:
ret = []
return ret
def peek(i=1):
return ast[-i][1]
def dedent_jump_to(offset):
for instruction2 in instructions:
if instruction2.opname == "JUMP_ABSOLUTE" and instruction2.arg == offset:
indent_changes.append((instruction2.offset + 2, -1))
break
def push_invalid(instruction):
push(operations.Invalid(instruction.opname, instruction.arg, instruction.argval))
i = 0
while i < len(instructions):
instruction = instructions[i]
opname = instruction.opname
if indent_changes:
to_remove = []
for indent_change in indent_changes:
if indent_change[0] == instruction.offset:
indent += indent_change[1]
to_remove.append(indent_change)
for indent_change in to_remove:
indent_changes.remove(indent_change)
if opname in ("LOAD_METHOD", "LOAD_ATTR"):
push(operations.Attribute(pop(), instruction.argval))
elif opname.startswith("LOAD"):
var_name = instruction.argval
if var_name.startswith(".") and (is_comp or is_genexpr):
var_name = "__" + var_name[1:]
if is_identifier(var_name):
if opname != "LOAD_GLOBAL" and var_name not in var_names:
arg_names.append(var_name)
var_names.append(var_name)
push(operations.Value(var_name))
elif is_store(instruction):
var_name = instruction.argval
if is_identifier(var_name):
var_names.append(var_name)
push(operations.Assign(var_name, pop()))
elif opname == "YIELD_VALUE":
push(operations.Yield(pop()))
elif opname == "RETURN_VALUE":
if is_comp:
push(operations.Return(operations.Value(temp_name)))
else:
push(operations.Return(pop()))
elif opname == "BUILD_MAP":
count = int(instruction.arg)
args = pop_n(2 * count)
push(operations.BuildMap(args))
elif opname == "BUILD_SLICE":
if instruction.arg == 2:
stop = pop()
start = pop()
push(operations.Slice(start, stop))
else:
step = pop()
stop = pop()
start = pop()
push(operations.Slice(start, stop, step))
elif opname.startswith("BUILD"):
# used to create lists, sets and tuples
operation = opname[len("BUILD_"):]
count = int(instruction.arg)
args = pop_n(count)
push(operations.build_operation(operation)(args))
elif opname == "GET_ITER":
push(operations.Iter(pop()))
elif opname == "FOR_ITER":
iterator = pop()
if isinstance(iterator, operations.Iter):
iterator = iterator.val
assign_op = instructions[i + 1] # get next instruction
i += 1
if is_store(assign_op):
index = assign_op.argval
var_names.append(index)
push(operations.ForLoop([index], iterator))
indent += 1
#detect end of loop
loop_end = int(instruction.argval[len("to "):])
indent_changes.append((loop_end, -1))
elif assign_op.opname == "UNPACK_SEQUENCE":
# loops like for i,j in zip(x,y)
num_vals = assign_op.arg
assign_ops = instructions[i + 1:i + num_vals + 1]
i += num_vals #skip all stores
indicies = []
for op in assign_ops:
var_name = op.argval
var_names.append(var_name)
indicies.append(var_name)
push(operations.ForLoop(indicies, iterator))
indent += 1
#detect end of loop
loop_end = int(instruction.argval[len("to "):])
indent_changes.append((loop_end, -1))
else:
push_invalid(instruction)
elif opname.startswith("POP_JUMP"): # if statements and while loops
val = pop()
if opname.endswith("TRUE"):
val = operations.unary_operation("not")(val)
jump_target = int(instruction.arg)
if raw_jumps:
val=val.val if opname.endswith("TRUE") else operations.unary_operation("not")(val)
push(operations.Jump(jump_target,val))
else:
if jump_target > instruction.offset:
indent_changes.append((jump_target, -1))
for instruction2 in instructions:
if instruction2.offset == jump_target - 2:
is_while = False
if instruction2.opname == "JUMP_ABSOLUTE" and instruction2.arg < instruction.offset:
for instruction3 in instructions:
if instruction3.offset > instruction.offset:
break
if instruction3.offset >= instruction2.arg and (
instruction3.opname.startswith("POP_JUMP") or
instruction3.opname == "FOR_ITER"
):
#either a if statement that is last statement in a loop or a while loop
is_while = instruction3.offset == instruction.offset
break
if is_while:
#instruction before jump target jumps above us and no POP_JUMPs between;
# this is a while loop
push(operations.WhileLoop(val))
if not is_while: # this is a normal if
if opname == "POP_JUMP_IF_TRUE" and instruction2.opname == "POP_JUMP_IF_FALSE":
#TODO: fix if statement with "or" operators
pass
if ast and isinstance(peek(), operations.Else):
pop()
indent -= 1
push(operations.Elif(val))
else:
push(operations.If(val))
break
else:
# this is a if statement that is the last statement in a for loop,
# so it jumps directly to the top of the for loop, so we dedent the JUMP_ABSOLUTE again
dedent_jump_to(jump_target)
push(operations.If(val))
indent += 1
elif opname == "JUMP_ABSOLUTE":
# used for many things, including continue, break, and jumping to the top of a loop
#TODO: continue in while loops
jump_target = int(instruction.arg)
if raw_jumps:
push(operations.Jump(jump_target))
else:
for instruction2 in instructions:
if instruction2.offset == jump_target:
if instruction2.opname == "FOR_ITER":
loop_end = int(instruction2.argval[len("to "):]) - 2
if loop_end != instruction.offset: # this isn't the end of the loop, but its still jumping, so this is a "continue"
if not isinstance(peek(), operations.Break):
push(operations.Continue())
#otherwise this is a normal jump to the top of the loop, so do nothing
else:
for instruction3 in instructions:
if (instruction3.opname == "FOR_ITER" and int(
instruction3.argval[len("to "):]
) == instruction2.offset) or (
instruction3.opname.startswith("POP_JUMP") and
instruction3.arg == instruction2.offset
):
#there is a loop also jumping to the same spot, so this is a "break"
push(operations.Break())
break
break
elif opname == "JUMP_FORWARD":
# used to jump over the else statement from the if statement's branch
jump_target = int(instruction.argval[len("to "):])
if raw_jumps:
push(operations.Jump(jump_target))
else:
indent -= 1
push(operations.Else())
indent += 2
indent_changes.append((jump_target, -1))
elif opname == "IMPORT_NAME":
fromlist = pop()
level = int(pop().val)
if level == 0: #absolute import
next_op = instructions[i + 1]
if is_store(next_op):
i += 1
alias = next_op.argval if next_op.argval != instruction.argval else None
push(operations.Import(instruction.argval, alias))
elif next_op.opname == "IMPORT_FROM":
names = []
i += 1
while next_op.opname == "IMPORT_FROM":
i += 1
assign_op = instructions[i]
names.append(assign_op.argval)
i += 1
next_op = instructions[i]
i -= 1
push(operations.FromImport(instruction.argval, names))
elif next_op.opname == "IMPORT_STAR":
i += 1
push(operations.FromImport(instruction.argval, [operations.Value("*")]))
else:
push_invalid(instruction)
else: #TODO:relative import
push_invalid(instruction)
elif opname == "RAISE_VARARGS":
argc = instruction.arg
if argc == 0:
push(operations.Raise())
elif argc == 1:
push(operations.Raise(pop()))
else:
push(operations.Raise(pop(), pop()))
elif opname in ("CALL_FUNCTION", "CALL_METHOD"):
argc = int(instruction.arg)
args = pop_n(argc)
func = pop()
push(operations.FunctionCall(func, args))
elif opname == "CALL_FUNCTION_KW":
# top of stack is a tuple of kwarg names pushed by LOAD_CONST
kwarg_names = literal_eval(pop().val)
kwargs = {}
for name in kwarg_names:
kwargs[name] = pop()
argc = int(instruction.arg) - len(kwargs)
args = pop_n(argc)
func = pop()
push(operations.FunctionCall(func, args, kwargs))
elif opname == "CALL_FUNCTION_EX":
if instruction.arg & 1: #lowest bit set
kwargs = pop()
args = pop()
func = pop()
push(
operations.FunctionCall(
func, [operations.UnpackSeq(args),
operations.UnpackDict(kwargs)]
)
)
else:
args = pop()
func = pop()
push(operations.FunctionCall(func, [operations.UnpackSeq(args)]))
elif opname == "MAKE_FUNCTION": # list comps, lambdas and nested functions
#TODO: handle the other flags
flags = instruction.arg
pop() # qualified name
code_obj = pop()
func_name = get_code_obj_name(code_obj.val)
if flags & 8:
closure_vars = pop().args
push(operations.Closure(func_name, closure_vars))
else:
push(operations.Value(func_name))
elif opname in ("LIST_APPEND", "SET_ADD"): #used in comprehensions
func = opname[opname.index("_") + 1:].lower()
if is_comp:
push(
operations.FunctionCall(
operations.Attribute(operations.Value(temp_name), operations.Value(func)),
[pop()]
)
)
else:
push_invalid(instruction)
elif opname == "MAP_ADD": #used in dict comprehensions
if is_comp:
key = pop()
val = pop()
push(operations.SubscriptAssign(key, operations.Value(temp_name), val))
else:
push_invalid(instruction)
elif opname == "UNPACK_SEQUENCE":
push(operations.UnpackSeq(pop()))
elif opname == "UNPACK_EX": # unpacking assignment
num_vals_before = instruction.arg & 0xff
num_vals_after = (instruction.arg >> 8) & 0xff #high byte
num_vals = num_vals_before + num_vals_after
assign_ops = []
for j in range(num_vals_before):
assign_ops.append(instructions[i + j + 1])
j += 1
assign_op = instructions[i + j + 1]
if is_store(assign_op): #list unpack
num_vals += 1
assign_op.argval = "*" + assign_op.argval
assign_ops.append(assign_op)
j += 1
for j in range(j, j + num_vals_after):
assign_ops.append(instructions[i + j + 1])
i += num_vals #skip all stores
names = []
for op in assign_ops:
var_name = op.argval
var_names.append(var_name)
names.append(var_name)
push(operations.Assign(operations.build_operation("tuple")(names), pop()))
elif opname == "COMPARE_OP":
right = pop()
left = pop()
push(operations.Comparison(instruction.argval, left, right))
elif opname == "BINARY_SUBSCR":
if isinstance(peek(), operations.Slice):
slice_ = pop()
val = pop()
push(operations.SubscriptSlice(val, slice_.start, slice_.stop, slice_.step))
else:
subscript = pop()
val = pop()
push(operations.Subscript(val, subscript))
elif opname == "STORE_SUBSCR":
push(operations.SubscriptAssign(pop(), pop(), pop()))
elif opname.startswith("UNARY"):
operation = opname[len("UNARY_"):]
push(operations.unary_operation(operation)(pop()))
elif opname.startswith("BINARY"):
operation = opname[len("BINARY_"):]
right = pop()
left = pop()
push(operations.binary_operation(operation)(left, right))
elif opname.startswith("INPLACE"):
operation = opname[len("INPLACE_"):]
right = pop()
left = pop()
if is_store(instructions[i + 1]):
i += 1
push(operations.inplace_operation(operation)(left, right))
else:
push_invalid(instruction)
elif opname not in ("NOP", "POP_TOP"):
push_invalid(instruction)
if i == 0 and is_comp: #give the temporary for list comps a name
push(operations.Assign(operations.Value(temp_name), pop()))
i += 1
return (ast, arg_names)
def asts_to_code(asts, flags=0,tab_char="\t"):
""" converts an ast into python code"""
if flags& RAW_JUMPS:
max_offset_len = len(str(asts[-1][2]))
return "\n".join(str(offset).ljust(max_offset_len," ") + tab_char * (indent + 1) + str(ast) for indent, ast, offset in asts)
else:
return "\n".join(tab_char * indent + str(ast) for indent, ast in asts)
def decompile(disasm, flags=0, tab_char="\t"):
instructions = dis_to_instructions(disasm)
asts, arg_names = instructions_to_asts(instructions, flags)
return asts_to_code(asts, flags,tab_char), arg_names
def split_funcs(disasm):
""" splits out comprehensions from the main func or functions from the module"""
start_positions = [0]
end_positions = []
names = []
if not disasm.startswith("Disassembly"):
names.append("main")
for match in re.finditer(r"Disassembly of (.+):", disasm):
end_positions.append(match.start())
start_positions.append(match.end())
name = match.group(1)
if name.startswith("<"):
names.append(get_code_obj_name(name))
else:
names.append(name)
end_positions.append(len(disasm))
if disasm.startswith("Disassembly"):
start_positions.pop(0)
end_positions.pop(0)
for start, end, name in zip(start_positions, end_positions, names):
yield (name, disasm[start:end])
def get_flags(name):
if name.startswith("genexpr"):
return GEN_EXPR
elif "comp" in name:
return COMPREHENSION
else:
return 0
def decompile_all(disasm,flags=0,tab_char="\t"):
|
def pretty_decompile(disasm,flags=0,tab_char="\t"):
ret = []
for name, code, arg_names in decompile_all(disasm, flags, tab_char):
ret.append(
f"def {name}({','.join(arg_names)}):\n" +
"\n".join(tab_char + line for line in code.split("\n"))
)
return "\n".join(ret)
| disasm = re.sub(r"^#.*\n?", "", disasm, re.MULTILINE).strip() # ignore comments
for name, func in split_funcs(disasm):
yield name, *decompile(func, get_flags(name)|flags, tab_char) | identifier_body |
dis2py.py | import re
from ast import literal_eval
from dataclasses import dataclass
from . import operations
COMPREHENSION = 1
GEN_EXPR = 1 << 2
RAW_JUMPS = 1 << 3
@dataclass
class Instruction:
line_num: int
offset: int
opname: str
arg: int
argval: object
def get_code_obj_name(s):
match = re.match(r"<code object <?(.*?)>? at (0x[0-9a-f]+).*>", s)
return match.group(1) + "_" + match.group(2)
def dis_to_instructions(disasm):
""" converts output of dis.dis into list of instructions"""
line_num = None
instructions = []
for line in disasm.split("\n"):
match = re.search(
r"( ?(?P<line_num>\d+)[ >]+)?(?P<offset>\d+) (?P<opname>[A-Z_]+)(?:\s+(?P<arg>\d+)(?: \((?P<argval>.+)\))?)?",
line
)
if match is not None:
if match["line_num"]:
line_num = int(match["line_num"])
offset = int(match["offset"])
opname = match["opname"]
if match["arg"] is not None:
arg = int(match["arg"])
else:
arg = None
if opname == "EXTENDED_ARG":
continue
argval = match["argval"]
instructions.append(Instruction(line_num, offset, opname, arg, argval))
return instructions
def is_store(instruction):
return instruction.opname in ("STORE_FAST", "STORE_NAME", "STORE_GLOBAL", "STORE_DEREF")
def is_identifier(s: str):
return str.isidentifier(s) and s not in ("True", "False", "None")
def instructions_to_asts(instructions, flags=0):
""" converts list of instruction into an AST"""
is_comp = flags & COMPREHENSION
is_genexpr = flags & GEN_EXPR
raw_jumps = flags & RAW_JUMPS
temp_name = "__temp" # name of temporary list/set/etc for comprehensions
indent = 0
arg_names = []
var_names = []
# list of all future changes in indentation (caused by loops,if,etc). format is (offset,change)
indent_changes = []
ast = []
instruction = None
def push(operation):
if raw_jumps:
|
else:
ast.append((indent, operation))
def pop():
return ast.pop()[1]
def pop_n(n):
nonlocal ast
if n > 0: # ast[:-0] would be the empty list and ast[-0:] would be every element in ast
if raw_jumps:
ret = [x for _, x,_ in ast[-n:]]
else:
ret = [x for _, x in ast[-n:]]
ast = ast[:-n]
else:
ret = []
return ret
def peek(i=1):
return ast[-i][1]
def dedent_jump_to(offset):
for instruction2 in instructions:
if instruction2.opname == "JUMP_ABSOLUTE" and instruction2.arg == offset:
indent_changes.append((instruction2.offset + 2, -1))
break
def push_invalid(instruction):
push(operations.Invalid(instruction.opname, instruction.arg, instruction.argval))
i = 0
while i < len(instructions):
instruction = instructions[i]
opname = instruction.opname
if indent_changes:
to_remove = []
for indent_change in indent_changes:
if indent_change[0] == instruction.offset:
indent += indent_change[1]
to_remove.append(indent_change)
for indent_change in to_remove:
indent_changes.remove(indent_change)
if opname in ("LOAD_METHOD", "LOAD_ATTR"):
push(operations.Attribute(pop(), instruction.argval))
elif opname.startswith("LOAD"):
var_name = instruction.argval
if var_name.startswith(".") and (is_comp or is_genexpr):
var_name = "__" + var_name[1:]
if is_identifier(var_name):
if opname != "LOAD_GLOBAL" and var_name not in var_names:
arg_names.append(var_name)
var_names.append(var_name)
push(operations.Value(var_name))
elif is_store(instruction):
var_name = instruction.argval
if is_identifier(var_name):
var_names.append(var_name)
push(operations.Assign(var_name, pop()))
elif opname == "YIELD_VALUE":
push(operations.Yield(pop()))
elif opname == "RETURN_VALUE":
if is_comp:
push(operations.Return(operations.Value(temp_name)))
else:
push(operations.Return(pop()))
elif opname == "BUILD_MAP":
count = int(instruction.arg)
args = pop_n(2 * count)
push(operations.BuildMap(args))
elif opname == "BUILD_SLICE":
if instruction.arg == 2:
stop = pop()
start = pop()
push(operations.Slice(start, stop))
else:
step = pop()
stop = pop()
start = pop()
push(operations.Slice(start, stop, step))
elif opname.startswith("BUILD"):
# used to create lists, sets and tuples
operation = opname[len("BUILD_"):]
count = int(instruction.arg)
args = pop_n(count)
push(operations.build_operation(operation)(args))
elif opname == "GET_ITER":
push(operations.Iter(pop()))
elif opname == "FOR_ITER":
iterator = pop()
if isinstance(iterator, operations.Iter):
iterator = iterator.val
assign_op = instructions[i + 1] # get next instruction
i += 1
if is_store(assign_op):
index = assign_op.argval
var_names.append(index)
push(operations.ForLoop([index], iterator))
indent += 1
#detect end of loop
loop_end = int(instruction.argval[len("to "):])
indent_changes.append((loop_end, -1))
elif assign_op.opname == "UNPACK_SEQUENCE":
# loops like for i,j in zip(x,y)
num_vals = assign_op.arg
assign_ops = instructions[i + 1:i + num_vals + 1]
i += num_vals #skip all stores
indicies = []
for op in assign_ops:
var_name = op.argval
var_names.append(var_name)
indicies.append(var_name)
push(operations.ForLoop(indicies, iterator))
indent += 1
#detect end of loop
loop_end = int(instruction.argval[len("to "):])
indent_changes.append((loop_end, -1))
else:
push_invalid(instruction)
elif opname.startswith("POP_JUMP"): # if statements and while loops
val = pop()
if opname.endswith("TRUE"):
val = operations.unary_operation("not")(val)
jump_target = int(instruction.arg)
if raw_jumps:
val=val.val if opname.endswith("TRUE") else operations.unary_operation("not")(val)
push(operations.Jump(jump_target,val))
else:
if jump_target > instruction.offset:
indent_changes.append((jump_target, -1))
for instruction2 in instructions:
if instruction2.offset == jump_target - 2:
is_while = False
if instruction2.opname == "JUMP_ABSOLUTE" and instruction2.arg < instruction.offset:
for instruction3 in instructions:
if instruction3.offset > instruction.offset:
break
if instruction3.offset >= instruction2.arg and (
instruction3.opname.startswith("POP_JUMP") or
instruction3.opname == "FOR_ITER"
):
#either a if statement that is last statement in a loop or a while loop
is_while = instruction3.offset == instruction.offset
break
if is_while:
#instruction before jump target jumps above us and no POP_JUMPs between;
# this is a while loop
push(operations.WhileLoop(val))
if not is_while: # this is a normal if
if opname == "POP_JUMP_IF_TRUE" and instruction2.opname == "POP_JUMP_IF_FALSE":
#TODO: fix if statement with "or" operators
pass
if ast and isinstance(peek(), operations.Else):
pop()
indent -= 1
push(operations.Elif(val))
else:
push(operations.If(val))
break
else:
# this is a if statement that is the last statement in a for loop,
# so it jumps directly to the top of the for loop, so we dedent the JUMP_ABSOLUTE again
dedent_jump_to(jump_target)
push(operations.If(val))
indent += 1
elif opname == "JUMP_ABSOLUTE":
# used for many things, including continue, break, and jumping to the top of a loop
#TODO: continue in while loops
jump_target = int(instruction.arg)
if raw_jumps:
push(operations.Jump(jump_target))
else:
for instruction2 in instructions:
if instruction2.offset == jump_target:
if instruction2.opname == "FOR_ITER":
loop_end = int(instruction2.argval[len("to "):]) - 2
if loop_end != instruction.offset: # this isn't the end of the loop, but its still jumping, so this is a "continue"
if not isinstance(peek(), operations.Break):
push(operations.Continue())
#otherwise this is a normal jump to the top of the loop, so do nothing
else:
for instruction3 in instructions:
if (instruction3.opname == "FOR_ITER" and int(
instruction3.argval[len("to "):]
) == instruction2.offset) or (
instruction3.opname.startswith("POP_JUMP") and
instruction3.arg == instruction2.offset
):
#there is a loop also jumping to the same spot, so this is a "break"
push(operations.Break())
break
break
elif opname == "JUMP_FORWARD":
# used to jump over the else statement from the if statement's branch
jump_target = int(instruction.argval[len("to "):])
if raw_jumps:
push(operations.Jump(jump_target))
else:
indent -= 1
push(operations.Else())
indent += 2
indent_changes.append((jump_target, -1))
elif opname == "IMPORT_NAME":
fromlist = pop()
level = int(pop().val)
if level == 0: #absolute import
next_op = instructions[i + 1]
if is_store(next_op):
i += 1
alias = next_op.argval if next_op.argval != instruction.argval else None
push(operations.Import(instruction.argval, alias))
elif next_op.opname == "IMPORT_FROM":
names = []
i += 1
while next_op.opname == "IMPORT_FROM":
i += 1
assign_op = instructions[i]
names.append(assign_op.argval)
i += 1
next_op = instructions[i]
i -= 1
push(operations.FromImport(instruction.argval, names))
elif next_op.opname == "IMPORT_STAR":
i += 1
push(operations.FromImport(instruction.argval, [operations.Value("*")]))
else:
push_invalid(instruction)
else: #TODO:relative import
push_invalid(instruction)
elif opname == "RAISE_VARARGS":
argc = instruction.arg
if argc == 0:
push(operations.Raise())
elif argc == 1:
push(operations.Raise(pop()))
else:
push(operations.Raise(pop(), pop()))
elif opname in ("CALL_FUNCTION", "CALL_METHOD"):
argc = int(instruction.arg)
args = pop_n(argc)
func = pop()
push(operations.FunctionCall(func, args))
elif opname == "CALL_FUNCTION_KW":
# top of stack is a tuple of kwarg names pushed by LOAD_CONST
kwarg_names = literal_eval(pop().val)
kwargs = {}
for name in kwarg_names:
kwargs[name] = pop()
argc = int(instruction.arg) - len(kwargs)
args = pop_n(argc)
func = pop()
push(operations.FunctionCall(func, args, kwargs))
elif opname == "CALL_FUNCTION_EX":
if instruction.arg & 1: #lowest bit set
kwargs = pop()
args = pop()
func = pop()
push(
operations.FunctionCall(
func, [operations.UnpackSeq(args),
operations.UnpackDict(kwargs)]
)
)
else:
args = pop()
func = pop()
push(operations.FunctionCall(func, [operations.UnpackSeq(args)]))
elif opname == "MAKE_FUNCTION": # list comps, lambdas and nested functions
#TODO: handle the other flags
flags = instruction.arg
pop() # qualified name
code_obj = pop()
func_name = get_code_obj_name(code_obj.val)
if flags & 8:
closure_vars = pop().args
push(operations.Closure(func_name, closure_vars))
else:
push(operations.Value(func_name))
elif opname in ("LIST_APPEND", "SET_ADD"): #used in comprehensions
func = opname[opname.index("_") + 1:].lower()
if is_comp:
push(
operations.FunctionCall(
operations.Attribute(operations.Value(temp_name), operations.Value(func)),
[pop()]
)
)
else:
push_invalid(instruction)
elif opname == "MAP_ADD": #used in dict comprehensions
if is_comp:
key = pop()
val = pop()
push(operations.SubscriptAssign(key, operations.Value(temp_name), val))
else:
push_invalid(instruction)
elif opname == "UNPACK_SEQUENCE":
push(operations.UnpackSeq(pop()))
elif opname == "UNPACK_EX": # unpacking assignment
num_vals_before = instruction.arg & 0xff
num_vals_after = (instruction.arg >> 8) & 0xff #high byte
num_vals = num_vals_before + num_vals_after
assign_ops = []
for j in range(num_vals_before):
assign_ops.append(instructions[i + j + 1])
j += 1
assign_op = instructions[i + j + 1]
if is_store(assign_op): #list unpack
num_vals += 1
assign_op.argval = "*" + assign_op.argval
assign_ops.append(assign_op)
j += 1
for j in range(j, j + num_vals_after):
assign_ops.append(instructions[i + j + 1])
i += num_vals #skip all stores
names = []
for op in assign_ops:
var_name = op.argval
var_names.append(var_name)
names.append(var_name)
push(operations.Assign(operations.build_operation("tuple")(names), pop()))
elif opname == "COMPARE_OP":
right = pop()
left = pop()
push(operations.Comparison(instruction.argval, left, right))
elif opname == "BINARY_SUBSCR":
if isinstance(peek(), operations.Slice):
slice_ = pop()
val = pop()
push(operations.SubscriptSlice(val, slice_.start, slice_.stop, slice_.step))
else:
subscript = pop()
val = pop()
push(operations.Subscript(val, subscript))
elif opname == "STORE_SUBSCR":
push(operations.SubscriptAssign(pop(), pop(), pop()))
elif opname.startswith("UNARY"):
operation = opname[len("UNARY_"):]
push(operations.unary_operation(operation)(pop()))
elif opname.startswith("BINARY"):
operation = opname[len("BINARY_"):]
right = pop()
left = pop()
push(operations.binary_operation(operation)(left, right))
elif opname.startswith("INPLACE"):
operation = opname[len("INPLACE_"):]
right = pop()
left = pop()
if is_store(instructions[i + 1]):
i += 1
push(operations.inplace_operation(operation)(left, right))
else:
push_invalid(instruction)
elif opname not in ("NOP", "POP_TOP"):
push_invalid(instruction)
if i == 0 and is_comp: #give the temporary for list comps a name
push(operations.Assign(operations.Value(temp_name), pop()))
i += 1
return (ast, arg_names)
def asts_to_code(asts, flags=0,tab_char="\t"):
""" converts an ast into python code"""
if flags& RAW_JUMPS:
max_offset_len = len(str(asts[-1][2]))
return "\n".join(str(offset).ljust(max_offset_len," ") + tab_char * (indent + 1) + str(ast) for indent, ast, offset in asts)
else:
return "\n".join(tab_char * indent + str(ast) for indent, ast in asts)
def decompile(disasm, flags=0, tab_char="\t"):
instructions = dis_to_instructions(disasm)
asts, arg_names = instructions_to_asts(instructions, flags)
return asts_to_code(asts, flags,tab_char), arg_names
def split_funcs(disasm):
""" splits out comprehensions from the main func or functions from the module"""
start_positions = [0]
end_positions = []
names = []
if not disasm.startswith("Disassembly"):
names.append("main")
for match in re.finditer(r"Disassembly of (.+):", disasm):
end_positions.append(match.start())
start_positions.append(match.end())
name = match.group(1)
if name.startswith("<"):
names.append(get_code_obj_name(name))
else:
names.append(name)
end_positions.append(len(disasm))
if disasm.startswith("Disassembly"):
start_positions.pop(0)
end_positions.pop(0)
for start, end, name in zip(start_positions, end_positions, names):
yield (name, disasm[start:end])
def get_flags(name):
if name.startswith("genexpr"):
return GEN_EXPR
elif "comp" in name:
return COMPREHENSION
else:
return 0
def decompile_all(disasm,flags=0,tab_char="\t"):
disasm = re.sub(r"^#.*\n?", "", disasm, re.MULTILINE).strip() # ignore comments
for name, func in split_funcs(disasm):
yield name, *decompile(func, get_flags(name)|flags, tab_char)
def pretty_decompile(disasm,flags=0,tab_char="\t"):
ret = []
for name, code, arg_names in decompile_all(disasm, flags, tab_char):
ret.append(
f"def {name}({','.join(arg_names)}):\n" +
"\n".join(tab_char + line for line in code.split("\n"))
)
return "\n".join(ret)
| ast.append((indent,operation,instruction.offset)) | conditional_block |
dis2py.py | import re
from ast import literal_eval
from dataclasses import dataclass
from . import operations
COMPREHENSION = 1
GEN_EXPR = 1 << 2
RAW_JUMPS = 1 << 3
@dataclass
class Instruction:
line_num: int
offset: int
opname: str
arg: int
argval: object
def get_code_obj_name(s):
match = re.match(r"<code object <?(.*?)>? at (0x[0-9a-f]+).*>", s)
return match.group(1) + "_" + match.group(2)
def dis_to_instructions(disasm):
""" converts output of dis.dis into list of instructions"""
line_num = None
instructions = []
for line in disasm.split("\n"):
match = re.search(
r"( ?(?P<line_num>\d+)[ >]+)?(?P<offset>\d+) (?P<opname>[A-Z_]+)(?:\s+(?P<arg>\d+)(?: \((?P<argval>.+)\))?)?",
line
)
if match is not None:
if match["line_num"]:
line_num = int(match["line_num"])
offset = int(match["offset"])
opname = match["opname"]
if match["arg"] is not None:
arg = int(match["arg"])
else:
arg = None
if opname == "EXTENDED_ARG":
continue
argval = match["argval"]
instructions.append(Instruction(line_num, offset, opname, arg, argval))
return instructions
def is_store(instruction):
return instruction.opname in ("STORE_FAST", "STORE_NAME", "STORE_GLOBAL", "STORE_DEREF")
def is_identifier(s: str):
return str.isidentifier(s) and s not in ("True", "False", "None")
def instructions_to_asts(instructions, flags=0):
""" converts list of instruction into an AST"""
is_comp = flags & COMPREHENSION
is_genexpr = flags & GEN_EXPR
raw_jumps = flags & RAW_JUMPS
temp_name = "__temp" # name of temporary list/set/etc for comprehensions
indent = 0
arg_names = []
var_names = []
# list of all future changes in indentation (caused by loops,if,etc). format is (offset,change)
indent_changes = []
ast = []
instruction = None
def | (operation):
if raw_jumps:
ast.append((indent,operation,instruction.offset))
else:
ast.append((indent, operation))
def pop():
return ast.pop()[1]
def pop_n(n):
nonlocal ast
if n > 0: # ast[:-0] would be the empty list and ast[-0:] would be every element in ast
if raw_jumps:
ret = [x for _, x,_ in ast[-n:]]
else:
ret = [x for _, x in ast[-n:]]
ast = ast[:-n]
else:
ret = []
return ret
def peek(i=1):
return ast[-i][1]
def dedent_jump_to(offset):
for instruction2 in instructions:
if instruction2.opname == "JUMP_ABSOLUTE" and instruction2.arg == offset:
indent_changes.append((instruction2.offset + 2, -1))
break
def push_invalid(instruction):
push(operations.Invalid(instruction.opname, instruction.arg, instruction.argval))
i = 0
while i < len(instructions):
instruction = instructions[i]
opname = instruction.opname
if indent_changes:
to_remove = []
for indent_change in indent_changes:
if indent_change[0] == instruction.offset:
indent += indent_change[1]
to_remove.append(indent_change)
for indent_change in to_remove:
indent_changes.remove(indent_change)
if opname in ("LOAD_METHOD", "LOAD_ATTR"):
push(operations.Attribute(pop(), instruction.argval))
elif opname.startswith("LOAD"):
var_name = instruction.argval
if var_name.startswith(".") and (is_comp or is_genexpr):
var_name = "__" + var_name[1:]
if is_identifier(var_name):
if opname != "LOAD_GLOBAL" and var_name not in var_names:
arg_names.append(var_name)
var_names.append(var_name)
push(operations.Value(var_name))
elif is_store(instruction):
var_name = instruction.argval
if is_identifier(var_name):
var_names.append(var_name)
push(operations.Assign(var_name, pop()))
elif opname == "YIELD_VALUE":
push(operations.Yield(pop()))
elif opname == "RETURN_VALUE":
if is_comp:
push(operations.Return(operations.Value(temp_name)))
else:
push(operations.Return(pop()))
elif opname == "BUILD_MAP":
count = int(instruction.arg)
args = pop_n(2 * count)
push(operations.BuildMap(args))
elif opname == "BUILD_SLICE":
if instruction.arg == 2:
stop = pop()
start = pop()
push(operations.Slice(start, stop))
else:
step = pop()
stop = pop()
start = pop()
push(operations.Slice(start, stop, step))
elif opname.startswith("BUILD"):
# used to create lists, sets and tuples
operation = opname[len("BUILD_"):]
count = int(instruction.arg)
args = pop_n(count)
push(operations.build_operation(operation)(args))
elif opname == "GET_ITER":
push(operations.Iter(pop()))
elif opname == "FOR_ITER":
iterator = pop()
if isinstance(iterator, operations.Iter):
iterator = iterator.val
assign_op = instructions[i + 1] # get next instruction
i += 1
if is_store(assign_op):
index = assign_op.argval
var_names.append(index)
push(operations.ForLoop([index], iterator))
indent += 1
#detect end of loop
loop_end = int(instruction.argval[len("to "):])
indent_changes.append((loop_end, -1))
elif assign_op.opname == "UNPACK_SEQUENCE":
# loops like for i,j in zip(x,y)
num_vals = assign_op.arg
assign_ops = instructions[i + 1:i + num_vals + 1]
i += num_vals #skip all stores
indicies = []
for op in assign_ops:
var_name = op.argval
var_names.append(var_name)
indicies.append(var_name)
push(operations.ForLoop(indicies, iterator))
indent += 1
#detect end of loop
loop_end = int(instruction.argval[len("to "):])
indent_changes.append((loop_end, -1))
else:
push_invalid(instruction)
elif opname.startswith("POP_JUMP"): # if statements and while loops
val = pop()
if opname.endswith("TRUE"):
val = operations.unary_operation("not")(val)
jump_target = int(instruction.arg)
if raw_jumps:
val=val.val if opname.endswith("TRUE") else operations.unary_operation("not")(val)
push(operations.Jump(jump_target,val))
else:
if jump_target > instruction.offset:
indent_changes.append((jump_target, -1))
for instruction2 in instructions:
if instruction2.offset == jump_target - 2:
is_while = False
if instruction2.opname == "JUMP_ABSOLUTE" and instruction2.arg < instruction.offset:
for instruction3 in instructions:
if instruction3.offset > instruction.offset:
break
if instruction3.offset >= instruction2.arg and (
instruction3.opname.startswith("POP_JUMP") or
instruction3.opname == "FOR_ITER"
):
#either a if statement that is last statement in a loop or a while loop
is_while = instruction3.offset == instruction.offset
break
if is_while:
#instruction before jump target jumps above us and no POP_JUMPs between;
# this is a while loop
push(operations.WhileLoop(val))
if not is_while: # this is a normal if
if opname == "POP_JUMP_IF_TRUE" and instruction2.opname == "POP_JUMP_IF_FALSE":
#TODO: fix if statement with "or" operators
pass
if ast and isinstance(peek(), operations.Else):
pop()
indent -= 1
push(operations.Elif(val))
else:
push(operations.If(val))
break
else:
# this is a if statement that is the last statement in a for loop,
# so it jumps directly to the top of the for loop, so we dedent the JUMP_ABSOLUTE again
dedent_jump_to(jump_target)
push(operations.If(val))
indent += 1
elif opname == "JUMP_ABSOLUTE":
# used for many things, including continue, break, and jumping to the top of a loop
#TODO: continue in while loops
jump_target = int(instruction.arg)
if raw_jumps:
push(operations.Jump(jump_target))
else:
for instruction2 in instructions:
if instruction2.offset == jump_target:
if instruction2.opname == "FOR_ITER":
loop_end = int(instruction2.argval[len("to "):]) - 2
if loop_end != instruction.offset: # this isn't the end of the loop, but its still jumping, so this is a "continue"
if not isinstance(peek(), operations.Break):
push(operations.Continue())
#otherwise this is a normal jump to the top of the loop, so do nothing
else:
for instruction3 in instructions:
if (instruction3.opname == "FOR_ITER" and int(
instruction3.argval[len("to "):]
) == instruction2.offset) or (
instruction3.opname.startswith("POP_JUMP") and
instruction3.arg == instruction2.offset
):
#there is a loop also jumping to the same spot, so this is a "break"
push(operations.Break())
break
break
elif opname == "JUMP_FORWARD":
# used to jump over the else statement from the if statement's branch
jump_target = int(instruction.argval[len("to "):])
if raw_jumps:
push(operations.Jump(jump_target))
else:
indent -= 1
push(operations.Else())
indent += 2
indent_changes.append((jump_target, -1))
elif opname == "IMPORT_NAME":
fromlist = pop()
level = int(pop().val)
if level == 0: #absolute import
next_op = instructions[i + 1]
if is_store(next_op):
i += 1
alias = next_op.argval if next_op.argval != instruction.argval else None
push(operations.Import(instruction.argval, alias))
elif next_op.opname == "IMPORT_FROM":
names = []
i += 1
while next_op.opname == "IMPORT_FROM":
i += 1
assign_op = instructions[i]
names.append(assign_op.argval)
i += 1
next_op = instructions[i]
i -= 1
push(operations.FromImport(instruction.argval, names))
elif next_op.opname == "IMPORT_STAR":
i += 1
push(operations.FromImport(instruction.argval, [operations.Value("*")]))
else:
push_invalid(instruction)
else: #TODO:relative import
push_invalid(instruction)
elif opname == "RAISE_VARARGS":
argc = instruction.arg
if argc == 0:
push(operations.Raise())
elif argc == 1:
push(operations.Raise(pop()))
else:
push(operations.Raise(pop(), pop()))
elif opname in ("CALL_FUNCTION", "CALL_METHOD"):
argc = int(instruction.arg)
args = pop_n(argc)
func = pop()
push(operations.FunctionCall(func, args))
elif opname == "CALL_FUNCTION_KW":
# top of stack is a tuple of kwarg names pushed by LOAD_CONST
kwarg_names = literal_eval(pop().val)
kwargs = {}
for name in kwarg_names:
kwargs[name] = pop()
argc = int(instruction.arg) - len(kwargs)
args = pop_n(argc)
func = pop()
push(operations.FunctionCall(func, args, kwargs))
elif opname == "CALL_FUNCTION_EX":
if instruction.arg & 1: #lowest bit set
kwargs = pop()
args = pop()
func = pop()
push(
operations.FunctionCall(
func, [operations.UnpackSeq(args),
operations.UnpackDict(kwargs)]
)
)
else:
args = pop()
func = pop()
push(operations.FunctionCall(func, [operations.UnpackSeq(args)]))
elif opname == "MAKE_FUNCTION": # list comps, lambdas and nested functions
#TODO: handle the other flags
flags = instruction.arg
pop() # qualified name
code_obj = pop()
func_name = get_code_obj_name(code_obj.val)
if flags & 8:
closure_vars = pop().args
push(operations.Closure(func_name, closure_vars))
else:
push(operations.Value(func_name))
elif opname in ("LIST_APPEND", "SET_ADD"): #used in comprehensions
func = opname[opname.index("_") + 1:].lower()
if is_comp:
push(
operations.FunctionCall(
operations.Attribute(operations.Value(temp_name), operations.Value(func)),
[pop()]
)
)
else:
push_invalid(instruction)
elif opname == "MAP_ADD": #used in dict comprehensions
if is_comp:
key = pop()
val = pop()
push(operations.SubscriptAssign(key, operations.Value(temp_name), val))
else:
push_invalid(instruction)
elif opname == "UNPACK_SEQUENCE":
push(operations.UnpackSeq(pop()))
elif opname == "UNPACK_EX": # unpacking assignment
num_vals_before = instruction.arg & 0xff
num_vals_after = (instruction.arg >> 8) & 0xff #high byte
num_vals = num_vals_before + num_vals_after
assign_ops = []
for j in range(num_vals_before):
assign_ops.append(instructions[i + j + 1])
j += 1
assign_op = instructions[i + j + 1]
if is_store(assign_op): #list unpack
num_vals += 1
assign_op.argval = "*" + assign_op.argval
assign_ops.append(assign_op)
j += 1
for j in range(j, j + num_vals_after):
assign_ops.append(instructions[i + j + 1])
i += num_vals #skip all stores
names = []
for op in assign_ops:
var_name = op.argval
var_names.append(var_name)
names.append(var_name)
push(operations.Assign(operations.build_operation("tuple")(names), pop()))
elif opname == "COMPARE_OP":
right = pop()
left = pop()
push(operations.Comparison(instruction.argval, left, right))
elif opname == "BINARY_SUBSCR":
if isinstance(peek(), operations.Slice):
slice_ = pop()
val = pop()
push(operations.SubscriptSlice(val, slice_.start, slice_.stop, slice_.step))
else:
subscript = pop()
val = pop()
push(operations.Subscript(val, subscript))
elif opname == "STORE_SUBSCR":
push(operations.SubscriptAssign(pop(), pop(), pop()))
elif opname.startswith("UNARY"):
operation = opname[len("UNARY_"):]
push(operations.unary_operation(operation)(pop()))
elif opname.startswith("BINARY"):
operation = opname[len("BINARY_"):]
right = pop()
left = pop()
push(operations.binary_operation(operation)(left, right))
elif opname.startswith("INPLACE"):
operation = opname[len("INPLACE_"):]
right = pop()
left = pop()
if is_store(instructions[i + 1]):
i += 1
push(operations.inplace_operation(operation)(left, right))
else:
push_invalid(instruction)
elif opname not in ("NOP", "POP_TOP"):
push_invalid(instruction)
if i == 0 and is_comp: #give the temporary for list comps a name
push(operations.Assign(operations.Value(temp_name), pop()))
i += 1
return (ast, arg_names)
def asts_to_code(asts, flags=0,tab_char="\t"):
""" converts an ast into python code"""
if flags& RAW_JUMPS:
max_offset_len = len(str(asts[-1][2]))
return "\n".join(str(offset).ljust(max_offset_len," ") + tab_char * (indent + 1) + str(ast) for indent, ast, offset in asts)
else:
return "\n".join(tab_char * indent + str(ast) for indent, ast in asts)
def decompile(disasm, flags=0, tab_char="\t"):
instructions = dis_to_instructions(disasm)
asts, arg_names = instructions_to_asts(instructions, flags)
return asts_to_code(asts, flags,tab_char), arg_names
def split_funcs(disasm):
""" splits out comprehensions from the main func or functions from the module"""
start_positions = [0]
end_positions = []
names = []
if not disasm.startswith("Disassembly"):
names.append("main")
for match in re.finditer(r"Disassembly of (.+):", disasm):
end_positions.append(match.start())
start_positions.append(match.end())
name = match.group(1)
if name.startswith("<"):
names.append(get_code_obj_name(name))
else:
names.append(name)
end_positions.append(len(disasm))
if disasm.startswith("Disassembly"):
start_positions.pop(0)
end_positions.pop(0)
for start, end, name in zip(start_positions, end_positions, names):
yield (name, disasm[start:end])
def get_flags(name):
if name.startswith("genexpr"):
return GEN_EXPR
elif "comp" in name:
return COMPREHENSION
else:
return 0
def decompile_all(disasm,flags=0,tab_char="\t"):
disasm = re.sub(r"^#.*\n?", "", disasm, re.MULTILINE).strip() # ignore comments
for name, func in split_funcs(disasm):
yield name, *decompile(func, get_flags(name)|flags, tab_char)
def pretty_decompile(disasm,flags=0,tab_char="\t"):
ret = []
for name, code, arg_names in decompile_all(disasm, flags, tab_char):
ret.append(
f"def {name}({','.join(arg_names)}):\n" +
"\n".join(tab_char + line for line in code.split("\n"))
)
return "\n".join(ret)
| push | identifier_name |
docker_image_dest.go | package docker
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type dockerImageDestination struct {
ref dockerReference
c *dockerClient
// State
manifestDigest digest.Digest // or "" if not yet known.
}
// newImageDestination creates a new ImageDestination for the specified image reference.
func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
c, err := newDockerClientFromRef(sys, ref, true, "pull,push")
if err != nil {
return nil, err
}
return &dockerImageDestination{
ref: ref,
c: c,
}, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *dockerImageDestination) Reference() types.ImageReference {
return d.ref
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dockerImageDestination) Close() error {
return nil
}
func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
return []string{
imgspecv1.MediaTypeImageManifest,
manifest.DockerV2Schema2MediaType,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
if err := d.c.detectProperties(ctx); err != nil {
return err
}
switch {
case d.c.signatureBase != nil:
return nil
case d.c.supportsSignatures:
return nil
default:
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
}
func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.Compress
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
return false
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
}
// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }
func (c *sizeCounter) Write(p []byte) (n int, err error) {
c.size += int64(len(p))
return len(p), nil
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
if inputInfo.Digest.String() != "" {
haveBlob, size, err := d.HasBlob(ctx, inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
}
}
// FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
}
uploadLocation, err := res.Location()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
digester := digest.Canonical.Digester()
sizeCounter := &sizeCounter{}
tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth)
if err != nil {
logrus.Debugf("Error uploading layer chunked, response %#v", res)
return types.BlobInfo{}, err
}
defer res.Body.Close()
computedDigest := digester.Digest()
uploadLocation, err = res.Location()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
// FIXME: DELETE uploadLocation on failure
locationQuery := uploadLocation.Query()
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
locationQuery.Set("digest", computedDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation)
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" {
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
if err != nil {
return false, -1, err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
logrus.Debugf("... already exists")
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
default:
return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
}
}
func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
}
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error {
	// Renamed from `digest` to `dgst` so the local no longer shadows the
	// imported go-digest package.
	dgst, err := manifest.Digest(m)
	if err != nil {
		return err
	}
	// Remember the digest so PutSignatures can associate signatures with this manifest.
	d.manifestDigest = dgst
	refTail, err := d.ref.tagOrDigest()
	if err != nil {
		return err
	}
	path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
	headers := map[string][]string{}
	mimeType := manifest.GuessMIMEType(m)
	if mimeType != "" {
		headers["Content-Type"] = []string{mimeType}
	}
	res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if !successStatus(res.StatusCode) {
		err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
		// Surface "manifest invalid"-style registry errors as ManifestTypeRejectedError
		// so callers can retry with a different manifest type.
		if isManifestInvalidError(errors.Cause(err)) {
			err = types.ManifestTypeRejectedError{Err: err}
		}
		return err
	}
	return nil
}
// successStatus reports whether status is a successful HTTP response code,
// i.e. whether it lies in the range 200 - 399 inclusive (3xx redirects count
// as success here).
func successStatus(status int) bool {
	if status < 200 {
		return false
	}
	return status < 400
}
// isManifestInvalidError returns true iff err from client.HandleErrorReponse is a “manifest invalid” error.
func isManifestInvalidError(err error) bool {
	// Renamed the local from `errors` to `errs` so it no longer shadows the
	// imported github.com/pkg/errors package.
	errs, ok := err.(errcode.Errors)
	if !ok || len(errs) == 0 {
		return false
	}
	ec, ok := errs[0].(errcode.ErrorCoder)
	if !ok {
		return false
	}
	// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
	// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
	// when uploading to a tag (because it can’t find a matching tag inside the manifest)
	return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
}
// PutSignatures uploads the given signatures for the manifest previously
// written via PutManifest. A configured lookaside location takes precedence;
// otherwise the registry's X-Registry-Supports-Signatures extension is used.
func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
	// Do not fail if we don’t really need to support signatures.
	if len(signatures) == 0 {
		return nil
	}
	if err := d.c.detectProperties(ctx); err != nil {
		return err
	}
	if d.c.signatureBase != nil {
		return d.putSignaturesToLookaside(signatures)
	}
	if d.c.supportsSignatures {
		return d.putSignaturesToAPIExtension(ctx, signatures)
	}
	return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
// Skip dealing with the manifest digest if not necessary.
if len(signatures) == 0 {
return nil
}
if d.manifestDigest.String() == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
return errors.Errorf("Unknown manifest digest, can't add signatures")
}
// Write each signature to the lookaside URL derived from the manifest digest
// and the signature's index.
// NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
err := d.putOneSignature(url, signature)
if err != nil {
return err
}
}
// Remove any other signatures, if present.
// We stop at the first missing signature; if a previous deleting loop aborted
// prematurely, this may not clean up all of them, but one missing signature
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
missing, err := d.c.deleteOneSignature(url)
if err != nil {
return err
}
if missing {
break
}
}
return nil
}
// putOneSignature stores one signature to url.
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
	// Reject non-file schemes up front with guard clauses; only a local
	// (staging) location can be written to directly.
	if url.Scheme == "http" || url.Scheme == "https" {
		return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
	}
	if url.Scheme != "file" {
		return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
	}
	logrus.Debugf("Writing to %s", url.Path)
	if err := os.MkdirAll(filepath.Dir(url.Path), 0755); err != nil {
		return err
	}
	return ioutil.WriteFile(url.Path, signature, 0644)
}
// deleteOneSignature deletes a signature from url, if it exists.
// If it successfully determines that the signature does not exist, returns (true, nil)
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
	switch url.Scheme {
	case "file":
		logrus.Debugf("Deleting %s", url.Path)
		err := os.Remove(url.Path)
		if err != nil && os.IsNotExist(err) {
			// An already-absent file is the expected "nothing to delete" case.
			return true, nil
		}
		return false, err
	case "http", "https":
		// Fixed copy-pasted message: this is the delete path, not the write path.
		return false, errors.Errorf("Deleting a signature from a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
	default:
		return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
	}
}
// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error {
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
if len(signatures) == 0 {
return nil
}
if d.manifestDigest.String() == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
return errors.Errorf("Unknown manifest digest, can't add signatures")
}
// Because image signatures are a shared resource in Atomic Registry, the default upload
// always adds signatures. Eventually we should also allow removing signatures,
// but the X-Registry-Supports-Signatures API extension does not support that yet.
existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest)
if err != nil {
return err
}
existingSigNames := map[string]struct{}{}
for _, sig := range existingSignatures.Signatures {
existingSigNames[sig.Name] = struct{}{}
}
sigExists:
for _, newSig := range signatures {
for _, existingSig := range existingSignatures.Signatures {
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
}
}
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
var signatureName string
for {
randBytes := make([]byte, 16)
n, err := rand.Read(randBytes)
if err != nil || n != 16 {
return errors.Wrapf(err, "Error generating random signature len %d", n)
}
signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
break
}
}
sig := extensionSignature{
Version: extensionSignatureSchemaVersion,
Name: signatureName,
Type: extensionSignatureTypeAtomic,
Content: newSig,
}
body, err := json.Marshal(sig)
if err != nil {
return err
}
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth)
if err != nil {
return | .Body.Close()
if res.StatusCode != http.StatusCreated {
body, err := ioutil.ReadAll(res.Body)
if err == nil {
logrus.Debugf("Error body %s", string(body))
}
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
}
}
return nil
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *dockerImageDestination) Commit(ctx context.Context) error {
// Registry uploads take effect as each request completes, so nothing is left to do here.
return nil
}
| err
}
defer res | conditional_block |
docker_image_dest.go | package docker
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// dockerImageDestination implements an ImageDestination that uploads to a
// Docker registry via a dockerClient.
type dockerImageDestination struct {
ref dockerReference // the reference this destination was created for
c *dockerClient // client used for all registry requests
// State
manifestDigest digest.Digest // or "" if not yet known.
}
// newImageDestination creates a new ImageDestination for the specified image reference.
func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) |
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *dockerImageDestination) Reference() types.ImageReference {
// Trivial accessor; the value is fixed when the destination is constructed.
return d.ref
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dockerImageDestination) Close() error {
// This destination allocates no per-instance resources, so there is nothing to release.
return nil
}
// SupportedManifestMIMETypes lists the manifest MIME types this destination accepts.
// NOTE(review): the slice order appears to encode preference (OCI, schema2, then
// schema1 signed/unsigned) — confirm against callers before relying on it.
func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
return []string{
imgspecv1.MediaTypeImageManifest,
manifest.DockerV2Schema2MediaType,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
	if err := d.c.detectProperties(ctx); err != nil {
		return err
	}
	// Signatures can be stored either in a configured lookaside location or
	// via the registry's X-Registry-Supports-Signatures API extension.
	if d.c.signatureBase != nil || d.c.supportsSignatures {
		return nil
	}
	return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
// DesiredLayerCompression indicates how this destination wants layers handled:
// it requests that layers be compressed before upload.
func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.Compress
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
return true // foreign layers are kept as URL references and not uploaded
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
return false // a registry is not tied to the local runtime OS
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
// Returning false asks callers to edit the embedded reference for this destination.
return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
}
// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }

// Write discards p while adding its length to the running total; it always
// reports full success.
func (c *sizeCounter) Write(p []byte) (n int, err error) {
	n = len(p)
	c.size += int64(n)
	return n, nil
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
// If the digest is known in advance and the repository already has the blob,
// skip the upload entirely.
if inputInfo.Digest.String() != "" {
haveBlob, size, err := d.HasBlob(ctx, inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
}
}
// FIXME? Chunked upload, progress reporting, etc.
// Step 1: open an upload session (POST); the registry replies 202 Accepted
// with a Location header naming the session URL.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
}
uploadLocation, err := res.Location()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
// Step 2: stream the blob body (PATCH), computing its canonical digest and
// byte count on the fly through the tee reader.
digester := digest.Canonical.Digester()
sizeCounter := &sizeCounter{}
tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth)
if err != nil {
logrus.Debugf("Error uploading layer chunked, response %#v", res)
return types.BlobInfo{}, err
}
defer res.Body.Close()
computedDigest := digester.Digest()
uploadLocation, err = res.Location()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
// FIXME: DELETE uploadLocation on failure
// Step 3: commit the session (PUT) with the computed digest as a query parameter.
locationQuery := uploadLocation.Query()
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
locationQuery.Set("digest", computedDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation)
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
	if info.Digest == "" {
		// Fixed: the message previously started with a stray `"` left inside the raw-string literal.
		return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
	}
	// A HEAD request against the blob endpoint: the status code alone tells us
	// whether the blob exists, without transferring any data.
	checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
	logrus.Debugf("Checking %s", checkPath)
	res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
	if err != nil {
		return false, -1, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case http.StatusOK:
		logrus.Debugf("... already exists")
		return true, getBlobSize(res), nil
	case http.StatusUnauthorized:
		logrus.Debugf("... not authorized")
		return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
	case http.StatusNotFound:
		logrus.Debugf("... not present")
		return false, -1, nil
	default:
		return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
	}
}
// ReapplyBlob informs the destination that a blob already present (as reported
// by HasBlob) is reused by this image; no data transfer is needed, so the
// input BlobInfo is returned unchanged.
func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
}
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error {
// NOTE(review): the local variable shadows the imported go-digest package; consider renaming.
digest, err := manifest.Digest(m)
if err != nil {
return err
}
// Recorded so PutSignatures can later refer to this manifest.
d.manifestDigest = digest
refTail, err := d.ref.tagOrDigest()
if err != nil {
return err
}
path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
headers := map[string][]string{}
mimeType := manifest.GuessMIMEType(m)
if mimeType != "" {
headers["Content-Type"] = []string{mimeType}
}
res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth)
if err != nil {
return err
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
// Translate registry "manifest invalid" responses so callers can retry with another manifest type.
if isManifestInvalidError(errors.Cause(err)) {
err = types.ManifestTypeRejectedError{Err: err}
}
return err
}
return nil
}
// successStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
// Note that 3xx redirect codes are treated as success here.
func successStatus(status int) bool {
return status >= 200 && status <= 399
}
// isManifestInvalidError returns true iff err from client.HandleErrorReponse is a “manifest invalid” error.
func isManifestInvalidError(err error) bool {
// NOTE(review): this local shadows the imported errors package within the function; consider renaming.
errors, ok := err.(errcode.Errors)
if !ok || len(errors) == 0 {
return false
}
// Only the first error in the slice is inspected.
ec, ok := errors[0].(errcode.ErrorCoder)
if !ok {
return false
}
// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
// when uploading to a tag (because it can’t find a matching tag inside the manifest)
return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
}
// PutSignatures uploads signatures for the manifest previously written via PutManifest.
func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
// Do not fail if we don’t really need to support signatures.
if len(signatures) == 0 {
return nil
}
// Probe the registry's signature capabilities before choosing a storage path.
if err := d.c.detectProperties(ctx); err != nil {
return err
}
switch {
case d.c.signatureBase != nil:
// A configured lookaside location takes precedence over the API extension.
return d.putSignaturesToLookaside(signatures)
case d.c.supportsSignatures:
return d.putSignaturesToAPIExtension(ctx, signatures)
default:
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
}
// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
// Skip dealing with the manifest digest if not necessary.
if len(signatures) == 0 {
return nil
}
if d.manifestDigest.String() == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
return errors.Errorf("Unknown manifest digest, can't add signatures")
}
// Write each signature to the lookaside URL derived from the manifest digest
// and the signature's index.
// NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
err := d.putOneSignature(url, signature)
if err != nil {
return err
}
}
// Remove any other signatures, if present.
// We stop at the first missing signature; if a previous deleting loop aborted
// prematurely, this may not clean up all of them, but one missing signature
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
missing, err := d.c.deleteOneSignature(url)
if err != nil {
return err
}
if missing {
break
}
}
return nil
}
// putOneSignature stores one signature to url.
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
switch url.Scheme {
case "file":
logrus.Debugf("Writing to %s", url.Path)
// Create the parent directory tree first; 0755/0644 are conventional
// world-readable permissions.
err := os.MkdirAll(filepath.Dir(url.Path), 0755)
if err != nil {
return err
}
err = ioutil.WriteFile(url.Path, signature, 0644)
if err != nil {
return err
}
return nil
case "http", "https":
// Only a local staging (file) location can be written to directly.
return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
default:
return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
}
}
// deleteOneSignature deletes a signature from url, if it exists.
// If it successfully determines that the signature does not exist, returns (true, nil)
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
switch url.Scheme {
case "file":
logrus.Debugf("Deleting %s", url.Path)
err := os.Remove(url.Path)
if err != nil && os.IsNotExist(err) {
// Already absent: report missing=true with no error.
return true, nil
}
return false, err
case "http", "https":
// NOTE(review): message says "Writing" but this is the delete path — looks copy-pasted; confirm and reword.
return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
default:
return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
}
}
// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error {
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
if len(signatures) == 0 {
return nil
}
if d.manifestDigest.String() == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
return errors.Errorf("Unknown manifest digest, can't add signatures")
}
// Because image signatures are a shared resource in Atomic Registry, the default upload
// always adds signatures. Eventually we should also allow removing signatures,
// but the X-Registry-Supports-Signatures API extension does not support that yet.
existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest)
if err != nil {
return err
}
// Collect the names already in use so freshly generated names can be checked
// for uniqueness below.
existingSigNames := map[string]struct{}{}
for _, sig := range existingSignatures.Signatures {
existingSigNames[sig.Name] = struct{}{}
}
// The label lets the inner scan skip a signature the registry already stores
// with byte-identical content.
sigExists:
for _, newSig := range signatures {
for _, existingSig := range existingSignatures.Signatures {
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
}
}
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
var signatureName string
for {
randBytes := make([]byte, 16)
n, err := rand.Read(randBytes)
if err != nil || n != 16 {
return errors.Wrapf(err, "Error generating random signature len %d", n)
}
// Name format: "<manifest digest>@<32 hex chars of randomness>".
signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
break
}
}
sig := extensionSignature{
Version: extensionSignatureSchemaVersion,
Name: signatureName,
Type: extensionSignatureTypeAtomic,
Content: newSig,
}
body, err := json.Marshal(sig)
if err != nil {
return err
}
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
// Log the response body; it often carries the registry's explanation for the rejection.
body, err := ioutil.ReadAll(res.Body)
if err == nil {
logrus.Debugf("Error body %s", string(body))
}
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
}
}
return nil
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *dockerImageDestination) Commit(ctx context.Context) error {
// Registry uploads take effect as each request completes, so nothing is left to do here.
return nil
}
| {
c, err := newDockerClientFromRef(sys, ref, true, "pull,push")
if err != nil {
return nil, err
}
return &dockerImageDestination{
ref: ref,
c: c,
}, nil
} | identifier_body |
docker_image_dest.go | package docker
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type dockerImageDestination struct {
ref dockerReference
c *dockerClient
// State
manifestDigest digest.Digest // or "" if not yet known.
}
// newImageDestination creates a new ImageDestination for the specified image reference.
func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
	// The client is created with write=true and the "pull,push" scope used for uploads.
	regClient, err := newDockerClientFromRef(sys, ref, true, "pull,push")
	if err != nil {
		return nil, err
	}
	dest := &dockerImageDestination{
		ref: ref,
		c:   regClient,
	}
	return dest, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *dockerImageDestination) Reference() types.ImageReference {
return d.ref
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dockerImageDestination) Close() error {
return nil
}
func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
return []string{
imgspecv1.MediaTypeImageManifest,
manifest.DockerV2Schema2MediaType,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
if err := d.c.detectProperties(ctx); err != nil {
return err
}
switch {
case d.c.signatureBase != nil:
return nil
case d.c.supportsSignatures:
return nil
default:
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
}
func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.Compress
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
return false
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
}
// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }
func (c *sizeCounter) Write(p []byte) (n int, err error) {
c.size += int64(len(p))
return len(p), nil
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
if inputInfo.Digest.String() != "" {
haveBlob, size, err := d.HasBlob(ctx, inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
}
}
// FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
}
uploadLocation, err := res.Location()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
digester := digest.Canonical.Digester()
sizeCounter := &sizeCounter{}
tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth)
if err != nil {
logrus.Debugf("Error uploading layer chunked, response %#v", res)
return types.BlobInfo{}, err
}
defer res.Body.Close()
computedDigest := digester.Digest()
uploadLocation, err = res.Location()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
// FIXME: DELETE uploadLocation on failure
locationQuery := uploadLocation.Query()
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
locationQuery.Set("digest", computedDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation)
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" {
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
if err != nil {
return false, -1, err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
logrus.Debugf("... already exists")
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
default:
return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
}
}
func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
}
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error {
digest, err := manifest.Digest(m)
if err != nil {
return err
}
d.manifestDigest = digest
refTail, err := d.ref.tagOrDigest()
if err != nil {
return err
}
path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
headers := map[string][]string{}
mimeType := manifest.GuessMIMEType(m)
if mimeType != "" {
headers["Content-Type"] = []string{mimeType}
}
res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth)
if err != nil {
return err
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
if isManifestInvalidError(errors.Cause(err)) {
err = types.ManifestTypeRejectedError{Err: err}
}
return err
}
return nil
}
// successStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
func successStatus(status int) bool {
return status >= 200 && status <= 399
}
// isManifestInvalidError returns true iff err from client.HandleErrorReponse is a “manifest invalid” error.
func isManifestInvalidError(err error) bool {
errors, ok := err.(errcode.Errors)
if !ok || len(errors) == 0 {
return false
}
ec, ok := errors[0].(errcode.ErrorCoder)
if !ok {
return false
}
// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
// when uploading to a tag (because it can’t find a matching tag inside the manifest)
return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
}
func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
// Do not fail if we don’t really need to support signatures.
if len(signatures) == 0 {
return nil
}
if err := d.c.detectProperties(ctx); err != nil {
return err
}
switch {
case d.c.signatureBase != nil:
return d.putSignaturesToLookaside(signatures)
case d.c.supportsSignatures:
return d.putSignaturesToAPIExtension(ctx, signatures)
default:
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
}
// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
// Skip dealing with the manifest digest if not necessary.
if len(signatures) == 0 {
return nil
}
if d.manifestDigest.String() == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
return errors.Errorf("Unknown manifest digest, can't add signatures")
}
// NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
err := d.putOneSignature(url, signature)
if err != nil {
return err
}
}
// Remove any other signatures, if present.
// We stop at the first missing signature; if a previous deleting loop aborted
// prematurely, this may not clean up all of them, but one missing signature
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
missing, err := d.c.deleteOneSignature(url)
if err != nil {
return err
}
if missing {
break
}
}
return nil
}
// putOneSignature stores one signature to url.
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
switch url.Scheme {
case "file":
logrus.Debugf("Writing to %s", url.Path)
err := os.MkdirAll(filepath.Dir(url.Path), 0755)
if err != nil {
return err
}
err = ioutil.WriteFile(url.Path, signature, 0644)
if err != nil {
return err
}
return nil
case "http", "https":
return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
default:
return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
}
}
// deleteOneSignature deletes a signature from url, if it exists.
// If it successfully determines that the signature does not exist, returns (true, nil)
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
switch url.Scheme {
case "file":
logrus.Debugf("Deleting %s", url.Path)
err := os.Remove(url.Path)
if err != nil && os.IsNotExist(err) {
return true, nil
}
return false, err
case "http", "https":
return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) | return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
}
}
// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error {
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
if len(signatures) == 0 {
return nil
}
if d.manifestDigest.String() == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
return errors.Errorf("Unknown manifest digest, can't add signatures")
}
// Because image signatures are a shared resource in Atomic Registry, the default upload
// always adds signatures. Eventually we should also allow removing signatures,
// but the X-Registry-Supports-Signatures API extension does not support that yet.
existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest)
if err != nil {
return err
}
existingSigNames := map[string]struct{}{}
for _, sig := range existingSignatures.Signatures {
existingSigNames[sig.Name] = struct{}{}
}
sigExists:
for _, newSig := range signatures {
for _, existingSig := range existingSignatures.Signatures {
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
}
}
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
var signatureName string
for {
randBytes := make([]byte, 16)
n, err := rand.Read(randBytes)
if err != nil || n != 16 {
return errors.Wrapf(err, "Error generating random signature len %d", n)
}
signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
break
}
}
sig := extensionSignature{
Version: extensionSignatureSchemaVersion,
Name: signatureName,
Type: extensionSignatureTypeAtomic,
Content: newSig,
}
body, err := json.Marshal(sig)
if err != nil {
return err
}
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
body, err := ioutil.ReadAll(res.Body)
if err == nil {
logrus.Debugf("Error body %s", string(body))
}
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
}
}
return nil
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *dockerImageDestination) Commit(ctx context.Context) error {
return nil
} | default: | random_line_split |
docker_image_dest.go | package docker
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type dockerImageDestination struct {
ref dockerReference
c *dockerClient
// State
manifestDigest digest.Digest // or "" if not yet known.
}
// newImageDestination creates a new ImageDestination for the specified image reference.
func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
c, err := newDockerClientFromRef(sys, ref, true, "pull,push")
if err != nil {
return nil, err
}
return &dockerImageDestination{
ref: ref,
c: c,
}, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *dockerImageDestination) | () types.ImageReference {
return d.ref
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dockerImageDestination) Close() error {
return nil
}
func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
return []string{
imgspecv1.MediaTypeImageManifest,
manifest.DockerV2Schema2MediaType,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
}
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error {
if err := d.c.detectProperties(ctx); err != nil {
return err
}
switch {
case d.c.signatureBase != nil:
return nil
case d.c.supportsSignatures:
return nil
default:
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
}
func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.Compress
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
return true
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
return false
}
// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
}
// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }
func (c *sizeCounter) Write(p []byte) (n int, err error) {
c.size += int64(len(p))
return len(p), nil
}
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
if inputInfo.Digest.String() != "" {
haveBlob, size, err := d.HasBlob(ctx, inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
}
}
// FIXME? Chunked upload, progress reporting, etc.
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
logrus.Debugf("Uploading %s", uploadPath)
res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
}
uploadLocation, err := res.Location()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
digester := digest.Canonical.Digester()
sizeCounter := &sizeCounter{}
tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth)
if err != nil {
logrus.Debugf("Error uploading layer chunked, response %#v", res)
return types.BlobInfo{}, err
}
defer res.Body.Close()
computedDigest := digester.Digest()
uploadLocation, err = res.Location()
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}
// FIXME: DELETE uploadLocation on failure
locationQuery := uploadLocation.Query()
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
locationQuery.Set("digest", computedDigest.String())
uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth)
if err != nil {
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation)
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" {
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
}
checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
if err != nil {
return false, -1, err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
logrus.Debugf("... already exists")
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
default:
return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
}
}
func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
}
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error {
digest, err := manifest.Digest(m)
if err != nil {
return err
}
d.manifestDigest = digest
refTail, err := d.ref.tagOrDigest()
if err != nil {
return err
}
path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
headers := map[string][]string{}
mimeType := manifest.GuessMIMEType(m)
if mimeType != "" {
headers["Content-Type"] = []string{mimeType}
}
res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth)
if err != nil {
return err
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
if isManifestInvalidError(errors.Cause(err)) {
err = types.ManifestTypeRejectedError{Err: err}
}
return err
}
return nil
}
// successStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
func successStatus(status int) bool {
return status >= 200 && status <= 399
}
// isManifestInvalidError returns true iff err from client.HandleErrorReponse is a “manifest invalid” error.
func isManifestInvalidError(err error) bool {
errors, ok := err.(errcode.Errors)
if !ok || len(errors) == 0 {
return false
}
ec, ok := errors[0].(errcode.ErrorCoder)
if !ok {
return false
}
// ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
// ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
// when uploading to a tag (because it can’t find a matching tag inside the manifest)
return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
}
func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
// Do not fail if we don’t really need to support signatures.
if len(signatures) == 0 {
return nil
}
if err := d.c.detectProperties(ctx); err != nil {
return err
}
switch {
case d.c.signatureBase != nil:
return d.putSignaturesToLookaside(signatures)
case d.c.supportsSignatures:
return d.putSignaturesToAPIExtension(ctx, signatures)
default:
return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
}
}
// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
// FIXME? This overwrites files one at a time, definitely not atomic.
// A failure when updating signatures with a reordered copy could lose some of them.
// Skip dealing with the manifest digest if not necessary.
if len(signatures) == 0 {
return nil
}
if d.manifestDigest.String() == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
return errors.Errorf("Unknown manifest digest, can't add signatures")
}
// NOTE: Keep this in sync with docs/signature-protocols.md!
for i, signature := range signatures {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
err := d.putOneSignature(url, signature)
if err != nil {
return err
}
}
// Remove any other signatures, if present.
// We stop at the first missing signature; if a previous deleting loop aborted
// prematurely, this may not clean up all of them, but one missing signature
// is enough for dockerImageSource to stop looking for other signatures, so that
// is sufficient.
for i := len(signatures); ; i++ {
url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
if url == nil {
return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
}
missing, err := d.c.deleteOneSignature(url)
if err != nil {
return err
}
if missing {
break
}
}
return nil
}
// putOneSignature stores one signature to url.
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
switch url.Scheme {
case "file":
logrus.Debugf("Writing to %s", url.Path)
err := os.MkdirAll(filepath.Dir(url.Path), 0755)
if err != nil {
return err
}
err = ioutil.WriteFile(url.Path, signature, 0644)
if err != nil {
return err
}
return nil
case "http", "https":
return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
default:
return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
}
}
// deleteOneSignature deletes a signature from url, if it exists.
// If it successfully determines that the signature does not exist, returns (true, nil)
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
switch url.Scheme {
case "file":
logrus.Debugf("Deleting %s", url.Path)
err := os.Remove(url.Path)
if err != nil && os.IsNotExist(err) {
return true, nil
}
return false, err
case "http", "https":
return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
default:
return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
}
}
// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error {
// Skip dealing with the manifest digest, or reading the old state, if not necessary.
if len(signatures) == 0 {
return nil
}
if d.manifestDigest.String() == "" {
// This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
return errors.Errorf("Unknown manifest digest, can't add signatures")
}
// Because image signatures are a shared resource in Atomic Registry, the default upload
// always adds signatures. Eventually we should also allow removing signatures,
// but the X-Registry-Supports-Signatures API extension does not support that yet.
existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest)
if err != nil {
return err
}
existingSigNames := map[string]struct{}{}
for _, sig := range existingSignatures.Signatures {
existingSigNames[sig.Name] = struct{}{}
}
sigExists:
for _, newSig := range signatures {
for _, existingSig := range existingSignatures.Signatures {
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
}
}
// The API expect us to invent a new unique name. This is racy, but hopefully good enough.
var signatureName string
for {
randBytes := make([]byte, 16)
n, err := rand.Read(randBytes)
if err != nil || n != 16 {
return errors.Wrapf(err, "Error generating random signature len %d", n)
}
signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
if _, ok := existingSigNames[signatureName]; !ok {
break
}
}
sig := extensionSignature{
Version: extensionSignatureSchemaVersion,
Name: signatureName,
Type: extensionSignatureTypeAtomic,
Content: newSig,
}
body, err := json.Marshal(sig)
if err != nil {
return err
}
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
body, err := ioutil.ReadAll(res.Body)
if err == nil {
logrus.Debugf("Error body %s", string(body))
}
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
}
}
return nil
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *dockerImageDestination) Commit(ctx context.Context) error {
return nil
}
| Reference | identifier_name |
factor_data_preprocess.py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 09:47:11 2019
@author: admin
"""
import numpy as np
import pandas as pd
import os
from datetime import datetime
from itertools import chain
from functools import reduce
from sklearn.linear_model import LinearRegression
from utility.constant import info_cols, data_dair, non_processed_factors
from utility.relate_to_tushare import trade_days
from utility.tool0 import Data
import statsmodels.api as sm
from pyfinance.utils import rolling_windows
def align(df1, df2, *dfs):
# chain 是把多个迭代器合成一个迭代器
dfs_all = [df for df in chain([df1, df2], dfs)]
# 看df1和df2是否有单个列的
if any(len(df.shape) == 1 or 1 in df.shape for df in dfs_all):
dims = 1
else:
dims = 2
# 对日期求交期. reduce: 用传给reduce中的函数function(有两个参数)先对集合中的第 1、2个元素进行操作,
# 得到的结果再与第三个数据用function函数运算,最后得到一个结果。
mut_date_range = sorted(reduce(lambda x, y: x.intersection(y), (df.index for df in dfs_all)))
# 对columns求交集
mut_codes = sorted(reduce(lambda x, y: x.intersection(y), (df.columns for df in dfs_all)))
# 如果df1和df2都是多维的,求日期和代码的交集;否则,只求日期的交集
if dims == 2:
dfs_all = [df.loc[mut_date_range, mut_codes] for df in dfs_all]
elif dims == 1:
dfs_all = [df.loc[mut_date_range, :] for df in dfs_all]
return dfs_all
def drop_some(datdf):
global info_cols
cond = pd.Series(True, index=datdf.index)
# 最新一期数据
if pd.isnull(datdf['Pct_chg_nm']).all():
pass
else:
# 删除未上市股票
cond &= ~pd.isnull(datdf['Mkt_cap_float'])
# 删除未开盘股票
cond &= datdf['Is_open1']
datdf = datdf.loc[cond]
return datdf
def fill_na(data, ind='sw', fill_type='any'):
"""
缺失值填充:缺失值少于10%的情况下使用行业中位数代替
"""
global info_cols, non_processed_factors
datdf = data.copy()
if ind == 'sw':
datdf = datdf.loc[~pd.isnull(datdf['Industry_sw']), :]
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# datdf中剔除info_cols后的列名
facs_to_fill = datdf.columns.difference(set(tmp_info_cols))
datdf[facs_to_fill] = datdf[facs_to_fill].applymap(coerce_numeric)
datdf = datdf.replace([np.inf, -np.inf], np.nan) # 替换inf
# pd.to_numeric( datdf[facs_to_fill], errors='coerce')
if fill_type != 'any':
facs_to_fill = [fac for fac in facs_to_fill # 筛选缺失值少于10%的因子
if pd.isnull(datdf[fac]).sum() / len(datdf) <= 0.1]
else:
facs_to_fill = [fac for fac in facs_to_fill # 筛选缺失值少于10%的因子
if pd.isnull(datdf[fac]).any()]
if ind in ['zx', 'sw']:
grouped_column = f'Industry_{ind}'
elif ind == 'Second_industry':
grouped_column = 'Second_industry'
else:
raise Exception
for fac in facs_to_fill:
fac_median_by_ind = datdf[[grouped_column, fac]].groupby(grouped_column).median()
# 把dateframe转为dict,并取fac为key以解决 dict套dict 的问题
fac_ind_map = fac_median_by_ind.to_dict()[fac]
# 选出需要替换的数据
fac_to_fill = datdf.loc[pd.isnull(datdf[fac]), [grouped_column, fac]]
# map函数可以接受含有映射关系的字典。使用map做行业到其均值的映射。
fac_to_fill.loc[:, fac] = fac_to_fill[grouped_column].map(fac_ind_map)
# 添加回到datdf
datdf.loc[fac_to_fill.index, fac] = fac_to_fill[fac].values
if pd.isnull(datdf[fac]).any():
datdf[fac] = datdf[fac].fillna(np.nanmean(datdf[fac]))
# 针对sw行业存在缺失值的情况
if len(datdf) < len(data):
idx_to_append = data.index.difference(datdf.index)
datdf = pd.concat([datdf, data.loc[idx_to_append, :]])
datdf.sort_index()
return datdf
def coerce_numeric(s):
try:
return float(s)
except:
return np.nan
def winsorize(data, n=5):
"""
去极值:5倍中位数标准差法(5mad)
"""
global info_cols, non_processed_factors
datdf = data.copy()
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# 找出含有 nan 的列
if_contain_na = pd.isnull(datdf).sum().sort_values(ascending=True)
facs_to_remove = if_contain_na.loc[if_contain_na > 0].index.tolist()
if 'PCT_CHG_NM' in facs_to_remove:
facs_to_remove.remove('PCT_CHG_NM')
# 剔除含有 nan 的列 和 info_cols的列 后的所有列
facs_to_win = datdf.columns.difference(set(tmp_info_cols)).difference(set(tuple(facs_to_remove)))
dat_win = datdf[facs_to_win]
dat_win = dat_win.applymap(apply_func2)
fac_vals = dat_win.values
# np.median(fac_vals)
try:
dm = np.nanmedian(fac_vals, axis=0)
except Exception as e:
print('debug')
# 与均值差的绝对值的非 nan 均值
dm1 = np.nanmedian(np.abs(fac_vals - dm), axis=0)
if 0 in (dm + n*dm1):
# 针对存在去极值后均变为零的特殊情况(2009-05-27-'DP')
cut_points = [i for i in np.argwhere(dm1 == 0)[0]]
# 提取对应列,对其不进行去极值处理
facs_unchanged = [facs_to_win[cut_points[i]] for i in range(len(cut_points))]
# 仅对剩余列进行去极值处理
facs_to_win_median = facs_to_win.difference(set(tuple(facs_unchanged)))
dat_win_median = datdf[facs_to_win_median]
def fun1(x):
try:
r = float(x)
except Exception as e:
r = 0
return r
dat_win_median = dat_win_median.applymap(fun1)
fac_median_vals = dat_win_median.values
dmed = np.nanmedian(fac_median_vals, axis=0)
dmed1 = np.nanmedian(np.abs(fac_median_vals - dmed), axis=0)
dmed = np.repeat(dmed.reshape(1,-1), fac_median_vals.shape[0], axis=0)
dmed1 = np.repeat(dmed1.reshape(1,-1), fac_median_vals.shape[0], axis=0)
fac_median_vals = np.where(fac_median_vals > dmed + n*dmed1, dmed+n*dmed1,
np.where(fac_median_vals < dmed - n*dmed1, dmed - n*dmed1, fac_median_vals))
res1 = pd.DataFrame(fac_median_vals, index=dat_win_median.index, columns=dat_win_median.columns)
res2 = datdf[facs_unchanged]
res = pd.concat([res1, res2], axis=1)
else:
# 通过两个repeat,得到与fac_vals 中元素一一对应的极值
dm = np.repeat(dm.reshape(1, -1), fac_vals.shape[0], axis=0)
dm1 = np.repeat(dm1.reshape(1, -1), fac_vals.shape[0], axis=0)
# 替换
fac_vals = np.where(fac_vals > dm + n*dm1, dm+n*dm1,
np.where(fac_vals < dm - n*dm1, dm - n*dm1, fac_vals))
res = pd.DataFrame(fac_vals, index=dat_win.index, columns=dat_win.columns)
datdf[facs_to_win] = res
return datdf
def neutralize(data, ind_neu=True, size_neu=True, ind='sw', plate=None):
"""
中性化:因子暴露度对行业哑变量(ind_dummy_matrix)和对数流通市值(lncap_barra)
做线性回归, 取残差作为新的因子暴露度
"""
global info_cols, non_processed_factors
datdf = data.copy()
if ind == 'sw':
datdf = datdf.loc[~pd.isnull(datdf['Industry_sw']), :]
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# 剔除 info_cols 这些列后剩下的列名
cols_to_neu = datdf.columns.difference(set(tmp_info_cols))
y = datdf[cols_to_neu]
# 剔除含有nan的
y = y.dropna(how='any', axis=1)
cols_neu = y.columns
if size_neu:
# 对数市值
lncap = np.log(datdf[['Mkt_cap_float']])
# 若针对特定行业,则无需生成行业哑变量
use_dummies = 1
if not ind_neu:
use_dummies = 0
# 市值中性行业不中性
if use_dummies == 0 and size_neu:
X = lncap
# 行业中性市值不中性
elif use_dummies == 1 and not size_neu:
X = pd.get_dummies(datdf[f'Industry_{ind}'])
else:
# 使用 pd.get_dummies 生成行业哑变量
ind_dummy_matrix = pd.get_dummies(datdf[f'Industry_{ind}'])
# 合并对数市值和行业哑变量
X = pd.concat([lncap, ind_dummy_matrix], axis=1)
model = LinearRegression(fit_intercept=False)
# 一次对所有的y都做回归
try:
res = model.fit(X, y)
except Exception as e:
pd.isna(y).sum().sum()
pd.isna(X).sum().sum()
for col, se in y.iteritems():
pd.isna(se).sum()
(se == -np.inf).sum()
np.where(se == -np.inf)
np.where(se == np.inf)
print(col)
res = model.fit(X, se)
print('debug')
coef = res.coef_
residue = y - np.dot(X, coef.T)
# 断言语言, 如果为false则触发错误
assert len(datdf.index.difference(residue.index)) == 0
datdf.loc[residue.index, cols_neu] = residue
return datdf
def standardize(data):
"""
标准化:Z-score标准化方法,减去均值,除以标准差
"""
global info_cols, non_processed_factors
datdf = data.copy()
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
facs_to_sta = datdf.columns.difference(set(tmp_info_cols))
dat_sta = np.float64(datdf[facs_to_sta].values)
dat_sta = (dat_sta - np.nanmean(dat_sta, axis=0)) / np.nanstd(dat_sta, axis=0)
datdf.loc[:, facs_to_sta] = dat_sta
return datdf
def process_input_names(factor_names):
if factor_names == 'a':
factor_names = None
else:
factor_names = [f.replace("'", "").replace('"', "") for f in factor_names.split(',')]
return factor_names
# 向现有的月度因子数据中添加一列因子
def add_columns(added_date_path, columns_list, target_date_path):
'''
:param added_date_path: 添加数据的存储位置
:param columns_list: 准备添加的列名
:param target_date_path: 需要被添加的数据存储位置
:return:
'''
toadded_list = os.listdir(added_date_path)
save_list = os.listdir(target_date_path)
if pd.to_datetime(toadded_list[0].split('.')[0]) > pd.to_datetime(save_list[0].split('.')[0]) or \
pd.to_datetime(toadded_list[-1].split('.')[0]) < pd.to_datetime(save_list[-1].split('.')[0]):
print('被添加数据长度不够')
raise Exception
for panel_f in os.listdir(target_date_path):
toadded_dat = pd.read_csv(os.path.join(added_date_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
panel_dat = pd.read_csv(os.path.join(target_date_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
real_add_list = [col for col in columns_list if col not in panel_dat.columns]
if len(real_add_list) == 0:
continue
# join_axes关键字为沿用那个的index,忽略另一个df的其余数据
panel_dat = pd.concat([panel_dat, toadded_dat[real_add_list]], axis=1, join_axes=[panel_dat.index])
panel_dat.to_csv(os.path.join(target_date_path, panel_f),
encoding='gbk')
print('数据添加完毕')
# 根据给定的日度日期序列和月末日期,找到该序列中该月末日期的月初日期
def getmonthfirstdate(dt, md):
tmp1 = dt[dt.year == md.year]
tmp2 = tmp1[tmp1.month == md.month]
return tmp2[0]
# 得到给定日度时间序列的月末时间list
def get_monthends_series(dt):
if isinstance(dt, pd.DataFrame):
dt = list(dt)
p = 0
med = []
for i in range(len(dt)-1):
mon_t = dt[i].month
mon_n = dt[i+1].month
if mon_t != mon_n:
med.append(dt[i])
p = p + 1
return pd.Series(med)
def simple_func(pd_s, mv, type='median'):
# 市值加权
if type == 'mv_weighted':
tmpp = pd.concat([pd_s, mv], axis=1)
tmpp = tmpp.dropna(axis=0)
pd_s = tmpp[tmpp.columns[0]]
mv = tmpp[tmpp.columns[1]]
mv_weights = mv/np.sum(mv)
v = np.dot(np.mat(pd_s), np.mat(mv_weights).T)
return np.array(v).flatten()
# 中位数
elif type == 'median':
return np.nanmedian(pd_s)
elif type == 'mean':
return np.nanmean(pd_s)
else:
raise Exception
def apply_func(df, mv, type='median'):
# 市值加权
if type == 'mv_weighted':
mv_weights = mv/np.sum(mv)
v = np.dot(np.mat(df), np.mat(mv_weights).T)
return np.array(v).flatten()
# 中位数
elif type == 'median':
return df.median()
else:
raise Exception
def apply_func2(x):
if isinstance(x, str):
try:
x = float(x)
except Exception as e:
x = 0
else:
x
return x
def concat_factor_2(data_path, save_path, classified_df, factor_name, wei_type, save_name):
# 创建文件夹
if not os.path.exists(save_path):
os.makedirs(save_path)
cols = set(list(classified_df[classified_df.columns[0]]))
total_df = pd.DataFrame()
for panel_f in os.listdir(data_path):
print(panel_f)
panel_dat = pd.read_csv(os.path.join(data_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
tmp_df = pd.concat([panel_dat[[factor_name, 'MKT_CAP_FLOAT']], classified_df], axis=1, join='inner')
d = datetime.strptime(panel_f.split('.')[0], "%Y-%m-%d")
section_df = pd.DataFrame(index=[d], columns=cols)
grouped = tmp_df.groupby(classified_df.columns[0])
for pla, group in grouped:
group.dropna(how='any', inplace=True)
section_df.loc[d, pla] = simple_func(group[factor_name], mv=group['MKT_CAP_FLOAT'], type='mv_weighted')[0]
total_df = pd.concat([total_df, section_df], axis=0)
if '.' not in save_name:
save_name = save_name + '.csv'
total_df.index.name = 'date'
total_df.to_csv(os.path.join(save_path, save_name), encoding='gbk')
# 做一个累计净值走势图
# prod_total_df = (total_df + 1).cumprod()
# prod_total_df.to_csv(os.path.join(save_path, '累计_'+save_name), encoding='gbk')
# 把一个 截面数据添加到已经有的月度模式存储的文件中
def add_to_panels(dat, panel_path, f_name, freq_in_dat='M'):
"""说明: 把dat依次插入到panel_path的DF中,插入的列名为f_name, 根据dat的类型是DF还是Series可以判断
是每次插入的数据不同还是每次插入相同的数据。"""
print(f'开始添加{f_name}数据到目标文件夹')
panel = os.listdir(panel_path)
for month_date in panel:
hased_dat = pd.read_csv(os.path.join(panel_path, month_date), engine='python')
hased_dat = hased_dat.set_index('Code')
# 输入数据为 DataFrame, 那么按列插入
if isinstance(dat, pd.DataFrame):
mon_str = month_date.split('.')[0]
if mon_str in dat.columns:
# 当dat中的columns也是str格式,且日期与panel一样时,直接添加
hased_dat[f_name] = dat[mon_str]
else:
# 否则,当年、月相同,日不同时,需要变成datetime格式而且还有查找
target = datetime.strptime(mon_str, "%Y-%m-%d")
# 当dat的columns是datetime格式时
if isinstance(dat.columns[0], datetime):
if freq_in_dat == 'M':
finded = None
for col in dat.columns:
if col.year == target.year and col.month == target.month:
finded = col
break
if finded:
hased_dat[f_name] = dat[finded]
else:
print('{}该期未找到对应数据'.format(mon_str))
if freq_in_dat == 'D':
if target in dat.columns:
hased_dat[f_name] = dat[target]
else:
print('{}该期未找到对应数据'.format(mon_str))
else:
print('现有格式的还未完善')
raise Exception
# 输入数据为 DataFrame, 那么按列插入
elif isinstance(dat, pd.Series):
hased_dat[f_name] = dat[hased_dat.index]
try:
hased_dat = hased_dat.reset_index('Code')
except Exception as e:
print('debug')
if 'No' in hased_dat.columns:
del hased_dat['No']
hased_dat.index.name = 'No'
hased_dat.to_csv(os.path.join(panel_path, month_date), encoding='gbk')
print('完毕!')
# 从一个月度panel里面删除某个因子
def del_factor_from_panel(panel_path, factor_name):
print(f'开始从目标文件夹删除{factor_name}因子。')
panel = os.listdir(panel_path)
for month_date in panel:
dat_df = pd.read_csv(os.path.join(panel_path, month_date), engine='python')
dat_df = dat_df.set_index('Code')
if factor_name in dat_df.columns:
del dat_df[factor_name]
dat_df.reset_index(inplace=True)
dat_df.set_index('No', inplace=True)
dat_df.index.name = 'No'
dat_df.to_csv(os.path.join(panel_path, month_date), encoding='gbk')
print(f'完毕。')
return
def rolling_regress_1(y, x, window=5):
try:
rolling_ys = rolling_windows(y, window)
rolling_xs = rolling_windows(x, window)
except Exception as e:
print('debug')
bet = pd.Series()
# enumerate 形成带 i 的一个迭代器
for i, (rolling_x, rolling_y) in enumerate(zip(rolling_xs, rolling_ys)):
tmp_index = y.index[i + window - 1]
try:
model = sm.OLS(rolling_y, rolling_x)
result = model.fit()
params = result.params
b_v = params[0]
# print(result.params)
# print(result.summary())
except:
print(i)
raise
b = pd.Series(index=[tmp_index], data=b_v)
bet = pd.concat([bet, b])
return bet
# 计算不同股指合约的beta值
def compute_future_beta():
# 存储地址为:D:\Datebase_Stock\Date\index\stock_future\sf_beta.csv
data = Data()
sf_close_daily = data.sf_close_daily
index_price_daily = data.index_price_daily.T
# 求一下日期的交集,避免日期不同的潜在问题
tt = list(set(sf_close_daily.columns) & set(index_price_daily.index))
tt.sort() | index_price_daily = index_price_daily.loc[tt, :]
sf_beta = pd.DataFrame()
for c, se in sf_close_daily.iterrows():
if 'IC' in c:
tmp_i = index_price_daily['ZZ500']
elif 'IF' in c:
tmp_i = index_price_daily['HS300']
elif 'IH' in c:
tmp_i = index_price_daily['SZ50']
else:
print('Code Bug')
raise ValueError
# 去掉Nan
tmp_c = se.dropna()
tmp_i = tmp_i[tmp_c.index]
if len(tmp_c) > 22:
bet = rolling_regress_1(tmp_i, tmp_c, window=22)
sf_beta = pd.concat([sf_beta, pd.DataFrame({c: bet}).T], axis=0)
p = os.path.join(data_dair, 'index', 'stock_future')
data.save(sf_beta, 'sf_beta', p)
if __name__ == "__main__":
# compute_future_beta()
add_columns
# panel_path = r"D:\pythoncode\IndexEnhancement\因子预处理模块\因子"
# factor_name_list = ['Totaloperatingrevenueps_qoq_qoq']
# for f in factor_name_list:
# del_factor_from_panel(panel_path, f)
# panel_path = r'D:\pythoncode\IndexEnhancement\因子预处理模块\因子'
# add_fs_path = r'D:\pythoncode\IndexEnhancement\因子预处理模块\增加的因子\截面数据'
#
# f_list = os.listdir(add_fs_path)
# for fn in f_list:
# f_name = fn.split('.')[0]
# print(f_name)
# dat = pd.read_csv(os.path.join(add_fs_path, fn), engine='python')
# dat = dat.set_index(dat.columns[0])
# add_to_panels(dat, panel_path, f_name) |
sf_close_daily = sf_close_daily[tt] | random_line_split |
factor_data_preprocess.py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 09:47:11 2019
@author: admin
"""
import numpy as np
import pandas as pd
import os
from datetime import datetime
from itertools import chain
from functools import reduce
from sklearn.linear_model import LinearRegression
from utility.constant import info_cols, data_dair, non_processed_factors
from utility.relate_to_tushare import trade_days
from utility.tool0 import Data
import statsmodels.api as sm
from pyfinance.utils import rolling_windows
def align(df1, df2, *dfs):
# chain 是把多个迭代器合成一个迭代器
dfs_all = [df for df in chain([df1, df2], dfs)]
# 看df1和df2是否有单个列的
if any(len(df.shape) == 1 or 1 in df.shape for df in dfs_all):
dims = 1
else:
dims = 2
# 对日期求交期. reduce: 用传给reduce中的函数function(有两个参数)先对集合中的第 1、2个元素进行操作,
# 得到的结果再与第三个数据用function函数运算,最后得到一个结果。
mut_date_range = sorted(reduce(lambda x, y: x.intersection(y), (df.index for df in dfs_all)))
# 对columns求交集
mut_codes = sorted(reduce(lambda x, y: x.intersection(y), (df.columns for df in dfs_all)))
# 如果df1和df2都是多维的,求日期和代码的交集;否则,只求日期的交集
if dims == 2:
dfs_all = [df.loc[mut_date_range, mut_codes] for df in dfs_all]
elif dims == 1:
dfs_all = [df.loc[mut_date_range, :] for df in dfs_all]
return dfs_all
def drop_some(datdf):
global info_cols
cond = pd.Series(True, index=datdf.index)
# 最新一期数据
if pd.isnull(datdf['Pct_chg_nm']).all():
pass
else:
# 删除未上市股票
cond &= ~pd.isnull(datdf['Mkt_cap_float'])
# 删除未开盘股票
cond &= datdf['Is_open1']
datdf = datdf.loc[cond]
return datdf
def fill_na(data, ind='sw', fill_type='any'):
"""
缺失值填充:缺失值少于10%的情况下使用行业中位数代替
"""
global info_cols, non_processed_factors
datdf = data.copy()
if ind == 'sw':
datdf = datdf.loc[~pd.isnull(datdf['Industry_sw']), :]
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# datdf中剔除info_cols后的列名
facs_to_fill = datdf.columns.difference(set(tmp_info_cols))
datdf[facs_to_fill] = datdf[facs_to_fill].applymap(coerce_numeric)
datdf = datdf.replace([np.inf, -np.inf], np.nan) # 替换inf
# pd.to_numeric( datdf[facs_to_fill], errors='coerce')
if fill_type != 'any':
facs_to_fill = [fac for fac in facs_to_fill # 筛选缺失值少于10%的因子
if pd.isnull(datdf[fac]).sum() / len(datdf) <= 0.1]
else:
facs_to_fill = [fac for fac in facs_to_fill # 筛选缺失值少于10%的因子
if pd.isnull(datdf[fac]).any()]
if ind in ['zx', 'sw']:
grouped_column = f'Industry_{ind}'
elif ind == 'Second_industry':
grouped_column = 'Second_industry'
else:
raise Exception
for fac in facs_to_fill:
fac_median_by_ind = datdf[[grouped_column, fac]].groupby(grouped_column).median()
# 把dateframe转为dict,并取fac为key以解决 dict套dict 的问题
fac_ind_map = fac_median_by_ind.to_dict()[fac]
# 选出需要替换的数据
fac_to_fill = datdf.loc[pd.isnull(datdf[fac]), [grouped_column, fac]]
# map函数可以接受含有映射关系的字典。使用map做行业到其均值的映射。
fac_to_fill.loc[:, fac] = fac_to_fill[grouped_column].map(fac_ind_map)
# 添加回到datdf
datdf.loc[fac_to_fill.index, fac] = fac_to_fill[fac].values
if pd.isnull(datdf[fac]).any():
datdf[fac] = datdf[fac].fillna(np.nanmean(datdf[fac]))
# 针对sw行业存在缺失值的情况
if len(datdf) < len(data):
idx_to_append = data.index.difference(datdf.index)
datdf = pd.concat([datdf, data.loc[idx_to_append, :]])
datdf.sort_index()
return datdf
def coerce_numeric(s):
try:
return float(s)
except:
return np.nan
def winsorize(data, n=5):
"""
去极值:5倍中位数标准差法(5mad)
"""
global info_cols, non_processed_factors
datdf = data.copy()
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# 找出含有 nan 的列
if_contain_na = pd.isnull(datdf).sum().sort_values(ascending=True)
facs_to_remove = if_contain_na.loc[if_contain_na > 0].index.tolist()
if 'PCT_CHG_NM' in facs_to_remove:
facs_to_remove.remove('PCT_CHG_NM')
# 剔除含有 nan 的列 和 info_cols的列 后的所有列
facs_to_win = datdf.columns.difference(set(tmp_info_cols)).difference(set(tuple(facs_to_remove)))
dat_win = datdf[facs_to_win]
dat_win = dat_win.applymap(apply_func2)
fac_vals = dat_win.values
# np.median(fac_vals)
try:
dm = np.nanmedian(fac_vals, axis=0)
except Exception as e:
print('debug')
# 与均值差的绝对值的非 nan 均值
dm1 = np.nanmedian(np.abs(fac_vals - dm), axis=0)
if 0 in (dm + n*dm1):
# 针对存在去极值后均变为零的特殊情况(2009-05-27-'DP')
cut_points = [i for i in np.argwhere(dm1 == 0)[0]]
# 提取对应列,对其不进行去极值处理
facs_unchanged = [facs_to_win[cut_points[i]] for i in range(len(cut_points))]
# 仅对剩余列进行去极值处理
facs_to_win_median = facs_to_win.difference(set(tuple(facs_unchanged)))
dat_win_median = datdf[facs_to_win_median]
def fun1(x):
try:
r = float(x)
except Exception as e:
r = 0
return r
dat_win_median = dat_win_median.applymap(fun1)
fac_median_vals = dat_win_median.values
dmed = np.nanmedian(fac_median_vals, axis=0)
dmed1 = np.nanmedian(np.abs(fac_median_vals - dmed), axis=0)
dmed = np.repeat(dmed.reshape(1,-1), fac_median_vals.shape[0], axis=0)
dmed1 = np.repeat(dmed1.reshape(1,-1), fac_median_vals.shape[0], axis=0)
fac_median_vals = np.where(fac_median_vals > dmed + n*dmed1, dmed+n*dmed1,
np.where(fac_median_vals < dmed - n*dmed1, dmed - n*dmed1, fac_median_vals))
res1 = pd.DataFrame(fac_median_vals, index=dat_win_median.index, columns=dat_win_median.columns)
res2 = datdf[facs_unchanged]
res = pd.concat([res1, res2], axis=1)
else:
# 通过两个repeat,得到与fac_vals 中元素一一对应的极值
dm = np.repeat(dm.reshape(1, -1), fac_vals.shape[0], axis=0)
dm1 = np.repeat(dm1.reshape(1, -1), fac_vals.shape[0], axis=0)
# 替换
fac_vals = np.where(fac_vals > dm + n*dm1, dm+n*dm1,
np.where(fac_vals < dm - n*dm1, dm - n*dm1, fac_vals))
res = pd.DataFrame(fac_vals, index=dat_win.index, columns=dat_win.columns)
datdf[facs_to_win] = res
return datdf
def neutralize(data, ind_neu=True, size_neu=True, ind='sw', plate=None):
"""
中性化:因子暴露度对行业哑变量(ind_dummy_matrix)和对数流通市值(lncap_barra)
做线性回归, 取残差作为新的因子暴露度
"""
global info_cols, non_processed_factors
datdf = data.copy()
if ind == 'sw':
datdf = datdf.loc[~pd.isnull(datdf['Industry_sw']), :]
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# 剔除 info_cols 这些列后剩下的列名
cols_to_neu = datdf.columns.difference(set(tmp_info_cols))
y = datdf[cols_to_neu]
# 剔除含有nan的
y = y.dropna(how='any', axis=1)
cols_neu = y.columns
if size_neu:
# 对数市值
lncap = np.log(datdf[['Mkt_cap_float']])
# 若针对特定行业,则无需生成行业哑变量
use_dummies = 1
if not ind_neu:
use_dummies = 0
# 市值中性行业不中性
if use_dummies == 0 and size_neu:
X = lncap
# 行业中性市值不中性
elif use_dummies == 1 and not size_neu:
X = pd.get_dummies(datdf[f'Industry_{ind}'])
else:
# 使用 pd.get_dummies 生成行业哑变量
ind_dummy_matrix = pd.get_dummies(datdf[f'Industry_{ind}'])
# 合并对数市值和行业哑变量
X = pd.concat([lncap, ind_dummy_matrix], axis=1)
model = LinearRegression(fit_intercept=False)
# 一次对所有的y都做回归
try:
res = model.fit(X, y)
except Exception as e:
pd.isna(y).sum().sum()
pd.isna(X).sum().sum()
for col, se in y.iteritems():
pd.isna(se).sum()
(se == -np.inf).sum()
np.where(se == -np.inf)
np.where(se == np.inf)
print(col)
res = model.fit(X, se)
print('debug')
coef = res.coef_
residue = y - np.dot(X, coef.T)
# 断言语言, 如果为false则触发错误
assert len(datdf.index.difference(residue.index)) == 0
datdf.loc[residue.index, cols_neu] = residue
return datdf
def standardize(data):
"""
标准化:Z-score标准化方法,减去均值,除以标准差
"""
global info_cols, non_processed_factors
datdf = data.copy()
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
facs_to_sta = datdf.columns.difference(set(tmp_info_cols))
dat_sta = np.float64(datdf[facs_to_sta].values)
dat_sta = (dat_sta - np.nanmean(dat_sta, axis=0)) / np.nanstd(dat_sta, axis=0)
datdf.loc[:, facs_to_sta] = dat_sta
return datdf
def process_input_names(factor_names):
if factor_names == 'a':
factor_names = None
else:
factor_names = [f.replace("'", "").replace('"', "") for f in factor_names.split(',')]
return factor_names
# 向现有的月度因子数据中添加一列因子
def add_columns(added_date_path, columns_list, target_date_path):
'''
:param added_date_path: 添加数据的存储位置
:param columns_list: 准备添加的列名
:param target_date_path: 需要被添加的数据存储位置
:return:
'''
toadded_list = os.listdir(added_date_path)
save_list = os.listdir(target_date_path)
if pd.to_datetime(toadded_list[0].split('.')[0]) > pd.to_datetime(save_list[0].split('.')[0]) or \
pd.to_datetime(toadded_list[-1].split('.')[0]) < pd.to_datetime(save_list[-1].split('.')[0]):
print('被添加数据长度不够')
raise Exception
for panel_f in os.listdir(target_date_path):
toadded_dat = pd.read_csv(os.path.join(added_date_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
panel_dat = pd.read_csv(os.path.join(target_date_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
real_add_list = [col for col in columns_list if col not in panel_dat.columns]
if len(real_add_list) == 0:
continue
# join_axes关键字为沿用那个的index,忽略另一个df的其余数据
panel_dat = pd.concat([panel_dat, toadded_dat[real_add_list]], axis=1, join_axes=[panel_dat.index])
panel_dat.to_csv(os.path.join(target_date_path, panel_f),
encoding='gbk')
print('数据添加完毕')
# 根据给定的日度日期序列和月末日期,找到该序列中该月末日期的月初日期
def getmonthfirstdate(dt, md):
tmp1 = dt[dt.year == md.year]
tmp2 = tmp1[tmp1.month == md.month]
return tmp2[0]
# 得到给定日度时间序列的月末时间list
def get_monthends_series(dt):
if isinstance(dt, pd.DataFrame):
dt = list(dt)
p = 0
med = []
for i in range(len(dt)-1):
mon_t = dt[i].month
mon_n = dt[i+1].month
if mon_t != mon_n:
med.append(dt[i])
p = p + 1
return pd.Series(med)
def simple_func(pd_s, mv, type='median'):
# 市值加权
if type == 'mv_weighted':
tmpp = pd.concat([pd_s, mv], axis=1)
tmpp = tmpp.dropna(axis=0)
pd_s = tmpp[tmpp.columns[0]]
mv = tmpp[tmpp.columns[1]]
mv_weights = mv/np.sum(mv)
v = np.dot(np.mat(pd_s), np.mat(mv_weights).T)
return np.array(v).flatten()
# 中位数
elif type == 'median':
return np.nanmedian(pd_s)
elif type == 'mean':
return np.nanmean(pd_s)
else:
raise Exception
def apply_func(df, mv, type='median'):
# 市值加权
if type == 'mv_weighted':
mv_weights = mv/np.sum(mv)
v = np.dot(np.mat(df), np.mat(mv_weights).T)
return np.array(v).flatten()
# 中位数
elif type == 'median':
return df.median()
else:
raise Exception
def apply_func2(x):
if isinstance(x, str):
try:
x = float(x)
except Exception as e:
x = 0
else:
x
return x
def concat_factor_2(data_path, save_path, classified_df, factor_name, wei_type, save_name):
# 创建文件夹
if not os.path.exists(save_path):
os.makedirs(save_path)
cols = set(list(classified_df[classified_df.columns[0]]))
total_df = pd.DataFrame()
for panel_f in os.listdir(data_path):
print(panel_f)
panel_dat = pd.read_csv(os.path.join(data_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
tmp_df = pd.concat([panel_dat[[factor_name, 'MKT_CAP_FLOAT']], classified_df], axis=1, join='inner')
d = datetime.strptime(panel_f.split('.')[0], "%Y-%m-%d")
section_df = pd.DataFrame(index=[d], columns=cols)
grouped = tmp_df.groupby(classified_df.columns[0])
for pla, group in grouped:
group.dropna(how='any', inplace=True)
section_df.loc[d, pla] = simple_func(group[factor_name], mv=group['MKT_CAP_FLOAT'], type='mv_weighted')[0]
total_df = pd.concat([total_df, section_df], axis=0)
if '.' not in save_name:
save_name = save_name + '.csv'
total_df.index.name = 'date'
total_df.to_csv(os.path.join(save_path, save_name), encoding='gbk')
# 做一个累计净值走势图
# prod_total_df = (total_df + 1).cumprod()
# prod_total_df.to_csv(os.path.join(save_path, '累计_'+save_name), encoding='gbk')
# 把一个 截面数据添加到已经有的月度模式存储的文件中
def add_to_panels(dat, panel_path, f_name, freq_in_dat='M'):
"""说明: 把dat依次插入到panel_path的DF中,插入的列名为f_name, 根据dat的类型是DF还是Series可以判断
是每次插入的数据不同还是每次插入相同的数据。"""
print(f'开始添加{f_name}数据到目标文件夹')
panel = os.listdir(panel_path)
for month_date in panel:
hased_dat = pd.read_csv(os.path.join(panel_path, month_date), engine='python')
hased_dat = hased_dat.set_index('Code')
# 输入数据为 DataFrame, 那么按列插入
if isinstance(dat, pd.DataFrame):
mon_str = month_date.split('.')[0]
if mon_str in dat.columns:
# 当dat中的columns也是str格式,且日期与panel一样时,直接添加
hased_dat[f_name] = dat[mon_str]
else:
# 否则,当年、月相同,日不同时,需要变成datetime格式而且还有查找
target = datetime.strptime(mon_str, "%Y-%m-%d")
# 当dat的columns是datetime格式时
if isinstance(dat.columns[0], datetime):
if freq_in_dat == 'M':
finded = None
for col in dat.columns:
if col.year == target.year and col.month == target.month:
finded = col
break
if finded:
hased_dat[f_name] = dat[finded]
else:
print('{}该期未找到对应数据'.format(mon_str))
if freq_in_dat == 'D':
if target in dat.columns:
hased_dat[f_name] = dat[target]
else:
print('{}该期未找到对应数据'.format(mon_str))
else:
print('现有格式的还未完善')
raise Exception
# 输入数据为 DataFrame, 那么按列插入
elif isinstance(dat, pd.Series):
hased_dat[f_name] = dat[hased_dat.index]
try:
hased_dat = hased_dat.reset_index('Code')
except Exception as e:
print('debug')
if 'No' in hased_dat.columns:
del hased_dat['No']
hased_dat.index.name = 'No'
hased_dat.to_csv(os.path.join(panel_path, month_date), encoding='gbk')
print('完毕!')
# 从一个月度panel里面删除某个因子
def del_factor_from_panel(panel_path, factor_name):
print(f'开始从目标文件夹删除{factor_name}因子。')
panel = os.listdir(panel_path)
for month_date in panel:
dat_df = pd.read_csv(os.path.join(panel_path, month_date), engine='python')
dat_df = dat_df.set_index('Code')
if factor_name in dat_df.columns:
del dat_df[factor_name]
dat_df.reset_index(inplace=True)
dat_df.set_index('No', inplace=True)
dat_df.index.name = 'No'
dat_df.to_csv(os.path.join(panel_path, month_date), encoding='gbk')
print(f'完毕。')
return
def rolling_regress_1(y, x, window=5):
try:
rolling_ys = rolling_windows(y, window)
rolling_xs = rolling_windows(x, window)
except Exception as e:
print('debug')
bet = pd.Series()
# enumerate 形成带 i 的一个迭代器
for i, (rolling_x, rolling_y) in enumerate(zip(rolling_xs, rolling_ys)):
tmp_index = y.index[i + window - 1]
try:
model = sm.OLS(rolling_y, rolling_x)
result = model.fit()
params = result.params
b_v = params[0]
# print(result.params)
# print(result.summary())
except:
print(i)
raise
b = pd.Series(index=[tmp_index], data=b_v)
bet = pd.concat([bet, b])
return bet
# 计算不同股指合约的beta值
def compute_future_beta():
# 存储地址为:D:\Datebase_Stock\Date\index\stock_future\sf_beta.csv
data = Data()
sf_close_daily = data.sf_close_daily
index_price_daily = data.index_price_daily.T
# 求一下日期的交集,避免日期不同的潜在问题
tt = list(set(sf_close_daily.columns) & set(index_price_daily.index))
tt.sort()
sf_close_daily = sf_close_daily[tt]
index_price_daily = index_price_daily.loc[tt, :]
sf_beta = pd.DataFrame()
for c, se in sf_close_daily.iterrows():
if 'IC' in c:
tmp_i = index_price_daily['ZZ500']
elif 'IF' in c:
tmp_i = index_price_daily['HS300']
elif 'IH' in c:
tmp_i = index_price_daily['SZ50']
else:
print('Code Bug')
raise ValueError
# 去掉Nan
tmp_c = se.dropna()
tmp_i = tmp_i[tmp_c.index]
if len(tmp_c) > 22:
bet = rolling_regress_1(tmp_i, tmp_c, window=22)
sf_beta = pd.concat([sf_beta, pd.DataFrame({c: bet}).T], axis=0)
p = os.path.join(data_dair, 'index', 'stock_future')
data.save(sf_beta, 'sf_beta', p)
if __name__ == "__main__":
# compute_future_beta()
add_columns
# panel_path = r"D:\pythoncode\IndexEnhancement\因子预处理模块\因子"
# factor_name_list = ['Totaloperatingrevenueps_qoq_qoq']
# for f in factor_name_list:
# del_factor_from_panel(panel_path, f)
# panel_path = r'D:\pythoncode\IndexEnhancement\因子预处理模块\因子'
# add_fs_path = r'D:\pythoncode\IndexEnhancement\因子预处理模块\增加的因子\截面数据'
#
# f_list = os.listdir(add_fs_path)
# for fn in f_list:
# f_name = fn.split('.')[0]
# print(f_name)
# dat = pd.read_csv(os.path.join(add_fs_path, fn), engine='python')
# dat = dat.set_index(dat.columns[0])
# add_to_panels(dat, panel_path, f_name)
| identifier_body | ||
factor_data_preprocess.py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 09:47:11 2019
@author: admin
"""
import numpy as np
import pandas as pd
import os
from datetime import datetime
from itertools import chain
from functools import reduce
from sklearn.linear_model import LinearRegression
from utility.constant import info_cols, data_dair, non_processed_factors
from utility.relate_to_tushare import trade_days
from utility.tool0 import Data
import statsmodels.api as sm
from pyfinance.utils import rolling_windows
def align(df1, df2, *dfs):
# chain 是把多个迭代器合成一个迭代器
dfs_all = [df for df in chain([df1, df2], dfs)]
# 看df1和df2是否有单个列的
if any(len(df.shape) == 1 or 1 in df.shape for df in dfs_all):
dims = 1
else:
dims = 2
# 对日期求交期. reduce: 用传给reduce中的函数function(有两个参数)先对集合中的第 1、2个元素进行操作,
# 得到的结果再与第三个数据用function函数运算,最后得到一个结果。
mut_date_range = sorted(reduce(lambda x, y: x.intersection(y), (df.index for df in dfs_all)))
# 对columns求交集
mut_codes = sorted(reduce(lambda x, y: x.intersection(y), (df.columns for df in dfs_all)))
# 如果df1和df2都是多维的,求日期和代码的交集;否则,只求日期的交集
if dims == 2:
dfs_all = [df.loc[mut_date_range, mut_codes] for df in dfs_all]
elif dims == 1:
dfs_all = [df.loc[mut_date_range, :] for df in dfs_all]
return dfs_all
def drop_some(datdf):
global info_cols
cond = pd.Series(True, index=datdf.index)
# 最新一期数据
if pd.isnull(datdf['Pct_chg_nm']).all():
pass
else:
# 删除未上市股票
cond &= ~pd.isnull(datdf['Mkt_cap_float'])
# 删除未开盘股票
cond &= datdf['Is_open1']
datdf = datdf.loc[cond]
return datdf
def fill_na(data, ind='sw', fill_type='any'):
"""
缺失值填充:缺失值少于10%的情况下使用行业中位数代替
"""
global info_cols, non_processed_factors
datdf = data.copy()
if ind == 'sw':
datdf = datdf.loc[~pd.isnull(datdf['Industry_sw']), :]
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# datdf中剔除info_cols后的列名
facs_to_fill = datdf.columns.difference(set(tmp_info_cols))
datdf[facs_to_fill] = datdf[facs_to_fill].applymap(coerce_numeric)
datdf = datdf.replace([np.inf, -np.inf], np.nan) # 替换inf
# pd.to_numeric( datdf[facs_to_fill], errors='coerce')
if fill_type != 'any':
facs_to_fill = [fac for fac in facs_to_fill # 筛选缺失值少于10%的因子
if pd.isnull(datdf[fac]).sum() / len(datdf) <= 0.1]
else:
facs_to_fill = [fac for fac in facs_to_fill # 筛选缺失值少于10%的因子
if pd.isnull(datdf[fac]).any()]
if ind in ['zx', 'sw']:
grouped_column = f'Industry_{ind}'
elif ind == 'Second_industry':
grouped_column = 'Second_industry'
else:
raise Exception
for fac in facs_to_fill:
fac_median_by_ind = datdf[[grouped_column, fac]].groupby(grouped_column).median()
# 把dateframe转为dict,并取fac为key以解决 dict套dict 的问题
fac_ind_map = fac_median_by_ind.to_dict()[fac]
# 选出需要替换的数据
fac_to_fill = datdf.loc[pd.isnull(datdf[fac]), [grouped_column, fac]]
# map函数可以接受含有映射关系的字典。使用map做行业到其均值的映射。
fac_to_fill.loc[:, fac] = fac_to_fill[grouped_column].map(fac_ind_map)
# 添加回到datdf
datdf.loc[fac_to_fill.index, fac] = fac_to_fill[fac].values
if pd.isnull(datdf[fac]).any():
datdf[fac] = datdf[fac].fillna(np.nanmean(datdf[fac]))
# 针对sw行业存在缺失值的情况
if len(datdf) < len(data):
idx_to_append = data.index.difference(datdf.index)
datdf = pd.concat([datdf, data.loc[idx_to_append, :]])
datdf.sort_index()
return datdf
def coerce_numeric(s):
try:
return float(s)
except:
return np.nan
def winsorize(data, n=5):
"""
去极值:5倍中位数标准差法(5mad)
"""
global info_cols, non_processed_factors
datdf = data.copy()
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# 找出含有 nan 的列
if_contain_na = pd.isnull(datdf).sum().sort_values(ascending=True)
facs_to_remove = if_contain_na.loc[if_contain_na > 0].index.tolist()
if 'PCT_CHG_NM' in facs_to_remove:
facs_to_remove.remove('PCT_CHG_NM')
# 剔除含有 nan 的列 和 info_cols的列 后的所有列
facs_to_win = datdf.columns.difference(set(tmp_info_cols)).difference(set(tuple(facs_to_remove)))
dat_win = datdf[facs_to_win]
dat_win = dat_win.applymap(apply_func2)
fac_vals = dat_win.values
# np.median(fac_vals)
try:
dm = np.nanmedian(fac_vals, axis=0)
except Exception as e:
print('debug')
# 与均值差的绝对值的非 nan 均值
dm1 = np.nanmedian(np.abs(fac_vals - dm), axis=0)
if 0 in (dm + n*dm1):
# 针对存在去极值后均变为零的特殊情况(2009-05-27-'DP')
cut_points = [i for i in np.argwhere(dm1 == 0)[0]]
# 提取对应列,对其不进行去极值处理
facs_unchanged = [facs_to_win[cut_points[i]] for i in range(len(cut_points))]
# 仅对剩余列进行去极值处理
facs_to_win_median = facs_to_win.difference(set(tuple(facs_unchanged)))
dat_win_median = datdf[facs_to_win_median]
def fun1(x):
try:
r = float(x)
except Exception as e:
r = 0
return r
dat_win_median = dat_win_median.applymap(fun1)
fac_median_vals = dat_win_median.values
dmed = np.nanmedian(fac_median_vals, axis=0)
dmed1 = np.nanmedian(np.abs(fac_median_vals - dmed), axis=0)
dmed = np.repeat(dmed.reshape(1,-1), fac_median_vals.shape[0], axis=0)
dmed1 = np.repeat(dmed1.reshape(1,-1), fac_median_vals.shape[0], axis=0)
fac_median_vals = np.where(fac_median_vals > dmed + n*dmed1, dmed+n*dmed1,
np.where(fac_median_vals < dmed - n*dmed1, dmed - n*dmed1, fac_median_vals))
res1 = pd.DataFrame(fac_median_vals, index=dat_win_median.index, columns=dat_win_median.columns)
res2 = datdf[facs_unchanged]
res = pd.concat([res1, res2], axis=1)
else:
# 通过两个repeat,得到与fac_vals 中元素一一对应的极值
dm = np.repeat(dm.reshape(1, -1), fac_vals.shape[0], axis=0)
dm1 = np.repeat(dm1.reshape(1, -1), fac_vals.shape[0], axis=0)
# 替换
fac_vals = np.where(fac_vals > dm + n*dm1, dm+n*dm1,
np.where(fac_vals < dm - n*dm1, dm - n*dm1, fac_vals))
res = pd.DataFrame(fac_vals, index=dat_win.index, columns=dat_win.columns)
datdf[facs_to_win] = res
return datdf
def neutralize(data, ind_neu=True, size_neu=True, ind='sw', plate=None):
"""
中性化:因子暴露度对行业哑变量(ind_dummy_matrix)和对数流通市值(lncap_barra)
做线性回归, 取残差作为新的因子暴露度
"""
global info_cols, non_processed_factors
datdf = data.copy()
if ind == 'sw':
datdf = datdf.loc[~pd.isnull(datdf['Industry_sw']), :]
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# 剔除 info_cols 这些列后剩下的列名
cols_to_neu = datdf.columns.difference(set(tmp_info_cols))
y = datdf[cols_to_neu]
# 剔除含有nan的
y = y.dropna(how='any', axis=1)
cols_neu = y.columns
if size_neu:
# 对数市值
lncap = np.log(datdf[['Mkt_cap_float']])
# 若针对特定行业,则无需 | use_dummies = 1
if not ind_neu:
use_dummies = 0
# 市值中性行业不中性
if use_dummies == 0 and size_neu:
X = lncap
# 行业中性市值不中性
elif use_dummies == 1 and not size_neu:
X = pd.get_dummies(datdf[f'Industry_{ind}'])
else:
# 使用 pd.get_dummies 生成行业哑变量
ind_dummy_matrix = pd.get_dummies(datdf[f'Industry_{ind}'])
# 合并对数市值和行业哑变量
X = pd.concat([lncap, ind_dummy_matrix], axis=1)
model = LinearRegression(fit_intercept=False)
# 一次对所有的y都做回归
try:
res = model.fit(X, y)
except Exception as e:
pd.isna(y).sum().sum()
pd.isna(X).sum().sum()
for col, se in y.iteritems():
pd.isna(se).sum()
(se == -np.inf).sum()
np.where(se == -np.inf)
np.where(se == np.inf)
print(col)
res = model.fit(X, se)
print('debug')
coef = res.coef_
residue = y - np.dot(X, coef.T)
# 断言语言, 如果为false则触发错误
assert len(datdf.index.difference(residue.index)) == 0
datdf.loc[residue.index, cols_neu] = residue
return datdf
def standardize(data):
"""
标准化:Z-score标准化方法,减去均值,除以标准差
"""
global info_cols, non_processed_factors
datdf = data.copy()
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
facs_to_sta = datdf.columns.difference(set(tmp_info_cols))
dat_sta = np.float64(datdf[facs_to_sta].values)
dat_sta = (dat_sta - np.nanmean(dat_sta, axis=0)) / np.nanstd(dat_sta, axis=0)
datdf.loc[:, facs_to_sta] = dat_sta
return datdf
def process_input_names(factor_names):
if factor_names == 'a':
factor_names = None
else:
factor_names = [f.replace("'", "").replace('"', "") for f in factor_names.split(',')]
return factor_names
# 向现有的月度因子数据中添加一列因子
def add_columns(added_date_path, columns_list, target_date_path):
'''
:param added_date_path: 添加数据的存储位置
:param columns_list: 准备添加的列名
:param target_date_path: 需要被添加的数据存储位置
:return:
'''
toadded_list = os.listdir(added_date_path)
save_list = os.listdir(target_date_path)
if pd.to_datetime(toadded_list[0].split('.')[0]) > pd.to_datetime(save_list[0].split('.')[0]) or \
pd.to_datetime(toadded_list[-1].split('.')[0]) < pd.to_datetime(save_list[-1].split('.')[0]):
print('被添加数据长度不够')
raise Exception
for panel_f in os.listdir(target_date_path):
toadded_dat = pd.read_csv(os.path.join(added_date_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
panel_dat = pd.read_csv(os.path.join(target_date_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
real_add_list = [col for col in columns_list if col not in panel_dat.columns]
if len(real_add_list) == 0:
continue
# join_axes关键字为沿用那个的index,忽略另一个df的其余数据
panel_dat = pd.concat([panel_dat, toadded_dat[real_add_list]], axis=1, join_axes=[panel_dat.index])
panel_dat.to_csv(os.path.join(target_date_path, panel_f),
encoding='gbk')
print('数据添加完毕')
# 根据给定的日度日期序列和月末日期,找到该序列中该月末日期的月初日期
def getmonthfirstdate(dt, md):
tmp1 = dt[dt.year == md.year]
tmp2 = tmp1[tmp1.month == md.month]
return tmp2[0]
# 得到给定日度时间序列的月末时间list
def get_monthends_series(dt):
if isinstance(dt, pd.DataFrame):
dt = list(dt)
p = 0
med = []
for i in range(len(dt)-1):
mon_t = dt[i].month
mon_n = dt[i+1].month
if mon_t != mon_n:
med.append(dt[i])
p = p + 1
return pd.Series(med)
def simple_func(pd_s, mv, type='median'):
# 市值加权
if type == 'mv_weighted':
tmpp = pd.concat([pd_s, mv], axis=1)
tmpp = tmpp.dropna(axis=0)
pd_s = tmpp[tmpp.columns[0]]
mv = tmpp[tmpp.columns[1]]
mv_weights = mv/np.sum(mv)
v = np.dot(np.mat(pd_s), np.mat(mv_weights).T)
return np.array(v).flatten()
# 中位数
elif type == 'median':
return np.nanmedian(pd_s)
elif type == 'mean':
return np.nanmean(pd_s)
else:
raise Exception
def apply_func(df, mv, type='median'):
# 市值加权
if type == 'mv_weighted':
mv_weights = mv/np.sum(mv)
v = np.dot(np.mat(df), np.mat(mv_weights).T)
return np.array(v).flatten()
# 中位数
elif type == 'median':
return df.median()
else:
raise Exception
def apply_func2(x):
if isinstance(x, str):
try:
x = float(x)
except Exception as e:
x = 0
else:
x
return x
def concat_factor_2(data_path, save_path, classified_df, factor_name, wei_type, save_name):
# 创建文件夹
if not os.path.exists(save_path):
os.makedirs(save_path)
cols = set(list(classified_df[classified_df.columns[0]]))
total_df = pd.DataFrame()
for panel_f in os.listdir(data_path):
print(panel_f)
panel_dat = pd.read_csv(os.path.join(data_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
tmp_df = pd.concat([panel_dat[[factor_name, 'MKT_CAP_FLOAT']], classified_df], axis=1, join='inner')
d = datetime.strptime(panel_f.split('.')[0], "%Y-%m-%d")
section_df = pd.DataFrame(index=[d], columns=cols)
grouped = tmp_df.groupby(classified_df.columns[0])
for pla, group in grouped:
group.dropna(how='any', inplace=True)
section_df.loc[d, pla] = simple_func(group[factor_name], mv=group['MKT_CAP_FLOAT'], type='mv_weighted')[0]
total_df = pd.concat([total_df, section_df], axis=0)
if '.' not in save_name:
save_name = save_name + '.csv'
total_df.index.name = 'date'
total_df.to_csv(os.path.join(save_path, save_name), encoding='gbk')
# 做一个累计净值走势图
# prod_total_df = (total_df + 1).cumprod()
# prod_total_df.to_csv(os.path.join(save_path, '累计_'+save_name), encoding='gbk')
# 把一个 截面数据添加到已经有的月度模式存储的文件中
def add_to_panels(dat, panel_path, f_name, freq_in_dat='M'):
"""说明: 把dat依次插入到panel_path的DF中,插入的列名为f_name, 根据dat的类型是DF还是Series可以判断
是每次插入的数据不同还是每次插入相同的数据。"""
print(f'开始添加{f_name}数据到目标文件夹')
panel = os.listdir(panel_path)
for month_date in panel:
hased_dat = pd.read_csv(os.path.join(panel_path, month_date), engine='python')
hased_dat = hased_dat.set_index('Code')
# 输入数据为 DataFrame, 那么按列插入
if isinstance(dat, pd.DataFrame):
mon_str = month_date.split('.')[0]
if mon_str in dat.columns:
# 当dat中的columns也是str格式,且日期与panel一样时,直接添加
hased_dat[f_name] = dat[mon_str]
else:
# 否则,当年、月相同,日不同时,需要变成datetime格式而且还有查找
target = datetime.strptime(mon_str, "%Y-%m-%d")
# 当dat的columns是datetime格式时
if isinstance(dat.columns[0], datetime):
if freq_in_dat == 'M':
finded = None
for col in dat.columns:
if col.year == target.year and col.month == target.month:
finded = col
break
if finded:
hased_dat[f_name] = dat[finded]
else:
print('{}该期未找到对应数据'.format(mon_str))
if freq_in_dat == 'D':
if target in dat.columns:
hased_dat[f_name] = dat[target]
else:
print('{}该期未找到对应数据'.format(mon_str))
else:
print('现有格式的还未完善')
raise Exception
# 输入数据为 DataFrame, 那么按列插入
elif isinstance(dat, pd.Series):
hased_dat[f_name] = dat[hased_dat.index]
try:
hased_dat = hased_dat.reset_index('Code')
except Exception as e:
print('debug')
if 'No' in hased_dat.columns:
del hased_dat['No']
hased_dat.index.name = 'No'
hased_dat.to_csv(os.path.join(panel_path, month_date), encoding='gbk')
print('完毕!')
# 从一个月度panel里面删除某个因子
def del_factor_from_panel(panel_path, factor_name):
print(f'开始从目标文件夹删除{factor_name}因子。')
panel = os.listdir(panel_path)
for month_date in panel:
dat_df = pd.read_csv(os.path.join(panel_path, month_date), engine='python')
dat_df = dat_df.set_index('Code')
if factor_name in dat_df.columns:
del dat_df[factor_name]
dat_df.reset_index(inplace=True)
dat_df.set_index('No', inplace=True)
dat_df.index.name = 'No'
dat_df.to_csv(os.path.join(panel_path, month_date), encoding='gbk')
print(f'完毕。')
return
def rolling_regress_1(y, x, window=5):
try:
rolling_ys = rolling_windows(y, window)
rolling_xs = rolling_windows(x, window)
except Exception as e:
print('debug')
bet = pd.Series()
# enumerate 形成带 i 的一个迭代器
for i, (rolling_x, rolling_y) in enumerate(zip(rolling_xs, rolling_ys)):
tmp_index = y.index[i + window - 1]
try:
model = sm.OLS(rolling_y, rolling_x)
result = model.fit()
params = result.params
b_v = params[0]
# print(result.params)
# print(result.summary())
except:
print(i)
raise
b = pd.Series(index=[tmp_index], data=b_v)
bet = pd.concat([bet, b])
return bet
# 计算不同股指合约的beta值
def compute_future_beta():
# 存储地址为:D:\Datebase_Stock\Date\index\stock_future\sf_beta.csv
data = Data()
sf_close_daily = data.sf_close_daily
index_price_daily = data.index_price_daily.T
# 求一下日期的交集,避免日期不同的潜在问题
tt = list(set(sf_close_daily.columns) & set(index_price_daily.index))
tt.sort()
sf_close_daily = sf_close_daily[tt]
index_price_daily = index_price_daily.loc[tt, :]
sf_beta = pd.DataFrame()
for c, se in sf_close_daily.iterrows():
if 'IC' in c:
tmp_i = index_price_daily['ZZ500']
elif 'IF' in c:
tmp_i = index_price_daily['HS300']
elif 'IH' in c:
tmp_i = index_price_daily['SZ50']
else:
print('Code Bug')
raise ValueError
# 去掉Nan
tmp_c = se.dropna()
tmp_i = tmp_i[tmp_c.index]
if len(tmp_c) > 22:
bet = rolling_regress_1(tmp_i, tmp_c, window=22)
sf_beta = pd.concat([sf_beta, pd.DataFrame({c: bet}).T], axis=0)
p = os.path.join(data_dair, 'index', 'stock_future')
data.save(sf_beta, 'sf_beta', p)
if __name__ == "__main__":
# compute_future_beta()
add_columns
# panel_path = r"D:\pythoncode\IndexEnhancement\因子预处理模块\因子"
# factor_name_list = ['Totaloperatingrevenueps_qoq_qoq']
# for f in factor_name_list:
# del_factor_from_panel(panel_path, f)
# panel_path = r'D:\pythoncode\IndexEnhancement\因子预处理模块\因子'
# add_fs_path = r'D:\pythoncode\IndexEnhancement\因子预处理模块\增加的因子\截面数据'
#
# f_list = os.listdir(add_fs_path)
# for fn in f_list:
# f_name = fn.split('.')[0]
# print(f_name)
# dat = pd.read_csv(os.path.join(add_fs_path, fn), engine='python')
# dat = dat.set_index(dat.columns[0])
# add_to_panels(dat, panel_path, f_name)
| 生成行业哑变量
| identifier_name |
factor_data_preprocess.py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 09:47:11 2019
@author: admin
"""
import numpy as np
import pandas as pd
import os
from datetime import datetime
from itertools import chain
from functools import reduce
from sklearn.linear_model import LinearRegression
from utility.constant import info_cols, data_dair, non_processed_factors
from utility.relate_to_tushare import trade_days
from utility.tool0 import Data
import statsmodels.api as sm
from pyfinance.utils import rolling_windows
def align(df1, df2, *dfs):
# chain 是把多个迭代器合成一个迭代器
dfs_all = [df for df in chain([df1, df2], dfs)]
# 看df1和df2是否有单个列的
if any(len(df.shape) == 1 or 1 in df.shape for df in dfs_all):
dims = 1
else:
dims = 2
# 对日期求交期. reduce: 用传给reduce中的函数function(有两个参数)先对集合中的第 1、2个元素进行操作,
# 得到的结果再与第三个数据用function函数运算,最后得到一个结果。
mut_date_range = sorted(reduce(lambda x, y: x.intersection(y), (df.index for df in dfs_all)))
# 对columns求交集
mut_codes = sorted(reduce(lambda x, y: x.intersection(y), (df.columns for df in dfs_all)))
# 如果df1和df2都是多维的,求日期和代码的交集;否则,只求日期的交集
if dims == 2:
dfs_all = [df.loc[mut_date_range, mut_codes] for df in dfs_all]
elif dims == 1:
dfs_all = [df.loc[mut_date_range, :] for df in dfs_all]
return dfs_all
def drop_some(datdf):
global info_cols
cond = pd.Series(True, index=datdf.index)
# 最新一期数据
if pd.isnull(datdf['Pct_chg_nm']).all():
pass
else:
# 删除未上市股票
cond &= ~pd.isnull(datdf['Mkt_cap_float'])
# 删除未开盘股票
cond &= datdf['Is_open1']
datdf = datdf.loc[cond]
return datdf
def fill_na(data, ind='sw', fill_type='any'):
"""
缺失值填充:缺失值少于10%的情况下使用行业中位数代替
"""
global info_cols, non_processed_factors
datdf = data.copy()
if ind == 'sw':
datdf = datdf.loc[~pd.isnull(datdf['Industry_sw']), :]
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# datdf中剔除info_cols后的列名
facs_to_fill = datdf.columns.difference(set(tmp_info_cols))
datdf[facs_to_fill] = datdf[facs_to_fill].applymap(coerce_numeric)
datdf = datdf.replace([np.inf, -np.inf], np.nan) # 替换inf
# pd.to_numeric( datdf[facs_to_fill], errors='coerce')
if fill_type != 'any':
facs_to_fill = [fac for fac in facs_to_fill # 筛选缺失值少于10%的因子
if pd.isnull(datdf[fac]).sum() / len(datdf) <= 0.1]
else:
facs_to_fill = [fac for fac in facs_to_fill # 筛选缺失值少于10%的因子
if pd.isnull(datdf[fac]).any()]
if ind in ['zx', 'sw']:
grouped_column = f'Industry_{ind}'
elif ind == 'Second_industry':
grouped_column = 'Second_industry'
else:
raise Exception
for fac in facs_to_fill:
fac_median_by_ind = datdf[[grouped_column, fac]].groupby(grouped_column).median()
# 把dateframe转为dict,并取fac为key以解决 dict套dict 的问题
fac_ind_map = fac_median_by_ind.to_dict()[fac]
# 选出需要替换的数据
fac_to_fill = datdf.loc[pd.isnull(datdf[fac]), [grouped_column, fac]]
# map函数可以接受含有映射关系的字典。使用map做行业到其均值的映射。
fac_to_fill.loc[:, fac] = fac_to_fill[grouped_column].map(fac_ind_map)
# 添加回到datdf
datdf.loc[fac_to_fill.index, fac] = fac_to_fill[fac].values
if pd.isnull(datdf[fac]).any():
datdf[fac] = datdf[fac].fillna(np.nanmean(datdf[fac]))
# 针对sw行业存在缺失值的情况
if len(datdf) < len(data):
idx_to_append = data.index.difference(datdf.index)
datdf = pd.concat([datdf, data.loc[idx_to_append, :]])
datdf.sort_index()
return datdf
def coerce_numeric(s):
try:
return float(s)
except:
return np.nan
def winsorize(data, n=5):
"""
去极值:5倍中位数标准差法(5mad)
"""
global info_cols, non_processed_factors
datdf = data.copy()
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# 找出含有 nan 的列
if_contain_na = pd.isnull(datdf).sum().sort_values(ascending=True)
facs_to_remove = if_contain_na.loc[if_contain_na > 0].index.tolist()
if 'PCT_CHG_NM' in facs_to_remove:
facs_to_remove.remove('PCT_CHG_NM')
# 剔除含有 nan 的列 和 info_cols的列 后的所有列
facs_to_win = datdf.columns.difference(set(tmp_info_cols)).difference(set(tuple(facs_to_remove)))
dat_win = datdf[facs_to_win]
dat_win = dat_win.applymap(apply_func2)
fac_vals = dat_win.values
# np.median(fac_vals)
try:
dm = np.nanmedian(fac_vals, axis=0)
except Exception as e:
print('debug')
# 与均值差的绝对值的非 nan 均值
dm1 = np.nanmedian(np.abs(fac_vals - dm), axis=0)
if 0 in (dm + n*dm1):
# 针对存在去极值后均变为零的特殊情况(2009-05-27-'DP')
cut_points = [i for i in np.argwhere(dm1 == 0)[0]]
# 提取对应列,对其不进行去极值处理
facs_unchanged = [facs_to_win[cut_points[i]] for i in range(len(cut_points))]
# 仅对剩余列进行去极值处理
facs_to_win_median = facs_to_win.difference(set(tuple(facs_unchanged)))
dat_win_median = datdf[facs_to_win_median]
def fun1(x):
try:
r = float(x)
except Exception as e:
r = 0
return r
dat_win_median = dat_win_median.applymap(fun1)
fac_median_vals = dat_win_median.values
dmed = np.nanmedian(fac_median_vals, axis=0)
dmed1 = np.nanmedian(np.abs(fac_median_vals - dmed), axis=0)
dmed = np.repeat(dmed.reshape(1,-1), fac_median_vals.shape[0], axis=0)
dmed1 = np.repeat(dmed1.reshape(1,-1), fac_median_vals.shape[0], axis=0)
fac_median_vals = np.where(fac_median_vals > dmed + n*dmed1, dmed+n*dmed1,
np.where(fac_median_vals < dmed - n*dmed1, dmed - n*dmed1, fac_median_vals))
res1 = pd.DataFrame(fac_median_vals, index=dat_win_median.index, columns=dat_win_median.columns)
res2 = datdf[facs_unchanged]
res = pd.concat([res1, res2], axis=1)
else:
# 通过两个repeat,得到与fac_vals 中元素一一对应的极值
dm = np.repeat(dm.reshape(1, -1), fac_vals.shape[0], axis=0)
dm1 = np.repeat(dm1.reshape(1, -1), fac_vals.shape[0], axis=0)
# 替换
fac_vals = np.where(fac_vals > dm + n*dm1, dm+n*dm1,
np.where(fac_vals < dm - n*dm1, dm - n*dm1, fac_vals))
res = pd.DataFrame(fac_vals, index=dat_win.index, columns=dat_win.columns)
datdf[facs_to_win] = res
return datdf
def neutralize(data, ind_neu=True, size_neu=True, ind='sw', plate=None):
"""
中性化:因子暴露度对行业哑变量(ind_dummy_matrix)和对数流通市值(lncap_barra)
做线性回归, 取残差作为新的因子暴露度
"""
global info_cols, non_processed_factors
datdf = data.copy()
if ind == 'sw':
datdf = datdf.loc[~pd.isnull(datdf['Industry_sw']), :]
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
# 剔除 info_cols 这些列后剩下的列名
cols_to_neu = datdf.columns.difference(set(tmp_info_cols))
y = datdf[cols_to_neu]
# 剔除含有nan的
y = y.dropna(how='any', axis=1)
cols_neu = y.columns
if size_neu:
# 对数市值
lncap = np.log(datdf[['Mkt_cap_float']])
# 若针对特定行业,则无需生成行业哑变量
use_dummies = 1
if not ind_neu:
use_dummies = 0
# 市值中性行业不中性
if use_dummies == 0 and size_neu:
X = lncap
# 行业中性市值不中性
elif use_dummies == 1 and not size_neu:
X = pd.get_dummies(datdf[f'Industry_{ind}'])
else:
# 使用 pd.get_dummies 生成行业哑变量
ind_dummy_matrix = pd.get_dummies(datdf[f'Industry_{ind}'])
# 合并对数市值和行业哑变量
X = pd.concat([lncap, ind_dummy_matrix], axis=1)
model = LinearRegression(fit_intercept=False)
# 一次对所有的y都做回归
try:
res = model.fit(X, y)
except Exception as e:
pd.isna(y).sum().sum()
pd.isna(X).sum().sum()
for col, se in y.iteritems():
pd.isna(se).sum()
(se == -np.inf).sum()
np.where(se == -np.inf)
np.where(se == np.inf)
print(col)
res = model.fit(X, se)
print('debug')
coef = res.coef_
residue = y - np.dot(X, coef.T)
# 断言语言, 如果为false则触发错误
assert len(datdf.index.difference(residue.index)) == 0
datdf.loc[residue.index, cols_neu] = residue
return datdf
def standardize(data):
"""
标准化:Z-score标准化方法,减去均值,除以标准差
"""
global info_cols, non_processed_factors
datdf = data.copy()
non_deal = info_cols + non_processed_factors
tmp_info_cols = [inf for inf in non_deal if inf in datdf.columns]
facs_to_sta = datdf.columns.difference(set(tmp_info_cols))
dat_sta = np.float64(datdf[facs_to_sta].values)
dat_sta = (dat_sta - np.nanmean(dat_sta, axis=0)) / np.nanstd(dat_sta, axis=0)
datdf.loc[:, facs_to_sta] = dat_sta
return datdf
def process_input_names(factor_names):
if factor_names == 'a':
factor_names = None
else:
factor_names = [f.replace("'", "").replace('"', "") for f in factor_names.split(',')]
return factor_names
# 向现有的月度因子数据中添加一列因子
def add_columns(added_date_path, columns_list, target_date_path):
'''
:param added_date_path: 添加数据的存储位置
:param columns_list: 准备添加的列名
:param target_date_path: 需要被添加的数据存储位置
:return:
'''
toadded_list = os.listdir(added_date_path)
save_list = os.listdir(target_date_path)
if pd.to_datetime(toadded_list[0].split('.')[0]) > pd.to_datetime(save_list[0].split('.')[0]) or \
pd.to_datetime(toadded_list[-1].split('.')[0]) < pd.to_datetime(save_list[-1].split('.')[0]):
print('被添加数据长度不够')
raise Exception
for panel_f in os.listdir(target_date_path):
toadded_dat = pd.read_csv(os.path.join(added_date_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
panel_dat = pd.read_csv(os.path.join(target_date_path, panel_f),
encoding=' | eal_add_list = [col for col in columns_list if col not in panel_dat.columns]
if len(real_add_list) == 0:
continue
# join_axes关键字为沿用那个的index,忽略另一个df的其余数据
panel_dat = pd.concat([panel_dat, toadded_dat[real_add_list]], axis=1, join_axes=[panel_dat.index])
panel_dat.to_csv(os.path.join(target_date_path, panel_f),
encoding='gbk')
print('数据添加完毕')
# 根据给定的日度日期序列和月末日期,找到该序列中该月末日期的月初日期
def getmonthfirstdate(dt, md):
tmp1 = dt[dt.year == md.year]
tmp2 = tmp1[tmp1.month == md.month]
return tmp2[0]
# 得到给定日度时间序列的月末时间list
def get_monthends_series(dt):
if isinstance(dt, pd.DataFrame):
dt = list(dt)
p = 0
med = []
for i in range(len(dt)-1):
mon_t = dt[i].month
mon_n = dt[i+1].month
if mon_t != mon_n:
med.append(dt[i])
p = p + 1
return pd.Series(med)
def simple_func(pd_s, mv, type='median'):
# 市值加权
if type == 'mv_weighted':
tmpp = pd.concat([pd_s, mv], axis=1)
tmpp = tmpp.dropna(axis=0)
pd_s = tmpp[tmpp.columns[0]]
mv = tmpp[tmpp.columns[1]]
mv_weights = mv/np.sum(mv)
v = np.dot(np.mat(pd_s), np.mat(mv_weights).T)
return np.array(v).flatten()
# 中位数
elif type == 'median':
return np.nanmedian(pd_s)
elif type == 'mean':
return np.nanmean(pd_s)
else:
raise Exception
def apply_func(df, mv, type='median'):
# 市值加权
if type == 'mv_weighted':
mv_weights = mv/np.sum(mv)
v = np.dot(np.mat(df), np.mat(mv_weights).T)
return np.array(v).flatten()
# 中位数
elif type == 'median':
return df.median()
else:
raise Exception
def apply_func2(x):
if isinstance(x, str):
try:
x = float(x)
except Exception as e:
x = 0
else:
x
return x
def concat_factor_2(data_path, save_path, classified_df, factor_name, wei_type, save_name):
# 创建文件夹
if not os.path.exists(save_path):
os.makedirs(save_path)
cols = set(list(classified_df[classified_df.columns[0]]))
total_df = pd.DataFrame()
for panel_f in os.listdir(data_path):
print(panel_f)
panel_dat = pd.read_csv(os.path.join(data_path, panel_f),
encoding='gbk', engine='python',
index_col=['code'])
tmp_df = pd.concat([panel_dat[[factor_name, 'MKT_CAP_FLOAT']], classified_df], axis=1, join='inner')
d = datetime.strptime(panel_f.split('.')[0], "%Y-%m-%d")
section_df = pd.DataFrame(index=[d], columns=cols)
grouped = tmp_df.groupby(classified_df.columns[0])
for pla, group in grouped:
group.dropna(how='any', inplace=True)
section_df.loc[d, pla] = simple_func(group[factor_name], mv=group['MKT_CAP_FLOAT'], type='mv_weighted')[0]
total_df = pd.concat([total_df, section_df], axis=0)
if '.' not in save_name:
save_name = save_name + '.csv'
total_df.index.name = 'date'
total_df.to_csv(os.path.join(save_path, save_name), encoding='gbk')
# 做一个累计净值走势图
# prod_total_df = (total_df + 1).cumprod()
# prod_total_df.to_csv(os.path.join(save_path, '累计_'+save_name), encoding='gbk')
# 把一个 截面数据添加到已经有的月度模式存储的文件中
def add_to_panels(dat, panel_path, f_name, freq_in_dat='M'):
"""说明: 把dat依次插入到panel_path的DF中,插入的列名为f_name, 根据dat的类型是DF还是Series可以判断
是每次插入的数据不同还是每次插入相同的数据。"""
print(f'开始添加{f_name}数据到目标文件夹')
panel = os.listdir(panel_path)
for month_date in panel:
hased_dat = pd.read_csv(os.path.join(panel_path, month_date), engine='python')
hased_dat = hased_dat.set_index('Code')
# 输入数据为 DataFrame, 那么按列插入
if isinstance(dat, pd.DataFrame):
mon_str = month_date.split('.')[0]
if mon_str in dat.columns:
# 当dat中的columns也是str格式,且日期与panel一样时,直接添加
hased_dat[f_name] = dat[mon_str]
else:
# 否则,当年、月相同,日不同时,需要变成datetime格式而且还有查找
target = datetime.strptime(mon_str, "%Y-%m-%d")
# 当dat的columns是datetime格式时
if isinstance(dat.columns[0], datetime):
if freq_in_dat == 'M':
finded = None
for col in dat.columns:
if col.year == target.year and col.month == target.month:
finded = col
break
if finded:
hased_dat[f_name] = dat[finded]
else:
print('{}该期未找到对应数据'.format(mon_str))
if freq_in_dat == 'D':
if target in dat.columns:
hased_dat[f_name] = dat[target]
else:
print('{}该期未找到对应数据'.format(mon_str))
else:
print('现有格式的还未完善')
raise Exception
# 输入数据为 DataFrame, 那么按列插入
elif isinstance(dat, pd.Series):
hased_dat[f_name] = dat[hased_dat.index]
try:
hased_dat = hased_dat.reset_index('Code')
except Exception as e:
print('debug')
if 'No' in hased_dat.columns:
del hased_dat['No']
hased_dat.index.name = 'No'
hased_dat.to_csv(os.path.join(panel_path, month_date), encoding='gbk')
print('完毕!')
# 从一个月度panel里面删除某个因子
def del_factor_from_panel(panel_path, factor_name):
print(f'开始从目标文件夹删除{factor_name}因子。')
panel = os.listdir(panel_path)
for month_date in panel:
dat_df = pd.read_csv(os.path.join(panel_path, month_date), engine='python')
dat_df = dat_df.set_index('Code')
if factor_name in dat_df.columns:
del dat_df[factor_name]
dat_df.reset_index(inplace=True)
dat_df.set_index('No', inplace=True)
dat_df.index.name = 'No'
dat_df.to_csv(os.path.join(panel_path, month_date), encoding='gbk')
print(f'完毕。')
return
def rolling_regress_1(y, x, window=5):
try:
rolling_ys = rolling_windows(y, window)
rolling_xs = rolling_windows(x, window)
except Exception as e:
print('debug')
bet = pd.Series()
# enumerate 形成带 i 的一个迭代器
for i, (rolling_x, rolling_y) in enumerate(zip(rolling_xs, rolling_ys)):
tmp_index = y.index[i + window - 1]
try:
model = sm.OLS(rolling_y, rolling_x)
result = model.fit()
params = result.params
b_v = params[0]
# print(result.params)
# print(result.summary())
except:
print(i)
raise
b = pd.Series(index=[tmp_index], data=b_v)
bet = pd.concat([bet, b])
return bet
# 计算不同股指合约的beta值
def compute_future_beta():
# 存储地址为:D:\Datebase_Stock\Date\index\stock_future\sf_beta.csv
data = Data()
sf_close_daily = data.sf_close_daily
index_price_daily = data.index_price_daily.T
# 求一下日期的交集,避免日期不同的潜在问题
tt = list(set(sf_close_daily.columns) & set(index_price_daily.index))
tt.sort()
sf_close_daily = sf_close_daily[tt]
index_price_daily = index_price_daily.loc[tt, :]
sf_beta = pd.DataFrame()
for c, se in sf_close_daily.iterrows():
if 'IC' in c:
tmp_i = index_price_daily['ZZ500']
elif 'IF' in c:
tmp_i = index_price_daily['HS300']
elif 'IH' in c:
tmp_i = index_price_daily['SZ50']
else:
print('Code Bug')
raise ValueError
# 去掉Nan
tmp_c = se.dropna()
tmp_i = tmp_i[tmp_c.index]
if len(tmp_c) > 22:
bet = rolling_regress_1(tmp_i, tmp_c, window=22)
sf_beta = pd.concat([sf_beta, pd.DataFrame({c: bet}).T], axis=0)
p = os.path.join(data_dair, 'index', 'stock_future')
data.save(sf_beta, 'sf_beta', p)
if __name__ == "__main__":
# compute_future_beta()
add_columns
# panel_path = r"D:\pythoncode\IndexEnhancement\因子预处理模块\因子"
# factor_name_list = ['Totaloperatingrevenueps_qoq_qoq']
# for f in factor_name_list:
# del_factor_from_panel(panel_path, f)
# panel_path = r'D:\pythoncode\IndexEnhancement\因子预处理模块\因子'
# add_fs_path = r'D:\pythoncode\IndexEnhancement\因子预处理模块\增加的因子\截面数据'
#
# f_list = os.listdir(add_fs_path)
# for fn in f_list:
# f_name = fn.split('.')[0]
# print(f_name)
# dat = pd.read_csv(os.path.join(add_fs_path, fn), engine='python')
# dat = dat.set_index(dat.columns[0])
# add_to_panels(dat, panel_path, f_name)
| gbk', engine='python',
index_col=['code'])
r | conditional_block |
pplot.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"Post-plotting" utilities that take post-processed performance data
(as provided by cocopf.pproc) and plot them in a variety of ways.
Their counterpart are several scripts in the pptools/ directory, the goal
here is to have these scripts as thin as possible to enable code reuse.
For the same reason, we plot everything into a given Axes object.
See cocopf.pproc for some philosophical thoughts on portfolio analysis
compared to algorithm analysis done by stock COCO.
fig = figure('5-11')
ax = fig.add_subplot(111)
cocopf.pplot.fval_by_budget(ax, pds, dim=5, funcId=11)
fig.show()
"""
import sys
import numpy as np
from pylab import *
sys.path.append('.')
import bbob_pproc as bb
import bbob_pproc.genericsettings
import bbob_pproc.pproc as pp
import bbob_pproc.readalign as ra
class GroupByMedian:
def __call__(self, lst, **kwargs):
return np.median(lst, **kwargs)
def __str__(self):
return 'median'
def _style_thickline(xstyle):
style = { 'linestyle': 'solid', 'zorder': -1, 'linewidth': 6 }
style.update(xstyle)
return style
def _style_algorithm(name, i):
# Automatic colors are fine, no markers used
return { 'linestyle': 'dashed' }
def _style_oracle():
return _style_thickline({ 'color': '#DAFFE4' })
def _style_unifpf():
return _style_thickline({ 'color': '#D0E4FF' })
def _style_strategy(name, i):
if name.startswith('mUNIF'):
return _style_thickline({ 'color': 'wheat' })
styles = bb.genericsettings.line_styles
style = styles[i % len(styles)].copy()
del style['linestyle']
style['markersize'] = 12.
style['markeredgewidth'] = 1.5
style['markerfacecolor'] = 'None'
style['markeredgecolor'] = style['color']
style['linestyle'] = 'solid'
style['zorder'] = 1
style['linewidth'] = 2
return style
def _pds_plot_iterator(pds, dim, funcId):
"""
An iterator that will in turn yield all drawable curves
in the form of (kind, name, ds, style) tuples (where kind
is one of 'algorithm', 'oracle', 'unifpf', 'strategy').
"""
i = 0
for (algname, ds) in pds.algds_dimfunc((dim, funcId)):
yield ('algorithm', algname, ds, _style_algorithm(algname, i))
i += 1
yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())
yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())
i = 0
for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):
yield ('strategy', stratname, ds, _style_strategy(stratname, i))
i += 1
def legend(obj, ncol=3, **kwargs):
"""
Show a legend. obj can be an Axes or Figure (in that case, also pass
handles and labels arguments).
"""
# Font size handling here is a bit weird. We specify fontsize=6
# in legend constructor since that affects spacing. However, we
# need to manually override with 'small' later, because the original
# specification did not take effect on whole-figure legends (and for
# actual text, 6 is a wee bit small). We get a specific cramped
# appearance and correct behavior for whole-figure legends this way.
l = obj.legend(ncol=ncol, fancybox=True, markerscale=0.66, fontsize=6, **kwargs)
plt.setp(l.get_texts(), fontsize='small')
def _fval_label(baseline_ds, baseline_label, groupby):
groupby = groupby.title()
if baseline_ds:
if baseline_label:
return groupby+' Function Values Regr. Rel. To ' + baseline_label
else:
return groupby+' Function Values (Rel. Regression)'
else:
return groupby+' Best Function Values'
def fval_by_budget(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
"""
Plot a classic "convergence plot" that shows how the function value
approaches optimum as time passes, in terms of raw performance.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
By default, raw function values (as difference to optimum) are shown,
but relative values to some baseline dataset can be shown instead.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
if baseline_ds:
|
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
budgets = ds.funvals[:, 0]
funvals = groupby(ds.funvals[:, 1:], axis=1)
# Throw away funvals after ftarget reached
try:
limit = np.nonzero(funvals < 10**-8)[0][0] + 1
except IndexError:
limit = np.size(budgets)+1
budgets = budgets[:limit]
funvals = funvals[:limit]
fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))
if baseline_ds:
# Relativize by baseline
fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))
budgets = fvba[:, 0]
funvals = fvba[:, 1] / fvba[:, 2]
style['markevery'] = 16
ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)
if baseline_ds:
ax.set_yticks([1], minor=True)
ax.set_xlabel('Budget')
ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))
ax.grid()
if baseline_ds:
ax.yaxis.grid(True, which = 'minor')
def rank_by_budget(ax, pds, dim=None, funcId=None, groupby=None):
"""
Plot each algorithm/method's rank evolving as budget increases.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
Note that funcId may be an array of id numbers; in that case,
an average rank over listed functions is taken.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
try: # funcId is array?
# _pds_plot_iterator[] uses funcId only for things we don't care for
fakeFuncId = funcId[0]
manyranking = np.array([pds.ranking((dim, i), groupby) for i in funcId])
rankcount = np.shape(manyranking[0])[1] - 1
amanyranking = ra.alignArrayData(ra.VArrayMultiReader(manyranking))
budget = amanyranking[:,0]
rankings = np.hsplit(amanyranking[:,1:], len(funcId))
avgranking = np.average(rankings, axis=0)
ranking = np.vstack([budget, avgranking.T]).T
except TypeError: # funcId is scalar
fakeFuncId = funcId
ranking = pds.ranking((dim, funcId), groupby)
i = 0
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, fakeFuncId):
if kind != 'algorithm' and kind != 'strategy':
continue
#print name, ds
budgets = ranking[:,0]
ranks = ranking[:,1+i]
style['markevery'] = 64
ax.plot(budgets, ranks, label=name, **style)
i += 1
ax.set_xlabel('Budget')
ax.set_ylabel('Rank by '+str(groupby).title()+' Function Value')
ax.set_xscale('log', basex=pfsize)
ax.grid()
def _evals_label(baseline_ds, baseline_label, groupby):
groupby = groupby.title()
if baseline_ds:
if baseline_label:
return groupby+' Eval.# Slowdown Rel. to ' + baseline_label
else:
return groupby+' Relative Eval.# Slowdown'
else:
return groupby+' Absolute Eval.#'
def evals_by_target(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
"""
Plot a rotated convergence plot. It is essentially like fval_by_budget(),
but rotated by 90 degrees, showing how big budget is required to reach
every target.
While this is a little less intuitive at first, it allows better judgement
of performance impact of each strategy. With fval_by_budget(), performance
change is represented by a curve phase shift, while in evals_by_target(),
it simply translates position on the y axis.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
By default, absolute evaluations count is shown, but relative values to
some baseline dataset can be shown instead.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline_ds:
baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs = groupby(ds.detEvals(targets), axis=1)
if baseline_ds:
fevs /= baseline_fevs
style['markevery'] = 64
ax.loglog(targets, fevs, label=name, basey=pfsize, **style)
ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))
if baseline_ds:
ax.set_yticks([2, 3.5], minor=True)
ax.set_xlabel('Function Value Targets')
ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))
ax.grid()
if baseline_ds:
ax.yaxis.grid(True, which = 'minor')
def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label="", baseline2_ds=None, baseline2_label="", dim=None, funcId=None, groupby=None):
"""
Plot the evolution of relative #evaluations for a target based on
increasing absolute #evaluations. In other words, for each absolute
number of evaluations, determine the target reached and show how faster
did baseline reach it.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
It's not clear whether this will eventually be useful at all, but it
offers another perspective that might aid some analysis.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline1_ds:
baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))
if baseline2_ds:
baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs1 = groupby(ds.detEvals(targets), axis=1)
if baseline1_ds:
fevs1 /= baseline1_fevs
fevs2 = groupby(ds.detEvals(targets), axis=1)
if baseline2_ds:
fevs2 /= baseline2_fevs
infsx = np.nonzero(fevs1 == inf)
infs = infsx[0]
if np.size(infs) > 0:
#print infs
fevs1 = fevs1[:infs[0]-1]
fevs2 = fevs2[:infs[0]-1]
#print name, fevs1, fevs2
style['markevery'] = 64
ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)
ax.grid()
ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1
ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))
ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))
| baseline_budgets = baseline_ds.funvals[:, 0]
baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)
baseline_safefunvals = np.maximum(baseline_funvals, 10**-8) # eschew zeros
# fvb is matrix with each row being [budget,funval]
baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals])) | conditional_block |
pplot.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"Post-plotting" utilities that take post-processed performance data
(as provided by cocopf.pproc) and plot them in a variety of ways.
Their counterpart are several scripts in the pptools/ directory, the goal
here is to have these scripts as thin as possible to enable code reuse.
For the same reason, we plot everything into a given Axes object.
See cocopf.pproc for some philosophical thoughts on portfolio analysis
compared to algorithm analysis done by stock COCO.
fig = figure('5-11')
ax = fig.add_subplot(111)
cocopf.pplot.fval_by_budget(ax, pds, dim=5, funcId=11)
fig.show()
"""
import sys
import numpy as np
from pylab import *
sys.path.append('.')
import bbob_pproc as bb
import bbob_pproc.genericsettings
import bbob_pproc.pproc as pp
import bbob_pproc.readalign as ra
class GroupByMedian:
def __call__(self, lst, **kwargs):
return np.median(lst, **kwargs)
def __str__(self):
return 'median'
def _style_thickline(xstyle):
style = { 'linestyle': 'solid', 'zorder': -1, 'linewidth': 6 }
style.update(xstyle)
return style
def _style_algorithm(name, i):
# Automatic colors are fine, no markers used
return { 'linestyle': 'dashed' }
def _style_oracle():
return _style_thickline({ 'color': '#DAFFE4' })
def _style_unifpf():
return _style_thickline({ 'color': '#D0E4FF' })
def _style_strategy(name, i):
if name.startswith('mUNIF'):
return _style_thickline({ 'color': 'wheat' }) | style = styles[i % len(styles)].copy()
del style['linestyle']
style['markersize'] = 12.
style['markeredgewidth'] = 1.5
style['markerfacecolor'] = 'None'
style['markeredgecolor'] = style['color']
style['linestyle'] = 'solid'
style['zorder'] = 1
style['linewidth'] = 2
return style
def _pds_plot_iterator(pds, dim, funcId):
"""
An iterator that will in turn yield all drawable curves
in the form of (kind, name, ds, style) tuples (where kind
is one of 'algorithm', 'oracle', 'unifpf', 'strategy').
"""
i = 0
for (algname, ds) in pds.algds_dimfunc((dim, funcId)):
yield ('algorithm', algname, ds, _style_algorithm(algname, i))
i += 1
yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())
yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())
i = 0
for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):
yield ('strategy', stratname, ds, _style_strategy(stratname, i))
i += 1
def legend(obj, ncol=3, **kwargs):
"""
Show a legend. obj can be an Axes or Figure (in that case, also pass
handles and labels arguments).
"""
# Font size handling here is a bit weird. We specify fontsize=6
# in legend constructor since that affects spacing. However, we
# need to manually override with 'small' later, because the original
# specification did not take effect on whole-figure legends (and for
# actual text, 6 is a wee bit small). We get a specific cramped
# appearance and correct behavior for whole-figure legends this way.
l = obj.legend(ncol=ncol, fancybox=True, markerscale=0.66, fontsize=6, **kwargs)
plt.setp(l.get_texts(), fontsize='small')
def _fval_label(baseline_ds, baseline_label, groupby):
groupby = groupby.title()
if baseline_ds:
if baseline_label:
return groupby+' Function Values Regr. Rel. To ' + baseline_label
else:
return groupby+' Function Values (Rel. Regression)'
else:
return groupby+' Best Function Values'
def fval_by_budget(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
"""
Plot a classic "convergence plot" that shows how the function value
approaches optimum as time passes, in terms of raw performance.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
By default, raw function values (as difference to optimum) are shown,
but relative values to some baseline dataset can be shown instead.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
if baseline_ds:
baseline_budgets = baseline_ds.funvals[:, 0]
baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)
baseline_safefunvals = np.maximum(baseline_funvals, 10**-8) # eschew zeros
# fvb is matrix with each row being [budget,funval]
baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals]))
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
budgets = ds.funvals[:, 0]
funvals = groupby(ds.funvals[:, 1:], axis=1)
# Throw away funvals after ftarget reached
try:
limit = np.nonzero(funvals < 10**-8)[0][0] + 1
except IndexError:
limit = np.size(budgets)+1
budgets = budgets[:limit]
funvals = funvals[:limit]
fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))
if baseline_ds:
# Relativize by baseline
fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))
budgets = fvba[:, 0]
funvals = fvba[:, 1] / fvba[:, 2]
style['markevery'] = 16
ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)
if baseline_ds:
ax.set_yticks([1], minor=True)
ax.set_xlabel('Budget')
ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))
ax.grid()
if baseline_ds:
ax.yaxis.grid(True, which = 'minor')
def rank_by_budget(ax, pds, dim=None, funcId=None, groupby=None):
"""
Plot each algorithm/method's rank evolving as budget increases.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
Note that funcId may be an array of id numbers; in that case,
an average rank over listed functions is taken.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
try: # funcId is array?
# _pds_plot_iterator[] uses funcId only for things we don't care for
fakeFuncId = funcId[0]
manyranking = np.array([pds.ranking((dim, i), groupby) for i in funcId])
rankcount = np.shape(manyranking[0])[1] - 1
amanyranking = ra.alignArrayData(ra.VArrayMultiReader(manyranking))
budget = amanyranking[:,0]
rankings = np.hsplit(amanyranking[:,1:], len(funcId))
avgranking = np.average(rankings, axis=0)
ranking = np.vstack([budget, avgranking.T]).T
except TypeError: # funcId is scalar
fakeFuncId = funcId
ranking = pds.ranking((dim, funcId), groupby)
i = 0
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, fakeFuncId):
if kind != 'algorithm' and kind != 'strategy':
continue
#print name, ds
budgets = ranking[:,0]
ranks = ranking[:,1+i]
style['markevery'] = 64
ax.plot(budgets, ranks, label=name, **style)
i += 1
ax.set_xlabel('Budget')
ax.set_ylabel('Rank by '+str(groupby).title()+' Function Value')
ax.set_xscale('log', basex=pfsize)
ax.grid()
def _evals_label(baseline_ds, baseline_label, groupby):
groupby = groupby.title()
if baseline_ds:
if baseline_label:
return groupby+' Eval.# Slowdown Rel. to ' + baseline_label
else:
return groupby+' Relative Eval.# Slowdown'
else:
return groupby+' Absolute Eval.#'
def evals_by_target(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
"""
Plot a rotated convergence plot. It is essentially like fval_by_budget(),
but rotated by 90 degrees, showing how big budget is required to reach
every target.
While this is a little less intuitive at first, it allows better judgement
of performance impact of each strategy. With fval_by_budget(), performance
change is represented by a curve phase shift, while in evals_by_target(),
it simply translates position on the y axis.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
By default, absolute evaluations count is shown, but relative values to
some baseline dataset can be shown instead.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline_ds:
baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs = groupby(ds.detEvals(targets), axis=1)
if baseline_ds:
fevs /= baseline_fevs
style['markevery'] = 64
ax.loglog(targets, fevs, label=name, basey=pfsize, **style)
ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))
if baseline_ds:
ax.set_yticks([2, 3.5], minor=True)
ax.set_xlabel('Function Value Targets')
ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))
ax.grid()
if baseline_ds:
ax.yaxis.grid(True, which = 'minor')
def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label="", baseline2_ds=None, baseline2_label="", dim=None, funcId=None, groupby=None):
"""
Plot the evolution of relative #evaluations for a target based on
increasing absolute #evaluations. In other words, for each absolute
number of evaluations, determine the target reached and show how faster
did baseline reach it.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
It's not clear whether this will eventually be useful at all, but it
offers another perspective that might aid some analysis.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline1_ds:
baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))
if baseline2_ds:
baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs1 = groupby(ds.detEvals(targets), axis=1)
if baseline1_ds:
fevs1 /= baseline1_fevs
fevs2 = groupby(ds.detEvals(targets), axis=1)
if baseline2_ds:
fevs2 /= baseline2_fevs
infsx = np.nonzero(fevs1 == inf)
infs = infsx[0]
if np.size(infs) > 0:
#print infs
fevs1 = fevs1[:infs[0]-1]
fevs2 = fevs2[:infs[0]-1]
#print name, fevs1, fevs2
style['markevery'] = 64
ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)
ax.grid()
ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1
ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))
ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby))) |
styles = bb.genericsettings.line_styles | random_line_split |
pplot.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"Post-plotting" utilities that take post-processed performance data
(as provided by cocopf.pproc) and plot them in a variety of ways.
Their counterpart are several scripts in the pptools/ directory, the goal
here is to have these scripts as thin as possible to enable code reuse.
For the same reason, we plot everything into a given Axes object.
See cocopf.pproc for some philosophical thoughts on portfolio analysis
compared to algorithm analysis done by stock COCO.
fig = figure('5-11')
ax = fig.add_subplot(111)
cocopf.pplot.fval_by_budget(ax, pds, dim=5, funcId=11)
fig.show()
"""
import sys
import numpy as np
from pylab import *
sys.path.append('.')
import bbob_pproc as bb
import bbob_pproc.genericsettings
import bbob_pproc.pproc as pp
import bbob_pproc.readalign as ra
class GroupByMedian:
def | (self, lst, **kwargs):
return np.median(lst, **kwargs)
def __str__(self):
return 'median'
def _style_thickline(xstyle):
style = { 'linestyle': 'solid', 'zorder': -1, 'linewidth': 6 }
style.update(xstyle)
return style
def _style_algorithm(name, i):
# Automatic colors are fine, no markers used
return { 'linestyle': 'dashed' }
def _style_oracle():
return _style_thickline({ 'color': '#DAFFE4' })
def _style_unifpf():
return _style_thickline({ 'color': '#D0E4FF' })
def _style_strategy(name, i):
if name.startswith('mUNIF'):
return _style_thickline({ 'color': 'wheat' })
styles = bb.genericsettings.line_styles
style = styles[i % len(styles)].copy()
del style['linestyle']
style['markersize'] = 12.
style['markeredgewidth'] = 1.5
style['markerfacecolor'] = 'None'
style['markeredgecolor'] = style['color']
style['linestyle'] = 'solid'
style['zorder'] = 1
style['linewidth'] = 2
return style
def _pds_plot_iterator(pds, dim, funcId):
"""
An iterator that will in turn yield all drawable curves
in the form of (kind, name, ds, style) tuples (where kind
is one of 'algorithm', 'oracle', 'unifpf', 'strategy').
"""
i = 0
for (algname, ds) in pds.algds_dimfunc((dim, funcId)):
yield ('algorithm', algname, ds, _style_algorithm(algname, i))
i += 1
yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())
yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())
i = 0
for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):
yield ('strategy', stratname, ds, _style_strategy(stratname, i))
i += 1
def legend(obj, ncol=3, **kwargs):
"""
Show a legend. obj can be an Axes or Figure (in that case, also pass
handles and labels arguments).
"""
# Font size handling here is a bit weird. We specify fontsize=6
# in legend constructor since that affects spacing. However, we
# need to manually override with 'small' later, because the original
# specification did not take effect on whole-figure legends (and for
# actual text, 6 is a wee bit small). We get a specific cramped
# appearance and correct behavior for whole-figure legends this way.
l = obj.legend(ncol=ncol, fancybox=True, markerscale=0.66, fontsize=6, **kwargs)
plt.setp(l.get_texts(), fontsize='small')
def _fval_label(baseline_ds, baseline_label, groupby):
groupby = groupby.title()
if baseline_ds:
if baseline_label:
return groupby+' Function Values Regr. Rel. To ' + baseline_label
else:
return groupby+' Function Values (Rel. Regression)'
else:
return groupby+' Best Function Values'
def fval_by_budget(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
"""
Plot a classic "convergence plot" that shows how the function value
approaches optimum as time passes, in terms of raw performance.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
By default, raw function values (as difference to optimum) are shown,
but relative values to some baseline dataset can be shown instead.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
if baseline_ds:
baseline_budgets = baseline_ds.funvals[:, 0]
baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)
baseline_safefunvals = np.maximum(baseline_funvals, 10**-8) # eschew zeros
# fvb is matrix with each row being [budget,funval]
baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals]))
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
budgets = ds.funvals[:, 0]
funvals = groupby(ds.funvals[:, 1:], axis=1)
# Throw away funvals after ftarget reached
try:
limit = np.nonzero(funvals < 10**-8)[0][0] + 1
except IndexError:
limit = np.size(budgets)+1
budgets = budgets[:limit]
funvals = funvals[:limit]
fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))
if baseline_ds:
# Relativize by baseline
fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))
budgets = fvba[:, 0]
funvals = fvba[:, 1] / fvba[:, 2]
style['markevery'] = 16
ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)
if baseline_ds:
ax.set_yticks([1], minor=True)
ax.set_xlabel('Budget')
ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))
ax.grid()
if baseline_ds:
ax.yaxis.grid(True, which = 'minor')
def rank_by_budget(ax, pds, dim=None, funcId=None, groupby=None):
"""
Plot each algorithm/method's rank evolving as budget increases.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
Note that funcId may be an array of id numbers; in that case,
an average rank over listed functions is taken.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
try: # funcId is array?
# _pds_plot_iterator[] uses funcId only for things we don't care for
fakeFuncId = funcId[0]
manyranking = np.array([pds.ranking((dim, i), groupby) for i in funcId])
rankcount = np.shape(manyranking[0])[1] - 1
amanyranking = ra.alignArrayData(ra.VArrayMultiReader(manyranking))
budget = amanyranking[:,0]
rankings = np.hsplit(amanyranking[:,1:], len(funcId))
avgranking = np.average(rankings, axis=0)
ranking = np.vstack([budget, avgranking.T]).T
except TypeError: # funcId is scalar
fakeFuncId = funcId
ranking = pds.ranking((dim, funcId), groupby)
i = 0
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, fakeFuncId):
if kind != 'algorithm' and kind != 'strategy':
continue
#print name, ds
budgets = ranking[:,0]
ranks = ranking[:,1+i]
style['markevery'] = 64
ax.plot(budgets, ranks, label=name, **style)
i += 1
ax.set_xlabel('Budget')
ax.set_ylabel('Rank by '+str(groupby).title()+' Function Value')
ax.set_xscale('log', basex=pfsize)
ax.grid()
def _evals_label(baseline_ds, baseline_label, groupby):
groupby = groupby.title()
if baseline_ds:
if baseline_label:
return groupby+' Eval.# Slowdown Rel. to ' + baseline_label
else:
return groupby+' Relative Eval.# Slowdown'
else:
return groupby+' Absolute Eval.#'
def evals_by_target(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
"""
Plot a rotated convergence plot. It is essentially like fval_by_budget(),
but rotated by 90 degrees, showing how big budget is required to reach
every target.
While this is a little less intuitive at first, it allows better judgement
of performance impact of each strategy. With fval_by_budget(), performance
change is represented by a curve phase shift, while in evals_by_target(),
it simply translates position on the y axis.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
By default, absolute evaluations count is shown, but relative values to
some baseline dataset can be shown instead.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline_ds:
baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs = groupby(ds.detEvals(targets), axis=1)
if baseline_ds:
fevs /= baseline_fevs
style['markevery'] = 64
ax.loglog(targets, fevs, label=name, basey=pfsize, **style)
ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))
if baseline_ds:
ax.set_yticks([2, 3.5], minor=True)
ax.set_xlabel('Function Value Targets')
ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))
ax.grid()
if baseline_ds:
ax.yaxis.grid(True, which = 'minor')
def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label="", baseline2_ds=None, baseline2_label="", dim=None, funcId=None, groupby=None):
"""
Plot the evolution of relative #evaluations for a target based on
increasing absolute #evaluations. In other words, for each absolute
number of evaluations, determine the target reached and show how faster
did baseline reach it.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
It's not clear whether this will eventually be useful at all, but it
offers another perspective that might aid some analysis.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline1_ds:
baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))
if baseline2_ds:
baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs1 = groupby(ds.detEvals(targets), axis=1)
if baseline1_ds:
fevs1 /= baseline1_fevs
fevs2 = groupby(ds.detEvals(targets), axis=1)
if baseline2_ds:
fevs2 /= baseline2_fevs
infsx = np.nonzero(fevs1 == inf)
infs = infsx[0]
if np.size(infs) > 0:
#print infs
fevs1 = fevs1[:infs[0]-1]
fevs2 = fevs2[:infs[0]-1]
#print name, fevs1, fevs2
style['markevery'] = 64
ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)
ax.grid()
ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1
ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))
ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))
| __call__ | identifier_name |
pplot.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"Post-plotting" utilities that take post-processed performance data
(as provided by cocopf.pproc) and plot them in a variety of ways.
Their counterpart are several scripts in the pptools/ directory, the goal
here is to have these scripts as thin as possible to enable code reuse.
For the same reason, we plot everything into a given Axes object.
See cocopf.pproc for some philosophical thoughts on portfolio analysis
compared to algorithm analysis done by stock COCO.
fig = figure('5-11')
ax = fig.add_subplot(111)
cocopf.pplot.fval_by_budget(ax, pds, dim=5, funcId=11)
fig.show()
"""
import sys
import numpy as np
from pylab import *
sys.path.append('.')
import bbob_pproc as bb
import bbob_pproc.genericsettings
import bbob_pproc.pproc as pp
import bbob_pproc.readalign as ra
class GroupByMedian:
def __call__(self, lst, **kwargs):
return np.median(lst, **kwargs)
def __str__(self):
return 'median'
def _style_thickline(xstyle):
style = { 'linestyle': 'solid', 'zorder': -1, 'linewidth': 6 }
style.update(xstyle)
return style
def _style_algorithm(name, i):
# Automatic colors are fine, no markers used
return { 'linestyle': 'dashed' }
def _style_oracle():
return _style_thickline({ 'color': '#DAFFE4' })
def _style_unifpf():
return _style_thickline({ 'color': '#D0E4FF' })
def _style_strategy(name, i):
if name.startswith('mUNIF'):
return _style_thickline({ 'color': 'wheat' })
styles = bb.genericsettings.line_styles
style = styles[i % len(styles)].copy()
del style['linestyle']
style['markersize'] = 12.
style['markeredgewidth'] = 1.5
style['markerfacecolor'] = 'None'
style['markeredgecolor'] = style['color']
style['linestyle'] = 'solid'
style['zorder'] = 1
style['linewidth'] = 2
return style
def _pds_plot_iterator(pds, dim, funcId):
|
def legend(obj, ncol=3, **kwargs):
"""
Show a legend. obj can be an Axes or Figure (in that case, also pass
handles and labels arguments).
"""
# Font size handling here is a bit weird. We specify fontsize=6
# in legend constructor since that affects spacing. However, we
# need to manually override with 'small' later, because the original
# specification did not take effect on whole-figure legends (and for
# actual text, 6 is a wee bit small). We get a specific cramped
# appearance and correct behavior for whole-figure legends this way.
l = obj.legend(ncol=ncol, fancybox=True, markerscale=0.66, fontsize=6, **kwargs)
plt.setp(l.get_texts(), fontsize='small')
def _fval_label(baseline_ds, baseline_label, groupby):
groupby = groupby.title()
if baseline_ds:
if baseline_label:
return groupby+' Function Values Regr. Rel. To ' + baseline_label
else:
return groupby+' Function Values (Rel. Regression)'
else:
return groupby+' Best Function Values'
def fval_by_budget(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
"""
Plot a classic "convergence plot" that shows how the function value
approaches optimum as time passes, in terms of raw performance.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
By default, raw function values (as difference to optimum) are shown,
but relative values to some baseline dataset can be shown instead.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
if baseline_ds:
baseline_budgets = baseline_ds.funvals[:, 0]
baseline_funvals = groupby(baseline_ds.funvals[:, 1:], axis=1)
baseline_safefunvals = np.maximum(baseline_funvals, 10**-8) # eschew zeros
# fvb is matrix with each row being [budget,funval]
baseline_fvb = np.transpose(np.vstack([baseline_budgets, baseline_safefunvals]))
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
budgets = ds.funvals[:, 0]
funvals = groupby(ds.funvals[:, 1:], axis=1)
# Throw away funvals after ftarget reached
try:
limit = np.nonzero(funvals < 10**-8)[0][0] + 1
except IndexError:
limit = np.size(budgets)+1
budgets = budgets[:limit]
funvals = funvals[:limit]
fvb = np.transpose(np.vstack([budgets[:limit], funvals[:limit]]))
if baseline_ds:
# Relativize by baseline
fvba = ra.alignArrayData(ra.VArrayMultiReader([fvb, baseline_fvb]))
budgets = fvba[:, 0]
funvals = fvba[:, 1] / fvba[:, 2]
style['markevery'] = 16
ax.loglog(budgets, funvals, label=name, basex=pfsize, **style)
if baseline_ds:
ax.set_yticks([1], minor=True)
ax.set_xlabel('Budget')
ax.set_ylabel(_fval_label(baseline_ds, baseline_label, str(groupby)))
ax.grid()
if baseline_ds:
ax.yaxis.grid(True, which = 'minor')
def rank_by_budget(ax, pds, dim=None, funcId=None, groupby=None):
"""
Plot each algorithm/method's rank evolving as budget increases.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
Note that funcId may be an array of id numbers; in that case,
an average rank over listed functions is taken.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
try: # funcId is array?
# _pds_plot_iterator[] uses funcId only for things we don't care for
fakeFuncId = funcId[0]
manyranking = np.array([pds.ranking((dim, i), groupby) for i in funcId])
rankcount = np.shape(manyranking[0])[1] - 1
amanyranking = ra.alignArrayData(ra.VArrayMultiReader(manyranking))
budget = amanyranking[:,0]
rankings = np.hsplit(amanyranking[:,1:], len(funcId))
avgranking = np.average(rankings, axis=0)
ranking = np.vstack([budget, avgranking.T]).T
except TypeError: # funcId is scalar
fakeFuncId = funcId
ranking = pds.ranking((dim, funcId), groupby)
i = 0
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, fakeFuncId):
if kind != 'algorithm' and kind != 'strategy':
continue
#print name, ds
budgets = ranking[:,0]
ranks = ranking[:,1+i]
style['markevery'] = 64
ax.plot(budgets, ranks, label=name, **style)
i += 1
ax.set_xlabel('Budget')
ax.set_ylabel('Rank by '+str(groupby).title()+' Function Value')
ax.set_xscale('log', basex=pfsize)
ax.grid()
def _evals_label(baseline_ds, baseline_label, groupby):
groupby = groupby.title()
if baseline_ds:
if baseline_label:
return groupby+' Eval.# Slowdown Rel. to ' + baseline_label
else:
return groupby+' Relative Eval.# Slowdown'
else:
return groupby+' Absolute Eval.#'
def evals_by_target(ax, pds, baseline_ds=None, baseline_label="", dim=None, funcId=None, groupby=None):
"""
Plot a rotated convergence plot. It is essentially like fval_by_budget(),
but rotated by 90 degrees, showing how big budget is required to reach
every target.
While this is a little less intuitive at first, it allows better judgement
of performance impact of each strategy. With fval_by_budget(), performance
change is represented by a curve phase shift, while in evals_by_target(),
it simply translates position on the y axis.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
By default, absolute evaluations count is shown, but relative values to
some baseline dataset can be shown instead.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline_ds:
baseline_fevs = groupby(baseline_ds.detEvals(targets), axis=1)
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs = groupby(ds.detEvals(targets), axis=1)
if baseline_ds:
fevs /= baseline_fevs
style['markevery'] = 64
ax.loglog(targets, fevs, label=name, basey=pfsize, **style)
ax.set_xlim(10**2, 10**(np.log10(targets[-1])-0.2))
if baseline_ds:
ax.set_yticks([2, 3.5], minor=True)
ax.set_xlabel('Function Value Targets')
ax.set_ylabel(_evals_label(baseline_ds, baseline_label, str(groupby)))
ax.grid()
if baseline_ds:
ax.yaxis.grid(True, which = 'minor')
def evals_by_evals(ax, pds, baseline1_ds=None, baseline1_label="", baseline2_ds=None, baseline2_label="", dim=None, funcId=None, groupby=None):
"""
Plot the evolution of relative #evaluations for a target based on
increasing absolute #evaluations. In other words, for each absolute
number of evaluations, determine the target reached and show how faster
did baseline reach it.
groupby is the method of aggregating results of multiple instances --
a callable, stringable object, GroupByMedian by default.
It's not clear whether this will eventually be useful at all, but it
offers another perspective that might aid some analysis.
"""
if groupby is None: groupby = GroupByMedian()
pfsize = len(pds.algds.keys())
runlengths = 10**np.linspace(0, np.log10(pds.maxevals((dim, funcId))), num=500)
target_values = pp.RunlengthBasedTargetValues(runlengths,
reference_data=pds.bestalg(None), force_different_targets_factor=10**0.004)
targets = target_values((funcId, dim))
if baseline1_ds:
baseline1_fevs = np.array(groupby(baseline1_ds.detEvals(targets), axis=1))
if baseline2_ds:
baseline2_fevs = np.array(groupby(baseline2_ds.detEvals(targets), axis=1))
for (kind, name, ds, style) in _pds_plot_iterator(pds, dim, funcId):
#print name, ds
fevs1 = groupby(ds.detEvals(targets), axis=1)
if baseline1_ds:
fevs1 /= baseline1_fevs
fevs2 = groupby(ds.detEvals(targets), axis=1)
if baseline2_ds:
fevs2 /= baseline2_fevs
infsx = np.nonzero(fevs1 == inf)
infs = infsx[0]
if np.size(infs) > 0:
#print infs
fevs1 = fevs1[:infs[0]-1]
fevs2 = fevs2[:infs[0]-1]
#print name, fevs1, fevs2
style['markevery'] = 64
ax.loglog(fevs2, fevs1, label=name, basex=pfsize, basey=pfsize, **style)
ax.grid()
ax.set_xlim(0, runlengths[-1] * pfsize) # i.e. log(runlengths) + 1
ax.set_ylabel('Per-target ' + _evals_label(baseline1_ds, baseline1_label, str(groupby)))
ax.set_xlabel('Per-target ' + _evals_label(baseline2_ds, baseline2_label, str(groupby)))
| """
An iterator that will in turn yield all drawable curves
in the form of (kind, name, ds, style) tuples (where kind
is one of 'algorithm', 'oracle', 'unifpf', 'strategy').
"""
i = 0
for (algname, ds) in pds.algds_dimfunc((dim, funcId)):
yield ('algorithm', algname, ds, _style_algorithm(algname, i))
i += 1
yield ('oracle', 'oracle', pds.oracle((dim, funcId)), _style_oracle())
yield ('unifpf', 'eUNIF', pds.unifpf().dictByDimFunc()[dim][funcId][0], _style_unifpf())
i = 0
for (stratname, ds) in pds.stratds_dimfunc((dim, funcId)):
yield ('strategy', stratname, ds, _style_strategy(stratname, i))
i += 1 | identifier_body |
parser.go | <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/transitional.dtd">
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Source file /src/pkg/exp/datafmt/parser.go</title>
<link rel="stylesheet" type="text/css" href="../../../../doc/style.css">
<script type="text/javascript" src="../../../../doc/godocs.js"></script>
</head>
<body>
<script>
// Catch 'enter' key down events and trigger the search form submission.
function codesearchKeyDown(event) {
if (event.which == 13) {
var form = document.getElementById('codesearch');
var query = document.getElementById('codesearchQuery');
form.q.value = "lang:go package:go.googlecode.com " + query.value;
document.getElementById('codesearch').submit();
} return true;
}
// Capture the submission event and construct the query parameter.
function codeSearchSubmit() {
var query = document.getElementById('codesearchQuery');
var form = document.getElementById('codesearch');
form.q.value = "lang:go package:go.googlecode.com " + query.value;
return true;
} </script>
<div id="topnav">
<table summary="">
<tr>
<td id="headerImage">
<a href="../../../../index.html"><img src="../../../../doc/logo-153x55.png" height="55" width="153" alt="Go Home Page" style="border:0" /></a>
</td>
<td>
<div id="headerDocSetTitle">The Go Programming Language</div>
</td>
<td>
<!-- <table>
<tr>
<td>
<! The input box is outside of the form because we want to add
a couple of restricts to the query before submitting. If we just
add the restricts to the text box before submitting, then they
appear in the box when the user presses 'back'. Thus we use a
hidden field in the form. However, there's no way to stop the
non-hidden text box from also submitting a value unless we move
it outside of the form
<input type="search" id="codesearchQuery" value="" size="30" onkeydown="return codesearchKeyDown(event);"/>
<form method="GET" action="http://www.google.com/codesearch" id="codesearch" class="search" onsubmit="return codeSearchSubmit();" style="display:inline;">
<input type="hidden" name="q" value=""/>
<input type="submit" value="Code search" />
<span style="color: red">(TODO: remove for now?)</span>
</form>
</td>
</tr>
<tr>
<td>
<span style="color: gray;">(e.g. “pem” or “xml”)</span>
</td>
</tr>
</table> -->
</td>
</tr>
</table>
</div>
<div id="linkList">
<ul>
<li class="navhead"><a href="../../../../index.html">Home</a></li>
<li class="blank"> </li>
<li class="navhead">Documents</li>
<li><a href="../../../../doc/go_tutorial.html">Tutorial</a></li>
<li><a href="../../../../doc/effective_go.html">Effective Go</a></li>
<li><a href="../../../../doc/go_faq.html">FAQ</a></li>
<li><a href="../../../../doc/go_lang_faq.html">Language Design FAQ</a></li>
<li><a href="http://www.youtube.com/watch?v=rKnDgT73v8s">Tech talk (1 hour)</a> (<a href="../../../../doc/go_talk-20091030.pdf">PDF</a>)</li>
<li><a href="../../../../doc/go_spec.html">Language Specification</a></li>
<li><a href="../../../../doc/go_mem.html">Memory Model</a></li>
<li><a href="../../../../doc/go_for_cpp_programmers.html">Go for C++ Programmers</a></li>
<li class="blank"> </li>
<li class="navhead">How To</li>
<li><a href="../../../../doc/install.html">Install Go</a></li>
<li><a href="../../../../doc/contribute.html">Contribute code</a></li>
<li class="blank"> </li>
<li class="navhead">Programming</li>
<li><a href="../../../../cmd/index.html">Command documentation</a></li>
<li><a href="../../../../pkg/index.html">Package documentation</a></li>
<li><a href="../../../index.html">Source files</a></li>
<li class="blank"> </li>
<li class="navhead">Help</li>
<li>#go-nuts on irc.freenode.net</li>
<li><a href="http://groups.google.com/group/golang-nuts">Go Nuts mailing list</a></li>
<li><a href="http://code.google.com/p/go/issues/list">Issue tracker</a></li>
<li class="blank"> </li>
<li class="navhead">Go code search</li>
<form method="GET" action="http://golang.org/search" class="search">
<input type="search" name="q" value="" size="25" style="width:80%; max-width:200px" />
<input type="submit" value="Go" />
</form>
<li class="blank"> </li>
<li class="navhead">Last update</li>
<li>Thu Nov 12 15:48:37 PST 2009</li>
</ul>
</div>
<div id="content"> | Do not delete this <div>. -->
<div id="nav"></div>
<!-- Content is HTML-escaped elsewhere -->
<pre>
<a id="L1"></a><span class="comment">// Copyright 2009 The Go Authors. All rights reserved.</span>
<a id="L2"></a><span class="comment">// Use of this source code is governed by a BSD-style</span>
<a id="L3"></a><span class="comment">// license that can be found in the LICENSE file.</span>
<a id="L5"></a>package datafmt
<a id="L7"></a>import (
<a id="L8"></a>"container/vector";
<a id="L9"></a>"go/scanner";
<a id="L10"></a>"go/token";
<a id="L11"></a>"os";
<a id="L12"></a>"strconv";
<a id="L13"></a>"strings";
<a id="L14"></a>)
<a id="L16"></a><span class="comment">// ----------------------------------------------------------------------------</span>
<a id="L17"></a><span class="comment">// Parsing</span>
<a id="L19"></a>type parser struct {
<a id="L20"></a>scanner.ErrorVector;
<a id="L21"></a>scanner scanner.Scanner;
<a id="L22"></a>pos token.Position; <span class="comment">// token position</span>
<a id="L23"></a>tok token.Token; <span class="comment">// one token look-ahead</span>
<a id="L24"></a>lit []byte; <span class="comment">// token literal</span>
<a id="L26"></a>packs map[string]string; <span class="comment">// PackageName -> ImportPath</span>
<a id="L27"></a>rules map[string]expr; <span class="comment">// RuleName -> Expression</span>
<a id="L28"></a>}
<a id="L31"></a>func (p *parser) next() {
<a id="L32"></a>p.pos, p.tok, p.lit = p.scanner.Scan();
<a id="L33"></a>switch p.tok {
<a id="L34"></a>case token.CHAN, token.FUNC, token.INTERFACE, token.MAP, token.STRUCT:
<a id="L35"></a><span class="comment">// Go keywords for composite types are type names</span>
<a id="L36"></a><span class="comment">// returned by reflect. Accept them as identifiers.</span>
<a id="L37"></a>p.tok = token.IDENT <span class="comment">// p.lit is already set correctly</span>
<a id="L38"></a>}
<a id="L39"></a>}
<a id="L42"></a>func (p *parser) init(filename string, src []byte) {
<a id="L43"></a>p.ErrorVector.Init();
<a id="L44"></a>p.scanner.Init(filename, src, p, scanner.AllowIllegalChars); <span class="comment">// return '@' as token.ILLEGAL w/o error message</span>
<a id="L45"></a>p.next(); <span class="comment">// initializes pos, tok, lit</span>
<a id="L46"></a>p.packs = make(map[string]string);
<a id="L47"></a>p.rules = make(map[string]expr);
<a id="L48"></a>}
<a id="L51"></a>func (p *parser) errorExpected(pos token.Position, msg string) {
<a id="L52"></a>msg = "expected " + msg;
<a id="L53"></a>if pos.Offset == p.pos.Offset {
<a id="L54"></a><span class="comment">// the error happened at the current position;</span>
<a id="L55"></a><span class="comment">// make the error message more specific</span>
<a id="L56"></a>msg += ", found '" + p.tok.String() + "'";
<a id="L57"></a>if p.tok.IsLiteral() {
<a id="L58"></a>msg += " " + string(p.lit)
<a id="L59"></a>}
<a id="L60"></a>}
<a id="L61"></a>p.Error(pos, msg);
<a id="L62"></a>}
<a id="L65"></a>func (p *parser) expect(tok token.Token) token.Position {
<a id="L66"></a>pos := p.pos;
<a id="L67"></a>if p.tok != tok {
<a id="L68"></a>p.errorExpected(pos, "'"+tok.String()+"'")
<a id="L69"></a>}
<a id="L70"></a>p.next(); <span class="comment">// make progress in any case</span>
<a id="L71"></a>return pos;
<a id="L72"></a>}
<a id="L75"></a>func (p *parser) parseIdentifier() string {
<a id="L76"></a>name := string(p.lit);
<a id="L77"></a>p.expect(token.IDENT);
<a id="L78"></a>return name;
<a id="L79"></a>}
<a id="L82"></a>func (p *parser) parseTypeName() (string, bool) {
<a id="L83"></a>pos := p.pos;
<a id="L84"></a>name, isIdent := p.parseIdentifier(), true;
<a id="L85"></a>if p.tok == token.PERIOD {
<a id="L86"></a><span class="comment">// got a package name, lookup package</span>
<a id="L87"></a>if importPath, found := p.packs[name]; found {
<a id="L88"></a>name = importPath
<a id="L89"></a>} else {
<a id="L90"></a>p.Error(pos, "package not declared: "+name)
<a id="L91"></a>}
<a id="L92"></a>p.next();
<a id="L93"></a>name, isIdent = name+"."+p.parseIdentifier(), false;
<a id="L94"></a>}
<a id="L95"></a>return name, isIdent;
<a id="L96"></a>}
<a id="L99"></a><span class="comment">// Parses a rule name and returns it. If the rule name is</span>
<a id="L100"></a><span class="comment">// a package-qualified type name, the package name is resolved.</span>
<a id="L101"></a><span class="comment">// The 2nd result value is true iff the rule name consists of a</span>
<a id="L102"></a><span class="comment">// single identifier only (and thus could be a package name).</span>
<a id="L103"></a><span class="comment">//</span>
<a id="L104"></a>func (p *parser) parseRuleName() (string, bool) {
<a id="L105"></a>name, isIdent := "", false;
<a id="L106"></a>switch p.tok {
<a id="L107"></a>case token.IDENT:
<a id="L108"></a>name, isIdent = p.parseTypeName()
<a id="L109"></a>case token.DEFAULT:
<a id="L110"></a>name = "default";
<a id="L111"></a>p.next();
<a id="L112"></a>case token.QUO:
<a id="L113"></a>name = "/";
<a id="L114"></a>p.next();
<a id="L115"></a>default:
<a id="L116"></a>p.errorExpected(p.pos, "rule name");
<a id="L117"></a>p.next(); <span class="comment">// make progress in any case</span>
<a id="L118"></a>}
<a id="L119"></a>return name, isIdent;
<a id="L120"></a>}
<a id="L123"></a>func (p *parser) parseString() string {
<a id="L124"></a>s := "";
<a id="L125"></a>if p.tok == token.STRING {
<a id="L126"></a>s, _ = strconv.Unquote(string(p.lit));
<a id="L127"></a><span class="comment">// Unquote may fail with an error, but only if the scanner found</span>
<a id="L128"></a><span class="comment">// an illegal string in the first place. In this case the error</span>
<a id="L129"></a><span class="comment">// has already been reported.</span>
<a id="L130"></a>p.next();
<a id="L131"></a>return s;
<a id="L132"></a>} else {
<a id="L133"></a>p.expect(token.STRING)
<a id="L134"></a>}
<a id="L135"></a>return s;
<a id="L136"></a>}
<a id="L139"></a>func (p *parser) parseLiteral() literal {
<a id="L140"></a>s := strings.Bytes(p.parseString());
<a id="L142"></a><span class="comment">// A string literal may contain %-format specifiers. To simplify</span>
<a id="L143"></a><span class="comment">// and speed up printing of the literal, split it into segments</span>
<a id="L144"></a><span class="comment">// that start with "%" possibly followed by a last segment that</span>
<a id="L145"></a><span class="comment">// starts with some other character.</span>
<a id="L146"></a>var list vector.Vector;
<a id="L147"></a>list.Init(0);
<a id="L148"></a>i0 := 0;
<a id="L149"></a>for i := 0; i < len(s); i++ {
<a id="L150"></a>if s[i] == '%' && i+1 < len(s) {
<a id="L151"></a><span class="comment">// the next segment starts with a % format</span>
<a id="L152"></a>if i0 < i {
<a id="L153"></a><span class="comment">// the current segment is not empty, split it off</span>
<a id="L154"></a>list.Push(s[i0:i]);
<a id="L155"></a>i0 = i;
<a id="L156"></a>}
<a id="L157"></a>i++; <span class="comment">// skip %; let loop skip over char after %</span>
<a id="L158"></a>}
<a id="L159"></a>}
<a id="L160"></a><span class="comment">// the final segment may start with any character</span>
<a id="L161"></a><span class="comment">// (it is empty iff the string is empty)</span>
<a id="L162"></a>list.Push(s[i0:len(s)]);
<a id="L164"></a><span class="comment">// convert list into a literal</span>
<a id="L165"></a>lit := make(literal, list.Len());
<a id="L166"></a>for i := 0; i < list.Len(); i++ {
<a id="L167"></a>lit[i] = list.At(i).([]byte)
<a id="L168"></a>}
<a id="L170"></a>return lit;
<a id="L171"></a>}
<a id="L174"></a>func (p *parser) parseField() expr {
<a id="L175"></a>var fname string;
<a id="L176"></a>switch p.tok {
<a id="L177"></a>case token.ILLEGAL:
<a id="L178"></a>if string(p.lit) != "@" {
<a id="L179"></a>return nil
<a id="L180"></a>}
<a id="L181"></a>fname = "@";
<a id="L182"></a>p.next();
<a id="L183"></a>case token.MUL:
<a id="L184"></a>fname = "*";
<a id="L185"></a>p.next();
<a id="L186"></a>case token.IDENT:
<a id="L187"></a>fname = p.parseIdentifier()
<a id="L188"></a>default:
<a id="L189"></a>return nil
<a id="L190"></a>}
<a id="L192"></a>var ruleName string;
<a id="L193"></a>if p.tok == token.COLON {
<a id="L194"></a>p.next();
<a id="L195"></a>ruleName, _ = p.parseRuleName();
<a id="L196"></a>}
<a id="L198"></a>return &field{fname, ruleName};
<a id="L199"></a>}
<a id="L202"></a>func (p *parser) parseOperand() (x expr) {
<a id="L203"></a>switch p.tok {
<a id="L204"></a>case token.STRING:
<a id="L205"></a>x = p.parseLiteral()
<a id="L207"></a>case token.LPAREN:
<a id="L208"></a>p.next();
<a id="L209"></a>x = p.parseExpression();
<a id="L210"></a>if p.tok == token.SHR {
<a id="L211"></a>p.next();
<a id="L212"></a>x = &group{x, p.parseExpression()};
<a id="L213"></a>}
<a id="L214"></a>p.expect(token.RPAREN);
<a id="L216"></a>case token.LBRACK:
<a id="L217"></a>p.next();
<a id="L218"></a>x = &option{p.parseExpression()};
<a id="L219"></a>p.expect(token.RBRACK);
<a id="L221"></a>case token.LBRACE:
<a id="L222"></a>p.next();
<a id="L223"></a>x = p.parseExpression();
<a id="L224"></a>var div expr;
<a id="L225"></a>if p.tok == token.QUO {
<a id="L226"></a>p.next();
<a id="L227"></a>div = p.parseExpression();
<a id="L228"></a>}
<a id="L229"></a>x = &repetition{x, div};
<a id="L230"></a>p.expect(token.RBRACE);
<a id="L232"></a>default:
<a id="L233"></a>x = p.parseField() <span class="comment">// may be nil</span>
<a id="L234"></a>}
<a id="L236"></a>return x;
<a id="L237"></a>}
<a id="L240"></a>func (p *parser) parseSequence() expr {
<a id="L241"></a>var list vector.Vector;
<a id="L242"></a>list.Init(0);
<a id="L244"></a>for x := p.parseOperand(); x != nil; x = p.parseOperand() {
<a id="L245"></a>list.Push(x)
<a id="L246"></a>}
<a id="L248"></a><span class="comment">// no need for a sequence if list.Len() < 2</span>
<a id="L249"></a>switch list.Len() {
<a id="L250"></a>case 0:
<a id="L251"></a>return nil
<a id="L252"></a>case 1:
<a id="L253"></a>return list.At(0).(expr)
<a id="L254"></a>}
<a id="L256"></a><span class="comment">// convert list into a sequence</span>
<a id="L257"></a>seq := make(sequence, list.Len());
<a id="L258"></a>for i := 0; i < list.Len(); i++ {
<a id="L259"></a>seq[i] = list.At(i).(expr)
<a id="L260"></a>}
<a id="L261"></a>return seq;
<a id="L262"></a>}
<a id="L265"></a>func (p *parser) parseExpression() expr {
<a id="L266"></a>var list vector.Vector;
<a id="L267"></a>list.Init(0);
<a id="L269"></a>for {
<a id="L270"></a>x := p.parseSequence();
<a id="L271"></a>if x != nil {
<a id="L272"></a>list.Push(x)
<a id="L273"></a>}
<a id="L274"></a>if p.tok != token.OR {
<a id="L275"></a>break
<a id="L276"></a>}
<a id="L277"></a>p.next();
<a id="L278"></a>}
<a id="L280"></a><span class="comment">// no need for an alternatives if list.Len() < 2</span>
<a id="L281"></a>switch list.Len() {
<a id="L282"></a>case 0:
<a id="L283"></a>return nil
<a id="L284"></a>case 1:
<a id="L285"></a>return list.At(0).(expr)
<a id="L286"></a>}
<a id="L288"></a><span class="comment">// convert list into a alternatives</span>
<a id="L289"></a>alt := make(alternatives, list.Len());
<a id="L290"></a>for i := 0; i < list.Len(); i++ {
<a id="L291"></a>alt[i] = list.At(i).(expr)
<a id="L292"></a>}
<a id="L293"></a>return alt;
<a id="L294"></a>}
<a id="L297"></a>func (p *parser) parseFormat() {
<a id="L298"></a>for p.tok != token.EOF {
<a id="L299"></a>pos := p.pos;
<a id="L301"></a>name, isIdent := p.parseRuleName();
<a id="L302"></a>switch p.tok {
<a id="L303"></a>case token.STRING:
<a id="L304"></a><span class="comment">// package declaration</span>
<a id="L305"></a>importPath := p.parseString();
<a id="L307"></a><span class="comment">// add package declaration</span>
<a id="L308"></a>if !isIdent {
<a id="L309"></a>p.Error(pos, "illegal package name: "+name)
<a id="L310"></a>} else if _, found := p.packs[name]; !found {
<a id="L311"></a>p.packs[name] = importPath
<a id="L312"></a>} else {
<a id="L313"></a>p.Error(pos, "package already declared: "+name)
<a id="L314"></a>}
<a id="L316"></a>case token.ASSIGN:
<a id="L317"></a><span class="comment">// format rule</span>
<a id="L318"></a>p.next();
<a id="L319"></a>x := p.parseExpression();
<a id="L321"></a><span class="comment">// add rule</span>
<a id="L322"></a>if _, found := p.rules[name]; !found {
<a id="L323"></a>p.rules[name] = x
<a id="L324"></a>} else {
<a id="L325"></a>p.Error(pos, "format rule already declared: "+name)
<a id="L326"></a>}
<a id="L328"></a>default:
<a id="L329"></a>p.errorExpected(p.pos, "package declaration or format rule");
<a id="L330"></a>p.next(); <span class="comment">// make progress in any case</span>
<a id="L331"></a>}
<a id="L333"></a>if p.tok == token.SEMICOLON {
<a id="L334"></a>p.next()
<a id="L335"></a>} else {
<a id="L336"></a>break
<a id="L337"></a>}
<a id="L338"></a>}
<a id="L339"></a>p.expect(token.EOF);
<a id="L340"></a>}
<a id="L343"></a>func remap(p *parser, name string) string {
<a id="L344"></a>i := strings.Index(name, ".");
<a id="L345"></a>if i >= 0 {
<a id="L346"></a>packageName, suffix := name[0:i], name[i:len(name)];
<a id="L347"></a><span class="comment">// lookup package</span>
<a id="L348"></a>if importPath, found := p.packs[packageName]; found {
<a id="L349"></a>name = importPath + suffix
<a id="L350"></a>} else {
<a id="L351"></a>var invalidPos token.Position;
<a id="L352"></a>p.Error(invalidPos, "package not declared: "+packageName);
<a id="L353"></a>}
<a id="L354"></a>}
<a id="L355"></a>return name;
<a id="L356"></a>}
<a id="L359"></a><span class="comment">// Parse parses a set of format productions from source src. Custom</span>
<a id="L360"></a><span class="comment">// formatters may be provided via a map of formatter functions. If</span>
<a id="L361"></a><span class="comment">// there are no errors, the result is a Format and the error is nil.</span>
<a id="L362"></a><span class="comment">// Otherwise the format is nil and a non-empty ErrorList is returned.</span>
<a id="L363"></a><span class="comment">//</span>
<a id="L364"></a>func Parse(filename string, src []byte, fmap FormatterMap) (Format, os.Error) {
<a id="L365"></a><span class="comment">// parse source</span>
<a id="L366"></a>var p parser;
<a id="L367"></a>p.init(filename, src);
<a id="L368"></a>p.parseFormat();
<a id="L370"></a><span class="comment">// add custom formatters, if any</span>
<a id="L371"></a>for name, form := range fmap {
<a id="L372"></a>name = remap(&p, name);
<a id="L373"></a>if _, found := p.rules[name]; !found {
<a id="L374"></a>p.rules[name] = &custom{name, form}
<a id="L375"></a>} else {
<a id="L376"></a>var invalidPos token.Position;
<a id="L377"></a>p.Error(invalidPos, "formatter already declared: "+name);
<a id="L378"></a>}
<a id="L379"></a>}
<a id="L381"></a>return p.rules, p.GetError(scanner.NoMultiples);
<a id="L382"></a>}
</pre>
</div>
<div id="footer">
<p>Except as noted, this content is
licensed under <a href="http://creativecommons.org/licenses/by/3.0/">
Creative Commons Attribution 3.0</a>.
</div>
<script type="text/javascript">
var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
</script>
<script type="text/javascript">
var pageTracker = _gat._getTracker("UA-11222381-2");
pageTracker._trackPageview();
</script>
</body>
</html>
<!-- generated at Thu Nov 12 15:42:51 PST 2009 --> | <h1 id="generatedHeader">Source file /src/pkg/exp/datafmt/parser.go</h1>
<!-- The Table of Contents is automatically inserted in this <div>. | random_line_split |
capture_agents.py | """Interfaces for capture agents.
Champlain College CSI-480, Fall 2018
The following code was adapted by Joshua Auerbach (jauerbach@champlain.edu)
from the UC Berkeley Pacman Projects (see license and attribution below).
----------------------
Licensing Information: You are free to use or extend these projects for
educational purposes provided that (1) you do not distribute or publish
solutions, (2) you retain this notice, and (3) you provide clear
attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
Attribution Information: The Pacman AI projects were developed at UC Berkeley.
The core projects and autograders were primarily created by John DeNero
(denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
Student side autograding was added by Brad Miller, Nick Hay, and
Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
from game import Agent
import distance_calculator
from util import nearest_point
import util
import random
import time
class RandomAgent(Agent):
"""A random agent that abides by the rules."""
def __init__(self, index):
"""Initialize agent with given index."""
self.index = index
def get_action(self, state):
"""Return a random legal action."""
return random.choice(state.get_legal_actions(self.index))
class CaptureAgent(Agent):
"""A base class for capture agents.
The convenience methods herein handle some of the complications of a
two-team game.
Recommended Usage: Subclass CaptureAgent and override choose_action.
"""
#############################
# Methods to store key info #
#############################
def | (self, index, time_for_computing=.1):
"""Initialize capture agent with several variables you can query.
self.index = index for this agent
self.red = true if you're on the red team, false otherwise
self.agents_on_team = a list of agent objects that make up your team
self.distancer = distance calculator (contest code provides this)
self.observation_history = list of GameState objects that correspond
to the sequential order of states that have
occurred so far this game
self.time_for_computing = an amount of time to give each turn for
computing maze distances
(part of the provided distance calculator)
"""
# Agent index for querying state
self.index = index
# Whether or not you're on the red team
self.red = None
# Agent objects controlling you and your teammates
self.agents_on_team = None
# Maze distance calculator
self.distancer = None
# A history of observations
self.observation_history = []
# Time to spend each turn on computing maze distances
self.time_for_computing = time_for_computing
# Access to the graphics
self.display = None
def register_initial_state(self, game_state):
"""Handle the initial setup of the agent.
Populate useful fields (such as what team we're on).
A distance_calculator instance caches the maze distances
between each pair of positions, so your agents can use:
self.distancer.get_distance(p1, p2)
"""
self.red = game_state.is_on_red_team(self.index)
self.distancer = distance_calculator.Distancer(game_state.data.layout)
# comment this out to forgo maze distance computation and
# use manhattan distances
self.distancer.get_maze_distances()
import __main__
if '_display' in dir(__main__):
self.display = __main__._display
def final(self, game_state):
"""Finalize game."""
self.observation_history = []
def register_team(self, agents_on_team):
"""Fill the self.agents_on_team field.
Will be filled with a list of the indices of the agents on your team.
"""
self.agents_on_team = agents_on_team
def observation_function(self, game_state):
"""Make an observation about the game_state."""
return game_state.make_observation(self.index)
def debug_draw(self, cells, color, clear=False):
"""Draw debug information."""
if self.display:
from capture_graphics_display import PacmanGraphics
if isinstance(self.display, PacmanGraphics):
if not type(cells) is list:
cells = [cells]
self.display.debug_draw(cells, color, clear)
def debug_clear(self):
"""Clear out debug information."""
if self.display:
from capture_graphics_display import PacmanGraphics
if isinstance(self.display, PacmanGraphics):
self.display.clear_debug()
#################
# Action Choice #
#################
def get_action(self, game_state):
"""Call choose_action on a grid position; continues on half positions.
If you subclass CaptureAgent, you shouldn't need to override this
method. It takes care of appending the current game_state on to your
observation history (so you have a record of the game states of the
game) and will call your choose action method if you're in a state
(rather than halfway through your last move - this occurs because
Pacman agents move half as quickly as ghost agents).
"""
self.observation_history.append(game_state)
my_state = game_state.get_agent_state(self.index)
my_pos = my_state.get_position()
if my_pos != nearest_point(my_pos):
# We're halfway from one position to the next
return game_state.get_legal_actions(self.index)[0]
else:
return self.choose_action(game_state)
def choose_action(self, game_state):
"""Override this method to make a good agent.
It should return a legal action within the time limit (otherwise a
random legal action will be chosen for you).
"""
util.raise_not_defined()
#######################
# Convenience Methods #
#######################
def get_food(self, game_state):
"""Return the food you're meant to eat.
This is in the form of a matrix
where m[x][y]=true if there is food you can eat (based on your team)
in that square.
"""
if self.red:
return game_state.get_blue_food()
else:
return game_state.get_red_food()
def get_food_you_are_defending(self, game_state):
"""Return the food you're meant to protect.
i.e., the food that your opponent is supposed to eat.
This is in the form of a matrix where m[x][y]=true if
there is food at (x,y) that your opponent can eat.
"""
if self.red:
return game_state.get_red_food()
else:
return game_state.get_blue_food()
def get_capsules(self, game_state):
"""Return the capsule you're meant to eat."""
if self.red:
return game_state.get_blue_capsules()
else:
return game_state.get_red_capsules()
def get_capsules_you_are_defending(self, game_state):
"""Return the capsule you're meant to protect."""
if self.red:
return game_state.get_red_capsules()
else:
return game_state.get_blue_capsules()
def get_opponents(self, game_state):
"""Return agent indices of your opponents.
This is the list of the numbers of the agents
(e.g., red might be "1,3,5")
"""
if self.red:
return game_state.get_blue_team_indices()
else:
return game_state.get_red_team_indices()
def get_team(self, game_state):
"""Return agent indices of your team.
This is the list of the numbers of the agents
(e.g., red might be the list of 1,3,5)
"""
if self.red:
return game_state.get_red_team_indices()
else:
return game_state.get_blue_team_indices()
def get_score(self, game_state):
"""Return how much you are beating the other team by.
This is in the form of a number that is the difference between your
score and the opponents score. This number is negative if you're
losing.
"""
if self.red:
return game_state.get_score()
else:
return game_state.get_score() * -1
def get_maze_distance(self, pos1, pos2):
"""Return the distance between two points.
These are calculated using the provided distancer object.
If distancer.get_maze_distances() has been called, then maze distances
are available.
Otherwise, this just returns Manhattan distance.
"""
d = self.distancer.get_distance(pos1, pos2)
return d
def get_previous_observation(self):
"""Return GameState object of the last state this agent saw.
(the observed state of the game last time this agent moved -
this may not include all of your opponent's agent locations exactly).
"""
if len(self.observation_history) == 1:
return None
else:
return self.observation_history[-2]
def get_current_observation(self):
"""Return the GameState object of this agent's current observation.
(the observed state of the game - this may not include
all of your opponent's agent locations exactly).
"""
return self.observation_history[-1]
def display_distributions_over_positions(self, distributions):
"""Overlays a distribution over positions onto the pacman board.
This represents an agent's beliefs about the positions of each agent.
The arg distributions is a tuple or list of util.Counter objects,
where the i'th Counter has keys that are board positions (x,y) and
values that encode the probability that agent i is at (x,y).
If some elements are None, then they will be ignored.
If a Counter is passed to this function, it will be displayed.
This is helpful for figuring out if your agent is doing
inference correctly, and does not affect gameplay.
"""
dists = []
for dist in distributions:
if dist is not None:
if not isinstance(dist, util.Counter):
raise Exception("Wrong type of distribution")
dists.append(dist)
else:
dists.append(util.Counter())
if ((self.display is not None and
'update_distributions' in dir(self.display))):
self.display.update_distributions(dists)
else:
self._distributions = dists # These can be read by pacclient.py
class TimeoutAgent(Agent):
"""A random agent that takes too much time.
Taking too much time results in penalties and random moves.
"""
def __init__(self, index):
"""Initialize agent with given index."""
self.index = index
def get_action(self, state):
"""Take too much time getting action."""
time.sleep(2.0)
return random.choice(state.get_legal_actions(self.index))
| __init__ | identifier_name |
capture_agents.py | """Interfaces for capture agents.
Champlain College CSI-480, Fall 2018
The following code was adapted by Joshua Auerbach (jauerbach@champlain.edu)
from the UC Berkeley Pacman Projects (see license and attribution below).
----------------------
Licensing Information: You are free to use or extend these projects for
educational purposes provided that (1) you do not distribute or publish
solutions, (2) you retain this notice, and (3) you provide clear
attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
Attribution Information: The Pacman AI projects were developed at UC Berkeley.
The core projects and autograders were primarily created by John DeNero
(denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
Student side autograding was added by Brad Miller, Nick Hay, and
Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
from game import Agent
import distance_calculator
from util import nearest_point
import util
import random
import time
class RandomAgent(Agent):
"""A random agent that abides by the rules."""
def __init__(self, index):
"""Initialize agent with given index."""
self.index = index
def get_action(self, state):
"""Return a random legal action."""
return random.choice(state.get_legal_actions(self.index))
class CaptureAgent(Agent):
"""A base class for capture agents.
The convenience methods herein handle some of the complications of a
two-team game.
Recommended Usage: Subclass CaptureAgent and override choose_action.
"""
#############################
# Methods to store key info #
#############################
def __init__(self, index, time_for_computing=.1):
"""Initialize capture agent with several variables you can query.
self.index = index for this agent
self.red = true if you're on the red team, false otherwise
self.agents_on_team = a list of agent objects that make up your team
self.distancer = distance calculator (contest code provides this)
self.observation_history = list of GameState objects that correspond
to the sequential order of states that have
occurred so far this game
self.time_for_computing = an amount of time to give each turn for
computing maze distances
(part of the provided distance calculator)
"""
# Agent index for querying state
self.index = index
# Whether or not you're on the red team
self.red = None
# Agent objects controlling you and your teammates
self.agents_on_team = None
# Maze distance calculator
self.distancer = None
# A history of observations
self.observation_history = []
# Time to spend each turn on computing maze distances
self.time_for_computing = time_for_computing
# Access to the graphics
self.display = None
def register_initial_state(self, game_state):
"""Handle the initial setup of the agent.
Populate useful fields (such as what team we're on).
A distance_calculator instance caches the maze distances
between each pair of positions, so your agents can use:
self.distancer.get_distance(p1, p2)
"""
self.red = game_state.is_on_red_team(self.index)
self.distancer = distance_calculator.Distancer(game_state.data.layout)
# comment this out to forgo maze distance computation and
# use manhattan distances
self.distancer.get_maze_distances()
import __main__
if '_display' in dir(__main__):
self.display = __main__._display
def final(self, game_state):
"""Finalize game."""
self.observation_history = []
def register_team(self, agents_on_team):
"""Fill the self.agents_on_team field.
Will be filled with a list of the indices of the agents on your team.
"""
self.agents_on_team = agents_on_team
def observation_function(self, game_state):
"""Make an observation about the game_state."""
return game_state.make_observation(self.index)
def debug_draw(self, cells, color, clear=False):
"""Draw debug information."""
if self.display:
from capture_graphics_display import PacmanGraphics
if isinstance(self.display, PacmanGraphics):
if not type(cells) is list:
cells = [cells]
self.display.debug_draw(cells, color, clear)
def debug_clear(self):
"""Clear out debug information."""
if self.display:
from capture_graphics_display import PacmanGraphics
if isinstance(self.display, PacmanGraphics):
self.display.clear_debug()
#################
# Action Choice #
#################
def get_action(self, game_state):
"""Call choose_action on a grid position; continues on half positions.
If you subclass CaptureAgent, you shouldn't need to override this
method. It takes care of appending the current game_state on to your
observation history (so you have a record of the game states of the
game) and will call your choose action method if you're in a state
(rather than halfway through your last move - this occurs because
Pacman agents move half as quickly as ghost agents).
"""
self.observation_history.append(game_state)
my_state = game_state.get_agent_state(self.index)
my_pos = my_state.get_position()
if my_pos != nearest_point(my_pos):
# We're halfway from one position to the next
return game_state.get_legal_actions(self.index)[0]
else:
return self.choose_action(game_state)
def choose_action(self, game_state):
"""Override this method to make a good agent.
It should return a legal action within the time limit (otherwise a
random legal action will be chosen for you).
"""
util.raise_not_defined()
#######################
# Convenience Methods #
#######################
def get_food(self, game_state):
"""Return the food you're meant to eat.
This is in the form of a matrix
where m[x][y]=true if there is food you can eat (based on your team)
in that square.
"""
if self.red:
return game_state.get_blue_food()
else:
return game_state.get_red_food()
def get_food_you_are_defending(self, game_state):
"""Return the food you're meant to protect.
i.e., the food that your opponent is supposed to eat.
This is in the form of a matrix where m[x][y]=true if
there is food at (x,y) that your opponent can eat.
"""
if self.red:
return game_state.get_red_food()
else:
return game_state.get_blue_food()
def get_capsules(self, game_state):
"""Return the capsule you're meant to eat."""
if self.red:
return game_state.get_blue_capsules()
else:
return game_state.get_red_capsules()
def get_capsules_you_are_defending(self, game_state):
"""Return the capsule you're meant to protect."""
if self.red:
return game_state.get_red_capsules()
else:
return game_state.get_blue_capsules()
def get_opponents(self, game_state):
"""Return agent indices of your opponents.
This is the list of the numbers of the agents
(e.g., red might be "1,3,5")
"""
if self.red:
return game_state.get_blue_team_indices()
else:
return game_state.get_red_team_indices()
def get_team(self, game_state):
"""Return agent indices of your team.
This is the list of the numbers of the agents
(e.g., red might be the list of 1,3,5)
"""
if self.red:
return game_state.get_red_team_indices()
else:
return game_state.get_blue_team_indices()
| score and the opponents score. This number is negative if you're
losing.
"""
if self.red:
return game_state.get_score()
else:
return game_state.get_score() * -1
def get_maze_distance(self, pos1, pos2):
"""Return the distance between two points.
These are calculated using the provided distancer object.
If distancer.get_maze_distances() has been called, then maze distances
are available.
Otherwise, this just returns Manhattan distance.
"""
d = self.distancer.get_distance(pos1, pos2)
return d
def get_previous_observation(self):
"""Return GameState object of the last state this agent saw.
(the observed state of the game last time this agent moved -
this may not include all of your opponent's agent locations exactly).
"""
if len(self.observation_history) == 1:
return None
else:
return self.observation_history[-2]
def get_current_observation(self):
"""Return the GameState object of this agent's current observation.
(the observed state of the game - this may not include
all of your opponent's agent locations exactly).
"""
return self.observation_history[-1]
def display_distributions_over_positions(self, distributions):
"""Overlays a distribution over positions onto the pacman board.
This represents an agent's beliefs about the positions of each agent.
The arg distributions is a tuple or list of util.Counter objects,
where the i'th Counter has keys that are board positions (x,y) and
values that encode the probability that agent i is at (x,y).
If some elements are None, then they will be ignored.
If a Counter is passed to this function, it will be displayed.
This is helpful for figuring out if your agent is doing
inference correctly, and does not affect gameplay.
"""
dists = []
for dist in distributions:
if dist is not None:
if not isinstance(dist, util.Counter):
raise Exception("Wrong type of distribution")
dists.append(dist)
else:
dists.append(util.Counter())
if ((self.display is not None and
'update_distributions' in dir(self.display))):
self.display.update_distributions(dists)
else:
self._distributions = dists # These can be read by pacclient.py
class TimeoutAgent(Agent):
"""A random agent that takes too much time.
Taking too much time results in penalties and random moves.
"""
def __init__(self, index):
"""Initialize agent with given index."""
self.index = index
def get_action(self, state):
"""Take too much time getting action."""
time.sleep(2.0)
return random.choice(state.get_legal_actions(self.index)) | def get_score(self, game_state):
"""Return how much you are beating the other team by.
This is in the form of a number that is the difference between your | random_line_split |
capture_agents.py | """Interfaces for capture agents.
Champlain College CSI-480, Fall 2018
The following code was adapted by Joshua Auerbach (jauerbach@champlain.edu)
from the UC Berkeley Pacman Projects (see license and attribution below).
----------------------
Licensing Information: You are free to use or extend these projects for
educational purposes provided that (1) you do not distribute or publish
solutions, (2) you retain this notice, and (3) you provide clear
attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
Attribution Information: The Pacman AI projects were developed at UC Berkeley.
The core projects and autograders were primarily created by John DeNero
(denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
Student side autograding was added by Brad Miller, Nick Hay, and
Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
from game import Agent
import distance_calculator
from util import nearest_point
import util
import random
import time
class RandomAgent(Agent):
"""A random agent that abides by the rules."""
def __init__(self, index):
"""Initialize agent with given index."""
self.index = index
def get_action(self, state):
"""Return a random legal action."""
return random.choice(state.get_legal_actions(self.index))
class CaptureAgent(Agent):
"""A base class for capture agents.
The convenience methods herein handle some of the complications of a
two-team game.
Recommended Usage: Subclass CaptureAgent and override choose_action.
"""
#############################
# Methods to store key info #
#############################
def __init__(self, index, time_for_computing=.1):
"""Initialize capture agent with several variables you can query.
self.index = index for this agent
self.red = true if you're on the red team, false otherwise
self.agents_on_team = a list of agent objects that make up your team
self.distancer = distance calculator (contest code provides this)
self.observation_history = list of GameState objects that correspond
to the sequential order of states that have
occurred so far this game
self.time_for_computing = an amount of time to give each turn for
computing maze distances
(part of the provided distance calculator)
"""
# Agent index for querying state
self.index = index
# Whether or not you're on the red team
self.red = None
# Agent objects controlling you and your teammates
self.agents_on_team = None
# Maze distance calculator
self.distancer = None
# A history of observations
self.observation_history = []
# Time to spend each turn on computing maze distances
self.time_for_computing = time_for_computing
# Access to the graphics
self.display = None
def register_initial_state(self, game_state):
"""Handle the initial setup of the agent.
Populate useful fields (such as what team we're on).
A distance_calculator instance caches the maze distances
between each pair of positions, so your agents can use:
self.distancer.get_distance(p1, p2)
"""
self.red = game_state.is_on_red_team(self.index)
self.distancer = distance_calculator.Distancer(game_state.data.layout)
# comment this out to forgo maze distance computation and
# use manhattan distances
self.distancer.get_maze_distances()
import __main__
if '_display' in dir(__main__):
self.display = __main__._display
def final(self, game_state):
"""Finalize game."""
self.observation_history = []
def register_team(self, agents_on_team):
"""Fill the self.agents_on_team field.
Will be filled with a list of the indices of the agents on your team.
"""
self.agents_on_team = agents_on_team
def observation_function(self, game_state):
"""Make an observation about the game_state."""
return game_state.make_observation(self.index)
def debug_draw(self, cells, color, clear=False):
"""Draw debug information."""
if self.display:
from capture_graphics_display import PacmanGraphics
if isinstance(self.display, PacmanGraphics):
if not type(cells) is list:
cells = [cells]
self.display.debug_draw(cells, color, clear)
def debug_clear(self):
"""Clear out debug information."""
if self.display:
from capture_graphics_display import PacmanGraphics
if isinstance(self.display, PacmanGraphics):
self.display.clear_debug()
#################
# Action Choice #
#################
def get_action(self, game_state):
"""Call choose_action on a grid position; continues on half positions.
If you subclass CaptureAgent, you shouldn't need to override this
method. It takes care of appending the current game_state on to your
observation history (so you have a record of the game states of the
game) and will call your choose action method if you're in a state
(rather than halfway through your last move - this occurs because
Pacman agents move half as quickly as ghost agents).
"""
self.observation_history.append(game_state)
my_state = game_state.get_agent_state(self.index)
my_pos = my_state.get_position()
if my_pos != nearest_point(my_pos):
# We're halfway from one position to the next
return game_state.get_legal_actions(self.index)[0]
else:
return self.choose_action(game_state)
def choose_action(self, game_state):
"""Override this method to make a good agent.
It should return a legal action within the time limit (otherwise a
random legal action will be chosen for you).
"""
util.raise_not_defined()
#######################
# Convenience Methods #
#######################
def get_food(self, game_state):
"""Return the food you're meant to eat.
This is in the form of a matrix
where m[x][y]=true if there is food you can eat (based on your team)
in that square.
"""
if self.red:
return game_state.get_blue_food()
else:
return game_state.get_red_food()
def get_food_you_are_defending(self, game_state):
"""Return the food you're meant to protect.
i.e., the food that your opponent is supposed to eat.
This is in the form of a matrix where m[x][y]=true if
there is food at (x,y) that your opponent can eat.
"""
if self.red:
return game_state.get_red_food()
else:
return game_state.get_blue_food()
def get_capsules(self, game_state):
"""Return the capsule you're meant to eat."""
if self.red:
return game_state.get_blue_capsules()
else:
return game_state.get_red_capsules()
def get_capsules_you_are_defending(self, game_state):
"""Return the capsule you're meant to protect."""
if self.red:
return game_state.get_red_capsules()
else:
return game_state.get_blue_capsules()
def get_opponents(self, game_state):
"""Return agent indices of your opponents.
This is the list of the numbers of the agents
(e.g., red might be "1,3,5")
"""
if self.red:
return game_state.get_blue_team_indices()
else:
return game_state.get_red_team_indices()
def get_team(self, game_state):
"""Return agent indices of your team.
This is the list of the numbers of the agents
(e.g., red might be the list of 1,3,5)
"""
if self.red:
return game_state.get_red_team_indices()
else:
return game_state.get_blue_team_indices()
def get_score(self, game_state):
"""Return how much you are beating the other team by.
This is in the form of a number that is the difference between your
score and the opponents score. This number is negative if you're
losing.
"""
if self.red:
return game_state.get_score()
else:
return game_state.get_score() * -1
def get_maze_distance(self, pos1, pos2):
"""Return the distance between two points.
These are calculated using the provided distancer object.
If distancer.get_maze_distances() has been called, then maze distances
are available.
Otherwise, this just returns Manhattan distance.
"""
d = self.distancer.get_distance(pos1, pos2)
return d
def get_previous_observation(self):
"""Return GameState object of the last state this agent saw.
(the observed state of the game last time this agent moved -
this may not include all of your opponent's agent locations exactly).
"""
if len(self.observation_history) == 1:
return None
else:
return self.observation_history[-2]
def get_current_observation(self):
"""Return the GameState object of this agent's current observation.
(the observed state of the game - this may not include
all of your opponent's agent locations exactly).
"""
return self.observation_history[-1]
def display_distributions_over_positions(self, distributions):
"""Overlays a distribution over positions onto the pacman board.
This represents an agent's beliefs about the positions of each agent.
The arg distributions is a tuple or list of util.Counter objects,
where the i'th Counter has keys that are board positions (x,y) and
values that encode the probability that agent i is at (x,y).
If some elements are None, then they will be ignored.
If a Counter is passed to this function, it will be displayed.
This is helpful for figuring out if your agent is doing
inference correctly, and does not affect gameplay.
"""
dists = []
for dist in distributions:
if dist is not None:
if not isinstance(dist, util.Counter):
raise Exception("Wrong type of distribution")
dists.append(dist)
else:
dists.append(util.Counter())
if ((self.display is not None and
'update_distributions' in dir(self.display))):
self.display.update_distributions(dists)
else:
self._distributions = dists # These can be read by pacclient.py
class TimeoutAgent(Agent):
"""A random agent that takes too much time.
Taking too much time results in penalties and random moves.
"""
def __init__(self, index):
|
def get_action(self, state):
"""Take too much time getting action."""
time.sleep(2.0)
return random.choice(state.get_legal_actions(self.index))
| """Initialize agent with given index."""
self.index = index | identifier_body |
capture_agents.py | """Interfaces for capture agents.
Champlain College CSI-480, Fall 2018
The following code was adapted by Joshua Auerbach (jauerbach@champlain.edu)
from the UC Berkeley Pacman Projects (see license and attribution below).
----------------------
Licensing Information: You are free to use or extend these projects for
educational purposes provided that (1) you do not distribute or publish
solutions, (2) you retain this notice, and (3) you provide clear
attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
Attribution Information: The Pacman AI projects were developed at UC Berkeley.
The core projects and autograders were primarily created by John DeNero
(denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
Student side autograding was added by Brad Miller, Nick Hay, and
Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
from game import Agent
import distance_calculator
from util import nearest_point
import util
import random
import time
class RandomAgent(Agent):
"""A random agent that abides by the rules."""
def __init__(self, index):
"""Initialize agent with given index."""
self.index = index
def get_action(self, state):
"""Return a random legal action."""
return random.choice(state.get_legal_actions(self.index))
class CaptureAgent(Agent):
"""A base class for capture agents.
The convenience methods herein handle some of the complications of a
two-team game.
Recommended Usage: Subclass CaptureAgent and override choose_action.
"""
#############################
# Methods to store key info #
#############################
def __init__(self, index, time_for_computing=.1):
"""Initialize capture agent with several variables you can query.
self.index = index for this agent
self.red = true if you're on the red team, false otherwise
self.agents_on_team = a list of agent objects that make up your team
self.distancer = distance calculator (contest code provides this)
self.observation_history = list of GameState objects that correspond
to the sequential order of states that have
occurred so far this game
self.time_for_computing = an amount of time to give each turn for
computing maze distances
(part of the provided distance calculator)
"""
# Agent index for querying state
self.index = index
# Whether or not you're on the red team
self.red = None
# Agent objects controlling you and your teammates
self.agents_on_team = None
# Maze distance calculator
self.distancer = None
# A history of observations
self.observation_history = []
# Time to spend each turn on computing maze distances
self.time_for_computing = time_for_computing
# Access to the graphics
self.display = None
def register_initial_state(self, game_state):
"""Handle the initial setup of the agent.
Populate useful fields (such as what team we're on).
A distance_calculator instance caches the maze distances
between each pair of positions, so your agents can use:
self.distancer.get_distance(p1, p2)
"""
self.red = game_state.is_on_red_team(self.index)
self.distancer = distance_calculator.Distancer(game_state.data.layout)
# comment this out to forgo maze distance computation and
# use manhattan distances
self.distancer.get_maze_distances()
import __main__
if '_display' in dir(__main__):
self.display = __main__._display
def final(self, game_state):
"""Finalize game."""
self.observation_history = []
def register_team(self, agents_on_team):
"""Fill the self.agents_on_team field.
Will be filled with a list of the indices of the agents on your team.
"""
self.agents_on_team = agents_on_team
def observation_function(self, game_state):
"""Make an observation about the game_state."""
return game_state.make_observation(self.index)
def debug_draw(self, cells, color, clear=False):
"""Draw debug information."""
if self.display:
from capture_graphics_display import PacmanGraphics
if isinstance(self.display, PacmanGraphics):
if not type(cells) is list:
cells = [cells]
self.display.debug_draw(cells, color, clear)
def debug_clear(self):
"""Clear out debug information."""
if self.display:
from capture_graphics_display import PacmanGraphics
if isinstance(self.display, PacmanGraphics):
self.display.clear_debug()
#################
# Action Choice #
#################
def get_action(self, game_state):
"""Call choose_action on a grid position; continues on half positions.
If you subclass CaptureAgent, you shouldn't need to override this
method. It takes care of appending the current game_state on to your
observation history (so you have a record of the game states of the
game) and will call your choose action method if you're in a state
(rather than halfway through your last move - this occurs because
Pacman agents move half as quickly as ghost agents).
"""
self.observation_history.append(game_state)
my_state = game_state.get_agent_state(self.index)
my_pos = my_state.get_position()
if my_pos != nearest_point(my_pos):
# We're halfway from one position to the next
|
else:
return self.choose_action(game_state)
def choose_action(self, game_state):
"""Override this method to make a good agent.
It should return a legal action within the time limit (otherwise a
random legal action will be chosen for you).
"""
util.raise_not_defined()
#######################
# Convenience Methods #
#######################
def get_food(self, game_state):
"""Return the food you're meant to eat.
This is in the form of a matrix
where m[x][y]=true if there is food you can eat (based on your team)
in that square.
"""
if self.red:
return game_state.get_blue_food()
else:
return game_state.get_red_food()
def get_food_you_are_defending(self, game_state):
"""Return the food you're meant to protect.
i.e., the food that your opponent is supposed to eat.
This is in the form of a matrix where m[x][y]=true if
there is food at (x,y) that your opponent can eat.
"""
if self.red:
return game_state.get_red_food()
else:
return game_state.get_blue_food()
def get_capsules(self, game_state):
"""Return the capsule you're meant to eat."""
if self.red:
return game_state.get_blue_capsules()
else:
return game_state.get_red_capsules()
def get_capsules_you_are_defending(self, game_state):
"""Return the capsule you're meant to protect."""
if self.red:
return game_state.get_red_capsules()
else:
return game_state.get_blue_capsules()
def get_opponents(self, game_state):
"""Return agent indices of your opponents.
This is the list of the numbers of the agents
(e.g., red might be "1,3,5")
"""
if self.red:
return game_state.get_blue_team_indices()
else:
return game_state.get_red_team_indices()
def get_team(self, game_state):
"""Return agent indices of your team.
This is the list of the numbers of the agents
(e.g., red might be the list of 1,3,5)
"""
if self.red:
return game_state.get_red_team_indices()
else:
return game_state.get_blue_team_indices()
def get_score(self, game_state):
"""Return how much you are beating the other team by.
This is in the form of a number that is the difference between your
score and the opponents score. This number is negative if you're
losing.
"""
if self.red:
return game_state.get_score()
else:
return game_state.get_score() * -1
def get_maze_distance(self, pos1, pos2):
"""Return the distance between two points.
These are calculated using the provided distancer object.
If distancer.get_maze_distances() has been called, then maze distances
are available.
Otherwise, this just returns Manhattan distance.
"""
d = self.distancer.get_distance(pos1, pos2)
return d
def get_previous_observation(self):
"""Return GameState object of the last state this agent saw.
(the observed state of the game last time this agent moved -
this may not include all of your opponent's agent locations exactly).
"""
if len(self.observation_history) == 1:
return None
else:
return self.observation_history[-2]
def get_current_observation(self):
"""Return the GameState object of this agent's current observation.
(the observed state of the game - this may not include
all of your opponent's agent locations exactly).
"""
return self.observation_history[-1]
def display_distributions_over_positions(self, distributions):
"""Overlays a distribution over positions onto the pacman board.
This represents an agent's beliefs about the positions of each agent.
The arg distributions is a tuple or list of util.Counter objects,
where the i'th Counter has keys that are board positions (x,y) and
values that encode the probability that agent i is at (x,y).
If some elements are None, then they will be ignored.
If a Counter is passed to this function, it will be displayed.
This is helpful for figuring out if your agent is doing
inference correctly, and does not affect gameplay.
"""
dists = []
for dist in distributions:
if dist is not None:
if not isinstance(dist, util.Counter):
raise Exception("Wrong type of distribution")
dists.append(dist)
else:
dists.append(util.Counter())
if ((self.display is not None and
'update_distributions' in dir(self.display))):
self.display.update_distributions(dists)
else:
self._distributions = dists # These can be read by pacclient.py
class TimeoutAgent(Agent):
"""A random agent that takes too much time.
Taking too much time results in penalties and random moves.
"""
def __init__(self, index):
"""Initialize agent with given index."""
self.index = index
def get_action(self, state):
"""Take too much time getting action."""
time.sleep(2.0)
return random.choice(state.get_legal_actions(self.index))
| return game_state.get_legal_actions(self.index)[0] | conditional_block |
mod.rs | //! A stateless, layered, multithread video system with OpenGL backends.
//!
//! # Overview and Goals
//!
//! The management of video effects has become an important topic and key feature of
//! rendering engines. With the increasing number of effects it is not sufficient anymore
//! to only support them, but also to integrate them into the rendering engine in a clean
//! and extensible way.
//!
//! The goal of this work and simultaneously its main contribution is to design and
//! implement an advanced effects framework. Using this framework it should be easy for
//! further applications to combine several small effects like texture mapping, shading
//! and shadowing in an automated and transparent way and apply them to any 3D model.
//! Additionally, it should be possible to integrate new effects and use the provided
//! framework for rapid prototyping.
//!
//! ### Multi Platform
//!
//! Ideally, crayon should be able to run on macOS, windows and popular mobile-platforms.
//! There still are a huge number of performance and feature limited devices, so this
//! video module will always be limited by lower-end 3D APIs like OpenGL ES2.0.
//!
//! ### Stateless Pipeline
//!
//! Ordinary OpenGL application deals with stateful APIs, which is error-prone. This
//! means whenever you change any state in the API for subsequent draw calls, this state
//! change also affects draw calls submitted at a later point in time. Ideally, submitting
//! a draw call with whatever state we want should not affect any of the other draw calls,
//! even in multi-thread environments.
//!
//! Modern 3D-APIs like [gfx-rs](https://github.com/gfx-rs/gfx), [glium](https://github.com/glium/glium)
//! bundles render state and data into a few, precompiled resource objects which are
//! combined into final render pipeline. We should follow the same philosophy.
//!
//! ### Multi-thread
//!
//! In most cases, dividing OpenGL rendering across multiple threads will not result in
//! any performance improvement due the pipeline nature of OpenGL. What we are about
//! to do is actually exploiting parallelism in resource preparation, and provides a set of
//! multi-thread friendly APIs.
//!
//! The most common solution is by using a double-buffer of commands. This consists of
//! running the renderer backend in a speparate thread, where all draw calls and communication
//! with the OpenGL API are performed. The frontend thread that runs the game logic
//! communicates with the backend renderer via a command double-buffer.
//!
//! ### Layered Rendering
//!
//! Its important to sort video commands (generated by different threads) before submiting
//! them to OpenGL, for the sack of both correctness and performance. For example, to draw
//! transparent objects via blending, we need draw opaque object first, usually from front-to-back,
//! and draw translucents from back-to-front.
//!
//! The idea here is to assign a integer key to a command which is used for sorting. Depending
//! on where those bits are stored in the integer, you can apply different sorting criteria
//! for the same array of commands, as long as you know how the keys were built.
//!
//! # Resource Objects
//!
//! Render state and data, which are combined into final render pipeline, are bundled into a
//! few, precompiled resource objects in video module.
//!
//! All resources types can be created instantly from data in memory, and meshes, textures
//! can also be loaded asynchronously from the filesystem.
//!
//! And the actual resource objects are usually private and opaque, you will get a `Handle`
//! immediately for every resource objects you created instead of some kind of reference.
//! Its the unique identifier for the resource, its type-safe and copyable.
//!
//! When you are done with the created resource objects, its your responsiblity to delete the
//! resource object with `Handle` to avoid leaks.
//!
//! For these things loaded from filesystem, it could be safely shared by the `Location`. We
//! keeps a use-counting internally. It will not be freed really, before all the users deletes
//! its `Handle`.
//!
//! ### Surface Object
//!
//! Surface object plays as the `Layer` role we mentioned above, all the commands we submitted
//! in application code is attached to a specific `Surface`. Commands inside `Surface` are
//! sorted before submitting to underlying OpenGL.
//!
//! Surface object also holds references to render target, and wraps rendering operations to
//! it. Likes clearing, offscreen-rendering, MSAA resolve etc..
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! // Creates a `SurfaceParams` object.
//! let mut params = SurfaceParams::default();
//! /// Sets the attachments of internal frame-buffer. It consists of multiple color attachments
//! /// and a optional `Depth/DepthStencil` buffer attachment.
//! ///
//! /// If none attachment is assigned, the default framebuffer generated by the system will be
//! /// used.
//! params.set_attachments(&[], None);
//! // Sets the clear flags for this surface and its underlying framebuffer.
//! params.set_clear(Color::white(), 1.0, None);
//!
//! // Creates an surface with `SurfaceParams`.
//! let surface = video::create_surface(params).unwrap();
//! // Deletes the surface object.
//! video::delete_surface(surface);
//! ```
//!
//! ### Shader Object
//!
//! Shader object is introduced to encapsulate all stateful things we need to configurate
//! video pipeline. This would also enable us to easily change the order of draw calls
//! and get rid of redundant state changes.
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! // Declares the uniform variable layouts.
//! let mut uniforms = UniformVariableLayout::build()
//! .with("u_ModelViewMatrix", UniformVariableType::Matrix4f)
//! .with("u_MVPMatrix", UniformVariableType::Matrix4f)
//! .finish();
//!
//! // Declares the attributes.
//! let attributes = AttributeLayout::build()
//! .with(Attribute::Position, 3)
//! .with(Attribute::Normal, 3)
//! .finish();
//!
//! let mut params = ShaderParams::default();
//! params.attributes = attributes;
//! params.uniforms = uniforms;
//! params.state = RenderState::default();
//!
//! let vs = "..".into();
//! let fs = "..".into();
//!
//! // Create a shader with initial shaders and render state. It encapusulates all the
//! // informations we need to configurate graphics pipeline before real drawing.
//! let shader = video::create_shader(params, vs, fs).unwrap();
//!
//! // Deletes shader object.
//! video::delete_shader(shader);
//! ```
//!
//! ### Texture Object
//!
//! A texture object is a container of one or more images. It can be the source of a texture
//! access from a Shader.
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! let mut params = TextureParams::default();
//!
//! // Create a texture object with optional data. You can fill it later with `update_texture`.
//! let texture = video::create_texture(params, None).unwrap();
//!
//! // Deletes the texture object.
//! video::delete_texture(texture);
//! ```
//!
//! #### Compressed Texture Format
//!
//! _TODO_: Cube texture.
//! _TODO_: 3D texture.
//!
//! ### Mesh Object
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! let mut params = MeshParams::default();
//!
//! // Create a mesh object with optional data. You can fill it later with `update_mesh`.
//! let mesh = video::create_mesh(params, None).unwrap();
//!
//! // Deletes the mesh object.
//! video::delete_mesh(mesh);
//! ```
//!
//! # Commands
//!
//! _TODO_: CommandBuffer
//! _TODO_: DrawCommandBuffer
/// Maximum number of attributes in vertex layout.
pub const MAX_VERTEX_ATTRIBUTES: usize = 12;
/// Maximum number of attachments in framebuffer.
pub const MAX_FRAMEBUFFER_ATTACHMENTS: usize = 8;
/// Maximum number of uniform variables in shader.
pub const MAX_UNIFORM_VARIABLES: usize = 32;
/// Maximum number of textures in shader.
pub const MAX_UNIFORM_TEXTURE_SLOTS: usize = 8;
#[macro_use]
pub mod assets;
pub mod command;
pub mod errors;
mod system;
mod backends;
pub mod prelude {
pub use super::assets::prelude::*;
pub use super::command::{CommandBuffer, Draw, DrawCommandBuffer};
}
use std::sync::Arc;
use uuid::Uuid;
use crate::math::prelude::Aabb2;
use crate::prelude::CrResult;
use crate::res::utils::prelude::ResourceState;
use crate::utils::double_buf::DoubleBuf;
use self::assets::prelude::*;
use self::backends::frame::Frame;
use self::errors::*;
use self::ins::{ctx, CTX};
use self::system::VideoSystem;
/// Setup the video system.
pub(crate) unsafe fn setup() -> CrResult<()> {
debug_assert!(CTX.is_null(), "duplicated setup of video system.");
let ctx = VideoSystem::new()?;
CTX = Box::into_raw(Box::new(ctx));
Ok(())
}
/// Setup the video system.
pub(crate) unsafe fn headless() {
debug_assert!(CTX.is_null(), "duplicated setup of video system.");
let ctx = VideoSystem::headless();
CTX = Box::into_raw(Box::new(ctx));
}
/// Discard the video system.
pub(crate) unsafe fn discard() {
if CTX.is_null() |
drop(Box::from_raw(CTX as *mut VideoSystem));
CTX = std::ptr::null();
}
pub(crate) unsafe fn frames() -> Arc<DoubleBuf<Frame>> {
ctx().frames()
}
/// Creates an surface with `SurfaceParams`.
#[inline]
pub fn create_surface(params: SurfaceParams) -> Result<SurfaceHandle> {
ctx().create_surface(params)
}
/// Gets the `SurfaceParams` if available.
#[inline]
pub fn surface(handle: SurfaceHandle) -> Option<SurfaceParams> {
ctx().surface(handle)
}
/// Get the resource state of specified surface.
#[inline]
pub fn surface_state(handle: SurfaceHandle) -> ResourceState {
ctx().surface_state(handle)
}
/// Deletes surface object.
#[inline]
pub fn delete_surface(handle: SurfaceHandle) {
ctx().delete_surface(handle)
}
/// Create a shader with initial shaders and render state. It encapusulates all the
/// informations we need to configurate graphics pipeline before real drawing.
#[inline]
pub fn create_shader(params: ShaderParams, vs: String, fs: String) -> Result<ShaderHandle> {
ctx().create_shader(params, vs, fs)
}
/// Gets the `ShaderParams` if available.
#[inline]
pub fn shader(handle: ShaderHandle) -> Option<ShaderParams> {
ctx().shader(handle)
}
/// Get the resource state of specified shader.
#[inline]
pub fn shader_state(handle: ShaderHandle) -> ResourceState {
ctx().shader_state(handle)
}
/// Delete shader state object.
#[inline]
pub fn delete_shader(handle: ShaderHandle) {
ctx().delete_shader(handle)
}
/// Create a new mesh object.
#[inline]
pub fn create_mesh<T>(params: MeshParams, data: T) -> CrResult<MeshHandle>
where
T: Into<Option<MeshData>>,
{
ctx().create_mesh(params, data)
}
/// Creates a mesh object from file asynchronously.
#[inline]
pub fn create_mesh_from<T: AsRef<str>>(url: T) -> CrResult<MeshHandle> {
ctx().create_mesh_from(url)
}
/// Creates a mesh object from file asynchronously.
#[inline]
pub fn create_mesh_from_uuid(uuid: Uuid) -> CrResult<MeshHandle> {
ctx().create_mesh_from_uuid(uuid)
}
/// Gets the `MeshParams` if available.
#[inline]
pub fn mesh(handle: MeshHandle) -> Option<MeshParams> {
ctx().mesh(handle)
}
/// Get the resource state of specified mesh.
#[inline]
pub fn mesh_state(handle: MeshHandle) -> ResourceState {
ctx().mesh_state(handle)
}
/// Update a subset of dynamic vertex buffer. Use `offset` specifies the offset
/// into the buffer object's data store where data replacement will begin, measured
/// in bytes.
#[inline]
pub fn update_vertex_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> {
ctx().update_vertex_buffer(handle, offset, data)
}
/// Update a subset of dynamic index buffer. Use `offset` specifies the offset
/// into the buffer object's data store where data replacement will begin, measured
/// in bytes.
#[inline]
pub fn update_index_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> {
ctx().update_index_buffer(handle, offset, data)
}
/// Delete mesh object.
#[inline]
pub fn delete_mesh(handle: MeshHandle) {
ctx().delete_mesh(handle);
}
/// Create texture object. A texture is an image loaded in video memory,
/// which can be sampled in shaders.
#[inline]
pub fn create_texture<T>(params: TextureParams, data: T) -> CrResult<TextureHandle>
where
T: Into<Option<TextureData>>,
{
ctx().create_texture(params, data)
}
/// Creates a texture object from file asynchronously.
#[inline]
pub fn create_texture_from<T: AsRef<str>>(url: T) -> CrResult<TextureHandle> {
ctx().create_texture_from(url)
}
/// Creates a texture object from file asynchronously.
#[inline]
pub fn create_texture_from_uuid(uuid: Uuid) -> CrResult<TextureHandle> {
ctx().create_texture_from_uuid(uuid)
}
/// Get the resource state of specified texture.
#[inline]
pub fn texture_state(handle: TextureHandle) -> ResourceState {
ctx().texture_state(handle)
}
/// Update a contiguous subregion of an existing two-dimensional texture object.
#[inline]
pub fn update_texture(handle: TextureHandle, area: Aabb2<u32>, data: &[u8]) -> CrResult<()> {
ctx().update_texture(handle, area, data)
}
/// Delete the texture object.
#[inline]
pub fn delete_texture(handle: TextureHandle) {
ctx().delete_texture(handle);
}
/// Gets the `TextureParams` if available.
#[inline]
pub fn texture(handle: TextureHandle)->Option<TextureParams> {
ctx().texture(handle)
}
/// Create render texture object, which could be attached with a framebuffer.
#[inline]
pub fn create_render_texture(params: RenderTextureParams) -> Result<RenderTextureHandle> {
ctx().create_render_texture(params)
}
/// Gets the `RenderTextureParams` if available.
#[inline]
pub fn render_texture(handle: RenderTextureHandle) -> Option<RenderTextureParams> {
ctx().render_texture(handle)
}
/// Get the resource state of specified render texture.
#[inline]
pub fn render_texture_state(handle: RenderTextureHandle) -> ResourceState {
ctx().render_texture_state(handle)
}
/// Delete the render texture object.
#[inline]
pub fn delete_render_texture(handle: RenderTextureHandle) {
ctx().delete_render_texture(handle)
}
mod ins {
use super::system::VideoSystem;
pub static mut CTX: *const VideoSystem = std::ptr::null();
#[inline]
pub fn ctx() -> &'static VideoSystem {
unsafe {
debug_assert!(
!CTX.is_null(),
"video system has not been initialized properly."
);
&*CTX
}
}
}
| {
return;
} | conditional_block |
mod.rs | //! A stateless, layered, multithread video system with OpenGL backends.
//!
//! # Overview and Goals
//!
//! The management of video effects has become an important topic and key feature of
//! rendering engines. With the increasing number of effects it is not sufficient anymore
//! to only support them, but also to integrate them into the rendering engine in a clean
//! and extensible way.
//!
//! The goal of this work and simultaneously its main contribution is to design and
//! implement an advanced effects framework. Using this framework it should be easy for
//! further applications to combine several small effects like texture mapping, shading
//! and shadowing in an automated and transparent way and apply them to any 3D model.
//! Additionally, it should be possible to integrate new effects and use the provided
//! framework for rapid prototyping.
//!
//! ### Multi Platform
//!
//! Ideally, crayon should be able to run on macOS, windows and popular mobile-platforms.
//! There still are a huge number of performance and feature limited devices, so this
//! video module will always be limited by lower-end 3D APIs like OpenGL ES2.0.
//!
//! ### Stateless Pipeline
//!
//! Ordinary OpenGL application deals with stateful APIs, which is error-prone. This
//! means whenever you change any state in the API for subsequent draw calls, this state
//! change also affects draw calls submitted at a later point in time. Ideally, submitting
//! a draw call with whatever state we want should not affect any of the other draw calls,
//! even in multi-thread environments.
//!
//! Modern 3D-APIs like [gfx-rs](https://github.com/gfx-rs/gfx), [glium](https://github.com/glium/glium)
//! bundles render state and data into a few, precompiled resource objects which are
//! combined into final render pipeline. We should follow the same philosophy.
//!
//! ### Multi-thread
//!
//! In most cases, dividing OpenGL rendering across multiple threads will not result in
//! any performance improvement due the pipeline nature of OpenGL. What we are about
//! to do is actually exploiting parallelism in resource preparation, and provides a set of
//! multi-thread friendly APIs.
//!
//! The most common solution is by using a double-buffer of commands. This consists of
//! running the renderer backend in a speparate thread, where all draw calls and communication
//! with the OpenGL API are performed. The frontend thread that runs the game logic
//! communicates with the backend renderer via a command double-buffer.
//!
//! ### Layered Rendering
//!
//! Its important to sort video commands (generated by different threads) before submiting
//! them to OpenGL, for the sack of both correctness and performance. For example, to draw
//! transparent objects via blending, we need draw opaque object first, usually from front-to-back,
//! and draw translucents from back-to-front.
//!
//! The idea here is to assign a integer key to a command which is used for sorting. Depending
//! on where those bits are stored in the integer, you can apply different sorting criteria
//! for the same array of commands, as long as you know how the keys were built.
//!
//! # Resource Objects
//!
//! Render state and data, which are combined into final render pipeline, are bundled into a
//! few, precompiled resource objects in video module.
//!
//! All resources types can be created instantly from data in memory, and meshes, textures
//! can also be loaded asynchronously from the filesystem.
//!
//! And the actual resource objects are usually private and opaque, you will get a `Handle`
//! immediately for every resource objects you created instead of some kind of reference.
//! Its the unique identifier for the resource, its type-safe and copyable.
//!
//! When you are done with the created resource objects, its your responsiblity to delete the
//! resource object with `Handle` to avoid leaks.
//!
//! For these things loaded from filesystem, it could be safely shared by the `Location`. We
//! keeps a use-counting internally. It will not be freed really, before all the users deletes
//! its `Handle`.
//!
//! ### Surface Object
//!
//! Surface object plays as the `Layer` role we mentioned above, all the commands we submitted
//! in application code is attached to a specific `Surface`. Commands inside `Surface` are
//! sorted before submitting to underlying OpenGL.
//!
//! Surface object also holds references to render target, and wraps rendering operations to
//! it. Likes clearing, offscreen-rendering, MSAA resolve etc..
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! // Creates a `SurfaceParams` object.
//! let mut params = SurfaceParams::default();
//! /// Sets the attachments of internal frame-buffer. It consists of multiple color attachments
//! /// and a optional `Depth/DepthStencil` buffer attachment.
//! ///
//! /// If none attachment is assigned, the default framebuffer generated by the system will be
//! /// used.
//! params.set_attachments(&[], None);
//! // Sets the clear flags for this surface and its underlying framebuffer.
//! params.set_clear(Color::white(), 1.0, None);
//!
//! // Creates an surface with `SurfaceParams`.
//! let surface = video::create_surface(params).unwrap();
//! // Deletes the surface object.
//! video::delete_surface(surface);
//! ```
//!
//! ### Shader Object
//!
//! Shader object is introduced to encapsulate all stateful things we need to configurate
//! video pipeline. This would also enable us to easily change the order of draw calls
//! and get rid of redundant state changes.
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! // Declares the uniform variable layouts.
//! let mut uniforms = UniformVariableLayout::build()
//! .with("u_ModelViewMatrix", UniformVariableType::Matrix4f)
//! .with("u_MVPMatrix", UniformVariableType::Matrix4f)
//! .finish();
//!
//! // Declares the attributes.
//! let attributes = AttributeLayout::build()
//! .with(Attribute::Position, 3)
//! .with(Attribute::Normal, 3)
//! .finish();
//!
//! let mut params = ShaderParams::default();
//! params.attributes = attributes;
//! params.uniforms = uniforms;
//! params.state = RenderState::default();
//!
//! let vs = "..".into();
//! let fs = "..".into();
//!
//! // Create a shader with initial shaders and render state. It encapusulates all the
//! // informations we need to configurate graphics pipeline before real drawing.
//! let shader = video::create_shader(params, vs, fs).unwrap();
//!
//! // Deletes shader object.
//! video::delete_shader(shader);
//! ```
//!
//! ### Texture Object
//!
//! A texture object is a container of one or more images. It can be the source of a texture
//! access from a Shader.
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! let mut params = TextureParams::default();
//!
//! // Create a texture object with optional data. You can fill it later with `update_texture`.
//! let texture = video::create_texture(params, None).unwrap();
//!
//! // Deletes the texture object.
//! video::delete_texture(texture);
//! ```
//!
//! #### Compressed Texture Format
//!
//! _TODO_: Cube texture.
//! _TODO_: 3D texture.
//!
//! ### Mesh Object
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! let mut params = MeshParams::default();
//!
//! // Create a mesh object with optional data. You can fill it later with `update_mesh`.
//! let mesh = video::create_mesh(params, None).unwrap();
//!
//! // Deletes the mesh object.
//! video::delete_mesh(mesh);
//! ```
//!
//! # Commands
//!
//! _TODO_: CommandBuffer
//! _TODO_: DrawCommandBuffer
/// Maximum number of attributes in vertex layout.
pub const MAX_VERTEX_ATTRIBUTES: usize = 12;
/// Maximum number of attachments in framebuffer.
pub const MAX_FRAMEBUFFER_ATTACHMENTS: usize = 8;
/// Maximum number of uniform variables in shader.
pub const MAX_UNIFORM_VARIABLES: usize = 32;
/// Maximum number of textures in shader.
pub const MAX_UNIFORM_TEXTURE_SLOTS: usize = 8;
#[macro_use]
pub mod assets;
pub mod command;
pub mod errors;
mod system;
mod backends;
pub mod prelude {
pub use super::assets::prelude::*;
pub use super::command::{CommandBuffer, Draw, DrawCommandBuffer};
}
use std::sync::Arc;
use uuid::Uuid;
use crate::math::prelude::Aabb2;
use crate::prelude::CrResult;
use crate::res::utils::prelude::ResourceState;
use crate::utils::double_buf::DoubleBuf;
use self::assets::prelude::*;
use self::backends::frame::Frame;
use self::errors::*;
use self::ins::{ctx, CTX};
use self::system::VideoSystem;
/// Setup the video system.
pub(crate) unsafe fn setup() -> CrResult<()> {
debug_assert!(CTX.is_null(), "duplicated setup of video system.");
let ctx = VideoSystem::new()?;
CTX = Box::into_raw(Box::new(ctx));
Ok(())
}
/// Setup the video system.
pub(crate) unsafe fn headless() {
debug_assert!(CTX.is_null(), "duplicated setup of video system.");
let ctx = VideoSystem::headless();
CTX = Box::into_raw(Box::new(ctx));
}
/// Discard the video system.
pub(crate) unsafe fn discard() {
if CTX.is_null() {
return;
}
drop(Box::from_raw(CTX as *mut VideoSystem));
CTX = std::ptr::null();
}
pub(crate) unsafe fn frames() -> Arc<DoubleBuf<Frame>> {
ctx().frames()
}
/// Creates an surface with `SurfaceParams`.
#[inline]
pub fn create_surface(params: SurfaceParams) -> Result<SurfaceHandle> {
ctx().create_surface(params)
}
/// Gets the `SurfaceParams` if available.
#[inline]
pub fn surface(handle: SurfaceHandle) -> Option<SurfaceParams> {
ctx().surface(handle)
}
/// Get the resource state of specified surface.
#[inline]
pub fn surface_state(handle: SurfaceHandle) -> ResourceState {
ctx().surface_state(handle)
}
/// Deletes surface object.
#[inline]
pub fn delete_surface(handle: SurfaceHandle) {
ctx().delete_surface(handle)
}
/// Create a shader with initial shaders and render state. It encapusulates all the
/// informations we need to configurate graphics pipeline before real drawing. |
/// Gets the `ShaderParams` if available.
#[inline]
pub fn shader(handle: ShaderHandle) -> Option<ShaderParams> {
ctx().shader(handle)
}
/// Get the resource state of specified shader.
#[inline]
pub fn shader_state(handle: ShaderHandle) -> ResourceState {
ctx().shader_state(handle)
}
/// Delete shader state object.
#[inline]
pub fn delete_shader(handle: ShaderHandle) {
ctx().delete_shader(handle)
}
/// Create a new mesh object.
#[inline]
pub fn create_mesh<T>(params: MeshParams, data: T) -> CrResult<MeshHandle>
where
T: Into<Option<MeshData>>,
{
ctx().create_mesh(params, data)
}
/// Creates a mesh object from file asynchronously.
#[inline]
pub fn create_mesh_from<T: AsRef<str>>(url: T) -> CrResult<MeshHandle> {
ctx().create_mesh_from(url)
}
/// Creates a mesh object from file asynchronously.
#[inline]
pub fn create_mesh_from_uuid(uuid: Uuid) -> CrResult<MeshHandle> {
ctx().create_mesh_from_uuid(uuid)
}
/// Gets the `MeshParams` if available.
#[inline]
pub fn mesh(handle: MeshHandle) -> Option<MeshParams> {
ctx().mesh(handle)
}
/// Get the resource state of specified mesh.
#[inline]
pub fn mesh_state(handle: MeshHandle) -> ResourceState {
ctx().mesh_state(handle)
}
/// Update a subset of dynamic vertex buffer. Use `offset` specifies the offset
/// into the buffer object's data store where data replacement will begin, measured
/// in bytes.
#[inline]
pub fn update_vertex_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> {
ctx().update_vertex_buffer(handle, offset, data)
}
/// Update a subset of dynamic index buffer. Use `offset` specifies the offset
/// into the buffer object's data store where data replacement will begin, measured
/// in bytes.
#[inline]
pub fn update_index_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> {
ctx().update_index_buffer(handle, offset, data)
}
/// Delete mesh object.
#[inline]
pub fn delete_mesh(handle: MeshHandle) {
ctx().delete_mesh(handle);
}
/// Create texture object. A texture is an image loaded in video memory,
/// which can be sampled in shaders.
#[inline]
pub fn create_texture<T>(params: TextureParams, data: T) -> CrResult<TextureHandle>
where
T: Into<Option<TextureData>>,
{
ctx().create_texture(params, data)
}
/// Creates a texture object from file asynchronously.
#[inline]
pub fn create_texture_from<T: AsRef<str>>(url: T) -> CrResult<TextureHandle> {
ctx().create_texture_from(url)
}
/// Creates a texture object from file asynchronously.
#[inline]
pub fn create_texture_from_uuid(uuid: Uuid) -> CrResult<TextureHandle> {
ctx().create_texture_from_uuid(uuid)
}
/// Get the resource state of specified texture.
#[inline]
pub fn texture_state(handle: TextureHandle) -> ResourceState {
ctx().texture_state(handle)
}
/// Update a contiguous subregion of an existing two-dimensional texture object.
#[inline]
pub fn update_texture(handle: TextureHandle, area: Aabb2<u32>, data: &[u8]) -> CrResult<()> {
ctx().update_texture(handle, area, data)
}
/// Delete the texture object.
#[inline]
pub fn delete_texture(handle: TextureHandle) {
ctx().delete_texture(handle);
}
/// Gets the `TextureParams` if available.
#[inline]
pub fn texture(handle: TextureHandle)->Option<TextureParams> {
ctx().texture(handle)
}
/// Create render texture object, which could be attached with a framebuffer.
#[inline]
pub fn create_render_texture(params: RenderTextureParams) -> Result<RenderTextureHandle> {
ctx().create_render_texture(params)
}
/// Gets the `RenderTextureParams` if available.
#[inline]
pub fn render_texture(handle: RenderTextureHandle) -> Option<RenderTextureParams> {
ctx().render_texture(handle)
}
/// Get the resource state of specified render texture.
#[inline]
pub fn render_texture_state(handle: RenderTextureHandle) -> ResourceState {
ctx().render_texture_state(handle)
}
/// Delete the render texture object.
#[inline]
pub fn delete_render_texture(handle: RenderTextureHandle) {
ctx().delete_render_texture(handle)
}
mod ins {
use super::system::VideoSystem;
pub static mut CTX: *const VideoSystem = std::ptr::null();
#[inline]
pub fn ctx() -> &'static VideoSystem {
unsafe {
debug_assert!(
!CTX.is_null(),
"video system has not been initialized properly."
);
&*CTX
}
}
} | #[inline]
pub fn create_shader(params: ShaderParams, vs: String, fs: String) -> Result<ShaderHandle> {
ctx().create_shader(params, vs, fs)
} | random_line_split |
mod.rs | //! A stateless, layered, multithread video system with OpenGL backends.
//!
//! # Overview and Goals
//!
//! The management of video effects has become an important topic and key feature of
//! rendering engines. With the increasing number of effects it is not sufficient anymore
//! to only support them, but also to integrate them into the rendering engine in a clean
//! and extensible way.
//!
//! The goal of this work and simultaneously its main contribution is to design and
//! implement an advanced effects framework. Using this framework it should be easy for
//! further applications to combine several small effects like texture mapping, shading
//! and shadowing in an automated and transparent way and apply them to any 3D model.
//! Additionally, it should be possible to integrate new effects and use the provided
//! framework for rapid prototyping.
//!
//! ### Multi Platform
//!
//! Ideally, crayon should be able to run on macOS, windows and popular mobile-platforms.
//! There still are a huge number of performance and feature limited devices, so this
//! video module will always be limited by lower-end 3D APIs like OpenGL ES2.0.
//!
//! ### Stateless Pipeline
//!
//! Ordinary OpenGL application deals with stateful APIs, which is error-prone. This
//! means whenever you change any state in the API for subsequent draw calls, this state
//! change also affects draw calls submitted at a later point in time. Ideally, submitting
//! a draw call with whatever state we want should not affect any of the other draw calls,
//! even in multi-thread environments.
//!
//! Modern 3D-APIs like [gfx-rs](https://github.com/gfx-rs/gfx), [glium](https://github.com/glium/glium)
//! bundles render state and data into a few, precompiled resource objects which are
//! combined into final render pipeline. We should follow the same philosophy.
//!
//! ### Multi-thread
//!
//! In most cases, dividing OpenGL rendering across multiple threads will not result in
//! any performance improvement due the pipeline nature of OpenGL. What we are about
//! to do is actually exploiting parallelism in resource preparation, and provides a set of
//! multi-thread friendly APIs.
//!
//! The most common solution is by using a double-buffer of commands. This consists of
//! running the renderer backend in a speparate thread, where all draw calls and communication
//! with the OpenGL API are performed. The frontend thread that runs the game logic
//! communicates with the backend renderer via a command double-buffer.
//!
//! ### Layered Rendering
//!
//! Its important to sort video commands (generated by different threads) before submiting
//! them to OpenGL, for the sack of both correctness and performance. For example, to draw
//! transparent objects via blending, we need draw opaque object first, usually from front-to-back,
//! and draw translucents from back-to-front.
//!
//! The idea here is to assign a integer key to a command which is used for sorting. Depending
//! on where those bits are stored in the integer, you can apply different sorting criteria
//! for the same array of commands, as long as you know how the keys were built.
//!
//! # Resource Objects
//!
//! Render state and data, which are combined into final render pipeline, are bundled into a
//! few, precompiled resource objects in video module.
//!
//! All resources types can be created instantly from data in memory, and meshes, textures
//! can also be loaded asynchronously from the filesystem.
//!
//! And the actual resource objects are usually private and opaque, you will get a `Handle`
//! immediately for every resource objects you created instead of some kind of reference.
//! Its the unique identifier for the resource, its type-safe and copyable.
//!
//! When you are done with the created resource objects, its your responsiblity to delete the
//! resource object with `Handle` to avoid leaks.
//!
//! For these things loaded from filesystem, it could be safely shared by the `Location`. We
//! keeps a use-counting internally. It will not be freed really, before all the users deletes
//! its `Handle`.
//!
//! ### Surface Object
//!
//! Surface object plays as the `Layer` role we mentioned above, all the commands we submitted
//! in application code is attached to a specific `Surface`. Commands inside `Surface` are
//! sorted before submitting to underlying OpenGL.
//!
//! Surface object also holds references to render target, and wraps rendering operations to
//! it. Likes clearing, offscreen-rendering, MSAA resolve etc..
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! // Creates a `SurfaceParams` object.
//! let mut params = SurfaceParams::default();
//! /// Sets the attachments of internal frame-buffer. It consists of multiple color attachments
//! /// and a optional `Depth/DepthStencil` buffer attachment.
//! ///
//! /// If none attachment is assigned, the default framebuffer generated by the system will be
//! /// used.
//! params.set_attachments(&[], None);
//! // Sets the clear flags for this surface and its underlying framebuffer.
//! params.set_clear(Color::white(), 1.0, None);
//!
//! // Creates an surface with `SurfaceParams`.
//! let surface = video::create_surface(params).unwrap();
//! // Deletes the surface object.
//! video::delete_surface(surface);
//! ```
//!
//! ### Shader Object
//!
//! Shader object is introduced to encapsulate all stateful things we need to configurate
//! video pipeline. This would also enable us to easily change the order of draw calls
//! and get rid of redundant state changes.
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! // Declares the uniform variable layouts.
//! let mut uniforms = UniformVariableLayout::build()
//! .with("u_ModelViewMatrix", UniformVariableType::Matrix4f)
//! .with("u_MVPMatrix", UniformVariableType::Matrix4f)
//! .finish();
//!
//! // Declares the attributes.
//! let attributes = AttributeLayout::build()
//! .with(Attribute::Position, 3)
//! .with(Attribute::Normal, 3)
//! .finish();
//!
//! let mut params = ShaderParams::default();
//! params.attributes = attributes;
//! params.uniforms = uniforms;
//! params.state = RenderState::default();
//!
//! let vs = "..".into();
//! let fs = "..".into();
//!
//! // Create a shader with initial shaders and render state. It encapusulates all the
//! // informations we need to configurate graphics pipeline before real drawing.
//! let shader = video::create_shader(params, vs, fs).unwrap();
//!
//! // Deletes shader object.
//! video::delete_shader(shader);
//! ```
//!
//! ### Texture Object
//!
//! A texture object is a container of one or more images. It can be the source of a texture
//! access from a Shader.
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! let mut params = TextureParams::default();
//!
//! // Create a texture object with optional data. You can fill it later with `update_texture`.
//! let texture = video::create_texture(params, None).unwrap();
//!
//! // Deletes the texture object.
//! video::delete_texture(texture);
//! ```
//!
//! #### Compressed Texture Format
//!
//! _TODO_: Cube texture.
//! _TODO_: 3D texture.
//!
//! ### Mesh Object
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! let mut params = MeshParams::default();
//!
//! // Create a mesh object with optional data. You can fill it later with `update_mesh`.
//! let mesh = video::create_mesh(params, None).unwrap();
//!
//! // Deletes the mesh object.
//! video::delete_mesh(mesh);
//! ```
//!
//! # Commands
//!
//! _TODO_: CommandBuffer
//! _TODO_: DrawCommandBuffer
/// Maximum number of attributes in vertex layout.
pub const MAX_VERTEX_ATTRIBUTES: usize = 12;
/// Maximum number of attachments in framebuffer.
pub const MAX_FRAMEBUFFER_ATTACHMENTS: usize = 8;
/// Maximum number of uniform variables in shader.
pub const MAX_UNIFORM_VARIABLES: usize = 32;
/// Maximum number of textures in shader.
pub const MAX_UNIFORM_TEXTURE_SLOTS: usize = 8;
#[macro_use]
pub mod assets;
pub mod command;
pub mod errors;
mod system;
mod backends;
pub mod prelude {
pub use super::assets::prelude::*;
pub use super::command::{CommandBuffer, Draw, DrawCommandBuffer};
}
use std::sync::Arc;
use uuid::Uuid;
use crate::math::prelude::Aabb2;
use crate::prelude::CrResult;
use crate::res::utils::prelude::ResourceState;
use crate::utils::double_buf::DoubleBuf;
use self::assets::prelude::*;
use self::backends::frame::Frame;
use self::errors::*;
use self::ins::{ctx, CTX};
use self::system::VideoSystem;
/// Setup the video system.
pub(crate) unsafe fn setup() -> CrResult<()> {
debug_assert!(CTX.is_null(), "duplicated setup of video system.");
let ctx = VideoSystem::new()?;
CTX = Box::into_raw(Box::new(ctx));
Ok(())
}
/// Setup the video system.
pub(crate) unsafe fn headless() {
debug_assert!(CTX.is_null(), "duplicated setup of video system.");
let ctx = VideoSystem::headless();
CTX = Box::into_raw(Box::new(ctx));
}
/// Discard the video system.
pub(crate) unsafe fn discard() {
if CTX.is_null() {
return;
}
drop(Box::from_raw(CTX as *mut VideoSystem));
CTX = std::ptr::null();
}
pub(crate) unsafe fn frames() -> Arc<DoubleBuf<Frame>> {
ctx().frames()
}
/// Creates an surface with `SurfaceParams`.
#[inline]
pub fn | (params: SurfaceParams) -> Result<SurfaceHandle> {
ctx().create_surface(params)
}
/// Gets the `SurfaceParams` if available.
#[inline]
pub fn surface(handle: SurfaceHandle) -> Option<SurfaceParams> {
ctx().surface(handle)
}
/// Get the resource state of specified surface.
#[inline]
pub fn surface_state(handle: SurfaceHandle) -> ResourceState {
ctx().surface_state(handle)
}
/// Deletes surface object.
#[inline]
pub fn delete_surface(handle: SurfaceHandle) {
ctx().delete_surface(handle)
}
/// Create a shader with initial shaders and render state. It encapusulates all the
/// informations we need to configurate graphics pipeline before real drawing.
#[inline]
pub fn create_shader(params: ShaderParams, vs: String, fs: String) -> Result<ShaderHandle> {
ctx().create_shader(params, vs, fs)
}
/// Gets the `ShaderParams` if available.
#[inline]
pub fn shader(handle: ShaderHandle) -> Option<ShaderParams> {
ctx().shader(handle)
}
/// Get the resource state of specified shader.
#[inline]
pub fn shader_state(handle: ShaderHandle) -> ResourceState {
ctx().shader_state(handle)
}
/// Delete shader state object.
#[inline]
pub fn delete_shader(handle: ShaderHandle) {
ctx().delete_shader(handle)
}
/// Create a new mesh object.
#[inline]
pub fn create_mesh<T>(params: MeshParams, data: T) -> CrResult<MeshHandle>
where
T: Into<Option<MeshData>>,
{
ctx().create_mesh(params, data)
}
/// Creates a mesh object from file asynchronously.
#[inline]
pub fn create_mesh_from<T: AsRef<str>>(url: T) -> CrResult<MeshHandle> {
ctx().create_mesh_from(url)
}
/// Creates a mesh object from file asynchronously.
#[inline]
pub fn create_mesh_from_uuid(uuid: Uuid) -> CrResult<MeshHandle> {
ctx().create_mesh_from_uuid(uuid)
}
/// Gets the `MeshParams` if available.
#[inline]
pub fn mesh(handle: MeshHandle) -> Option<MeshParams> {
ctx().mesh(handle)
}
/// Get the resource state of specified mesh.
#[inline]
pub fn mesh_state(handle: MeshHandle) -> ResourceState {
ctx().mesh_state(handle)
}
/// Update a subset of dynamic vertex buffer. Use `offset` specifies the offset
/// into the buffer object's data store where data replacement will begin, measured
/// in bytes.
#[inline]
pub fn update_vertex_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> {
ctx().update_vertex_buffer(handle, offset, data)
}
/// Update a subset of dynamic index buffer. Use `offset` specifies the offset
/// into the buffer object's data store where data replacement will begin, measured
/// in bytes.
#[inline]
pub fn update_index_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> {
ctx().update_index_buffer(handle, offset, data)
}
/// Delete mesh object.
#[inline]
pub fn delete_mesh(handle: MeshHandle) {
ctx().delete_mesh(handle);
}
/// Create texture object. A texture is an image loaded in video memory,
/// which can be sampled in shaders.
#[inline]
pub fn create_texture<T>(params: TextureParams, data: T) -> CrResult<TextureHandle>
where
T: Into<Option<TextureData>>,
{
ctx().create_texture(params, data)
}
/// Creates a texture object from file asynchronously.
#[inline]
pub fn create_texture_from<T: AsRef<str>>(url: T) -> CrResult<TextureHandle> {
ctx().create_texture_from(url)
}
/// Creates a texture object from file asynchronously.
#[inline]
pub fn create_texture_from_uuid(uuid: Uuid) -> CrResult<TextureHandle> {
ctx().create_texture_from_uuid(uuid)
}
/// Get the resource state of specified texture.
#[inline]
pub fn texture_state(handle: TextureHandle) -> ResourceState {
ctx().texture_state(handle)
}
/// Update a contiguous subregion of an existing two-dimensional texture object.
#[inline]
pub fn update_texture(handle: TextureHandle, area: Aabb2<u32>, data: &[u8]) -> CrResult<()> {
ctx().update_texture(handle, area, data)
}
/// Delete the texture object.
#[inline]
pub fn delete_texture(handle: TextureHandle) {
ctx().delete_texture(handle);
}
/// Gets the `TextureParams` if available.
#[inline]
pub fn texture(handle: TextureHandle)->Option<TextureParams> {
ctx().texture(handle)
}
/// Create render texture object, which could be attached with a framebuffer.
#[inline]
pub fn create_render_texture(params: RenderTextureParams) -> Result<RenderTextureHandle> {
ctx().create_render_texture(params)
}
/// Gets the `RenderTextureParams` if available.
#[inline]
pub fn render_texture(handle: RenderTextureHandle) -> Option<RenderTextureParams> {
ctx().render_texture(handle)
}
/// Get the resource state of specified render texture.
#[inline]
pub fn render_texture_state(handle: RenderTextureHandle) -> ResourceState {
ctx().render_texture_state(handle)
}
/// Delete the render texture object.
#[inline]
pub fn delete_render_texture(handle: RenderTextureHandle) {
ctx().delete_render_texture(handle)
}
mod ins {
use super::system::VideoSystem;
pub static mut CTX: *const VideoSystem = std::ptr::null();
#[inline]
pub fn ctx() -> &'static VideoSystem {
unsafe {
debug_assert!(
!CTX.is_null(),
"video system has not been initialized properly."
);
&*CTX
}
}
}
| create_surface | identifier_name |
mod.rs | //! A stateless, layered, multithread video system with OpenGL backends.
//!
//! # Overview and Goals
//!
//! The management of video effects has become an important topic and key feature of
//! rendering engines. With the increasing number of effects it is not sufficient anymore
//! to only support them, but also to integrate them into the rendering engine in a clean
//! and extensible way.
//!
//! The goal of this work and simultaneously its main contribution is to design and
//! implement an advanced effects framework. Using this framework it should be easy for
//! further applications to combine several small effects like texture mapping, shading
//! and shadowing in an automated and transparent way and apply them to any 3D model.
//! Additionally, it should be possible to integrate new effects and use the provided
//! framework for rapid prototyping.
//!
//! ### Multi Platform
//!
//! Ideally, crayon should be able to run on macOS, windows and popular mobile-platforms.
//! There still are a huge number of performance and feature limited devices, so this
//! video module will always be limited by lower-end 3D APIs like OpenGL ES2.0.
//!
//! ### Stateless Pipeline
//!
//! Ordinary OpenGL application deals with stateful APIs, which is error-prone. This
//! means whenever you change any state in the API for subsequent draw calls, this state
//! change also affects draw calls submitted at a later point in time. Ideally, submitting
//! a draw call with whatever state we want should not affect any of the other draw calls,
//! even in multi-thread environments.
//!
//! Modern 3D-APIs like [gfx-rs](https://github.com/gfx-rs/gfx), [glium](https://github.com/glium/glium)
//! bundles render state and data into a few, precompiled resource objects which are
//! combined into final render pipeline. We should follow the same philosophy.
//!
//! ### Multi-thread
//!
//! In most cases, dividing OpenGL rendering across multiple threads will not result in
//! any performance improvement due the pipeline nature of OpenGL. What we are about
//! to do is actually exploiting parallelism in resource preparation, and provides a set of
//! multi-thread friendly APIs.
//!
//! The most common solution is by using a double-buffer of commands. This consists of
//! running the renderer backend in a speparate thread, where all draw calls and communication
//! with the OpenGL API are performed. The frontend thread that runs the game logic
//! communicates with the backend renderer via a command double-buffer.
//!
//! ### Layered Rendering
//!
//! Its important to sort video commands (generated by different threads) before submiting
//! them to OpenGL, for the sack of both correctness and performance. For example, to draw
//! transparent objects via blending, we need draw opaque object first, usually from front-to-back,
//! and draw translucents from back-to-front.
//!
//! The idea here is to assign a integer key to a command which is used for sorting. Depending
//! on where those bits are stored in the integer, you can apply different sorting criteria
//! for the same array of commands, as long as you know how the keys were built.
//!
//! # Resource Objects
//!
//! Render state and data, which are combined into final render pipeline, are bundled into a
//! few, precompiled resource objects in video module.
//!
//! All resources types can be created instantly from data in memory, and meshes, textures
//! can also be loaded asynchronously from the filesystem.
//!
//! And the actual resource objects are usually private and opaque, you will get a `Handle`
//! immediately for every resource objects you created instead of some kind of reference.
//! Its the unique identifier for the resource, its type-safe and copyable.
//!
//! When you are done with the created resource objects, its your responsiblity to delete the
//! resource object with `Handle` to avoid leaks.
//!
//! For these things loaded from filesystem, it could be safely shared by the `Location`. We
//! keeps a use-counting internally. It will not be freed really, before all the users deletes
//! its `Handle`.
//!
//! ### Surface Object
//!
//! Surface object plays as the `Layer` role we mentioned above, all the commands we submitted
//! in application code is attached to a specific `Surface`. Commands inside `Surface` are
//! sorted before submitting to underlying OpenGL.
//!
//! Surface object also holds references to render target, and wraps rendering operations to
//! it. Likes clearing, offscreen-rendering, MSAA resolve etc..
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! // Creates a `SurfaceParams` object.
//! let mut params = SurfaceParams::default();
//! /// Sets the attachments of internal frame-buffer. It consists of multiple color attachments
//! /// and a optional `Depth/DepthStencil` buffer attachment.
//! ///
//! /// If none attachment is assigned, the default framebuffer generated by the system will be
//! /// used.
//! params.set_attachments(&[], None);
//! // Sets the clear flags for this surface and its underlying framebuffer.
//! params.set_clear(Color::white(), 1.0, None);
//!
//! // Creates an surface with `SurfaceParams`.
//! let surface = video::create_surface(params).unwrap();
//! // Deletes the surface object.
//! video::delete_surface(surface);
//! ```
//!
//! ### Shader Object
//!
//! Shader object is introduced to encapsulate all stateful things we need to configurate
//! video pipeline. This would also enable us to easily change the order of draw calls
//! and get rid of redundant state changes.
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! // Declares the uniform variable layouts.
//! let mut uniforms = UniformVariableLayout::build()
//! .with("u_ModelViewMatrix", UniformVariableType::Matrix4f)
//! .with("u_MVPMatrix", UniformVariableType::Matrix4f)
//! .finish();
//!
//! // Declares the attributes.
//! let attributes = AttributeLayout::build()
//! .with(Attribute::Position, 3)
//! .with(Attribute::Normal, 3)
//! .finish();
//!
//! let mut params = ShaderParams::default();
//! params.attributes = attributes;
//! params.uniforms = uniforms;
//! params.state = RenderState::default();
//!
//! let vs = "..".into();
//! let fs = "..".into();
//!
//! // Create a shader with initial shaders and render state. It encapusulates all the
//! // informations we need to configurate graphics pipeline before real drawing.
//! let shader = video::create_shader(params, vs, fs).unwrap();
//!
//! // Deletes shader object.
//! video::delete_shader(shader);
//! ```
//!
//! ### Texture Object
//!
//! A texture object is a container of one or more images. It can be the source of a texture
//! access from a Shader.
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! let mut params = TextureParams::default();
//!
//! // Create a texture object with optional data. You can fill it later with `update_texture`.
//! let texture = video::create_texture(params, None).unwrap();
//!
//! // Deletes the texture object.
//! video::delete_texture(texture);
//! ```
//!
//! #### Compressed Texture Format
//!
//! _TODO_: Cube texture.
//! _TODO_: 3D texture.
//!
//! ### Mesh Object
//!
//! ```rust
//! use crayon::prelude::*;
//! application::oneshot().unwrap();
//!
//! let mut params = MeshParams::default();
//!
//! // Create a mesh object with optional data. You can fill it later with `update_mesh`.
//! let mesh = video::create_mesh(params, None).unwrap();
//!
//! // Deletes the mesh object.
//! video::delete_mesh(mesh);
//! ```
//!
//! # Commands
//!
//! _TODO_: CommandBuffer
//! _TODO_: DrawCommandBuffer
/// Maximum number of attributes in vertex layout.
pub const MAX_VERTEX_ATTRIBUTES: usize = 12;
/// Maximum number of attachments in framebuffer.
pub const MAX_FRAMEBUFFER_ATTACHMENTS: usize = 8;
/// Maximum number of uniform variables in shader.
pub const MAX_UNIFORM_VARIABLES: usize = 32;
/// Maximum number of textures in shader.
pub const MAX_UNIFORM_TEXTURE_SLOTS: usize = 8;
#[macro_use]
pub mod assets;
pub mod command;
pub mod errors;
mod system;
mod backends;
pub mod prelude {
pub use super::assets::prelude::*;
pub use super::command::{CommandBuffer, Draw, DrawCommandBuffer};
}
use std::sync::Arc;
use uuid::Uuid;
use crate::math::prelude::Aabb2;
use crate::prelude::CrResult;
use crate::res::utils::prelude::ResourceState;
use crate::utils::double_buf::DoubleBuf;
use self::assets::prelude::*;
use self::backends::frame::Frame;
use self::errors::*;
use self::ins::{ctx, CTX};
use self::system::VideoSystem;
/// Setup the video system.
pub(crate) unsafe fn setup() -> CrResult<()> {
debug_assert!(CTX.is_null(), "duplicated setup of video system.");
let ctx = VideoSystem::new()?;
CTX = Box::into_raw(Box::new(ctx));
Ok(())
}
/// Setup the video system.
pub(crate) unsafe fn headless() {
debug_assert!(CTX.is_null(), "duplicated setup of video system.");
let ctx = VideoSystem::headless();
CTX = Box::into_raw(Box::new(ctx));
}
/// Discard the video system.
pub(crate) unsafe fn discard() {
if CTX.is_null() {
return;
}
drop(Box::from_raw(CTX as *mut VideoSystem));
CTX = std::ptr::null();
}
pub(crate) unsafe fn frames() -> Arc<DoubleBuf<Frame>> {
ctx().frames()
}
/// Creates an surface with `SurfaceParams`.
#[inline]
pub fn create_surface(params: SurfaceParams) -> Result<SurfaceHandle> {
ctx().create_surface(params)
}
/// Gets the `SurfaceParams` if available.
#[inline]
pub fn surface(handle: SurfaceHandle) -> Option<SurfaceParams> {
ctx().surface(handle)
}
/// Get the resource state of specified surface.
#[inline]
pub fn surface_state(handle: SurfaceHandle) -> ResourceState {
ctx().surface_state(handle)
}
/// Deletes surface object.
#[inline]
pub fn delete_surface(handle: SurfaceHandle) {
ctx().delete_surface(handle)
}
/// Create a shader with initial shaders and render state. It encapusulates all the
/// informations we need to configurate graphics pipeline before real drawing.
#[inline]
pub fn create_shader(params: ShaderParams, vs: String, fs: String) -> Result<ShaderHandle> {
ctx().create_shader(params, vs, fs)
}
/// Gets the `ShaderParams` if available.
#[inline]
pub fn shader(handle: ShaderHandle) -> Option<ShaderParams> {
ctx().shader(handle)
}
/// Get the resource state of specified shader.
#[inline]
pub fn shader_state(handle: ShaderHandle) -> ResourceState {
ctx().shader_state(handle)
}
/// Delete shader state object.
#[inline]
pub fn delete_shader(handle: ShaderHandle) {
ctx().delete_shader(handle)
}
/// Create a new mesh object.
#[inline]
pub fn create_mesh<T>(params: MeshParams, data: T) -> CrResult<MeshHandle>
where
T: Into<Option<MeshData>>,
{
ctx().create_mesh(params, data)
}
/// Creates a mesh object from file asynchronously.
#[inline]
pub fn create_mesh_from<T: AsRef<str>>(url: T) -> CrResult<MeshHandle> {
ctx().create_mesh_from(url)
}
/// Creates a mesh object from file asynchronously.
#[inline]
pub fn create_mesh_from_uuid(uuid: Uuid) -> CrResult<MeshHandle> {
ctx().create_mesh_from_uuid(uuid)
}
/// Gets the `MeshParams` if available.
#[inline]
pub fn mesh(handle: MeshHandle) -> Option<MeshParams> {
ctx().mesh(handle)
}
/// Get the resource state of specified mesh.
#[inline]
pub fn mesh_state(handle: MeshHandle) -> ResourceState {
ctx().mesh_state(handle)
}
/// Update a subset of dynamic vertex buffer. Use `offset` specifies the offset
/// into the buffer object's data store where data replacement will begin, measured
/// in bytes.
#[inline]
pub fn update_vertex_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> {
ctx().update_vertex_buffer(handle, offset, data)
}
/// Update a subset of dynamic index buffer. Use `offset` specifies the offset
/// into the buffer object's data store where data replacement will begin, measured
/// in bytes.
#[inline]
pub fn update_index_buffer(handle: MeshHandle, offset: usize, data: &[u8]) -> CrResult<()> {
ctx().update_index_buffer(handle, offset, data)
}
/// Delete mesh object.
#[inline]
pub fn delete_mesh(handle: MeshHandle) {
ctx().delete_mesh(handle);
}
/// Create texture object. A texture is an image loaded in video memory,
/// which can be sampled in shaders.
#[inline]
pub fn create_texture<T>(params: TextureParams, data: T) -> CrResult<TextureHandle>
where
T: Into<Option<TextureData>>,
{
ctx().create_texture(params, data)
}
/// Creates a texture object from file asynchronously.
#[inline]
pub fn create_texture_from<T: AsRef<str>>(url: T) -> CrResult<TextureHandle> {
ctx().create_texture_from(url)
}
/// Creates a texture object from file asynchronously.
#[inline]
pub fn create_texture_from_uuid(uuid: Uuid) -> CrResult<TextureHandle> {
ctx().create_texture_from_uuid(uuid)
}
/// Get the resource state of specified texture.
#[inline]
pub fn texture_state(handle: TextureHandle) -> ResourceState {
ctx().texture_state(handle)
}
/// Update a contiguous subregion of an existing two-dimensional texture object.
#[inline]
pub fn update_texture(handle: TextureHandle, area: Aabb2<u32>, data: &[u8]) -> CrResult<()> {
ctx().update_texture(handle, area, data)
}
/// Delete the texture object.
#[inline]
pub fn delete_texture(handle: TextureHandle) {
ctx().delete_texture(handle);
}
/// Gets the `TextureParams` if available.
#[inline]
pub fn texture(handle: TextureHandle)->Option<TextureParams> {
ctx().texture(handle)
}
/// Create render texture object, which could be attached with a framebuffer.
#[inline]
pub fn create_render_texture(params: RenderTextureParams) -> Result<RenderTextureHandle> {
ctx().create_render_texture(params)
}
/// Gets the `RenderTextureParams` if available.
#[inline]
pub fn render_texture(handle: RenderTextureHandle) -> Option<RenderTextureParams> {
ctx().render_texture(handle)
}
/// Get the resource state of specified render texture.
#[inline]
pub fn render_texture_state(handle: RenderTextureHandle) -> ResourceState {
ctx().render_texture_state(handle)
}
/// Delete the render texture object.
#[inline]
pub fn delete_render_texture(handle: RenderTextureHandle) |
mod ins {
use super::system::VideoSystem;
pub static mut CTX: *const VideoSystem = std::ptr::null();
#[inline]
pub fn ctx() -> &'static VideoSystem {
unsafe {
debug_assert!(
!CTX.is_null(),
"video system has not been initialized properly."
);
&*CTX
}
}
}
| {
ctx().delete_render_texture(handle)
} | identifier_body |
crystallography_frame_viewer.py | # This file is part of OnDA.
#
# OnDA is free software: you can redistribute it and/or modify it under the terms of
# the GNU General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# OnDA is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with OnDA.
# If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014-2019 Deutsches Elektronen-Synchrotron DESY,
# a research centre of the Helmholtz Association.
"""
OnDA frame viewer for crystallography.
This module contains a graphical interface that displays detector data frames in
crystallography experiments.
"""
from __future__ import absolute_import, division, print_function
import collections
import copy
import sys
from typing import Any, Dict # pylint: disable=unused-import
import cfelpyutils.crystfel_utils as cfel_crystfel
import cfelpyutils.geometry_utils as cfel_geometry
import click
import numpy
import pyqtgraph
from onda.utils import gui
try:
import PyQt5.QtGui as QtGui
except ImportError:
import PyQt4.QtGui as QtGui
class CrystallographyFrameViewer(gui.OndaGui):
"""
See documentation of the __init__ function.
"""
def __init__(self, geometry, hostname, port):
# type: (Dict[str, Any], str, int) -> None
"""
OnDA frame viewer for crystallography.
This viewer receives detector frame data from an OnDA crystallography monitor,
when it is tagged with the 'ondadetectordata' label. It displays the received
detector frames, together with any detected Bragg peak (if present). A data
buffer allows the viewer to stop receiving data from the monitor but still keep
in memory the last 10 displayed frames for inspection.
Arguments:
geometry (Dict[str, Any]): a dictionary containing CrystFEL detector
geometry information (as returned by the 'load_crystfel_geometry`
function in the 'cfelpyutils' module).
hostname (str): the hostname or IP address where the viewer will listen for
data.
port(int): the port at which the viewer will listen for data.
"""
super(CrystallographyFrameViewer, self).__init__(
hostname=hostname,
port=port,
gui_update_func=self._update_image,
tag=u"ondaframedata",
)
pixel_maps = cfel_geometry.compute_pix_maps(geometry)
x_map, y_map = pixel_maps.x, pixel_maps.y
y_minimum = 2 * int(max(abs(y_map.max()), abs(y_map.min()))) + 2
x_minimum = 2 * int(max(abs(x_map.max()), abs(x_map.min()))) + 2
self._img_shape = (y_minimum, x_minimum)
self._img_center_x = int(self._img_shape[1] / 2)
self._img_center_y = int(self._img_shape[0] / 2)
visual_pixel_map = cfel_geometry.compute_visualization_pix_maps(geometry)
self._visual_pixel_map_x = visual_pixel_map.x.flatten()
self._visual_pixel_map_y = visual_pixel_map.y.flatten()
self._img = numpy.zeros(shape=self._img_shape, dtype=numpy.float)
self._frame_list = collections.deque(maxlen=20)
self._current_frame_index = -1
pyqtgraph.setConfigOption("background", 0.2)
self._ring_pen = pyqtgraph.mkPen("r", width=2)
self._peak_canvas = pyqtgraph.ScatterPlotItem()
self._image_view = pyqtgraph.ImageView()
self._image_view.ui.menuBtn.hide()
self._image_view.ui.roiBtn.hide()
self._image_view.getView().addItem(self._peak_canvas)
self._back_button = QtGui.QPushButton(text="Back")
self._back_button.clicked.connect(self._back_button_clicked)
self._forward_button = QtGui.QPushButton(text="Forward")
self._forward_button.clicked.connect(self._forward_button_clicked)
self._play_pause_button = QtGui.QPushButton(text="Pause")
self._play_pause_button.clicked.connect(self._play_pause_button_clicked)
self._citation_label = QtGui.QLabel(
"You are using an <b>OnDA</b> real-time monitor. Please cite: "
"Mariani et al., J Appl Crystallogr. 2016 May 23;49(Pt 3):1073-1080"
)
self._citation_label.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
)
self._horizontal_layout = QtGui.QHBoxLayout()
self._horizontal_layout.addWidget(self._back_button)
self._horizontal_layout.addWidget(self._forward_button)
self._horizontal_layout.addWidget(self._play_pause_button)
self._vertical_layout = QtGui.QVBoxLayout()
self._vertical_layout.addWidget(self._citation_label)
self._vertical_layout.addWidget(self._image_view)
self._vertical_layout.addLayout(self._horizontal_layout)
self._central_widget = QtGui.QWidget()
self._central_widget.setLayout(self._vertical_layout)
self.setCentralWidget(self._central_widget)
self.show()
def _update_image(self):
# Type () -> None
# Updates the frame image shown by the viewer.
if self.received_data is not None:
# The received aggregated data is expected to be a list of event entries
# (each being a dictionary storing the data for an event:
# List[Dict[str, Any], ...]). The last event in the list is extracted for
# visualizaton.
self._frame_list.append(copy.deepcopy(self.received_data[-1]))
self._current_frame_index = len(self._frame_list) - 1
# Resets the 'received_data' attribute to None. One can then check if
# data has been received simply by checking wether the attribute is not
# None.
self.received_data = None
try:
current_data = self._frame_list[self._current_frame_index]
except IndexError:
# If the framebuffer is empty, returns without drawing anything.
return
self._img[self._visual_pixel_map_y, self._visual_pixel_map_x] = (
current_data[b"detector_data"].ravel().astype(self._img.dtype)
)
QtGui.QApplication.processEvents()
self._image_view.setImage(
self._img.T, autoLevels=False, autoRange=False, autoHistogramRange=False
)
QtGui.QApplication.processEvents()
peak_x_list = []
peak_y_list = []
for peak_fs, peak_ss in zip(
current_data[b"peak_list"][b"fs"], current_data[b"peak_list"][b"ss"]
):
peak_index_in_slab = int(round(peak_ss)) * current_data[
b"native_data_shape"
][1] + int(round(peak_fs))
peak_x_list.append(self._visual_pixel_map_x[peak_index_in_slab])
peak_y_list.append(self._visual_pixel_map_y[peak_index_in_slab])
QtGui.QApplication.processEvents()
self._peak_canvas.setData( | pen=self._ring_pen,
pxMode=False,
)
def _back_button_clicked(self):
# Type () -> None
# Manages clicks on the 'back' button.
self._stop_stream()
if self._current_frame_index > 0:
self._current_frame_index -= 1
print("Showing frame {0} in the buffer".format(self._current_frame_index))
self._update_image()
def _forward_button_clicked(self):
# Type () -> None
# Manages clicks on the 'forward' button.
self._stop_stream()
if (self._current_frame_index + 1) < len(self._frame_list):
self._current_frame_index += 1
print("Showing frame {0} in the buffer".format(self._current_frame_index))
self._update_image()
def _stop_stream(self):
# Type () -> None
# Disconnects from the OnDA monitor and stops receiving data.
if self.listening:
self._play_pause_button.setText("Play")
self.stop_listening()
def _start_stream(self):
# Type () -> None
# Connects to the the OnDA monitor and starts receiving data.
if not self.listening:
self._play_pause_button.setText("Pause")
self.start_listening()
def _play_pause_button_clicked(self):
# Type () -> None
# Manages clicks on the 'play/pause' button.
if self.listening:
self._stop_stream()
else:
self._start_stream()
@click.command()
@click.argument("geometry_file", type=click.Path())
@click.argument("hostname", type=str, required=False)
@click.argument("port", type=int, required=False)
def main(geometry_file, hostname, port):
# type: (Dict[str, Any], str, int) -> None
"""
OnDA frame viewer for crystallography. This program must connect to a running OnDA
monitor for crystallography. If the monitor broadcasts detector frame data, this
viewer will display it. The viewer will also show, overlayed on the frame data,
any found Bragg peak. The data stream from the monitor can also be temporarily
paused, and any of the last 10 displayed detector frames can be recalled for
inspection.
GEOMETRY_FILE: the relative or absolute path to a file containing the detector
geometry information (in CrystFEL format) to be used for visualization.
HOSTNAME: the hostname where viewer will listen for data. Optional: if not
provided, it defaults to localhost (127.0.0.1).
PORT: the port at which the viewer will listen for data. Optional: if not provided,
it defaults to 12321.
"""
if hostname is None:
hostname = "127.0.0.1"
if port is None:
port = 12321
geometry = cfel_crystfel.load_crystfel_geometry(geometry_file)
app = QtGui.QApplication(sys.argv)
_ = CrystallographyFrameViewer(geometry, hostname, port)
sys.exit(app.exec_()) | x=peak_x_list,
y=peak_y_list,
symbol="o",
size=[5] * len(current_data[b"peak_list"][b"intensity"]),
brush=(255, 255, 255, 0), | random_line_split |
crystallography_frame_viewer.py | # This file is part of OnDA.
#
# OnDA is free software: you can redistribute it and/or modify it under the terms of
# the GNU General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# OnDA is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with OnDA.
# If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014-2019 Deutsches Elektronen-Synchrotron DESY,
# a research centre of the Helmholtz Association.
"""
OnDA frame viewer for crystallography.
This module contains a graphical interface that displays detector data frames in
crystallography experiments.
"""
from __future__ import absolute_import, division, print_function
import collections
import copy
import sys
from typing import Any, Dict # pylint: disable=unused-import
import cfelpyutils.crystfel_utils as cfel_crystfel
import cfelpyutils.geometry_utils as cfel_geometry
import click
import numpy
import pyqtgraph
from onda.utils import gui
try:
import PyQt5.QtGui as QtGui
except ImportError:
import PyQt4.QtGui as QtGui
class CrystallographyFrameViewer(gui.OndaGui):
"""
See documentation of the __init__ function.
"""
def __init__(self, geometry, hostname, port):
# type: (Dict[str, Any], str, int) -> None
"""
OnDA frame viewer for crystallography.
This viewer receives detector frame data from an OnDA crystallography monitor,
when it is tagged with the 'ondadetectordata' label. It displays the received
detector frames, together with any detected Bragg peak (if present). A data
buffer allows the viewer to stop receiving data from the monitor but still keep
in memory the last 10 displayed frames for inspection.
Arguments:
geometry (Dict[str, Any]): a dictionary containing CrystFEL detector
geometry information (as returned by the 'load_crystfel_geometry`
function in the 'cfelpyutils' module).
hostname (str): the hostname or IP address where the viewer will listen for
data.
port(int): the port at which the viewer will listen for data.
"""
super(CrystallographyFrameViewer, self).__init__(
hostname=hostname,
port=port,
gui_update_func=self._update_image,
tag=u"ondaframedata",
)
pixel_maps = cfel_geometry.compute_pix_maps(geometry)
x_map, y_map = pixel_maps.x, pixel_maps.y
y_minimum = 2 * int(max(abs(y_map.max()), abs(y_map.min()))) + 2
x_minimum = 2 * int(max(abs(x_map.max()), abs(x_map.min()))) + 2
self._img_shape = (y_minimum, x_minimum)
self._img_center_x = int(self._img_shape[1] / 2)
self._img_center_y = int(self._img_shape[0] / 2)
visual_pixel_map = cfel_geometry.compute_visualization_pix_maps(geometry)
self._visual_pixel_map_x = visual_pixel_map.x.flatten()
self._visual_pixel_map_y = visual_pixel_map.y.flatten()
self._img = numpy.zeros(shape=self._img_shape, dtype=numpy.float)
self._frame_list = collections.deque(maxlen=20)
self._current_frame_index = -1
pyqtgraph.setConfigOption("background", 0.2)
self._ring_pen = pyqtgraph.mkPen("r", width=2)
self._peak_canvas = pyqtgraph.ScatterPlotItem()
self._image_view = pyqtgraph.ImageView()
self._image_view.ui.menuBtn.hide()
self._image_view.ui.roiBtn.hide()
self._image_view.getView().addItem(self._peak_canvas)
self._back_button = QtGui.QPushButton(text="Back")
self._back_button.clicked.connect(self._back_button_clicked)
self._forward_button = QtGui.QPushButton(text="Forward")
self._forward_button.clicked.connect(self._forward_button_clicked)
self._play_pause_button = QtGui.QPushButton(text="Pause")
self._play_pause_button.clicked.connect(self._play_pause_button_clicked)
self._citation_label = QtGui.QLabel(
"You are using an <b>OnDA</b> real-time monitor. Please cite: "
"Mariani et al., J Appl Crystallogr. 2016 May 23;49(Pt 3):1073-1080"
)
self._citation_label.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
)
self._horizontal_layout = QtGui.QHBoxLayout()
self._horizontal_layout.addWidget(self._back_button)
self._horizontal_layout.addWidget(self._forward_button)
self._horizontal_layout.addWidget(self._play_pause_button)
self._vertical_layout = QtGui.QVBoxLayout()
self._vertical_layout.addWidget(self._citation_label)
self._vertical_layout.addWidget(self._image_view)
self._vertical_layout.addLayout(self._horizontal_layout)
self._central_widget = QtGui.QWidget()
self._central_widget.setLayout(self._vertical_layout)
self.setCentralWidget(self._central_widget)
self.show()
def _update_image(self):
# Type () -> None
# Updates the frame image shown by the viewer.
if self.received_data is not None:
# The received aggregated data is expected to be a list of event entries
# (each being a dictionary storing the data for an event:
# List[Dict[str, Any], ...]). The last event in the list is extracted for
# visualizaton.
|
try:
current_data = self._frame_list[self._current_frame_index]
except IndexError:
# If the framebuffer is empty, returns without drawing anything.
return
self._img[self._visual_pixel_map_y, self._visual_pixel_map_x] = (
current_data[b"detector_data"].ravel().astype(self._img.dtype)
)
QtGui.QApplication.processEvents()
self._image_view.setImage(
self._img.T, autoLevels=False, autoRange=False, autoHistogramRange=False
)
QtGui.QApplication.processEvents()
peak_x_list = []
peak_y_list = []
for peak_fs, peak_ss in zip(
current_data[b"peak_list"][b"fs"], current_data[b"peak_list"][b"ss"]
):
peak_index_in_slab = int(round(peak_ss)) * current_data[
b"native_data_shape"
][1] + int(round(peak_fs))
peak_x_list.append(self._visual_pixel_map_x[peak_index_in_slab])
peak_y_list.append(self._visual_pixel_map_y[peak_index_in_slab])
QtGui.QApplication.processEvents()
self._peak_canvas.setData(
x=peak_x_list,
y=peak_y_list,
symbol="o",
size=[5] * len(current_data[b"peak_list"][b"intensity"]),
brush=(255, 255, 255, 0),
pen=self._ring_pen,
pxMode=False,
)
def _back_button_clicked(self):
# Type () -> None
# Manages clicks on the 'back' button.
self._stop_stream()
if self._current_frame_index > 0:
self._current_frame_index -= 1
print("Showing frame {0} in the buffer".format(self._current_frame_index))
self._update_image()
def _forward_button_clicked(self):
# Type () -> None
# Manages clicks on the 'forward' button.
self._stop_stream()
if (self._current_frame_index + 1) < len(self._frame_list):
self._current_frame_index += 1
print("Showing frame {0} in the buffer".format(self._current_frame_index))
self._update_image()
def _stop_stream(self):
# Type () -> None
# Disconnects from the OnDA monitor and stops receiving data.
if self.listening:
self._play_pause_button.setText("Play")
self.stop_listening()
def _start_stream(self):
# Type () -> None
# Connects to the the OnDA monitor and starts receiving data.
if not self.listening:
self._play_pause_button.setText("Pause")
self.start_listening()
def _play_pause_button_clicked(self):
# Type () -> None
# Manages clicks on the 'play/pause' button.
if self.listening:
self._stop_stream()
else:
self._start_stream()
@click.command()
@click.argument("geometry_file", type=click.Path())
@click.argument("hostname", type=str, required=False)
@click.argument("port", type=int, required=False)
def main(geometry_file, hostname, port):
# type: (Dict[str, Any], str, int) -> None
"""
OnDA frame viewer for crystallography. This program must connect to a running OnDA
monitor for crystallography. If the monitor broadcasts detector frame data, this
viewer will display it. The viewer will also show, overlayed on the frame data,
any found Bragg peak. The data stream from the monitor can also be temporarily
paused, and any of the last 10 displayed detector frames can be recalled for
inspection.
GEOMETRY_FILE: the relative or absolute path to a file containing the detector
geometry information (in CrystFEL format) to be used for visualization.
HOSTNAME: the hostname where viewer will listen for data. Optional: if not
provided, it defaults to localhost (127.0.0.1).
PORT: the port at which the viewer will listen for data. Optional: if not provided,
it defaults to 12321.
"""
if hostname is None:
hostname = "127.0.0.1"
if port is None:
port = 12321
geometry = cfel_crystfel.load_crystfel_geometry(geometry_file)
app = QtGui.QApplication(sys.argv)
_ = CrystallographyFrameViewer(geometry, hostname, port)
sys.exit(app.exec_())
| self._frame_list.append(copy.deepcopy(self.received_data[-1]))
self._current_frame_index = len(self._frame_list) - 1
# Resets the 'received_data' attribute to None. One can then check if
# data has been received simply by checking wether the attribute is not
# None.
self.received_data = None | conditional_block |
crystallography_frame_viewer.py | # This file is part of OnDA.
#
# OnDA is free software: you can redistribute it and/or modify it under the terms of
# the GNU General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# OnDA is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with OnDA.
# If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014-2019 Deutsches Elektronen-Synchrotron DESY,
# a research centre of the Helmholtz Association.
"""
OnDA frame viewer for crystallography.
This module contains a graphical interface that displays detector data frames in
crystallography experiments.
"""
from __future__ import absolute_import, division, print_function
import collections
import copy
import sys
from typing import Any, Dict # pylint: disable=unused-import
import cfelpyutils.crystfel_utils as cfel_crystfel
import cfelpyutils.geometry_utils as cfel_geometry
import click
import numpy
import pyqtgraph
from onda.utils import gui
try:
import PyQt5.QtGui as QtGui
except ImportError:
import PyQt4.QtGui as QtGui
class CrystallographyFrameViewer(gui.OndaGui):
"""
See documentation of the __init__ function.
"""
def __init__(self, geometry, hostname, port):
# type: (Dict[str, Any], str, int) -> None
"""
OnDA frame viewer for crystallography.
This viewer receives detector frame data from an OnDA crystallography monitor,
when it is tagged with the 'ondadetectordata' label. It displays the received
detector frames, together with any detected Bragg peak (if present). A data
buffer allows the viewer to stop receiving data from the monitor but still keep
in memory the last 10 displayed frames for inspection.
Arguments:
geometry (Dict[str, Any]): a dictionary containing CrystFEL detector
geometry information (as returned by the 'load_crystfel_geometry`
function in the 'cfelpyutils' module).
hostname (str): the hostname or IP address where the viewer will listen for
data.
port(int): the port at which the viewer will listen for data.
"""
super(CrystallographyFrameViewer, self).__init__(
hostname=hostname,
port=port,
gui_update_func=self._update_image,
tag=u"ondaframedata",
)
pixel_maps = cfel_geometry.compute_pix_maps(geometry)
x_map, y_map = pixel_maps.x, pixel_maps.y
y_minimum = 2 * int(max(abs(y_map.max()), abs(y_map.min()))) + 2
x_minimum = 2 * int(max(abs(x_map.max()), abs(x_map.min()))) + 2
self._img_shape = (y_minimum, x_minimum)
self._img_center_x = int(self._img_shape[1] / 2)
self._img_center_y = int(self._img_shape[0] / 2)
visual_pixel_map = cfel_geometry.compute_visualization_pix_maps(geometry)
self._visual_pixel_map_x = visual_pixel_map.x.flatten()
self._visual_pixel_map_y = visual_pixel_map.y.flatten()
self._img = numpy.zeros(shape=self._img_shape, dtype=numpy.float)
self._frame_list = collections.deque(maxlen=20)
self._current_frame_index = -1
pyqtgraph.setConfigOption("background", 0.2)
self._ring_pen = pyqtgraph.mkPen("r", width=2)
self._peak_canvas = pyqtgraph.ScatterPlotItem()
self._image_view = pyqtgraph.ImageView()
self._image_view.ui.menuBtn.hide()
self._image_view.ui.roiBtn.hide()
self._image_view.getView().addItem(self._peak_canvas)
self._back_button = QtGui.QPushButton(text="Back")
self._back_button.clicked.connect(self._back_button_clicked)
self._forward_button = QtGui.QPushButton(text="Forward")
self._forward_button.clicked.connect(self._forward_button_clicked)
self._play_pause_button = QtGui.QPushButton(text="Pause")
self._play_pause_button.clicked.connect(self._play_pause_button_clicked)
self._citation_label = QtGui.QLabel(
"You are using an <b>OnDA</b> real-time monitor. Please cite: "
"Mariani et al., J Appl Crystallogr. 2016 May 23;49(Pt 3):1073-1080"
)
self._citation_label.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
)
self._horizontal_layout = QtGui.QHBoxLayout()
self._horizontal_layout.addWidget(self._back_button)
self._horizontal_layout.addWidget(self._forward_button)
self._horizontal_layout.addWidget(self._play_pause_button)
self._vertical_layout = QtGui.QVBoxLayout()
self._vertical_layout.addWidget(self._citation_label)
self._vertical_layout.addWidget(self._image_view)
self._vertical_layout.addLayout(self._horizontal_layout)
self._central_widget = QtGui.QWidget()
self._central_widget.setLayout(self._vertical_layout)
self.setCentralWidget(self._central_widget)
self.show()
def _update_image(self):
# Type () -> None
# Updates the frame image shown by the viewer.
if self.received_data is not None:
# The received aggregated data is expected to be a list of event entries
# (each being a dictionary storing the data for an event:
# List[Dict[str, Any], ...]). The last event in the list is extracted for
# visualizaton.
self._frame_list.append(copy.deepcopy(self.received_data[-1]))
self._current_frame_index = len(self._frame_list) - 1
# Resets the 'received_data' attribute to None. One can then check if
# data has been received simply by checking wether the attribute is not
# None.
self.received_data = None
try:
current_data = self._frame_list[self._current_frame_index]
except IndexError:
# If the framebuffer is empty, returns without drawing anything.
return
self._img[self._visual_pixel_map_y, self._visual_pixel_map_x] = (
current_data[b"detector_data"].ravel().astype(self._img.dtype)
)
QtGui.QApplication.processEvents()
self._image_view.setImage(
self._img.T, autoLevels=False, autoRange=False, autoHistogramRange=False
)
QtGui.QApplication.processEvents()
peak_x_list = []
peak_y_list = []
for peak_fs, peak_ss in zip(
current_data[b"peak_list"][b"fs"], current_data[b"peak_list"][b"ss"]
):
peak_index_in_slab = int(round(peak_ss)) * current_data[
b"native_data_shape"
][1] + int(round(peak_fs))
peak_x_list.append(self._visual_pixel_map_x[peak_index_in_slab])
peak_y_list.append(self._visual_pixel_map_y[peak_index_in_slab])
QtGui.QApplication.processEvents()
self._peak_canvas.setData(
x=peak_x_list,
y=peak_y_list,
symbol="o",
size=[5] * len(current_data[b"peak_list"][b"intensity"]),
brush=(255, 255, 255, 0),
pen=self._ring_pen,
pxMode=False,
)
def _back_button_clicked(self):
# Type () -> None
# Manages clicks on the 'back' button.
self._stop_stream()
if self._current_frame_index > 0:
self._current_frame_index -= 1
print("Showing frame {0} in the buffer".format(self._current_frame_index))
self._update_image()
def _forward_button_clicked(self):
# Type () -> None
# Manages clicks on the 'forward' button.
self._stop_stream()
if (self._current_frame_index + 1) < len(self._frame_list):
self._current_frame_index += 1
print("Showing frame {0} in the buffer".format(self._current_frame_index))
self._update_image()
def _stop_stream(self):
# Type () -> None
# Disconnects from the OnDA monitor and stops receiving data.
if self.listening:
self._play_pause_button.setText("Play")
self.stop_listening()
def _start_stream(self):
# Type () -> None
# Connects to the the OnDA monitor and starts receiving data.
if not self.listening:
self._play_pause_button.setText("Pause")
self.start_listening()
def _play_pause_button_clicked(self):
# Type () -> None
# Manages clicks on the 'play/pause' button.
if self.listening:
self._stop_stream()
else:
self._start_stream()
@click.command()
@click.argument("geometry_file", type=click.Path())
@click.argument("hostname", type=str, required=False)
@click.argument("port", type=int, required=False)
def | (geometry_file, hostname, port):
# type: (Dict[str, Any], str, int) -> None
"""
OnDA frame viewer for crystallography. This program must connect to a running OnDA
monitor for crystallography. If the monitor broadcasts detector frame data, this
viewer will display it. The viewer will also show, overlayed on the frame data,
any found Bragg peak. The data stream from the monitor can also be temporarily
paused, and any of the last 10 displayed detector frames can be recalled for
inspection.
GEOMETRY_FILE: the relative or absolute path to a file containing the detector
geometry information (in CrystFEL format) to be used for visualization.
HOSTNAME: the hostname where viewer will listen for data. Optional: if not
provided, it defaults to localhost (127.0.0.1).
PORT: the port at which the viewer will listen for data. Optional: if not provided,
it defaults to 12321.
"""
if hostname is None:
hostname = "127.0.0.1"
if port is None:
port = 12321
geometry = cfel_crystfel.load_crystfel_geometry(geometry_file)
app = QtGui.QApplication(sys.argv)
_ = CrystallographyFrameViewer(geometry, hostname, port)
sys.exit(app.exec_())
| main | identifier_name |
crystallography_frame_viewer.py | # This file is part of OnDA.
#
# OnDA is free software: you can redistribute it and/or modify it under the terms of
# the GNU General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# OnDA is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with OnDA.
# If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014-2019 Deutsches Elektronen-Synchrotron DESY,
# a research centre of the Helmholtz Association.
"""
OnDA frame viewer for crystallography.
This module contains a graphical interface that displays detector data frames in
crystallography experiments.
"""
from __future__ import absolute_import, division, print_function
import collections
import copy
import sys
from typing import Any, Dict # pylint: disable=unused-import
import cfelpyutils.crystfel_utils as cfel_crystfel
import cfelpyutils.geometry_utils as cfel_geometry
import click
import numpy
import pyqtgraph
from onda.utils import gui
try:
import PyQt5.QtGui as QtGui
except ImportError:
import PyQt4.QtGui as QtGui
class CrystallographyFrameViewer(gui.OndaGui):
"""
See documentation of the __init__ function.
"""
def __init__(self, geometry, hostname, port):
# type: (Dict[str, Any], str, int) -> None
"""
OnDA frame viewer for crystallography.
This viewer receives detector frame data from an OnDA crystallography monitor,
when it is tagged with the 'ondadetectordata' label. It displays the received
detector frames, together with any detected Bragg peak (if present). A data
buffer allows the viewer to stop receiving data from the monitor but still keep
in memory the last 10 displayed frames for inspection.
Arguments:
geometry (Dict[str, Any]): a dictionary containing CrystFEL detector
geometry information (as returned by the 'load_crystfel_geometry`
function in the 'cfelpyutils' module).
hostname (str): the hostname or IP address where the viewer will listen for
data.
port(int): the port at which the viewer will listen for data.
"""
super(CrystallographyFrameViewer, self).__init__(
hostname=hostname,
port=port,
gui_update_func=self._update_image,
tag=u"ondaframedata",
)
pixel_maps = cfel_geometry.compute_pix_maps(geometry)
x_map, y_map = pixel_maps.x, pixel_maps.y
y_minimum = 2 * int(max(abs(y_map.max()), abs(y_map.min()))) + 2
x_minimum = 2 * int(max(abs(x_map.max()), abs(x_map.min()))) + 2
self._img_shape = (y_minimum, x_minimum)
self._img_center_x = int(self._img_shape[1] / 2)
self._img_center_y = int(self._img_shape[0] / 2)
visual_pixel_map = cfel_geometry.compute_visualization_pix_maps(geometry)
self._visual_pixel_map_x = visual_pixel_map.x.flatten()
self._visual_pixel_map_y = visual_pixel_map.y.flatten()
self._img = numpy.zeros(shape=self._img_shape, dtype=numpy.float)
self._frame_list = collections.deque(maxlen=20)
self._current_frame_index = -1
pyqtgraph.setConfigOption("background", 0.2)
self._ring_pen = pyqtgraph.mkPen("r", width=2)
self._peak_canvas = pyqtgraph.ScatterPlotItem()
self._image_view = pyqtgraph.ImageView()
self._image_view.ui.menuBtn.hide()
self._image_view.ui.roiBtn.hide()
self._image_view.getView().addItem(self._peak_canvas)
self._back_button = QtGui.QPushButton(text="Back")
self._back_button.clicked.connect(self._back_button_clicked)
self._forward_button = QtGui.QPushButton(text="Forward")
self._forward_button.clicked.connect(self._forward_button_clicked)
self._play_pause_button = QtGui.QPushButton(text="Pause")
self._play_pause_button.clicked.connect(self._play_pause_button_clicked)
self._citation_label = QtGui.QLabel(
"You are using an <b>OnDA</b> real-time monitor. Please cite: "
"Mariani et al., J Appl Crystallogr. 2016 May 23;49(Pt 3):1073-1080"
)
self._citation_label.setSizePolicy(
QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
)
self._horizontal_layout = QtGui.QHBoxLayout()
self._horizontal_layout.addWidget(self._back_button)
self._horizontal_layout.addWidget(self._forward_button)
self._horizontal_layout.addWidget(self._play_pause_button)
self._vertical_layout = QtGui.QVBoxLayout()
self._vertical_layout.addWidget(self._citation_label)
self._vertical_layout.addWidget(self._image_view)
self._vertical_layout.addLayout(self._horizontal_layout)
self._central_widget = QtGui.QWidget()
self._central_widget.setLayout(self._vertical_layout)
self.setCentralWidget(self._central_widget)
self.show()
def _update_image(self):
# Type () -> None
# Updates the frame image shown by the viewer.
if self.received_data is not None:
# The received aggregated data is expected to be a list of event entries
# (each being a dictionary storing the data for an event:
# List[Dict[str, Any], ...]). The last event in the list is extracted for
# visualizaton.
self._frame_list.append(copy.deepcopy(self.received_data[-1]))
self._current_frame_index = len(self._frame_list) - 1
# Resets the 'received_data' attribute to None. One can then check if
# data has been received simply by checking wether the attribute is not
# None.
self.received_data = None
try:
current_data = self._frame_list[self._current_frame_index]
except IndexError:
# If the framebuffer is empty, returns without drawing anything.
return
self._img[self._visual_pixel_map_y, self._visual_pixel_map_x] = (
current_data[b"detector_data"].ravel().astype(self._img.dtype)
)
QtGui.QApplication.processEvents()
self._image_view.setImage(
self._img.T, autoLevels=False, autoRange=False, autoHistogramRange=False
)
QtGui.QApplication.processEvents()
peak_x_list = []
peak_y_list = []
for peak_fs, peak_ss in zip(
current_data[b"peak_list"][b"fs"], current_data[b"peak_list"][b"ss"]
):
peak_index_in_slab = int(round(peak_ss)) * current_data[
b"native_data_shape"
][1] + int(round(peak_fs))
peak_x_list.append(self._visual_pixel_map_x[peak_index_in_slab])
peak_y_list.append(self._visual_pixel_map_y[peak_index_in_slab])
QtGui.QApplication.processEvents()
self._peak_canvas.setData(
x=peak_x_list,
y=peak_y_list,
symbol="o",
size=[5] * len(current_data[b"peak_list"][b"intensity"]),
brush=(255, 255, 255, 0),
pen=self._ring_pen,
pxMode=False,
)
def _back_button_clicked(self):
# Type () -> None
# Manages clicks on the 'back' button.
|
def _forward_button_clicked(self):
# Type () -> None
# Manages clicks on the 'forward' button.
self._stop_stream()
if (self._current_frame_index + 1) < len(self._frame_list):
self._current_frame_index += 1
print("Showing frame {0} in the buffer".format(self._current_frame_index))
self._update_image()
def _stop_stream(self):
# Type () -> None
# Disconnects from the OnDA monitor and stops receiving data.
if self.listening:
self._play_pause_button.setText("Play")
self.stop_listening()
def _start_stream(self):
# Type () -> None
# Connects to the the OnDA monitor and starts receiving data.
if not self.listening:
self._play_pause_button.setText("Pause")
self.start_listening()
def _play_pause_button_clicked(self):
# Type () -> None
# Manages clicks on the 'play/pause' button.
if self.listening:
self._stop_stream()
else:
self._start_stream()
@click.command()
@click.argument("geometry_file", type=click.Path())
@click.argument("hostname", type=str, required=False)
@click.argument("port", type=int, required=False)
def main(geometry_file, hostname, port):
# type: (Dict[str, Any], str, int) -> None
"""
OnDA frame viewer for crystallography. This program must connect to a running OnDA
monitor for crystallography. If the monitor broadcasts detector frame data, this
viewer will display it. The viewer will also show, overlayed on the frame data,
any found Bragg peak. The data stream from the monitor can also be temporarily
paused, and any of the last 10 displayed detector frames can be recalled for
inspection.
GEOMETRY_FILE: the relative or absolute path to a file containing the detector
geometry information (in CrystFEL format) to be used for visualization.
HOSTNAME: the hostname where viewer will listen for data. Optional: if not
provided, it defaults to localhost (127.0.0.1).
PORT: the port at which the viewer will listen for data. Optional: if not provided,
it defaults to 12321.
"""
if hostname is None:
hostname = "127.0.0.1"
if port is None:
port = 12321
geometry = cfel_crystfel.load_crystfel_geometry(geometry_file)
app = QtGui.QApplication(sys.argv)
_ = CrystallographyFrameViewer(geometry, hostname, port)
sys.exit(app.exec_())
| self._stop_stream()
if self._current_frame_index > 0:
self._current_frame_index -= 1
print("Showing frame {0} in the buffer".format(self._current_frame_index))
self._update_image() | identifier_body |
run.py | import os
import sys
import argparse
import functools
from functools import partial
import numpy as np
import shutil
import paddle
import paddle.nn as nn
from paddle.io import Dataset, BatchSampler, DataLoader
from paddle.metric import Metric, Accuracy, Precision, Recall
from paddlenlp.transformers import AutoModelForTokenClassification, AutoTokenizer
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.data.sampler import SamplerHelper
from paddlenlp.metrics import Mcc, PearsonAndSpearman
from paddleslim.common import load_config
from paddleslim.auto_compression.compressor import AutoCompression
def argsparser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--config_path',
type=str,
default=None,
help="path of compression strategy config.",
required=True)
parser.add_argument(
'--save_dir',
type=str,
default='output',
help="directory to save compressed model.")
parser.add_argument(
'--eval',
type=bool,
default=False,
help="whether validate the model only.")
return parser
METRIC_CLASSES = {
"cola": Mcc,
"sst-2": Accuracy,
"sts-b": PearsonAndSpearman,
"mnli": Accuracy,
"qnli": Accuracy,
"rte": Accuracy,
"afqmc": Accuracy,
"tnews": Accuracy,
"iflytek": Accuracy,
"ocnli": Accuracy,
"cmnli": Accuracy,
"cluewsc2020": Accuracy,
"csl": Accuracy,
}
def convert_example(example,
tokenizer,
label_list,
max_seq_length=512,
is_test=False):
assert global_config['dataset'] in [
'glue', 'clue'
], "This demo only supports for dataset glue or clue"
"""Convert a glue example into necessary features."""
if global_config['dataset'] == 'glue':
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# Get the label
label = example['labels']
label = np.array([label], dtype=label_dtype)
# Convert raw text to feature
example = tokenizer(example['sentence'], max_seq_len=max_seq_length)
if not is_test:
return example['input_ids'], example['token_type_ids'], label
else:
return example['input_ids'], example['token_type_ids']
else: #if global_config['dataset'] == 'clue':
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# Get the label
example['label'] = np.array(
example["label"], dtype="int64").reshape((-1, 1))
label = example['label']
# Convert raw text to feature
if 'keyword' in example: # CSL
sentence1 = " ".join(example['keyword'])
example = {
'sentence1': sentence1,
'sentence2': example['abst'],
'label': example['label']
}
elif 'target' in example: # wsc
text, query, pronoun, query_idx, pronoun_idx = example[
'text'], example['target']['span1_text'], example['target'][
'span2_text'], example['target']['span1_index'], example[
'target']['span2_index']
text_list = list(text)
assert text[pronoun_idx:(pronoun_idx + len(
pronoun))] == pronoun, "pronoun: {}".format(pronoun)
assert text[query_idx:(query_idx + len(query)
)] == query, "query: {}".format(query)
if pronoun_idx > query_idx:
text_list.insert(query_idx, "_")
text_list.insert(query_idx + len(query) + 1, "_")
text_list.insert(pronoun_idx + 2, "[")
text_list.insert(pronoun_idx + len(pronoun) + 2 + 1, "]")
else:
text_list.insert(pronoun_idx, "[")
text_list.insert(pronoun_idx + len(pronoun) + 1, "]")
text_list.insert(query_idx + 2, "_")
text_list.insert(query_idx + len(query) + 2 + 1, "_")
text = "".join(text_list)
example['sentence'] = text
if tokenizer is None:
return example
if 'sentence' in example:
example = tokenizer(example['sentence'], max_seq_len=max_seq_length)
elif 'sentence1' in example:
example = tokenizer(
example['sentence1'], | return example['input_ids'], example['token_type_ids'], label
else:
return example['input_ids'], example['token_type_ids']
def create_data_holder(task_name):
"""
Define the input data holder for the glue task.
"""
input_ids = paddle.static.data(
name="input_ids", shape=[-1, -1], dtype="int64")
token_type_ids = paddle.static.data(
name="token_type_ids", shape=[-1, -1], dtype="int64")
if task_name == "sts-b":
label = paddle.static.data(name="label", shape=[-1, 1], dtype="float32")
else:
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
return [input_ids, token_type_ids, label]
def reader():
# Create the tokenizer and dataset
tokenizer = AutoTokenizer.from_pretrained(global_config['model_dir'])
train_ds, dev_ds = load_dataset(
global_config['dataset'],
global_config['task_name'],
splits=('train', 'dev'))
trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=global_config['max_seq_length'],
is_test=True)
train_ds = train_ds.map(trans_func, lazy=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
): fn(samples)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=global_config['batch_size'], shuffle=True)
[input_ids, token_type_ids,
labels] = create_data_holder(global_config['task_name'])
feed_list_name = []
train_data_loader = DataLoader(
dataset=train_ds,
feed_list=[input_ids, token_type_ids],
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=False)
dev_trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=global_config['max_seq_length'])
dev_batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
Stack(dtype="int64" if train_ds.label_list else "float32") # label
): fn(samples)
dev_ds = dev_ds.map(dev_trans_func, lazy=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=global_config['batch_size'], shuffle=False)
dev_data_loader = DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=dev_batchify_fn,
num_workers=0,
feed_list=[input_ids, token_type_ids, labels],
return_list=False)
return train_data_loader, dev_data_loader
def eval_function(exe, compiled_test_program, test_feed_names, test_fetch_list):
metric.reset()
for data in eval_dataloader():
logits = exe.run(
compiled_test_program,
feed={
test_feed_names[0]: data[0]['input_ids'],
test_feed_names[1]: data[0]['token_type_ids']
},
fetch_list=test_fetch_list)
paddle.disable_static()
labels_pd = paddle.to_tensor(np.array(data[0]['label']).flatten())
logits_pd = paddle.to_tensor(logits[0])
correct = metric.compute(logits_pd, labels_pd)
metric.update(correct)
paddle.enable_static()
res = metric.accumulate()
return res
def eval():
devices = paddle.device.get_device().split(':')[0]
places = paddle.device._convert_to_place(devices)
exe = paddle.static.Executor(places)
val_program, feed_target_names, fetch_targets = paddle.static.load_inference_model(
global_config['model_dir'],
exe,
model_filename=global_config['model_filename'],
params_filename=global_config['params_filename'])
print('Loaded model from: {}'.format(global_config['model_dir']))
metric.reset()
print('Evaluating...')
for data in eval_dataloader():
logits = exe.run(
val_program,
feed={
feed_target_names[0]: data[0]['input_ids'],
feed_target_names[1]: data[0]['token_type_ids']
},
fetch_list=fetch_targets)
paddle.disable_static()
labels_pd = paddle.to_tensor(np.array(data[0]['label']).flatten())
logits_pd = paddle.to_tensor(logits[0])
correct = metric.compute(logits_pd, labels_pd)
metric.update(correct)
paddle.enable_static()
res = metric.accumulate()
return res
def apply_decay_param_fun(name):
if name.find("bias") > -1:
return True
elif name.find("b_0") > -1:
return True
elif name.find("norm") > -1:
return True
else:
return False
def main():
all_config = load_config(args.config_path)
global global_config
assert "Global" in all_config, "Key Global not found in config file."
global_config = all_config["Global"]
if 'TrainConfig' in all_config:
all_config['TrainConfig']['optimizer_builder'][
'apply_decay_param_fun'] = apply_decay_param_fun
global train_dataloader, eval_dataloader
train_dataloader, eval_dataloader = reader()
global metric
metric_class = METRIC_CLASSES[global_config['task_name']]
metric = metric_class()
if args.eval:
result = eval()
print('Eval metric:', result)
sys.exit(0)
ac = AutoCompression(
model_dir=global_config['model_dir'],
model_filename=global_config['model_filename'],
params_filename=global_config['params_filename'],
save_dir=args.save_dir,
config=all_config,
train_dataloader=train_dataloader,
eval_callback=eval_function,
eval_dataloader=eval_dataloader)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
for file_name in os.listdir(global_config['model_dir']):
if 'json' in file_name or 'txt' in file_name:
shutil.copy(
os.path.join(global_config['model_dir'], file_name),
args.save_dir)
ac.compress()
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
args = parser.parse_args()
main() | text_pair=example['sentence2'],
max_seq_len=max_seq_length)
if not is_test: | random_line_split |
run.py | import os
import sys
import argparse
import functools
from functools import partial
import numpy as np
import shutil
import paddle
import paddle.nn as nn
from paddle.io import Dataset, BatchSampler, DataLoader
from paddle.metric import Metric, Accuracy, Precision, Recall
from paddlenlp.transformers import AutoModelForTokenClassification, AutoTokenizer
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.data.sampler import SamplerHelper
from paddlenlp.metrics import Mcc, PearsonAndSpearman
from paddleslim.common import load_config
from paddleslim.auto_compression.compressor import AutoCompression
def argsparser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--config_path',
type=str,
default=None,
help="path of compression strategy config.",
required=True)
parser.add_argument(
'--save_dir',
type=str,
default='output',
help="directory to save compressed model.")
parser.add_argument(
'--eval',
type=bool,
default=False,
help="whether validate the model only.")
return parser
METRIC_CLASSES = {
"cola": Mcc,
"sst-2": Accuracy,
"sts-b": PearsonAndSpearman,
"mnli": Accuracy,
"qnli": Accuracy,
"rte": Accuracy,
"afqmc": Accuracy,
"tnews": Accuracy,
"iflytek": Accuracy,
"ocnli": Accuracy,
"cmnli": Accuracy,
"cluewsc2020": Accuracy,
"csl": Accuracy,
}
def convert_example(example,
tokenizer,
label_list,
max_seq_length=512,
is_test=False):
assert global_config['dataset'] in [
'glue', 'clue'
], "This demo only supports for dataset glue or clue"
"""Convert a glue example into necessary features."""
if global_config['dataset'] == 'glue':
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# Get the label
label = example['labels']
label = np.array([label], dtype=label_dtype)
# Convert raw text to feature
example = tokenizer(example['sentence'], max_seq_len=max_seq_length)
if not is_test:
return example['input_ids'], example['token_type_ids'], label
else:
return example['input_ids'], example['token_type_ids']
else: #if global_config['dataset'] == 'clue':
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# Get the label
example['label'] = np.array(
example["label"], dtype="int64").reshape((-1, 1))
label = example['label']
# Convert raw text to feature
if 'keyword' in example: # CSL
sentence1 = " ".join(example['keyword'])
example = {
'sentence1': sentence1,
'sentence2': example['abst'],
'label': example['label']
}
elif 'target' in example: # wsc
text, query, pronoun, query_idx, pronoun_idx = example[
'text'], example['target']['span1_text'], example['target'][
'span2_text'], example['target']['span1_index'], example[
'target']['span2_index']
text_list = list(text)
assert text[pronoun_idx:(pronoun_idx + len(
pronoun))] == pronoun, "pronoun: {}".format(pronoun)
assert text[query_idx:(query_idx + len(query)
)] == query, "query: {}".format(query)
if pronoun_idx > query_idx:
text_list.insert(query_idx, "_")
text_list.insert(query_idx + len(query) + 1, "_")
text_list.insert(pronoun_idx + 2, "[")
text_list.insert(pronoun_idx + len(pronoun) + 2 + 1, "]")
else:
text_list.insert(pronoun_idx, "[")
text_list.insert(pronoun_idx + len(pronoun) + 1, "]")
text_list.insert(query_idx + 2, "_")
text_list.insert(query_idx + len(query) + 2 + 1, "_")
text = "".join(text_list)
example['sentence'] = text
if tokenizer is None:
return example
if 'sentence' in example:
example = tokenizer(example['sentence'], max_seq_len=max_seq_length)
elif 'sentence1' in example:
example = tokenizer(
example['sentence1'],
text_pair=example['sentence2'],
max_seq_len=max_seq_length)
if not is_test:
return example['input_ids'], example['token_type_ids'], label
else:
return example['input_ids'], example['token_type_ids']
def create_data_holder(task_name):
"""
Define the input data holder for the glue task.
"""
input_ids = paddle.static.data(
name="input_ids", shape=[-1, -1], dtype="int64")
token_type_ids = paddle.static.data(
name="token_type_ids", shape=[-1, -1], dtype="int64")
if task_name == "sts-b":
label = paddle.static.data(name="label", shape=[-1, 1], dtype="float32")
else:
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
return [input_ids, token_type_ids, label]
def reader():
# Create the tokenizer and dataset
tokenizer = AutoTokenizer.from_pretrained(global_config['model_dir'])
train_ds, dev_ds = load_dataset(
global_config['dataset'],
global_config['task_name'],
splits=('train', 'dev'))
trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=global_config['max_seq_length'],
is_test=True)
train_ds = train_ds.map(trans_func, lazy=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
): fn(samples)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=global_config['batch_size'], shuffle=True)
[input_ids, token_type_ids,
labels] = create_data_holder(global_config['task_name'])
feed_list_name = []
train_data_loader = DataLoader(
dataset=train_ds,
feed_list=[input_ids, token_type_ids],
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=False)
dev_trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=global_config['max_seq_length'])
dev_batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
Stack(dtype="int64" if train_ds.label_list else "float32") # label
): fn(samples)
dev_ds = dev_ds.map(dev_trans_func, lazy=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=global_config['batch_size'], shuffle=False)
dev_data_loader = DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=dev_batchify_fn,
num_workers=0,
feed_list=[input_ids, token_type_ids, labels],
return_list=False)
return train_data_loader, dev_data_loader
def eval_function(exe, compiled_test_program, test_feed_names, test_fetch_list):
metric.reset()
for data in eval_dataloader():
logits = exe.run(
compiled_test_program,
feed={
test_feed_names[0]: data[0]['input_ids'],
test_feed_names[1]: data[0]['token_type_ids']
},
fetch_list=test_fetch_list)
paddle.disable_static()
labels_pd = paddle.to_tensor(np.array(data[0]['label']).flatten())
logits_pd = paddle.to_tensor(logits[0])
correct = metric.compute(logits_pd, labels_pd)
metric.update(correct)
paddle.enable_static()
res = metric.accumulate()
return res
def | ():
devices = paddle.device.get_device().split(':')[0]
places = paddle.device._convert_to_place(devices)
exe = paddle.static.Executor(places)
val_program, feed_target_names, fetch_targets = paddle.static.load_inference_model(
global_config['model_dir'],
exe,
model_filename=global_config['model_filename'],
params_filename=global_config['params_filename'])
print('Loaded model from: {}'.format(global_config['model_dir']))
metric.reset()
print('Evaluating...')
for data in eval_dataloader():
logits = exe.run(
val_program,
feed={
feed_target_names[0]: data[0]['input_ids'],
feed_target_names[1]: data[0]['token_type_ids']
},
fetch_list=fetch_targets)
paddle.disable_static()
labels_pd = paddle.to_tensor(np.array(data[0]['label']).flatten())
logits_pd = paddle.to_tensor(logits[0])
correct = metric.compute(logits_pd, labels_pd)
metric.update(correct)
paddle.enable_static()
res = metric.accumulate()
return res
def apply_decay_param_fun(name):
if name.find("bias") > -1:
return True
elif name.find("b_0") > -1:
return True
elif name.find("norm") > -1:
return True
else:
return False
def main():
all_config = load_config(args.config_path)
global global_config
assert "Global" in all_config, "Key Global not found in config file."
global_config = all_config["Global"]
if 'TrainConfig' in all_config:
all_config['TrainConfig']['optimizer_builder'][
'apply_decay_param_fun'] = apply_decay_param_fun
global train_dataloader, eval_dataloader
train_dataloader, eval_dataloader = reader()
global metric
metric_class = METRIC_CLASSES[global_config['task_name']]
metric = metric_class()
if args.eval:
result = eval()
print('Eval metric:', result)
sys.exit(0)
ac = AutoCompression(
model_dir=global_config['model_dir'],
model_filename=global_config['model_filename'],
params_filename=global_config['params_filename'],
save_dir=args.save_dir,
config=all_config,
train_dataloader=train_dataloader,
eval_callback=eval_function,
eval_dataloader=eval_dataloader)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
for file_name in os.listdir(global_config['model_dir']):
if 'json' in file_name or 'txt' in file_name:
shutil.copy(
os.path.join(global_config['model_dir'], file_name),
args.save_dir)
ac.compress()
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
args = parser.parse_args()
main()
| eval | identifier_name |
run.py | import os
import sys
import argparse
import functools
from functools import partial
import numpy as np
import shutil
import paddle
import paddle.nn as nn
from paddle.io import Dataset, BatchSampler, DataLoader
from paddle.metric import Metric, Accuracy, Precision, Recall
from paddlenlp.transformers import AutoModelForTokenClassification, AutoTokenizer
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.data.sampler import SamplerHelper
from paddlenlp.metrics import Mcc, PearsonAndSpearman
from paddleslim.common import load_config
from paddleslim.auto_compression.compressor import AutoCompression
def argsparser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--config_path',
type=str,
default=None,
help="path of compression strategy config.",
required=True)
parser.add_argument(
'--save_dir',
type=str,
default='output',
help="directory to save compressed model.")
parser.add_argument(
'--eval',
type=bool,
default=False,
help="whether validate the model only.")
return parser
METRIC_CLASSES = {
"cola": Mcc,
"sst-2": Accuracy,
"sts-b": PearsonAndSpearman,
"mnli": Accuracy,
"qnli": Accuracy,
"rte": Accuracy,
"afqmc": Accuracy,
"tnews": Accuracy,
"iflytek": Accuracy,
"ocnli": Accuracy,
"cmnli": Accuracy,
"cluewsc2020": Accuracy,
"csl": Accuracy,
}
def convert_example(example,
tokenizer,
label_list,
max_seq_length=512,
is_test=False):
assert global_config['dataset'] in [
'glue', 'clue'
], "This demo only supports for dataset glue or clue"
"""Convert a glue example into necessary features."""
if global_config['dataset'] == 'glue':
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# Get the label
label = example['labels']
label = np.array([label], dtype=label_dtype)
# Convert raw text to feature
example = tokenizer(example['sentence'], max_seq_len=max_seq_length)
if not is_test:
return example['input_ids'], example['token_type_ids'], label
else:
return example['input_ids'], example['token_type_ids']
else: #if global_config['dataset'] == 'clue':
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# Get the label
example['label'] = np.array(
example["label"], dtype="int64").reshape((-1, 1))
label = example['label']
# Convert raw text to feature
if 'keyword' in example: # CSL
sentence1 = " ".join(example['keyword'])
example = {
'sentence1': sentence1,
'sentence2': example['abst'],
'label': example['label']
}
elif 'target' in example: # wsc
text, query, pronoun, query_idx, pronoun_idx = example[
'text'], example['target']['span1_text'], example['target'][
'span2_text'], example['target']['span1_index'], example[
'target']['span2_index']
text_list = list(text)
assert text[pronoun_idx:(pronoun_idx + len(
pronoun))] == pronoun, "pronoun: {}".format(pronoun)
assert text[query_idx:(query_idx + len(query)
)] == query, "query: {}".format(query)
if pronoun_idx > query_idx:
text_list.insert(query_idx, "_")
text_list.insert(query_idx + len(query) + 1, "_")
text_list.insert(pronoun_idx + 2, "[")
text_list.insert(pronoun_idx + len(pronoun) + 2 + 1, "]")
else:
text_list.insert(pronoun_idx, "[")
text_list.insert(pronoun_idx + len(pronoun) + 1, "]")
text_list.insert(query_idx + 2, "_")
text_list.insert(query_idx + len(query) + 2 + 1, "_")
text = "".join(text_list)
example['sentence'] = text
if tokenizer is None:
return example
if 'sentence' in example:
example = tokenizer(example['sentence'], max_seq_len=max_seq_length)
elif 'sentence1' in example:
example = tokenizer(
example['sentence1'],
text_pair=example['sentence2'],
max_seq_len=max_seq_length)
if not is_test:
return example['input_ids'], example['token_type_ids'], label
else:
return example['input_ids'], example['token_type_ids']
def create_data_holder(task_name):
"""
Define the input data holder for the glue task.
"""
input_ids = paddle.static.data(
name="input_ids", shape=[-1, -1], dtype="int64")
token_type_ids = paddle.static.data(
name="token_type_ids", shape=[-1, -1], dtype="int64")
if task_name == "sts-b":
label = paddle.static.data(name="label", shape=[-1, 1], dtype="float32")
else:
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
return [input_ids, token_type_ids, label]
def reader():
# Create the tokenizer and dataset
tokenizer = AutoTokenizer.from_pretrained(global_config['model_dir'])
train_ds, dev_ds = load_dataset(
global_config['dataset'],
global_config['task_name'],
splits=('train', 'dev'))
trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=global_config['max_seq_length'],
is_test=True)
train_ds = train_ds.map(trans_func, lazy=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
): fn(samples)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=global_config['batch_size'], shuffle=True)
[input_ids, token_type_ids,
labels] = create_data_holder(global_config['task_name'])
feed_list_name = []
train_data_loader = DataLoader(
dataset=train_ds,
feed_list=[input_ids, token_type_ids],
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=False)
dev_trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=global_config['max_seq_length'])
dev_batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
Stack(dtype="int64" if train_ds.label_list else "float32") # label
): fn(samples)
dev_ds = dev_ds.map(dev_trans_func, lazy=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=global_config['batch_size'], shuffle=False)
dev_data_loader = DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=dev_batchify_fn,
num_workers=0,
feed_list=[input_ids, token_type_ids, labels],
return_list=False)
return train_data_loader, dev_data_loader
def eval_function(exe, compiled_test_program, test_feed_names, test_fetch_list):
metric.reset()
for data in eval_dataloader():
logits = exe.run(
compiled_test_program,
feed={
test_feed_names[0]: data[0]['input_ids'],
test_feed_names[1]: data[0]['token_type_ids']
},
fetch_list=test_fetch_list)
paddle.disable_static()
labels_pd = paddle.to_tensor(np.array(data[0]['label']).flatten())
logits_pd = paddle.to_tensor(logits[0])
correct = metric.compute(logits_pd, labels_pd)
metric.update(correct)
paddle.enable_static()
res = metric.accumulate()
return res
def eval():
devices = paddle.device.get_device().split(':')[0]
places = paddle.device._convert_to_place(devices)
exe = paddle.static.Executor(places)
val_program, feed_target_names, fetch_targets = paddle.static.load_inference_model(
global_config['model_dir'],
exe,
model_filename=global_config['model_filename'],
params_filename=global_config['params_filename'])
print('Loaded model from: {}'.format(global_config['model_dir']))
metric.reset()
print('Evaluating...')
for data in eval_dataloader():
logits = exe.run(
val_program,
feed={
feed_target_names[0]: data[0]['input_ids'],
feed_target_names[1]: data[0]['token_type_ids']
},
fetch_list=fetch_targets)
paddle.disable_static()
labels_pd = paddle.to_tensor(np.array(data[0]['label']).flatten())
logits_pd = paddle.to_tensor(logits[0])
correct = metric.compute(logits_pd, labels_pd)
metric.update(correct)
paddle.enable_static()
res = metric.accumulate()
return res
def apply_decay_param_fun(name):
|
def main():
all_config = load_config(args.config_path)
global global_config
assert "Global" in all_config, "Key Global not found in config file."
global_config = all_config["Global"]
if 'TrainConfig' in all_config:
all_config['TrainConfig']['optimizer_builder'][
'apply_decay_param_fun'] = apply_decay_param_fun
global train_dataloader, eval_dataloader
train_dataloader, eval_dataloader = reader()
global metric
metric_class = METRIC_CLASSES[global_config['task_name']]
metric = metric_class()
if args.eval:
result = eval()
print('Eval metric:', result)
sys.exit(0)
ac = AutoCompression(
model_dir=global_config['model_dir'],
model_filename=global_config['model_filename'],
params_filename=global_config['params_filename'],
save_dir=args.save_dir,
config=all_config,
train_dataloader=train_dataloader,
eval_callback=eval_function,
eval_dataloader=eval_dataloader)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
for file_name in os.listdir(global_config['model_dir']):
if 'json' in file_name or 'txt' in file_name:
shutil.copy(
os.path.join(global_config['model_dir'], file_name),
args.save_dir)
ac.compress()
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
args = parser.parse_args()
main()
| if name.find("bias") > -1:
return True
elif name.find("b_0") > -1:
return True
elif name.find("norm") > -1:
return True
else:
return False | identifier_body |
run.py | import os
import sys
import argparse
import functools
from functools import partial
import numpy as np
import shutil
import paddle
import paddle.nn as nn
from paddle.io import Dataset, BatchSampler, DataLoader
from paddle.metric import Metric, Accuracy, Precision, Recall
from paddlenlp.transformers import AutoModelForTokenClassification, AutoTokenizer
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.data.sampler import SamplerHelper
from paddlenlp.metrics import Mcc, PearsonAndSpearman
from paddleslim.common import load_config
from paddleslim.auto_compression.compressor import AutoCompression
def argsparser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--config_path',
type=str,
default=None,
help="path of compression strategy config.",
required=True)
parser.add_argument(
'--save_dir',
type=str,
default='output',
help="directory to save compressed model.")
parser.add_argument(
'--eval',
type=bool,
default=False,
help="whether validate the model only.")
return parser
METRIC_CLASSES = {
"cola": Mcc,
"sst-2": Accuracy,
"sts-b": PearsonAndSpearman,
"mnli": Accuracy,
"qnli": Accuracy,
"rte": Accuracy,
"afqmc": Accuracy,
"tnews": Accuracy,
"iflytek": Accuracy,
"ocnli": Accuracy,
"cmnli": Accuracy,
"cluewsc2020": Accuracy,
"csl": Accuracy,
}
def convert_example(example,
tokenizer,
label_list,
max_seq_length=512,
is_test=False):
assert global_config['dataset'] in [
'glue', 'clue'
], "This demo only supports for dataset glue or clue"
"""Convert a glue example into necessary features."""
if global_config['dataset'] == 'glue':
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# Get the label
label = example['labels']
label = np.array([label], dtype=label_dtype)
# Convert raw text to feature
example = tokenizer(example['sentence'], max_seq_len=max_seq_length)
if not is_test:
return example['input_ids'], example['token_type_ids'], label
else:
return example['input_ids'], example['token_type_ids']
else: #if global_config['dataset'] == 'clue':
if not is_test:
# `label_list == None` is for regression task
label_dtype = "int64" if label_list else "float32"
# Get the label
example['label'] = np.array(
example["label"], dtype="int64").reshape((-1, 1))
label = example['label']
# Convert raw text to feature
if 'keyword' in example: # CSL
sentence1 = " ".join(example['keyword'])
example = {
'sentence1': sentence1,
'sentence2': example['abst'],
'label': example['label']
}
elif 'target' in example: # wsc
text, query, pronoun, query_idx, pronoun_idx = example[
'text'], example['target']['span1_text'], example['target'][
'span2_text'], example['target']['span1_index'], example[
'target']['span2_index']
text_list = list(text)
assert text[pronoun_idx:(pronoun_idx + len(
pronoun))] == pronoun, "pronoun: {}".format(pronoun)
assert text[query_idx:(query_idx + len(query)
)] == query, "query: {}".format(query)
if pronoun_idx > query_idx:
text_list.insert(query_idx, "_")
text_list.insert(query_idx + len(query) + 1, "_")
text_list.insert(pronoun_idx + 2, "[")
text_list.insert(pronoun_idx + len(pronoun) + 2 + 1, "]")
else:
text_list.insert(pronoun_idx, "[")
text_list.insert(pronoun_idx + len(pronoun) + 1, "]")
text_list.insert(query_idx + 2, "_")
text_list.insert(query_idx + len(query) + 2 + 1, "_")
text = "".join(text_list)
example['sentence'] = text
if tokenizer is None:
return example
if 'sentence' in example:
example = tokenizer(example['sentence'], max_seq_len=max_seq_length)
elif 'sentence1' in example:
example = tokenizer(
example['sentence1'],
text_pair=example['sentence2'],
max_seq_len=max_seq_length)
if not is_test:
return example['input_ids'], example['token_type_ids'], label
else:
return example['input_ids'], example['token_type_ids']
def create_data_holder(task_name):
"""
Define the input data holder for the glue task.
"""
input_ids = paddle.static.data(
name="input_ids", shape=[-1, -1], dtype="int64")
token_type_ids = paddle.static.data(
name="token_type_ids", shape=[-1, -1], dtype="int64")
if task_name == "sts-b":
label = paddle.static.data(name="label", shape=[-1, 1], dtype="float32")
else:
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
return [input_ids, token_type_ids, label]
def reader():
# Create the tokenizer and dataset
tokenizer = AutoTokenizer.from_pretrained(global_config['model_dir'])
train_ds, dev_ds = load_dataset(
global_config['dataset'],
global_config['task_name'],
splits=('train', 'dev'))
trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=global_config['max_seq_length'],
is_test=True)
train_ds = train_ds.map(trans_func, lazy=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
): fn(samples)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=global_config['batch_size'], shuffle=True)
[input_ids, token_type_ids,
labels] = create_data_holder(global_config['task_name'])
feed_list_name = []
train_data_loader = DataLoader(
dataset=train_ds,
feed_list=[input_ids, token_type_ids],
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=False)
dev_trans_func = partial(
convert_example,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=global_config['max_seq_length'])
dev_batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
Stack(dtype="int64" if train_ds.label_list else "float32") # label
): fn(samples)
dev_ds = dev_ds.map(dev_trans_func, lazy=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=global_config['batch_size'], shuffle=False)
dev_data_loader = DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=dev_batchify_fn,
num_workers=0,
feed_list=[input_ids, token_type_ids, labels],
return_list=False)
return train_data_loader, dev_data_loader
def eval_function(exe, compiled_test_program, test_feed_names, test_fetch_list):
metric.reset()
for data in eval_dataloader():
logits = exe.run(
compiled_test_program,
feed={
test_feed_names[0]: data[0]['input_ids'],
test_feed_names[1]: data[0]['token_type_ids']
},
fetch_list=test_fetch_list)
paddle.disable_static()
labels_pd = paddle.to_tensor(np.array(data[0]['label']).flatten())
logits_pd = paddle.to_tensor(logits[0])
correct = metric.compute(logits_pd, labels_pd)
metric.update(correct)
paddle.enable_static()
res = metric.accumulate()
return res
def eval():
devices = paddle.device.get_device().split(':')[0]
places = paddle.device._convert_to_place(devices)
exe = paddle.static.Executor(places)
val_program, feed_target_names, fetch_targets = paddle.static.load_inference_model(
global_config['model_dir'],
exe,
model_filename=global_config['model_filename'],
params_filename=global_config['params_filename'])
print('Loaded model from: {}'.format(global_config['model_dir']))
metric.reset()
print('Evaluating...')
for data in eval_dataloader():
logits = exe.run(
val_program,
feed={
feed_target_names[0]: data[0]['input_ids'],
feed_target_names[1]: data[0]['token_type_ids']
},
fetch_list=fetch_targets)
paddle.disable_static()
labels_pd = paddle.to_tensor(np.array(data[0]['label']).flatten())
logits_pd = paddle.to_tensor(logits[0])
correct = metric.compute(logits_pd, labels_pd)
metric.update(correct)
paddle.enable_static()
res = metric.accumulate()
return res
def apply_decay_param_fun(name):
if name.find("bias") > -1:
return True
elif name.find("b_0") > -1:
return True
elif name.find("norm") > -1:
|
else:
return False
def main():
all_config = load_config(args.config_path)
global global_config
assert "Global" in all_config, "Key Global not found in config file."
global_config = all_config["Global"]
if 'TrainConfig' in all_config:
all_config['TrainConfig']['optimizer_builder'][
'apply_decay_param_fun'] = apply_decay_param_fun
global train_dataloader, eval_dataloader
train_dataloader, eval_dataloader = reader()
global metric
metric_class = METRIC_CLASSES[global_config['task_name']]
metric = metric_class()
if args.eval:
result = eval()
print('Eval metric:', result)
sys.exit(0)
ac = AutoCompression(
model_dir=global_config['model_dir'],
model_filename=global_config['model_filename'],
params_filename=global_config['params_filename'],
save_dir=args.save_dir,
config=all_config,
train_dataloader=train_dataloader,
eval_callback=eval_function,
eval_dataloader=eval_dataloader)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
for file_name in os.listdir(global_config['model_dir']):
if 'json' in file_name or 'txt' in file_name:
shutil.copy(
os.path.join(global_config['model_dir'], file_name),
args.save_dir)
ac.compress()
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
args = parser.parse_args()
main()
| return True | conditional_block |
rt_threaded.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))]
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::runtime;
use tokio::sync::oneshot;
use tokio_test::{assert_err, assert_ok};
use futures::future::poll_fn;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::{mpsc, Arc, Mutex};
use std::task::{Context, Poll, Waker};
macro_rules! cfg_metrics {
($($t:tt)*) => {
#[cfg(tokio_unstable)]
{
$( $t )*
}
}
}
#[test]
fn single_thread() {
// No panic when starting a runtime w/ a single thread
let _ = runtime::Builder::new_multi_thread()
.enable_all()
.worker_threads(1)
.build();
}
#[test]
fn many_oneshot_futures() {
// used for notifying the main thread
const NUM: usize = 1_000;
for _ in 0..5 {
let (tx, rx) = mpsc::channel();
let rt = rt();
let cnt = Arc::new(AtomicUsize::new(0));
for _ in 0..NUM {
let cnt = cnt.clone();
let tx = tx.clone();
rt.spawn(async move {
let num = cnt.fetch_add(1, Relaxed) + 1;
if num == NUM {
tx.send(()).unwrap();
}
});
}
rx.recv().unwrap();
// Wait for the pool to shutdown
drop(rt);
}
}
#[test]
fn spawn_two() {
let rt = rt();
let out = rt.block_on(async {
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
tokio::spawn(async move {
tx.send("ZOMG").unwrap();
});
});
assert_ok!(rx.await)
});
assert_eq!(out, "ZOMG");
cfg_metrics! {
let metrics = rt.metrics();
drop(rt);
assert_eq!(1, metrics.remote_schedule_count());
let mut local = 0;
for i in 0..metrics.num_workers() {
local += metrics.worker_local_schedule_count(i);
}
assert_eq!(1, local);
}
}
#[test]
fn many_multishot_futures() {
const CHAIN: usize = 200;
const CYCLES: usize = 5;
const TRACKS: usize = 50;
for _ in 0..50 {
let rt = rt();
let mut start_txs = Vec::with_capacity(TRACKS);
let mut final_rxs = Vec::with_capacity(TRACKS);
for _ in 0..TRACKS {
let (start_tx, mut chain_rx) = tokio::sync::mpsc::channel(10);
for _ in 0..CHAIN {
let (next_tx, next_rx) = tokio::sync::mpsc::channel(10);
// Forward all the messages
rt.spawn(async move {
while let Some(v) = chain_rx.recv().await {
next_tx.send(v).await.unwrap();
}
});
chain_rx = next_rx;
}
// This final task cycles if needed
let (final_tx, final_rx) = tokio::sync::mpsc::channel(10);
let cycle_tx = start_tx.clone();
let mut rem = CYCLES;
rt.spawn(async move {
for _ in 0..CYCLES {
let msg = chain_rx.recv().await.unwrap();
rem -= 1;
if rem == 0 {
final_tx.send(msg).await.unwrap();
} else {
cycle_tx.send(msg).await.unwrap();
}
}
});
start_txs.push(start_tx);
final_rxs.push(final_rx);
}
{
rt.block_on(async move {
for start_tx in start_txs {
start_tx.send("ping").await.unwrap();
}
for mut final_rx in final_rxs {
final_rx.recv().await.unwrap();
}
});
}
}
}
#[test]
fn spawn_shutdown() {
let rt = rt();
let (tx, rx) = mpsc::channel();
rt.block_on(async {
tokio::spawn(client_server(tx.clone()));
});
// Use spawner
rt.spawn(client_server(tx));
assert_ok!(rx.recv());
assert_ok!(rx.recv());
drop(rt);
assert_err!(rx.try_recv());
}
async fn client_server(tx: mpsc::Sender<()>) {
let server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
// Get the assigned address
let addr = assert_ok!(server.local_addr());
// Spawn the server
tokio::spawn(async move {
// Accept a socket
let (mut socket, _) = server.accept().await.unwrap();
// Write some data
socket.write_all(b"hello").await.unwrap();
});
let mut client = TcpStream::connect(&addr).await.unwrap();
let mut buf = vec![];
client.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, b"hello");
tx.send(()).unwrap();
}
#[test]
fn drop_threadpool_drops_futures() {
for _ in 0..1_000 {
let num_inc = Arc::new(AtomicUsize::new(0));
let num_dec = Arc::new(AtomicUsize::new(0));
let num_drop = Arc::new(AtomicUsize::new(0));
struct Never(Arc<AtomicUsize>);
impl Future for Never {
type Output = ();
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
Poll::Pending
}
}
impl Drop for Never {
fn drop(&mut self) {
self.0.fetch_add(1, Relaxed);
}
}
let a = num_inc.clone();
let b = num_dec.clone();
let rt = runtime::Builder::new_multi_thread()
.enable_all()
.on_thread_start(move || {
a.fetch_add(1, Relaxed);
})
.on_thread_stop(move || {
b.fetch_add(1, Relaxed);
})
.build()
.unwrap();
rt.spawn(Never(num_drop.clone()));
// Wait for the pool to shutdown
drop(rt);
// Assert that only a single thread was spawned.
let a = num_inc.load(Relaxed);
assert!(a >= 1);
// Assert that all threads shutdown
let b = num_dec.load(Relaxed);
assert_eq!(a, b);
// Assert that the future was dropped
let c = num_drop.load(Relaxed);
assert_eq!(c, 1);
}
}
#[test]
fn start_stop_callbacks_called() {
use std::sync::atomic::{AtomicUsize, Ordering};
let after_start = Arc::new(AtomicUsize::new(0));
let before_stop = Arc::new(AtomicUsize::new(0));
let after_inner = after_start.clone();
let before_inner = before_stop.clone();
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.on_thread_start(move || {
after_inner.clone().fetch_add(1, Ordering::Relaxed);
})
.on_thread_stop(move || {
before_inner.clone().fetch_add(1, Ordering::Relaxed);
})
.build()
.unwrap();
let (tx, rx) = oneshot::channel();
rt.spawn(async move {
assert_ok!(tx.send(()));
});
assert_ok!(rt.block_on(rx));
drop(rt);
assert!(after_start.load(Ordering::Relaxed) > 0);
assert!(before_stop.load(Ordering::Relaxed) > 0);
}
#[test]
fn blocking() {
// used for notifying the main thread
const NUM: usize = 1_000;
for _ in 0..10 {
let (tx, rx) = mpsc::channel();
let rt = rt();
let cnt = Arc::new(AtomicUsize::new(0));
// there are four workers in the pool
// so, if we run 4 blocking tasks, we know that handoff must have happened
let block = Arc::new(std::sync::Barrier::new(5));
for _ in 0..4 {
let block = block.clone();
rt.spawn(async move {
tokio::task::block_in_place(move || {
block.wait();
block.wait();
})
});
}
block.wait();
for _ in 0..NUM {
let cnt = cnt.clone();
let tx = tx.clone();
rt.spawn(async move {
let num = cnt.fetch_add(1, Relaxed) + 1;
if num == NUM |
});
}
rx.recv().unwrap();
// Wait for the pool to shutdown
block.wait();
}
}
#[test]
fn multi_threadpool() {
use tokio::sync::oneshot;
let rt1 = rt();
let rt2 = rt();
let (tx, rx) = oneshot::channel();
let (done_tx, done_rx) = mpsc::channel();
rt2.spawn(async move {
rx.await.unwrap();
done_tx.send(()).unwrap();
});
rt1.spawn(async move {
tx.send(()).unwrap();
});
done_rx.recv().unwrap();
}
// When `block_in_place` returns, it attempts to reclaim the yielded runtime
// worker. In this case, the remainder of the task is on the runtime worker and
// must take part in the cooperative task budgeting system.
//
// The test ensures that, when this happens, attempting to consume from a
// channel yields occasionally even if there are values ready to receive.
#[test]
fn coop_and_block_in_place() {
let rt = tokio::runtime::Builder::new_multi_thread()
// Setting max threads to 1 prevents another thread from claiming the
// runtime worker yielded as part of `block_in_place` and guarantees the
// same thread will reclaim the worker at the end of the
// `block_in_place` call.
.max_blocking_threads(1)
.build()
.unwrap();
rt.block_on(async move {
let (tx, mut rx) = tokio::sync::mpsc::channel(1024);
// Fill the channel
for _ in 0..1024 {
tx.send(()).await.unwrap();
}
drop(tx);
tokio::spawn(async move {
// Block in place without doing anything
tokio::task::block_in_place(|| {});
// Receive all the values, this should trigger a `Pending` as the
// coop limit will be reached.
poll_fn(|cx| {
while let Poll::Ready(v) = {
tokio::pin! {
let fut = rx.recv();
}
Pin::new(&mut fut).poll(cx)
} {
if v.is_none() {
panic!("did not yield");
}
}
Poll::Ready(())
})
.await
})
.await
.unwrap();
});
}
#[test]
fn yield_after_block_in_place() {
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.build()
.unwrap();
rt.block_on(async {
tokio::spawn(async move {
// Block in place then enter a new runtime
tokio::task::block_in_place(|| {
let rt = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
rt.block_on(async {});
});
// Yield, then complete
tokio::task::yield_now().await;
})
.await
.unwrap()
});
}
// Testing this does not panic
#[test]
fn max_blocking_threads() {
let _rt = tokio::runtime::Builder::new_multi_thread()
.max_blocking_threads(1)
.build()
.unwrap();
}
#[test]
#[should_panic]
fn max_blocking_threads_set_to_zero() {
let _rt = tokio::runtime::Builder::new_multi_thread()
.max_blocking_threads(0)
.build()
.unwrap();
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn hang_on_shutdown() {
let (sync_tx, sync_rx) = std::sync::mpsc::channel::<()>();
tokio::spawn(async move {
tokio::task::block_in_place(|| sync_rx.recv().ok());
});
tokio::spawn(async {
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
drop(sync_tx);
});
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
/// Demonstrates tokio-rs/tokio#3869
#[test]
fn wake_during_shutdown() {
struct Shared {
waker: Option<Waker>,
}
struct MyFuture {
shared: Arc<Mutex<Shared>>,
put_waker: bool,
}
impl MyFuture {
fn new() -> (Self, Self) {
let shared = Arc::new(Mutex::new(Shared { waker: None }));
let f1 = MyFuture {
shared: shared.clone(),
put_waker: true,
};
let f2 = MyFuture {
shared,
put_waker: false,
};
(f1, f2)
}
}
impl Future for MyFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
let me = Pin::into_inner(self);
let mut lock = me.shared.lock().unwrap();
if me.put_waker {
lock.waker = Some(cx.waker().clone());
}
Poll::Pending
}
}
impl Drop for MyFuture {
fn drop(&mut self) {
let mut lock = self.shared.lock().unwrap();
if !self.put_waker {
lock.waker.take().unwrap().wake();
}
drop(lock);
}
}
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.enable_all()
.build()
.unwrap();
let (f1, f2) = MyFuture::new();
rt.spawn(f1);
rt.spawn(f2);
rt.block_on(async { tokio::time::sleep(tokio::time::Duration::from_millis(20)).await });
}
#[should_panic]
#[tokio::test]
async fn test_block_in_place1() {
tokio::task::block_in_place(|| {});
}
#[tokio::test(flavor = "multi_thread")]
async fn test_block_in_place2() {
tokio::task::block_in_place(|| {});
}
#[should_panic]
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_block_in_place3() {
tokio::task::block_in_place(|| {});
}
#[tokio::main]
#[test]
async fn test_block_in_place4() {
tokio::task::block_in_place(|| {});
}
fn rt() -> runtime::Runtime {
runtime::Runtime::new().unwrap()
}
#[cfg(tokio_unstable)]
mod unstable {
use super::*;
#[test]
fn test_disable_lifo_slot() {
let rt = runtime::Builder::new_multi_thread()
.disable_lifo_slot()
.worker_threads(2)
.build()
.unwrap();
rt.block_on(async {
tokio::spawn(async {
// Spawn another task and block the thread until completion. If the LIFO slot
// is used then the test doesn't complete.
futures::executor::block_on(tokio::spawn(async {})).unwrap();
})
.await
.unwrap();
})
}
}
| {
tx.send(()).unwrap();
} | conditional_block |
rt_threaded.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))]
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::runtime;
use tokio::sync::oneshot;
use tokio_test::{assert_err, assert_ok};
use futures::future::poll_fn;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::{mpsc, Arc, Mutex};
use std::task::{Context, Poll, Waker};
macro_rules! cfg_metrics {
($($t:tt)*) => {
#[cfg(tokio_unstable)]
{
$( $t )*
}
}
}
#[test]
fn single_thread() {
// No panic when starting a runtime w/ a single thread
let _ = runtime::Builder::new_multi_thread()
.enable_all()
.worker_threads(1)
.build();
}
#[test]
fn many_oneshot_futures() {
// used for notifying the main thread
const NUM: usize = 1_000;
for _ in 0..5 {
let (tx, rx) = mpsc::channel();
let rt = rt();
let cnt = Arc::new(AtomicUsize::new(0));
for _ in 0..NUM {
let cnt = cnt.clone();
let tx = tx.clone();
rt.spawn(async move {
let num = cnt.fetch_add(1, Relaxed) + 1;
if num == NUM {
tx.send(()).unwrap();
}
});
}
rx.recv().unwrap();
// Wait for the pool to shutdown
drop(rt);
}
}
#[test]
fn spawn_two() {
let rt = rt();
let out = rt.block_on(async {
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
tokio::spawn(async move {
tx.send("ZOMG").unwrap();
});
});
assert_ok!(rx.await)
});
assert_eq!(out, "ZOMG");
cfg_metrics! {
let metrics = rt.metrics();
drop(rt);
assert_eq!(1, metrics.remote_schedule_count());
let mut local = 0;
for i in 0..metrics.num_workers() {
local += metrics.worker_local_schedule_count(i);
}
assert_eq!(1, local);
}
}
#[test]
fn many_multishot_futures() { | const TRACKS: usize = 50;
for _ in 0..50 {
let rt = rt();
let mut start_txs = Vec::with_capacity(TRACKS);
let mut final_rxs = Vec::with_capacity(TRACKS);
for _ in 0..TRACKS {
let (start_tx, mut chain_rx) = tokio::sync::mpsc::channel(10);
for _ in 0..CHAIN {
let (next_tx, next_rx) = tokio::sync::mpsc::channel(10);
// Forward all the messages
rt.spawn(async move {
while let Some(v) = chain_rx.recv().await {
next_tx.send(v).await.unwrap();
}
});
chain_rx = next_rx;
}
// This final task cycles if needed
let (final_tx, final_rx) = tokio::sync::mpsc::channel(10);
let cycle_tx = start_tx.clone();
let mut rem = CYCLES;
rt.spawn(async move {
for _ in 0..CYCLES {
let msg = chain_rx.recv().await.unwrap();
rem -= 1;
if rem == 0 {
final_tx.send(msg).await.unwrap();
} else {
cycle_tx.send(msg).await.unwrap();
}
}
});
start_txs.push(start_tx);
final_rxs.push(final_rx);
}
{
rt.block_on(async move {
for start_tx in start_txs {
start_tx.send("ping").await.unwrap();
}
for mut final_rx in final_rxs {
final_rx.recv().await.unwrap();
}
});
}
}
}
#[test]
fn spawn_shutdown() {
let rt = rt();
let (tx, rx) = mpsc::channel();
rt.block_on(async {
tokio::spawn(client_server(tx.clone()));
});
// Use spawner
rt.spawn(client_server(tx));
assert_ok!(rx.recv());
assert_ok!(rx.recv());
drop(rt);
assert_err!(rx.try_recv());
}
async fn client_server(tx: mpsc::Sender<()>) {
let server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
// Get the assigned address
let addr = assert_ok!(server.local_addr());
// Spawn the server
tokio::spawn(async move {
// Accept a socket
let (mut socket, _) = server.accept().await.unwrap();
// Write some data
socket.write_all(b"hello").await.unwrap();
});
let mut client = TcpStream::connect(&addr).await.unwrap();
let mut buf = vec![];
client.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, b"hello");
tx.send(()).unwrap();
}
#[test]
fn drop_threadpool_drops_futures() {
for _ in 0..1_000 {
let num_inc = Arc::new(AtomicUsize::new(0));
let num_dec = Arc::new(AtomicUsize::new(0));
let num_drop = Arc::new(AtomicUsize::new(0));
struct Never(Arc<AtomicUsize>);
impl Future for Never {
type Output = ();
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
Poll::Pending
}
}
impl Drop for Never {
fn drop(&mut self) {
self.0.fetch_add(1, Relaxed);
}
}
let a = num_inc.clone();
let b = num_dec.clone();
let rt = runtime::Builder::new_multi_thread()
.enable_all()
.on_thread_start(move || {
a.fetch_add(1, Relaxed);
})
.on_thread_stop(move || {
b.fetch_add(1, Relaxed);
})
.build()
.unwrap();
rt.spawn(Never(num_drop.clone()));
// Wait for the pool to shutdown
drop(rt);
// Assert that only a single thread was spawned.
let a = num_inc.load(Relaxed);
assert!(a >= 1);
// Assert that all threads shutdown
let b = num_dec.load(Relaxed);
assert_eq!(a, b);
// Assert that the future was dropped
let c = num_drop.load(Relaxed);
assert_eq!(c, 1);
}
}
#[test]
fn start_stop_callbacks_called() {
use std::sync::atomic::{AtomicUsize, Ordering};
let after_start = Arc::new(AtomicUsize::new(0));
let before_stop = Arc::new(AtomicUsize::new(0));
let after_inner = after_start.clone();
let before_inner = before_stop.clone();
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.on_thread_start(move || {
after_inner.clone().fetch_add(1, Ordering::Relaxed);
})
.on_thread_stop(move || {
before_inner.clone().fetch_add(1, Ordering::Relaxed);
})
.build()
.unwrap();
let (tx, rx) = oneshot::channel();
rt.spawn(async move {
assert_ok!(tx.send(()));
});
assert_ok!(rt.block_on(rx));
drop(rt);
assert!(after_start.load(Ordering::Relaxed) > 0);
assert!(before_stop.load(Ordering::Relaxed) > 0);
}
#[test]
fn blocking() {
// used for notifying the main thread
const NUM: usize = 1_000;
for _ in 0..10 {
let (tx, rx) = mpsc::channel();
let rt = rt();
let cnt = Arc::new(AtomicUsize::new(0));
// there are four workers in the pool
// so, if we run 4 blocking tasks, we know that handoff must have happened
let block = Arc::new(std::sync::Barrier::new(5));
for _ in 0..4 {
let block = block.clone();
rt.spawn(async move {
tokio::task::block_in_place(move || {
block.wait();
block.wait();
})
});
}
block.wait();
for _ in 0..NUM {
let cnt = cnt.clone();
let tx = tx.clone();
rt.spawn(async move {
let num = cnt.fetch_add(1, Relaxed) + 1;
if num == NUM {
tx.send(()).unwrap();
}
});
}
rx.recv().unwrap();
// Wait for the pool to shutdown
block.wait();
}
}
#[test]
fn multi_threadpool() {
use tokio::sync::oneshot;
let rt1 = rt();
let rt2 = rt();
let (tx, rx) = oneshot::channel();
let (done_tx, done_rx) = mpsc::channel();
rt2.spawn(async move {
rx.await.unwrap();
done_tx.send(()).unwrap();
});
rt1.spawn(async move {
tx.send(()).unwrap();
});
done_rx.recv().unwrap();
}
// When `block_in_place` returns, it attempts to reclaim the yielded runtime
// worker. In this case, the remainder of the task is on the runtime worker and
// must take part in the cooperative task budgeting system.
//
// The test ensures that, when this happens, attempting to consume from a
// channel yields occasionally even if there are values ready to receive.
#[test]
fn coop_and_block_in_place() {
let rt = tokio::runtime::Builder::new_multi_thread()
// Setting max threads to 1 prevents another thread from claiming the
// runtime worker yielded as part of `block_in_place` and guarantees the
// same thread will reclaim the worker at the end of the
// `block_in_place` call.
.max_blocking_threads(1)
.build()
.unwrap();
rt.block_on(async move {
let (tx, mut rx) = tokio::sync::mpsc::channel(1024);
// Fill the channel
for _ in 0..1024 {
tx.send(()).await.unwrap();
}
drop(tx);
tokio::spawn(async move {
// Block in place without doing anything
tokio::task::block_in_place(|| {});
// Receive all the values, this should trigger a `Pending` as the
// coop limit will be reached.
poll_fn(|cx| {
while let Poll::Ready(v) = {
tokio::pin! {
let fut = rx.recv();
}
Pin::new(&mut fut).poll(cx)
} {
if v.is_none() {
panic!("did not yield");
}
}
Poll::Ready(())
})
.await
})
.await
.unwrap();
});
}
#[test]
fn yield_after_block_in_place() {
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.build()
.unwrap();
rt.block_on(async {
tokio::spawn(async move {
// Block in place then enter a new runtime
tokio::task::block_in_place(|| {
let rt = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
rt.block_on(async {});
});
// Yield, then complete
tokio::task::yield_now().await;
})
.await
.unwrap()
});
}
// Testing this does not panic
#[test]
fn max_blocking_threads() {
let _rt = tokio::runtime::Builder::new_multi_thread()
.max_blocking_threads(1)
.build()
.unwrap();
}
#[test]
#[should_panic]
fn max_blocking_threads_set_to_zero() {
let _rt = tokio::runtime::Builder::new_multi_thread()
.max_blocking_threads(0)
.build()
.unwrap();
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn hang_on_shutdown() {
let (sync_tx, sync_rx) = std::sync::mpsc::channel::<()>();
tokio::spawn(async move {
tokio::task::block_in_place(|| sync_rx.recv().ok());
});
tokio::spawn(async {
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
drop(sync_tx);
});
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
/// Demonstrates tokio-rs/tokio#3869
#[test]
fn wake_during_shutdown() {
struct Shared {
waker: Option<Waker>,
}
struct MyFuture {
shared: Arc<Mutex<Shared>>,
put_waker: bool,
}
impl MyFuture {
fn new() -> (Self, Self) {
let shared = Arc::new(Mutex::new(Shared { waker: None }));
let f1 = MyFuture {
shared: shared.clone(),
put_waker: true,
};
let f2 = MyFuture {
shared,
put_waker: false,
};
(f1, f2)
}
}
impl Future for MyFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
let me = Pin::into_inner(self);
let mut lock = me.shared.lock().unwrap();
if me.put_waker {
lock.waker = Some(cx.waker().clone());
}
Poll::Pending
}
}
impl Drop for MyFuture {
fn drop(&mut self) {
let mut lock = self.shared.lock().unwrap();
if !self.put_waker {
lock.waker.take().unwrap().wake();
}
drop(lock);
}
}
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.enable_all()
.build()
.unwrap();
let (f1, f2) = MyFuture::new();
rt.spawn(f1);
rt.spawn(f2);
rt.block_on(async { tokio::time::sleep(tokio::time::Duration::from_millis(20)).await });
}
#[should_panic]
#[tokio::test]
async fn test_block_in_place1() {
tokio::task::block_in_place(|| {});
}
#[tokio::test(flavor = "multi_thread")]
async fn test_block_in_place2() {
tokio::task::block_in_place(|| {});
}
#[should_panic]
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_block_in_place3() {
tokio::task::block_in_place(|| {});
}
#[tokio::main]
#[test]
async fn test_block_in_place4() {
tokio::task::block_in_place(|| {});
}
fn rt() -> runtime::Runtime {
runtime::Runtime::new().unwrap()
}
#[cfg(tokio_unstable)]
mod unstable {
use super::*;
#[test]
fn test_disable_lifo_slot() {
let rt = runtime::Builder::new_multi_thread()
.disable_lifo_slot()
.worker_threads(2)
.build()
.unwrap();
rt.block_on(async {
tokio::spawn(async {
// Spawn another task and block the thread until completion. If the LIFO slot
// is used then the test doesn't complete.
futures::executor::block_on(tokio::spawn(async {})).unwrap();
})
.await
.unwrap();
})
}
} | const CHAIN: usize = 200;
const CYCLES: usize = 5; | random_line_split |
rt_threaded.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(tokio_wasi)))]
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::runtime;
use tokio::sync::oneshot;
use tokio_test::{assert_err, assert_ok};
use futures::future::poll_fn;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::{mpsc, Arc, Mutex};
use std::task::{Context, Poll, Waker};
macro_rules! cfg_metrics {
($($t:tt)*) => {
#[cfg(tokio_unstable)]
{
$( $t )*
}
}
}
#[test]
fn single_thread() {
// No panic when starting a runtime w/ a single thread
let _ = runtime::Builder::new_multi_thread()
.enable_all()
.worker_threads(1)
.build();
}
#[test]
fn many_oneshot_futures() {
// used for notifying the main thread
const NUM: usize = 1_000;
for _ in 0..5 {
let (tx, rx) = mpsc::channel();
let rt = rt();
let cnt = Arc::new(AtomicUsize::new(0));
for _ in 0..NUM {
let cnt = cnt.clone();
let tx = tx.clone();
rt.spawn(async move {
let num = cnt.fetch_add(1, Relaxed) + 1;
if num == NUM {
tx.send(()).unwrap();
}
});
}
rx.recv().unwrap();
// Wait for the pool to shutdown
drop(rt);
}
}
#[test]
fn spawn_two() {
let rt = rt();
let out = rt.block_on(async {
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
tokio::spawn(async move {
tx.send("ZOMG").unwrap();
});
});
assert_ok!(rx.await)
});
assert_eq!(out, "ZOMG");
cfg_metrics! {
let metrics = rt.metrics();
drop(rt);
assert_eq!(1, metrics.remote_schedule_count());
let mut local = 0;
for i in 0..metrics.num_workers() {
local += metrics.worker_local_schedule_count(i);
}
assert_eq!(1, local);
}
}
#[test]
fn many_multishot_futures() {
const CHAIN: usize = 200;
const CYCLES: usize = 5;
const TRACKS: usize = 50;
for _ in 0..50 {
let rt = rt();
let mut start_txs = Vec::with_capacity(TRACKS);
let mut final_rxs = Vec::with_capacity(TRACKS);
for _ in 0..TRACKS {
let (start_tx, mut chain_rx) = tokio::sync::mpsc::channel(10);
for _ in 0..CHAIN {
let (next_tx, next_rx) = tokio::sync::mpsc::channel(10);
// Forward all the messages
rt.spawn(async move {
while let Some(v) = chain_rx.recv().await {
next_tx.send(v).await.unwrap();
}
});
chain_rx = next_rx;
}
// This final task cycles if needed
let (final_tx, final_rx) = tokio::sync::mpsc::channel(10);
let cycle_tx = start_tx.clone();
let mut rem = CYCLES;
rt.spawn(async move {
for _ in 0..CYCLES {
let msg = chain_rx.recv().await.unwrap();
rem -= 1;
if rem == 0 {
final_tx.send(msg).await.unwrap();
} else {
cycle_tx.send(msg).await.unwrap();
}
}
});
start_txs.push(start_tx);
final_rxs.push(final_rx);
}
{
rt.block_on(async move {
for start_tx in start_txs {
start_tx.send("ping").await.unwrap();
}
for mut final_rx in final_rxs {
final_rx.recv().await.unwrap();
}
});
}
}
}
#[test]
fn spawn_shutdown() {
let rt = rt();
let (tx, rx) = mpsc::channel();
rt.block_on(async {
tokio::spawn(client_server(tx.clone()));
});
// Use spawner
rt.spawn(client_server(tx));
assert_ok!(rx.recv());
assert_ok!(rx.recv());
drop(rt);
assert_err!(rx.try_recv());
}
async fn client_server(tx: mpsc::Sender<()>) {
let server = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
// Get the assigned address
let addr = assert_ok!(server.local_addr());
// Spawn the server
tokio::spawn(async move {
// Accept a socket
let (mut socket, _) = server.accept().await.unwrap();
// Write some data
socket.write_all(b"hello").await.unwrap();
});
let mut client = TcpStream::connect(&addr).await.unwrap();
let mut buf = vec![];
client.read_to_end(&mut buf).await.unwrap();
assert_eq!(buf, b"hello");
tx.send(()).unwrap();
}
#[test]
fn drop_threadpool_drops_futures() {
for _ in 0..1_000 {
let num_inc = Arc::new(AtomicUsize::new(0));
let num_dec = Arc::new(AtomicUsize::new(0));
let num_drop = Arc::new(AtomicUsize::new(0));
struct Never(Arc<AtomicUsize>);
impl Future for Never {
type Output = ();
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
Poll::Pending
}
}
impl Drop for Never {
fn drop(&mut self) {
self.0.fetch_add(1, Relaxed);
}
}
let a = num_inc.clone();
let b = num_dec.clone();
let rt = runtime::Builder::new_multi_thread()
.enable_all()
.on_thread_start(move || {
a.fetch_add(1, Relaxed);
})
.on_thread_stop(move || {
b.fetch_add(1, Relaxed);
})
.build()
.unwrap();
rt.spawn(Never(num_drop.clone()));
// Wait for the pool to shutdown
drop(rt);
// Assert that only a single thread was spawned.
let a = num_inc.load(Relaxed);
assert!(a >= 1);
// Assert that all threads shutdown
let b = num_dec.load(Relaxed);
assert_eq!(a, b);
// Assert that the future was dropped
let c = num_drop.load(Relaxed);
assert_eq!(c, 1);
}
}
#[test]
fn start_stop_callbacks_called() {
use std::sync::atomic::{AtomicUsize, Ordering};
let after_start = Arc::new(AtomicUsize::new(0));
let before_stop = Arc::new(AtomicUsize::new(0));
let after_inner = after_start.clone();
let before_inner = before_stop.clone();
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.on_thread_start(move || {
after_inner.clone().fetch_add(1, Ordering::Relaxed);
})
.on_thread_stop(move || {
before_inner.clone().fetch_add(1, Ordering::Relaxed);
})
.build()
.unwrap();
let (tx, rx) = oneshot::channel();
rt.spawn(async move {
assert_ok!(tx.send(()));
});
assert_ok!(rt.block_on(rx));
drop(rt);
assert!(after_start.load(Ordering::Relaxed) > 0);
assert!(before_stop.load(Ordering::Relaxed) > 0);
}
#[test]
fn blocking() {
// used for notifying the main thread
const NUM: usize = 1_000;
for _ in 0..10 {
let (tx, rx) = mpsc::channel();
let rt = rt();
let cnt = Arc::new(AtomicUsize::new(0));
// there are four workers in the pool
// so, if we run 4 blocking tasks, we know that handoff must have happened
let block = Arc::new(std::sync::Barrier::new(5));
for _ in 0..4 {
let block = block.clone();
rt.spawn(async move {
tokio::task::block_in_place(move || {
block.wait();
block.wait();
})
});
}
block.wait();
for _ in 0..NUM {
let cnt = cnt.clone();
let tx = tx.clone();
rt.spawn(async move {
let num = cnt.fetch_add(1, Relaxed) + 1;
if num == NUM {
tx.send(()).unwrap();
}
});
}
rx.recv().unwrap();
// Wait for the pool to shutdown
block.wait();
}
}
#[test]
fn multi_threadpool() {
use tokio::sync::oneshot;
let rt1 = rt();
let rt2 = rt();
let (tx, rx) = oneshot::channel();
let (done_tx, done_rx) = mpsc::channel();
rt2.spawn(async move {
rx.await.unwrap();
done_tx.send(()).unwrap();
});
rt1.spawn(async move {
tx.send(()).unwrap();
});
done_rx.recv().unwrap();
}
// When `block_in_place` returns, it attempts to reclaim the yielded runtime
// worker. In this case, the remainder of the task is on the runtime worker and
// must take part in the cooperative task budgeting system.
//
// The test ensures that, when this happens, attempting to consume from a
// channel yields occasionally even if there are values ready to receive.
#[test]
fn coop_and_block_in_place() {
let rt = tokio::runtime::Builder::new_multi_thread()
// Setting max threads to 1 prevents another thread from claiming the
// runtime worker yielded as part of `block_in_place` and guarantees the
// same thread will reclaim the worker at the end of the
// `block_in_place` call.
.max_blocking_threads(1)
.build()
.unwrap();
rt.block_on(async move {
let (tx, mut rx) = tokio::sync::mpsc::channel(1024);
// Fill the channel
for _ in 0..1024 {
tx.send(()).await.unwrap();
}
drop(tx);
tokio::spawn(async move {
// Block in place without doing anything
tokio::task::block_in_place(|| {});
// Receive all the values, this should trigger a `Pending` as the
// coop limit will be reached.
poll_fn(|cx| {
while let Poll::Ready(v) = {
tokio::pin! {
let fut = rx.recv();
}
Pin::new(&mut fut).poll(cx)
} {
if v.is_none() {
panic!("did not yield");
}
}
Poll::Ready(())
})
.await
})
.await
.unwrap();
});
}
#[test]
fn yield_after_block_in_place() {
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.build()
.unwrap();
rt.block_on(async {
tokio::spawn(async move {
// Block in place then enter a new runtime
tokio::task::block_in_place(|| {
let rt = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
rt.block_on(async {});
});
// Yield, then complete
tokio::task::yield_now().await;
})
.await
.unwrap()
});
}
// Testing this does not panic
#[test]
fn max_blocking_threads() {
let _rt = tokio::runtime::Builder::new_multi_thread()
.max_blocking_threads(1)
.build()
.unwrap();
}
#[test]
#[should_panic]
fn max_blocking_threads_set_to_zero() {
let _rt = tokio::runtime::Builder::new_multi_thread()
.max_blocking_threads(0)
.build()
.unwrap();
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn hang_on_shutdown() {
let (sync_tx, sync_rx) = std::sync::mpsc::channel::<()>();
tokio::spawn(async move {
tokio::task::block_in_place(|| sync_rx.recv().ok());
});
tokio::spawn(async {
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
drop(sync_tx);
});
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
/// Demonstrates tokio-rs/tokio#3869
#[test]
fn | () {
struct Shared {
waker: Option<Waker>,
}
struct MyFuture {
shared: Arc<Mutex<Shared>>,
put_waker: bool,
}
impl MyFuture {
fn new() -> (Self, Self) {
let shared = Arc::new(Mutex::new(Shared { waker: None }));
let f1 = MyFuture {
shared: shared.clone(),
put_waker: true,
};
let f2 = MyFuture {
shared,
put_waker: false,
};
(f1, f2)
}
}
impl Future for MyFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
let me = Pin::into_inner(self);
let mut lock = me.shared.lock().unwrap();
if me.put_waker {
lock.waker = Some(cx.waker().clone());
}
Poll::Pending
}
}
impl Drop for MyFuture {
fn drop(&mut self) {
let mut lock = self.shared.lock().unwrap();
if !self.put_waker {
lock.waker.take().unwrap().wake();
}
drop(lock);
}
}
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.enable_all()
.build()
.unwrap();
let (f1, f2) = MyFuture::new();
rt.spawn(f1);
rt.spawn(f2);
rt.block_on(async { tokio::time::sleep(tokio::time::Duration::from_millis(20)).await });
}
#[should_panic]
#[tokio::test]
async fn test_block_in_place1() {
tokio::task::block_in_place(|| {});
}
#[tokio::test(flavor = "multi_thread")]
async fn test_block_in_place2() {
tokio::task::block_in_place(|| {});
}
#[should_panic]
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_block_in_place3() {
tokio::task::block_in_place(|| {});
}
#[tokio::main]
#[test]
async fn test_block_in_place4() {
tokio::task::block_in_place(|| {});
}
fn rt() -> runtime::Runtime {
runtime::Runtime::new().unwrap()
}
#[cfg(tokio_unstable)]
mod unstable {
use super::*;
#[test]
fn test_disable_lifo_slot() {
let rt = runtime::Builder::new_multi_thread()
.disable_lifo_slot()
.worker_threads(2)
.build()
.unwrap();
rt.block_on(async {
tokio::spawn(async {
// Spawn another task and block the thread until completion. If the LIFO slot
// is used then the test doesn't complete.
futures::executor::block_on(tokio::spawn(async {})).unwrap();
})
.await
.unwrap();
})
}
}
| wake_during_shutdown | identifier_name |
FormBaseInput.ts | /* tslint:disable:no-any */
import * as React from 'react';
import * as PropTypes from 'prop-types';
import { IFormBaseInputProps, IFormBaseInputState, DataStoreEntry, typesForInject, IDataProviderCollection, IDataProviderService } from './FormBaseInput.types';
export { IFormBaseInputProps };
import { BaseComponent, ICancelable } from 'office-ui-fabric-react/lib/Utilities';
import { TranslatedProperty, ValidatorTypes, BinderType } from '../Enums';
import { IFormContext, IFormValidationResult } from '../form/Form.types';
import { autobind } from '@uifabric/utilities';
import { IDataBinder, IDataBinderAsync, IDataBinderFilterAsync, IDataProviderFilterAsync } from '../objects/DataBinder.types';
import { LocalsCommon } from '../locales/LocalsCommon';
import { Helper } from '../Helper';
import { Control } from '..';
/**
* Default Debaunce of 250 Ticks.
*/
export const DEFAULT_DEBOUNCE = 250;
/**
* Type alias for any simple form input
*/
export type GenericFormInput = FormBaseInput<any, IFormBaseInputProps, IFormBaseInputState>;
/**
* Type alias for any simple form input
*/
export type DataLoadedFunction = (key: string, data: any[], waitText: string, isAsync: boolean) => void;
/**
* The base class that all simple form inputs should inherit from
* The T generic should be the type of value this input accepts. For example, a TextBox would probably define T as string
*/
export abstract class FormBaseInput<T, P extends IFormBaseInputProps, S extends IFormBaseInputState> extends BaseComponent<P, S> {
protected commonFormater = Helper.getTranslator("common");
public static contextTypes: React.ValidationMap<IFormContext> = {
isFormValid: PropTypes.func.isRequired,
mountInput: PropTypes.func.isRequired,
unmountInput: PropTypes.func.isRequired,
submitValue: PropTypes.func.isRequired,
formData: PropTypes.object.isRequired,
container: PropTypes.object.isRequired
};
public innerControl: any;
/**
* The debounced version of formContext.submitValue
*/
protected readonly debouncedSubmitValue: ICancelable<void> & ((input: GenericFormInput, validateIt?: boolean, skipSendValue?: boolean) => void);
/**
* Form context passed by the parent form
*/
protected formContext: IFormContext;
/**
* Constructor for any Simple Form input
* @param props The props for this component
* @param context The context for this component
* @param leadingDebounce Sets the debounce setting for updates on this input.
* If leading, the component will update immediately and then debounce.
* Otherwise, the component will only update after the debounce interval. Defaults to true
*/
constructor(props: P, context: IFormContext, leadingDebounce?: boolean) {
super(props, context);
this.formContext = context;
this.debouncedSubmitValue = this._async.debounce(
this.formContext.submitValue, (
(this.props.debounceInterval !== null && this.props.debounceInterval !== undefined) ?
this.props.debounceInterval : DEFAULT_DEBOUNCE
),
{
leading: (leadingDebounce === null || leadingDebounce === undefined ? true : leadingDebounce)
});
if (props.control.Config)
this.ConfigProperties = props.control.Config as T;
else
this.ConfigProperties = {} as T;
this.ConfigProperties = Helper.getTranslatedObject(this.ConfigProperties, this.props.control.ConfigTranslation);
this.TranslatedTitle = Helper.getTranslatedProperty(TranslatedProperty.Title,this.props.control);
this.TranslatedInfo = Helper.getTranslatedProperty(TranslatedProperty.Info,this.props.control);
this.ControlClassName = this.props.control.CssClass ? this.props.control.CssClass : "";
}
/**
* React Lifecycle Method - Because this method uses state when rendering, the state must be
* updated when the prop's value updates
* @param nextProps The props that the component is receiving
*/
public componentWillReceiveProps(nextProps: P): void {
if (nextProps.control.Value !== this.props.control.Value && this.props.control.Value === this.state.currentValue) {
// If the props have changed and the previous props are equal to the current value, then we want to update the internal state value
this.setState((prevState: S) => {
prevState.currentValue = nextProps.control.Value;
return prevState;
});
}
}
/**
* Store the options to the state
* @param dataKey The databinder key to use
* @param data The Array with the Data.
* @param waitText The Wait Text for async loading
* @param isAsync True if async loading.
*/
@autobind
private storeOptions(dataKey: string, data: any[], waitText: string, isAsync: boolean): void {
let options:DataStoreEntry[] = this.state.dataStores;
if (!options)
options = [];
let entry = options.find(d => d.key == dataKey);
let refresh = false;
if (entry && !entry.data) entry.data = [];
if (entry && (!Helper.compareArrays(entry.data, data) || entry.onLoading != isAsync || entry.waitText != waitText)) {
refresh = true;
entry.data = data && data.length == 0 ? undefined : data;
entry.onLoading = isAsync;
entry.waitText = waitText;
}
else if (!entry) {
refresh = true;
options.push({
key: dataKey,
data: data && data.length == 0 ? undefined : data,
onLoading: isAsync,
waitText: waitText
});
}
if (refresh)
this.setState({dataStores: options});
}
/**
* Get the Error Message back after falidation the Value.
*/
@autobind
protected getErrorMessage() {
if (this.state.currentValue){
let result = this.doValidate();
return result.errorMessage;
}
return "";
}
/**
* Check the proprties and warn if the default are used.
* @param props The property Object to check.
*/
protected validateProps(props?: any): void {
if (props) {
if (props.ref) {
console.warn(this.props.inputKey + " 'ref' prop was specified and will be ignored");
}
if (props.id) {
console.warn(this.props.inputKey + " 'id' prop was specified and will be ignored");
}
if (props.name) {
console.warn(this.props.inputKey + " 'name' prop was specified and will be ignored");
}
if (props.label) {
console.warn(this.props.inputKey + " 'label' prop was specified and will be ignored");
}
if (props.onChange) {
console.warn(this.props.inputKey + " 'onChange' prop was specified and will be ignored");
}
}
}
/**
* Loads the data from the Store async with a filter teext
* If Async loading the return true
* @param configKey The Key from the datastore
* @param provider The Data Provider for Async Filter
* @param loadedFunction The funtion to call after data is loaded
* @param waitText The Waiting Text for async loading controls.
* @param control The sender Control that has the Filter Text
* @param filter The Filter Text.
*/
public loadDataFromStoreWithFilter(configKey:string, provider:IDataProviderFilterAsync, loadedFunction:DataLoadedFunction,
waitText: string, control:Control, filter:string) {
if (provider) |
}
/**
* Loads the data from the Store async or sync.
* If Async loading the return true
* @param dataStoreKey The Key from the datastore
* @param loadedFunction The funtion to call after data is loaded
* @param waitText The Waiting Text for async loading controls.
*/
public loadDataFromStore(dataStoreKey:string, loadedFunction:DataLoadedFunction, waitText: string): boolean {
let dataBinderAsync:Promise<any[]> = this.dataStore[dataStoreKey] as Promise<any[]>;
let dataBinder:any[] = this.dataStore[dataStoreKey] as any[];
if (dataBinderAsync && dataBinderAsync.then) {
let waitText = this.commonFormater.formatMessage(LocalsCommon.loadData);
loadedFunction(dataStoreKey, undefined, waitText, true);
dataBinderAsync.then((optionList) => {
loadedFunction(dataStoreKey, optionList, "", false);
});
return true;
}
else if (dataBinder) {
loadedFunction(dataStoreKey, dataBinder, waitText, false);
}
return false;
}
/**
* Get the Data options entry
* @param staticData Static data array from config.
* @param key DataStore key (config or databinder)
* @param defaultPlaceholder Default placholder text.
*/
protected getDataOptionEntry(staticData: any[], key:string, defaultPlaceholder: string): DataStoreEntry {
let optionsEntry:DataStoreEntry;
let controlKey = Helper.getControlKeyFromConfigKey(key);
if (controlKey && this.state.currentFilter) {
let provider = this.retrievFilterData[key] as IDataProviderFilterAsync;
let waitText = Helper.getPlaceHolderText(optionsEntry, defaultPlaceholder);
this.loadDataFromStoreWithFilter(key, provider, this.storeOptions, waitText, this.props.control, this.state.currentFilter);
let entry = this.state.dataStores ?
this.state.dataStores.find(e => e.key == key) : undefined;
return entry;
}
else {
if (!staticData && this.state.dataStores){
optionsEntry = this.state.dataStores.find(e => e.key == key);
}
if (optionsEntry) {
optionsEntry.waitText = Helper.getPlaceHolderText(optionsEntry, defaultPlaceholder);
}
else {
optionsEntry = {
key: "default",
data: staticData,
onLoading: false,
waitText: Helper.getPlaceHolderText(undefined, defaultPlaceholder)
}
}
if (this.props.control.ReadOnly)
optionsEntry.onLoading = true;
return optionsEntry;
}
}
/**
* Property for the Control. In case of UI Fabric Controls the UI Fabric Interface class can be used. This Config will overgiven to the
* Inner Control
*/
protected ConfigProperties:T;
/** Translation for the Title */
public TranslatedTitle?:string;
/** The cofigured class name or '' */
public ControlClassName:string;
/** True if the Required validator is set. */
public IsRequired(): boolean {
return this.props.control.FormValidators && this.props.control.FormValidators.find(v => v.ValidatorType == ValidatorTypes.Required) != undefined;
}
/** Translaiton for the Info */
public TranslatedInfo?:string;
/** Loaded data for this Control. */
protected dataStore:{ [key: string]: any[] | Promise<any[]> } = {}
/** The Asynchronous Filter Methods. */
protected retrievFilterData: { [key: string]: IDataBinderFilterAsync | IDataProviderFilterAsync } = {}
/** The Data Provier Service used for this control */
protected dataProviderService?: IDataProviderService;
/**
* Load the Databinder. Sync and Async are loaded. AsyncFilter is loade when user type an filter.
*/
public componentWillMount(): void {
this.formContext.mountInput(this);
let formData = this.formContext.formData;
let container = this.formContext.container;
if (this.props.dataBinder) {
for(let binder of this.props.dataBinder) {
let binderSync = binder.binderFunction as IDataBinder;
let binderAsync = binder.binderFunction as IDataBinderAsync;
let binderAsyncFilter = binder.binderFunction as IDataBinderFilterAsync;
if (binder.binderType == BinderType.Sync)
this.dataStore[binder.typeName] = binderSync.retrieveData(this.props.control, Helper.getLanguage());
else if (binder.binderType == BinderType.Async)
this.dataStore[binder.typeName] = binderAsync.retrieveData(this.props.control, Helper.getLanguage());
else if (binder.binderType == BinderType.AsyncFilter)
this.retrievFilterData[binder.typeName] = binderAsyncFilter;
}
}
if (this.props.control.DataProviderConfigKeys.length > 0 && container == undefined)
throw "No Data Service Container found"
if (this.props.control.DataProviderConfigKeys.length > 0) {
let dataProviders = container.get<IDataProviderCollection>(typesForInject.IDataProviderCollection);
if (dataProviders == undefined || dataProviders.providers.length == 0)
throw "No Data Service found"
for(let configKey of this.props.control.DataProviderConfigKeys) {
let keyParts = configKey.split(".");
this.dataProviderService = dataProviders.providers.find(p => p.providerServiceKey == keyParts[0])
if (this.dataProviderService == undefined)
throw "No DataProvider found with key " + keyParts[0] + " name is: " + dataProviders.providers[0].providerServiceKey;
this.dataProviderService.formData = formData;
this.dataProviderService.initialize();
let result = Helper.getControlKeyFromConfigKey(configKey);
if (result && this.dataProviderService.retrieveFilteredListData) {
let binderFuntion = this.dataProviderService as IDataProviderFilterAsync;
this.retrievFilterData[configKey] = binderFuntion;
}
else {
let providerConfigKey = Helper.getConfigKeyFromProviderKey(configKey);
this.dataProviderService.formData = formData;
this.dataProviderService.initialize();
this.dataStore[configKey] = this.dataProviderService.retrieveListData(providerConfigKey, this.props.control, Helper.getLanguage());
this.loadDataFromStore(configKey,this.storeOptions, "");
}
}
}
for(let binder of this.props.control.DataBinders) {
let key = this.props.inputKey + "_" + binder;
if (this.ConfigProperties[binder])
this.storeOptions(key, this.ConfigProperties[binder], "", false);
else {
this.loadDataFromStore(key,this.storeOptions, "");
}
}
}
/**
* Unmount the current control.
*/
public componentWillUnmount(): void {
this.debouncedSubmitValue.flush();
this.formContext.unmountInput(this);
}
/**
* Validate the input. By default, this function will run through all the validators and ensure they pass
*/
public doValidate(): IFormValidationResult {
const {
validators = []
} = this.props;
let validationResult: IFormValidationResult = {
isValid: true,
component: this
};
for (let validator of (validators as any)) {
let error: string = validator(this.state.currentValue);
if (error) {
validationResult.isValid = false;
validationResult.errorMessage = error;
return validationResult;
}
}
return validationResult;
}
/**
* Set the error state of this input
* @param errorMessage Message to set to the state.
*/
public setError(errorMessage?: string): void {
this.setState((prevState: S) => {
prevState.isValid = false;
prevState.currentError = errorMessage;
return prevState;
});
}
/**
* Clear any errors from this input
*/
public clearError(): void {
this.setState((prevState: S) => {
prevState.isValid = true;
prevState.currentError = undefined;
return prevState;
});
}
/**
* Set the current value of this input and validate it
* @param value The value to set
* @param validate True if the value should be validated.
*/
public setValue(value: any, validate?: boolean, skipSendValue?: boolean): void {
this.setState((prevState: S): S => {
this.props.control.Value = value;
prevState.currentValue = value;
return prevState;
},
() => {
this.debouncedSubmitValue(this, validate, skipSendValue);
}
);
}
} | {
let entry = this.state.dataStores ? this.state.dataStores.find(e => e.key == configKey) : undefined;
if (!entry) {
let waitText = this.commonFormater.formatMessage(LocalsCommon.loadData);
loadedFunction(configKey, undefined, waitText, true);
}
provider.retrieveFilteredListData(configKey,control,Helper.getLanguage(), filter).then((list) => {
let waitTextA = !list || list.length == 0 ?
this.commonFormater.formatMessage(LocalsCommon.nothingFound) : waitText;
loadedFunction(configKey, list, waitTextA, false);
});
} | conditional_block |
FormBaseInput.ts | /* tslint:disable:no-any */
import * as React from 'react';
import * as PropTypes from 'prop-types';
import { IFormBaseInputProps, IFormBaseInputState, DataStoreEntry, typesForInject, IDataProviderCollection, IDataProviderService } from './FormBaseInput.types';
export { IFormBaseInputProps };
import { BaseComponent, ICancelable } from 'office-ui-fabric-react/lib/Utilities';
import { TranslatedProperty, ValidatorTypes, BinderType } from '../Enums';
import { IFormContext, IFormValidationResult } from '../form/Form.types';
import { autobind } from '@uifabric/utilities';
import { IDataBinder, IDataBinderAsync, IDataBinderFilterAsync, IDataProviderFilterAsync } from '../objects/DataBinder.types';
import { LocalsCommon } from '../locales/LocalsCommon';
import { Helper } from '../Helper';
import { Control } from '..';
/**
* Default Debaunce of 250 Ticks.
*/
export const DEFAULT_DEBOUNCE = 250;
/**
* Type alias for any simple form input
*/
export type GenericFormInput = FormBaseInput<any, IFormBaseInputProps, IFormBaseInputState>;
/**
* Type alias for any simple form input
*/
export type DataLoadedFunction = (key: string, data: any[], waitText: string, isAsync: boolean) => void;
/**
* The base class that all simple form inputs should inherit from
* The T generic should be the type of value this input accepts. For example, a TextBox would probably define T as string
*/
export abstract class FormBaseInput<T, P extends IFormBaseInputProps, S extends IFormBaseInputState> extends BaseComponent<P, S> {
protected commonFormater = Helper.getTranslator("common");
public static contextTypes: React.ValidationMap<IFormContext> = {
isFormValid: PropTypes.func.isRequired,
mountInput: PropTypes.func.isRequired,
unmountInput: PropTypes.func.isRequired,
submitValue: PropTypes.func.isRequired,
formData: PropTypes.object.isRequired,
container: PropTypes.object.isRequired
};
public innerControl: any;
/**
* The debounced version of formContext.submitValue
*/
protected readonly debouncedSubmitValue: ICancelable<void> & ((input: GenericFormInput, validateIt?: boolean, skipSendValue?: boolean) => void);
/**
* Form context passed by the parent form
*/
protected formContext: IFormContext;
/**
* Constructor for any Simple Form input
* @param props The props for this component
* @param context The context for this component
* @param leadingDebounce Sets the debounce setting for updates on this input.
* If leading, the component will update immediately and then debounce.
* Otherwise, the component will only update after the debounce interval. Defaults to true
*/
constructor(props: P, context: IFormContext, leadingDebounce?: boolean) {
super(props, context);
this.formContext = context;
this.debouncedSubmitValue = this._async.debounce(
this.formContext.submitValue, (
(this.props.debounceInterval !== null && this.props.debounceInterval !== undefined) ?
this.props.debounceInterval : DEFAULT_DEBOUNCE
),
{
leading: (leadingDebounce === null || leadingDebounce === undefined ? true : leadingDebounce)
});
if (props.control.Config)
this.ConfigProperties = props.control.Config as T;
else
this.ConfigProperties = {} as T;
this.ConfigProperties = Helper.getTranslatedObject(this.ConfigProperties, this.props.control.ConfigTranslation);
this.TranslatedTitle = Helper.getTranslatedProperty(TranslatedProperty.Title,this.props.control);
this.TranslatedInfo = Helper.getTranslatedProperty(TranslatedProperty.Info,this.props.control);
this.ControlClassName = this.props.control.CssClass ? this.props.control.CssClass : "";
}
/**
* React Lifecycle Method - Because this method uses state when rendering, the state must be
* updated when the prop's value updates
* @param nextProps The props that the component is receiving
*/
public componentWillReceiveProps(nextProps: P): void {
if (nextProps.control.Value !== this.props.control.Value && this.props.control.Value === this.state.currentValue) {
// If the props have changed and the previous props are equal to the current value, then we want to update the internal state value
this.setState((prevState: S) => {
prevState.currentValue = nextProps.control.Value;
return prevState;
});
}
}
/**
* Store the options to the state
* @param dataKey The databinder key to use
* @param data The Array with the Data.
* @param waitText The Wait Text for async loading
* @param isAsync True if async loading.
*/
@autobind
private storeOptions(dataKey: string, data: any[], waitText: string, isAsync: boolean): void {
let options:DataStoreEntry[] = this.state.dataStores;
if (!options)
options = [];
let entry = options.find(d => d.key == dataKey);
let refresh = false;
if (entry && !entry.data) entry.data = [];
if (entry && (!Helper.compareArrays(entry.data, data) || entry.onLoading != isAsync || entry.waitText != waitText)) {
refresh = true;
entry.data = data && data.length == 0 ? undefined : data;
entry.onLoading = isAsync;
entry.waitText = waitText;
}
else if (!entry) {
refresh = true;
options.push({
key: dataKey,
data: data && data.length == 0 ? undefined : data,
onLoading: isAsync,
waitText: waitText
});
}
if (refresh)
this.setState({dataStores: options});
}
/**
* Get the Error Message back after falidation the Value.
*/
@autobind
protected getErrorMessage() {
if (this.state.currentValue){
let result = this.doValidate();
return result.errorMessage;
}
return "";
}
/**
* Check the proprties and warn if the default are used.
* @param props The property Object to check.
*/
protected validateProps(props?: any): void {
if (props) {
if (props.ref) {
console.warn(this.props.inputKey + " 'ref' prop was specified and will be ignored");
}
if (props.id) {
console.warn(this.props.inputKey + " 'id' prop was specified and will be ignored");
}
if (props.name) {
console.warn(this.props.inputKey + " 'name' prop was specified and will be ignored");
}
if (props.label) {
console.warn(this.props.inputKey + " 'label' prop was specified and will be ignored");
}
if (props.onChange) {
console.warn(this.props.inputKey + " 'onChange' prop was specified and will be ignored");
}
}
}
/**
* Loads the data from the Store async with a filter teext
* If Async loading the return true
* @param configKey The Key from the datastore
* @param provider The Data Provider for Async Filter
* @param loadedFunction The funtion to call after data is loaded
* @param waitText The Waiting Text for async loading controls.
* @param control The sender Control that has the Filter Text
* @param filter The Filter Text.
*/
public loadDataFromStoreWithFilter(configKey:string, provider:IDataProviderFilterAsync, loadedFunction:DataLoadedFunction,
waitText: string, control:Control, filter:string) {
if (provider) {
let entry = this.state.dataStores ? this.state.dataStores.find(e => e.key == configKey) : undefined;
if (!entry) {
let waitText = this.commonFormater.formatMessage(LocalsCommon.loadData);
loadedFunction(configKey, undefined, waitText, true);
}
provider.retrieveFilteredListData(configKey,control,Helper.getLanguage(), filter).then((list) => {
let waitTextA = !list || list.length == 0 ?
this.commonFormater.formatMessage(LocalsCommon.nothingFound) : waitText;
loadedFunction(configKey, list, waitTextA, false);
});
}
}
/**
* Loads the data from the Store async or sync.
* If Async loading the return true
* @param dataStoreKey The Key from the datastore
* @param loadedFunction The funtion to call after data is loaded
* @param waitText The Waiting Text for async loading controls.
*/
public loadDataFromStore(dataStoreKey:string, loadedFunction:DataLoadedFunction, waitText: string): boolean {
let dataBinderAsync:Promise<any[]> = this.dataStore[dataStoreKey] as Promise<any[]>;
let dataBinder:any[] = this.dataStore[dataStoreKey] as any[];
if (dataBinderAsync && dataBinderAsync.then) {
let waitText = this.commonFormater.formatMessage(LocalsCommon.loadData);
loadedFunction(dataStoreKey, undefined, waitText, true);
dataBinderAsync.then((optionList) => {
loadedFunction(dataStoreKey, optionList, "", false);
});
return true;
}
else if (dataBinder) {
loadedFunction(dataStoreKey, dataBinder, waitText, false);
}
return false;
}
/**
* Get the Data options entry
* @param staticData Static data array from config.
* @param key DataStore key (config or databinder)
* @param defaultPlaceholder Default placholder text.
*/
protected getDataOptionEntry(staticData: any[], key:string, defaultPlaceholder: string): DataStoreEntry {
let optionsEntry:DataStoreEntry;
let controlKey = Helper.getControlKeyFromConfigKey(key);
if (controlKey && this.state.currentFilter) {
let provider = this.retrievFilterData[key] as IDataProviderFilterAsync;
let waitText = Helper.getPlaceHolderText(optionsEntry, defaultPlaceholder);
this.loadDataFromStoreWithFilter(key, provider, this.storeOptions, waitText, this.props.control, this.state.currentFilter);
let entry = this.state.dataStores ?
this.state.dataStores.find(e => e.key == key) : undefined;
return entry;
}
else {
if (!staticData && this.state.dataStores){
optionsEntry = this.state.dataStores.find(e => e.key == key);
}
if (optionsEntry) {
optionsEntry.waitText = Helper.getPlaceHolderText(optionsEntry, defaultPlaceholder);
}
else {
optionsEntry = {
key: "default",
data: staticData,
onLoading: false,
waitText: Helper.getPlaceHolderText(undefined, defaultPlaceholder)
}
}
if (this.props.control.ReadOnly)
optionsEntry.onLoading = true;
return optionsEntry;
}
}
/**
* Property for the Control. In case of UI Fabric Controls the UI Fabric Interface class can be used. This Config will overgiven to the
* Inner Control
*/
protected ConfigProperties:T;
/** Translation for the Title */
public TranslatedTitle?:string;
/** The cofigured class name or '' */
public ControlClassName:string;
/** True if the Required validator is set. */
public IsRequired(): boolean {
return this.props.control.FormValidators && this.props.control.FormValidators.find(v => v.ValidatorType == ValidatorTypes.Required) != undefined;
}
/** Translaiton for the Info */
public TranslatedInfo?:string;
/** Loaded data for this Control. */
protected dataStore:{ [key: string]: any[] | Promise<any[]> } = {}
/** The Asynchronous Filter Methods. */
protected retrievFilterData: { [key: string]: IDataBinderFilterAsync | IDataProviderFilterAsync } = {}
/** The Data Provier Service used for this control */
protected dataProviderService?: IDataProviderService;
/**
* Load the Databinder. Sync and Async are loaded. AsyncFilter is loade when user type an filter.
*/
public componentWillMount(): void {
this.formContext.mountInput(this);
let formData = this.formContext.formData;
let container = this.formContext.container;
if (this.props.dataBinder) {
for(let binder of this.props.dataBinder) {
let binderSync = binder.binderFunction as IDataBinder;
let binderAsync = binder.binderFunction as IDataBinderAsync;
let binderAsyncFilter = binder.binderFunction as IDataBinderFilterAsync;
if (binder.binderType == BinderType.Sync)
this.dataStore[binder.typeName] = binderSync.retrieveData(this.props.control, Helper.getLanguage());
else if (binder.binderType == BinderType.Async)
this.dataStore[binder.typeName] = binderAsync.retrieveData(this.props.control, Helper.getLanguage());
else if (binder.binderType == BinderType.AsyncFilter)
this.retrievFilterData[binder.typeName] = binderAsyncFilter;
}
}
if (this.props.control.DataProviderConfigKeys.length > 0 && container == undefined)
throw "No Data Service Container found"
if (this.props.control.DataProviderConfigKeys.length > 0) {
let dataProviders = container.get<IDataProviderCollection>(typesForInject.IDataProviderCollection);
if (dataProviders == undefined || dataProviders.providers.length == 0)
throw "No Data Service found"
for(let configKey of this.props.control.DataProviderConfigKeys) {
let keyParts = configKey.split(".");
this.dataProviderService = dataProviders.providers.find(p => p.providerServiceKey == keyParts[0])
if (this.dataProviderService == undefined)
throw "No DataProvider found with key " + keyParts[0] + " name is: " + dataProviders.providers[0].providerServiceKey;
this.dataProviderService.formData = formData;
this.dataProviderService.initialize();
let result = Helper.getControlKeyFromConfigKey(configKey);
if (result && this.dataProviderService.retrieveFilteredListData) {
let binderFuntion = this.dataProviderService as IDataProviderFilterAsync;
this.retrievFilterData[configKey] = binderFuntion;
}
else {
let providerConfigKey = Helper.getConfigKeyFromProviderKey(configKey);
this.dataProviderService.formData = formData;
this.dataProviderService.initialize();
this.dataStore[configKey] = this.dataProviderService.retrieveListData(providerConfigKey, this.props.control, Helper.getLanguage());
this.loadDataFromStore(configKey,this.storeOptions, "");
}
}
}
for(let binder of this.props.control.DataBinders) {
let key = this.props.inputKey + "_" + binder;
if (this.ConfigProperties[binder])
this.storeOptions(key, this.ConfigProperties[binder], "", false);
else {
this.loadDataFromStore(key,this.storeOptions, "");
}
}
}
/**
* Unmount the current control.
*/
public componentWillUnmount(): void {
this.debouncedSubmitValue.flush();
this.formContext.unmountInput(this);
}
/**
* Validate the input. By default, this function will run through all the validators and ensure they pass
*/
public doValidate(): IFormValidationResult {
const {
validators = []
} = this.props;
let validationResult: IFormValidationResult = {
isValid: true,
component: this
};
for (let validator of (validators as any)) {
let error: string = validator(this.state.currentValue);
if (error) {
validationResult.isValid = false;
validationResult.errorMessage = error;
return validationResult;
}
}
return validationResult;
}
/**
* Set the error state of this input
* @param errorMessage Message to set to the state.
*/
public | (errorMessage?: string): void {
this.setState((prevState: S) => {
prevState.isValid = false;
prevState.currentError = errorMessage;
return prevState;
});
}
/**
* Clear any errors from this input
*/
public clearError(): void {
this.setState((prevState: S) => {
prevState.isValid = true;
prevState.currentError = undefined;
return prevState;
});
}
/**
* Set the current value of this input and validate it
* @param value The value to set
* @param validate True if the value should be validated.
*/
public setValue(value: any, validate?: boolean, skipSendValue?: boolean): void {
this.setState((prevState: S): S => {
this.props.control.Value = value;
prevState.currentValue = value;
return prevState;
},
() => {
this.debouncedSubmitValue(this, validate, skipSendValue);
}
);
}
} | setError | identifier_name |
FormBaseInput.ts | /* tslint:disable:no-any */
import * as React from 'react';
import * as PropTypes from 'prop-types';
import { IFormBaseInputProps, IFormBaseInputState, DataStoreEntry, typesForInject, IDataProviderCollection, IDataProviderService } from './FormBaseInput.types';
export { IFormBaseInputProps };
import { BaseComponent, ICancelable } from 'office-ui-fabric-react/lib/Utilities';
import { TranslatedProperty, ValidatorTypes, BinderType } from '../Enums';
import { IFormContext, IFormValidationResult } from '../form/Form.types';
import { autobind } from '@uifabric/utilities';
import { IDataBinder, IDataBinderAsync, IDataBinderFilterAsync, IDataProviderFilterAsync } from '../objects/DataBinder.types';
import { LocalsCommon } from '../locales/LocalsCommon';
import { Helper } from '../Helper';
import { Control } from '..';
/**
* Default Debaunce of 250 Ticks.
*/
export const DEFAULT_DEBOUNCE = 250;
/**
* Type alias for any simple form input
*/
export type GenericFormInput = FormBaseInput<any, IFormBaseInputProps, IFormBaseInputState>;
/**
* Type alias for any simple form input
*/
export type DataLoadedFunction = (key: string, data: any[], waitText: string, isAsync: boolean) => void;
/**
* The base class that all simple form inputs should inherit from
* The T generic should be the type of value this input accepts. For example, a TextBox would probably define T as string
*/
export abstract class FormBaseInput<T, P extends IFormBaseInputProps, S extends IFormBaseInputState> extends BaseComponent<P, S> {
protected commonFormater = Helper.getTranslator("common");
public static contextTypes: React.ValidationMap<IFormContext> = {
isFormValid: PropTypes.func.isRequired,
mountInput: PropTypes.func.isRequired,
unmountInput: PropTypes.func.isRequired,
submitValue: PropTypes.func.isRequired,
formData: PropTypes.object.isRequired,
container: PropTypes.object.isRequired
};
public innerControl: any;
/**
* The debounced version of formContext.submitValue
*/
protected readonly debouncedSubmitValue: ICancelable<void> & ((input: GenericFormInput, validateIt?: boolean, skipSendValue?: boolean) => void);
/**
* Form context passed by the parent form
*/
protected formContext: IFormContext;
/**
* Constructor for any Simple Form input
* @param props The props for this component
* @param context The context for this component
* @param leadingDebounce Sets the debounce setting for updates on this input.
* If leading, the component will update immediately and then debounce.
* Otherwise, the component will only update after the debounce interval. Defaults to true
*/
constructor(props: P, context: IFormContext, leadingDebounce?: boolean) {
super(props, context);
this.formContext = context;
this.debouncedSubmitValue = this._async.debounce(
this.formContext.submitValue, (
(this.props.debounceInterval !== null && this.props.debounceInterval !== undefined) ?
this.props.debounceInterval : DEFAULT_DEBOUNCE
),
{
leading: (leadingDebounce === null || leadingDebounce === undefined ? true : leadingDebounce)
});
if (props.control.Config)
this.ConfigProperties = props.control.Config as T;
else
this.ConfigProperties = {} as T;
this.ConfigProperties = Helper.getTranslatedObject(this.ConfigProperties, this.props.control.ConfigTranslation);
this.TranslatedTitle = Helper.getTranslatedProperty(TranslatedProperty.Title,this.props.control);
this.TranslatedInfo = Helper.getTranslatedProperty(TranslatedProperty.Info,this.props.control);
this.ControlClassName = this.props.control.CssClass ? this.props.control.CssClass : "";
}
/**
* React Lifecycle Method - Because this method uses state when rendering, the state must be
* updated when the prop's value updates
* @param nextProps The props that the component is receiving
*/
public componentWillReceiveProps(nextProps: P): void {
if (nextProps.control.Value !== this.props.control.Value && this.props.control.Value === this.state.currentValue) {
// If the props have changed and the previous props are equal to the current value, then we want to update the internal state value
this.setState((prevState: S) => {
prevState.currentValue = nextProps.control.Value;
return prevState;
});
}
}
/**
* Store the options to the state
* @param dataKey The databinder key to use
* @param data The Array with the Data.
* @param waitText The Wait Text for async loading
* @param isAsync True if async loading.
*/
@autobind
private storeOptions(dataKey: string, data: any[], waitText: string, isAsync: boolean): void {
let options:DataStoreEntry[] = this.state.dataStores;
if (!options)
options = [];
let entry = options.find(d => d.key == dataKey);
let refresh = false;
if (entry && !entry.data) entry.data = [];
if (entry && (!Helper.compareArrays(entry.data, data) || entry.onLoading != isAsync || entry.waitText != waitText)) {
refresh = true;
entry.data = data && data.length == 0 ? undefined : data;
entry.onLoading = isAsync;
entry.waitText = waitText;
}
else if (!entry) {
refresh = true;
options.push({
key: dataKey,
data: data && data.length == 0 ? undefined : data,
onLoading: isAsync,
waitText: waitText
});
}
if (refresh)
this.setState({dataStores: options});
}
/**
* Get the Error Message back after falidation the Value.
*/
@autobind
protected getErrorMessage() {
if (this.state.currentValue){
let result = this.doValidate();
return result.errorMessage;
}
return "";
}
/**
* Check the proprties and warn if the default are used.
* @param props The property Object to check.
*/
protected validateProps(props?: any): void {
if (props) {
if (props.ref) {
console.warn(this.props.inputKey + " 'ref' prop was specified and will be ignored");
}
if (props.id) {
console.warn(this.props.inputKey + " 'id' prop was specified and will be ignored");
}
if (props.name) {
console.warn(this.props.inputKey + " 'name' prop was specified and will be ignored");
}
if (props.label) {
console.warn(this.props.inputKey + " 'label' prop was specified and will be ignored");
}
if (props.onChange) {
console.warn(this.props.inputKey + " 'onChange' prop was specified and will be ignored");
}
}
}
/**
* Loads the data from the Store async with a filter teext
* If Async loading the return true
* @param configKey The Key from the datastore
* @param provider The Data Provider for Async Filter
* @param loadedFunction The funtion to call after data is loaded
* @param waitText The Waiting Text for async loading controls.
* @param control The sender Control that has the Filter Text
* @param filter The Filter Text.
*/
public loadDataFromStoreWithFilter(configKey:string, provider:IDataProviderFilterAsync, loadedFunction:DataLoadedFunction,
waitText: string, control:Control, filter:string) {
if (provider) {
let entry = this.state.dataStores ? this.state.dataStores.find(e => e.key == configKey) : undefined;
if (!entry) {
let waitText = this.commonFormater.formatMessage(LocalsCommon.loadData);
loadedFunction(configKey, undefined, waitText, true);
}
provider.retrieveFilteredListData(configKey,control,Helper.getLanguage(), filter).then((list) => {
let waitTextA = !list || list.length == 0 ?
this.commonFormater.formatMessage(LocalsCommon.nothingFound) : waitText;
loadedFunction(configKey, list, waitTextA, false);
});
}
}
/**
* Loads the data from the Store async or sync.
* If Async loading the return true
* @param dataStoreKey The Key from the datastore | * @param loadedFunction The funtion to call after data is loaded
* @param waitText The Waiting Text for async loading controls.
*/
public loadDataFromStore(dataStoreKey:string, loadedFunction:DataLoadedFunction, waitText: string): boolean {
let dataBinderAsync:Promise<any[]> = this.dataStore[dataStoreKey] as Promise<any[]>;
let dataBinder:any[] = this.dataStore[dataStoreKey] as any[];
if (dataBinderAsync && dataBinderAsync.then) {
let waitText = this.commonFormater.formatMessage(LocalsCommon.loadData);
loadedFunction(dataStoreKey, undefined, waitText, true);
dataBinderAsync.then((optionList) => {
loadedFunction(dataStoreKey, optionList, "", false);
});
return true;
}
else if (dataBinder) {
loadedFunction(dataStoreKey, dataBinder, waitText, false);
}
return false;
}
/**
* Get the Data options entry
* @param staticData Static data array from config.
* @param key DataStore key (config or databinder)
* @param defaultPlaceholder Default placholder text.
*/
protected getDataOptionEntry(staticData: any[], key:string, defaultPlaceholder: string): DataStoreEntry {
let optionsEntry:DataStoreEntry;
let controlKey = Helper.getControlKeyFromConfigKey(key);
if (controlKey && this.state.currentFilter) {
let provider = this.retrievFilterData[key] as IDataProviderFilterAsync;
let waitText = Helper.getPlaceHolderText(optionsEntry, defaultPlaceholder);
this.loadDataFromStoreWithFilter(key, provider, this.storeOptions, waitText, this.props.control, this.state.currentFilter);
let entry = this.state.dataStores ?
this.state.dataStores.find(e => e.key == key) : undefined;
return entry;
}
else {
if (!staticData && this.state.dataStores){
optionsEntry = this.state.dataStores.find(e => e.key == key);
}
if (optionsEntry) {
optionsEntry.waitText = Helper.getPlaceHolderText(optionsEntry, defaultPlaceholder);
}
else {
optionsEntry = {
key: "default",
data: staticData,
onLoading: false,
waitText: Helper.getPlaceHolderText(undefined, defaultPlaceholder)
}
}
if (this.props.control.ReadOnly)
optionsEntry.onLoading = true;
return optionsEntry;
}
}
/**
* Property for the Control. In case of UI Fabric Controls the UI Fabric Interface class can be used. This Config will overgiven to the
* Inner Control
*/
protected ConfigProperties:T;
/** Translation for the Title */
public TranslatedTitle?:string;
/** The cofigured class name or '' */
public ControlClassName:string;
/** True if the Required validator is set. */
public IsRequired(): boolean {
return this.props.control.FormValidators && this.props.control.FormValidators.find(v => v.ValidatorType == ValidatorTypes.Required) != undefined;
}
/** Translaiton for the Info */
public TranslatedInfo?:string;
/** Loaded data for this Control. */
protected dataStore:{ [key: string]: any[] | Promise<any[]> } = {}
/** The Asynchronous Filter Methods. */
protected retrievFilterData: { [key: string]: IDataBinderFilterAsync | IDataProviderFilterAsync } = {}
/** The Data Provier Service used for this control */
protected dataProviderService?: IDataProviderService;
/**
* Load the Databinder. Sync and Async are loaded. AsyncFilter is loade when user type an filter.
*/
public componentWillMount(): void {
this.formContext.mountInput(this);
let formData = this.formContext.formData;
let container = this.formContext.container;
if (this.props.dataBinder) {
for(let binder of this.props.dataBinder) {
let binderSync = binder.binderFunction as IDataBinder;
let binderAsync = binder.binderFunction as IDataBinderAsync;
let binderAsyncFilter = binder.binderFunction as IDataBinderFilterAsync;
if (binder.binderType == BinderType.Sync)
this.dataStore[binder.typeName] = binderSync.retrieveData(this.props.control, Helper.getLanguage());
else if (binder.binderType == BinderType.Async)
this.dataStore[binder.typeName] = binderAsync.retrieveData(this.props.control, Helper.getLanguage());
else if (binder.binderType == BinderType.AsyncFilter)
this.retrievFilterData[binder.typeName] = binderAsyncFilter;
}
}
if (this.props.control.DataProviderConfigKeys.length > 0 && container == undefined)
throw "No Data Service Container found"
if (this.props.control.DataProviderConfigKeys.length > 0) {
let dataProviders = container.get<IDataProviderCollection>(typesForInject.IDataProviderCollection);
if (dataProviders == undefined || dataProviders.providers.length == 0)
throw "No Data Service found"
for(let configKey of this.props.control.DataProviderConfigKeys) {
let keyParts = configKey.split(".");
this.dataProviderService = dataProviders.providers.find(p => p.providerServiceKey == keyParts[0])
if (this.dataProviderService == undefined)
throw "No DataProvider found with key " + keyParts[0] + " name is: " + dataProviders.providers[0].providerServiceKey;
this.dataProviderService.formData = formData;
this.dataProviderService.initialize();
let result = Helper.getControlKeyFromConfigKey(configKey);
if (result && this.dataProviderService.retrieveFilteredListData) {
let binderFuntion = this.dataProviderService as IDataProviderFilterAsync;
this.retrievFilterData[configKey] = binderFuntion;
}
else {
let providerConfigKey = Helper.getConfigKeyFromProviderKey(configKey);
this.dataProviderService.formData = formData;
this.dataProviderService.initialize();
this.dataStore[configKey] = this.dataProviderService.retrieveListData(providerConfigKey, this.props.control, Helper.getLanguage());
this.loadDataFromStore(configKey,this.storeOptions, "");
}
}
}
for(let binder of this.props.control.DataBinders) {
let key = this.props.inputKey + "_" + binder;
if (this.ConfigProperties[binder])
this.storeOptions(key, this.ConfigProperties[binder], "", false);
else {
this.loadDataFromStore(key,this.storeOptions, "");
}
}
}
/**
* Unmount the current control.
*/
public componentWillUnmount(): void {
this.debouncedSubmitValue.flush();
this.formContext.unmountInput(this);
}
/**
* Validate the input. By default, this function will run through all the validators and ensure they pass
*/
public doValidate(): IFormValidationResult {
const {
validators = []
} = this.props;
let validationResult: IFormValidationResult = {
isValid: true,
component: this
};
for (let validator of (validators as any)) {
let error: string = validator(this.state.currentValue);
if (error) {
validationResult.isValid = false;
validationResult.errorMessage = error;
return validationResult;
}
}
return validationResult;
}
/**
* Set the error state of this input
* @param errorMessage Message to set to the state.
*/
public setError(errorMessage?: string): void {
this.setState((prevState: S) => {
prevState.isValid = false;
prevState.currentError = errorMessage;
return prevState;
});
}
/**
* Clear any errors from this input
*/
public clearError(): void {
this.setState((prevState: S) => {
prevState.isValid = true;
prevState.currentError = undefined;
return prevState;
});
}
/**
* Set the current value of this input and validate it
* @param value The value to set
* @param validate True if the value should be validated.
*/
public setValue(value: any, validate?: boolean, skipSendValue?: boolean): void {
this.setState((prevState: S): S => {
this.props.control.Value = value;
prevState.currentValue = value;
return prevState;
},
() => {
this.debouncedSubmitValue(this, validate, skipSendValue);
}
);
}
} | random_line_split | |
FormBaseInput.ts | /* tslint:disable:no-any */
import * as React from 'react';
import * as PropTypes from 'prop-types';
import { IFormBaseInputProps, IFormBaseInputState, DataStoreEntry, typesForInject, IDataProviderCollection, IDataProviderService } from './FormBaseInput.types';
export { IFormBaseInputProps };
import { BaseComponent, ICancelable } from 'office-ui-fabric-react/lib/Utilities';
import { TranslatedProperty, ValidatorTypes, BinderType } from '../Enums';
import { IFormContext, IFormValidationResult } from '../form/Form.types';
import { autobind } from '@uifabric/utilities';
import { IDataBinder, IDataBinderAsync, IDataBinderFilterAsync, IDataProviderFilterAsync } from '../objects/DataBinder.types';
import { LocalsCommon } from '../locales/LocalsCommon';
import { Helper } from '../Helper';
import { Control } from '..';
/**
* Default Debaunce of 250 Ticks.
*/
export const DEFAULT_DEBOUNCE = 250;
/**
* Type alias for any simple form input
*/
export type GenericFormInput = FormBaseInput<any, IFormBaseInputProps, IFormBaseInputState>;
/**
* Type alias for any simple form input
*/
export type DataLoadedFunction = (key: string, data: any[], waitText: string, isAsync: boolean) => void;
/**
* The base class that all simple form inputs should inherit from
* The T generic should be the type of value this input accepts. For example, a TextBox would probably define T as string
*/
export abstract class FormBaseInput<T, P extends IFormBaseInputProps, S extends IFormBaseInputState> extends BaseComponent<P, S> {
protected commonFormater = Helper.getTranslator("common");
public static contextTypes: React.ValidationMap<IFormContext> = {
isFormValid: PropTypes.func.isRequired,
mountInput: PropTypes.func.isRequired,
unmountInput: PropTypes.func.isRequired,
submitValue: PropTypes.func.isRequired,
formData: PropTypes.object.isRequired,
container: PropTypes.object.isRequired
};
public innerControl: any;
/**
* The debounced version of formContext.submitValue
*/
protected readonly debouncedSubmitValue: ICancelable<void> & ((input: GenericFormInput, validateIt?: boolean, skipSendValue?: boolean) => void);
/**
* Form context passed by the parent form
*/
protected formContext: IFormContext;
/**
* Constructor for any Simple Form input
* @param props The props for this component
* @param context The context for this component
* @param leadingDebounce Sets the debounce setting for updates on this input.
* If leading, the component will update immediately and then debounce.
* Otherwise, the component will only update after the debounce interval. Defaults to true
*/
constructor(props: P, context: IFormContext, leadingDebounce?: boolean) {
super(props, context);
this.formContext = context;
this.debouncedSubmitValue = this._async.debounce(
this.formContext.submitValue, (
(this.props.debounceInterval !== null && this.props.debounceInterval !== undefined) ?
this.props.debounceInterval : DEFAULT_DEBOUNCE
),
{
leading: (leadingDebounce === null || leadingDebounce === undefined ? true : leadingDebounce)
});
if (props.control.Config)
this.ConfigProperties = props.control.Config as T;
else
this.ConfigProperties = {} as T;
this.ConfigProperties = Helper.getTranslatedObject(this.ConfigProperties, this.props.control.ConfigTranslation);
this.TranslatedTitle = Helper.getTranslatedProperty(TranslatedProperty.Title,this.props.control);
this.TranslatedInfo = Helper.getTranslatedProperty(TranslatedProperty.Info,this.props.control);
this.ControlClassName = this.props.control.CssClass ? this.props.control.CssClass : "";
}
/**
* React Lifecycle Method - Because this method uses state when rendering, the state must be
* updated when the prop's value updates
* @param nextProps The props that the component is receiving
*/
public componentWillReceiveProps(nextProps: P): void {
if (nextProps.control.Value !== this.props.control.Value && this.props.control.Value === this.state.currentValue) {
// If the props have changed and the previous props are equal to the current value, then we want to update the internal state value
this.setState((prevState: S) => {
prevState.currentValue = nextProps.control.Value;
return prevState;
});
}
}
/**
* Store the options to the state
* @param dataKey The databinder key to use
* @param data The Array with the Data.
* @param waitText The Wait Text for async loading
* @param isAsync True if async loading.
*/
@autobind
private storeOptions(dataKey: string, data: any[], waitText: string, isAsync: boolean): void {
let options:DataStoreEntry[] = this.state.dataStores;
if (!options)
options = [];
let entry = options.find(d => d.key == dataKey);
let refresh = false;
if (entry && !entry.data) entry.data = [];
if (entry && (!Helper.compareArrays(entry.data, data) || entry.onLoading != isAsync || entry.waitText != waitText)) {
refresh = true;
entry.data = data && data.length == 0 ? undefined : data;
entry.onLoading = isAsync;
entry.waitText = waitText;
}
else if (!entry) {
refresh = true;
options.push({
key: dataKey,
data: data && data.length == 0 ? undefined : data,
onLoading: isAsync,
waitText: waitText
});
}
if (refresh)
this.setState({dataStores: options});
}
/**
* Get the Error Message back after falidation the Value.
*/
@autobind
protected getErrorMessage() {
if (this.state.currentValue){
let result = this.doValidate();
return result.errorMessage;
}
return "";
}
/**
* Check the proprties and warn if the default are used.
* @param props The property Object to check.
*/
protected validateProps(props?: any): void {
if (props) {
if (props.ref) {
console.warn(this.props.inputKey + " 'ref' prop was specified and will be ignored");
}
if (props.id) {
console.warn(this.props.inputKey + " 'id' prop was specified and will be ignored");
}
if (props.name) {
console.warn(this.props.inputKey + " 'name' prop was specified and will be ignored");
}
if (props.label) {
console.warn(this.props.inputKey + " 'label' prop was specified and will be ignored");
}
if (props.onChange) {
console.warn(this.props.inputKey + " 'onChange' prop was specified and will be ignored");
}
}
}
/**
* Loads the data from the Store async with a filter teext
* If Async loading the return true
* @param configKey The Key from the datastore
* @param provider The Data Provider for Async Filter
* @param loadedFunction The funtion to call after data is loaded
* @param waitText The Waiting Text for async loading controls.
* @param control The sender Control that has the Filter Text
* @param filter The Filter Text.
*/
public loadDataFromStoreWithFilter(configKey:string, provider:IDataProviderFilterAsync, loadedFunction:DataLoadedFunction,
waitText: string, control:Control, filter:string) {
if (provider) {
let entry = this.state.dataStores ? this.state.dataStores.find(e => e.key == configKey) : undefined;
if (!entry) {
let waitText = this.commonFormater.formatMessage(LocalsCommon.loadData);
loadedFunction(configKey, undefined, waitText, true);
}
provider.retrieveFilteredListData(configKey,control,Helper.getLanguage(), filter).then((list) => {
let waitTextA = !list || list.length == 0 ?
this.commonFormater.formatMessage(LocalsCommon.nothingFound) : waitText;
loadedFunction(configKey, list, waitTextA, false);
});
}
}
/**
* Loads the data from the Store async or sync.
* If Async loading the return true
* @param dataStoreKey The Key from the datastore
* @param loadedFunction The funtion to call after data is loaded
* @param waitText The Waiting Text for async loading controls.
*/
public loadDataFromStore(dataStoreKey:string, loadedFunction:DataLoadedFunction, waitText: string): boolean {
let dataBinderAsync:Promise<any[]> = this.dataStore[dataStoreKey] as Promise<any[]>;
let dataBinder:any[] = this.dataStore[dataStoreKey] as any[];
if (dataBinderAsync && dataBinderAsync.then) {
let waitText = this.commonFormater.formatMessage(LocalsCommon.loadData);
loadedFunction(dataStoreKey, undefined, waitText, true);
dataBinderAsync.then((optionList) => {
loadedFunction(dataStoreKey, optionList, "", false);
});
return true;
}
else if (dataBinder) {
loadedFunction(dataStoreKey, dataBinder, waitText, false);
}
return false;
}
/**
* Get the Data options entry
* @param staticData Static data array from config.
* @param key DataStore key (config or databinder)
* @param defaultPlaceholder Default placholder text.
*/
protected getDataOptionEntry(staticData: any[], key:string, defaultPlaceholder: string): DataStoreEntry {
let optionsEntry:DataStoreEntry;
let controlKey = Helper.getControlKeyFromConfigKey(key);
if (controlKey && this.state.currentFilter) {
let provider = this.retrievFilterData[key] as IDataProviderFilterAsync;
let waitText = Helper.getPlaceHolderText(optionsEntry, defaultPlaceholder);
this.loadDataFromStoreWithFilter(key, provider, this.storeOptions, waitText, this.props.control, this.state.currentFilter);
let entry = this.state.dataStores ?
this.state.dataStores.find(e => e.key == key) : undefined;
return entry;
}
else {
if (!staticData && this.state.dataStores){
optionsEntry = this.state.dataStores.find(e => e.key == key);
}
if (optionsEntry) {
optionsEntry.waitText = Helper.getPlaceHolderText(optionsEntry, defaultPlaceholder);
}
else {
optionsEntry = {
key: "default",
data: staticData,
onLoading: false,
waitText: Helper.getPlaceHolderText(undefined, defaultPlaceholder)
}
}
if (this.props.control.ReadOnly)
optionsEntry.onLoading = true;
return optionsEntry;
}
}
/**
* Property for the Control. In case of UI Fabric Controls the UI Fabric Interface class can be used. This Config will overgiven to the
* Inner Control
*/
protected ConfigProperties:T;
/** Translation for the Title */
public TranslatedTitle?:string;
/** The cofigured class name or '' */
public ControlClassName:string;
/** True if the Required validator is set. */
public IsRequired(): boolean {
return this.props.control.FormValidators && this.props.control.FormValidators.find(v => v.ValidatorType == ValidatorTypes.Required) != undefined;
}
/** Translaiton for the Info */
public TranslatedInfo?:string;
/** Loaded data for this Control. */
protected dataStore:{ [key: string]: any[] | Promise<any[]> } = {}
/** The Asynchronous Filter Methods. */
protected retrievFilterData: { [key: string]: IDataBinderFilterAsync | IDataProviderFilterAsync } = {}
/** The Data Provier Service used for this control */
protected dataProviderService?: IDataProviderService;
/**
* Load the Databinder. Sync and Async are loaded. AsyncFilter is loade when user type an filter.
*/
public componentWillMount(): void {
this.formContext.mountInput(this);
let formData = this.formContext.formData;
let container = this.formContext.container;
if (this.props.dataBinder) {
for(let binder of this.props.dataBinder) {
let binderSync = binder.binderFunction as IDataBinder;
let binderAsync = binder.binderFunction as IDataBinderAsync;
let binderAsyncFilter = binder.binderFunction as IDataBinderFilterAsync;
if (binder.binderType == BinderType.Sync)
this.dataStore[binder.typeName] = binderSync.retrieveData(this.props.control, Helper.getLanguage());
else if (binder.binderType == BinderType.Async)
this.dataStore[binder.typeName] = binderAsync.retrieveData(this.props.control, Helper.getLanguage());
else if (binder.binderType == BinderType.AsyncFilter)
this.retrievFilterData[binder.typeName] = binderAsyncFilter;
}
}
if (this.props.control.DataProviderConfigKeys.length > 0 && container == undefined)
throw "No Data Service Container found"
if (this.props.control.DataProviderConfigKeys.length > 0) {
let dataProviders = container.get<IDataProviderCollection>(typesForInject.IDataProviderCollection);
if (dataProviders == undefined || dataProviders.providers.length == 0)
throw "No Data Service found"
for(let configKey of this.props.control.DataProviderConfigKeys) {
let keyParts = configKey.split(".");
this.dataProviderService = dataProviders.providers.find(p => p.providerServiceKey == keyParts[0])
if (this.dataProviderService == undefined)
throw "No DataProvider found with key " + keyParts[0] + " name is: " + dataProviders.providers[0].providerServiceKey;
this.dataProviderService.formData = formData;
this.dataProviderService.initialize();
let result = Helper.getControlKeyFromConfigKey(configKey);
if (result && this.dataProviderService.retrieveFilteredListData) {
let binderFuntion = this.dataProviderService as IDataProviderFilterAsync;
this.retrievFilterData[configKey] = binderFuntion;
}
else {
let providerConfigKey = Helper.getConfigKeyFromProviderKey(configKey);
this.dataProviderService.formData = formData;
this.dataProviderService.initialize();
this.dataStore[configKey] = this.dataProviderService.retrieveListData(providerConfigKey, this.props.control, Helper.getLanguage());
this.loadDataFromStore(configKey,this.storeOptions, "");
}
}
}
for(let binder of this.props.control.DataBinders) {
let key = this.props.inputKey + "_" + binder;
if (this.ConfigProperties[binder])
this.storeOptions(key, this.ConfigProperties[binder], "", false);
else {
this.loadDataFromStore(key,this.storeOptions, "");
}
}
}
/**
* Unmount the current control.
*/
public componentWillUnmount(): void {
this.debouncedSubmitValue.flush();
this.formContext.unmountInput(this);
}
/**
* Validate the input. By default, this function will run through all the validators and ensure they pass
*/
public doValidate(): IFormValidationResult {
const {
validators = []
} = this.props;
let validationResult: IFormValidationResult = {
isValid: true,
component: this
};
for (let validator of (validators as any)) {
let error: string = validator(this.state.currentValue);
if (error) {
validationResult.isValid = false;
validationResult.errorMessage = error;
return validationResult;
}
}
return validationResult;
}
/**
* Set the error state of this input
* @param errorMessage Message to set to the state.
*/
public setError(errorMessage?: string): void {
this.setState((prevState: S) => {
prevState.isValid = false;
prevState.currentError = errorMessage;
return prevState;
});
}
/**
* Clear any errors from this input
*/
public clearError(): void {
this.setState((prevState: S) => {
prevState.isValid = true;
prevState.currentError = undefined;
return prevState;
});
}
/**
* Set the current value of this input and validate it
* @param value The value to set
* @param validate True if the value should be validated.
*/
public setValue(value: any, validate?: boolean, skipSendValue?: boolean): void |
} | {
this.setState((prevState: S): S => {
this.props.control.Value = value;
prevState.currentValue = value;
return prevState;
},
() => {
this.debouncedSubmitValue(this, validate, skipSendValue);
}
);
} | identifier_body |
object-security-ui.js | if(!EURB.ObjSec) {
EURB.ObjSec = {};
}
EURB.ObjSec.getSharingWindows = function() {
//var CheckBoxClass = Ext.ux.form.TriCheckbox;
var CheckBoxClass = Ext.form.Checkbox;
var groupStore = new Ext.data.Store({
reader:new Ext.data.JsonReader({
id:'id'
,totalProperty:'totalCount'
,root:'data'
,fields:[
{name:'id', type:'int'}
,{name:'groupName', type:'string'}
]
})
,proxy:new Ext.data.HttpProxy({
url:EURB.ObjSec.groupSearchAction
,listeners: {
'exception' : EURB.proxyExceptionHandler
}
})
,autoLoad : false
});
var groupCols = [{
header:EURB.ObjSec.groupName
,id:'groupName'
,dataIndex:'groupName'
,width:40
,sortable:true
,editor:new Ext.form.TextField({
allowBlank:false
})
}];
var groupGrid = new Ext.grid.GridPanel({
ddGroup : 'groupGridDDGroup',
store : groupStore,
columns : groupCols,
enableDragDrop : true,
stripeRows : true,
autoExpandColumn : 'groupName'
//,title : EURB.ObjSec.availableGroups
});
var userStore = new Ext.data.Store({
reader:new Ext.data.JsonReader({
id:'id'
,totalProperty:'totalCount'
,root:'data'
,fields:[
{name: 'id', mapping : 'id', type:'int'},
{name: 'username', mapping : 'username', type:'string'}
]
})
,proxy:new Ext.data.HttpProxy({
url:EURB.ObjSec.userSearchAction
,listeners: {
'exception' : EURB.proxyExceptionHandler
}
})
,autoLoad : false
});
// Column Model shortcut array
var userCols = [
{ id : 'username', header: EURB.ObjSec.userName, width: 160, sortable: true, dataIndex: 'username'}
];
// declare the source Grid
var userGrid = new Ext.grid.GridPanel({
ddGroup : 'userGridDDGroup',
store : userStore,
columns : userCols,
enableDragDrop : true,
stripeRows : true,
autoExpandColumn : 'username'
//,title : EURB.ObjSec.availableUsers
});
var authoritiesStore = new Ext.data.Store({
reader:new Ext.data.JsonReader({
id:'id'
,totalProperty:'aclEntryListTotalCount'
,root:'aclEntryList'
,fields:[
{name: 'id', type: 'number'},
{name: 'type', type: 'number'},
{name: 'name', type: 'string'},
{name: 'view', type: 'boolean'},
{name: 'edit', type: 'boolean'},
{name: 'del', type: 'boolean'},
{name: 'execute', type: 'boolean'},
{name: 'sharing', type: 'boolean'}
]
})
,proxy:new Ext.data.HttpProxy({
url:EURB.ObjSec.objectAuthoritiesSearchAction
,listeners: {
'exception' : function(proxy, type, action, options, res) {
Ext.Msg.show({
title: Ext.MessageBox.title.error,
msg: Ext.util.JSON.decode(res.responseText).message,
icon: Ext.MessageBox.ERROR,
buttons: Ext.Msg.OK
});
window.hide();
}
}
})
,baseParams: {
data: ''
}
,remoteSort:false
,autoLoad:false
});
var viewCheckColumn = new Ext.grid.CheckColumn({
header:EURB.ObjSec.authoritiesView
,id:'view'
,dataIndex:'view'
,editor:new CheckBoxClass()
,align:'center'
});
var editCheckColumn = new Ext.grid.CheckColumn({
header:EURB.ObjSec.authoritiesEdit
,id:'edit'
,dataIndex:'edit'
,editor:new CheckBoxClass()
,align:'center'
});
var delCheckColumn = new Ext.grid.CheckColumn({
header:EURB.ObjSec.authoritiesDel
,id:'del'
,dataIndex:'del'
,editor:new CheckBoxClass()
,align:'center'
});
var executeCheckColumn = new Ext.grid.CheckColumn({
header:EURB.ObjSec.authoritiesExecute
,id:'execute'
,dataIndex:'execute' | var sharingCheckColumn = new Ext.grid.CheckColumn({
header:EURB.ObjSec.authoritiesSharing
,id:'sharing'
,dataIndex:'sharing'
,editor:new CheckBoxClass()
,align:'center'
});
var authoritiesColModel = new Ext.grid.ColumnModel({
defaults: {
sortable: true
,width:20
},
columns: [{
header: EURB.ObjSec.groupOrUserName
,id:'name'
,dataIndex:'name'
,width:40
,renderer: function(value, metaData, record, rowIndex, colIndex, store) {
if(record.get('type') == 1) {
return '<div class="ux-row-action-item icon-group" style="float: right"></div> ' + value;
} else {
return '<div class="ux-row-action-item icon-user" style="float: right"></div> ' + value;
}
}
},viewCheckColumn,editCheckColumn,delCheckColumn,executeCheckColumn,sharingCheckColumn]
});
var AuthoritiesGrid = Ext.extend(Ext.grid.GridPanel, {
ddGroup : 'authGridDDGroup',
enableDragDrop : true,
// defaults - can be changed from outside
region:'center'
,layout:'fit'
,border:true
,stateful:false
,idName:'id'
//,title: EURB.appMenu.authorities
,initComponent:function() {
// hard coded - cannot be changed from outside
var config = {
store: authoritiesStore
,cm: authoritiesColModel
,viewConfig: {forceFit:true}
,plugins: [viewCheckColumn,editCheckColumn,delCheckColumn,executeCheckColumn,sharingCheckColumn]
};
// apply config
Ext.apply(this, config);
Ext.apply(this.initialConfig, config);
// call parent
AuthoritiesGrid.superclass.initComponent.apply(this, arguments);
}
,onRender:function() {
// call parent
AuthoritiesGrid.superclass.onRender.apply(this, arguments);
// load store
this.store.load();
}
,afterRender:function() {
AuthoritiesGrid.superclass.afterRender.apply(this, arguments);
//this.getBottomToolbar().add({text:'A test button',iconCls:'icon-info'});
}
,commitChanges:function() {
/*var data = [];
this.store.each(function(r) {
data.push(r.data);
}, this);
if(!data.length) {
return;
}
var o = {
url:EURB.Authorities.storeAction
,method:'post'
,callback:this.requestCallback
,scope:this
,params:{
cmd:'storeData',
data:Ext.encode(data),
sid:EURB.Authorities.selectedSID
}
};
Ext.Ajax.request(o);*/
}
,requestCallback:function(options, success, response) {
if(true !== success) {
this.showError(response.responseText);
return false;
}
try {
var o = Ext.decode(response.responseText);
}
catch(e) {
this.showError(response.responseText, EURB.unableToDecodeJSON);
return false;
}
if(true !== o.success) {
this.showError(o.error || o.message || EURB.unknownError);
return false;
}
switch(options.params.cmd) {
default:
this.store.commitChanges();
return true;
break;
}
}
,showError:EURB.showError
,selectAllRecords: function() {
/*this.store.each(function(rec) {
var catId = rec.get('id');
if(!rec.get('viewlist') && EURB.Authorities.isEditableCell(catId, VIEWLIST)) {
rec.set('viewlist',true);
}
if(!rec.get('view') && EURB.Authorities.isEditableCell(catId, VIEW)) {
rec.set('view',true);
}
if(!rec.get('create') && EURB.Authorities.isEditableCell(catId, CREATE)) {
rec.set('create',true);
}
if(!rec.get('edit') && EURB.Authorities.isEditableCell(catId, EDIT)) {
rec.set('edit',true);
}
if(!rec.get('del') && EURB.Authorities.isEditableCell(catId, DEL)) {
rec.set('del',true);
}
if(!rec.get('execute') && EURB.Authorities.isEditableCell(catId, EXECUTE)) {
rec.set('execute',true);
}
if(!rec.get('sharing') && EURB.Authorities.isEditableCell(catId, SHARING)) {
rec.set('sharing',true);
}
return true;
}, this);*/
}
,selectNoneRecords: function() {
/*this.store.each(function(rec) {
var catId = rec.get('id');
if(rec.get('viewlist') && EURB.Authorities.isEditableCell(catId, VIEWLIST)) {
rec.set('viewlist',false);
}
if(rec.get('view') && EURB.Authorities.isEditableCell(catId, VIEW)) {
rec.set('view',false);
}
if(rec.get('create') && EURB.Authorities.isEditableCell(catId, CREATE)) {
rec.set('create',false);
}
if(rec.get('edit') && EURB.Authorities.isEditableCell(catId, EDIT)) {
rec.set('edit',false);
}
if(rec.get('del') && EURB.Authorities.isEditableCell(catId, DEL)) {
rec.set('del',false);
}
if(rec.get('execute') && EURB.Authorities.isEditableCell(catId, EXECUTE)) {
rec.set('execute',false);
}
if(rec.get('sharing') && EURB.Authorities.isEditableCell(catId, SHARING)) {
rec.set('sharing',false);
}
return true;
}, this);*/
}
});
var authGrid = new AuthoritiesGrid();
var window = new Ext.Window({
modal: true,
title: EURB.ObjSec.share,
width: 600,
height:350,
minWidth: 300,
closeAction: 'hide',
minHeight: 150,
layout: 'fit',
plain:true,
bodyStyle:'padding:5px;',
buttonAlign:'center',
items: new Ext.Panel({
layout: 'border',
items: [new Ext.Panel({
split:true,
region:'south',
height: 100,
layout : 'hbox',
defaults : { flex : 1 }, //auto stretch
layoutConfig : { align : 'stretch' },
items : [
userGrid,
groupGrid
]
//,bbar: [EURB.Group.Usr.userSelectDragDropHelp]
}),authGrid]
}),
buttons: [{
text:Ext.MessageBox.buttonText.ok,
handler: function() {
var data = [];
authoritiesStore.each(function(r) {
data.push(r.data);
}, this);
var o = {
url:EURB.ObjSec.objectAuthoritiesStoreAction
,method:'post'
,callback: function(options, success, response){
if(authGrid.requestCallback(options, success, response)) {
window.hide();
}
}
,scope:authGrid
,params:{
cmd:'storeData',
permData:Ext.encode(data),
objData:[window.currentObjectId]
}
};
Ext.Ajax.request(o);
}
//, tabIndex: 1
},{
text: Ext.MessageBox.buttonText.cancel,
handler: function(){
window.hide();
}
//, tabIndex: 2
}],
listeners: {
hide: function(thiz) {
authoritiesStore.removeAll();
}
}
});
window.render(Ext.getBody());
var notifyDropIntoAuthGrid = function(ddSource, e, data) {
var records = ddSource.dragData.selections;
var sidArr = [];
var username;
Ext.each(records, function(r, i) {
username = r.get('username');
if(username) { //it is a user
sidArr.push(new authoritiesStore.recordType({
id: r.get('id')
,type: 2
,name: username
,view: false
,edit: false
,del: false
,execute: false
,sharing: false
}, r.get('id')));
} else { //it is a group
sidArr.push(new authoritiesStore.recordType({
id: r.get('id')
,type: 1
,name: r.get('groupName')
,view: false
,edit: false
,del: false
,execute: false
,sharing: false
}, r.get('id')));
}
},this);
userStore.remove(records);
groupStore.remove(records);
authoritiesStore.add(sidArr);
return true;
};
var notifyDropIntoUserOrGroupGrid = function(ddSource, e, data) {
var records = ddSource.dragData.selections;
var usersArr = [];
var groupsArr = [];
var type;
Ext.each(records, function(r, i) {
type = r.get('type');
if(type == 2) { //it is a user
usersArr.push(new userStore.recordType({
id: r.get('id')
,username: r.get('name')
}, r.get('id')));
} else { // if(type == 1)//it is a group
groupsArr.push(new groupStore.recordType({
id: r.get('id')
,groupName: r.get('name')
}, r.get('id')));
}
},this);
authoritiesStore.remove(records);
userStore.add(usersArr);
groupStore.add(groupsArr);
return true;
};
//drop a user to authGrid
var authGridDropTargetEl = authGrid.getView().scroller.dom;
var authGridDropTarget = new Ext.dd.DropTarget(authGridDropTargetEl, {
ddGroup : 'userGridDDGroup',
notifyDrop : notifyDropIntoAuthGrid
});
var authGridDropTarget = new Ext.dd.DropTarget(authGridDropTargetEl, {
ddGroup : 'groupGridDDGroup',
notifyDrop : notifyDropIntoAuthGrid
});
var groupGridDropTargetEl = groupGrid.getView().scroller.dom;
var groupGridDropTarget = new Ext.dd.DropTarget(groupGridDropTargetEl, {
ddGroup : 'authGridDDGroup',
notifyDrop : notifyDropIntoUserOrGroupGrid
});
var userGridDropTargetEl = userGrid.getView().scroller.dom;
var userGridDropTarget = new Ext.dd.DropTarget(userGridDropTargetEl, {
ddGroup : 'authGridDDGroup',
notifyDrop : notifyDropIntoUserOrGroupGrid
});
window.onShowForARecord = function(identifier, title, objectType) {
this.setTitle(title);
this.currentObjectType = objectType;
this.currentObjectId = identifier;
authoritiesStore.load({
params:{
data: identifier
},
callback: function(){
userStore.load({
callback: function() {
groupStore.load({
callback: function() {
var usersArr = [];
var groupsArr = [];
authoritiesStore.each(function(rec){
var theId = rec.get('id');
var theName = rec.get('name');
if(rec.get('type') == 2) {//user
userStore.each(function(r){
if(r.get('id') === theId) {
usersArr.push(r);
}
});
} else {//if(theId.get('type') == 1) { //group
groupStore.each(function(r){
if(r.get('id') === theId) {
groupsArr.push(r);
}
});
}
});
userStore.remove(usersArr);
groupStore.remove(groupsArr);
}
});
}
});
}
});
};
return window;
} | ,width:20
,editor:new CheckBoxClass()
,align:'center'
}); | random_line_split |
object-security-ui.js | if(!EURB.ObjSec) {
EURB.ObjSec = {};
}
EURB.ObjSec.getSharingWindows = function() {
//var CheckBoxClass = Ext.ux.form.TriCheckbox;
var CheckBoxClass = Ext.form.Checkbox;
var groupStore = new Ext.data.Store({
reader:new Ext.data.JsonReader({
id:'id'
,totalProperty:'totalCount'
,root:'data'
,fields:[
{name:'id', type:'int'}
,{name:'groupName', type:'string'}
]
})
,proxy:new Ext.data.HttpProxy({
url:EURB.ObjSec.groupSearchAction
,listeners: {
'exception' : EURB.proxyExceptionHandler
}
})
,autoLoad : false
});
var groupCols = [{
header:EURB.ObjSec.groupName
,id:'groupName'
,dataIndex:'groupName'
,width:40
,sortable:true
,editor:new Ext.form.TextField({
allowBlank:false
})
}];
var groupGrid = new Ext.grid.GridPanel({
ddGroup : 'groupGridDDGroup',
store : groupStore,
columns : groupCols,
enableDragDrop : true,
stripeRows : true,
autoExpandColumn : 'groupName'
//,title : EURB.ObjSec.availableGroups
});
var userStore = new Ext.data.Store({
reader:new Ext.data.JsonReader({
id:'id'
,totalProperty:'totalCount'
,root:'data'
,fields:[
{name: 'id', mapping : 'id', type:'int'},
{name: 'username', mapping : 'username', type:'string'}
]
})
,proxy:new Ext.data.HttpProxy({
url:EURB.ObjSec.userSearchAction
,listeners: {
'exception' : EURB.proxyExceptionHandler
}
})
,autoLoad : false
});
// Column Model shortcut array
var userCols = [
{ id : 'username', header: EURB.ObjSec.userName, width: 160, sortable: true, dataIndex: 'username'}
];
// declare the source Grid
var userGrid = new Ext.grid.GridPanel({
ddGroup : 'userGridDDGroup',
store : userStore,
columns : userCols,
enableDragDrop : true,
stripeRows : true,
autoExpandColumn : 'username'
//,title : EURB.ObjSec.availableUsers
});
var authoritiesStore = new Ext.data.Store({
reader:new Ext.data.JsonReader({
id:'id'
,totalProperty:'aclEntryListTotalCount'
,root:'aclEntryList'
,fields:[
{name: 'id', type: 'number'},
{name: 'type', type: 'number'},
{name: 'name', type: 'string'},
{name: 'view', type: 'boolean'},
{name: 'edit', type: 'boolean'},
{name: 'del', type: 'boolean'},
{name: 'execute', type: 'boolean'},
{name: 'sharing', type: 'boolean'}
]
})
,proxy:new Ext.data.HttpProxy({
url:EURB.ObjSec.objectAuthoritiesSearchAction
,listeners: {
'exception' : function(proxy, type, action, options, res) {
Ext.Msg.show({
title: Ext.MessageBox.title.error,
msg: Ext.util.JSON.decode(res.responseText).message,
icon: Ext.MessageBox.ERROR,
buttons: Ext.Msg.OK
});
window.hide();
}
}
})
,baseParams: {
data: ''
}
,remoteSort:false
,autoLoad:false
});
var viewCheckColumn = new Ext.grid.CheckColumn({
header:EURB.ObjSec.authoritiesView
,id:'view'
,dataIndex:'view'
,editor:new CheckBoxClass()
,align:'center'
});
var editCheckColumn = new Ext.grid.CheckColumn({
header:EURB.ObjSec.authoritiesEdit
,id:'edit'
,dataIndex:'edit'
,editor:new CheckBoxClass()
,align:'center'
});
var delCheckColumn = new Ext.grid.CheckColumn({
header:EURB.ObjSec.authoritiesDel
,id:'del'
,dataIndex:'del'
,editor:new CheckBoxClass()
,align:'center'
});
var executeCheckColumn = new Ext.grid.CheckColumn({
header:EURB.ObjSec.authoritiesExecute
,id:'execute'
,dataIndex:'execute'
,width:20
,editor:new CheckBoxClass()
,align:'center'
});
var sharingCheckColumn = new Ext.grid.CheckColumn({
header:EURB.ObjSec.authoritiesSharing
,id:'sharing'
,dataIndex:'sharing'
,editor:new CheckBoxClass()
,align:'center'
});
var authoritiesColModel = new Ext.grid.ColumnModel({
defaults: {
sortable: true
,width:20
},
columns: [{
header: EURB.ObjSec.groupOrUserName
,id:'name'
,dataIndex:'name'
,width:40
,renderer: function(value, metaData, record, rowIndex, colIndex, store) {
if(record.get('type') == 1) {
return '<div class="ux-row-action-item icon-group" style="float: right"></div> ' + value;
} else {
return '<div class="ux-row-action-item icon-user" style="float: right"></div> ' + value;
}
}
},viewCheckColumn,editCheckColumn,delCheckColumn,executeCheckColumn,sharingCheckColumn]
});
var AuthoritiesGrid = Ext.extend(Ext.grid.GridPanel, {
ddGroup : 'authGridDDGroup',
enableDragDrop : true,
// defaults - can be changed from outside
region:'center'
,layout:'fit'
,border:true
,stateful:false
,idName:'id'
//,title: EURB.appMenu.authorities
,initComponent:function() {
// hard coded - cannot be changed from outside
var config = {
store: authoritiesStore
,cm: authoritiesColModel
,viewConfig: {forceFit:true}
,plugins: [viewCheckColumn,editCheckColumn,delCheckColumn,executeCheckColumn,sharingCheckColumn]
};
// apply config
Ext.apply(this, config);
Ext.apply(this.initialConfig, config);
// call parent
AuthoritiesGrid.superclass.initComponent.apply(this, arguments);
}
,onRender:function() {
// call parent
AuthoritiesGrid.superclass.onRender.apply(this, arguments);
// load store
this.store.load();
}
,afterRender:function() {
AuthoritiesGrid.superclass.afterRender.apply(this, arguments);
//this.getBottomToolbar().add({text:'A test button',iconCls:'icon-info'});
}
,commitChanges:function() {
/*var data = [];
this.store.each(function(r) {
data.push(r.data);
}, this);
if(!data.length) {
return;
}
var o = {
url:EURB.Authorities.storeAction
,method:'post'
,callback:this.requestCallback
,scope:this
,params:{
cmd:'storeData',
data:Ext.encode(data),
sid:EURB.Authorities.selectedSID
}
};
Ext.Ajax.request(o);*/
}
,requestCallback:function(options, success, response) {
if(true !== success) {
this.showError(response.responseText);
return false;
}
try {
var o = Ext.decode(response.responseText);
}
catch(e) {
this.showError(response.responseText, EURB.unableToDecodeJSON);
return false;
}
if(true !== o.success) {
this.showError(o.error || o.message || EURB.unknownError);
return false;
}
switch(options.params.cmd) {
default:
this.store.commitChanges();
return true;
break;
}
}
,showError:EURB.showError
,selectAllRecords: function() {
/*this.store.each(function(rec) {
var catId = rec.get('id');
if(!rec.get('viewlist') && EURB.Authorities.isEditableCell(catId, VIEWLIST)) {
rec.set('viewlist',true);
}
if(!rec.get('view') && EURB.Authorities.isEditableCell(catId, VIEW)) {
rec.set('view',true);
}
if(!rec.get('create') && EURB.Authorities.isEditableCell(catId, CREATE)) {
rec.set('create',true);
}
if(!rec.get('edit') && EURB.Authorities.isEditableCell(catId, EDIT)) {
rec.set('edit',true);
}
if(!rec.get('del') && EURB.Authorities.isEditableCell(catId, DEL)) {
rec.set('del',true);
}
if(!rec.get('execute') && EURB.Authorities.isEditableCell(catId, EXECUTE)) {
rec.set('execute',true);
}
if(!rec.get('sharing') && EURB.Authorities.isEditableCell(catId, SHARING)) {
rec.set('sharing',true);
}
return true;
}, this);*/
}
,selectNoneRecords: function() {
/*this.store.each(function(rec) {
var catId = rec.get('id');
if(rec.get('viewlist') && EURB.Authorities.isEditableCell(catId, VIEWLIST)) {
rec.set('viewlist',false);
}
if(rec.get('view') && EURB.Authorities.isEditableCell(catId, VIEW)) {
rec.set('view',false);
}
if(rec.get('create') && EURB.Authorities.isEditableCell(catId, CREATE)) {
rec.set('create',false);
}
if(rec.get('edit') && EURB.Authorities.isEditableCell(catId, EDIT)) {
rec.set('edit',false);
}
if(rec.get('del') && EURB.Authorities.isEditableCell(catId, DEL)) {
rec.set('del',false);
}
if(rec.get('execute') && EURB.Authorities.isEditableCell(catId, EXECUTE)) {
rec.set('execute',false);
}
if(rec.get('sharing') && EURB.Authorities.isEditableCell(catId, SHARING)) {
rec.set('sharing',false);
}
return true;
}, this);*/
}
});
var authGrid = new AuthoritiesGrid();
var window = new Ext.Window({
modal: true,
title: EURB.ObjSec.share,
width: 600,
height:350,
minWidth: 300,
closeAction: 'hide',
minHeight: 150,
layout: 'fit',
plain:true,
bodyStyle:'padding:5px;',
buttonAlign:'center',
items: new Ext.Panel({
layout: 'border',
items: [new Ext.Panel({
split:true,
region:'south',
height: 100,
layout : 'hbox',
defaults : { flex : 1 }, //auto stretch
layoutConfig : { align : 'stretch' },
items : [
userGrid,
groupGrid
]
//,bbar: [EURB.Group.Usr.userSelectDragDropHelp]
}),authGrid]
}),
buttons: [{
text:Ext.MessageBox.buttonText.ok,
handler: function() {
var data = [];
authoritiesStore.each(function(r) {
data.push(r.data);
}, this);
var o = {
url:EURB.ObjSec.objectAuthoritiesStoreAction
,method:'post'
,callback: function(options, success, response){
if(authGrid.requestCallback(options, success, response)) {
window.hide();
}
}
,scope:authGrid
,params:{
cmd:'storeData',
permData:Ext.encode(data),
objData:[window.currentObjectId]
}
};
Ext.Ajax.request(o);
}
//, tabIndex: 1
},{
text: Ext.MessageBox.buttonText.cancel,
handler: function(){
window.hide();
}
//, tabIndex: 2
}],
listeners: {
hide: function(thiz) {
authoritiesStore.removeAll();
}
}
});
window.render(Ext.getBody());
var notifyDropIntoAuthGrid = function(ddSource, e, data) {
var records = ddSource.dragData.selections;
var sidArr = [];
var username;
Ext.each(records, function(r, i) {
username = r.get('username');
if(username) { //it is a user
sidArr.push(new authoritiesStore.recordType({
id: r.get('id')
,type: 2
,name: username
,view: false
,edit: false
,del: false
,execute: false
,sharing: false
}, r.get('id')));
} else { //it is a group
sidArr.push(new authoritiesStore.recordType({
id: r.get('id')
,type: 1
,name: r.get('groupName')
,view: false
,edit: false
,del: false
,execute: false
,sharing: false
}, r.get('id')));
}
},this);
userStore.remove(records);
groupStore.remove(records);
authoritiesStore.add(sidArr);
return true;
};
var notifyDropIntoUserOrGroupGrid = function(ddSource, e, data) {
var records = ddSource.dragData.selections;
var usersArr = [];
var groupsArr = [];
var type;
Ext.each(records, function(r, i) {
type = r.get('type');
if(type == 2) { //it is a user
usersArr.push(new userStore.recordType({
id: r.get('id')
,username: r.get('name')
}, r.get('id')));
} else { // if(type == 1)//it is a group
groupsArr.push(new groupStore.recordType({
id: r.get('id')
,groupName: r.get('name')
}, r.get('id')));
}
},this);
authoritiesStore.remove(records);
userStore.add(usersArr);
groupStore.add(groupsArr);
return true;
};
//drop a user to authGrid
var authGridDropTargetEl = authGrid.getView().scroller.dom;
var authGridDropTarget = new Ext.dd.DropTarget(authGridDropTargetEl, {
ddGroup : 'userGridDDGroup',
notifyDrop : notifyDropIntoAuthGrid
});
var authGridDropTarget = new Ext.dd.DropTarget(authGridDropTargetEl, {
ddGroup : 'groupGridDDGroup',
notifyDrop : notifyDropIntoAuthGrid
});
var groupGridDropTargetEl = groupGrid.getView().scroller.dom;
var groupGridDropTarget = new Ext.dd.DropTarget(groupGridDropTargetEl, {
ddGroup : 'authGridDDGroup',
notifyDrop : notifyDropIntoUserOrGroupGrid
});
var userGridDropTargetEl = userGrid.getView().scroller.dom;
var userGridDropTarget = new Ext.dd.DropTarget(userGridDropTargetEl, {
ddGroup : 'authGridDDGroup',
notifyDrop : notifyDropIntoUserOrGroupGrid
});
window.onShowForARecord = function(identifier, title, objectType) {
this.setTitle(title);
this.currentObjectType = objectType;
this.currentObjectId = identifier;
authoritiesStore.load({
params:{
data: identifier
},
callback: function(){
userStore.load({
callback: function() {
groupStore.load({
callback: function() {
var usersArr = [];
var groupsArr = [];
authoritiesStore.each(function(rec){
var theId = rec.get('id');
var theName = rec.get('name');
if(rec.get('type') == 2) | else {//if(theId.get('type') == 1) { //group
groupStore.each(function(r){
if(r.get('id') === theId) {
groupsArr.push(r);
}
});
}
});
userStore.remove(usersArr);
groupStore.remove(groupsArr);
}
});
}
});
}
});
};
return window;
} | {//user
userStore.each(function(r){
if(r.get('id') === theId) {
usersArr.push(r);
}
});
} | conditional_block |
1_compute_MASTER.py | #!/bin/env python
######################################################################
''' COMPUTE_MASTER.py
=========================
AIM: Cycle through the observability maps and execute stray light computations
Temporal resolution is 60 seconds and orbit step size is given by the max. error
INPUT: files: - observability maps in form of minutes,ra,dec [rad] in a file orbit_<#>.dat in raw_maps_<orbit_id>/
- in resources/ : minute_tables__<orbit_id>, moon__<orbit_id>, sun_<orbit_id>
- The complete package of the stray_light.f code (including the orbit file)
variables: see section PARAMETERS (see below)
OUTPUT: in height_part/ (see PARAMETERS): one file per minute with ra,dec,flux [ra,ra, ph/(s.px)]
CMD: python (name_file).py
ISSUES: <NONE KNOWN>
REQUIRES: standard python libraries, specific libraries in resources/
REMARKS: <none>
'''
######################################################################
# DEFINITIONS AND INCLUDES
import numpy as np
import subprocess
import os
import sys
import time
import resources.constants as const
from resources.routines import *
from resources.TimeStepping import *
######################################################################
# PARAMETERS
# km (only required to compute orbit's period)
apogee = 700
perigee= 700
# orbit id
orbit_id = 'ORBIT_ID'
# 1, 2 or 3 (or, ...) into which folder ? Scheme is orbit_id_part/
part = 1
# Orbit to start by:
orbit_ini = 1
# Last orbit to compute (see # of orbits in one year in parameters.py)
orbit_end = 1331
# Defintions of the steps
orbit_step = 10
# If after a given computation, the precision is not good enough, the step of the orbit is adapted.
adaptative_timestep = True
# Minimum orbital step
min_step = 5
# must be in straylight_xxx/ORBIT
file_orbit = 'orbit_%s.dat' % orbit_id
file_sun = 'sun_%s.dat' % orbit_id
folder = 'absolute_map_%s' % orbit_id
# Standard values are:
# folder_flux = '%d_%d' % (orbit_id,part)
# file_flux = 'flux_'
folder_flux = '%s_%d' % (orbit_id,part)
file_flux = 'flux_'
# Tolerance of the maximal magnitude difference (5% = 0.05)
p = 0.1
# Recalculate previously computed minutes ? (Recommendation: False -- time consuming)
override = False
# Save the date to a file for straylight.f (used only if the log_non_nominal is set to .true.)
save_date_JD = True
# Recompiles the Fortran code to ensure that the correct parameters and constants are loaded.
# Make sure "compile" file CHMOD is set to 777. (Recommendation: True -- very fast)
recompile = True
monitor_angle_usage = False
######################################################################
# INITIALISATION
# path to stray light folder (containting CODE, OUTPUT, INPUT, ORBIT)
path = 'straylight_%s_%d' % (orbit_id,part)
# Says hi
print '\nSALSA v%s' % const.version
print 'Computing max. every %d orbits for orbit ID %s' % (orbit_step,orbit_id)
print '------------------------------------------------------'
start = time.time()
# Loads the heavy tables (orbit, sun)
sys.stdout.write("Loading trajectory file...\t\t")
sys.stdout.flush()
try:
orbit = np.loadtxt(path+'/ORBIT/'+file_orbit, delimiter='\t')
except ValueError:
orbit = np.loadtxt(path+'/ORBIT/'+file_orbit, delimiter=' ')
print "Done."
sys.stdout.write("Loading Sun position file...\t\t")
sys.stdout.flush()
try:
sun = np.loadtxt(path+'/ORBIT/'+file_sun, delimiter='\t')
except ValueError:
sun = np.loadtxt(path+'/ORBIT/'+file_sun, delimiter=' ')
print "Done."
# initialise a few variables
total_targets = 0
map_obstot = np.empty(3)
is_first_run = True
orig_dir = os.getcwd()
# Computes the period of the orbit given the altitude
period = altitude2period(apogee,perigee)
# Recompiles the code
if recompile:
sys.stdout.write("Compilation of the code...\t\t")
sys.stdout.flush()
os.chdir(os.path.join(os.path.abspath(sys.path[0]), '%s/CODE/' % path))
os.system("./compile")
os.chdir(orig_dir)
print "Done."
######################################################################
# LOOPING OVER ALL GIVEN MAPS
orbit_current = orbit_ini
# Loop on the orbits
preceeding = orbit_current
former_step = orbit_step
while (orbit_current <= orbit_end):
print '\n---------------- ORBIT %d --- ID %s ----------------------------' % (orbit_current, orbit_id)
# Get the initial and final time
start_minute = time.time()
t_ini, t_end, a_ini, a_end = orbit2times(orbit_current,orbit_id)
minute = a_ini
# Load the observability map for the orbit
try:
map_obstot = load_map(orbit_current,folder)
except IOError:
raise IOError("Critical Error: Could not load orbit %d" % orbit_current)
while (minute <= a_end):
if not override:
try:
# Try to load the fluxes for a given minute (minute goes from 0 to period whereas a_ini is the absolute time from 0:0:0.0 1/1/2018 in min
ra, dec, S_sl = load_flux_file(minute, file_flux,folder=folder_flux)
minute += 1
continue
# If there is an error while loading the file, compute it!
except IOError:
pass
sys.stdout.write( '\rComputing minute: %3d\tAbsolute: %6d\t' % ((minute-a_ini),minute) )
sys.stdout.flush()
if save_date_JD:
# compute the julian date for a given minute and saves it to a file read by stray_light.f
JD = minutes_2_JD(minute)
f = open('%s/INPUT/date_JD.dat' % path,'w')
f.write(str(JD)+'d0\n')
f.close()
# find the position of the satellite in the orbit and compute RA, DEC of the sat with respect to the Earth
id_sat = find_nearest(orbit[:,0],minute)
x = orbit[id_sat,1]
y = orbit[id_sat,2]
z = orbit[id_sat,3]
r = R_3d(x,y,z)
ra_sat = rev( right_ascension(x,y) )
dec_sat= declination(x,y,z)
np.savetxt('%s/INPUT/coord_sat.dat' % path,[x,y,z,ra_sat,dec_sat,r],delimiter='\n')
# find the position of the sun in the orbit and compute RA, DEC of the sat with respect to the Earth
id_sun = find_nearest(sun[:,0],minute)
xs = sun[id_sun,1]
ys = sun[id_sun,2]
zs = sun[id_sun,3]
rs = R_3d(xs,ys,zs)
ra_sun = rev( right_ascension(xs,ys) )
dec_sun= declination(xs,ys,zs)
np.savetxt('%s/INPUT/coord_sun.dat' % path,[xs,ys,zs,ra_sun,dec_sun,rs],delimiter='\n')
# select only none zero value with resilience to rounding errors
# See resources/routines.py
map_obs = slice_map(map_obstot, minute)
# Count the number of points for that particular minute
total_targets += np.shape(map_obs)[0]
if np.shape(map_obs)[0] > 0 :
# Execute the stray light code only if there is more than 0 target !
sys.stdout.write(str(np.shape(map_obs)[0])+' points\t')
sys.stdout.flush()
# Save the targets and the number of line to two separate files to optimise Fortran read.
np.savetxt('%s/INPUT/coord_targets.dat' % path, map_obs[:,1:3],
delimiter=' ', fmt='%3.3f')
f = open('%s/INPUT/number_of_targets.dat' % path, 'w')
f.write(str(np.shape(map_obs)[0])+'\n')
f.close()
os.chdir(os.path.join(os.path.abspath(sys.path[0]), '%s/CODE/' % path))
res=subprocess.call(["./stray_light"])
if not res == 0:
raise RuntimeError("Critical Error in stray light code")
# Move the files to the right ouput folder
os.chdir(orig_dir)
res=subprocess.call(["mv", "%s/OUTPUT/straylight.out" % path,
'%s/%s%d.dat' % (folder_flux,file_flux,minute)])
if not res == 0:
raise RuntimeError("Critical Error: impossible to move output file")
else:
sys.stdout.write('No points. ')
sys.stdout.flush()
minute += 1
end_minute = time.time()
elapsed_time = round( end_minute - start_minute , 1)
# end of orbit
message = '\r%3.1f minutes to compute %d minutes of the orbit no %d / %d \n' % (elapsed_time/60, t_end+1, orbit_current, orbit_end)
sys.stdout.write(message)
sys.stdout.flush()
if monitor_angle_usage:
res=subprocess.call(["mv", "%s/OUTPUT/angle_usage.out" % path,'%s/angles_%d.dat' % (folder_flux,orbit_current)])
if not res == 0:
raise RuntimeError("Critical Error: impossible to move angle usage file")
# Computes the differences to the reference orbit
if adaptative_timestep and orbit_current>orbit_ini:
# See details of compare_two_orbits in resources/routines
# tries with minute 0 of reference is same as current
pp = compare_two_orbits(preceeding, orbit_current, orbit_id, p=p, file_flux=file_flux, folder=folder_flux)
pp_old = pp
# tries with minute 0 of reference is same as current+1
if pp > p :
pp = compare_two_orbits(preceeding, orbit_current, orbit_id, p=p, file_flux=file_flux, folder=folder_flux, shift = 1)
# tries with minute 0 of reference is same as current+2
if pp > p :
pp = compare_two_orbits(preceeding, orbit_current, orbit_id, p=p, file_flux=file_flux, folder=folder_flux, shift = 2)
if pp > p :
# Tried, still bad
status = False
pp = pp_old
else : status = True
print 'Precision former step:', hilite( str(np.round(pp*100,2)), status, False),'%'
# if the test worked, continue with optimum step size or increase it
if pp < p:
preceeding = orbit_current
if former_step == orbit_step:
current_step = orbit_step
else:
current_step = int(former_step*2)
if current_step > orbit_step : |
else : print 'Next adaptative step of :', current_step, 'orbit(s)'
orbit_current += current_step
former_step = current_step
# if the test failed, reduce the step size
else :
current_step = int(former_step/2)
if current_step < min_step: current_step = min_step
if current_step == former_step and current_step ==min_step :
message = 'Could not reach precision (reference orbit=%s, current=%s)' % (preceeding, orbit_current)
print hilite(message, False, True)
preceeding = orbit_current
orbit_current += min_step
else:
orbit_current += current_step - former_step
former_step = current_step
print 'Next adaptative step of :', current_step, 'orbit(s)'
# Ensure that we do not get stuck somehow.
else: orbit_current += orbit_step
######################################################################
# OUTPUTS THE FINAL REMARK
end = time.time()
elapsed_time = round((end-start)/60.,1)
print 'Stray light calculation carried out in '+ str(elapsed_time) +' min for '+ str(total_targets)+ ' points. Have nice day!'
| current_step = orbit_step | conditional_block |
1_compute_MASTER.py | #!/bin/env python
######################################################################
''' COMPUTE_MASTER.py
=========================
AIM: Cycle through the observability maps and execute stray light computations
Temporal resolution is 60 seconds and orbit step size is given by the max. error
INPUT: files: - observability maps in form of minutes,ra,dec [rad] in a file orbit_<#>.dat in raw_maps_<orbit_id>/
- in resources/ : minute_tables__<orbit_id>, moon__<orbit_id>, sun_<orbit_id>
- The complete package of the stray_light.f code (including the orbit file)
variables: see section PARAMETERS (see below)
OUTPUT: in height_part/ (see PARAMETERS): one file per minute with ra,dec,flux [ra,ra, ph/(s.px)]
CMD: python (name_file).py
ISSUES: <NONE KNOWN>
REQUIRES: standard python libraries, specific libraries in resources/
REMARKS: <none>
'''
######################################################################
# DEFINITIONS AND INCLUDES
import numpy as np
import subprocess
import os
import sys
import time
import resources.constants as const
from resources.routines import *
from resources.TimeStepping import *
######################################################################
# PARAMETERS
# km (only required to compute orbit's period)
apogee = 700
perigee= 700
# orbit id
orbit_id = 'ORBIT_ID'
# 1, 2 or 3 (or, ...) into which folder ? Scheme is orbit_id_part/
part = 1
# Orbit to start by:
orbit_ini = 1
# Last orbit to compute (see # of orbits in one year in parameters.py)
orbit_end = 1331
# Defintions of the steps
orbit_step = 10
# If after a given computation, the precision is not good enough, the step of the orbit is adapted.
adaptative_timestep = True
# Minimum orbital step
min_step = 5
# must be in straylight_xxx/ORBIT
file_orbit = 'orbit_%s.dat' % orbit_id
file_sun = 'sun_%s.dat' % orbit_id
folder = 'absolute_map_%s' % orbit_id
# Standard values are:
# folder_flux = '%d_%d' % (orbit_id,part)
# file_flux = 'flux_'
folder_flux = '%s_%d' % (orbit_id,part)
file_flux = 'flux_'
# Tolerance of the maximal magnitude difference (5% = 0.05)
p = 0.1
# Recalculate previously computed minutes ? (Recommendation: False -- time consuming)
override = False
# Save the date to a file for straylight.f (used only if the log_non_nominal is set to .true.)
save_date_JD = True
# Recompiles the Fortran code to ensure that the correct parameters and constants are loaded.
# Make sure "compile" file CHMOD is set to 777. (Recommendation: True -- very fast)
recompile = True
monitor_angle_usage = False
######################################################################
# INITIALISATION
# path to stray light folder (containting CODE, OUTPUT, INPUT, ORBIT)
path = 'straylight_%s_%d' % (orbit_id,part)
# Says hi
print '\nSALSA v%s' % const.version
print 'Computing max. every %d orbits for orbit ID %s' % (orbit_step,orbit_id)
print '------------------------------------------------------'
start = time.time()
# Loads the heavy tables (orbit, sun)
sys.stdout.write("Loading trajectory file...\t\t")
sys.stdout.flush()
try:
orbit = np.loadtxt(path+'/ORBIT/'+file_orbit, delimiter='\t')
except ValueError:
orbit = np.loadtxt(path+'/ORBIT/'+file_orbit, delimiter=' ')
print "Done."
sys.stdout.write("Loading Sun position file...\t\t")
sys.stdout.flush()
try:
sun = np.loadtxt(path+'/ORBIT/'+file_sun, delimiter='\t')
except ValueError:
sun = np.loadtxt(path+'/ORBIT/'+file_sun, delimiter=' ')
print "Done."
# initialise a few variables
total_targets = 0
map_obstot = np.empty(3)
is_first_run = True
orig_dir = os.getcwd()
# Computes the period of the orbit given the altitude
period = altitude2period(apogee,perigee)
# Recompiles the code
if recompile:
sys.stdout.write("Compilation of the code...\t\t")
sys.stdout.flush()
os.chdir(os.path.join(os.path.abspath(sys.path[0]), '%s/CODE/' % path))
os.system("./compile")
os.chdir(orig_dir)
print "Done."
######################################################################
# LOOPING OVER ALL GIVEN MAPS
orbit_current = orbit_ini
# Loop on the orbits
preceeding = orbit_current
former_step = orbit_step
while (orbit_current <= orbit_end):
print '\n---------------- ORBIT %d --- ID %s ----------------------------' % (orbit_current, orbit_id)
# Get the initial and final time
start_minute = time.time()
t_ini, t_end, a_ini, a_end = orbit2times(orbit_current,orbit_id)
minute = a_ini
# Load the observability map for the orbit
try:
map_obstot = load_map(orbit_current,folder)
except IOError:
raise IOError("Critical Error: Could not load orbit %d" % orbit_current)
while (minute <= a_end):
if not override:
try:
# Try to load the fluxes for a given minute (minute goes from 0 to period whereas a_ini is the absolute time from 0:0:0.0 1/1/2018 in min
ra, dec, S_sl = load_flux_file(minute, file_flux,folder=folder_flux)
minute += 1
continue
# If there is an error while loading the file, compute it!
except IOError:
pass
sys.stdout.write( '\rComputing minute: %3d\tAbsolute: %6d\t' % ((minute-a_ini),minute) )
sys.stdout.flush()
if save_date_JD:
# compute the julian date for a given minute and saves it to a file read by stray_light.f
JD = minutes_2_JD(minute)
f = open('%s/INPUT/date_JD.dat' % path,'w')
f.write(str(JD)+'d0\n')
f.close()
# find the position of the satellite in the orbit and compute RA, DEC of the sat with respect to the Earth
id_sat = find_nearest(orbit[:,0],minute)
x = orbit[id_sat,1]
y = orbit[id_sat,2]
z = orbit[id_sat,3]
r = R_3d(x,y,z)
ra_sat = rev( right_ascension(x,y) )
dec_sat= declination(x,y,z)
np.savetxt('%s/INPUT/coord_sat.dat' % path,[x,y,z,ra_sat,dec_sat,r],delimiter='\n')
# find the position of the sun in the orbit and compute RA, DEC of the sat with respect to the Earth
id_sun = find_nearest(sun[:,0],minute)
xs = sun[id_sun,1]
ys = sun[id_sun,2]
zs = sun[id_sun,3]
rs = R_3d(xs,ys,zs)
ra_sun = rev( right_ascension(xs,ys) )
dec_sun= declination(xs,ys,zs)
| # select only none zero value with resilience to rounding errors
# See resources/routines.py
map_obs = slice_map(map_obstot, minute)
# Count the number of points for that particular minute
total_targets += np.shape(map_obs)[0]
if np.shape(map_obs)[0] > 0 :
# Execute the stray light code only if there is more than 0 target !
sys.stdout.write(str(np.shape(map_obs)[0])+' points\t')
sys.stdout.flush()
# Save the targets and the number of line to two separate files to optimise Fortran read.
np.savetxt('%s/INPUT/coord_targets.dat' % path, map_obs[:,1:3],
delimiter=' ', fmt='%3.3f')
f = open('%s/INPUT/number_of_targets.dat' % path, 'w')
f.write(str(np.shape(map_obs)[0])+'\n')
f.close()
os.chdir(os.path.join(os.path.abspath(sys.path[0]), '%s/CODE/' % path))
res=subprocess.call(["./stray_light"])
if not res == 0:
raise RuntimeError("Critical Error in stray light code")
# Move the files to the right ouput folder
os.chdir(orig_dir)
res=subprocess.call(["mv", "%s/OUTPUT/straylight.out" % path,
'%s/%s%d.dat' % (folder_flux,file_flux,minute)])
if not res == 0:
raise RuntimeError("Critical Error: impossible to move output file")
else:
sys.stdout.write('No points. ')
sys.stdout.flush()
minute += 1
end_minute = time.time()
elapsed_time = round( end_minute - start_minute , 1)
# end of orbit
message = '\r%3.1f minutes to compute %d minutes of the orbit no %d / %d \n' % (elapsed_time/60, t_end+1, orbit_current, orbit_end)
sys.stdout.write(message)
sys.stdout.flush()
if monitor_angle_usage:
res=subprocess.call(["mv", "%s/OUTPUT/angle_usage.out" % path,'%s/angles_%d.dat' % (folder_flux,orbit_current)])
if not res == 0:
raise RuntimeError("Critical Error: impossible to move angle usage file")
# Computes the differences to the reference orbit
if adaptative_timestep and orbit_current>orbit_ini:
# See details of compare_two_orbits in resources/routines
# tries with minute 0 of reference is same as current
pp = compare_two_orbits(preceeding, orbit_current, orbit_id, p=p, file_flux=file_flux, folder=folder_flux)
pp_old = pp
# tries with minute 0 of reference is same as current+1
if pp > p :
pp = compare_two_orbits(preceeding, orbit_current, orbit_id, p=p, file_flux=file_flux, folder=folder_flux, shift = 1)
# tries with minute 0 of reference is same as current+2
if pp > p :
pp = compare_two_orbits(preceeding, orbit_current, orbit_id, p=p, file_flux=file_flux, folder=folder_flux, shift = 2)
if pp > p :
# Tried, still bad
status = False
pp = pp_old
else : status = True
print 'Precision former step:', hilite( str(np.round(pp*100,2)), status, False),'%'
# if the test worked, continue with optimum step size or increase it
if pp < p:
preceeding = orbit_current
if former_step == orbit_step:
current_step = orbit_step
else:
current_step = int(former_step*2)
if current_step > orbit_step : current_step = orbit_step
else : print 'Next adaptative step of :', current_step, 'orbit(s)'
orbit_current += current_step
former_step = current_step
# if the test failed, reduce the step size
else :
current_step = int(former_step/2)
if current_step < min_step: current_step = min_step
if current_step == former_step and current_step ==min_step :
message = 'Could not reach precision (reference orbit=%s, current=%s)' % (preceeding, orbit_current)
print hilite(message, False, True)
preceeding = orbit_current
orbit_current += min_step
else:
orbit_current += current_step - former_step
former_step = current_step
print 'Next adaptative step of :', current_step, 'orbit(s)'
# Ensure that we do not get stuck somehow.
else: orbit_current += orbit_step
######################################################################
# OUTPUTS THE FINAL REMARK
end = time.time()
elapsed_time = round((end-start)/60.,1)
print 'Stray light calculation carried out in '+ str(elapsed_time) +' min for '+ str(total_targets)+ ' points. Have nice day!' | np.savetxt('%s/INPUT/coord_sun.dat' % path,[xs,ys,zs,ra_sun,dec_sun,rs],delimiter='\n')
| random_line_split |
d3cap.rs | use std::thread::{self, JoinHandle};
use std::hash::{Hash};
use std::collections::hash_map::{Entry, HashMap};
use std::fs::File;
use std::io::{self, Read};
use std::sync::{Arc,RwLock};
use std::sync::mpsc::{channel, Sender, SendError};
use toml;
use multicast::Multicast;
use json_serve::uiserver::UIServer;
use util::{ntohs, skip_bytes_cast, skip_cast};
use ip::{IP4Addr, IP6Addr, IP4Header, IP6Header};
use ether::{EthernetHeader, MacAddr,
ETHERTYPE_ARP, ETHERTYPE_IP4, ETHERTYPE_IP6, ETHERTYPE_802_1X};
use dot11::{self, FrameType};
use tap;
use pkt_graph::{PktMeta, ProtocolGraph, RouteStats};
use fixed_ring::FixedRingBuffer;
use pcap::pcap as cap;
#[derive(RustcEncodable, Clone)]
struct RouteStatsMsg<T> {
typ: &'static str,
route: RouteStats<T>,
}
#[derive(Debug)]
pub enum Pkt {
Mac(PktMeta<MacAddr>),
IP4(PktMeta<IP4Addr>),
IP6(PktMeta<IP6Addr>),
}
#[derive(Clone)]
pub struct ProtocolHandler<T:Eq+Hash+Send+Sync+'static> {
pub typ: &'static str,
pub graph: Arc<RwLock<ProtocolGraph<T>>>,
stats_mcast: Multicast<RouteStatsMsg<T>>,
}
impl <T:Send+Sync+Copy+Clone+Eq+Hash> ProtocolHandler<T> {
fn new(typ: &'static str) -> io::Result<ProtocolHandler<T>> {
Ok(ProtocolHandler {
typ: typ,
graph: Arc::new(RwLock::new(ProtocolGraph::new())),
stats_mcast: Multicast::spawn()?
})
}
fn update(&mut self, pkt: &PktMeta<T>) {
let route_stats = {
self.graph.write().unwrap().update(pkt)
};
let route_stats_msg = Arc::new(RouteStatsMsg {
typ: self.typ,
route: route_stats
});
self.stats_mcast.send(route_stats_msg).unwrap();
}
}
#[derive(Clone)]
pub struct ProtoGraphController {
pub cap_tx: Sender<Pkt>,
pub mac: ProtocolHandler<MacAddr>,
pub ip4: ProtocolHandler<IP4Addr>,
pub ip6: ProtocolHandler<IP6Addr>,
}
impl ProtoGraphController {
fn spawn() -> io::Result<ProtoGraphController> {
let (cap_tx, cap_rx) = channel();
let ctl = ProtoGraphController {
cap_tx: cap_tx,
mac: ProtocolHandler::new("mac")?,
ip4: ProtocolHandler::new("ip4")?,
ip6: ProtocolHandler::new("ip6")?,
};
let mut phctl = ctl.clone();
thread::Builder::new().name("protocol_handler".to_owned()).spawn(move || {
loop {
let pkt = cap_rx.recv();
if pkt.is_err() {
break
}
match pkt.unwrap() {
Pkt::Mac(ref p) => phctl.mac.update(p),
Pkt::IP4(ref p) => phctl.ip4.update(p),
Pkt::IP6(ref p) => phctl.ip6.update(p),
}
}
})?;
Ok(ctl)
}
fn sender(&self) -> Sender<Pkt> {
self.cap_tx.clone()
}
fn register_mac_listener(&self, s: Sender<Arc<RouteStatsMsg<MacAddr>>>) {
self.mac.stats_mcast.register(s).unwrap();
}
fn register_ip4_listener(&self, s: Sender<Arc<RouteStatsMsg<IP4Addr>>>) {
self.ip4.stats_mcast.register(s).unwrap();
}
fn register_ip6_listener(&self, s: Sender<Arc<RouteStatsMsg<IP6Addr>>>) {
self.ip6.stats_mcast.register(s).unwrap();
}
}
enum ParseErr {
Send,
UnknownPacket
}
impl<T> From<SendError<T>> for ParseErr {
fn from(_: SendError<T>) -> ParseErr {
ParseErr::Send
}
}
trait PktParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr>;
}
pub struct CaptureCtx {
sess: cap::PcapSession,
parser: Box<PktParser+'static>
}
impl CaptureCtx {
fn parse_next(&mut self) {
let p = &mut self.parser;
self.sess.next(|cap| {
match p.parse(cap) {
_ => () //just ignore
}
});
}
}
struct EthernetParser {
pkts: Sender<Pkt>,
}
impl PktParser for EthernetParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> {
let ether_hdr = unsafe { &*(pkt.pkt_ptr() as *const EthernetHeader) };
self.pkts.send(Pkt::Mac(PktMeta::new(ether_hdr.src, ether_hdr.dst, pkt.len())))?;
match ether_hdr.typ {
ETHERTYPE_ARP => {
//io::println("ARP!");
},
ETHERTYPE_IP4 => {
let ipp: &IP4Header = unsafe { skip_cast(ether_hdr) };
self.pkts.send(Pkt::IP4(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?;
},
ETHERTYPE_IP6 => {
let ipp: &IP6Header = unsafe { skip_cast(ether_hdr) };
self.pkts.send(Pkt::IP6(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?;
},
ETHERTYPE_802_1X => {
//io::println("802.1X!");
},
_ => {
//println!("Unknown type: {:x}", x);
}
}
Ok(())
}
}
#[derive(Debug)]
pub struct PhysData { // TODO: this name sucks
frame_ty: FrameType,
addrs: [MacAddr; 3],
rate: Option<tap::Rate>,
channel: tap::Channel,
antenna_signal: tap::AntennaSignal,
antenna_noise: tap::AntennaNoise,
antenna: tap::Antenna,
}
impl PhysData {
fn new(frame_ty: FrameType,
addrs: [MacAddr; 3],
rate: Option<tap::Rate>,
channel: tap::Channel,
antenna_signal: tap::AntennaSignal,
antenna_noise: tap::AntennaNoise,
antenna: tap::Antenna,
) -> PhysData {
PhysData {
frame_ty: frame_ty,
addrs: addrs,
rate: rate,
channel: channel,
antenna_signal: antenna_signal,
antenna_noise: antenna_noise,
antenna: antenna
}
}
fn dist(&self) -> f32 {
let freq = f32::from(self.channel.mhz);
let signal = f32::from(self.antenna_signal.dbm);
let exp = (27.55 - (20.0 * freq.log10()) + signal.abs()) / 20.0;
(10.0f32).powf(exp)
}
}
#[derive(PartialEq, Eq, Hash)]
pub struct PhysDataKey(pub FrameType, pub [MacAddr;3]);
pub struct PhysDataVal {
pub dat: FixedRingBuffer<PhysData>,
pub count: u32,
}
impl PhysDataVal {
pub fn new() -> PhysDataVal {
PhysDataVal {
dat: FixedRingBuffer::new(10),
count: 0
}
}
pub fn avg_dist(&self) -> f32 {
let mut s = 0.0;
for pd in self.dat.iter() {
s += pd.dist();
}
s / (self.dat.len() as f32)
}
}
#[derive(Clone)]
pub struct PhysDataController {
pub map: Arc<RwLock<HashMap<PhysDataKey, PhysDataVal>>>,
pd_tx: Sender<PhysData>
}
impl PhysDataController {
fn spawn() -> io::Result<PhysDataController> {
let (pd_tx, pd_rx) = channel();
let out = PhysDataController {
pd_tx: pd_tx,
map: Arc::new(RwLock::new(HashMap::new()))
};
let ctl = out.clone();
thread::Builder::new().name("physdata_handler".to_owned()).spawn(move || {
loop {
let res = pd_rx.recv();
if res.is_err() {
break
}
let pd = res.unwrap();
match ctl.map.write().unwrap().entry(PhysDataKey(pd.frame_ty, pd.addrs)) {
Entry::Occupied(mut e) => {
let mut pdc = e.get_mut();
pdc.dat.push(pd);
pdc.count += 1;
}
Entry::Vacant(e) => {
let mut pdc = PhysDataVal::new();
pdc.dat.push(pd);
pdc.count += 1;
e.insert(pdc);
}
};
}
})?;
Ok(out)
}
fn sender(&self) -> Sender<PhysData> {
self.pd_tx.clone()
}
}
struct RadiotapParser {
pkts: Sender<Pkt>,
phys: Sender<PhysData>
}
impl RadiotapParser {
fn parse_known_headers(&self,
frame_ty: FrameType,
addrs: [MacAddr; 3],
tap_hdr: &tap::RadiotapHeader) {
match tap_hdr.it_present {
tap::ItPresent::COMMON_A => {
if let Some(vals) = tap::CommonA::parse(tap_hdr) {
self.phys.send(PhysData::new(
frame_ty,
addrs,
Some(vals.rate),
vals.channel,
vals.antenna_signal,
vals.antenna_noise,
vals.antenna
)).unwrap();
}
},
tap::ItPresent::COMMON_B => {
if let Some(vals) = tap::CommonB::parse(tap_hdr) {
self.phys.send(PhysData::new(
frame_ty,
addrs,
None,
vals.channel,
vals.antenna_signal,
vals.antenna_noise,
vals.antenna
)).unwrap();
}
},
_ => {} //Unknown header
}
}
}
impl PktParser for RadiotapParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> {
fn magic<U>(pkt: &tap::RadiotapHeader) -> &U {
unsafe { skip_bytes_cast(pkt, pkt.it_len as isize) }
}
let tap_hdr = unsafe { &*(pkt.pkt_ptr() as *const tap::RadiotapHeader) };
let base: &dot11::Dot11BaseHeader = magic(tap_hdr);
let fc = &base.fr_ctrl;
if fc.protocol_version() != 0 {
// bogus packet, bail
return Err(ParseErr::UnknownPacket);
}
match fc.frame_type() {
ft @ FrameType::Management => {
let mgt: &dot11::ManagementFrameHeader = magic(tap_hdr);
self.parse_known_headers(ft, [mgt.addr1, mgt.addr2, mgt.addr3], tap_hdr);
}
ft @ FrameType::Data => {
let data: &dot11::DataFrameHeader = magic(tap_hdr);
//TODO: get length
self.pkts.send(Pkt::Mac(PktMeta::new(data.addr1, data.addr2, 1)))?;
self.parse_known_headers(ft, [data.addr1, data.addr2, data.addr3], tap_hdr);
}
FrameType::Control | FrameType::Unknown => {
//println!("Unknown frame type");
}
}
Ok(())
}
}
pub fn init_capture(conf: &D3capConf,
pkt_sender: Sender<Pkt>,
pd_sender: Sender<PhysData>) -> CaptureCtx {
let sess = match conf.file {
Some(ref f) => cap::PcapSession::from_file(f),
None => {
println!("No session file");
let sess_builder = match conf.interface {
Some(ref dev) => cap::PcapSessionBuilder::new_dev(dev),
None => cap::PcapSessionBuilder::new()
};
sess_builder.unwrap()
.buffer_size(0xFFFF)
.timeout(1000)
.promisc(conf.promisc)
.rfmon(conf.monitor)
.activate()
}
};
let parser = match sess.datalink() {
cap::DLT_ETHERNET => {
Box::new(EthernetParser { pkts: pkt_sender }) as Box<PktParser>
}
cap::DLT_IEEE802_11_RADIO => {
Box::new(RadiotapParser { pkts: pkt_sender, phys: pd_sender }) as Box<PktParser>
}
x => panic!("unsupported datalink type: {}", x)
};
CaptureCtx { sess: sess, parser: parser }
}
pub fn start_capture(conf: D3capConf,
pkt_sender: Sender<Pkt>,
pd_sender: Sender<PhysData>) -> io::Result<JoinHandle<()>> {
thread::Builder::new().name("packet_capture".to_owned()).spawn(move || {
let mut cap = init_capture(&conf, pkt_sender, pd_sender);
loop {
cap.parse_next();
}
})
}
enum LoadMacError {
IOError(io::Error),
TomlError(Option<toml::de::Error>)
}
impl From<io::Error> for LoadMacError {
fn from(err: io::Error) -> LoadMacError {
LoadMacError::IOError(err)
}
}
impl From<toml::de::Error> for LoadMacError {
fn from(err: toml::de::Error) -> LoadMacError { LoadMacError::TomlError(Some(err)) }
}
fn load_mac_addrs(file: &str) -> Result<HashMap<MacAddr, String>, LoadMacError> {
let mut s = String::new();
let mut f = File::open(&file)?;
f.read_to_string(&mut s)?;
let t = s.parse::<toml::Value>()?;
if let Some(k) = t.get(&"known-macs".to_owned()) {
if let Some(tbl) = k.as_table() {
return Ok(tbl.iter()
.map(|(k, v)| (MacAddr::from_string(k), v.as_str()))
.filter_map(|x| match x {
(Some(addr), Some(alias)) => Some((addr, alias.to_owned())),
_ => None
}) | }
}
Err(LoadMacError::TomlError(None))
}
fn start_websocket(port: u16, mac_map: &MacMap, pg_ctl: &ProtoGraphController) -> io::Result<()> {
let ui = UIServer::spawn(port, mac_map)?;
pg_ctl.register_mac_listener(ui.create_sender()?);
pg_ctl.register_ip4_listener(ui.create_sender()?);
pg_ctl.register_ip6_listener(ui.create_sender()?);
Ok(())
}
pub type MacMap = HashMap<MacAddr, String>;
pub type IP4Map = HashMap<IP4Addr, String>;
pub type IP6Map = HashMap<IP6Addr, String>;
#[derive(Clone)]
pub struct D3capController {
pub pg_ctrl: ProtoGraphController,
pub pd_ctrl: PhysDataController,
pub mac_names: MacMap,
pub ip4_names: IP4Map,
pub ip6_names: IP6Map,
pub server_started: bool
}
impl D3capController {
pub fn spawn(conf: D3capConf) -> io::Result<D3capController> {
let mac_names = conf.conf.as_ref()
.map_or_else(HashMap::new, |x| {
load_mac_addrs(x).unwrap_or_else(|_| HashMap::new())
});
let ip4_names = HashMap::new();
let ip6_names = HashMap::new();
let pg_ctrl = ProtoGraphController::spawn()?;
let pd_ctrl = PhysDataController::spawn()?;
start_capture(conf, pg_ctrl.sender(), pd_ctrl.sender()).unwrap();
Ok(D3capController {
pg_ctrl: pg_ctrl,
pd_ctrl: pd_ctrl,
mac_names: mac_names,
ip4_names: ip4_names,
ip6_names: ip6_names,
server_started: false
})
}
pub fn start_websocket(&mut self, port: u16) -> io::Result<()> {
if self.server_started {
println!("server already started");
} else {
start_websocket(port, &self.mac_names, &self.pg_ctrl)?;
self.server_started = true;
}
Ok(())
}
}
#[derive(Clone, Debug)]
pub struct D3capConf {
pub websocket: Option<u16>,
pub interface: Option<String>,
pub file: Option<String>,
pub conf: Option<String>,
pub promisc: bool,
pub monitor: bool
} | .collect()) | random_line_split |
d3cap.rs | use std::thread::{self, JoinHandle};
use std::hash::{Hash};
use std::collections::hash_map::{Entry, HashMap};
use std::fs::File;
use std::io::{self, Read};
use std::sync::{Arc,RwLock};
use std::sync::mpsc::{channel, Sender, SendError};
use toml;
use multicast::Multicast;
use json_serve::uiserver::UIServer;
use util::{ntohs, skip_bytes_cast, skip_cast};
use ip::{IP4Addr, IP6Addr, IP4Header, IP6Header};
use ether::{EthernetHeader, MacAddr,
ETHERTYPE_ARP, ETHERTYPE_IP4, ETHERTYPE_IP6, ETHERTYPE_802_1X};
use dot11::{self, FrameType};
use tap;
use pkt_graph::{PktMeta, ProtocolGraph, RouteStats};
use fixed_ring::FixedRingBuffer;
use pcap::pcap as cap;
#[derive(RustcEncodable, Clone)]
struct RouteStatsMsg<T> {
typ: &'static str,
route: RouteStats<T>,
}
#[derive(Debug)]
pub enum Pkt {
Mac(PktMeta<MacAddr>),
IP4(PktMeta<IP4Addr>),
IP6(PktMeta<IP6Addr>),
}
#[derive(Clone)]
pub struct ProtocolHandler<T:Eq+Hash+Send+Sync+'static> {
pub typ: &'static str,
pub graph: Arc<RwLock<ProtocolGraph<T>>>,
stats_mcast: Multicast<RouteStatsMsg<T>>,
}
impl <T:Send+Sync+Copy+Clone+Eq+Hash> ProtocolHandler<T> {
fn new(typ: &'static str) -> io::Result<ProtocolHandler<T>> {
Ok(ProtocolHandler {
typ: typ,
graph: Arc::new(RwLock::new(ProtocolGraph::new())),
stats_mcast: Multicast::spawn()?
})
}
fn update(&mut self, pkt: &PktMeta<T>) {
let route_stats = {
self.graph.write().unwrap().update(pkt)
};
let route_stats_msg = Arc::new(RouteStatsMsg {
typ: self.typ,
route: route_stats
});
self.stats_mcast.send(route_stats_msg).unwrap();
}
}
#[derive(Clone)]
pub struct ProtoGraphController {
pub cap_tx: Sender<Pkt>,
pub mac: ProtocolHandler<MacAddr>,
pub ip4: ProtocolHandler<IP4Addr>,
pub ip6: ProtocolHandler<IP6Addr>,
}
impl ProtoGraphController {
fn spawn() -> io::Result<ProtoGraphController> {
let (cap_tx, cap_rx) = channel();
let ctl = ProtoGraphController {
cap_tx: cap_tx,
mac: ProtocolHandler::new("mac")?,
ip4: ProtocolHandler::new("ip4")?,
ip6: ProtocolHandler::new("ip6")?,
};
let mut phctl = ctl.clone();
thread::Builder::new().name("protocol_handler".to_owned()).spawn(move || {
loop {
let pkt = cap_rx.recv();
if pkt.is_err() {
break
}
match pkt.unwrap() {
Pkt::Mac(ref p) => phctl.mac.update(p),
Pkt::IP4(ref p) => phctl.ip4.update(p),
Pkt::IP6(ref p) => phctl.ip6.update(p),
}
}
})?;
Ok(ctl)
}
fn sender(&self) -> Sender<Pkt> {
self.cap_tx.clone()
}
fn register_mac_listener(&self, s: Sender<Arc<RouteStatsMsg<MacAddr>>>) {
self.mac.stats_mcast.register(s).unwrap();
}
fn register_ip4_listener(&self, s: Sender<Arc<RouteStatsMsg<IP4Addr>>>) {
self.ip4.stats_mcast.register(s).unwrap();
}
fn register_ip6_listener(&self, s: Sender<Arc<RouteStatsMsg<IP6Addr>>>) {
self.ip6.stats_mcast.register(s).unwrap();
}
}
enum ParseErr {
Send,
UnknownPacket
}
impl<T> From<SendError<T>> for ParseErr {
fn from(_: SendError<T>) -> ParseErr {
ParseErr::Send
}
}
trait PktParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr>;
}
pub struct CaptureCtx {
sess: cap::PcapSession,
parser: Box<PktParser+'static>
}
impl CaptureCtx {
fn parse_next(&mut self) {
let p = &mut self.parser;
self.sess.next(|cap| {
match p.parse(cap) {
_ => () //just ignore
}
});
}
}
struct EthernetParser {
pkts: Sender<Pkt>,
}
impl PktParser for EthernetParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> {
let ether_hdr = unsafe { &*(pkt.pkt_ptr() as *const EthernetHeader) };
self.pkts.send(Pkt::Mac(PktMeta::new(ether_hdr.src, ether_hdr.dst, pkt.len())))?;
match ether_hdr.typ {
ETHERTYPE_ARP => {
//io::println("ARP!");
},
ETHERTYPE_IP4 => {
let ipp: &IP4Header = unsafe { skip_cast(ether_hdr) };
self.pkts.send(Pkt::IP4(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?;
},
ETHERTYPE_IP6 => {
let ipp: &IP6Header = unsafe { skip_cast(ether_hdr) };
self.pkts.send(Pkt::IP6(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?;
},
ETHERTYPE_802_1X => {
//io::println("802.1X!");
},
_ => {
//println!("Unknown type: {:x}", x);
}
}
Ok(())
}
}
#[derive(Debug)]
pub struct PhysData { // TODO: this name sucks
frame_ty: FrameType,
addrs: [MacAddr; 3],
rate: Option<tap::Rate>,
channel: tap::Channel,
antenna_signal: tap::AntennaSignal,
antenna_noise: tap::AntennaNoise,
antenna: tap::Antenna,
}
impl PhysData {
fn new(frame_ty: FrameType,
addrs: [MacAddr; 3],
rate: Option<tap::Rate>,
channel: tap::Channel,
antenna_signal: tap::AntennaSignal,
antenna_noise: tap::AntennaNoise,
antenna: tap::Antenna,
) -> PhysData {
PhysData {
frame_ty: frame_ty,
addrs: addrs,
rate: rate,
channel: channel,
antenna_signal: antenna_signal,
antenna_noise: antenna_noise,
antenna: antenna
}
}
fn dist(&self) -> f32 {
let freq = f32::from(self.channel.mhz);
let signal = f32::from(self.antenna_signal.dbm);
let exp = (27.55 - (20.0 * freq.log10()) + signal.abs()) / 20.0;
(10.0f32).powf(exp)
}
}
#[derive(PartialEq, Eq, Hash)]
pub struct PhysDataKey(pub FrameType, pub [MacAddr;3]);
pub struct PhysDataVal {
pub dat: FixedRingBuffer<PhysData>,
pub count: u32,
}
impl PhysDataVal {
pub fn new() -> PhysDataVal {
PhysDataVal {
dat: FixedRingBuffer::new(10),
count: 0
}
}
pub fn avg_dist(&self) -> f32 {
let mut s = 0.0;
for pd in self.dat.iter() {
s += pd.dist();
}
s / (self.dat.len() as f32)
}
}
#[derive(Clone)]
pub struct PhysDataController {
pub map: Arc<RwLock<HashMap<PhysDataKey, PhysDataVal>>>,
pd_tx: Sender<PhysData>
}
impl PhysDataController {
fn spawn() -> io::Result<PhysDataController> {
let (pd_tx, pd_rx) = channel();
let out = PhysDataController {
pd_tx: pd_tx,
map: Arc::new(RwLock::new(HashMap::new()))
};
let ctl = out.clone();
thread::Builder::new().name("physdata_handler".to_owned()).spawn(move || {
loop {
let res = pd_rx.recv();
if res.is_err() {
break
}
let pd = res.unwrap();
match ctl.map.write().unwrap().entry(PhysDataKey(pd.frame_ty, pd.addrs)) {
Entry::Occupied(mut e) => {
let mut pdc = e.get_mut();
pdc.dat.push(pd);
pdc.count += 1;
}
Entry::Vacant(e) => {
let mut pdc = PhysDataVal::new();
pdc.dat.push(pd);
pdc.count += 1;
e.insert(pdc);
}
};
}
})?;
Ok(out)
}
fn sender(&self) -> Sender<PhysData> {
self.pd_tx.clone()
}
}
struct RadiotapParser {
pkts: Sender<Pkt>,
phys: Sender<PhysData>
}
impl RadiotapParser {
fn parse_known_headers(&self,
frame_ty: FrameType,
addrs: [MacAddr; 3],
tap_hdr: &tap::RadiotapHeader) {
match tap_hdr.it_present {
tap::ItPresent::COMMON_A => {
if let Some(vals) = tap::CommonA::parse(tap_hdr) {
self.phys.send(PhysData::new(
frame_ty,
addrs,
Some(vals.rate),
vals.channel,
vals.antenna_signal,
vals.antenna_noise,
vals.antenna
)).unwrap();
}
},
tap::ItPresent::COMMON_B => {
if let Some(vals) = tap::CommonB::parse(tap_hdr) {
self.phys.send(PhysData::new(
frame_ty,
addrs,
None,
vals.channel,
vals.antenna_signal,
vals.antenna_noise,
vals.antenna
)).unwrap();
}
},
_ => {} //Unknown header
}
}
}
impl PktParser for RadiotapParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> {
fn magic<U>(pkt: &tap::RadiotapHeader) -> &U |
let tap_hdr = unsafe { &*(pkt.pkt_ptr() as *const tap::RadiotapHeader) };
let base: &dot11::Dot11BaseHeader = magic(tap_hdr);
let fc = &base.fr_ctrl;
if fc.protocol_version() != 0 {
// bogus packet, bail
return Err(ParseErr::UnknownPacket);
}
match fc.frame_type() {
ft @ FrameType::Management => {
let mgt: &dot11::ManagementFrameHeader = magic(tap_hdr);
self.parse_known_headers(ft, [mgt.addr1, mgt.addr2, mgt.addr3], tap_hdr);
}
ft @ FrameType::Data => {
let data: &dot11::DataFrameHeader = magic(tap_hdr);
//TODO: get length
self.pkts.send(Pkt::Mac(PktMeta::new(data.addr1, data.addr2, 1)))?;
self.parse_known_headers(ft, [data.addr1, data.addr2, data.addr3], tap_hdr);
}
FrameType::Control | FrameType::Unknown => {
//println!("Unknown frame type");
}
}
Ok(())
}
}
pub fn init_capture(conf: &D3capConf,
pkt_sender: Sender<Pkt>,
pd_sender: Sender<PhysData>) -> CaptureCtx {
let sess = match conf.file {
Some(ref f) => cap::PcapSession::from_file(f),
None => {
println!("No session file");
let sess_builder = match conf.interface {
Some(ref dev) => cap::PcapSessionBuilder::new_dev(dev),
None => cap::PcapSessionBuilder::new()
};
sess_builder.unwrap()
.buffer_size(0xFFFF)
.timeout(1000)
.promisc(conf.promisc)
.rfmon(conf.monitor)
.activate()
}
};
let parser = match sess.datalink() {
cap::DLT_ETHERNET => {
Box::new(EthernetParser { pkts: pkt_sender }) as Box<PktParser>
}
cap::DLT_IEEE802_11_RADIO => {
Box::new(RadiotapParser { pkts: pkt_sender, phys: pd_sender }) as Box<PktParser>
}
x => panic!("unsupported datalink type: {}", x)
};
CaptureCtx { sess: sess, parser: parser }
}
pub fn start_capture(conf: D3capConf,
pkt_sender: Sender<Pkt>,
pd_sender: Sender<PhysData>) -> io::Result<JoinHandle<()>> {
thread::Builder::new().name("packet_capture".to_owned()).spawn(move || {
let mut cap = init_capture(&conf, pkt_sender, pd_sender);
loop {
cap.parse_next();
}
})
}
enum LoadMacError {
IOError(io::Error),
TomlError(Option<toml::de::Error>)
}
impl From<io::Error> for LoadMacError {
fn from(err: io::Error) -> LoadMacError {
LoadMacError::IOError(err)
}
}
impl From<toml::de::Error> for LoadMacError {
fn from(err: toml::de::Error) -> LoadMacError { LoadMacError::TomlError(Some(err)) }
}
fn load_mac_addrs(file: &str) -> Result<HashMap<MacAddr, String>, LoadMacError> {
let mut s = String::new();
let mut f = File::open(&file)?;
f.read_to_string(&mut s)?;
let t = s.parse::<toml::Value>()?;
if let Some(k) = t.get(&"known-macs".to_owned()) {
if let Some(tbl) = k.as_table() {
return Ok(tbl.iter()
.map(|(k, v)| (MacAddr::from_string(k), v.as_str()))
.filter_map(|x| match x {
(Some(addr), Some(alias)) => Some((addr, alias.to_owned())),
_ => None
})
.collect())
}
}
Err(LoadMacError::TomlError(None))
}
fn start_websocket(port: u16, mac_map: &MacMap, pg_ctl: &ProtoGraphController) -> io::Result<()> {
let ui = UIServer::spawn(port, mac_map)?;
pg_ctl.register_mac_listener(ui.create_sender()?);
pg_ctl.register_ip4_listener(ui.create_sender()?);
pg_ctl.register_ip6_listener(ui.create_sender()?);
Ok(())
}
pub type MacMap = HashMap<MacAddr, String>;
pub type IP4Map = HashMap<IP4Addr, String>;
pub type IP6Map = HashMap<IP6Addr, String>;
#[derive(Clone)]
pub struct D3capController {
pub pg_ctrl: ProtoGraphController,
pub pd_ctrl: PhysDataController,
pub mac_names: MacMap,
pub ip4_names: IP4Map,
pub ip6_names: IP6Map,
pub server_started: bool
}
impl D3capController {
pub fn spawn(conf: D3capConf) -> io::Result<D3capController> {
let mac_names = conf.conf.as_ref()
.map_or_else(HashMap::new, |x| {
load_mac_addrs(x).unwrap_or_else(|_| HashMap::new())
});
let ip4_names = HashMap::new();
let ip6_names = HashMap::new();
let pg_ctrl = ProtoGraphController::spawn()?;
let pd_ctrl = PhysDataController::spawn()?;
start_capture(conf, pg_ctrl.sender(), pd_ctrl.sender()).unwrap();
Ok(D3capController {
pg_ctrl: pg_ctrl,
pd_ctrl: pd_ctrl,
mac_names: mac_names,
ip4_names: ip4_names,
ip6_names: ip6_names,
server_started: false
})
}
pub fn start_websocket(&mut self, port: u16) -> io::Result<()> {
if self.server_started {
println!("server already started");
} else {
start_websocket(port, &self.mac_names, &self.pg_ctrl)?;
self.server_started = true;
}
Ok(())
}
}
#[derive(Clone, Debug)]
pub struct D3capConf {
pub websocket: Option<u16>,
pub interface: Option<String>,
pub file: Option<String>,
pub conf: Option<String>,
pub promisc: bool,
pub monitor: bool
}
| {
unsafe { skip_bytes_cast(pkt, pkt.it_len as isize) }
} | identifier_body |
d3cap.rs | use std::thread::{self, JoinHandle};
use std::hash::{Hash};
use std::collections::hash_map::{Entry, HashMap};
use std::fs::File;
use std::io::{self, Read};
use std::sync::{Arc,RwLock};
use std::sync::mpsc::{channel, Sender, SendError};
use toml;
use multicast::Multicast;
use json_serve::uiserver::UIServer;
use util::{ntohs, skip_bytes_cast, skip_cast};
use ip::{IP4Addr, IP6Addr, IP4Header, IP6Header};
use ether::{EthernetHeader, MacAddr,
ETHERTYPE_ARP, ETHERTYPE_IP4, ETHERTYPE_IP6, ETHERTYPE_802_1X};
use dot11::{self, FrameType};
use tap;
use pkt_graph::{PktMeta, ProtocolGraph, RouteStats};
use fixed_ring::FixedRingBuffer;
use pcap::pcap as cap;
#[derive(RustcEncodable, Clone)]
struct RouteStatsMsg<T> {
typ: &'static str,
route: RouteStats<T>,
}
#[derive(Debug)]
pub enum Pkt {
Mac(PktMeta<MacAddr>),
IP4(PktMeta<IP4Addr>),
IP6(PktMeta<IP6Addr>),
}
#[derive(Clone)]
pub struct ProtocolHandler<T:Eq+Hash+Send+Sync+'static> {
pub typ: &'static str,
pub graph: Arc<RwLock<ProtocolGraph<T>>>,
stats_mcast: Multicast<RouteStatsMsg<T>>,
}
impl <T:Send+Sync+Copy+Clone+Eq+Hash> ProtocolHandler<T> {
fn new(typ: &'static str) -> io::Result<ProtocolHandler<T>> {
Ok(ProtocolHandler {
typ: typ,
graph: Arc::new(RwLock::new(ProtocolGraph::new())),
stats_mcast: Multicast::spawn()?
})
}
fn update(&mut self, pkt: &PktMeta<T>) {
let route_stats = {
self.graph.write().unwrap().update(pkt)
};
let route_stats_msg = Arc::new(RouteStatsMsg {
typ: self.typ,
route: route_stats
});
self.stats_mcast.send(route_stats_msg).unwrap();
}
}
#[derive(Clone)]
pub struct ProtoGraphController {
pub cap_tx: Sender<Pkt>,
pub mac: ProtocolHandler<MacAddr>,
pub ip4: ProtocolHandler<IP4Addr>,
pub ip6: ProtocolHandler<IP6Addr>,
}
impl ProtoGraphController {
fn spawn() -> io::Result<ProtoGraphController> {
let (cap_tx, cap_rx) = channel();
let ctl = ProtoGraphController {
cap_tx: cap_tx,
mac: ProtocolHandler::new("mac")?,
ip4: ProtocolHandler::new("ip4")?,
ip6: ProtocolHandler::new("ip6")?,
};
let mut phctl = ctl.clone();
thread::Builder::new().name("protocol_handler".to_owned()).spawn(move || {
loop {
let pkt = cap_rx.recv();
if pkt.is_err() {
break
}
match pkt.unwrap() {
Pkt::Mac(ref p) => phctl.mac.update(p),
Pkt::IP4(ref p) => phctl.ip4.update(p),
Pkt::IP6(ref p) => phctl.ip6.update(p),
}
}
})?;
Ok(ctl)
}
fn sender(&self) -> Sender<Pkt> {
self.cap_tx.clone()
}
fn register_mac_listener(&self, s: Sender<Arc<RouteStatsMsg<MacAddr>>>) {
self.mac.stats_mcast.register(s).unwrap();
}
fn register_ip4_listener(&self, s: Sender<Arc<RouteStatsMsg<IP4Addr>>>) {
self.ip4.stats_mcast.register(s).unwrap();
}
fn register_ip6_listener(&self, s: Sender<Arc<RouteStatsMsg<IP6Addr>>>) {
self.ip6.stats_mcast.register(s).unwrap();
}
}
enum ParseErr {
Send,
UnknownPacket
}
impl<T> From<SendError<T>> for ParseErr {
fn from(_: SendError<T>) -> ParseErr {
ParseErr::Send
}
}
trait PktParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr>;
}
pub struct CaptureCtx {
sess: cap::PcapSession,
parser: Box<PktParser+'static>
}
impl CaptureCtx {
fn parse_next(&mut self) {
let p = &mut self.parser;
self.sess.next(|cap| {
match p.parse(cap) {
_ => () //just ignore
}
});
}
}
struct EthernetParser {
pkts: Sender<Pkt>,
}
impl PktParser for EthernetParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> {
let ether_hdr = unsafe { &*(pkt.pkt_ptr() as *const EthernetHeader) };
self.pkts.send(Pkt::Mac(PktMeta::new(ether_hdr.src, ether_hdr.dst, pkt.len())))?;
match ether_hdr.typ {
ETHERTYPE_ARP => {
//io::println("ARP!");
},
ETHERTYPE_IP4 => {
let ipp: &IP4Header = unsafe { skip_cast(ether_hdr) };
self.pkts.send(Pkt::IP4(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?;
},
ETHERTYPE_IP6 => {
let ipp: &IP6Header = unsafe { skip_cast(ether_hdr) };
self.pkts.send(Pkt::IP6(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?;
},
ETHERTYPE_802_1X => {
//io::println("802.1X!");
},
_ => {
//println!("Unknown type: {:x}", x);
}
}
Ok(())
}
}
#[derive(Debug)]
pub struct PhysData { // TODO: this name sucks
frame_ty: FrameType,
addrs: [MacAddr; 3],
rate: Option<tap::Rate>,
channel: tap::Channel,
antenna_signal: tap::AntennaSignal,
antenna_noise: tap::AntennaNoise,
antenna: tap::Antenna,
}
impl PhysData {
fn new(frame_ty: FrameType,
addrs: [MacAddr; 3],
rate: Option<tap::Rate>,
channel: tap::Channel,
antenna_signal: tap::AntennaSignal,
antenna_noise: tap::AntennaNoise,
antenna: tap::Antenna,
) -> PhysData {
PhysData {
frame_ty: frame_ty,
addrs: addrs,
rate: rate,
channel: channel,
antenna_signal: antenna_signal,
antenna_noise: antenna_noise,
antenna: antenna
}
}
fn dist(&self) -> f32 {
let freq = f32::from(self.channel.mhz);
let signal = f32::from(self.antenna_signal.dbm);
let exp = (27.55 - (20.0 * freq.log10()) + signal.abs()) / 20.0;
(10.0f32).powf(exp)
}
}
#[derive(PartialEq, Eq, Hash)]
pub struct PhysDataKey(pub FrameType, pub [MacAddr;3]);
pub struct PhysDataVal {
pub dat: FixedRingBuffer<PhysData>,
pub count: u32,
}
impl PhysDataVal {
pub fn new() -> PhysDataVal {
PhysDataVal {
dat: FixedRingBuffer::new(10),
count: 0
}
}
pub fn avg_dist(&self) -> f32 {
let mut s = 0.0;
for pd in self.dat.iter() {
s += pd.dist();
}
s / (self.dat.len() as f32)
}
}
#[derive(Clone)]
pub struct PhysDataController {
pub map: Arc<RwLock<HashMap<PhysDataKey, PhysDataVal>>>,
pd_tx: Sender<PhysData>
}
impl PhysDataController {
fn spawn() -> io::Result<PhysDataController> {
let (pd_tx, pd_rx) = channel();
let out = PhysDataController {
pd_tx: pd_tx,
map: Arc::new(RwLock::new(HashMap::new()))
};
let ctl = out.clone();
thread::Builder::new().name("physdata_handler".to_owned()).spawn(move || {
loop {
let res = pd_rx.recv();
if res.is_err() {
break
}
let pd = res.unwrap();
match ctl.map.write().unwrap().entry(PhysDataKey(pd.frame_ty, pd.addrs)) {
Entry::Occupied(mut e) => {
let mut pdc = e.get_mut();
pdc.dat.push(pd);
pdc.count += 1;
}
Entry::Vacant(e) => {
let mut pdc = PhysDataVal::new();
pdc.dat.push(pd);
pdc.count += 1;
e.insert(pdc);
}
};
}
})?;
Ok(out)
}
fn sender(&self) -> Sender<PhysData> {
self.pd_tx.clone()
}
}
struct RadiotapParser {
pkts: Sender<Pkt>,
phys: Sender<PhysData>
}
impl RadiotapParser {
fn parse_known_headers(&self,
frame_ty: FrameType,
addrs: [MacAddr; 3],
tap_hdr: &tap::RadiotapHeader) {
match tap_hdr.it_present {
tap::ItPresent::COMMON_A => {
if let Some(vals) = tap::CommonA::parse(tap_hdr) {
self.phys.send(PhysData::new(
frame_ty,
addrs,
Some(vals.rate),
vals.channel,
vals.antenna_signal,
vals.antenna_noise,
vals.antenna
)).unwrap();
}
},
tap::ItPresent::COMMON_B => {
if let Some(vals) = tap::CommonB::parse(tap_hdr) {
self.phys.send(PhysData::new(
frame_ty,
addrs,
None,
vals.channel,
vals.antenna_signal,
vals.antenna_noise,
vals.antenna
)).unwrap();
}
},
_ => {} //Unknown header
}
}
}
impl PktParser for RadiotapParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> {
fn magic<U>(pkt: &tap::RadiotapHeader) -> &U {
unsafe { skip_bytes_cast(pkt, pkt.it_len as isize) }
}
let tap_hdr = unsafe { &*(pkt.pkt_ptr() as *const tap::RadiotapHeader) };
let base: &dot11::Dot11BaseHeader = magic(tap_hdr);
let fc = &base.fr_ctrl;
if fc.protocol_version() != 0 {
// bogus packet, bail
return Err(ParseErr::UnknownPacket);
}
match fc.frame_type() {
ft @ FrameType::Management => {
let mgt: &dot11::ManagementFrameHeader = magic(tap_hdr);
self.parse_known_headers(ft, [mgt.addr1, mgt.addr2, mgt.addr3], tap_hdr);
}
ft @ FrameType::Data => {
let data: &dot11::DataFrameHeader = magic(tap_hdr);
//TODO: get length
self.pkts.send(Pkt::Mac(PktMeta::new(data.addr1, data.addr2, 1)))?;
self.parse_known_headers(ft, [data.addr1, data.addr2, data.addr3], tap_hdr);
}
FrameType::Control | FrameType::Unknown => {
//println!("Unknown frame type");
}
}
Ok(())
}
}
pub fn init_capture(conf: &D3capConf,
pkt_sender: Sender<Pkt>,
pd_sender: Sender<PhysData>) -> CaptureCtx {
let sess = match conf.file {
Some(ref f) => cap::PcapSession::from_file(f),
None => {
println!("No session file");
let sess_builder = match conf.interface {
Some(ref dev) => cap::PcapSessionBuilder::new_dev(dev),
None => cap::PcapSessionBuilder::new()
};
sess_builder.unwrap()
.buffer_size(0xFFFF)
.timeout(1000)
.promisc(conf.promisc)
.rfmon(conf.monitor)
.activate()
}
};
let parser = match sess.datalink() {
cap::DLT_ETHERNET => {
Box::new(EthernetParser { pkts: pkt_sender }) as Box<PktParser>
}
cap::DLT_IEEE802_11_RADIO => {
Box::new(RadiotapParser { pkts: pkt_sender, phys: pd_sender }) as Box<PktParser>
}
x => panic!("unsupported datalink type: {}", x)
};
CaptureCtx { sess: sess, parser: parser }
}
pub fn | (conf: D3capConf,
pkt_sender: Sender<Pkt>,
pd_sender: Sender<PhysData>) -> io::Result<JoinHandle<()>> {
thread::Builder::new().name("packet_capture".to_owned()).spawn(move || {
let mut cap = init_capture(&conf, pkt_sender, pd_sender);
loop {
cap.parse_next();
}
})
}
enum LoadMacError {
IOError(io::Error),
TomlError(Option<toml::de::Error>)
}
impl From<io::Error> for LoadMacError {
fn from(err: io::Error) -> LoadMacError {
LoadMacError::IOError(err)
}
}
impl From<toml::de::Error> for LoadMacError {
fn from(err: toml::de::Error) -> LoadMacError { LoadMacError::TomlError(Some(err)) }
}
fn load_mac_addrs(file: &str) -> Result<HashMap<MacAddr, String>, LoadMacError> {
let mut s = String::new();
let mut f = File::open(&file)?;
f.read_to_string(&mut s)?;
let t = s.parse::<toml::Value>()?;
if let Some(k) = t.get(&"known-macs".to_owned()) {
if let Some(tbl) = k.as_table() {
return Ok(tbl.iter()
.map(|(k, v)| (MacAddr::from_string(k), v.as_str()))
.filter_map(|x| match x {
(Some(addr), Some(alias)) => Some((addr, alias.to_owned())),
_ => None
})
.collect())
}
}
Err(LoadMacError::TomlError(None))
}
fn start_websocket(port: u16, mac_map: &MacMap, pg_ctl: &ProtoGraphController) -> io::Result<()> {
let ui = UIServer::spawn(port, mac_map)?;
pg_ctl.register_mac_listener(ui.create_sender()?);
pg_ctl.register_ip4_listener(ui.create_sender()?);
pg_ctl.register_ip6_listener(ui.create_sender()?);
Ok(())
}
pub type MacMap = HashMap<MacAddr, String>;
pub type IP4Map = HashMap<IP4Addr, String>;
pub type IP6Map = HashMap<IP6Addr, String>;
#[derive(Clone)]
pub struct D3capController {
pub pg_ctrl: ProtoGraphController,
pub pd_ctrl: PhysDataController,
pub mac_names: MacMap,
pub ip4_names: IP4Map,
pub ip6_names: IP6Map,
pub server_started: bool
}
impl D3capController {
pub fn spawn(conf: D3capConf) -> io::Result<D3capController> {
let mac_names = conf.conf.as_ref()
.map_or_else(HashMap::new, |x| {
load_mac_addrs(x).unwrap_or_else(|_| HashMap::new())
});
let ip4_names = HashMap::new();
let ip6_names = HashMap::new();
let pg_ctrl = ProtoGraphController::spawn()?;
let pd_ctrl = PhysDataController::spawn()?;
start_capture(conf, pg_ctrl.sender(), pd_ctrl.sender()).unwrap();
Ok(D3capController {
pg_ctrl: pg_ctrl,
pd_ctrl: pd_ctrl,
mac_names: mac_names,
ip4_names: ip4_names,
ip6_names: ip6_names,
server_started: false
})
}
pub fn start_websocket(&mut self, port: u16) -> io::Result<()> {
if self.server_started {
println!("server already started");
} else {
start_websocket(port, &self.mac_names, &self.pg_ctrl)?;
self.server_started = true;
}
Ok(())
}
}
#[derive(Clone, Debug)]
pub struct D3capConf {
pub websocket: Option<u16>,
pub interface: Option<String>,
pub file: Option<String>,
pub conf: Option<String>,
pub promisc: bool,
pub monitor: bool
}
| start_capture | identifier_name |
d3cap.rs | use std::thread::{self, JoinHandle};
use std::hash::{Hash};
use std::collections::hash_map::{Entry, HashMap};
use std::fs::File;
use std::io::{self, Read};
use std::sync::{Arc,RwLock};
use std::sync::mpsc::{channel, Sender, SendError};
use toml;
use multicast::Multicast;
use json_serve::uiserver::UIServer;
use util::{ntohs, skip_bytes_cast, skip_cast};
use ip::{IP4Addr, IP6Addr, IP4Header, IP6Header};
use ether::{EthernetHeader, MacAddr,
ETHERTYPE_ARP, ETHERTYPE_IP4, ETHERTYPE_IP6, ETHERTYPE_802_1X};
use dot11::{self, FrameType};
use tap;
use pkt_graph::{PktMeta, ProtocolGraph, RouteStats};
use fixed_ring::FixedRingBuffer;
use pcap::pcap as cap;
#[derive(RustcEncodable, Clone)]
struct RouteStatsMsg<T> {
typ: &'static str,
route: RouteStats<T>,
}
#[derive(Debug)]
pub enum Pkt {
Mac(PktMeta<MacAddr>),
IP4(PktMeta<IP4Addr>),
IP6(PktMeta<IP6Addr>),
}
#[derive(Clone)]
pub struct ProtocolHandler<T:Eq+Hash+Send+Sync+'static> {
pub typ: &'static str,
pub graph: Arc<RwLock<ProtocolGraph<T>>>,
stats_mcast: Multicast<RouteStatsMsg<T>>,
}
impl <T:Send+Sync+Copy+Clone+Eq+Hash> ProtocolHandler<T> {
fn new(typ: &'static str) -> io::Result<ProtocolHandler<T>> {
Ok(ProtocolHandler {
typ: typ,
graph: Arc::new(RwLock::new(ProtocolGraph::new())),
stats_mcast: Multicast::spawn()?
})
}
fn update(&mut self, pkt: &PktMeta<T>) {
let route_stats = {
self.graph.write().unwrap().update(pkt)
};
let route_stats_msg = Arc::new(RouteStatsMsg {
typ: self.typ,
route: route_stats
});
self.stats_mcast.send(route_stats_msg).unwrap();
}
}
#[derive(Clone)]
pub struct ProtoGraphController {
pub cap_tx: Sender<Pkt>,
pub mac: ProtocolHandler<MacAddr>,
pub ip4: ProtocolHandler<IP4Addr>,
pub ip6: ProtocolHandler<IP6Addr>,
}
impl ProtoGraphController {
fn spawn() -> io::Result<ProtoGraphController> {
let (cap_tx, cap_rx) = channel();
let ctl = ProtoGraphController {
cap_tx: cap_tx,
mac: ProtocolHandler::new("mac")?,
ip4: ProtocolHandler::new("ip4")?,
ip6: ProtocolHandler::new("ip6")?,
};
let mut phctl = ctl.clone();
thread::Builder::new().name("protocol_handler".to_owned()).spawn(move || {
loop {
let pkt = cap_rx.recv();
if pkt.is_err() |
match pkt.unwrap() {
Pkt::Mac(ref p) => phctl.mac.update(p),
Pkt::IP4(ref p) => phctl.ip4.update(p),
Pkt::IP6(ref p) => phctl.ip6.update(p),
}
}
})?;
Ok(ctl)
}
fn sender(&self) -> Sender<Pkt> {
self.cap_tx.clone()
}
fn register_mac_listener(&self, s: Sender<Arc<RouteStatsMsg<MacAddr>>>) {
self.mac.stats_mcast.register(s).unwrap();
}
fn register_ip4_listener(&self, s: Sender<Arc<RouteStatsMsg<IP4Addr>>>) {
self.ip4.stats_mcast.register(s).unwrap();
}
fn register_ip6_listener(&self, s: Sender<Arc<RouteStatsMsg<IP6Addr>>>) {
self.ip6.stats_mcast.register(s).unwrap();
}
}
enum ParseErr {
Send,
UnknownPacket
}
impl<T> From<SendError<T>> for ParseErr {
fn from(_: SendError<T>) -> ParseErr {
ParseErr::Send
}
}
trait PktParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr>;
}
pub struct CaptureCtx {
sess: cap::PcapSession,
parser: Box<PktParser+'static>
}
impl CaptureCtx {
fn parse_next(&mut self) {
let p = &mut self.parser;
self.sess.next(|cap| {
match p.parse(cap) {
_ => () //just ignore
}
});
}
}
struct EthernetParser {
pkts: Sender<Pkt>,
}
impl PktParser for EthernetParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> {
let ether_hdr = unsafe { &*(pkt.pkt_ptr() as *const EthernetHeader) };
self.pkts.send(Pkt::Mac(PktMeta::new(ether_hdr.src, ether_hdr.dst, pkt.len())))?;
match ether_hdr.typ {
ETHERTYPE_ARP => {
//io::println("ARP!");
},
ETHERTYPE_IP4 => {
let ipp: &IP4Header = unsafe { skip_cast(ether_hdr) };
self.pkts.send(Pkt::IP4(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?;
},
ETHERTYPE_IP6 => {
let ipp: &IP6Header = unsafe { skip_cast(ether_hdr) };
self.pkts.send(Pkt::IP6(PktMeta::new(ipp.src, ipp.dst, u32::from(ntohs(ipp.len)))))?;
},
ETHERTYPE_802_1X => {
//io::println("802.1X!");
},
_ => {
//println!("Unknown type: {:x}", x);
}
}
Ok(())
}
}
#[derive(Debug)]
pub struct PhysData { // TODO: this name sucks
frame_ty: FrameType,
addrs: [MacAddr; 3],
rate: Option<tap::Rate>,
channel: tap::Channel,
antenna_signal: tap::AntennaSignal,
antenna_noise: tap::AntennaNoise,
antenna: tap::Antenna,
}
impl PhysData {
fn new(frame_ty: FrameType,
addrs: [MacAddr; 3],
rate: Option<tap::Rate>,
channel: tap::Channel,
antenna_signal: tap::AntennaSignal,
antenna_noise: tap::AntennaNoise,
antenna: tap::Antenna,
) -> PhysData {
PhysData {
frame_ty: frame_ty,
addrs: addrs,
rate: rate,
channel: channel,
antenna_signal: antenna_signal,
antenna_noise: antenna_noise,
antenna: antenna
}
}
fn dist(&self) -> f32 {
let freq = f32::from(self.channel.mhz);
let signal = f32::from(self.antenna_signal.dbm);
let exp = (27.55 - (20.0 * freq.log10()) + signal.abs()) / 20.0;
(10.0f32).powf(exp)
}
}
#[derive(PartialEq, Eq, Hash)]
pub struct PhysDataKey(pub FrameType, pub [MacAddr;3]);
pub struct PhysDataVal {
pub dat: FixedRingBuffer<PhysData>,
pub count: u32,
}
impl PhysDataVal {
pub fn new() -> PhysDataVal {
PhysDataVal {
dat: FixedRingBuffer::new(10),
count: 0
}
}
pub fn avg_dist(&self) -> f32 {
let mut s = 0.0;
for pd in self.dat.iter() {
s += pd.dist();
}
s / (self.dat.len() as f32)
}
}
#[derive(Clone)]
pub struct PhysDataController {
pub map: Arc<RwLock<HashMap<PhysDataKey, PhysDataVal>>>,
pd_tx: Sender<PhysData>
}
impl PhysDataController {
fn spawn() -> io::Result<PhysDataController> {
let (pd_tx, pd_rx) = channel();
let out = PhysDataController {
pd_tx: pd_tx,
map: Arc::new(RwLock::new(HashMap::new()))
};
let ctl = out.clone();
thread::Builder::new().name("physdata_handler".to_owned()).spawn(move || {
loop {
let res = pd_rx.recv();
if res.is_err() {
break
}
let pd = res.unwrap();
match ctl.map.write().unwrap().entry(PhysDataKey(pd.frame_ty, pd.addrs)) {
Entry::Occupied(mut e) => {
let mut pdc = e.get_mut();
pdc.dat.push(pd);
pdc.count += 1;
}
Entry::Vacant(e) => {
let mut pdc = PhysDataVal::new();
pdc.dat.push(pd);
pdc.count += 1;
e.insert(pdc);
}
};
}
})?;
Ok(out)
}
fn sender(&self) -> Sender<PhysData> {
self.pd_tx.clone()
}
}
struct RadiotapParser {
pkts: Sender<Pkt>,
phys: Sender<PhysData>
}
impl RadiotapParser {
fn parse_known_headers(&self,
frame_ty: FrameType,
addrs: [MacAddr; 3],
tap_hdr: &tap::RadiotapHeader) {
match tap_hdr.it_present {
tap::ItPresent::COMMON_A => {
if let Some(vals) = tap::CommonA::parse(tap_hdr) {
self.phys.send(PhysData::new(
frame_ty,
addrs,
Some(vals.rate),
vals.channel,
vals.antenna_signal,
vals.antenna_noise,
vals.antenna
)).unwrap();
}
},
tap::ItPresent::COMMON_B => {
if let Some(vals) = tap::CommonB::parse(tap_hdr) {
self.phys.send(PhysData::new(
frame_ty,
addrs,
None,
vals.channel,
vals.antenna_signal,
vals.antenna_noise,
vals.antenna
)).unwrap();
}
},
_ => {} //Unknown header
}
}
}
impl PktParser for RadiotapParser {
fn parse(&mut self, pkt: &cap::PcapData) -> Result<(), ParseErr> {
fn magic<U>(pkt: &tap::RadiotapHeader) -> &U {
unsafe { skip_bytes_cast(pkt, pkt.it_len as isize) }
}
let tap_hdr = unsafe { &*(pkt.pkt_ptr() as *const tap::RadiotapHeader) };
let base: &dot11::Dot11BaseHeader = magic(tap_hdr);
let fc = &base.fr_ctrl;
if fc.protocol_version() != 0 {
// bogus packet, bail
return Err(ParseErr::UnknownPacket);
}
match fc.frame_type() {
ft @ FrameType::Management => {
let mgt: &dot11::ManagementFrameHeader = magic(tap_hdr);
self.parse_known_headers(ft, [mgt.addr1, mgt.addr2, mgt.addr3], tap_hdr);
}
ft @ FrameType::Data => {
let data: &dot11::DataFrameHeader = magic(tap_hdr);
//TODO: get length
self.pkts.send(Pkt::Mac(PktMeta::new(data.addr1, data.addr2, 1)))?;
self.parse_known_headers(ft, [data.addr1, data.addr2, data.addr3], tap_hdr);
}
FrameType::Control | FrameType::Unknown => {
//println!("Unknown frame type");
}
}
Ok(())
}
}
pub fn init_capture(conf: &D3capConf,
pkt_sender: Sender<Pkt>,
pd_sender: Sender<PhysData>) -> CaptureCtx {
let sess = match conf.file {
Some(ref f) => cap::PcapSession::from_file(f),
None => {
println!("No session file");
let sess_builder = match conf.interface {
Some(ref dev) => cap::PcapSessionBuilder::new_dev(dev),
None => cap::PcapSessionBuilder::new()
};
sess_builder.unwrap()
.buffer_size(0xFFFF)
.timeout(1000)
.promisc(conf.promisc)
.rfmon(conf.monitor)
.activate()
}
};
let parser = match sess.datalink() {
cap::DLT_ETHERNET => {
Box::new(EthernetParser { pkts: pkt_sender }) as Box<PktParser>
}
cap::DLT_IEEE802_11_RADIO => {
Box::new(RadiotapParser { pkts: pkt_sender, phys: pd_sender }) as Box<PktParser>
}
x => panic!("unsupported datalink type: {}", x)
};
CaptureCtx { sess: sess, parser: parser }
}
pub fn start_capture(conf: D3capConf,
pkt_sender: Sender<Pkt>,
pd_sender: Sender<PhysData>) -> io::Result<JoinHandle<()>> {
thread::Builder::new().name("packet_capture".to_owned()).spawn(move || {
let mut cap = init_capture(&conf, pkt_sender, pd_sender);
loop {
cap.parse_next();
}
})
}
enum LoadMacError {
IOError(io::Error),
TomlError(Option<toml::de::Error>)
}
impl From<io::Error> for LoadMacError {
fn from(err: io::Error) -> LoadMacError {
LoadMacError::IOError(err)
}
}
impl From<toml::de::Error> for LoadMacError {
fn from(err: toml::de::Error) -> LoadMacError { LoadMacError::TomlError(Some(err)) }
}
fn load_mac_addrs(file: &str) -> Result<HashMap<MacAddr, String>, LoadMacError> {
let mut s = String::new();
let mut f = File::open(&file)?;
f.read_to_string(&mut s)?;
let t = s.parse::<toml::Value>()?;
if let Some(k) = t.get(&"known-macs".to_owned()) {
if let Some(tbl) = k.as_table() {
return Ok(tbl.iter()
.map(|(k, v)| (MacAddr::from_string(k), v.as_str()))
.filter_map(|x| match x {
(Some(addr), Some(alias)) => Some((addr, alias.to_owned())),
_ => None
})
.collect())
}
}
Err(LoadMacError::TomlError(None))
}
fn start_websocket(port: u16, mac_map: &MacMap, pg_ctl: &ProtoGraphController) -> io::Result<()> {
let ui = UIServer::spawn(port, mac_map)?;
pg_ctl.register_mac_listener(ui.create_sender()?);
pg_ctl.register_ip4_listener(ui.create_sender()?);
pg_ctl.register_ip6_listener(ui.create_sender()?);
Ok(())
}
pub type MacMap = HashMap<MacAddr, String>;
pub type IP4Map = HashMap<IP4Addr, String>;
pub type IP6Map = HashMap<IP6Addr, String>;
#[derive(Clone)]
pub struct D3capController {
pub pg_ctrl: ProtoGraphController,
pub pd_ctrl: PhysDataController,
pub mac_names: MacMap,
pub ip4_names: IP4Map,
pub ip6_names: IP6Map,
pub server_started: bool
}
impl D3capController {
pub fn spawn(conf: D3capConf) -> io::Result<D3capController> {
let mac_names = conf.conf.as_ref()
.map_or_else(HashMap::new, |x| {
load_mac_addrs(x).unwrap_or_else(|_| HashMap::new())
});
let ip4_names = HashMap::new();
let ip6_names = HashMap::new();
let pg_ctrl = ProtoGraphController::spawn()?;
let pd_ctrl = PhysDataController::spawn()?;
start_capture(conf, pg_ctrl.sender(), pd_ctrl.sender()).unwrap();
Ok(D3capController {
pg_ctrl: pg_ctrl,
pd_ctrl: pd_ctrl,
mac_names: mac_names,
ip4_names: ip4_names,
ip6_names: ip6_names,
server_started: false
})
}
pub fn start_websocket(&mut self, port: u16) -> io::Result<()> {
if self.server_started {
println!("server already started");
} else {
start_websocket(port, &self.mac_names, &self.pg_ctrl)?;
self.server_started = true;
}
Ok(())
}
}
#[derive(Clone, Debug)]
pub struct D3capConf {
pub websocket: Option<u16>,
pub interface: Option<String>,
pub file: Option<String>,
pub conf: Option<String>,
pub promisc: bool,
pub monitor: bool
}
| {
break
} | conditional_block |
mod.rs | //! This module implements the global `Function` object as well as creates Native Functions.
//!
//! Objects wrap `Function`s and expose them via call/construct slots.
//!
//! `The `Function` object is used for matching text with a pattern.
//!
//! More information:
//! - [ECMAScript reference][spec]
//! - [MDN documentation][mdn]
//!
//! [spec]: https://tc39.es/ecma262/#sec-function-objects
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function
use std::{
fmt,
ops::{Deref, DerefMut},
};
use dyn_clone::DynClone;
use crate::{
builtins::BuiltIn,
context::StandardObjects,
environment::lexical_environment::Environment,
gc::{Finalize, Trace},
object::JsObject,
object::{
internal_methods::get_prototype_from_constructor, ConstructorBuilder, FunctionBuilder,
NativeObject, ObjectData,
},
property::Attribute,
property::PropertyDescriptor,
syntax::ast::node::{FormalParameter, RcStatementList},
BoaProfiler, Context, JsResult, JsValue,
};
use super::JsArgs;
pub(crate) mod arguments;
#[cfg(test)]
mod tests;
/// Type representing a native built-in function a.k.a. function pointer.
///
/// Native functions need to have this signature in order to
/// be callable from Javascript.
pub type NativeFunctionSignature = fn(&JsValue, &[JsValue], &mut Context) -> JsResult<JsValue>;
// Allows restricting closures to only `Copy` ones.
// Used the sealed pattern to disallow external implementations
// of `DynCopy`.
mod sealed {
pub trait Sealed {}
impl<T: Copy> Sealed for T {}
}
pub trait DynCopy: sealed::Sealed {}
impl<T: Copy> DynCopy for T {}
/// Trait representing a native built-in closure.
///
/// Closures need to have this signature in order to
/// be callable from Javascript, but most of the time the compiler
/// is smart enough to correctly infer the types.
pub trait ClosureFunctionSignature:
Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + DynCopy + DynClone + 'static
{
}
// The `Copy` bound automatically infers `DynCopy` and `DynClone`
impl<T> ClosureFunctionSignature for T where
T: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + Copy + 'static
{
}
// Allows cloning Box<dyn ClosureFunctionSignature>
dyn_clone::clone_trait_object!(ClosureFunctionSignature);
#[derive(Debug, Trace, Finalize, PartialEq, Clone)]
pub enum ThisMode {
Lexical,
Strict,
Global,
}
impl ThisMode {
/// Returns `true` if the this mode is `Lexical`.
pub fn is_lexical(&self) -> bool {
matches!(self, Self::Lexical)
}
/// Returns `true` if the this mode is `Strict`.
pub fn is_strict(&self) -> bool {
matches!(self, Self::Strict)
}
/// Returns `true` if the this mode is `Global`.
pub fn is_global(&self) -> bool |
}
#[derive(Debug, Trace, Finalize, PartialEq, Clone)]
pub enum ConstructorKind {
Base,
Derived,
}
impl ConstructorKind {
/// Returns `true` if the constructor kind is `Base`.
pub fn is_base(&self) -> bool {
matches!(self, Self::Base)
}
/// Returns `true` if the constructor kind is `Derived`.
pub fn is_derived(&self) -> bool {
matches!(self, Self::Derived)
}
}
// We don't use a standalone `NativeObject` for `Captures` because it doesn't
// guarantee that the internal type implements `Clone`.
// This private trait guarantees that the internal type passed to `Captures`
// implements `Clone`, and `DynClone` allows us to implement `Clone` for
// `Box<dyn CapturesObject>`.
trait CapturesObject: NativeObject + DynClone {}
impl<T: NativeObject + Clone> CapturesObject for T {}
dyn_clone::clone_trait_object!(CapturesObject);
/// Wrapper for `Box<dyn NativeObject + Clone>` that allows passing additional
/// captures through a `Copy` closure.
///
/// Any type implementing `Trace + Any + Debug + Clone`
/// can be used as a capture context, so you can pass e.g. a String,
/// a tuple or even a full struct.
///
/// You can downcast to any type and handle the fail case as you like
/// with `downcast_ref` and `downcast_mut`, or you can use `try_downcast_ref`
/// and `try_downcast_mut` to automatically throw a `TypeError` if the downcast
/// fails.
#[derive(Debug, Clone, Trace, Finalize)]
pub struct Captures(Box<dyn CapturesObject>);
impl Captures {
/// Creates a new capture context.
pub(crate) fn new<T>(captures: T) -> Self
where
T: NativeObject + Clone,
{
Self(Box::new(captures))
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or `None` otherwise.
pub fn downcast_ref<T>(&self) -> Option<&T>
where
T: NativeObject + Clone,
{
self.0.deref().as_any().downcast_ref::<T>()
}
/// Mutably downcasts `Captures` to the specified type, returning a
/// mutable reference to the downcasted type if successful or `None` otherwise.
pub fn downcast_mut<T>(&mut self) -> Option<&mut T>
where
T: NativeObject + Clone,
{
self.0.deref_mut().as_mut_any().downcast_mut::<T>()
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or a `TypeError` otherwise.
pub fn try_downcast_ref<T>(&self, context: &mut Context) -> JsResult<&T>
where
T: NativeObject + Clone,
{
self.0
.deref()
.as_any()
.downcast_ref::<T>()
.ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type"))
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or a `TypeError` otherwise.
pub fn try_downcast_mut<T>(&mut self, context: &mut Context) -> JsResult<&mut T>
where
T: NativeObject + Clone,
{
self.0
.deref_mut()
.as_mut_any()
.downcast_mut::<T>()
.ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type"))
}
}
/// Boa representation of a Function Object.
///
/// FunctionBody is specific to this interpreter, it will either be Rust code or JavaScript code (AST Node)
///
/// <https://tc39.es/ecma262/#sec-ecmascript-function-objects>
#[derive(Clone, Trace, Finalize)]
pub enum Function {
Native {
#[unsafe_ignore_trace]
function: NativeFunctionSignature,
constructable: bool,
},
Closure {
#[unsafe_ignore_trace]
function: Box<dyn ClosureFunctionSignature>,
constructable: bool,
captures: Captures,
},
Ordinary {
constructable: bool,
this_mode: ThisMode,
body: RcStatementList,
params: Box<[FormalParameter]>,
environment: Environment,
},
#[cfg(feature = "vm")]
VmOrdinary {
code: gc::Gc<crate::vm::CodeBlock>,
environment: Environment,
},
}
impl fmt::Debug for Function {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Function {{ ... }}")
}
}
impl Function {
// Adds the final rest parameters to the Environment as an array
#[cfg(not(feature = "vm"))]
pub(crate) fn add_rest_param(
param: &FormalParameter,
index: usize,
args_list: &[JsValue],
context: &mut Context,
local_env: &Environment,
) {
use crate::builtins::Array;
// Create array of values
let array = Array::new_array(context);
Array::add_to_array_object(&array, args_list.get(index..).unwrap_or_default(), context)
.unwrap();
// Create binding
local_env
// Function parameters can share names in JavaScript...
.create_mutable_binding(param.name(), false, true, context)
.expect("Failed to create binding for rest param");
// Set Binding to value
local_env
.initialize_binding(param.name(), array, context)
.expect("Failed to initialize rest param");
}
// Adds an argument to the environment
pub(crate) fn add_arguments_to_environment(
param: &FormalParameter,
value: JsValue,
local_env: &Environment,
context: &mut Context,
) {
// Create binding
local_env
.create_mutable_binding(param.name(), false, true, context)
.expect("Failed to create binding");
// Set Binding to value
local_env
.initialize_binding(param.name(), value, context)
.expect("Failed to intialize binding");
}
/// Returns true if the function object is constructable.
pub fn is_constructable(&self) -> bool {
match self {
Self::Native { constructable, .. } => *constructable,
Self::Closure { constructable, .. } => *constructable,
Self::Ordinary { constructable, .. } => *constructable,
#[cfg(feature = "vm")]
Self::VmOrdinary { code, .. } => code.constructable,
}
}
}
/// Creates a new member function of a `Object` or `prototype`.
///
/// A function registered using this macro can then be called from Javascript using:
///
/// parent.name()
///
/// See the javascript 'Number.toString()' as an example.
///
/// # Arguments
/// function: The function to register as a built in function.
/// name: The name of the function (how it will be called but without the ()).
/// parent: The object to register the function on, if the global object is used then the function is instead called as name()
/// without requiring the parent, see parseInt() as an example.
/// length: As described at <https://tc39.es/ecma262/#sec-function-instances-length>, The value of the "length" property is an integer that
/// indicates the typical number of arguments expected by the function. However, the language permits the function to be invoked with
/// some other number of arguments.
///
/// If no length is provided, the length will be set to 0.
// TODO: deprecate/remove this.
pub(crate) fn make_builtin_fn<N>(
function: NativeFunctionSignature,
name: N,
parent: &JsObject,
length: usize,
interpreter: &Context,
) where
N: Into<String>,
{
let name = name.into();
let _timer = BoaProfiler::global().start_event(&format!("make_builtin_fn: {}", &name), "init");
let function = JsObject::from_proto_and_data(
interpreter.standard_objects().function_object().prototype(),
ObjectData::function(Function::Native {
function,
constructable: false,
}),
);
let attribute = PropertyDescriptor::builder()
.writable(false)
.enumerable(false)
.configurable(true);
function.insert_property("length", attribute.clone().value(length));
function.insert_property("name", attribute.value(name.as_str()));
parent.clone().insert_property(
name,
PropertyDescriptor::builder()
.value(function)
.writable(true)
.enumerable(false)
.configurable(true),
);
}
#[derive(Debug, Clone, Copy)]
pub struct BuiltInFunctionObject;
impl BuiltInFunctionObject {
pub const LENGTH: usize = 1;
fn constructor(
new_target: &JsValue,
_: &[JsValue],
context: &mut Context,
) -> JsResult<JsValue> {
let prototype =
get_prototype_from_constructor(new_target, StandardObjects::function_object, context)?;
let this = JsObject::from_proto_and_data(
prototype,
ObjectData::function(Function::Native {
function: |_, _, _| Ok(JsValue::undefined()),
constructable: true,
}),
);
Ok(this.into())
}
fn prototype(_: &JsValue, _: &[JsValue], _: &mut Context) -> JsResult<JsValue> {
Ok(JsValue::undefined())
}
/// `Function.prototype.call`
///
/// The call() method invokes self with the first argument as the `this` value.
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-function.prototype.call
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
fn call(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
if !this.is_function() {
return context.throw_type_error(format!("{} is not a function", this.display()));
}
let this_arg = args.get_or_undefined(0);
// TODO?: 3. Perform PrepareForTailCall
let start = if !args.is_empty() { 1 } else { 0 };
context.call(this, this_arg, &args[start..])
}
/// `Function.prototype.apply`
///
/// The apply() method invokes self with the first argument as the `this` value
/// and the rest of the arguments provided as an array (or an array-like object).
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-function.prototype.apply
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/apply
fn apply(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
if !this.is_function() {
return context.throw_type_error(format!("{} is not a function", this.display()));
}
let this_arg = args.get_or_undefined(0);
let arg_array = args.get_or_undefined(1);
if arg_array.is_null_or_undefined() {
// TODO?: 3.a. PrepareForTailCall
return context.call(this, this_arg, &[]);
}
let arg_list = arg_array.create_list_from_array_like(&[], context)?;
// TODO?: 5. PrepareForTailCall
context.call(this, this_arg, &arg_list)
}
#[allow(clippy::wrong_self_convention)]
fn to_string(this: &JsValue, _: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
let name = {
// Is there a case here where if there is no name field on a value
// name should default to None? Do all functions have names set?
let value = this.get_field("name", &mut *context)?;
if value.is_null_or_undefined() {
None
} else {
Some(value.to_string(context)?)
}
};
let function = {
let object = this
.as_object()
.map(|object| object.borrow().as_function().cloned());
if let Some(Some(function)) = object {
function
} else {
return context.throw_type_error("Not a function");
}
};
match (&function, name) {
(
Function::Native {
function: _,
constructable: _,
},
Some(name),
) => Ok(format!("function {}() {{\n [native Code]\n}}", &name).into()),
(Function::Ordinary { body, params, .. }, Some(name)) => {
let arguments: String = params
.iter()
.map(|param| param.name())
.collect::<Vec<&str>>()
.join(", ");
let statement_list = &*body;
// This is a kluge. The implementaion in browser seems to suggest that
// the value here is printed exactly as defined in source. I'm not sure if
// that's possible here, but for now here's a dumb heuristic that prints functions
let is_multiline = {
let value = statement_list.to_string();
value.lines().count() > 1
};
if is_multiline {
Ok(
// ?? For some reason statement_list string implementation
// sticks a \n at the end no matter what
format!(
"{}({}) {{\n{}}}",
&name,
arguments,
statement_list.to_string()
)
.into(),
)
} else {
Ok(format!(
"{}({}) {{{}}}",
&name,
arguments,
// The trim here is to remove a \n stuck at the end
// of the statement_list to_string method
statement_list.to_string().trim()
)
.into())
}
}
_ => Ok("TODO".into()),
}
}
}
impl BuiltIn for BuiltInFunctionObject {
const NAME: &'static str = "Function";
const ATTRIBUTE: Attribute = Attribute::WRITABLE
.union(Attribute::NON_ENUMERABLE)
.union(Attribute::CONFIGURABLE);
fn init(context: &mut Context) -> JsValue {
let _timer = BoaProfiler::global().start_event("function", "init");
let function_prototype = context.standard_objects().function_object().prototype();
FunctionBuilder::native(context, Self::prototype)
.name("")
.length(0)
.constructable(false)
.build_function_prototype(&function_prototype);
let function_object = ConstructorBuilder::with_standard_object(
context,
Self::constructor,
context.standard_objects().function_object().clone(),
)
.name(Self::NAME)
.length(Self::LENGTH)
.method(Self::call, "call", 1)
.method(Self::apply, "apply", 1)
.method(Self::to_string, "toString", 0)
.build();
function_object.into()
}
}
| {
matches!(self, Self::Global)
} | identifier_body |
mod.rs | //! This module implements the global `Function` object as well as creates Native Functions.
//!
//! Objects wrap `Function`s and expose them via call/construct slots.
//!
//! `The `Function` object is used for matching text with a pattern.
//!
//! More information:
//! - [ECMAScript reference][spec]
//! - [MDN documentation][mdn]
//!
//! [spec]: https://tc39.es/ecma262/#sec-function-objects
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function
use std::{
fmt,
ops::{Deref, DerefMut},
};
use dyn_clone::DynClone;
use crate::{
builtins::BuiltIn,
context::StandardObjects,
environment::lexical_environment::Environment,
gc::{Finalize, Trace},
object::JsObject,
object::{
internal_methods::get_prototype_from_constructor, ConstructorBuilder, FunctionBuilder,
NativeObject, ObjectData,
},
property::Attribute,
property::PropertyDescriptor,
syntax::ast::node::{FormalParameter, RcStatementList},
BoaProfiler, Context, JsResult, JsValue,
};
use super::JsArgs;
pub(crate) mod arguments;
#[cfg(test)]
mod tests;
/// Type representing a native built-in function a.k.a. function pointer.
///
/// Native functions need to have this signature in order to
/// be callable from Javascript.
pub type NativeFunctionSignature = fn(&JsValue, &[JsValue], &mut Context) -> JsResult<JsValue>;
// Allows restricting closures to only `Copy` ones.
// Used the sealed pattern to disallow external implementations
// of `DynCopy`.
mod sealed {
pub trait Sealed {}
impl<T: Copy> Sealed for T {}
}
pub trait DynCopy: sealed::Sealed {}
impl<T: Copy> DynCopy for T {}
/// Trait representing a native built-in closure.
///
/// Closures need to have this signature in order to
/// be callable from Javascript, but most of the time the compiler
/// is smart enough to correctly infer the types.
pub trait ClosureFunctionSignature:
Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + DynCopy + DynClone + 'static
{
}
// The `Copy` bound automatically infers `DynCopy` and `DynClone`
impl<T> ClosureFunctionSignature for T where
T: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + Copy + 'static
{
}
// Allows cloning Box<dyn ClosureFunctionSignature>
dyn_clone::clone_trait_object!(ClosureFunctionSignature);
#[derive(Debug, Trace, Finalize, PartialEq, Clone)]
pub enum ThisMode {
Lexical,
Strict,
Global,
}
impl ThisMode {
/// Returns `true` if the this mode is `Lexical`.
pub fn is_lexical(&self) -> bool {
matches!(self, Self::Lexical)
}
/// Returns `true` if the this mode is `Strict`.
pub fn is_strict(&self) -> bool {
matches!(self, Self::Strict)
}
/// Returns `true` if the this mode is `Global`.
pub fn is_global(&self) -> bool {
matches!(self, Self::Global)
}
}
#[derive(Debug, Trace, Finalize, PartialEq, Clone)]
pub enum ConstructorKind {
Base,
Derived,
}
impl ConstructorKind {
/// Returns `true` if the constructor kind is `Base`.
pub fn is_base(&self) -> bool {
matches!(self, Self::Base)
}
/// Returns `true` if the constructor kind is `Derived`.
pub fn is_derived(&self) -> bool {
matches!(self, Self::Derived)
}
}
// We don't use a standalone `NativeObject` for `Captures` because it doesn't
// guarantee that the internal type implements `Clone`.
// This private trait guarantees that the internal type passed to `Captures`
// implements `Clone`, and `DynClone` allows us to implement `Clone` for
// `Box<dyn CapturesObject>`.
trait CapturesObject: NativeObject + DynClone {}
impl<T: NativeObject + Clone> CapturesObject for T {}
dyn_clone::clone_trait_object!(CapturesObject);
/// Wrapper for `Box<dyn NativeObject + Clone>` that allows passing additional
/// captures through a `Copy` closure.
///
/// Any type implementing `Trace + Any + Debug + Clone`
/// can be used as a capture context, so you can pass e.g. a String,
/// a tuple or even a full struct.
///
/// You can downcast to any type and handle the fail case as you like
/// with `downcast_ref` and `downcast_mut`, or you can use `try_downcast_ref`
/// and `try_downcast_mut` to automatically throw a `TypeError` if the downcast
/// fails.
#[derive(Debug, Clone, Trace, Finalize)]
pub struct Captures(Box<dyn CapturesObject>);
impl Captures {
/// Creates a new capture context.
pub(crate) fn new<T>(captures: T) -> Self
where
T: NativeObject + Clone,
{
Self(Box::new(captures))
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or `None` otherwise.
pub fn downcast_ref<T>(&self) -> Option<&T>
where
T: NativeObject + Clone,
{
self.0.deref().as_any().downcast_ref::<T>()
}
/// Mutably downcasts `Captures` to the specified type, returning a
/// mutable reference to the downcasted type if successful or `None` otherwise.
pub fn downcast_mut<T>(&mut self) -> Option<&mut T>
where
T: NativeObject + Clone,
{
self.0.deref_mut().as_mut_any().downcast_mut::<T>()
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or a `TypeError` otherwise.
pub fn try_downcast_ref<T>(&self, context: &mut Context) -> JsResult<&T>
where
T: NativeObject + Clone,
{
self.0
.deref()
.as_any()
.downcast_ref::<T>()
.ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type"))
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or a `TypeError` otherwise.
pub fn try_downcast_mut<T>(&mut self, context: &mut Context) -> JsResult<&mut T>
where
T: NativeObject + Clone,
{
self.0
.deref_mut()
.as_mut_any()
.downcast_mut::<T>()
.ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type"))
}
}
/// Boa representation of a Function Object.
///
/// FunctionBody is specific to this interpreter, it will either be Rust code or JavaScript code (AST Node)
///
/// <https://tc39.es/ecma262/#sec-ecmascript-function-objects>
#[derive(Clone, Trace, Finalize)]
pub enum Function {
Native {
#[unsafe_ignore_trace]
function: NativeFunctionSignature,
constructable: bool,
},
Closure {
#[unsafe_ignore_trace]
function: Box<dyn ClosureFunctionSignature>,
constructable: bool,
captures: Captures,
},
Ordinary {
constructable: bool,
this_mode: ThisMode,
body: RcStatementList,
params: Box<[FormalParameter]>,
environment: Environment,
},
#[cfg(feature = "vm")]
VmOrdinary {
code: gc::Gc<crate::vm::CodeBlock>,
environment: Environment,
},
}
impl fmt::Debug for Function {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Function {{ ... }}")
}
}
impl Function {
// Adds the final rest parameters to the Environment as an array
#[cfg(not(feature = "vm"))]
pub(crate) fn add_rest_param(
param: &FormalParameter,
index: usize,
args_list: &[JsValue],
context: &mut Context,
local_env: &Environment,
) {
use crate::builtins::Array;
// Create array of values
let array = Array::new_array(context);
Array::add_to_array_object(&array, args_list.get(index..).unwrap_or_default(), context)
.unwrap();
// Create binding
local_env
// Function parameters can share names in JavaScript...
.create_mutable_binding(param.name(), false, true, context)
.expect("Failed to create binding for rest param");
// Set Binding to value
local_env
.initialize_binding(param.name(), array, context)
.expect("Failed to initialize rest param");
}
// Adds an argument to the environment
pub(crate) fn add_arguments_to_environment(
param: &FormalParameter,
value: JsValue,
local_env: &Environment,
context: &mut Context,
) {
// Create binding
local_env
.create_mutable_binding(param.name(), false, true, context)
.expect("Failed to create binding");
// Set Binding to value
local_env
.initialize_binding(param.name(), value, context)
.expect("Failed to intialize binding");
}
/// Returns true if the function object is constructable.
pub fn is_constructable(&self) -> bool {
match self {
Self::Native { constructable, .. } => *constructable,
Self::Closure { constructable, .. } => *constructable,
Self::Ordinary { constructable, .. } => *constructable,
#[cfg(feature = "vm")]
Self::VmOrdinary { code, .. } => code.constructable,
}
}
}
/// Creates a new member function of a `Object` or `prototype`.
///
/// A function registered using this macro can then be called from Javascript using:
///
/// parent.name()
///
/// See the javascript 'Number.toString()' as an example.
///
/// # Arguments
/// function: The function to register as a built in function.
/// name: The name of the function (how it will be called but without the ()).
/// parent: The object to register the function on, if the global object is used then the function is instead called as name()
/// without requiring the parent, see parseInt() as an example.
/// length: As described at <https://tc39.es/ecma262/#sec-function-instances-length>, The value of the "length" property is an integer that
/// indicates the typical number of arguments expected by the function. However, the language permits the function to be invoked with
/// some other number of arguments.
///
/// If no length is provided, the length will be set to 0.
// TODO: deprecate/remove this.
pub(crate) fn make_builtin_fn<N>(
function: NativeFunctionSignature,
name: N,
parent: &JsObject,
length: usize,
interpreter: &Context,
) where
N: Into<String>,
{
let name = name.into();
let _timer = BoaProfiler::global().start_event(&format!("make_builtin_fn: {}", &name), "init");
let function = JsObject::from_proto_and_data(
interpreter.standard_objects().function_object().prototype(),
ObjectData::function(Function::Native {
function,
constructable: false,
}),
);
let attribute = PropertyDescriptor::builder()
.writable(false)
.enumerable(false)
.configurable(true);
function.insert_property("length", attribute.clone().value(length));
function.insert_property("name", attribute.value(name.as_str()));
parent.clone().insert_property(
name,
PropertyDescriptor::builder()
.value(function)
.writable(true)
.enumerable(false)
.configurable(true),
);
}
#[derive(Debug, Clone, Copy)]
pub struct BuiltInFunctionObject;
impl BuiltInFunctionObject {
pub const LENGTH: usize = 1;
fn constructor(
new_target: &JsValue,
_: &[JsValue],
context: &mut Context,
) -> JsResult<JsValue> {
let prototype =
get_prototype_from_constructor(new_target, StandardObjects::function_object, context)?;
let this = JsObject::from_proto_and_data(
prototype,
ObjectData::function(Function::Native {
function: |_, _, _| Ok(JsValue::undefined()),
constructable: true,
}),
);
Ok(this.into())
}
fn | (_: &JsValue, _: &[JsValue], _: &mut Context) -> JsResult<JsValue> {
Ok(JsValue::undefined())
}
/// `Function.prototype.call`
///
/// The call() method invokes self with the first argument as the `this` value.
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-function.prototype.call
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
fn call(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
if !this.is_function() {
return context.throw_type_error(format!("{} is not a function", this.display()));
}
let this_arg = args.get_or_undefined(0);
// TODO?: 3. Perform PrepareForTailCall
let start = if !args.is_empty() { 1 } else { 0 };
context.call(this, this_arg, &args[start..])
}
/// `Function.prototype.apply`
///
/// The apply() method invokes self with the first argument as the `this` value
/// and the rest of the arguments provided as an array (or an array-like object).
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-function.prototype.apply
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/apply
fn apply(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
if !this.is_function() {
return context.throw_type_error(format!("{} is not a function", this.display()));
}
let this_arg = args.get_or_undefined(0);
let arg_array = args.get_or_undefined(1);
if arg_array.is_null_or_undefined() {
// TODO?: 3.a. PrepareForTailCall
return context.call(this, this_arg, &[]);
}
let arg_list = arg_array.create_list_from_array_like(&[], context)?;
// TODO?: 5. PrepareForTailCall
context.call(this, this_arg, &arg_list)
}
#[allow(clippy::wrong_self_convention)]
fn to_string(this: &JsValue, _: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
let name = {
// Is there a case here where if there is no name field on a value
// name should default to None? Do all functions have names set?
let value = this.get_field("name", &mut *context)?;
if value.is_null_or_undefined() {
None
} else {
Some(value.to_string(context)?)
}
};
let function = {
let object = this
.as_object()
.map(|object| object.borrow().as_function().cloned());
if let Some(Some(function)) = object {
function
} else {
return context.throw_type_error("Not a function");
}
};
match (&function, name) {
(
Function::Native {
function: _,
constructable: _,
},
Some(name),
) => Ok(format!("function {}() {{\n [native Code]\n}}", &name).into()),
(Function::Ordinary { body, params, .. }, Some(name)) => {
let arguments: String = params
.iter()
.map(|param| param.name())
.collect::<Vec<&str>>()
.join(", ");
let statement_list = &*body;
// This is a kluge. The implementaion in browser seems to suggest that
// the value here is printed exactly as defined in source. I'm not sure if
// that's possible here, but for now here's a dumb heuristic that prints functions
let is_multiline = {
let value = statement_list.to_string();
value.lines().count() > 1
};
if is_multiline {
Ok(
// ?? For some reason statement_list string implementation
// sticks a \n at the end no matter what
format!(
"{}({}) {{\n{}}}",
&name,
arguments,
statement_list.to_string()
)
.into(),
)
} else {
Ok(format!(
"{}({}) {{{}}}",
&name,
arguments,
// The trim here is to remove a \n stuck at the end
// of the statement_list to_string method
statement_list.to_string().trim()
)
.into())
}
}
_ => Ok("TODO".into()),
}
}
}
impl BuiltIn for BuiltInFunctionObject {
const NAME: &'static str = "Function";
const ATTRIBUTE: Attribute = Attribute::WRITABLE
.union(Attribute::NON_ENUMERABLE)
.union(Attribute::CONFIGURABLE);
fn init(context: &mut Context) -> JsValue {
let _timer = BoaProfiler::global().start_event("function", "init");
let function_prototype = context.standard_objects().function_object().prototype();
FunctionBuilder::native(context, Self::prototype)
.name("")
.length(0)
.constructable(false)
.build_function_prototype(&function_prototype);
let function_object = ConstructorBuilder::with_standard_object(
context,
Self::constructor,
context.standard_objects().function_object().clone(),
)
.name(Self::NAME)
.length(Self::LENGTH)
.method(Self::call, "call", 1)
.method(Self::apply, "apply", 1)
.method(Self::to_string, "toString", 0)
.build();
function_object.into()
}
}
| prototype | identifier_name |
mod.rs | //! This module implements the global `Function` object as well as creates Native Functions.
//!
//! Objects wrap `Function`s and expose them via call/construct slots.
//!
//! `The `Function` object is used for matching text with a pattern.
//!
//! More information:
//! - [ECMAScript reference][spec]
//! - [MDN documentation][mdn]
//!
//! [spec]: https://tc39.es/ecma262/#sec-function-objects
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function
use std::{
fmt,
ops::{Deref, DerefMut},
};
use dyn_clone::DynClone;
use crate::{
builtins::BuiltIn,
context::StandardObjects,
environment::lexical_environment::Environment,
gc::{Finalize, Trace},
object::JsObject,
object::{
internal_methods::get_prototype_from_constructor, ConstructorBuilder, FunctionBuilder,
NativeObject, ObjectData,
},
property::Attribute,
property::PropertyDescriptor,
syntax::ast::node::{FormalParameter, RcStatementList},
BoaProfiler, Context, JsResult, JsValue,
};
use super::JsArgs;
pub(crate) mod arguments;
#[cfg(test)]
mod tests;
/// Type representing a native built-in function a.k.a. function pointer.
///
/// Native functions need to have this signature in order to
/// be callable from Javascript.
pub type NativeFunctionSignature = fn(&JsValue, &[JsValue], &mut Context) -> JsResult<JsValue>;
// Allows restricting closures to only `Copy` ones.
// Used the sealed pattern to disallow external implementations
// of `DynCopy`.
mod sealed {
pub trait Sealed {}
impl<T: Copy> Sealed for T {}
}
pub trait DynCopy: sealed::Sealed {}
impl<T: Copy> DynCopy for T {}
/// Trait representing a native built-in closure.
///
/// Closures need to have this signature in order to
/// be callable from Javascript, but most of the time the compiler
/// is smart enough to correctly infer the types.
pub trait ClosureFunctionSignature:
Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + DynCopy + DynClone + 'static
{
}
// The `Copy` bound automatically infers `DynCopy` and `DynClone`
impl<T> ClosureFunctionSignature for T where
T: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + Copy + 'static
{
}
// Allows cloning Box<dyn ClosureFunctionSignature>
dyn_clone::clone_trait_object!(ClosureFunctionSignature);
#[derive(Debug, Trace, Finalize, PartialEq, Clone)]
pub enum ThisMode {
Lexical,
Strict,
Global,
}
impl ThisMode {
/// Returns `true` if the this mode is `Lexical`.
pub fn is_lexical(&self) -> bool {
matches!(self, Self::Lexical)
}
/// Returns `true` if the this mode is `Strict`.
pub fn is_strict(&self) -> bool {
matches!(self, Self::Strict)
}
/// Returns `true` if the this mode is `Global`.
pub fn is_global(&self) -> bool {
matches!(self, Self::Global)
}
}
#[derive(Debug, Trace, Finalize, PartialEq, Clone)]
pub enum ConstructorKind {
Base,
Derived,
}
impl ConstructorKind {
/// Returns `true` if the constructor kind is `Base`.
pub fn is_base(&self) -> bool {
matches!(self, Self::Base)
}
/// Returns `true` if the constructor kind is `Derived`.
pub fn is_derived(&self) -> bool {
matches!(self, Self::Derived)
}
}
// We don't use a standalone `NativeObject` for `Captures` because it doesn't
// guarantee that the internal type implements `Clone`.
// This private trait guarantees that the internal type passed to `Captures`
// implements `Clone`, and `DynClone` allows us to implement `Clone` for
// `Box<dyn CapturesObject>`.
trait CapturesObject: NativeObject + DynClone {}
impl<T: NativeObject + Clone> CapturesObject for T {}
dyn_clone::clone_trait_object!(CapturesObject);
/// Wrapper for `Box<dyn NativeObject + Clone>` that allows passing additional
/// captures through a `Copy` closure.
///
/// Any type implementing `Trace + Any + Debug + Clone`
/// can be used as a capture context, so you can pass e.g. a String,
/// a tuple or even a full struct.
///
/// You can downcast to any type and handle the fail case as you like
/// with `downcast_ref` and `downcast_mut`, or you can use `try_downcast_ref`
/// and `try_downcast_mut` to automatically throw a `TypeError` if the downcast
/// fails.
#[derive(Debug, Clone, Trace, Finalize)]
pub struct Captures(Box<dyn CapturesObject>);
impl Captures {
/// Creates a new capture context.
pub(crate) fn new<T>(captures: T) -> Self
where
T: NativeObject + Clone,
{
Self(Box::new(captures))
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or `None` otherwise.
pub fn downcast_ref<T>(&self) -> Option<&T>
where
T: NativeObject + Clone,
{
self.0.deref().as_any().downcast_ref::<T>()
}
/// Mutably downcasts `Captures` to the specified type, returning a
/// mutable reference to the downcasted type if successful or `None` otherwise.
pub fn downcast_mut<T>(&mut self) -> Option<&mut T>
where
T: NativeObject + Clone,
{
self.0.deref_mut().as_mut_any().downcast_mut::<T>()
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or a `TypeError` otherwise.
pub fn try_downcast_ref<T>(&self, context: &mut Context) -> JsResult<&T>
where
T: NativeObject + Clone,
{
self.0
.deref()
.as_any()
.downcast_ref::<T>()
.ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type"))
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or a `TypeError` otherwise.
pub fn try_downcast_mut<T>(&mut self, context: &mut Context) -> JsResult<&mut T>
where
T: NativeObject + Clone,
{
self.0 | }
/// Boa representation of a Function Object.
///
/// FunctionBody is specific to this interpreter, it will either be Rust code or JavaScript code (AST Node)
///
/// <https://tc39.es/ecma262/#sec-ecmascript-function-objects>
#[derive(Clone, Trace, Finalize)]
pub enum Function {
Native {
#[unsafe_ignore_trace]
function: NativeFunctionSignature,
constructable: bool,
},
Closure {
#[unsafe_ignore_trace]
function: Box<dyn ClosureFunctionSignature>,
constructable: bool,
captures: Captures,
},
Ordinary {
constructable: bool,
this_mode: ThisMode,
body: RcStatementList,
params: Box<[FormalParameter]>,
environment: Environment,
},
#[cfg(feature = "vm")]
VmOrdinary {
code: gc::Gc<crate::vm::CodeBlock>,
environment: Environment,
},
}
impl fmt::Debug for Function {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Function {{ ... }}")
}
}
impl Function {
// Adds the final rest parameters to the Environment as an array
#[cfg(not(feature = "vm"))]
pub(crate) fn add_rest_param(
param: &FormalParameter,
index: usize,
args_list: &[JsValue],
context: &mut Context,
local_env: &Environment,
) {
use crate::builtins::Array;
// Create array of values
let array = Array::new_array(context);
Array::add_to_array_object(&array, args_list.get(index..).unwrap_or_default(), context)
.unwrap();
// Create binding
local_env
// Function parameters can share names in JavaScript...
.create_mutable_binding(param.name(), false, true, context)
.expect("Failed to create binding for rest param");
// Set Binding to value
local_env
.initialize_binding(param.name(), array, context)
.expect("Failed to initialize rest param");
}
// Adds an argument to the environment
pub(crate) fn add_arguments_to_environment(
param: &FormalParameter,
value: JsValue,
local_env: &Environment,
context: &mut Context,
) {
// Create binding
local_env
.create_mutable_binding(param.name(), false, true, context)
.expect("Failed to create binding");
// Set Binding to value
local_env
.initialize_binding(param.name(), value, context)
.expect("Failed to intialize binding");
}
/// Returns true if the function object is constructable.
pub fn is_constructable(&self) -> bool {
match self {
Self::Native { constructable, .. } => *constructable,
Self::Closure { constructable, .. } => *constructable,
Self::Ordinary { constructable, .. } => *constructable,
#[cfg(feature = "vm")]
Self::VmOrdinary { code, .. } => code.constructable,
}
}
}
/// Creates a new member function of a `Object` or `prototype`.
///
/// A function registered using this macro can then be called from Javascript using:
///
/// parent.name()
///
/// See the javascript 'Number.toString()' as an example.
///
/// # Arguments
/// function: The function to register as a built in function.
/// name: The name of the function (how it will be called but without the ()).
/// parent: The object to register the function on, if the global object is used then the function is instead called as name()
/// without requiring the parent, see parseInt() as an example.
/// length: As described at <https://tc39.es/ecma262/#sec-function-instances-length>, The value of the "length" property is an integer that
/// indicates the typical number of arguments expected by the function. However, the language permits the function to be invoked with
/// some other number of arguments.
///
/// If no length is provided, the length will be set to 0.
// TODO: deprecate/remove this.
pub(crate) fn make_builtin_fn<N>(
function: NativeFunctionSignature,
name: N,
parent: &JsObject,
length: usize,
interpreter: &Context,
) where
N: Into<String>,
{
let name = name.into();
let _timer = BoaProfiler::global().start_event(&format!("make_builtin_fn: {}", &name), "init");
let function = JsObject::from_proto_and_data(
interpreter.standard_objects().function_object().prototype(),
ObjectData::function(Function::Native {
function,
constructable: false,
}),
);
let attribute = PropertyDescriptor::builder()
.writable(false)
.enumerable(false)
.configurable(true);
function.insert_property("length", attribute.clone().value(length));
function.insert_property("name", attribute.value(name.as_str()));
parent.clone().insert_property(
name,
PropertyDescriptor::builder()
.value(function)
.writable(true)
.enumerable(false)
.configurable(true),
);
}
#[derive(Debug, Clone, Copy)]
pub struct BuiltInFunctionObject;
impl BuiltInFunctionObject {
pub const LENGTH: usize = 1;
fn constructor(
new_target: &JsValue,
_: &[JsValue],
context: &mut Context,
) -> JsResult<JsValue> {
let prototype =
get_prototype_from_constructor(new_target, StandardObjects::function_object, context)?;
let this = JsObject::from_proto_and_data(
prototype,
ObjectData::function(Function::Native {
function: |_, _, _| Ok(JsValue::undefined()),
constructable: true,
}),
);
Ok(this.into())
}
fn prototype(_: &JsValue, _: &[JsValue], _: &mut Context) -> JsResult<JsValue> {
Ok(JsValue::undefined())
}
/// `Function.prototype.call`
///
/// The call() method invokes self with the first argument as the `this` value.
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-function.prototype.call
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
fn call(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
if !this.is_function() {
return context.throw_type_error(format!("{} is not a function", this.display()));
}
let this_arg = args.get_or_undefined(0);
// TODO?: 3. Perform PrepareForTailCall
let start = if !args.is_empty() { 1 } else { 0 };
context.call(this, this_arg, &args[start..])
}
/// `Function.prototype.apply`
///
/// The apply() method invokes self with the first argument as the `this` value
/// and the rest of the arguments provided as an array (or an array-like object).
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-function.prototype.apply
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/apply
fn apply(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
if !this.is_function() {
return context.throw_type_error(format!("{} is not a function", this.display()));
}
let this_arg = args.get_or_undefined(0);
let arg_array = args.get_or_undefined(1);
if arg_array.is_null_or_undefined() {
// TODO?: 3.a. PrepareForTailCall
return context.call(this, this_arg, &[]);
}
let arg_list = arg_array.create_list_from_array_like(&[], context)?;
// TODO?: 5. PrepareForTailCall
context.call(this, this_arg, &arg_list)
}
#[allow(clippy::wrong_self_convention)]
fn to_string(this: &JsValue, _: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
let name = {
// Is there a case here where if there is no name field on a value
// name should default to None? Do all functions have names set?
let value = this.get_field("name", &mut *context)?;
if value.is_null_or_undefined() {
None
} else {
Some(value.to_string(context)?)
}
};
let function = {
let object = this
.as_object()
.map(|object| object.borrow().as_function().cloned());
if let Some(Some(function)) = object {
function
} else {
return context.throw_type_error("Not a function");
}
};
match (&function, name) {
(
Function::Native {
function: _,
constructable: _,
},
Some(name),
) => Ok(format!("function {}() {{\n [native Code]\n}}", &name).into()),
(Function::Ordinary { body, params, .. }, Some(name)) => {
let arguments: String = params
.iter()
.map(|param| param.name())
.collect::<Vec<&str>>()
.join(", ");
let statement_list = &*body;
// This is a kluge. The implementaion in browser seems to suggest that
// the value here is printed exactly as defined in source. I'm not sure if
// that's possible here, but for now here's a dumb heuristic that prints functions
let is_multiline = {
let value = statement_list.to_string();
value.lines().count() > 1
};
if is_multiline {
Ok(
// ?? For some reason statement_list string implementation
// sticks a \n at the end no matter what
format!(
"{}({}) {{\n{}}}",
&name,
arguments,
statement_list.to_string()
)
.into(),
)
} else {
Ok(format!(
"{}({}) {{{}}}",
&name,
arguments,
// The trim here is to remove a \n stuck at the end
// of the statement_list to_string method
statement_list.to_string().trim()
)
.into())
}
}
_ => Ok("TODO".into()),
}
}
}
impl BuiltIn for BuiltInFunctionObject {
const NAME: &'static str = "Function";
const ATTRIBUTE: Attribute = Attribute::WRITABLE
.union(Attribute::NON_ENUMERABLE)
.union(Attribute::CONFIGURABLE);
fn init(context: &mut Context) -> JsValue {
let _timer = BoaProfiler::global().start_event("function", "init");
let function_prototype = context.standard_objects().function_object().prototype();
FunctionBuilder::native(context, Self::prototype)
.name("")
.length(0)
.constructable(false)
.build_function_prototype(&function_prototype);
let function_object = ConstructorBuilder::with_standard_object(
context,
Self::constructor,
context.standard_objects().function_object().clone(),
)
.name(Self::NAME)
.length(Self::LENGTH)
.method(Self::call, "call", 1)
.method(Self::apply, "apply", 1)
.method(Self::to_string, "toString", 0)
.build();
function_object.into()
}
} | .deref_mut()
.as_mut_any()
.downcast_mut::<T>()
.ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type"))
} | random_line_split |
mod.rs | //! This module implements the global `Function` object as well as creates Native Functions.
//!
//! Objects wrap `Function`s and expose them via call/construct slots.
//!
//! `The `Function` object is used for matching text with a pattern.
//!
//! More information:
//! - [ECMAScript reference][spec]
//! - [MDN documentation][mdn]
//!
//! [spec]: https://tc39.es/ecma262/#sec-function-objects
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function
use std::{
fmt,
ops::{Deref, DerefMut},
};
use dyn_clone::DynClone;
use crate::{
builtins::BuiltIn,
context::StandardObjects,
environment::lexical_environment::Environment,
gc::{Finalize, Trace},
object::JsObject,
object::{
internal_methods::get_prototype_from_constructor, ConstructorBuilder, FunctionBuilder,
NativeObject, ObjectData,
},
property::Attribute,
property::PropertyDescriptor,
syntax::ast::node::{FormalParameter, RcStatementList},
BoaProfiler, Context, JsResult, JsValue,
};
use super::JsArgs;
pub(crate) mod arguments;
#[cfg(test)]
mod tests;
/// Type representing a native built-in function a.k.a. function pointer.
///
/// Native functions need to have this signature in order to
/// be callable from Javascript.
pub type NativeFunctionSignature = fn(&JsValue, &[JsValue], &mut Context) -> JsResult<JsValue>;
// Allows restricting closures to only `Copy` ones.
// Used the sealed pattern to disallow external implementations
// of `DynCopy`.
mod sealed {
pub trait Sealed {}
impl<T: Copy> Sealed for T {}
}
pub trait DynCopy: sealed::Sealed {}
impl<T: Copy> DynCopy for T {}
/// Trait representing a native built-in closure.
///
/// Closures need to have this signature in order to
/// be callable from Javascript, but most of the time the compiler
/// is smart enough to correctly infer the types.
pub trait ClosureFunctionSignature:
Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + DynCopy + DynClone + 'static
{
}
// The `Copy` bound automatically infers `DynCopy` and `DynClone`
impl<T> ClosureFunctionSignature for T where
T: Fn(&JsValue, &[JsValue], Captures, &mut Context) -> JsResult<JsValue> + Copy + 'static
{
}
// Allows cloning Box<dyn ClosureFunctionSignature>
dyn_clone::clone_trait_object!(ClosureFunctionSignature);
#[derive(Debug, Trace, Finalize, PartialEq, Clone)]
pub enum ThisMode {
Lexical,
Strict,
Global,
}
impl ThisMode {
/// Returns `true` if the this mode is `Lexical`.
pub fn is_lexical(&self) -> bool {
matches!(self, Self::Lexical)
}
/// Returns `true` if the this mode is `Strict`.
pub fn is_strict(&self) -> bool {
matches!(self, Self::Strict)
}
/// Returns `true` if the this mode is `Global`.
pub fn is_global(&self) -> bool {
matches!(self, Self::Global)
}
}
#[derive(Debug, Trace, Finalize, PartialEq, Clone)]
pub enum ConstructorKind {
Base,
Derived,
}
impl ConstructorKind {
/// Returns `true` if the constructor kind is `Base`.
pub fn is_base(&self) -> bool {
matches!(self, Self::Base)
}
/// Returns `true` if the constructor kind is `Derived`.
pub fn is_derived(&self) -> bool {
matches!(self, Self::Derived)
}
}
// We don't use a standalone `NativeObject` for `Captures` because it doesn't
// guarantee that the internal type implements `Clone`.
// This private trait guarantees that the internal type passed to `Captures`
// implements `Clone`, and `DynClone` allows us to implement `Clone` for
// `Box<dyn CapturesObject>`.
trait CapturesObject: NativeObject + DynClone {}
impl<T: NativeObject + Clone> CapturesObject for T {}
dyn_clone::clone_trait_object!(CapturesObject);
/// Wrapper for `Box<dyn NativeObject + Clone>` that allows passing additional
/// captures through a `Copy` closure.
///
/// Any type implementing `Trace + Any + Debug + Clone`
/// can be used as a capture context, so you can pass e.g. a String,
/// a tuple or even a full struct.
///
/// You can downcast to any type and handle the fail case as you like
/// with `downcast_ref` and `downcast_mut`, or you can use `try_downcast_ref`
/// and `try_downcast_mut` to automatically throw a `TypeError` if the downcast
/// fails.
#[derive(Debug, Clone, Trace, Finalize)]
pub struct Captures(Box<dyn CapturesObject>);
impl Captures {
/// Creates a new capture context.
pub(crate) fn new<T>(captures: T) -> Self
where
T: NativeObject + Clone,
{
Self(Box::new(captures))
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or `None` otherwise.
pub fn downcast_ref<T>(&self) -> Option<&T>
where
T: NativeObject + Clone,
{
self.0.deref().as_any().downcast_ref::<T>()
}
/// Mutably downcasts `Captures` to the specified type, returning a
/// mutable reference to the downcasted type if successful or `None` otherwise.
pub fn downcast_mut<T>(&mut self) -> Option<&mut T>
where
T: NativeObject + Clone,
{
self.0.deref_mut().as_mut_any().downcast_mut::<T>()
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or a `TypeError` otherwise.
pub fn try_downcast_ref<T>(&self, context: &mut Context) -> JsResult<&T>
where
T: NativeObject + Clone,
{
self.0
.deref()
.as_any()
.downcast_ref::<T>()
.ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type"))
}
/// Downcasts `Captures` to the specified type, returning a reference to the
/// downcasted type if successful or a `TypeError` otherwise.
pub fn try_downcast_mut<T>(&mut self, context: &mut Context) -> JsResult<&mut T>
where
T: NativeObject + Clone,
{
self.0
.deref_mut()
.as_mut_any()
.downcast_mut::<T>()
.ok_or_else(|| context.construct_type_error("cannot downcast `Captures` to given type"))
}
}
/// Boa representation of a Function Object.
///
/// FunctionBody is specific to this interpreter, it will either be Rust code or JavaScript code (AST Node)
///
/// <https://tc39.es/ecma262/#sec-ecmascript-function-objects>
#[derive(Clone, Trace, Finalize)]
pub enum Function {
Native {
#[unsafe_ignore_trace]
function: NativeFunctionSignature,
constructable: bool,
},
Closure {
#[unsafe_ignore_trace]
function: Box<dyn ClosureFunctionSignature>,
constructable: bool,
captures: Captures,
},
Ordinary {
constructable: bool,
this_mode: ThisMode,
body: RcStatementList,
params: Box<[FormalParameter]>,
environment: Environment,
},
#[cfg(feature = "vm")]
VmOrdinary {
code: gc::Gc<crate::vm::CodeBlock>,
environment: Environment,
},
}
impl fmt::Debug for Function {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Function {{ ... }}")
}
}
impl Function {
// Adds the final rest parameters to the Environment as an array
#[cfg(not(feature = "vm"))]
pub(crate) fn add_rest_param(
param: &FormalParameter,
index: usize,
args_list: &[JsValue],
context: &mut Context,
local_env: &Environment,
) {
use crate::builtins::Array;
// Create array of values
let array = Array::new_array(context);
Array::add_to_array_object(&array, args_list.get(index..).unwrap_or_default(), context)
.unwrap();
// Create binding
local_env
// Function parameters can share names in JavaScript...
.create_mutable_binding(param.name(), false, true, context)
.expect("Failed to create binding for rest param");
// Set Binding to value
local_env
.initialize_binding(param.name(), array, context)
.expect("Failed to initialize rest param");
}
// Adds an argument to the environment
pub(crate) fn add_arguments_to_environment(
param: &FormalParameter,
value: JsValue,
local_env: &Environment,
context: &mut Context,
) {
// Create binding
local_env
.create_mutable_binding(param.name(), false, true, context)
.expect("Failed to create binding");
// Set Binding to value
local_env
.initialize_binding(param.name(), value, context)
.expect("Failed to intialize binding");
}
/// Returns true if the function object is constructable.
pub fn is_constructable(&self) -> bool {
match self {
Self::Native { constructable, .. } => *constructable,
Self::Closure { constructable, .. } => *constructable,
Self::Ordinary { constructable, .. } => *constructable,
#[cfg(feature = "vm")]
Self::VmOrdinary { code, .. } => code.constructable,
}
}
}
/// Creates a new member function of a `Object` or `prototype`.
///
/// A function registered using this macro can then be called from Javascript using:
///
/// parent.name()
///
/// See the javascript 'Number.toString()' as an example.
///
/// # Arguments
/// function: The function to register as a built in function.
/// name: The name of the function (how it will be called but without the ()).
/// parent: The object to register the function on, if the global object is used then the function is instead called as name()
/// without requiring the parent, see parseInt() as an example.
/// length: As described at <https://tc39.es/ecma262/#sec-function-instances-length>, The value of the "length" property is an integer that
/// indicates the typical number of arguments expected by the function. However, the language permits the function to be invoked with
/// some other number of arguments.
///
/// If no length is provided, the length will be set to 0.
// TODO: deprecate/remove this.
pub(crate) fn make_builtin_fn<N>(
function: NativeFunctionSignature,
name: N,
parent: &JsObject,
length: usize,
interpreter: &Context,
) where
N: Into<String>,
{
let name = name.into();
let _timer = BoaProfiler::global().start_event(&format!("make_builtin_fn: {}", &name), "init");
let function = JsObject::from_proto_and_data(
interpreter.standard_objects().function_object().prototype(),
ObjectData::function(Function::Native {
function,
constructable: false,
}),
);
let attribute = PropertyDescriptor::builder()
.writable(false)
.enumerable(false)
.configurable(true);
function.insert_property("length", attribute.clone().value(length));
function.insert_property("name", attribute.value(name.as_str()));
parent.clone().insert_property(
name,
PropertyDescriptor::builder()
.value(function)
.writable(true)
.enumerable(false)
.configurable(true),
);
}
#[derive(Debug, Clone, Copy)]
pub struct BuiltInFunctionObject;
impl BuiltInFunctionObject {
pub const LENGTH: usize = 1;
fn constructor(
new_target: &JsValue,
_: &[JsValue],
context: &mut Context,
) -> JsResult<JsValue> {
let prototype =
get_prototype_from_constructor(new_target, StandardObjects::function_object, context)?;
let this = JsObject::from_proto_and_data(
prototype,
ObjectData::function(Function::Native {
function: |_, _, _| Ok(JsValue::undefined()),
constructable: true,
}),
);
Ok(this.into())
}
fn prototype(_: &JsValue, _: &[JsValue], _: &mut Context) -> JsResult<JsValue> {
Ok(JsValue::undefined())
}
/// `Function.prototype.call`
///
/// The call() method invokes self with the first argument as the `this` value.
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-function.prototype.call
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
fn call(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
if !this.is_function() {
return context.throw_type_error(format!("{} is not a function", this.display()));
}
let this_arg = args.get_or_undefined(0);
// TODO?: 3. Perform PrepareForTailCall
let start = if !args.is_empty() { 1 } else { 0 };
context.call(this, this_arg, &args[start..])
}
/// `Function.prototype.apply`
///
/// The apply() method invokes self with the first argument as the `this` value
/// and the rest of the arguments provided as an array (or an array-like object).
///
/// More information:
/// - [MDN documentation][mdn]
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-function.prototype.apply
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/apply
fn apply(this: &JsValue, args: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
if !this.is_function() {
return context.throw_type_error(format!("{} is not a function", this.display()));
}
let this_arg = args.get_or_undefined(0);
let arg_array = args.get_or_undefined(1);
if arg_array.is_null_or_undefined() {
// TODO?: 3.a. PrepareForTailCall
return context.call(this, this_arg, &[]);
}
let arg_list = arg_array.create_list_from_array_like(&[], context)?;
// TODO?: 5. PrepareForTailCall
context.call(this, this_arg, &arg_list)
}
#[allow(clippy::wrong_self_convention)]
fn to_string(this: &JsValue, _: &[JsValue], context: &mut Context) -> JsResult<JsValue> {
let name = {
// Is there a case here where if there is no name field on a value
// name should default to None? Do all functions have names set?
let value = this.get_field("name", &mut *context)?;
if value.is_null_or_undefined() {
None
} else {
Some(value.to_string(context)?)
}
};
let function = {
let object = this
.as_object()
.map(|object| object.borrow().as_function().cloned());
if let Some(Some(function)) = object {
function
} else |
};
match (&function, name) {
(
Function::Native {
function: _,
constructable: _,
},
Some(name),
) => Ok(format!("function {}() {{\n [native Code]\n}}", &name).into()),
(Function::Ordinary { body, params, .. }, Some(name)) => {
let arguments: String = params
.iter()
.map(|param| param.name())
.collect::<Vec<&str>>()
.join(", ");
let statement_list = &*body;
// This is a kluge. The implementaion in browser seems to suggest that
// the value here is printed exactly as defined in source. I'm not sure if
// that's possible here, but for now here's a dumb heuristic that prints functions
let is_multiline = {
let value = statement_list.to_string();
value.lines().count() > 1
};
if is_multiline {
Ok(
// ?? For some reason statement_list string implementation
// sticks a \n at the end no matter what
format!(
"{}({}) {{\n{}}}",
&name,
arguments,
statement_list.to_string()
)
.into(),
)
} else {
Ok(format!(
"{}({}) {{{}}}",
&name,
arguments,
// The trim here is to remove a \n stuck at the end
// of the statement_list to_string method
statement_list.to_string().trim()
)
.into())
}
}
_ => Ok("TODO".into()),
}
}
}
impl BuiltIn for BuiltInFunctionObject {
const NAME: &'static str = "Function";
const ATTRIBUTE: Attribute = Attribute::WRITABLE
.union(Attribute::NON_ENUMERABLE)
.union(Attribute::CONFIGURABLE);
fn init(context: &mut Context) -> JsValue {
let _timer = BoaProfiler::global().start_event("function", "init");
let function_prototype = context.standard_objects().function_object().prototype();
FunctionBuilder::native(context, Self::prototype)
.name("")
.length(0)
.constructable(false)
.build_function_prototype(&function_prototype);
let function_object = ConstructorBuilder::with_standard_object(
context,
Self::constructor,
context.standard_objects().function_object().clone(),
)
.name(Self::NAME)
.length(Self::LENGTH)
.method(Self::call, "call", 1)
.method(Self::apply, "apply", 1)
.method(Self::to_string, "toString", 0)
.build();
function_object.into()
}
}
| {
return context.throw_type_error("Not a function");
} | conditional_block |
EncryptDecryptTextFile.py | import os
import pprint
import math
import sys
import datetime as dt
from pathlib import Path
import RotateCipher
import ShiftCipher
import TranspositionCipher
def process_textfile(
string_path: str,
encryption_algorithm: str,
algorithm_key: float,
output_folderpath: str = str(
Path(os.path.expandvars("$HOME")).anchor
) + r"/EncryptDecrypt/",
output_filename: str = r"EncryptDecrypt.txt",
to_decrypt=False,
**kwargs
):
|
def manual_test():
dict_processedtext = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 1.txt",
encryption_algorithm="rotate",
algorithm_key=1,
shift_left=True
)
print("Encrypt ROT1 with default values.")
# pprint.pprint(
# dict_processedtext
# )
print(dict_processedtext["output_file"])
dict_processedtext2 = process_textfile(
string_path=dict_processedtext["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 1 Decrypted",
shift_left=False
)
print("Decrypt ROT1 with all values user-supplied.")
print(dict_processedtext["output_file"])
for i in range(2):
dict_processedtext3a = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 2.txt",
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Encryptions"
)
print(dict_processedtext3a["output_file"])
dict_processedtext3b = process_textfile(
string_path=dict_processedtext3a["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 2 Decrypted",
shift_left=False
)
print(dict_processedtext3b["output_file"])
return None
def main():
while True:
print("Press Enter or New Line to skip entering any input.\t")
task = input("Encrypt or decrypt? Encrypts by default. Press E/D.\t")
algo = input("Algorithm? Uses Rotate by default.\t")
algorithm_key = float(input("Key? Uses 1 by default.\t"))
input_filepath = input(
"""Mandatory / Required.
Full path of target file. Includes file name and extension.\n""")
output_folder = input(
"Optional. Give the path of the output folder.\n"
)
output_file = input(
"Optional. Default output file name is EncryptDecrypt.txt.\n")
keyword_arguments = input(
"""Last question. Depends on algorithm.
Format: "key=value,key2,value2,...".
Use comma with no space as separator for two or more items.\n"""
)
while len(input_filepath) == 0:
input_filepath = input(
"""Mandatory / Required.
Full path of target file.
Includes file name and extension.\n"""
)
dict_kwargs = dict()
for pair in keyword_arguments.split(','):
try:
key, pair = tuple(pair.split('='))
dict_kwargs[key] = pair
except ValueError:
break
to_decrypt = False
if task.lower().startswith('d'):
to_decrypt = True
if len(output_folder) == 0:
output_folder = str(Path.cwd().parent / r"/EncryptDecrypt/")
if len(output_file) == 0:
output_file = "EncryptDecrypt.txt"
if len(algo) == 0:
algo = "rotate"
pprint.pprint(
process_textfile(
string_path=input_filepath,
encryption_algorithm=algo,
algorithm_key=algorithm_key,
output_folderpath=output_folder,
output_filename=output_file,
to_decrypt=to_decrypt,
kwargs_dict=dict_kwargs
)
)
print(
"""Done Running.
Press Q to quit, any other key to process another file.""")
to_quit = input()
if to_quit.lower().startswith("q"):
sys.exit()
else:
continue
# manual_test()
return None
if __name__ == "__main__":
main()
"""
Notes:
*
The declared parameter data types in python functions are not enforced as of
version 3.4.
*
For some reason, even if the name "key" was a parameter for process_textfile,
it was being passed to rot13_e as a string. In the function process_textfile,
Visual Basic also listed "key" as a string when passed to rot13_e even though
the function definition specified its data type as a float and the user input
for "key" was also converted to a float in the main function. This was caused
by a for-loop. When VS Code followed the definition of key (F12) when it
was passed to rot13_e, VS Code pointed to the temporary variable "key" in a
for-loop. The parameter name was changed as a quick fix.
- Adding an else clause to the for-loop did not fix it.
- The for-loop declaration was funciton-level code while the call to rot13_e
that bugged was inside an else-clause. The else-clause holding the call to
rot13_e was also function-level, same as the for-loop declaration. The call
to RotateCipher.rot13_e was assigned to output_string.
"""
| encryption_algorithm = encryption_algorithm.lower()
available_algorithms = ["rotate", "transposition"]
if encryption_algorithm not in available_algorithms:
pprint.pprint(
["Enter an algorithm from the list. Not case-sensitive.",
available_algorithms]
)
return None
# A single dictionary may be passed as a **kwarg if it is the
# ONLY KEY-WORD ARGUMENT. Else, error is thrown.
lst_kwargs = list(kwargs.values())
if len(lst_kwargs) == 1 and (isinstance(lst_kwargs[0], dict)):
kwargs = lst_kwargs[0]
# Key in **kwargs overwrites `algorithm_key` function parameter.
if "algorithm_key" in kwargs:
algorithm_key = float(kwargs["algorithm_key"])
# Convert strings saying "True" or "False" to booleans.
for key, value in kwargs.items():
str_value = str(value)
if str_value.lower() == "False":
kwargs[key] = False
elif str_value.lower() == "True":
kwargs[key] = True
output_filename = ('/' + output_filename)
if not (output_filename.endswith(".txt")):
output_filename += ".txt"
full_outputpath = output_folderpath + output_filename
path_input = Path(string_path)
# fileobj_target = open(path_input, 'r') # Only for Python 3.6 and later.
fileobj_target = open(str(path_input), 'r')
lst_input = fileobj_target.readlines()
# str_input = '\n'.join(lst_input)
str_input = "".join(lst_input)
output_string = "None"
print(
"""Started processing.
Key-word arguments for %s algorithm:""" % encryption_algorithm
)
pprint.pprint(kwargs)
if (encryption_algorithm == "transposition") and to_decrypt is True:
output_string = ''.join(
TranspositionCipher.decrypt_transposition(
str_input, int(algorithm_key)
)
)
elif encryption_algorithm == "transposition" and not to_decrypt:
output_string = ''.join(
TranspositionCipher.encrypt_transposition(
str_input, int(algorithm_key)
)
)
elif encryption_algorithm == "rotate":
warning = """
When the algorithm is set to rotate, the "to_decrypt" parameter
is ignored. To decrypt, set the key-word argument shift left
so that it reverses the shift direction during encryption.
Ex: If the text was shifted left, i.e. values were swapped
with those "higher" up on the list read from left to right, pass
the key-word argument shift_left=False to decrypt.
RotateCipher's methods can return a list. However, it is
forced to always return a string. Passing return_list=True as
a key-word argument will have no effect. The argument is not
passed to RotateCipher.
"""
# pprint.pprint(warning) # Included literl \n and single quotes.
print(warning)
to_shiftleft = True
if "shift_left" in kwargs:
to_shiftleft = kwargs["shift_left"]
process_numbers = False
if "shift_numbers" in kwargs:
process_numbers = kwargs["shift_numbers"]
output_string = RotateCipher.rot13_e(
string=str_input,
shift_left=to_shiftleft,
rotations=int(algorithm_key),
# return_list=kwargs["return_list"], # Removed for safety.
shift_numbers=process_numbers
)
if not (os.path.exists(output_folderpath)):
os.mkdir(output_folderpath)
fileobj_output = open(
full_outputpath,
'a' # Create a file and open it for writing. Append if exists.
)
fileobj_output.write(
"\n=====\nEncryptDecrypt Output on\n%s\n=====\n" %
dt.datetime.now()
)
fileobj_output.write(output_string)
fileobj_output.close()
print("Done processing. Output folder:\n{}".format(
Path(full_outputpath)
)
)
return {
"output_file": Path(full_outputpath).resolve(),
"output_text": output_string
} | identifier_body |
EncryptDecryptTextFile.py | import os
import pprint
import math
import sys
import datetime as dt
from pathlib import Path
import RotateCipher
import ShiftCipher
import TranspositionCipher
def | (
string_path: str,
encryption_algorithm: str,
algorithm_key: float,
output_folderpath: str = str(
Path(os.path.expandvars("$HOME")).anchor
) + r"/EncryptDecrypt/",
output_filename: str = r"EncryptDecrypt.txt",
to_decrypt=False,
**kwargs
):
encryption_algorithm = encryption_algorithm.lower()
available_algorithms = ["rotate", "transposition"]
if encryption_algorithm not in available_algorithms:
pprint.pprint(
["Enter an algorithm from the list. Not case-sensitive.",
available_algorithms]
)
return None
# A single dictionary may be passed as a **kwarg if it is the
# ONLY KEY-WORD ARGUMENT. Else, error is thrown.
lst_kwargs = list(kwargs.values())
if len(lst_kwargs) == 1 and (isinstance(lst_kwargs[0], dict)):
kwargs = lst_kwargs[0]
# Key in **kwargs overwrites `algorithm_key` function parameter.
if "algorithm_key" in kwargs:
algorithm_key = float(kwargs["algorithm_key"])
# Convert strings saying "True" or "False" to booleans.
for key, value in kwargs.items():
str_value = str(value)
if str_value.lower() == "False":
kwargs[key] = False
elif str_value.lower() == "True":
kwargs[key] = True
output_filename = ('/' + output_filename)
if not (output_filename.endswith(".txt")):
output_filename += ".txt"
full_outputpath = output_folderpath + output_filename
path_input = Path(string_path)
# fileobj_target = open(path_input, 'r') # Only for Python 3.6 and later.
fileobj_target = open(str(path_input), 'r')
lst_input = fileobj_target.readlines()
# str_input = '\n'.join(lst_input)
str_input = "".join(lst_input)
output_string = "None"
print(
"""Started processing.
Key-word arguments for %s algorithm:""" % encryption_algorithm
)
pprint.pprint(kwargs)
if (encryption_algorithm == "transposition") and to_decrypt is True:
output_string = ''.join(
TranspositionCipher.decrypt_transposition(
str_input, int(algorithm_key)
)
)
elif encryption_algorithm == "transposition" and not to_decrypt:
output_string = ''.join(
TranspositionCipher.encrypt_transposition(
str_input, int(algorithm_key)
)
)
elif encryption_algorithm == "rotate":
warning = """
When the algorithm is set to rotate, the "to_decrypt" parameter
is ignored. To decrypt, set the key-word argument shift left
so that it reverses the shift direction during encryption.
Ex: If the text was shifted left, i.e. values were swapped
with those "higher" up on the list read from left to right, pass
the key-word argument shift_left=False to decrypt.
RotateCipher's methods can return a list. However, it is
forced to always return a string. Passing return_list=True as
a key-word argument will have no effect. The argument is not
passed to RotateCipher.
"""
# pprint.pprint(warning) # Included literl \n and single quotes.
print(warning)
to_shiftleft = True
if "shift_left" in kwargs:
to_shiftleft = kwargs["shift_left"]
process_numbers = False
if "shift_numbers" in kwargs:
process_numbers = kwargs["shift_numbers"]
output_string = RotateCipher.rot13_e(
string=str_input,
shift_left=to_shiftleft,
rotations=int(algorithm_key),
# return_list=kwargs["return_list"], # Removed for safety.
shift_numbers=process_numbers
)
if not (os.path.exists(output_folderpath)):
os.mkdir(output_folderpath)
fileobj_output = open(
full_outputpath,
'a' # Create a file and open it for writing. Append if exists.
)
fileobj_output.write(
"\n=====\nEncryptDecrypt Output on\n%s\n=====\n" %
dt.datetime.now()
)
fileobj_output.write(output_string)
fileobj_output.close()
print("Done processing. Output folder:\n{}".format(
Path(full_outputpath)
)
)
return {
"output_file": Path(full_outputpath).resolve(),
"output_text": output_string
}
def manual_test():
dict_processedtext = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 1.txt",
encryption_algorithm="rotate",
algorithm_key=1,
shift_left=True
)
print("Encrypt ROT1 with default values.")
# pprint.pprint(
# dict_processedtext
# )
print(dict_processedtext["output_file"])
dict_processedtext2 = process_textfile(
string_path=dict_processedtext["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 1 Decrypted",
shift_left=False
)
print("Decrypt ROT1 with all values user-supplied.")
print(dict_processedtext["output_file"])
for i in range(2):
dict_processedtext3a = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 2.txt",
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Encryptions"
)
print(dict_processedtext3a["output_file"])
dict_processedtext3b = process_textfile(
string_path=dict_processedtext3a["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 2 Decrypted",
shift_left=False
)
print(dict_processedtext3b["output_file"])
return None
def main():
    """Interactive driver: repeatedly prompt for a file and process it.

    Collects the task, algorithm, key, input path, output location and
    algorithm-specific key-word arguments from stdin, applies defaults
    for anything the user skipped, delegates to process_textfile(), and
    loops until the user enters Q (terminates via sys.exit()).

    Returns:
        None (unreachable in practice; the loop exits via sys.exit()).
    """
    while True:
        print("Press Enter or New Line to skip entering any input.\t")
        task = input("Encrypt or decrypt? Encrypts by default. Press E/D.\t")
        algo = input("Algorithm? Uses Rotate by default.\t")
        # FIX: float('') raised ValueError when the user pressed Enter to
        # accept the advertised default key of 1.
        raw_key = input("Key? Uses 1 by default.\t")
        try:
            algorithm_key = float(raw_key)
        except ValueError:
            algorithm_key = 1.0
        input_filepath = input(
            """Mandatory / Required.
            Full path of target file. Includes file name and extension.\n""")
        output_folder = input(
            "Optional. Give the path of the output folder.\n"
        )
        output_file = input(
            "Optional. Default output file name is EncryptDecrypt.txt.\n")
        # FIX: the prompt previously showed "key2,value2" although the
        # parser below requires "key2=value2".
        keyword_arguments = input(
            """Last question. Depends on algorithm.
            Format: "key=value,key2=value2,...".
            Use comma with no space as separator for two or more items.\n"""
        )
        while len(input_filepath) == 0:
            input_filepath = input(
                """Mandatory / Required.
                Full path of target file.
                Includes file name and extension.\n"""
            )
        dict_kwargs = dict()
        for pair in keyword_arguments.split(','):
            try:
                key, value = tuple(pair.split('='))
                dict_kwargs[key] = value
            except ValueError:
                # FIX: a malformed item (no '=' or several) used to
                # `break` and silently drop every later pair; skip just
                # the bad item instead.
                continue
        to_decrypt = task.lower().startswith('d')
        if len(output_folder) == 0:
            # FIX: `Path.cwd().parent / r"/EncryptDecrypt/"` discarded the
            # parent entirely, because joining with an absolute path
            # resets the result to that absolute path.
            output_folder = str(Path.cwd().parent / "EncryptDecrypt")
        if len(output_file) == 0:
            output_file = "EncryptDecrypt.txt"
        if len(algo) == 0:
            algo = "rotate"
        pprint.pprint(
            process_textfile(
                string_path=input_filepath,
                encryption_algorithm=algo,
                algorithm_key=algorithm_key,
                output_folderpath=output_folder,
                output_filename=output_file,
                to_decrypt=to_decrypt,
                kwargs_dict=dict_kwargs
            )
        )
        print(
            """Done Running.
            Press Q to quit, any other key to process another file.""")
        to_quit = input()
        if to_quit.lower().startswith("q"):
            sys.exit()
    # manual_test()
    return None
# Run the interactive driver only when executed as a script, not on import.
if __name__ == "__main__":
    main()
"""
Notes:
*
The declared parameter data types in python functions are not enforced as of
version 3.4.
*
For some reason, even if the name "key" was a parameter for process_textfile,
it was being passed to rot13_e as a string. In the function process_textfile,
Visual Basic also listed "key" as a string when passed to rot13_e even though
the function definition specified its data type as a float and the user input
for "key" was also converted to a float in the main function. This was caused
by a for-loop. When VS Code followed the definition of key (F12) when it
was passed to rot13_e, VS Code pointed to the temporary variable "key" in a
for-loop. The parameter name was changed as a quick fix.
- Adding an else clause to the for-loop did not fix it.
- The for-loop declaration was function-level code while the call to rot13_e
that bugged was inside an else-clause. The else-clause holding the call to
rot13_e was also function-level, same as the for-loop declaration. The call
to RotateCipher.rot13_e was assigned to output_string.
"""
| process_textfile | identifier_name |
EncryptDecryptTextFile.py | import os
import pprint
import math
import sys
import datetime as dt
from pathlib import Path
import RotateCipher
import ShiftCipher
import TranspositionCipher
def process_textfile(
    string_path: str,
    encryption_algorithm: str,
    algorithm_key: float,
    output_folderpath: str = str(
        Path(os.path.expandvars("$HOME")).anchor
    ) + r"/EncryptDecrypt/",
    output_filename: str = r"EncryptDecrypt.txt",
    to_decrypt: bool = False,
    **kwargs
):
    """Encrypt or decrypt a text file and append the result to an output file.

    Parameters:
        string_path: path of the input text file.
        encryption_algorithm: "rotate" or "transposition" (case-insensitive).
        algorithm_key: numeric key; truncated to int before use.
        output_folderpath: folder for the output file (created if missing).
        output_filename: output file name; ".txt" is appended if absent.
        to_decrypt: decrypt instead of encrypt.  Ignored by "rotate";
            pass shift_left=False to reverse a rotate encryption.
        **kwargs: algorithm-specific options (e.g. shift_left,
            shift_numbers) or algorithm_key, which overrides the
            positional key.  A single dict passed as the only key-word
            argument is unpacked and used as the kwargs mapping.

    Returns:
        dict with "output_file" (resolved Path) and "output_text" (str),
        or None when the algorithm name is unknown.
    """
    encryption_algorithm = encryption_algorithm.lower()
    available_algorithms = ["rotate", "transposition"]
    if encryption_algorithm not in available_algorithms:
        pprint.pprint(
            ["Enter an algorithm from the list. Not case-sensitive.",
             available_algorithms]
        )
        return None
    # A single dictionary may be passed as a **kwarg if it is the
    # ONLY KEY-WORD ARGUMENT. Else, error is thrown.
    lst_kwargs = list(kwargs.values())
    if len(lst_kwargs) == 1 and isinstance(lst_kwargs[0], dict):
        kwargs = lst_kwargs[0]
    # Key in **kwargs overwrites `algorithm_key` function parameter.
    if "algorithm_key" in kwargs:
        algorithm_key = float(kwargs["algorithm_key"])
    # Convert strings saying "True" or "False" to booleans.
    # FIX: the comparisons used capitalised "False"/"True", which
    # str.lower() output can never equal, so the conversion never ran.
    for key, value in kwargs.items():
        str_value = str(value)
        if str_value.lower() == "false":
            kwargs[key] = False
        elif str_value.lower() == "true":
            kwargs[key] = True
    output_filename = '/' + output_filename
    if not output_filename.endswith(".txt"):
        output_filename += ".txt"
    full_outputpath = output_folderpath + output_filename
    path_input = Path(string_path)
    # FIX: use a context manager so the input handle is always closed
    # (the old code leaked it).  str() keeps pre-3.6 open() compatibility.
    with open(str(path_input), 'r') as fileobj_target:
        # str_input = '\n'.join(lst_input)
        str_input = "".join(fileobj_target.readlines())
    output_string = "None"
    print(
        """Started processing.
        Key-word arguments for %s algorithm:""" % encryption_algorithm
    )
    pprint.pprint(kwargs)
    if encryption_algorithm == "transposition" and to_decrypt is True:
        output_string = ''.join(
            TranspositionCipher.decrypt_transposition(
                str_input, int(algorithm_key)
            )
        )
    elif encryption_algorithm == "transposition" and not to_decrypt:
        output_string = ''.join(
            TranspositionCipher.encrypt_transposition(
                str_input, int(algorithm_key)
            )
        )
    elif encryption_algorithm == "rotate":
        warning = """
        When the algorithm is set to rotate, the "to_decrypt" parameter
        is ignored. To decrypt, set the key-word argument shift left
        so that it reverses the shift direction during encryption.
        Ex: If the text was shifted left, i.e. values were swapped
        with those "higher" up on the list read from left to right, pass
        the key-word argument shift_left=False to decrypt.
        RotateCipher's methods can return a list. However, it is
        forced to always return a string. Passing return_list=True as
        a key-word argument will have no effect. The argument is not
        passed to RotateCipher.
        """
        # pprint.pprint(warning) # Included literal \n and single quotes.
        print(warning)
        to_shiftleft = kwargs.get("shift_left", True)
        process_numbers = kwargs.get("shift_numbers", False)
        output_string = RotateCipher.rot13_e(
            string=str_input,
            shift_left=to_shiftleft,
            rotations=int(algorithm_key),
            # return_list=kwargs["return_list"], # Removed for safety.
            shift_numbers=process_numbers
        )
    # FIX: os.makedirs creates intermediate folders too; os.mkdir
    # raised FileNotFoundError when the parent directory was missing.
    if not os.path.exists(output_folderpath):
        os.makedirs(output_folderpath)
    # 'a': create the file if needed, append if it already exists.
    with open(full_outputpath, 'a') as fileobj_output:
        fileobj_output.write(
            "\n=====\nEncryptDecrypt Output on\n%s\n=====\n" %
            dt.datetime.now()
        )
        fileobj_output.write(output_string)
    print("Done processing. Output folder:\n{}".format(
        Path(full_outputpath)
    )
    )
    return {
        "output_file": Path(full_outputpath).resolve(),
        "output_text": output_string
    }
def manual_test():
dict_processedtext = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 1.txt",
encryption_algorithm="rotate",
algorithm_key=1,
shift_left=True
)
print("Encrypt ROT1 with default values.")
# pprint.pprint(
# dict_processedtext
# )
print(dict_processedtext["output_file"])
dict_processedtext2 = process_textfile(
string_path=dict_processedtext["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 1 Decrypted",
shift_left=False
)
print("Decrypt ROT1 with all values user-supplied.")
print(dict_processedtext["output_file"])
| algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Encryptions"
)
print(dict_processedtext3a["output_file"])
dict_processedtext3b = process_textfile(
string_path=dict_processedtext3a["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 2 Decrypted",
shift_left=False
)
print(dict_processedtext3b["output_file"])
return None
def main():
    """Interactive driver: repeatedly prompt for a file and process it.

    Collects the task, algorithm, key, input path, output location and
    algorithm-specific key-word arguments from stdin, applies defaults
    for anything the user skipped, delegates to process_textfile(), and
    loops until the user enters Q (terminates via sys.exit()).

    Returns:
        None (unreachable in practice; the loop exits via sys.exit()).
    """
    while True:
        print("Press Enter or New Line to skip entering any input.\t")
        task = input("Encrypt or decrypt? Encrypts by default. Press E/D.\t")
        algo = input("Algorithm? Uses Rotate by default.\t")
        # FIX: float('') raised ValueError when the user pressed Enter to
        # accept the advertised default key of 1.
        raw_key = input("Key? Uses 1 by default.\t")
        try:
            algorithm_key = float(raw_key)
        except ValueError:
            algorithm_key = 1.0
        input_filepath = input(
            """Mandatory / Required.
            Full path of target file. Includes file name and extension.\n""")
        output_folder = input(
            "Optional. Give the path of the output folder.\n"
        )
        output_file = input(
            "Optional. Default output file name is EncryptDecrypt.txt.\n")
        # FIX: the prompt previously showed "key2,value2" although the
        # parser below requires "key2=value2".
        keyword_arguments = input(
            """Last question. Depends on algorithm.
            Format: "key=value,key2=value2,...".
            Use comma with no space as separator for two or more items.\n"""
        )
        while len(input_filepath) == 0:
            input_filepath = input(
                """Mandatory / Required.
                Full path of target file.
                Includes file name and extension.\n"""
            )
        dict_kwargs = dict()
        for pair in keyword_arguments.split(','):
            try:
                key, value = tuple(pair.split('='))
                dict_kwargs[key] = value
            except ValueError:
                # FIX: a malformed item (no '=' or several) used to
                # `break` and silently drop every later pair; skip just
                # the bad item instead.
                continue
        to_decrypt = task.lower().startswith('d')
        if len(output_folder) == 0:
            # FIX: `Path.cwd().parent / r"/EncryptDecrypt/"` discarded the
            # parent entirely, because joining with an absolute path
            # resets the result to that absolute path.
            output_folder = str(Path.cwd().parent / "EncryptDecrypt")
        if len(output_file) == 0:
            output_file = "EncryptDecrypt.txt"
        if len(algo) == 0:
            algo = "rotate"
        pprint.pprint(
            process_textfile(
                string_path=input_filepath,
                encryption_algorithm=algo,
                algorithm_key=algorithm_key,
                output_folderpath=output_folder,
                output_filename=output_file,
                to_decrypt=to_decrypt,
                kwargs_dict=dict_kwargs
            )
        )
        print(
            """Done Running.
            Press Q to quit, any other key to process another file.""")
        to_quit = input()
        if to_quit.lower().startswith("q"):
            sys.exit()
    # manual_test()
    return None
# Run the interactive driver only when executed as a script, not on import.
if __name__ == "__main__":
    main()
"""
Notes:
*
The declared parameter data types in python functions are not enforced as of
version 3.4.
*
For some reason, even if the name "key" was a parameter for process_textfile,
it was being passed to rot13_e as a string. In the function process_textfile,
Visual Basic also listed "key" as a string when passed to rot13_e even though
the function definition specified its data type as a float and the user input
for "key" was also converted to a float in the main function. This was caused
by a for-loop. When VS Code followed the definition of key (F12) when it
was passed to rot13_e, VS Code pointed to the temporary variable "key" in a
for-loop. The parameter name was changed as a quick fix.
- Adding an else clause to the for-loop did not fix it.
- The for-loop declaration was function-level code while the call to rot13_e
that bugged was inside an else-clause. The else-clause holding the call to
rot13_e was also function-level, same as the for-loop declaration. The call
to RotateCipher.rot13_e was assigned to output_string.
""" | for i in range(2):
dict_processedtext3a = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 2.txt",
encryption_algorithm="rotate",
| random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.